Example #1
def _eval_and_score_actions(cache,
                            model,
                            simulator,
                            num_actions,
                            batch_size,
                            observations,
                            action_paths=None):
    actions = cache.action_array[:num_actions]
    indices = np.random.RandomState(1).permutation(
        len(observations))[:AUCCESS_EVAL_TASKS]
    evaluator = phyre.Evaluator(
        [simulator.task_ids[index] for index in indices])
    for i, task_index in enumerate(indices):
        scores = eval_actions(model,
                              actions,
                              batch_size,
                              observations[task_index],
                              action_path=action_paths[task_index]).tolist()

        _, sorted_actions = zip(
            *sorted(zip(scores, actions), key=lambda x: (-x[0], tuple(x[1]))))
        for action in sorted_actions:
            if evaluator.get_attempts_for_task(i) >= phyre.MAX_TEST_ATTEMPTS:
                break
            status = simulator.simulate_action(task_index,
                                               action,
                                               need_images=False).status
            evaluator.maybe_log_attempt(i, status)
    return evaluator.get_aucess()
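For reference, the evaluation protocol that snippets like this implement follows the standard PHYRE loop: build a simulator and an Evaluator over the same task ids, log up to phyre.MAX_TEST_ATTEMPTS statuses per task, then read off the AUCCESS. A minimal sketch using the official fold and tier helpers (note the AUCCESS getter is spelled differently across phyre versions, as the examples on this page show):

import phyre

# Minimal end-to-end evaluation sketch for a random agent.
eval_setup = 'ball_cross_template'
train_ids, dev_ids, test_ids = phyre.get_fold(eval_setup, 0)
tier = phyre.eval_setup_to_action_tier(eval_setup)
simulator = phyre.initialize_simulator(dev_ids, tier)
evaluator = phyre.Evaluator(dev_ids)
for task_index in range(len(dev_ids)):
    while evaluator.get_attempts_for_task(task_index) < phyre.MAX_TEST_ATTEMPTS:
        action = simulator.sample()  # random valid action
        status = simulator.simulate_action(task_index, action,
                                           need_images=False).status
        evaluator.maybe_log_attempt(task_index, status)
print(evaluator.get_auccess())  # spelled get_aucess() in some phyre versions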
Example #2
    def eval(cls, state, task_ids, *args, **kwargs):
        mem_template_aware = kwargs.pop('mem_template_aware')

        evaluator = phyre.Evaluator(task_ids)
        cache = state['cache']
        train_statuses = state['simulation_statuses']
        if mem_template_aware:
            train_tpl_ids = frozenset(
                x.split(':')[0] for x in state['train_task_ids'])
            test_tpl_to_ids = collections.defaultdict(list)
            for task_id in task_ids:
                test_tpl_to_ids[task_id.split(':')[0]].append(task_id)
            within_template = (
                frozenset(test_tpl_to_ids) == frozenset(train_tpl_ids))
            if within_template:
                logging.info('Going to build sub-agent for each template id')
                for tpl, task_ids in test_tpl_to_ids.items():
                    indices = [
                        i for i, task_id in enumerate(state['train_task_ids'])
                        if task_id.split(':')[0] == tpl
                    ]
                    cls._eval(cache, train_statuses[indices], task_ids,
                              evaluator, *args, **kwargs)
            else:
                cls._eval(cache, train_statuses, task_ids, evaluator, *args,
                          **kwargs)
        else:
            cls._eval(cache, train_statuses, task_ids, evaluator, *args,
                      **kwargs)
        return evaluator
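The template-aware branch above depends on PHYRE task ids having the form 'TEMPLATE:INSTANCE' (e.g. '00000:001', as seen in Example #15), so splitting on ':' groups tasks by template. A tiny self-contained illustration:

import collections

task_ids = ['00000:001', '00000:002', '00001:001']  # 'TEMPLATE:INSTANCE'
tpl_to_ids = collections.defaultdict(list)
for task_id in task_ids:
    tpl_to_ids[task_id.split(':')[0]].append(task_id)
# -> {'00000': ['00000:001', '00000:002'], '00001': ['00001:001']}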
Example #3
    def eval(self, state, task_ids, tier):
        model = state['model']
        cache = state['cache']
        # NOTE: this agent only ranks actions seen in the training set,
        #       though it is also able to rank unseen actions.
        actions = state['cache'].action_array[:self.params['rank_size']]

        model.cuda()
        simulator = phyre.initialize_simulator(task_ids, tier)
        observations = simulator.initial_scenes
        evaluator = phyre.Evaluator(task_ids)

        for task_index in range(len(task_ids)):
            task_id = simulator.task_ids[task_index]
            observation = observations[task_index]
            scores = self.neural_model.eval_actions(
                model, actions, self.params['eval_batch_size'], observation)
            # Rank of the actions in descending order
            action_order = np.argsort(-scores)
            # Result of the actions are already stored in cache
            statuses = cache.load_simulation_states(task_id)

            for action_id in action_order:
                if evaluator.get_attempts_for_task(
                        task_index) >= self.params['max_attempts_per_task']:
                    break
                status = phyre.SimulationStatus(statuses[action_id])
                evaluator.maybe_log_attempt(task_index, status)
        return evaluator
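Rather than re-simulating every ranked action, this agent reads outcomes from the 100k-action cache: cache.load_simulation_states(task_id) returns one status per row of cache.action_array, in matching order. A short sketch of that lookup (the tier and task id are placeholders):

import phyre

cache = phyre.get_default_100k_cache('ball')
task_id = '00000:001'  # placeholder task id
statuses = cache.load_simulation_states(task_id)
actions = cache.action_array  # aligned row-for-row with statuses
solved = actions[statuses == phyre.simulation_cache.SOLVED]
print(f'{len(solved)} cached solving actions for {task_id}')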
Example #4
def evaluate_simple_agent(tasks, tier):
    """Evaluates the random agent on the given tasks/tier.

  Args:
      tasks: A list of task instances (strings) in the split to evaluate.
      tier: A string of the action tier.

  Returns:
      A Evaluator object updated with the results of all the siulations.
  """

    # Create a simulator for the task and tier.
    simulator = phyre.initialize_simulator(tasks, tier)
    evaluator = phyre.Evaluator(tasks)
    assert tuple(tasks) == simulator.task_ids
    tasks_solved = 0
    for task_index in tqdm(range(len(tasks)), desc='Evaluate tasks'):
        domain = [{
            'name': 'var1',
            'type': 'continuous',
            'domain': (0, 1)
        }, {
            'name': 'var2',
            'type': 'continuous',
            'domain': (0, 1)
        }, {
            'name': 'var3',
            'type': 'continuous',
            'domain': (0, 1)
        }]

        X_init = np.array([[0.5, .5, .5]])
        eval_result = evalAction(X_init, simulator, task_index, evaluator)
        Y_init = np.array([[eval_result['score']]])

        X_step = X_init
        Y_step = Y_init

        solved_task = eval_result['solved']
        while evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS and not solved_task:
            bo_step = GPyOpt.methods.BayesianOptimization(
                f=None,
                domain=domain,
                X=X_step,
                Y=Y_step,
                de_duplication=True,
                acquisition_type='MPI',
                model_type='sparseGP')
            x_next = bo_step.suggest_next_locations()
            eval_result = evalAction(x_next, simulator, task_index, evaluator)
            X_step = np.vstack((X_step, x_next))
            Y_step = np.vstack((Y_step, eval_result['score']))
            #if eval_result['valid']:
            #  print(tasks[task_index],evaluator.get_attempts_for_task(task_index),x_next,eval_result)
            if eval_result['solved']:
                solved_task = True

        if solved_task:
            tasks_solved += 1

    print(tasks_solved, "Tasks solved out of ", len(tasks), "Total Tasks")
    return evaluator
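evalAction is a project-specific helper; from the call sites here and in Example #7 it appears to simulate a single (x, y, r) action, log the attempt, and return a dict with 'score', 'solved', and 'valid' keys. A hypothetical reconstruction, with the scoring convention an assumption:

import numpy as np

def evalAction(action, simulator, task_index, evaluator):
    """Hypothetical sketch: simulate one action and report the outcome."""
    action = np.asarray(action).reshape(-1)  # (x, y, r) in [0, 1]
    result = simulator.simulate_action(task_index, action, need_images=False)
    evaluator.maybe_log_attempt(task_index, result.status)
    solved = result.status.is_solved()
    # Assumed convention: the optimizer minimizes the score.
    return {'score': 0.0 if solved else 1.0,
            'solved': solved,
            'valid': not result.status.is_invalid()}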
Example #5
    def real_eval(cls, cache, model, actions, task_ids, tier,
                  max_attempts_per_task, eval_batch_size, finetune_iterations,
                  refine_iterations, refine_loss, refine_lr):

        # TODO: move to a flag.
        finetune_lr = 1e-4

        model.cuda()

        simulator = phyre.initialize_simulator(task_ids, tier)
        observations = simulator.initial_scenes
        assert tuple(task_ids) == simulator.task_ids

        logging.info('Ranking %d actions and simulating top %d', len(actions),
                     max_attempts_per_task)
        if refine_iterations > 0:
            logging.info(
                'Will do refining for %d iterations with lr=%e and loss=%s',
                refine_iterations, refine_lr, refine_loss)
        evaluator = phyre.Evaluator(task_ids)
        for task_index in tqdm.trange(len(task_ids)):
            task_id = simulator.task_ids[task_index]
            if refine_iterations > 0:
                refined_actions = neural_agent.refine_actions(
                    model, actions, observations[task_index], refine_lr,
                    refine_iterations, eval_batch_size, refine_loss)
            else:
                refined_actions = actions
            scores = neural_agent.eval_actions(model, refined_actions,
                                               eval_batch_size,
                                               observations[task_index])
            # Actions in order of descending score.
            action_order = np.argsort(-scores)
            if refine_iterations <= 0:
                statuses = cache.load_simulation_states(task_id)

            finetune_data = []
            for action_id in action_order:
                if evaluator.get_attempts_for_task(
                        task_index) >= max_attempts_per_task:
                    break
                action = refined_actions[action_id]
                if refine_iterations > 0:
                    status = simulator.simulate_action(
                        task_index,
                        action,
                        need_images=False,
                        need_scenes=False).status
                else:
                    status = phyre.SimulationStatus(statuses[action_id])
                finetune_data.append((task_index, status, action))
                evaluator.maybe_log_attempt(task_index, status)
            if evaluator.get_attempts_for_task(task_index) == 0:
                logging.warning('Made 0 attempts for task %s', task_id)
            if finetune_iterations > 0:
                neural_agent.finetune(model, finetune_data, simulator,
                                      finetune_lr, finetune_iterations)

        return evaluator
Example #6
    def eval(cls, state, task_ids, max_attempts_per_task, **kwargs):
        cache = state['cache']
        evaluator = phyre.Evaluator(task_ids)
        for i, task_id in enumerate(task_ids):
            statuses = cache.load_simulation_states(task_id)
            valid_statuses = statuses[
                statuses != phyre.simulation_cache.INVALID]
            for status in valid_statuses[:max_attempts_per_task]:
                evaluator.maybe_log_attempt(i, status)
        return evaluator
Example #7
def evaluate_simple_agent(tasks, tier):
    """Evaluates the random agent on the given tasks/tier.

  Args:
      tasks: A list of task instances (strings) in the split to evaluate.
      tier: A string of the action tier.

  Returns:
      A Evaluator object updated with the results of all the siulations.
  """

    # Create a simulator for the task and tier.
    simulator = phyre.initialize_simulator(tasks, tier)
    evaluator = phyre.Evaluator(tasks)
    assert tuple(tasks) == simulator.task_ids
    tasks_solved = 0
    for task_index in tqdm(range(len(tasks)), desc='Evaluate tasks'):
        simFunc = partial(evalAction,
                          simulator=simulator,
                          task_index=task_index,
                          evaluator=evaluator)
        space = {
            'x': hp.uniform('x', 0, 1),
            'y': hp.uniform('y', 0, 1),
            'r': hp.uniform('r', 0, 1),
        }
        trials = Trials()

        max_evals = 0

        solved_task = False
        while evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS and not solved_task:
            max_evals += phyre.MAX_TEST_ATTEMPTS - evaluator.get_attempts_for_task(
                task_index)
            best = fmin(simFunc,
                        space=space,
                        algo=tpe.suggest,
                        max_evals=max_evals,
                        trials=trials,
                        rstate=np.random.RandomState(0),  # fmin expects a numpy RNG; random.seed(0) returns None
                        show_progressbar=False)
            counter = Counter(result['solved'] for result in trials.results)
            solved_task = counter[True] > 0
            if solved_task:
                tasks_solved += 1

    print(tasks_solved, "Tasks solved out of ", len(tasks), "Total Tasks")
    return evaluator
Example #8
    def eval(cls, state: State, task_ids: TaskIds, max_attempts_per_task: int,
             oracle_rank_size: int, **kwargs):
        assert oracle_rank_size
        cache = state['cache']
        evaluator = phyre.Evaluator(task_ids)
        for i, task_id in enumerate(task_ids):
            statuses = cache.load_simulation_states(task_id)[:oracle_rank_size]
            assert len(statuses) == oracle_rank_size, (len(statuses),
                                                       oracle_rank_size)
            if (statuses == phyre.simulation_cache.SOLVED).any():
                evaluator.maybe_log_attempt(i, phyre.SimulationStatus.SOLVED)
            else:
                evaluator.maybe_log_attempt(i,
                                            phyre.SimulationStatus.NOT_SOLVED)
        return evaluator
Example #9
def evaluate_random_agent(tasks, tier):
    # Create a simulator for the task and tier.
    simulator = phyre.initialize_simulator(tasks, tier)
    evaluator = phyre.Evaluator(tasks)
    assert tuple(tasks) == simulator.task_ids
    images = []
    actions = []
    for task_index in tqdm_notebook(range(len(tasks)), desc='Evaluate tasks'):
        while evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS:
            # Sample a random valid action from the simulator for the given action space.
            action = simulator.sample()
            # Simulate the action and log the resulting status with the evaluator.
            simulation = simulator.simulate_action(task_index,
                                                   action,
                                                   need_images=True)
            actions.append(action)
            images.append(simulation.images)
            evaluator.maybe_log_attempt(task_index, simulation.status)
    return evaluator, images, actions
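The images returned above are indexed observation frames; for inspection they can be converted to RGB with the helpers used elsewhere in these examples. A small usage sketch (fold and tier obtained via the official API):

import matplotlib.pyplot as plt
import phyre

eval_setup = 'ball_cross_template'
_, dev_tasks, _ = phyre.get_fold(eval_setup, 0)
tier = phyre.eval_setup_to_action_tier(eval_setup)
evaluator, images, actions = evaluate_random_agent(dev_tasks[:5], tier)
# Each entry of `images` is a stack of indexed frames from one simulation.
plt.imshow(phyre.observations_to_uint8_rgb(images[0][0]))
plt.show()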
Example #10
    def eval(cls, state: State, task_ids: TaskIds, max_attempts_per_task: int,
             tier: str, **kwargs):

        cache = state['cache']
        evaluator = phyre.Evaluator(task_ids)
        simulator = phyre.initialize_simulator(task_ids, tier)

        assert tuple(task_ids) == simulator.task_ids
        for i, task_id in enumerate(task_ids):
            statuses = cache.load_simulation_states(task_id)
            valid_mask = statuses != phyre.simulation_cache.INVALID
            actions, statuses = cache.action_array[valid_mask], statuses[
                valid_mask]
            for action, status in zip(actions, statuses):
                if evaluator.get_attempts_for_task(i) >= max_attempts_per_task:
                    break
                if cls.in_prior(action, simulator._tasks[i].scene.bodies):
                    evaluator.maybe_log_attempt(i, status)
            else:
                print("Not enough actions in prior", task_id,
                      evaluator.get_attempts_for_task(i))

        return evaluator
Example #11
def simulate_result(chosen_action, chosen_score, model_number,
                    generation_number):
    eval_setup = 'ball_cross_template'
    fold_id = 0  # For simplicity, we will just use one fold for evaluation.
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold_id)
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    tasks = dev_tasks[0:1]
    simulator = phyre.initialize_simulator(tasks, action_tier)
    evaluator = phyre.Evaluator(tasks)
    # Simulate the given action and add the status from taking the action to the evaluator.
    simulation_result = simulator.simulate_action(0,
                                                  chosen_action,
                                                  need_images=True,
                                                  need_featurized_objects=True)
    simulation_score = sf.ScoreFunctionValue(simulation_result)
    pair = np.array([chosen_action, simulation_score])
    timestr = time.strftime("%Y%m%d-%H%M%S")
    score_pair = [
        chosen_score, simulation_score, model_number, generation_number
    ]
    score_string = "ScoreLog" + timestr
    path = "/home/kyra/Desktop/phyre/agents/Scores"
    np.save(os.path.join(path, score_string), score_pair)
    return pair, simulation_result
Example #12
    def _eval_and_score_actions(self, cache, model, data, num_actions,
                                batch_size, num_tasks):
        """ Evaluate the AUCESS for the given data & model"""
        _, _, _, simulator, observations = data

        actions = cache.action_array[:num_actions]
        indices = np.random.RandomState(1).permutation(
            len(observations))[:num_tasks]
        evaluator = phyre.Evaluator(
            [simulator.task_ids[index] for index in indices])
        for i, task_index in enumerate(indices):
            scores = self.eval_actions(model, actions, batch_size,
                                       observations[task_index]).tolist()
            _, sorted_actions = zip(*sorted(
                zip(scores, actions), key=lambda x: (-x[0], tuple(x[1]))))
            for action in sorted_actions:
                if (evaluator.get_attempts_for_task(i) >=
                        phyre.MAX_TEST_ATTEMPTS):
                    break
                simulation = simulator.simulate_action(task_index,
                                                       action,
                                                       need_images=False)
                evaluator.maybe_log_attempt(i, simulation.simulation_status)
        return evaluator.get_aucess()
Example #13
def get_auccess(solver, tasks, solve_noise=False, save_tries=False, brute=False):
    if save_tries:
        font = ImageFont.truetype("/usr/share/fonts/truetype/ubuntu/Ubuntu-R.ttf", 10)

    eval_setup = 'ball_within_template'
    sim = phyre.initialize_simulator(tasks, 'ball')
    init_scenes = T.tensor([
        [cv2.resize((scene == channel).astype(float), (32, 32))
         for channel in range(2, 7)]
        for scene in sim.initial_scenes
    ]).float().flip(-2)
    eva = phyre.Evaluator(tasks)

    # Get Actions from solver:
    if brute:
        all_actions = solver.get_actions(tasks, init_scenes, brute=True)
    else:
        all_actions = solver.get_actions(tasks, init_scenes)
    #L.info(list(zip(tasks, all_actions)))
    #return 0

    # Loop through actions
    for t_idx, task in enumerate(tasks):
        # Get 100 actions from solver
        if solve_noise:
            # expects one action for task
            task_actions = [all_actions[t_idx]]
        else:
            # expects 100 actions for task
            task_actions = all_actions[t_idx]
    
        # Loop through actions
        for j, action in enumerate(task_actions):
            # Setting up visualization array
            vis_wid = 64
            vis_stack = T.zeros(6,10,vis_wid,vis_wid,3)
            vis_count = 1

            # Simulate action
            res = sim.simulate_action(t_idx, action, need_featurized_objects=False)  

            # Refining if invalid Action
            t = 0
            temp = 1
            base_action = action.copy()
            L.info(f"{base_action} base action")
            # Checking for valid action
            while res.status.is_invalid():
                t += 1
                action = base_action + (np.random.rand(3)-0.5)*0.05*temp
                L.info(action, f"potential action for task {task}")
                res = sim.simulate_action(t_idx, action,  need_featurized_objects=False)
                temp *= 1.01 if temp <5 else 1
                #assert(t>500, "too many invalid tries")
            L.info(f"{action} valid action")

            # Log first Attempt
            eva.maybe_log_attempt(t_idx, res.status)
            # Visualizing first attempt
            if save_tries:
                for i in range(min(len(res.images), 10)):
                    vis_stack[0,i] = T.tensor(cv2.resize(phyre.observations_to_uint8_rgb(res.images[i]), (vis_wid,vis_wid)))

            # Collecting 100 Actions if solve noise
            warning_flag = False
            if solve_noise:
                base_action = action
                temp = 1
                error = False
                t = 0
                delta_generator = action_delta_generator()

                # Looping while less then 100 attempts
                while eva.attempts_per_task_index[t_idx]<100:
                    # Searching for new action while not solved
                    if not res.status.is_solved():
                        """ OLD APPROACH
                        action = base_action + (np.random.rand(3)-0.5)*np.array([0.3,0.05,0.05])*temp
                        temp *= 1.01 if temp <5 else 1
                        """
                        if t<1000:
                            action = base_action + delta_generator.__next__()
                            res = sim.simulate_action(t_idx, action,  need_featurized_objects=False)
                            eva.maybe_log_attempt(t_idx, res.status)
                            t += 1
                        else:
                            if not warning_flag:
                                L.info(f"WARNING can't find valid action for {task}")
                                warning_flag = True
                                error = True
                            eva.maybe_log_attempt(t_idx, phyre.SimulationStatus.NOT_SOLVED)

                    # if solved -> repeating action
                    else:
                        if not warning_flag:
                            L.info(f"{task} solved after", eva.attempts_per_task_index[t_idx])

                            # Visualization
                            if save_tries and not error:
                                for i in range(min(len(res.images), 10)):
                                    vis_stack[5,i] = T.tensor(cv2.resize(phyre.observations_to_uint8_rgb(res.images[i]), (vis_wid,vis_wid)))
                        warning_flag = True
                        eva.maybe_log_attempt(t_idx, res.status)
                    
                    # Visualization
                    if save_tries and not error and not res.status.is_invalid() and t and vis_count<5:
                        for i in range(min(len(res.images), 10)):
                            vis_stack[vis_count,i] = T.tensor(cv2.resize(phyre.observations_to_uint8_rgb(res.images[i]), (vis_wid,vis_wid)))
                        vis_count +=1

                if not warning_flag and not res.status.is_solved() and eva.attempts_per_task_index[t_idx]==100:
                    L.info(f"{task} not solved")
                vis_batch(vis_stack, f'result/solver/pyramid', f"{task}_attempts")
            # Not Solve Noise Case
            else:
                # Visualization
                if save_tries and not res.status.is_invalid() and vis_count<5:
                    for i in range(min(len(res.images), 10)):
                        vis_stack[vis_count,i] = T.tensor(cv2.resize(phyre.observations_to_uint8_rgb(res.images[i]), (vis_wid,vis_wid)))
                    vis_count +=1
                if res.status.is_solved():
                    L.info(f"{task} solved after", eva.attempts_per_task_index[t_idx])
                    vis_batch(vis_stack, f'result/solver/pyramid', f"{task}_attempts")
                    while eva.attempts_per_task_index[t_idx]<100:
                        eva.maybe_log_attempt(t_idx, res.status)
                    break
    
    return eva.get_auccess()
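action_delta_generator is project-specific; from its use above, and from the commented-out "OLD APPROACH", it plausibly yields an endless stream of small (x, y, r) perturbations with slowly growing magnitude. A hypothetical stand-in:

import numpy as np

def action_delta_generator(temp=1.0, growth=1.01, cap=5.0):
    """Hypothetical sketch: yield ever-larger random action perturbations."""
    while True:
        yield (np.random.rand(3) - 0.5) * 0.05 * temp
        if temp < cap:
            temp *= growth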
Example #14
                    T.cat((sub, T.ones(32, 1) * 0.5), dim=1)
                    for sub in X[inspect]),
                      dim=1))
            #plt.imsave(f"result/flownet/{inspect}_init_scene.png", np.flip(batch[inspect][0], axis=0))
            plt.imsave(f"result/flownet/{inspect}_action.png",
                       action_paths[inspect, 0])
            plt.imsave(f"result/flownet/{inspect}_selection.png", B[inspect,
                                                                    0])

    gen_actions = []
    for b in B[:, 0]:
        gen_actions.append(pic_to_action_vector(b))
    print("Extracted actions:\n", gen_actions)

    # Feed actions into simulator
    eva = phyre.Evaluator(tasks)
    solved, valid, comb, avg_tries = dict(), dict(), dict(), dict()
    for i, t in enumerate(tasks):
        print(f"{i} solving {t}", end='\r')
        if t[:5] not in comb:
            comb[t[:5]] = 0
            valid[t[:5]] = 0
            solved[t[:5]] = 0
            avg_tries[t[:5]] = []
        base_action = gen_actions[i]
        # Random Agent Intercept:
        #action = sim.sample()
        res = sim.simulate_action(i, base_action)
        alpha = 1
        # 100 Tries Max:
        while eva.get_attempts_for_task(i) < 100:
Example #15
def solve(model, model2, save_images=False):
    tasks = [
        '00000:001', '00000:002', '00000:003', '00000:004', '00000:005',
        '00001:001', '00001:002', '00001:003', '00001:004', '00001:005',
        '00002:007', '00002:011', '00002:015', '00002:017', '00002:023',
        '00003:000', '00003:001', '00003:002', '00003:003', '00003:004',
        '00004:063', '00004:071', '00004:092', '00004:094', '00004:095'
    ]
    tasks = json.load(open("most_tasks.txt", 'r'))

    eval_setup = 'ball_within_template'
    fold_id = 0  # For simplicity, we will just use one fold for evaluation.
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold_id)
    print('Size of resulting splits:\n train:', len(train_tasks), '\n dev:',
          len(dev_tasks), '\n test:', len(test_tasks))

    tasks = train_tasks[:]
    print("tasks:\n", tasks)
    sim = phyre.initialize_simulator(tasks, 'ball')
    init_scenes = sim.initial_scenes
    X = T.tensor(format(init_scenes)).float()
    print("Init Scenes Shape:\n", X.shape)

    base_path = []
    action_path = []
    for i, t in enumerate(tasks):
        while True:
            action = sim.sample(i)
            action[2] = 0.01
            res = sim.simulate_action(i, action, stride=20)
            if res.images is not None:
                base_path.append(rollouts_to_channel([res.images], 2))
                action_path.append(rollouts_to_channel([res.images], 1))
                break
    base_path = T.tensor(np.concatenate(base_path)).float()
    action_path = T.tensor(np.concatenate(action_path)).float()
    with T.no_grad():
        Z = model(X)
        A = model2(T.cat((X[:, 1:], base_path[:, None], Z), dim=1))
    #B = model3(T.cat((X[:,1:], Y[:,None,2], Z, A), dim=1))
    #B = extract_action(A, inspect=-2 if save_images else -1)
    B = extract_action(action_path[:, None], inspect=-2 if save_images else -1)

    # Saving Images:
    if save_images:
        for inspect in range(len(X)):
            plt.imsave(
                f"result/flownet/{inspect}_init.png",
                T.cat(tuple(
                    T.cat((sub, T.ones(32, 1) * 0.5), dim=1)
                    for sub in X[inspect]),
                      dim=1))
            plt.imsave(f"result/flownet/{inspect}_base.png",
                       base_path[inspect])
            plt.imsave(f"result/flownet/{inspect}_target.png", Z[inspect, 0])
            #plt.imsave(f"result/flownet/{inspect}_init_scene.png", np.flip(batch[inspect][0], axis=0))
            plt.imsave(f"result/flownet/{inspect}_action.png", A[inspect, 0])
            plt.imsave(f"result/flownet/{inspect}_selection.png", B[inspect,
                                                                    0])
    gen_actions = []
    for b in B[:, 0]:
        gen_actions.append(pic_to_values(b))
    print(gen_actions)

    # Feed actions into simulator
    eva = phyre.Evaluator(tasks)
    solved, valid, comb = dict(), dict(), dict()
    for i, t in enumerate(tasks):
        if t[:5] not in comb:
            comb[t[:5]] = 0
            valid[t[:5]] = 0
            solved[t[:5]] = 0

        base_action = gen_actions[i]
        # Random Agent Intercept:
        #action = sim.sample()
        res = sim.simulate_action(i, base_action)
        tries = 0
        alpha = 1
        # 100 Tries Max:
        while eva.get_attempts_for_task(i) < 100:
            if not res.status.is_solved():
                action = np.array(base_action) + np.random.randn(3) * np.array(
                    [0.1, 0.1, 0.1]) * alpha
                res = sim.simulate_action(i, action)

                subtries = 0
                while subtries < 100 and res.status.is_invalid():
                    subtries += 1
                    action_var = np.array(action) + np.random.randn(
                        3) * np.array([0.05, 0.05, 0.05]) * alpha
                    res = sim.simulate_action(i, action_var)

                eva.maybe_log_attempt(i, res.status)
                alpha *= 1.01
            else:
                eva.maybe_log_attempt(i, res.status)
            tries += 1

        if save_images:
            try:
                for k, img in enumerate(res.images):
                    plt.imsave(f"result/flownet/{i}_{k}.png",
                               np.flip(img, axis=0))
            except Exception:
                pass
        #print(i, t, res.status.is_solved(), not res.status.is_invalid())
        comb[t[:5]] = comb[t[:5]] + 1
        if not res.status.is_invalid():
            valid[t[:5]] = valid[t[:5]] + 1
        if res.status.is_solved():
            solved[t[:5]] = solved[t[:5]] + 1

    # Prepare Plotting
    print(eva.compute_all_metrics())
    print(eva.get_auccess())
    spacing = [1, 2, 3, 4]
    fig, ax = plt.subplots(5, 5, sharey=True, sharex=True)
    for i, t in enumerate(comb):
        ax[i // 5, i % 5].bar(spacing, [
            solved[t[:5]] /
            (valid[t[:5]] if valid[t[:5]] else 1), solved[t[:5]] / comb[t[:5]],
            valid[t[:5]] / comb[t[:5]], comb[t[:5]] / 100
        ])
        ax[i // 5, i % 5].set_xlabel(t[:5])
    plt.show()
Example #16
    def __init__(self, simulator, task_ids, nsteps, max_attempts_per_task):
        self.simulator = simulator
        self.task_ids = task_ids
        self.evaluators = [phyre.Evaluator(task_ids) for _ in range(nsteps)]
        self.max_attempts_per_task = max_attempts_per_task
Example #17
def evaluate_agent(task_ids, tier, solved_actions_pdf):
    cache = phyre.get_default_100k_cache(tier)
    evaluator = phyre.Evaluator(task_ids)
    simulator = phyre.initialize_simulator(task_ids, tier)
    task_data_dict = phyre.loader.load_compiled_task_dict()
    stride = 5
    empty_action = phyre.simulator.scene_if.UserInput()
    tasks_solved = 0

    for task_index in tqdm(range(len(task_ids)), desc='Evaluate tasks'):
        task_id = task_ids[task_index]
        task_type = task_id.split(":")[0]
        task_data = task_data_dict[task_id]
        statuses = cache.load_simulation_states(task_id)
        _, _, images, _ = phyre.simulator.magic_ponies(task_data,
                                                       empty_action,
                                                       need_images=True,
                                                       stride=stride)

        evaluator.maybe_log_attempt(task_index,
                                    phyre.simulation_cache.NOT_SOLVED)

        seq_data = ImgToObj.getObjectAndGoalSequence(images)

        goal_type = ImgToObj.Layer.dynamic_goal.value
        if goal_type not in images[0]:
            goal_type = ImgToObj.Layer.static_goal.value

        tested_actions = np.array([[-1, -1, -1, 1]])

        solved_task = False

        while evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS and not solved_task:
            random_action = np.random.random_sample((1, 4))
            if task_type in solved_actions_pdf and np.random.random_sample(
            ) >= .25:
                random_action[0, 0:3] = np.squeeze(
                    solved_actions_pdf[task_type].resample(size=1))

            test_action_dist = np.linalg.norm(tested_actions[:, 0:3] -
                                              random_action[:, 0:3],
                                              axis=1)
            if np.any(test_action_dist <= tested_actions[:, 3]
                      ) and np.random.random_sample() >= .75:
                continue
            if ImgToObj.check_seq_action_intersect(
                    images[0], seq_data, stride, goal_type,
                    np.squeeze(random_action[:, 0:3])):
                eval_stride = 10
                goal = 3.0 * 60.0 / eval_stride
                sim_result = simulator.simulate_action(
                    task_index,
                    np.squeeze(random_action[:, 0:3]),
                    need_images=True,
                    stride=eval_stride)
                evaluator.maybe_log_attempt(task_index, sim_result.status)
                if not sim_result.status.is_invalid():
                    score = ImgToObj.objectTouchGoalSequence(sim_result.images)
                    eval_dist = .25 * (score == 0) + .1
                    random_action[0, 3] = eval_dist
                    tested_actions = np.concatenate(
                        (tested_actions, random_action), 0)
                    solved_task = sim_result.status.is_solved()
                    tasks_solved += solved_task

    print(tasks_solved, "Tasks solved out of ", len(task_ids), "Total Tasks")
    return (evaluator.get_aucess(), tasks_solved, len(task_ids))
Example #18
def evaluate_agent(tasks, tier):
    """Evaluates the random agent on the given tasks/tier.

  Args:
      tasks: A list of task instances (strings) in the split to evaluate.
      tier: A string of the action tier.

  Returns:
      A Evaluator object updated with the results of all the siulations.
  """

    # Create a simulator for the task and tier.
    simulator = phyre.initialize_simulator(tasks, tier)
    evaluator = phyre.Evaluator(tasks)
    task_data_dict = phyre.loader.load_compiled_task_dict()
    empty_action = phyre.simulator.scene_if.UserInput()
    tasks_solved = 0
    for task_index in tqdm(range(len(tasks)), desc='Evaluate tasks'):
        task_id = tasks[task_index]
        task_data = task_data_dict[task_id]
        _, _, images, _ = phyre.simulator.magic_ponies(task_data,
                                                       empty_action,
                                                       need_images=True,
                                                       stride=100)

        evaluator.maybe_log_attempt(task_index,
                                    phyre.simulation_cache.NOT_SOLVED)

        seq_data = ImgToObj.getObjectAndGoalSequence(images)
        goal_type = ImgToObj.Layer.dynamic_goal.value
        if goal_type not in images[0]:
            goal_type = ImgToObj.Layer.static_goal.value

        simFunc = partial(evalAction,
                          initial_img=images[0],
                          seq_data=seq_data,
                          goal_type=goal_type,
                          simulator=simulator,
                          task_index=task_index,
                          evaluator=evaluator)
        space = {
            'x': hp.uniform('x', 0, 1),
            'y': hp.uniform('y', 0, 1),
            'r': hp.uniform('r', 0, 1),
        }
        trials = Trials()

        max_evals = 0

        solved_task = False
        best_score = 0

        while evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS and not solved_task:
            max_evals += phyre.MAX_TEST_ATTEMPTS - evaluator.get_attempts_for_task(
                task_index)
            if best_score > -1.0:
                best = fmin(simFunc,
                            space=space,
                            algo=hyperopt.rand.suggest,
                            max_evals=max_evals,
                            trials=trials,
                            rstate=np.random.RandomState(0),  # fmin expects a numpy RNG; random.seed(0) returns None
                            show_progressbar=False)
            else:
                best = fmin(simFunc,
                            space=space,
                            algo=tpe.suggest,
                            max_evals=max_evals,
                            trials=trials,
                            rstate=np.random.RandomState(0),  # fmin expects a numpy RNG; random.seed(0) returns None
                            show_progressbar=False)
            counter = Counter(result['solved'] for result in trials.results)
            solved_task = counter[True] > 0
            tasks_solved += solved_task
            best_score = trials.best_trial['result']['loss']

    print(tasks_solved, "Tasks solved out of ", len(tasks), "Total Tasks")
    return (evaluator.get_aucess(), tasks_solved, len(tasks))
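For fmin to work as used here, the objective (evalAction wrapped via partial) must return a dict with hyperopt's required 'loss' and 'status' keys plus the custom 'solved' flag that the Counter reads back from trials.results. A hypothetical sketch of that contract:

import numpy as np
from hyperopt import STATUS_OK

def hyperopt_objective(params, simulator, task_index, evaluator):
    """Hypothetical sketch of the objective contract assumed by fmin above."""
    action = np.array([params['x'], params['y'], params['r']])
    result = simulator.simulate_action(task_index, action, need_images=False)
    evaluator.maybe_log_attempt(task_index, result.status)
    solved = result.status.is_solved()
    # hyperopt minimizes 'loss'; 'solved' is read back via trials.results.
    return {'loss': 0.0 if solved else 1.0,
            'status': STATUS_OK,
            'solved': solved}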
Example #19
t0 = time.time()
min_index = np.argmin(np.linalg.norm(cache.action_array - test_action,axis=1))
t1 = time.time()
print((t1-t0), "Search Time")

print(test_action[:,0:2])
print(cache.action_array[min_index,0:2])
print(np.linalg.norm(test_action[:,0:2] - cache.action_array[min_index,0:2]))

print(np.concatenate((cache.action_array[min_index,:][np.newaxis,:],test_action),0))

task_ids = list(cache.task_ids)[0:2]

print(task_ids)

evaluator = phyre.Evaluator(task_ids)
print(evaluator.task_ids)
'''
for i in range(len(task_ids)-1):
    while evaluator.get_attempts_for_task(i) < phyre.MAX_TEST_ATTEMPTS:
        evaluator.maybe_log_attempt(i, phyre.simulation_cache.SOLVED)
'''
for i in range(len(task_ids)):
    while evaluator.get_attempts_for_task(i) < phyre.MAX_TEST_ATTEMPTS - 91:
        evaluator.maybe_log_attempt(i, phyre.simulation_cache.NOT_SOLVED)
    evaluator.maybe_log_attempt(i, phyre.simulation_cache.SOLVED)
print(evaluator.get_aucess())

k = np.zeros(100)
auccess = np.zeros(100)
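The experiment above probes how AUCCESS rewards early solves. Per the PHYRE paper, AUCCESS is a weighted success rate over attempt budgets k = 1..100 with weights w_k = log(k+1) - log(k), so a solve on attempt 1 is worth far more than one on attempt 90. A small sketch of the metric under that definition (solved_at[i] is the first solving attempt for task i, or None):

import numpy as np

def auccess(solved_at, max_attempts=100):
    """Sketch of AUCCESS as defined in the PHYRE paper."""
    ks = np.arange(1, max_attempts + 1)
    weights = np.log(ks + 1) - np.log(ks)
    success_at_k = np.array([
        sum(a is not None and a <= k for a in solved_at) for k in ks
    ]) / len(solved_at)
    return float((weights * success_at_k).sum() / weights.sum())

print(auccess([10, None]))  # two tasks, one solved on attempt 10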
Example #20
def solve(tasks,
          generator,
          save_images=False,
          force_collect=False,
          static=256,
          show=False):
    # Collect Interaction Data
    data_path = './data/cgan_solver'
    if not os.path.exists(data_path + '/interactions.pickle') or force_collect:
        os.makedirs(data_path, exist_ok=True)
        wid = generator.width
        print("Collecting Data")
        collect_interactions(data_path,
                             tasks,
                             10,
                             stride=1,
                             size=(wid, wid),
                             static=static)
    with open(data_path + '/interactions.pickle', 'rb') as fs:
        X = T.tensor(pickle.load(fs), dtype=T.float)
    with open(data_path + '/info.pickle', 'rb') as fs:
        info = pickle.load(fs)
        tasklist = info['tasks']
        positions = info['pos']
        orig_actions = info['action']
    print('loaded dataset with shape:', X.shape)
    #data_set = T.utils.data.TensorDataset(X)
    #data_loader = T.utils.data.DataLoader(data_set, batch_size=BATCH_SIZE, shuffle=False)

    # Sim SETUP
    print('Successful collection for tasks:\n', tasklist)
    eval_setup = 'ball_within_template'
    sim = phyre.initialize_simulator(tasklist, 'ball')
    eva = phyre.Evaluator(tasklist)

    # Solve Loop
    error = np.zeros((X.shape[0], 3))
    generator.eval()
    solved, tried = 0, 0
    for i, task in enumerate(tasklist):
        # generate 'fake'
        noise = T.randn(1, generator.noise_dim)
        with T.no_grad():
            fake = generator((X[i, :generator.s_chan])[None], noise)[0, 0]
        #action = np.array(pic_to_action_vector(fake, r_fac=1.8))
        action = np.array(pic_to_action_vector(fake.numpy(), r_fac=1))
        raw_action = action.copy()

        # PROCESS ACTION
        print(action, 'raw')
        # shift by half to get relative position
        action[:2] -= 0.5
        # multiply by half because extracted scope is already half of the scene
        action[:2] *= 0.5
    # multiply by 4 because the action value is always 4*diameter -> 8*radius, but the scope is already halved -> 8*0.5*radius
        action[2] *= 4
        # finetuning
        action[2] *= 1.0
    print(action, 'relative')
        pos = positions[i]
        print(pos)
        action[:2] += pos
        print(action, 'added')
        res = sim.simulate_action(i, action, need_featurized_objects=True)

        # Noisy tries while invalid actions
        t = 0
        temp = 1
        base_action = action
        while res.status.is_invalid() and t < 200:
            t += 1
            action = base_action + (np.random.rand(3) - 0.5) * 0.01 * temp
            res = sim.simulate_action(i, action, need_featurized_objects=False)
            temp *= 1.01
        print(action, 'final action')

        # Check for and log Solves
        if not res.status.is_invalid():
            tried += 1
        if res.status.is_solved():
            solved += 1
        print(orig_actions[i], 'orig action')
        print(task, "solved", res.status.is_solved())
        error[i] = orig_actions[i] - base_action

        # Visualization
        if show:
            x, y, d = np.round(raw_action * fake.shape[0])
            y = fake.shape[0] - y
            print(x, y, d)

            def generate_crosses(points):
                xx = []
                yy = []
                for x, y in points:
                    xx.extend([x, x + 1, x - 1, x, x])
                    yy.extend([y, y, y, y + 1, y - 1])
                return xx, yy

            xx, yy = [
                x, (x + d) if (x + d) < fake.shape[0] - 1 else 62, x - d, x, x
            ], [
                y, y, y, (y + d) if (y + d) < fake.shape[0] - 1 else 62, y - d
            ]
            xx, yy = generate_crosses(zip(xx, yy))
            fake[yy, xx] = 0.5
            os.makedirs(f'result/cgan_solver/vector_extractions',
                        exist_ok=True)
            plt.imsave(f'result/cgan_solver/vector_extractions/{i}.png', fake)
            if not res.status.is_invalid():
                os.makedirs(f'result/cgan_solver/scenes', exist_ok=True)
                plt.imsave(f'result/cgan_solver/scenes/{i}.png',
                           res.images[0, ::-1])
            else:
                print("invalid")
                plt.imshow(
                    phyre.observations_to_float_rgb(sim.initial_scenes[i]))
                plt.show()

    print("solving percentage:", solved / tried, 'overall:', tried)
    print("mean x error:", np.mean(error[:, 0]), 'mean x abs error:',
          np.mean(np.abs(error[:, 0])))
    print("mean y error:", np.mean(error[:, 1]), 'mean y abs error:',
          np.mean(np.abs(error[:, 1])))
    print("mean r error:", np.mean(error[:, 2]), 'mean r abs error:',
          np.mean(np.abs(error[:, 2])))
Example #21
def evaluate_agent(task_ids, tier, solved_actions_pdf):
    cache = phyre.get_default_100k_cache(tier)
    evaluator = phyre.Evaluator(task_ids)
    simulator = phyre.initialize_simulator(task_ids, tier)
    task_data_dict = phyre.loader.load_compiled_task_dict()
    stride = 100
    eval_stride = 2
    goal = 3.0 * 60.0 / eval_stride
    empty_action = phyre.simulator.scene_if.UserInput()
    tasks_solved = 0
    alpha = 1.0
    N = 5
    max_actions = 100

    for task_index in tqdm(range(len(task_ids)), desc='Evaluate tasks'):
        task_id = task_ids[task_index]
        task_type = task_id.split(":")[0]
        task_data = task_data_dict[task_id]
        statuses = cache.load_simulation_states(task_id)
        _, _, images, _ = phyre.simulator.magic_ponies(task_data,
                                                       empty_action,
                                                       need_images=True,
                                                       stride=stride)

        evaluator.maybe_log_attempt(task_index,
                                    phyre.simulation_cache.NOT_SOLVED)

        seq_data = ImgToObj.getObjectAndGoalSequence(images)

        goal_type = ImgToObj.Layer.dynamic_goal.value
        if goal_type not in images[0]:
            goal_type = ImgToObj.Layer.static_goal.value

        tested_actions = np.array([[-1, -1, -1, 1, 0]])

        solved_task = False
        max_score = 0
        while evaluator.get_attempts_for_task(
                task_index
        ) < phyre.MAX_TEST_ATTEMPTS and not solved_task and max_score < 1.0:
            random_action = np.random.random_sample((1, 5))
            if task_type in solved_actions_pdf and np.random.random_sample(
            ) >= .25:
                random_action[0, 0:3] = np.squeeze(
                    solved_actions_pdf[task_type].resample(size=1))

            test_action_dist = np.linalg.norm(tested_actions[:, 0:3] -
                                              random_action[:, 0:3],
                                              axis=1)
            if np.any(test_action_dist <= tested_actions[:, 3]
                      ) and np.random.random_sample() >= .75:
                continue
            if ImgToObj.check_seq_action_intersect(
                    images[0], seq_data, stride, goal_type,
                    np.squeeze(random_action[:, 0:3])):

                sim_result = simulator.simulate_action(
                    task_index,
                    np.squeeze(random_action[:, 0:3]),
                    need_images=True,
                    stride=eval_stride)
                evaluator.maybe_log_attempt(task_index, sim_result.status)
                if not sim_result.status.is_invalid():
                    score = ImgToObj.objectTouchGoalSequence(sim_result.images)
                    eval_dist = .1
                    random_action[0, 3] = eval_dist
                    random_action[0, 4] = 1.0 - np.linalg.norm(
                        seq_data['object'][-1]['centroid'] -
                        seq_data['goal'][-1]['centroid']) / 256.0
                    random_action[0, 4] += ImgToObj.objectTouchGoalSequence(
                        sim_result.images) / goal
                    if random_action[0, 4] > max_score:
                        max_score = random_action[0, 4]
                    tested_actions = np.concatenate(
                        (tested_actions, random_action), 0)
                    solved_task = sim_result.status.is_solved()
                    tasks_solved += solved_task

        if not solved_task and evaluator.get_attempts_for_task(
                task_index) < phyre.MAX_TEST_ATTEMPTS:
            tested_actions = np.delete(tested_actions, 0, 0)
            theta = tested_actions[np.argmax(tested_actions[:, 4]), 0:3]
            theta_score = tested_actions[np.argmax(tested_actions[:, 4]), 4]
            while evaluator.get_attempts_for_task(
                    task_index
            ) + 2 * N + 1 < phyre.MAX_TEST_ATTEMPTS and not solved_task:
                delta = np.random.normal(0, .2, (N, 3))
                test_actions_pos = theta + delta
                test_actions_neg = theta - delta
                old_theta = np.copy(theta)
                for i in range(N):

                    pos_score = 0
                    sim_result_pos = simulator.simulate_action(
                        task_index,
                        np.squeeze(test_actions_pos[i, :]),
                        need_images=True,
                        stride=eval_stride)
                    evaluator.maybe_log_attempt(task_index,
                                                sim_result_pos.status)
                    if not sim_result_pos.status.is_invalid():
                        pos_result_seq_data = ImgToObj.getObjectAndGoalSequence(
                            sim_result_pos.images)
                        pos_score = 1.0 - np.linalg.norm(
                            pos_result_seq_data['object'][-1]['centroid'] -
                            pos_result_seq_data['goal'][-1]['centroid']) / 256.0
                        pos_score += ImgToObj.objectTouchGoalSequence(
                            sim_result_pos.images) / goal
                        solved_task = sim_result_pos.status.is_solved()
                        tasks_solved += solved_task

                    neg_score = 0
                    sim_result_neg = simulator.simulate_action(
                        task_index,
                        np.squeeze(test_actions_neg[i, :]),
                        need_images=True,
                        stride=eval_stride)
                    evaluator.maybe_log_attempt(task_index,
                                                sim_result_neg.status)
                    if not sim_result_neg.status.is_invalid():
                        neg_result_seq_data = ImgToObj.getObjectAndGoalSequence(
                            sim_result_neg.images)
                        neg_score = 1.0 - np.linalg.norm(
                            neg_result_seq_data['object'][-1]['centroid'] -
                            neg_result_seq_data['goal'][-1]['centroid']) / 256.0
                        neg_score += ImgToObj.objectTouchGoalSequence(
                            sim_result_neg.images) / goal
                        solved_task = sim_result_neg.status.is_solved()
                        tasks_solved += solved_task

                    theta = theta + alpha / N * (pos_score -
                                                 neg_score) * delta[i, :]

                sim_result = simulator.simulate_action(task_index,
                                                       np.squeeze(theta),
                                                       need_images=True,
                                                       stride=eval_stride)
                evaluator.maybe_log_attempt(task_index, sim_result.status)
                if not sim_result.status.is_invalid():
                    result_seq_data = ImgToObj.getObjectAndGoalSequence(
                        sim_result.images)
                    score = 1.0 - np.linalg.norm(
                        result_seq_data['object'][-1]['centroid'] -
                        result_seq_data['goal'][-1]['centroid']) / 256.0
                    score += ImgToObj.objectTouchGoalSequence(
                        sim_result.images) / goal
                    solved_task = sim_result.status.is_solved()
                    tasks_solved += solved_task

    print(tasks_solved, "Tasks solved out of ", len(task_ids), "Total Tasks")
    return (evaluator.get_aucess(), tasks_solved, len(task_ids))
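The refinement stage above is a basic random-search gradient estimator (in the spirit of Augmented Random Search): each probe direction delta is scored at theta + delta and theta - delta, and the score difference nudges theta along delta. The update in isolation, with a toy score function standing in for the simulation-based scoring:

import numpy as np

def random_search_step(theta, score, alpha=1.0, N=5, sigma=0.2):
    """One random-search gradient step, mirroring the loop above."""
    delta = np.random.normal(0, sigma, (N, theta.size))
    for i in range(N):
        pos = score(theta + delta[i])  # higher score is better
        neg = score(theta - delta[i])
        theta = theta + alpha / N * (pos - neg) * delta[i]
    return theta

# Toy usage: climb toward the action (0.5, 0.5, 0.5).
theta = np.random.rand(3)
for _ in range(50):
    theta = random_search_step(theta, lambda a: -np.sum((a - 0.5) ** 2))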
Example #22
    def real_eval(cls, cache, model, actions, task_ids, tier,
                  max_attempts_per_task, eval_batch_size, finetune_iterations,
                  refine_iterations, refine_loss, refine_lr):

        # TODO: move to a flag.
        finetune_lr = 1e-4

        model.cuda()

        simulator = phyre.initialize_simulator(task_ids, tier)
        observations = simulator.initial_scenes

        # CUSTOM
        if os.path.exists(cls.ACTION_PATH_DIR):
            with open(cls.ACTION_PATH_DIR + '/channel_paths.pickle',
                      'rb') as fp:
                action_path_dict = pickle.load(fp)
            action_paths = torch.Tensor([
                action_path_dict[task]
                if task in action_path_dict else torch.zeros(256, 256)
                for task in task_ids
            ])[:, None].cuda()
        else:
            print("can't find action_path_dict!")
            exit(-1)

        assert tuple(task_ids) == simulator.task_ids

        logging.info('Ranking %d actions and simulating top %d', len(actions),
                     max_attempts_per_task)
        if refine_iterations > 0:
            logging.info(
                'Will do refining for %d iterations with lr=%e and loss=%s',
                refine_iterations, refine_lr, refine_loss)
        evaluator = phyre.Evaluator(task_ids)
        for task_index in tqdm.trange(len(task_ids)):
            task_id = simulator.task_ids[task_index]
            if refine_iterations > 0:
                refined_actions = neural_agent.refine_actions(
                    model, actions, observations[task_index], refine_lr,
                    refine_iterations, eval_batch_size, refine_loss)
            else:
                refined_actions = actions
            scores = neural_agent.eval_actions(
                model,
                refined_actions,
                eval_batch_size,
                observations[task_index],
                action_path=action_paths[task_index])
            # Actions in order of descending score.
            action_order = np.argsort(-scores)
            if refine_iterations <= 0:
                statuses = cache.load_simulation_states(task_id)

            finetune_data = []
            for action_id in action_order:
                if evaluator.get_attempts_for_task(
                        task_index) >= max_attempts_per_task:
                    break
                action = refined_actions[action_id]
                if refine_iterations > 0:
                    status = simulator.simulate_action(
                        task_index,
                        action,
                        need_images=False,
                        need_scenes=False).status
                else:
                    status = phyre.SimulationStatus(statuses[action_id])
                finetune_data.append((task_index, status, action))
                evaluator.maybe_log_attempt(task_index, status)
            if evaluator.get_attempts_for_task(task_index) == 0:
                logging.warning('Made 0 attempts for task %s', task_id)
            if finetune_iterations > 0:
                neural_agent.finetune(model, finetune_data, simulator,
                                      finetune_lr, finetune_iterations)

        return evaluator