Example #1
    def local_evaluate(self, agent: Agent, num_episodes: Optional[int] = None):
        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(
                    num_episodes, len(self._env.episodes)
                )
            )

        assert num_episodes > 0, "num_episodes should be greater than 0"

        agg_metrics: Dict = defaultdict(float)

        count_episodes = 0
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()

            while not self._env.episode_over:
                action = agent.act(observations)
                observations = self._env.step(action)

            metrics = self._env.get_metrics()
            for m, v in metrics.items():
                agg_metrics[m] += v
            count_episodes += 1

        avg_metrics = {k: v / count_episodes for k, v in agg_metrics.items()}

        return avg_metrics
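All of the snippets on this page expect an Agent that implements reset() and act(observations). As a rough usage sketch (not taken from any of the examples, and assuming a habitat-api install that exports habitat.Agent and habitat.Benchmark; the config path and action format are placeholders that depend on the installed version), a trivial always-forward agent could be benchmarked like this:

import habitat


class ForwardOnlyAgent(habitat.Agent):
    """Toy agent that always moves forward until the episode times out."""

    def reset(self) -> None:
        pass

    def act(self, observations):
        # Recent habitat-api versions accept an action dict; older versions
        # expect an integer action index instead.
        return {"action": "MOVE_FORWARD"}


if __name__ == "__main__":
    # "configs/tasks/pointnav.yaml" is a placeholder config path.
    benchmark = habitat.Benchmark(config_paths="configs/tasks/pointnav.yaml")
    metrics = benchmark.evaluate(ForwardOnlyAgent(), num_episodes=5)
    for name, value in metrics.items():
        print("{}: {:.3f}".format(name, value))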
Example #2
    def evaluate(self,
                 agent: Agent,
                 num_episodes: Optional[int] = None) -> Dict[str, float]:
        r"""..

        :param agent: agent to be evaluated in environment.
        :param num_episodes: count of number of episodes for which the
            evaluation should be run.
        :return: dict containing metrics tracked by environment.
        """

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        agg_metrics: Dict = defaultdict(float)

        count_episodes = 0
        reward_episodes = 0
        step_episodes = 0
        success_count = 0
        for count_episodes in tqdm(range(num_episodes)):
            agent.reset()
            observations = self._env.reset()
            episode_reward = 0

            while not self._env.habitat_env.episode_over:
                action = agent.act(observations)
                observations, reward, done, info = self._env.step(**action)
                logging.debug("Reward: {}".format(reward))
                if done:
                    logging.debug('Episode reward: {}'.format(episode_reward))
                episode_reward += reward
                step_episodes += 1

            metrics = self._env.habitat_env.get_metrics()
            for m, v in metrics.items():
                agg_metrics[m] += v
            reward_episodes += episode_reward
            success_count += metrics['spl'] > 0

        # average over all evaluated episodes
        avg_metrics = {k: v / num_episodes for k, v in agg_metrics.items()}
        logging.info("Average reward: {} in {} episodes".format(
            reward_episodes / num_episodes, num_episodes))
        logging.info("Average episode steps: {}".format(step_episodes /
                                                        num_episodes))
        logging.info('Success rate: {}'.format(success_count / num_episodes))

        return avg_metrics
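The aggregation pattern used here and in most of the other examples is the same: sum each metric over episodes, then divide by the episode count. A minimal, habitat-independent sketch of that aggregation (the example values are made up):

from collections import defaultdict
from typing import Dict, List


def average_metrics(per_episode: List[Dict[str, float]]) -> Dict[str, float]:
    """Average a list of per-episode metric dicts key by key."""
    totals: Dict[str, float] = defaultdict(float)
    for episode_metrics in per_episode:
        for name, value in episode_metrics.items():
            totals[name] += value
    return {name: total / len(per_episode) for name, total in totals.items()}


print(average_metrics([{"spl": 0.8, "success": 1.0},
                       {"spl": 0.0, "success": 0.0}]))
# -> {'spl': 0.4, 'success': 0.5}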
Example #3
    def evaluate(self,
                 agent: Agent,
                 num_episodes: Optional[int] = None) -> Dict[str, float]:
        r"""..

            :param agent: agent to be evaluated in environment.
            :param num_episodes: count of number of episodes for which the
                evaluation should be run.
            :return: dict containing metrics tracked by environment.
            """
        self.reset_benchmark()

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        count_episodes = 0
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()
            action_history = []

            while not self._env.episode_over:
                action = agent.act(
                    observations,
                    self._env._sim.previous_step_collided,
                )
                action["action_args"].update(
                    {"episode": self._env._current_episode})
                observations = self._env.step(action)

            metrics = self._env.get_metrics()
            pprint(metrics)
            for m, v in metrics.items():
                if m != "distance_to_goal":
                    self.agg_metrics[m] += v
            count_episodes += 1
            print(count_episodes)

        avg_metrics = {
            k: v / count_episodes
            for k, v in self.agg_metrics.items()
        }

        return avg_metrics
Example #4
    def evaluate(
        self, agent: Agent, num_episodes: Optional[int] = None
    ) -> Dict[str, float]:
        """
        Args:
            agent: agent to be evaluated in environment.
            num_episodes: count of number of episodes for which the evaluation
                should be run.

        Returns:
            dict containing metrics tracked by environment.
        """

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(
                    num_episodes, len(self._env.episodes)
                )
            )

        assert num_episodes > 0, "num_episodes should be greater than 0"

        agg_metrics: Dict = defaultdict(float)

        count_episodes = 0
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()

            while not self._env.episode_over:
                action = agent.act(observations)
                observations = self._env.step(action)

            metrics = self._env.get_metrics()
            for m, v in metrics.items():
                agg_metrics[m] += v
            count_episodes += 1

        avg_metrics = {k: v / count_episodes for k, v in agg_metrics.items()}

        return avg_metrics
Example #5
    def evaluate(self,
                 agent: Agent,
                 num_episodes: Optional[int] = None) -> Dict[str, float]:
        r"""..

            :param agent: agent to be evaluated in environment.
            :param num_episodes: count of number of episodes for which the
                evaluation should be run.
            :return: dict containing metrics tracked by environment.
            """
        self.reset_benchmark()

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        count_episodes = 0
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()
            action_history = []

            while not self._env.episode_over:
                action = agent.act(
                    observations,
                    self._env._current_episode,
                )
                print("action has been performed")
                break
            break
            #observations = self._env.step(action)
        return
Example #6
    def remote_evaluate(
        self, agent: Agent, num_episodes: Optional[int] = None
    ):
        # The modules imported below are specific to habitat-challenge remote evaluation.
        # These modules are not part of the habitat-api repository.
        import evaluation_pb2
        import evaluation_pb2_grpc
        import evalai_environment_habitat
        import grpc
        import pickle
        import time

        time.sleep(60)

        def pack_for_grpc(entity):
            return pickle.dumps(entity)

        def unpack_for_grpc(entity):
            return pickle.loads(entity)

        def remote_ep_over(stub):
            res_env = unpack_for_grpc(
                stub.episode_over(evaluation_pb2.Package()).SerializedEntity
            )
            return res_env["episode_over"]

        env_address_port = os.environ.get("EVALENV_ADDPORT", "localhost:8085")
        channel = grpc.insecure_channel(env_address_port)
        stub = evaluation_pb2_grpc.EnvironmentStub(channel)

        base_num_episodes = unpack_for_grpc(
            stub.num_episodes(evaluation_pb2.Package()).SerializedEntity
        )
        num_episodes = base_num_episodes["num_episodes"]

        agg_metrics: Dict = defaultdict(float)

        count_episodes = 0

        while count_episodes < num_episodes:
            agent.reset()
            res_env = unpack_for_grpc(
                stub.reset(evaluation_pb2.Package()).SerializedEntity
            )

            while not remote_ep_over(stub):
                obs = res_env["observations"]
                action = agent.act(obs)

                res_env = unpack_for_grpc(
                    stub.act_on_environment(
                        evaluation_pb2.Package(
                            SerializedEntity=pack_for_grpc(action)
                        )
                    ).SerializedEntity
                )

            metrics = unpack_for_grpc(
                stub.get_metrics(
                    evaluation_pb2.Package(
                        SerializedEntity=pack_for_grpc(action)
                    )
                ).SerializedEntity
            )

            for m, v in metrics["metrics"].items():
                agg_metrics[m] += v
            count_episodes += 1

        avg_metrics = {k: v / count_episodes for k, v in agg_metrics.items()}

        stub.evalai_update_submission(evaluation_pb2.Package())

        return avg_metrics
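The pack_for_grpc/unpack_for_grpc helpers above simply pickle arbitrary Python objects (actions, observations, metric dicts) into the bytes carried by the Package message's SerializedEntity field. A self-contained sketch of that round trip, independent of the generated gRPC modules:

import pickle

# pack_for_grpc: turn a Python object into bytes for SerializedEntity.
action = {"action": "MOVE_FORWARD"}
serialized = pickle.dumps(action)

# unpack_for_grpc: recover the object on the other side of the channel.
assert pickle.loads(serialized) == action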
Example #7
    def local_evaluate(self,
                       agent: Agent,
                       num_episodes: Optional[int] = None,
                       control_period: Optional[float] = 1.0,
                       frame_rate: Optional[int] = 1):
        if num_episodes is None:
            num_episodes = len(self._env._env.episodes)
        else:
            assert num_episodes <= len(self._env._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        agg_metrics: Dict = defaultdict(float)

        writer = TensorboardWriter(
            'tb_benchmark/', flush_secs=30)  # flush_secs value from base_trainer.py

        count_episodes = 0
        print("number of episodes: " + str(num_episodes))
        while count_episodes < num_episodes:
            print("working on episode " + str(count_episodes))
            observations_per_episode = []
            agent.reset()
            observations_per_action = self._env._env.reset()
            # Re-initialize the physics-enabled sim env. Do this for every
            # episode, since assets sometimes get deallocated.
            if self._enable_physics:
                self._env._env.disable_physics()
                self._env._env.enable_physics()

            frame_counter = 0
            # act until one episode is over
            while not self._env._env.episode_over:
                action = agent.act(observations_per_action)
                observations_per_action = reward_per_action = done_per_action = info_per_action = None
                if not self._enable_physics:
                    (observations_per_action, reward_per_action,
                     done_per_action, info_per_action) = self._env.step(action)
                else:
                    # Step with physics. For now we use a hard-coded time step
                    # of 1/60 sec (as in the rigid-object tutorial in Habitat Sim).
                    (observations_per_action, reward_per_action,
                     done_per_action,
                     info_per_action) = self._env.step_physics(
                         action,
                         time_step=1.0 / 60.0,
                         control_period=control_period)
                # Generate an output image for the action. The image includes the
                # observations and a top-down map showing the agent's state in the
                # environment. To reduce computational overhead, we only render one
                # frame every frame_rate actions.
                if frame_counter % frame_rate == 0:
                    out_im_per_action = observations_to_image(
                        observations_per_action, info_per_action)
                    observations_per_episode.append(out_im_per_action)
                frame_counter = frame_counter + 1

            # episode ended
            # get per-episode metrics. for now we only extract
            # distance-to-goal, success, spl
            metrics = self._env._env.get_metrics()
            per_ep_metrics = {
                k: metrics[k]
                for k in ['distance_to_goal', 'success', 'spl']
            }
            # print distance_to_goal, success and spl
            for k, v in per_ep_metrics.items():
                print(f'{k},{v}')
            # calculate aggregated distance_to_goal, success and spl
            for m, v in per_ep_metrics.items():
                agg_metrics[m] += v
            count_episodes += 1
            # generate video
            generate_video(
                video_option=["disk", "tensorboard"],
                video_dir='video_benchmark_dir',
                images=observations_per_episode,
                episode_id=count_episodes - 1,
                checkpoint_idx=0,
                metrics=per_ep_metrics,
                tb_writer=writer,
            )

        avg_metrics = {k: v / count_episodes for k, v in agg_metrics.items()}

        return avg_metrics
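step_physics is specific to this repository's environment wrapper, but the comment above describes the idea: advance the simulation in fixed 1/60-second increments until a full control period has elapsed. A small sketch under that assumption (sim_step_fn stands in for whatever actually steps the simulator):

def step_over_control_period(sim_step_fn, control_period=1.0, time_step=1.0 / 60.0):
    """Advance a physics simulation in fixed time_step increments until
    roughly control_period seconds of simulated time have elapsed.

    sim_step_fn(dt) is a hypothetical callable that steps the underlying
    simulator by dt seconds.
    """
    num_steps = max(1, round(control_period / time_step))
    for _ in range(num_steps):
        sim_step_fn(time_step)


# e.g. step_over_control_period(lambda dt: sim.step_physics(dt), control_period=1.0)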
Example #8
    def evaluate(self,
                 agent: Agent,
                 num_episodes: Optional[int] = None) -> Dict[str, float]:
        r"""..

            :param agent: agent to be evaluated in environment.
            :param num_episodes: count of number of episodes for which the
                evaluation should be run.
            :return: dict containing metrics tracked by environment.
            """

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        agg_metrics: Dict = defaultdict(float)

        count_episodes = 0
        images = []
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()
            action_history = []
            gif_images = []
            #if images:
            #print("writing file with images")
            #for im in images:
            #    image = Image.fromarray(im[:,:, [2,1,0]])
            #    gif_images.append(image)
            #image =  image[:,:, [2,1,0]]
            #cv2.imshow("RGB", image)
            #cv2.waitKey(0)
            #im1 = gif_images[0]
            #im1.save("out.gif", save_all=True, append_images=gif_images[1:], duration=1000, loop=0)

            gif_images = []
            images = []
            #print("*"*20 + "Starting new episode" + "*"*20,
            #    self._env._current_episode.curr_viewpoint.image_id)
            #if observations and "heading" in observations:
            #    print("Episode heading: %s" % observations["heading"])

            elapsed_steps = 0
            goal_idx = 1
            last_goal_idx = len(self._env._current_episode.goals) - 1
            images.append(observations["rgb"][:, :, [2, 1, 0]])
            observations["images"] = images

            print("Target path ",
                  [str(goal) for goal in self._env._current_episode.goals])
            while not self._env.episode_over:
                goal_viewpoint = self._env._current_episode.goals[goal_idx]

                action = agent.act(
                    observations,
                    goal_viewpoint,
                )
                action["action_args"].update(
                    {"episode": self._env._current_episode})

                if action["action"] == "TELEPORT":
                    if goal_idx < last_goal_idx:
                        goal_idx += 1
                    else:
                        goal_idx = -1

                prev_state = self._env._sim.get_agent_state()

                prev_image_id = self._env._current_episode.curr_viewpoint.image_id
                prev_heading = observations["heading"]
                prev_nav_locations = observations["adjacentViewpoints"]
                #print("Taking action %s from %s \n" % (action["action"], self._env._current_episode.curr_viewpoint.image_id))
                observations = self._env.step(action)
                #pprint(observations["adjacentViewpoints"])
                images.append(observations["rgb"][:, :, [2, 1, 0]])
                observations["images"] = images
                #print("Result of Action in position %s\n" %  self._env._current_episode.curr_viewpoint.image_id)
                state = self._env._sim.get_agent_state()
                image_id = self._env._current_episode.curr_viewpoint.image_id
                heading = observations["heading"]
                nav_locations = observations["adjacentViewpoints"]
                #print("Current position", state.position)
                #print("Current rotation", state.rotation)

                action_history.append({
                    "action": action["action"],
                    "prev_image_id": prev_image_id,
                    "prev_heading": prev_heading,
                    "prev_pos": prev_state.position,
                    "prev_rot": prev_state.rotation,
                    "prev_nav_locations": prev_nav_locations,
                    "new_image_id": image_id,
                    "new_heading": heading,
                    "new_pos": state.position,
                    "new_rot": state.rotation,
                    "nav_locations": nav_locations,
                })

            #pprint(action_history)
            metrics = self._env.get_metrics()
            pprint(metrics)
            if "navigationError" in metrics and metrics["navigationError"] > 0:
                print("Scan %s" % self._env._current_episode.scan)
                print("image_id %s" %
                      str(self._env._current_episode.goals[0].image_id))

            for m, v in metrics.items():
                if m != "distance_to_goal":
                    agg_metrics[m] += v
            count_episodes += 1
            print(count_episodes)

        avg_metrics = {k: v / count_episodes for k, v in agg_metrics.items()}

        return avg_metrics
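The commented-out block near the top of this example's loop sketches how the collected frames could be written to an animated GIF. A cleaned-up version of that idea using Pillow (assuming images holds H x W x 3 uint8 RGB frames, as built in the loop above):

from PIL import Image


def save_episode_gif(images, path="out.gif", frame_ms=1000):
    """Write a list of H x W x 3 uint8 RGB frames to an animated GIF."""
    if not images:
        return
    frames = [Image.fromarray(frame) for frame in images]
    frames[0].save(
        path,
        save_all=True,
        append_images=frames[1:],
        duration=frame_ms,  # display time per frame, in milliseconds
        loop=0,             # loop forever
    )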
Example #9
    def evaluate(self,
                 agent: Agent,
                 num_episodes: Optional[int] = None) -> Dict[str, float]:
        r"""..

            :param agent: agent to be evaluated in environment.
            :param num_episodes: count of number of episodes for which the
                evaluation should be run.
            :return: dict containing metrics tracked by environment.
            """
        self.reset_benchmark()

        if num_episodes is None:
            num_episodes = len(self._env.episodes)
        else:
            assert num_episodes <= len(self._env.episodes), (
                "num_episodes({}) is larger than number of episodes "
                "in environment ({})".format(num_episodes,
                                             len(self._env.episodes)))

        assert num_episodes > 0, "num_episodes should be greater than 0"

        count_episodes = 0
        while count_episodes < num_episodes:
            agent.reset()
            observations = self._env.reset()
            action_history = []
            #print("*"*20 + "Starting new episode" + "*"*20,
            #    self._env._current_episode.curr_viewpoint.image_id)
            elapsed_steps = 0
            while not self._env.episode_over:
                action = agent.act(
                    observations,
                    elapsed_steps,
                    self._env._sim.previous_step_collided,
                )
                action["action_args"].update(
                    {"episode": self._env._current_episode})

                if elapsed_steps == 0 or action["action"] == "TELEPORT":
                    elapsed_steps += 1

                prev_state = self._env._sim.get_agent_state()
                prev_image_id = self._env._current_episode.curr_viewpoint.image_id
                prev_heading = observations["heading"]
                prev_nav_locations = observations["adjacentViewpoints"]
                #print("Taking action %s from %s \n" % (action["action"], self._env._current_episode.curr_viewpoint.image_id))
                observations = self._env.step(action)
                #print("Result of Action in position %s\n" %  self._env._current_episode.curr_viewpoint.image_id)
                state = self._env._sim.get_agent_state()
                image_id = self._env._current_episode.curr_viewpoint.image_id
                heading = observations["heading"]
                nav_locations = observations["adjacentViewpoints"]
                #print("Current position", state.position)
                #print("Current rotation", state.rotation)
                #print("\n\n")

                action_history.append({
                    "action": action["action"],
                    "prev_image_id": prev_image_id,
                    "prev_heading": prev_heading,
                    "prev_pos": prev_state.position,
                    "prev_rot": prev_state.rotation,
                    "prev_nav_locations": prev_nav_locations,
                    "new_image_id": image_id,
                    "new_heading": heading,
                    "new_pos": state.position,
                    "new_rot": state.rotation,
                    #"nav_locations": nav_locations,
                })

            #print("Target path ", [str(goal) for goal in self._env._current_episode.goals])

            #pprint(action_history)
            metrics = self._env.get_metrics()
            if np.isinf(metrics["navigationError"]):
                pprint(action_history)
                print("Target path ",
                      [str(goal) for goal in self._env._current_episode.goals])
            pprint(metrics)
            for m, v in metrics.items():
                if m != "distance_to_goal":
                    self.agg_metrics[m] += v
            count_episodes += 1
            print(count_episodes)

        avg_metrics = {
            k: v / count_episodes
            for k, v in self.agg_metrics.items()
        }

        return avg_metrics