Code example #1
File: gym_batch_rl.py  Project: lwzbuaa/ReAgent
def evaluate_gym(
    env_name: str,
    model: ModelManager__Union,
    publisher: ModelPublisher__Union,
    num_eval_episodes: int,
    passing_score_bar: float,
    max_steps: Optional[int] = None,
):
    publisher_manager = publisher.value
    assert isinstance(
        publisher_manager, FileSystemPublisher
    ), f"publishing manager is type {type(publisher_manager)}, not FileSystemPublisher"
    env = EnvFactory.make(env_name)
    torchscript_path = publisher_manager.get_latest_published_model(
        model.value)
    jit_model = torch.jit.load(torchscript_path)
    policy = create_predictor_policy_from_model(jit_model)
    agent = Agent.create_for_env_with_serving_policy(env, policy)
    rewards = evaluate_for_n_episodes(n=num_eval_episodes,
                                      env=env,
                                      agent=agent,
                                      max_steps=max_steps)
    avg_reward = np.mean(rewards)
    logger.info(f"Average reward over {num_eval_episodes} is {avg_reward}.\n"
                f"List of rewards: {rewards}")
    assert (avg_reward >= passing_score_bar
            ), f"{avg_reward} fails to pass the bar of {passing_score_bar}!"
    return
Code example #2
    def test_random_vs_lqr(self):
        """
        Test random actions vs. a LQR controller. LQR controller should perform
        much better than random actions in the linear dynamics environment.
        """
        env = EnvFactory.make("LinearDynamics-v0")
        num_test_episodes = 500

        def random_policy(env, state):
            return np.random.uniform(env.action_space.low,
                                     env.action_space.high, env.action_dim)

        def lqr_policy(env, state):
            # Four matrices that characterize the environment
            A, B, Q, R = env.A, env.B, env.Q, env.R
            # Solve discrete algebraic Riccati equation:
            M = linalg.solve_discrete_are(A, B, Q, R)
            K = np.dot(linalg.inv(np.dot(np.dot(B.T, M), B) + R),
                       (np.dot(np.dot(B.T, M), A)))
            state = state.reshape((-1, 1))
            action = -K.dot(state).squeeze()
            return action

        mean_acc_rws_random = self.run_n_episodes(env, num_test_episodes,
                                                  random_policy)
        mean_acc_rws_lqr = self.run_n_episodes(env, num_test_episodes,
                                               lqr_policy)
        logger.info(
            f"Mean acc. reward of random policy: {mean_acc_rws_random}")
        logger.info(f"Mean acc. reward of LQR policy: {mean_acc_rws_lqr}")
        assert mean_acc_rws_lqr > mean_acc_rws_random
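
The LQR policy above boils down to solving the discrete algebraic Riccati equation with scipy.linalg.solve_discrete_are and forming the feedback gain K from its solution. A minimal, self-contained sketch of the same computation, using assumed toy double-integrator dynamics rather than the LinearDynamics-v0 matrices:

import numpy as np
from scipy import linalg

# Assumed toy dynamics and quadratic costs, for illustration only.
A = np.array([[1.0, 1.0], [0.0, 1.0]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)          # state cost
R = np.array([[0.1]])  # action cost

# Solve the discrete algebraic Riccati equation, then form the
# optimal feedback gain K = inv(B'MB + R) B'MA, as in lqr_policy above.
M = linalg.solve_discrete_are(A, B, Q, R)
K = linalg.inv(B.T @ M @ B + R) @ (B.T @ M @ A)

state = np.array([[1.0], [0.0]])
action = -K @ state    # optimal control u = -Kx
print(action.squeeze())
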
Code example #3
File: gym_batch_rl.py  Project: jayhsieh/ReAgent
def offline_gym(
    env: str,
    pkl_path: str,
    num_episodes_for_data_batch: int,
    max_steps: Optional[int],
    seed: Optional[int] = None,
):
    """
    Generate samples from a DiscreteRandomPolicy on the Gym environment and
    saves results in a pandas df parquet.
    """
    initialize_seed(seed)
    env = EnvFactory.make(env)

    policy = DiscreteRandomPolicy.create_for_env(env)
    dataset = RLDataset()
    for i in range(num_episodes_for_data_batch):
        logger.info(f"Starting episode {i}")
        post_step = log_data_post_step(dataset=dataset, mdp_id=str(i), env=env)
        agent = Agent.create_for_env(env,
                                     policy,
                                     post_transition_callback=post_step)
        run_episode(env=env, agent=agent, max_steps=max_steps)

    logger.info(f"Saving dataset with {len(dataset)} samples to {pkl_path}")
    df = dataset.to_pandas_df()
    df.to_pickle(pkl_path)
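
For reference, the file written above is an ordinary pickled pandas DataFrame and can be inspected directly; a minimal sketch, where the path is a placeholder rather than a value from the original code:

import pandas as pd

# Placeholder path; it should match the pkl_path passed to offline_gym above.
pkl_path = "offline_gym_data.pkl"

df = pd.read_pickle(pkl_path)
print(f"{len(df)} logged transitions, columns: {list(df.columns)}")
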
Code example #4
def evaluate_gym(
    env_name: str,
    model: torch.nn.Module,
    eval_temperature: float,
    num_eval_episodes: int,
    passing_score_bar: float,
    max_steps: Optional[int] = None,
):
    env: gym.Env = EnvFactory.make(env_name)
    policy = create_predictor_policy_from_model(
        env, model, eval_temperature=eval_temperature)

    # since we already return softmax action, override action_extractor
    agent = Agent.create_for_env(
        env, policy=policy, action_extractor=policy.get_action_extractor())

    rewards = []
    for _ in range(num_eval_episodes):
        ep_reward = run_episode(env=env, agent=agent, max_steps=max_steps)
        rewards.append(ep_reward)

    avg_reward = np.mean(rewards)
    logger.info(f"Average reward over {num_eval_episodes} is {avg_reward}.\n"
                f"List of rewards: {rewards}")
    assert (avg_reward >= passing_score_bar
            ), f"{avg_reward} fails to pass the bar of {passing_score_bar}!"
Code example #5
File: gym_batch_rl.py  Project: jayhsieh/ReAgent
def evaluate_gym(
    env: str,
    model,
    eval_temperature: float,
    num_eval_episodes: int,
    passing_score_bar: float,
    max_steps: Optional[int] = None,
):
    predictor = DiscreteDqnTorchPredictor(model)
    predictor.softmax_temperature = eval_temperature

    env = EnvFactory.make(env)
    policy = TorchPredictorPolicy(predictor)
    agent = Agent(policy=policy, action_extractor=lambda x: x.item())

    rewards = []
    for _ in range(num_eval_episodes):
        ep_reward = run_episode(env=env, agent=agent, max_steps=max_steps)
        rewards.append(ep_reward)

    avg_reward = np.mean(rewards)
    logger.info(f"Average reward over {num_eval_episodes} is {avg_reward}, "
                f"which passes the bar of {passing_score_bar}!\n"
                f"List of rewards: {rewards}")
    assert avg_reward >= passing_score_bar
Code example #6
File: gym_batch_rl.py  Project: lwzbuaa/ReAgent
def offline_gym(
    env_name: str,
    pkl_path: str,
    num_train_transitions: int,
    max_steps: Optional[int],
    seed: Optional[int] = None,
):
    """
    Generate samples from a DiscreteRandomPolicy on the Gym environment and
    saves results in a pandas df parquet.
    """
    initialize_seed(seed)
    env = EnvFactory.make(env_name)

    replay_buffer = ReplayBuffer.create_from_env(
        env=env, replay_memory_size=num_train_transitions, batch_size=1)
    fill_replay_buffer(env, replay_buffer, num_train_transitions)
    if isinstance(env.action_space, gym.spaces.Discrete):
        is_discrete_action = True
    else:
        assert isinstance(env.action_space, gym.spaces.Box)
        is_discrete_action = False
    df = replay_buffer_to_pre_timeline_df(is_discrete_action, replay_buffer)
    logger.info(f"Saving dataset with {len(df)} samples to {pkl_path}")
    df.to_pickle(pkl_path)
Code example #7
File: test_gym_offline.py  Project: lwzbuaa/ReAgent
def run_test_offline(
    env_name: str,
    max_steps: Optional[int],
    model: ModelManager__Union,
    replay_memory_size: int,
    num_batches_per_epoch: int,
    num_train_epochs: int,
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
):
    env = EnvFactory.make(env_name)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )

    # first fill the replay buffer to burn_in
    replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=replay_memory_size,
        batch_size=trainer.minibatch_size,
    )
    # always fill full RB
    fill_replay_buffer(
        env=env, replay_buffer=replay_buffer, desired_size=replay_memory_size
    )

    device = torch.device("cuda") if use_gpu else None
    # pyre-fixme[6]: Expected `device` for 2nd param but got `Optional[torch.device]`.
    trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env)

    writer = SummaryWriter()
    with summary_writer_context(writer):
        for epoch in range(num_train_epochs):
            logger.info(f"Evaluating before epoch {epoch}: ")
            eval_rewards = evaluate_cem(env, manager, max_steps, 1)
            for _ in tqdm(range(num_batches_per_epoch)):
                train_batch = replay_buffer.sample_transition_batch_tensor()
                preprocessed_batch = trainer_preprocessor(train_batch)
                trainer.train(preprocessed_batch)

    logger.info(f"Evaluating after training for {num_train_epochs} epochs: ")
    eval_rewards = evaluate_cem(env, manager, max_steps, num_eval_episodes)
    mean_rewards = np.mean(eval_rewards)
    assert (
        mean_rewards >= passing_score_bar
    ), f"{mean_rewards} doesn't pass the bar {passing_score_bar}."
Code example #8
def run_test(
    env: str,
    model: ModelManager__Union,
    replay_memory_size: int,
    train_every_ts: int,
    train_after_ts: int,
    num_episodes: int,
    max_steps: Optional[int],
    last_score_bar: float,
):
    env = EnvFactory.make(env)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is {normalization}")

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=False,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )

    policy = manager.create_policy()
    replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=replay_memory_size,
        batch_size=trainer.minibatch_size,
    )

    post_step = train_with_replay_buffer_post_step(
        replay_buffer=replay_buffer,
        trainer=trainer,
        training_freq=train_every_ts,
        batch_size=trainer.minibatch_size,
        replay_burnin=train_after_ts,
    )

    agent = Agent.create_for_env(env,
                                 policy=policy,
                                 post_transition_callback=post_step)

    reward_history = []
    for i in range(num_episodes):
        logger.info(f"running episode {i}")
        ep_reward = run_episode(env=env, agent=agent, max_steps=max_steps)
        reward_history.append(ep_reward)

    assert reward_history[-1] >= last_score_bar, (
        f"reward after {len(reward_history)} episodes is {reward_history[-1]}, "
        f"less than {last_score_bar}...\n"
        f"Full reward history: {reward_history}")

    return reward_history
Code example #9
    def _create_env(self, gymenv: Union[str, Env], random_seed: Optional[int]):
        """
        Creates a gym environment object and checks if it is supported. We
        support environments that supply Box(x, ) state representations and
        require Discrete(y) or Box(y,) action inputs.

        :param gymenv: String identifier for desired environment or environment
            object itself.
        """
        if isinstance(gymenv, Env):
            self.env = gymenv
            self.env_name = gymenv.unwrapped.spec.id
        else:
            if gymenv not in [e.id for e in gym.envs.registry.all()]:
                raise Exception(
                    "Env {} not found in OpenAI Gym.".format(gymenv))
            self.env = EnvFactory.make(gymenv)
            self.env_name = gymenv
            if random_seed is not None:
                self.env.seed(random_seed)
                self.env.action_space.seed(random_seed)  # type: ignore

        supports_state = isinstance(
            self.env.observation_space, gym.spaces.Box) and len(
                self.env.observation_space.shape) in [1, 3]
        supports_action = type(self.env.action_space) in (
            gym.spaces.Discrete,
            gym.spaces.Box,
        )

        if not (supports_state and supports_action):
            raise Exception(
                "Unsupported environment state or action type: {}, {}".format(
                    self.env.observation_space, self.env.action_space))

        self.action_space = self.env.action_space
        if isinstance(self.env.action_space, gym.spaces.Discrete):
            self.action_type = EnvType.DISCRETE_ACTION
            self.action_dim = self.env.action_space.n
        elif isinstance(self.env.action_space, gym.spaces.Box):
            self.action_type = EnvType.CONTINUOUS_ACTION
            self.action_dim = self.env.action_space.shape[0]  # type: ignore

        if len(self.env.observation_space.shape) == 1:  # type: ignore
            self.state_dim = self.env.observation_space.shape[
                0]  # type: ignore
            self.img = False
        elif len(self.env.observation_space.shape) == 3:  # type: ignore
            self.height, self.width, self.num_input_channels = (
                self.env.observation_space.shape  # type: ignore
            )
            self.img = True
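
The supported-type logic in _create_env above comes down to inspecting observation_space and action_space. A minimal standalone sketch of the same checks using plain Gym, with CartPole-v0 chosen purely as an example environment:

import gym

env = gym.make("CartPole-v0")
obs_space, act_space = env.observation_space, env.action_space

# Mirror the checks above: Box states of rank 1 (vectors) or rank 3 (images),
# and either Discrete or Box actions.
supports_state = isinstance(obs_space, gym.spaces.Box) and len(obs_space.shape) in (1, 3)
supports_action = isinstance(act_space, (gym.spaces.Discrete, gym.spaces.Box))
if not (supports_state and supports_action):
    raise Exception(f"Unsupported spaces: {obs_space}, {act_space}")

state_dim = obs_space.shape[0]   # 4 for CartPole
if isinstance(act_space, gym.spaces.Discrete):
    action_dim = act_space.n     # 2 for CartPole
else:
    action_dim = act_space.shape[0]
print(state_dim, action_dim)
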
Code example #10
File: test_world_model.py  Project: zhaonann/ReAgent
def train_mdnrnn_and_train_on_embedded_env(
    env_name: str,
    embedding_model: ModelManager__Union,
    num_embedding_train_transitions: int,
    seq_len: int,
    batch_size: int,
    num_embedding_train_epochs: int,
    train_model: ModelManager__Union,
    num_state_embed_transitions: int,
    num_agent_train_epochs: int,
    num_agent_eval_epochs: int,
    use_gpu: bool,
    passing_score_bar: float,
    saved_mdnrnn_path: Optional[str] = None,
):
    """ Train an agent on embedded states by the MDNRNN. """
    env = EnvFactory.make(env_name)
    env.seed(SEED)

    embedding_manager = embedding_model.value
    embedding_trainer = embedding_manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(env),
    )

    device = "cuda" if use_gpu else "cpu"
    embedding_trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
        embedding_trainer, device, env
    )
    if saved_mdnrnn_path is None:
        # train from scratch
        embedding_trainer = train_mdnrnn(
            env=env,
            trainer=embedding_trainer,
            trainer_preprocessor=embedding_trainer_preprocessor,
            num_train_transitions=num_embedding_train_transitions,
            seq_len=seq_len,
            batch_size=batch_size,
            num_train_epochs=num_embedding_train_epochs,
        )
    else:
        # load a pretrained model, and just evaluate it
        embedding_trainer.memory_network.mdnrnn.load_state_dict(
            torch.load(saved_mdnrnn_path)
        )

    # create embedding dataset
    embed_rb, state_min, state_max = create_embed_rl_dataset(
        env=env,
        memory_network=embedding_trainer.memory_network,
        num_state_embed_transitions=num_state_embed_transitions,
        batch_size=batch_size,
        seq_len=seq_len,
        hidden_dim=embedding_trainer.params.hidden_size,
        use_gpu=use_gpu,
    )
    embed_env = StateEmbedEnvironment(
        gym_env=env,
        mdnrnn=embedding_trainer.memory_network,
        max_embed_seq_len=seq_len,
        state_min_value=state_min,
        state_max_value=state_max,
    )
    agent_manager = train_model.value
    agent_trainer = agent_manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(embed_env),
    )
    device = "cuda" if use_gpu else "cpu"
    agent_trainer_preprocessor = make_replay_buffer_trainer_preprocessor(
        agent_trainer, device, env
    )
    num_batch_per_epoch = embed_rb.size // batch_size
    for epoch in range(num_agent_train_epochs):
        for _ in tqdm(range(num_batch_per_epoch), desc=f"epoch {epoch}"):
            batch = embed_rb.sample_transition_batch_tensor(batch_size=batch_size)
            preprocessed_batch = agent_trainer_preprocessor(batch)
            agent_trainer.train(preprocessed_batch)

    # evaluate model
    rewards = []
    policy = agent_manager.create_policy(serving=False)
    agent = Agent.create_for_env(embed_env, policy=policy, device=device)
    for i in range(num_agent_eval_epochs):
        ep_reward = run_episode(env=embed_env, agent=agent)
        rewards.append(ep_reward)
        logger.info(f"Finished eval episode {i} with reward {ep_reward}.")
    logger.info(f"Average eval reward is {np.mean(rewards)}.")
    assert (
        np.mean(rewards) >= passing_score_bar
    ), f"average reward doesn't pass our bar {passing_score_bar}"
    return rewards
Code example #11
File: test_world_model.py  Project: zhaonann/ReAgent
def train_mdnrnn_and_compute_feature_stats(
    env_name: str,
    model: ModelManager__Union,
    num_train_transitions: int,
    num_test_transitions: int,
    seq_len: int,
    batch_size: int,
    num_train_epochs: int,
    use_gpu: bool,
    saved_mdnrnn_path: Optional[str] = None,
):
    """ Train MDNRNN Memory Network and compute feature importance/sensitivity. """
    env: gym.Env = EnvFactory.make(env_name)
    env.seed(SEED)

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(env),
    )

    device = "cuda" if use_gpu else "cpu"
    trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env)
    test_replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=num_test_transitions,
        batch_size=batch_size,
        stack_size=seq_len,
        return_everything_as_stack=True,
    )
    fill_replay_buffer(env, test_replay_buffer, num_test_transitions)

    if saved_mdnrnn_path is None:
        # train from scratch
        trainer = train_mdnrnn(
            env=env,
            trainer=trainer,
            trainer_preprocessor=trainer_preprocessor,
            num_train_transitions=num_train_transitions,
            seq_len=seq_len,
            batch_size=batch_size,
            num_train_epochs=num_train_epochs,
            test_replay_buffer=test_replay_buffer,
        )
    else:
        # load a pretrained model, and just evaluate it
        trainer.memory_network.mdnrnn.load_state_dict(torch.load(saved_mdnrnn_path))

    with torch.no_grad():
        trainer.memory_network.mdnrnn.eval()
        test_batch = test_replay_buffer.sample_transition_batch_tensor(
            batch_size=test_replay_buffer.size
        )
        preprocessed_test_batch = trainer_preprocessor(test_batch)
        feature_importance = calculate_feature_importance(
            env=env,
            trainer=trainer,
            use_gpu=use_gpu,
            test_batch=preprocessed_test_batch,
        )

        feature_sensitivity = calculate_feature_sensitivity(
            env=env,
            trainer=trainer,
            use_gpu=use_gpu,
            test_batch=preprocessed_test_batch,
        )

        trainer.memory_network.mdnrnn.train()
    return feature_importance, feature_sensitivity
Code example #12
File: test_gym.py  Project: dwtcourses/ReAgent
def run_test(
    env: str,
    model: ModelManager__Union,
    replay_memory_size: int,
    train_every_ts: int,
    train_after_ts: int,
    num_train_episodes: int,
    max_steps: Optional[int],
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
):
    env = EnvFactory.make(env)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is {normalization}")

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )

    replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=replay_memory_size,
        batch_size=trainer.minibatch_size,
    )

    device = torch.device("cuda") if use_gpu else None
    post_step = train_with_replay_buffer_post_step(
        replay_buffer=replay_buffer,
        trainer=trainer,
        training_freq=train_every_ts,
        batch_size=trainer.minibatch_size,
        replay_burnin=train_after_ts,
        device=device,
    )

    training_policy = manager.create_policy(serving=False)
    agent = Agent.create_for_env(env,
                                 policy=training_policy,
                                 post_transition_callback=post_step,
                                 device=device)

    train_rewards = []
    for i in range(num_train_episodes):
        ep_reward = run_episode(env=env, agent=agent, max_steps=max_steps)
        train_rewards.append(ep_reward)
        logger.info(f"Finished training episode {i} with reward {ep_reward}.")

    assert train_rewards[-1] >= passing_score_bar, (
        f"reward after {len(train_rewards)} episodes is {train_rewards[-1]}, "
        f"less than {passing_score_bar}...\n"
        f"Full reward history: {train_rewards}")

    logger.info("============Train rewards=============")
    logger.info(train_rewards)

    def gym_to_reagent_serving(
            obs: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:
        obs_tensor = torch.tensor(obs).float().unsqueeze(0)
        presence_tensor = torch.ones_like(obs_tensor)
        return (obs_tensor, presence_tensor)

    serving_policy = manager.create_policy(serving=True)
    agent = Agent.create_for_env(env,
                                 policy=serving_policy,
                                 obs_preprocessor=gym_to_reagent_serving)

    eval_rewards = []
    for i in range(num_eval_episodes):
        ep_reward = run_episode(env=env, agent=agent, max_steps=max_steps)
        eval_rewards.append(ep_reward)
        logger.info(f"Finished eval episode {i} with reward {ep_reward}.")

    assert np.mean(eval_rewards) >= passing_score_bar, (
        f"Predictor reward is {np.mean(eval_rewards)}, "
        f"less than {passing_score_bar}...\n"
        f"Full eval rewards: {eval_rewards}.")

    logger.info("============Eval rewards==============")
    logger.info(eval_rewards)
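
The gym_to_reagent_serving helper in the example above shows the shape of the serving input: a batch of feature values plus a matching presence mask. A minimal standalone sketch of that conversion, with an assumed CartPole-like observation:

import numpy as np
import torch

# Assumed 4-dimensional observation, as CartPole would produce.
obs = np.array([0.02, -0.01, 0.03, 0.04], dtype=np.float32)

obs_tensor = torch.tensor(obs).float().unsqueeze(0)  # shape (1, state_dim)
presence_tensor = torch.ones_like(obs_tensor)        # mark every feature as present
print(obs_tensor.shape, presence_tensor.shape)
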
Code example #13
File: test_pomdp.py  Project: zhaonann/ReAgent
    def test_pocman(self):
        env = EnvFactory.make("Pocman-v0")
        env.seed(313)
        mean_acc_reward = self._test_env(env)
        assert -80 <= mean_acc_reward <= -70
Code example #14
File: test_pomdp.py  Project: zhaonann/ReAgent
    def test_string_game(self):
        env = EnvFactory.make("StringGame-v0")
        env.seed(313)
        mean_acc_reward = self._test_env(env)
        assert mean_acc_reward <= 0.1
Code example #15
def run_test(
    env_name: str,
    model: ModelManager__Union,
    replay_memory_size: int,
    train_every_ts: int,
    train_after_ts: int,
    num_train_episodes: int,
    max_steps: Optional[int],
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
):
    env = EnvFactory.make(env_name)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )
    training_policy = manager.create_policy(serving=False)

    replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=replay_memory_size,
        batch_size=trainer.minibatch_size,
    )

    device = torch.device("cuda") if use_gpu else None
    # first fill the replay buffer to burn_in
    train_after_ts = max(train_after_ts, trainer.minibatch_size)
    fill_replay_buffer(env=env,
                       replay_buffer=replay_buffer,
                       desired_size=train_after_ts)

    post_step = train_with_replay_buffer_post_step(
        replay_buffer=replay_buffer,
        env=env,
        trainer=trainer,
        training_freq=train_every_ts,
        batch_size=trainer.minibatch_size,
        device=device,
    )

    agent = Agent.create_for_env(
        env,
        policy=training_policy,
        post_transition_callback=post_step,
        # pyre-fixme[6]: Expected `Union[str, torch.device]` for 4th param but got
        #  `Optional[torch.device]`.
        device=device,
    )

    writer = SummaryWriter()
    with summary_writer_context(writer):
        train_rewards = []
        for i in range(num_train_episodes):
            trajectory = run_episode(env=env,
                                     agent=agent,
                                     mdp_id=i,
                                     max_steps=max_steps)
            ep_reward = trajectory.calculate_cumulative_reward()
            train_rewards.append(ep_reward)
            logger.info(
                f"Finished training episode {i} with reward {ep_reward}.")

    assert train_rewards[-1] >= passing_score_bar, (
        f"reward after {len(train_rewards)} episodes is {train_rewards[-1]}, "
        f"less than {passing_score_bar}...\n"
        f"Full reward history: {train_rewards}")

    logger.info("============Train rewards=============")
    logger.info(train_rewards)

    serving_policy = manager.create_policy(serving=True)
    agent = Agent.create_for_env_with_serving_policy(env, serving_policy)

    eval_rewards = evaluate_for_n_episodes(n=num_eval_episodes,
                                           env=env,
                                           agent=agent,
                                           max_steps=max_steps).squeeze(1)
    assert np.mean(eval_rewards) >= passing_score_bar, (
        f"Predictor reward is {np.mean(eval_rewards)}, "
        f"less than {passing_score_bar}...\n"
        f"Full eval rewards: {eval_rewards}.")

    logger.info("============Eval rewards==============")
    logger.info(eval_rewards)
Code example #16
File: test_gym.py  Project: h8f/ReAgent
def run_test(
    env_name: str,
    model: ModelManager__Union,
    replay_memory_size: int,
    train_every_ts: int,
    train_after_ts: int,
    num_train_episodes: int,
    max_steps: Optional[int],
    passing_score_bar: float,
    num_eval_episodes: int,
    use_gpu: bool,
):
    env = EnvFactory.make(env_name)
    env.seed(SEED)
    env.action_space.seed(SEED)
    normalization = build_normalizer(env)
    logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")

    manager = model.value
    try:
        # pyre-fixme[16]: `Env` has no attribute `state_feature_config_provider`.
        manager.state_feature_config_provider = env.state_feature_config_provider
        logger.info(
            f"Using environment's state_feature_config_provider.\n"
            f"{manager.state_feature_config_provider}"
        )
    except AttributeError:
        logger.info("state_feature_config_provider override not applicable")

    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=normalization,
    )
    training_policy = manager.create_policy(serving=False)

    replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=replay_memory_size,
        batch_size=trainer.minibatch_size,
    )

    device = torch.device("cuda") if use_gpu else torch.device("cpu")
    # first fill the replay buffer to burn_in
    train_after_ts = max(train_after_ts, trainer.minibatch_size)
    fill_replay_buffer(
        env=env, replay_buffer=replay_buffer, desired_size=train_after_ts
    )

    post_step = train_with_replay_buffer_post_step(
        replay_buffer=replay_buffer,
        env=env,
        trainer=trainer,
        training_freq=train_every_ts,
        batch_size=trainer.minibatch_size,
        device=device,
    )

    agent = Agent.create_for_env(
        env, policy=training_policy, post_transition_callback=post_step, device=device
    )

    writer = SummaryWriter()
    with summary_writer_context(writer):
        train_rewards = []
        for i in range(num_train_episodes):
            trajectory = run_episode(
                env=env, agent=agent, mdp_id=i, max_steps=max_steps
            )
            ep_reward = trajectory.calculate_cumulative_reward()
            train_rewards.append(ep_reward)
            logger.info(
                f"Finished training episode {i} (len {len(trajectory)})"
                f" with reward {ep_reward}."
            )

    logger.info("============Train rewards=============")
    logger.info(train_rewards)
    logger.info(f"average: {np.mean(train_rewards)};\tmax: {np.max(train_rewards)}")

    # Check whether the max score passed the score bar; since we explore during
    # training, the return could be bad (leading to flakiness in C51 and QRDQN).
    assert np.max(train_rewards) >= passing_score_bar, (
        f"max reward ({np.max(train_rewards)}) after training for "
        f"{len(train_rewards)} episodes is less than {passing_score_bar}.\n"
    )

    serving_policy = manager.create_policy(serving=True)
    agent = Agent.create_for_env_with_serving_policy(env, serving_policy)

    eval_rewards = evaluate_for_n_episodes(
        n=num_eval_episodes, env=env, agent=agent, max_steps=max_steps
    ).squeeze(1)

    logger.info("============Eval rewards==============")
    logger.info(eval_rewards)
    logger.info(f"average: {np.mean(eval_rewards)};\tmax: {np.max(eval_rewards)}")
    assert np.mean(eval_rewards) >= passing_score_bar, (
        f"Predictor reward is {np.mean(eval_rewards)}, "
        f"less than {passing_score_bar}.\n"
    )
Code example #17
def train_seq2reward_and_compute_reward_mse(
    env_name: str,
    model: ModelManager__Union,
    num_train_transitions: int,
    num_test_transitions: int,
    seq_len: int,
    batch_size: int,
    num_train_epochs: int,
    use_gpu: bool,
    saved_seq2reward_path: Optional[str] = None,
):
    """ Train Seq2Reward Network and compute reward mse. """
    env: gym.Env = EnvFactory.make(env_name)
    env.seed(SEED)

    manager = model.value
    trainer = manager.initialize_trainer(
        use_gpu=use_gpu,
        reward_options=RewardOptions(),
        normalization_data_map=build_normalizer(env),
    )

    device = "cuda" if use_gpu else "cpu"
    # pyre-fixme[6]: Expected `device` for 2nd param but got `str`.
    trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env)
    test_replay_buffer = ReplayBuffer.create_from_env(
        env=env,
        replay_memory_size=num_test_transitions,
        batch_size=batch_size,
        stack_size=seq_len,
        return_everything_as_stack=True,
    )
    fill_replay_buffer(env, test_replay_buffer, num_test_transitions)

    if saved_seq2reward_path is None:
        # train from scratch
        trainer = train_seq2reward(
            env=env,
            trainer=trainer,
            trainer_preprocessor=trainer_preprocessor,
            num_train_transitions=num_train_transitions,
            seq_len=seq_len,
            batch_size=batch_size,
            num_train_epochs=num_train_epochs,
            test_replay_buffer=test_replay_buffer,
        )
    else:
        # load a pretrained model, and just evaluate it
        trainer.seq2reward_network.load_state_dict(torch.load(saved_seq2reward_path))
    state_dim = env.observation_space.shape[0]
    with torch.no_grad():
        trainer.seq2reward_network.eval()
        test_batch = test_replay_buffer.sample_transition_batch(
            batch_size=test_replay_buffer.size
        )
        preprocessed_test_batch = trainer_preprocessor(test_batch)
        adhoc_action_padding(preprocessed_test_batch, state_dim=state_dim)
        losses = trainer.get_loss(preprocessed_test_batch)
        detached_losses = losses.cpu().detach().item()
        trainer.seq2reward_network.train()
    return detached_losses