Example #1
def create_trainer(params, env):
    mdnrnn_params = MDNRNNParameters(**params["mdnrnn"])
    mdnrnn_net = MemoryNetwork(
        state_dim=env.state_dim,
        action_dim=env.action_dim,
        num_hiddens=mdnrnn_params.hidden_size,
        num_hidden_layers=mdnrnn_params.num_hidden_layers,
        num_gaussians=mdnrnn_params.num_gaussians,
    )
    # History length = total transitions (num_train_episodes * max_steps)
    # divided by the minibatch size, i.e. the number of minibatches in the run.
    cum_loss_hist_len = (params["run_details"]["num_train_episodes"] *
                         params["run_details"]["max_steps"] //
                         mdnrnn_params.minibatch_size)
    trainer = MDNRNNTrainer(mdnrnn_network=mdnrnn_net,
                            params=mdnrnn_params,
                            cum_loss_hist=cum_loss_hist_len)
    return trainer
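
For reference, a minimal sketch of the params dict this factory reads: the top-level keys come from the function above, and the "mdnrnn" field names follow the MDNRNNParameters attributes used across these examples; the concrete values are placeholders, not tuned settings.

params = {
    "mdnrnn": {
        "hidden_size": 64,         # -> num_hiddens of MemoryNetwork
        "num_hidden_layers": 2,
        "num_gaussians": 5,
        "minibatch_size": 16,
        "learning_rate": 0.001,
    },
    "run_details": {"num_train_episodes": 100, "max_steps": 200},
}
trainer = create_trainer(params, env)  # env must expose state_dim and action_dim
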
Example #2
def create_trainer(params: Dict, env: OpenAIGymEnvironment, use_gpu: bool):
    mdnrnn_params = MDNRNNParameters(**params["mdnrnn"])
    mdnrnn_net = MemoryNetwork(
        state_dim=env.state_dim,
        action_dim=env.action_dim,
        num_hiddens=mdnrnn_params.hidden_size,
        num_hidden_layers=mdnrnn_params.num_hidden_layers,
        num_gaussians=mdnrnn_params.num_gaussians,
    )
    if use_gpu and torch.cuda.is_available():
        mdnrnn_net = mdnrnn_net.cuda()

    cum_loss_hist_len = (params["run_details"]["num_train_episodes"] *
                         params["run_details"]["max_steps"] //
                         mdnrnn_params.minibatch_size)
    trainer = MDNRNNTrainer(mdnrnn_network=mdnrnn_net,
                            params=mdnrnn_params,
                            cum_loss_hist=cum_loss_hist_len)
    return trainer
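
A usage sketch under assumed inputs: in practice the params dict is loaded from a JSON config and the GPU flag is gated on availability (the config path and gym id below are placeholders).

import json

import torch

env = OpenAIGymEnvironment("CartPole-v0")   # placeholder gym id
with open("mdnrnn_config.json") as f:       # placeholder config path
    params = json.load(f)
trainer = create_trainer(params, env, use_gpu=torch.cuda.is_available())
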
Example #3
def create_trainer(model_type, params, rl_parameters, use_gpu, env):
    if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:
        training_parameters = params["training"]
        if isinstance(training_parameters, dict):
            training_parameters = TrainingParameters(**training_parameters)
        rainbow_parameters = params["rainbow"]
        if isinstance(rainbow_parameters, dict):
            rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
        if env.img:
            assert (
                training_parameters.cnn_parameters is not None
            ), "Missing CNN parameters for image input"
            if isinstance(training_parameters.cnn_parameters, dict):
                training_parameters.cnn_parameters = CNNParameters(
                    **training_parameters.cnn_parameters
                )
            training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
            training_parameters.cnn_parameters.input_height = env.height
            training_parameters.cnn_parameters.input_width = env.width
            training_parameters.cnn_parameters.num_input_channels = (
                env.num_input_channels
            )
        else:
            assert (
                training_parameters.cnn_parameters is None
            ), "Extra CNN parameters for non-image input"
        trainer_params = DiscreteActionModelParameters(
            actions=env.actions,
            rl=rl_parameters,
            training=training_parameters,
            rainbow=rainbow_parameters,
        )
        trainer = create_dqn_trainer_from_params(
            trainer_params, env.normalization, use_gpu
        )

    elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:
        training_parameters = params["training"]
        if isinstance(training_parameters, dict):
            training_parameters = TrainingParameters(**training_parameters)
        rainbow_parameters = params["rainbow"]
        if isinstance(rainbow_parameters, dict):
            rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
        if env.img:
            assert (
                training_parameters.cnn_parameters is not None
            ), "Missing CNN parameters for image input"
            training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
        else:
            assert (
                training_parameters.cnn_parameters is None
            ), "Extra CNN parameters for non-image input"
        trainer_params = ContinuousActionModelParameters(
            rl=rl_parameters, training=training_parameters, rainbow=rainbow_parameters
        )
        trainer = create_parametric_dqn_trainer_from_params(
            trainer_params, env.normalization, env.normalization_action, use_gpu
        )

    elif model_type == ModelType.TD3.value:
        trainer_params = TD3ModelParameters(
            rl=rl_parameters,
            training=TD3TrainingParameters(
                minibatch_size=params["td3_training"]["minibatch_size"],
                q_network_optimizer=OptimizerParameters(
                    **params["td3_training"]["q_network_optimizer"]
                ),
                actor_network_optimizer=OptimizerParameters(
                    **params["td3_training"]["actor_network_optimizer"]
                ),
                use_2_q_functions=params["td3_training"]["use_2_q_functions"],
                exploration_noise=params["td3_training"]["exploration_noise"],
                initial_exploration_ts=params["td3_training"]["initial_exploration_ts"],
                target_policy_smoothing=params["td3_training"][
                    "target_policy_smoothing"
                ],
                noise_clip=params["td3_training"]["noise_clip"],
                delayed_policy_update=params["td3_training"]["delayed_policy_update"],
            ),
            q_network=FeedForwardParameters(**params["critic_training"]),
            actor_network=FeedForwardParameters(**params["actor_training"]),
        )
        trainer = get_td3_trainer(env, trainer_params, use_gpu)

    elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:
        value_network = None
        value_network_optimizer = None
        alpha_optimizer = None
        if params["sac_training"]["use_value_network"]:
            value_network = FeedForwardParameters(**params["sac_value_training"])
            value_network_optimizer = OptimizerParameters(
                **params["sac_training"]["value_network_optimizer"]
            )
        if "alpha_optimizer" in params["sac_training"]:
            alpha_optimizer = OptimizerParameters(
                **params["sac_training"]["alpha_optimizer"]
            )
        entropy_temperature = params["sac_training"].get("entropy_temperature", None)
        target_entropy = params["sac_training"].get("target_entropy", None)

        trainer_params = SACModelParameters(
            rl=rl_parameters,
            training=SACTrainingParameters(
                minibatch_size=params["sac_training"]["minibatch_size"],
                use_2_q_functions=params["sac_training"]["use_2_q_functions"],
                use_value_network=params["sac_training"]["use_value_network"],
                q_network_optimizer=OptimizerParameters(
                    **params["sac_training"]["q_network_optimizer"]
                ),
                value_network_optimizer=value_network_optimizer,
                actor_network_optimizer=OptimizerParameters(
                    **params["sac_training"]["actor_network_optimizer"]
                ),
                entropy_temperature=entropy_temperature,
                target_entropy=target_entropy,
                alpha_optimizer=alpha_optimizer,
            ),
            q_network=FeedForwardParameters(**params["critic_training"]),
            value_network=value_network,
            actor_network=FeedForwardParameters(**params["actor_training"]),
        )
        trainer = get_sac_trainer(env, trainer_params, use_gpu)
    elif model_type == ModelType.CEM.value:
        trainer_params = CEMParameters(**params["cem"])
        trainer_params.mdnrnn = MDNRNNParameters(**params["cem"]["mdnrnn"])
        trainer_params.rl = rl_parameters
        trainer = get_cem_trainer(env, trainer_params, use_gpu)
    else:
        raise NotImplementedError("Model of type {} not supported".format(model_type))

    return trainer
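
As a dispatch sketch, here is how the SOFT_ACTOR_CRITIC branch above might be exercised. Only the top-level keys are taken from the code; the inner OptimizerParameters / FeedForwardParameters fields (learning_rate, layers, activations) are assumptions.

sac_params = {
    "sac_training": {
        "minibatch_size": 256,
        "use_2_q_functions": True,
        "use_value_network": False,                          # skips sac_value_training
        "q_network_optimizer": {"learning_rate": 3e-4},      # assumed OptimizerParameters field
        "actor_network_optimizer": {"learning_rate": 3e-4},  # assumed OptimizerParameters field
    },
    "critic_training": {"layers": [256, 256], "activations": ["relu", "relu"]},  # assumed fields
    "actor_training": {"layers": [256, 256], "activations": ["relu", "relu"]},   # assumed fields
}
trainer = create_trainer(
    ModelType.SOFT_ACTOR_CRITIC.value,
    sac_params,
    rl_parameters,   # an RLParameters instance built elsewhere
    use_gpu=False,
    env=env,
)
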
Example #4
    def test_mdnrnn_simulate_world(self):
        num_epochs = 300
        num_episodes = 400
        batch_size = 200
        action_dim = 2
        seq_len = 5
        state_dim = 2
        simulated_num_gaussians = 2
        mdnrnn_num_gaussians = 2
        simulated_num_hidden_layers = 1
        simulated_num_hiddens = 3
        mdnrnn_num_hidden_layers = 1
        mdnrnn_num_hiddens = 10
        adam_lr = 0.01

        replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_episodes)
        swm = SimulatedWorldModel(
            action_dim=action_dim,
            state_dim=state_dim,
            num_gaussians=simulated_num_gaussians,
            lstm_num_hidden_layers=simulated_num_hidden_layers,
            lstm_num_hiddens=simulated_num_hiddens,
        )

        possible_actions = torch.eye(action_dim)
        for _ in range(num_episodes):
            cur_state_mem = np.zeros((seq_len, state_dim))
            next_state_mem = np.zeros((seq_len, state_dim))
            action_mem = np.zeros((seq_len, action_dim))
            reward_mem = np.zeros(seq_len)
            not_terminal_mem = np.zeros(seq_len)
            next_mus_mem = np.zeros(
                (seq_len, simulated_num_gaussians, state_dim))

            swm.init_hidden(batch_size=1)
            next_state = torch.randn((1, 1, state_dim))
            for s in range(seq_len):
                cur_state = next_state
                action = possible_actions[np.random.randint(action_dim)].view(
                    1, 1, action_dim)
                next_mus, reward = swm(action, cur_state)

                not_terminal = 1
                if s == seq_len - 1:
                    not_terminal = 0

                # randomly draw for next state
                next_pi = torch.ones(
                    simulated_num_gaussians) / simulated_num_gaussians
                index = Categorical(next_pi).sample((1, )).long().item()
                next_state = next_mus[0, 0, index].view(1, 1, state_dim)

                cur_state_mem[s] = cur_state.detach().numpy()
                action_mem[s] = action.numpy()
                reward_mem[s] = reward.detach().numpy()
                not_terminal_mem[s] = not_terminal
                next_state_mem[s] = next_state.detach().numpy()
                next_mus_mem[s] = next_mus.detach().numpy()

            replay_buffer.insert_into_memory(cur_state_mem, action_mem,
                                             next_state_mem, reward_mem,
                                             not_terminal_mem)

        num_batch = num_episodes // batch_size
        mdnrnn_params = MDNRNNParameters(
            hidden_size=mdnrnn_num_hiddens,
            num_hidden_layers=mdnrnn_num_hidden_layers,
            minibatch_size=batch_size,
            learning_rate=adam_lr,
            num_gaussians=mdnrnn_num_gaussians,
        )
        mdnrnn_net = MemoryNetwork(
            state_dim=state_dim,
            action_dim=action_dim,
            num_hiddens=mdnrnn_params.hidden_size,
            num_hidden_layers=mdnrnn_params.num_hidden_layers,
            num_gaussians=mdnrnn_params.num_gaussians,
        )
        trainer = MDNRNNTrainer(mdnrnn_network=mdnrnn_net,
                                params=mdnrnn_params,
                                cum_loss_hist=num_batch)

        for e in range(num_epochs):
            for i in range(num_batch):
                training_batch = replay_buffer.sample_memories(batch_size)
                losses = trainer.train(training_batch)
                logger.info(
                    "{}-th epoch, {}-th minibatch: \n"
                    "loss={}, bce={}, gmm={}, mse={} \n"
                    "cum loss={}, cum bce={}, cum gmm={}, cum mse={}\n".format(
                        e,
                        i,
                        losses["loss"],
                        losses["bce"],
                        losses["gmm"],
                        losses["mse"],
                        np.mean(trainer.cum_loss),
                        np.mean(trainer.cum_bce),
                        np.mean(trainer.cum_gmm),
                        np.mean(trainer.cum_mse),
                    ))

                if (np.mean(trainer.cum_loss) < 0
                        and np.mean(trainer.cum_gmm) < -3.0
                        and np.mean(trainer.cum_bce) < 0.6
                        and np.mean(trainer.cum_mse) < 0.2):
                    return

        raise AssertionError("losses not reduced significantly during training")
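
The early-return condition doubles as the test's convergence criterion; pulled out as a small helper for readability (the helper name is ours, the thresholds are unchanged).

def world_model_losses_converged(trainer) -> bool:
    # Same thresholds as the early-return check in the training loop above.
    return (
        np.mean(trainer.cum_loss) < 0
        and np.mean(trainer.cum_gmm) < -3.0
        and np.mean(trainer.cum_bce) < 0.6
        and np.mean(trainer.cum_mse) < 0.2
    )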