def run_task(*_):
    initial_goal = np.array([0.6, -0.1, 0.80])

    rospy.init_node('trpo_real_sawyer_pnp_exp', anonymous=True)

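    # simulated=False drives the real Sawyer arm over ROS (a running ROS
    # master is required) rather than a simulated one.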
    pnp_env = TheanoEnv(
        PickAndPlaceEnv(initial_goal,
                        initial_joint_pos=INITIAL_ROBOT_JOINT_POS,
                        simulated=False))

    rospy.on_shutdown(pnp_env.shutdown)

    pnp_env.initialize()

    env = pnp_env

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=100,
        discount=0.99,
        step_size=0.01,
        plot=False,
        force_batch_sampler=True,
    )
    algo.train()
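These run_task functions are not invoked directly; garage hands them to an experiment launcher. A minimal sketch of the launch step, assuming run_experiment is importable from garage.experiment (earlier releases exposed it as garage.misc.instrument.run_experiment) and using rllab-style launcher flags:

from garage.experiment import run_experiment

# Hand the task function to the launcher, which seeds the run, spawns
# sampler workers, and writes snapshots to the log directory.
run_experiment(
    run_task,
    n_parallel=1,          # sampler worker processes (assumed flag)
    snapshot_mode="last",  # keep only the latest iteration snapshot
    seed=1,
)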
Example #2
def run_task(*_):
    # Please note that different environments with different action spaces may
    # require different policies. For example with a Box action space, a
    # GaussianMLPPolicy works, but for a Discrete action space may need to use
    # a CategoricalMLPPolicy (see the trpo_gym_cartpole.py example)
    env = TheanoEnv(normalize(gym.make("Pendulum-v0")))

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=env.horizon,
        n_itr=50,
        discount=0.99,
        step_size=0.01,
        # Uncomment to enable plotting:
        # plot=True,
    )
    algo.train()
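The comment in this example points at the policy/action-space pairing; a minimal sketch of the Discrete-action swap, mirroring the Acrobot example (Example #11) below and the import style of Example #16 (CartPole-v0 has a Discrete(2) action space):

from garage.theano.policies import CategoricalMLPPolicy

# Discrete actions call for a categorical policy rather than a Gaussian one.
env = TheanoEnv(normalize(gym.make("CartPole-v0")))
policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))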
Example #3
def run_task(v):
    env = TheanoEnv(normalize(CartpoleEnv()))

    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        # The neural network policy should have two hidden layers,
        # each with 32 hidden units.
        hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=v["step_size"],
        plot=True,
    )
    algo.train()
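The v argument above is a variant dict supplying per-run hyperparameters such as v["step_size"]. A hedged sketch of how such variants are typically generated and launched, assuming an rllab-style VariantGenerator and a launcher that accepts a variant keyword:

from garage.experiment import run_experiment, VariantGenerator

vg = VariantGenerator()
vg.add("step_size", [0.01, 0.05, 0.1])  # one experiment per candidate value
for v in vg.variants():
    run_experiment(run_task, variant=v, n_parallel=1, seed=1)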
Example #4
def run_task(*_):
    """Run task function."""
    initial_goal = np.array([0.6, -0.1, 0.30])

    rospy.init_node('trpo_real_sawyer_reacher_exp', anonymous=True)

    env = TheanoEnv(
        ReacherEnv(
            initial_goal,
            initial_joint_pos=INITIAL_ROBOT_JOINT_POS,
            simulated=False,
            robot_control_mode='position'))

    rospy.on_shutdown(env.shutdown)

    env.initialize()

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=100,
        discount=0.99,
        step_size=0.01,
        plot=False,
        force_batch_sampler=True,
    )
    algo.train()
Example #5
def test_adaptive_std():
    """
    Checks if the adaptive_std parameter works.
    """
    env = TheanoEnv(CartpoleEnv())
    policy = GaussianMLPPolicy(env_spec=env.spec, adaptive_std=True)
    baseline = ZeroBaseline(env_spec=env.spec)
    algo = TRPO(env=env,
                policy=policy,
                baseline=baseline,
                batch_size=100,
                n_itr=1)
    algo.train()
Example #6
def test_trpo_relu_nan(self):
    env = TheanoEnv(DummyEnv())
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(1, ))
    baseline = ZeroBaseline(env_spec=env.spec)
    algo = TRPO(env=env,
                policy=policy,
                baseline=baseline,
                n_itr=1,
                batch_size=1000,
                max_path_length=100,
                step_size=0.001)
    algo.train()
    assert not np.isnan(np.sum(policy.get_param_values()))
Example #7
def test_trpo_deterministic_nan(self):
    env = TheanoEnv(DummyEnv())
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(1, ))
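    # Clamp the policy's log-std to log(1e-8) so the policy becomes
    # effectively deterministic; training must still yield finite parameters.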
    policy._l_log_std.param.set_value([np.float32(np.log(1e-8))])
    baseline = ZeroBaseline(env_spec=env.spec)
    algo = TRPO(env=env,
                policy=policy,
                baseline=baseline,
                n_itr=10,
                batch_size=1000,
                max_path_length=100,
                step_size=0.01)
    algo.train()
    assert not np.isnan(np.sum(policy.get_param_values()))
Example #8
def run_pick_and_place(*_):
    initial_goal = np.array([0.6, -0.1, 0.80])
    env = TheanoEnv(PickAndPlaceEnv(initial_goal))
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env,
        policy=policy,
        batch_size=4000,
        max_path_length=2000,
        baseline=baseline,
        n_itr=1000,
        discount=0.99,
        step_size=0.01,
        plot=True,
        force_batch_sampler=True,
    )
    algo.train()
Example #9
def run_task(*_):
    env = TheanoEnv(normalize(CartpoleEnv()))

    policy = GaussianGRUPolicy(env_spec=env.spec)

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(env=env,
                policy=policy,
                baseline=baseline,
                batch_size=4000,
                max_path_length=100,
                n_itr=10,
                discount=0.99,
                step_size=0.01,
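                # Approximate the Hessian-vector product by finite
                # differences (base_eps sets the perturbation size); some
                # recurrent ops in Theano lack exact second derivatives, so
                # the GRU policy relies on this approximation.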
                optimizer=ConjugateGradientOptimizer(
                    hvp_approach=FiniteDifferenceHvp(base_eps=1e-5)))
    algo.train()
Example #10
def run(*_):
    """Stub task function for running TRPO."""
    env = TheanoEnv(
        ReacherEnv(control_method='position_control', sparse_reward=False))
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env,
        policy=policy,
        batch_size=4000,
        max_path_length=100,
        baseline=baseline,
        n_itr=2500,
        discount=0.99,
        step_size=0.01,
        plot=True,
        force_batch_sampler=True,
    )
    algo.train()
Example #11
def run_task(*_):
    env = TheanoEnv(normalize(gym.make("Acrobot-v1")))

    policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=env.horizon,
        n_itr=50,
        discount=0.99,
        step_size=0.01,
        plot=True,
    )
    algo.train()
Example #12
def run_block_stacking(*_):
    """Run TRPO with block stacking. """
    env = TheanoEnv(BlockStackingEnv())

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env,
        policy=policy,
        batch_size=4000,
        max_path_length=2000,
        baseline=baseline,
        n_itr=1000,
        discount=0.99,
        step_size=0.01,
        plot=True,
        force_batch_sampler=True,
    )
    algo.train()
Example #13
def run_task(v):
    env = TheanoEnv(normalize(CartpoleEnv()))

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=v["step_size"],
        # plot=True,
    )
    algo.train()
Example #14
def run_task(*_):
    env = TheanoEnv(normalize(CartpoleEnv()))

    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    algo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=1000,
        discount=0.99,
        step_size=0.01,
        # Uncomment to enable plotting:
        # plot=True,
    )
    algo.train()
Example #15
    def test_dm_control_theano_policy(self):
        task = ALL_TASKS[0]

        env = TheanoEnv(DmControlEnv(domain_name=task[0], task_name=task[1]))

        policy = GaussianMLPPolicy(
            env_spec=env.spec,
            hidden_sizes=(32, 32),
        )

        baseline = LinearFeatureBaseline(env_spec=env.spec)

        algo = TRPO(
            env=env,
            policy=policy,
            baseline=baseline,
            batch_size=10,
            max_path_length=5,
            n_itr=1,
            discount=0.99,
            step_size=0.01,
        )
        algo.train()
Example #16
from garage.baselines import LinearFeatureBaseline
from garage.envs import normalize
from garage.envs.box2d import CartpoleEnv
from garage.theano.algos import TRPO
from garage.theano.envs import TheanoEnv
from garage.theano.policies import GaussianMLPPolicy

env = TheanoEnv(normalize(CartpoleEnv()))

policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=4000,
    max_path_length=100,
    n_itr=40,
    discount=0.99,
    step_size=0.01,
    # plot=True
)
algo.train()
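Unlike the run_task snippets above, this final example builds and trains the algorithm inline as a plain script; wrapping the same body in a task function and handing it to the launcher sketched after Example #1 would add seeding, sampler workers, and snapshotting.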