Example #1
def _f():
    if env_type == 'Fetch':
        env = gym.make(env_id, n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'],
                       observe_obj_grp=config['observe_obj_grp'],
                       obj_range=config['obj_range'])
    elif env_type == 'FetchStack':
        env = gym.make(env_id, n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'],
                       observe_obj_grp=config['observe_obj_grp'],
                       obj_range=config['obj_range'],
                       change_stack_order=config['change_stack_order'])
    elif env_type == 'Hand':
        env = gym.make(env_id, obj_action_type=config['obj_action_type'])
    elif env_type == 'Others':
        env = gym.make(env_id)

    #env._max_episode_steps *= config['max_nb_objects']
    keys = env.observation_space.spaces.keys()
    env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
    env.seed(SEED + 10 * i_env)
    if stack_prob is not None:
        env.unwrapped.stack_prob = stack_prob
    return env
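This fragment is only the inner closure of an environment factory: the enclosing wrapper returns _f so that each worker process builds, wraps and seeds its own copy (the full pattern appears in Examples #8 and #9 below). A minimal sketch of that surrounding shape, with the fork-specific gym.make kwargs elided; the import alias and the SubprocVecEnv consumer are assumptions based on the later examples, not a verbatim quote of the project:

import gym  # in the olbr/sldr projects this is the gym_wmgds fork (assumption)

def make_env(env_id, i_env, seed=0):
    def _f():
        env = gym.make(env_id)  # Example #1 passes the Multi/Fetch-specific kwargs here
        keys = env.observation_space.spaces.keys()
        env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
        env.seed(seed + 10 * i_env)  # distinct seed per worker process
        return env
    return _f

# Consumed as in Examples #8/#9 (SubprocVecEnv comes from the project's vec-env utilities):
# envs = SubprocVecEnv([make_env(ENV_NAME, i_env) for i_env in range(N_ENVS)])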
Example #2
def _f():
    if env_type == 'Fetch':
        env = gym.make(env_id, n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'],
                       observe_obj_grp=config['observe_obj_grp'],
                       obj_range=config['obj_range'])
    elif env_type == 'Hand':
        env = gym.make(env_id, obj_action_type=config['obj_action_type'])
    elif env_type == 'Others':
        env = gym.make(env_id)

    keys = env.observation_space.spaces.keys()
    env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
    env.seed(SEED + 10 * i_env)
    return env
Example #3
def test_text_envs():
    env = gym_wmgds.make('FrozenLake-v0')
    video = VideoRecorder(env)
    try:
        env.reset()
        video.capture_frame()
        video.close()
    finally:
        os.remove(video.path)
Example #4
def test_record_simple():
    env = gym_wmgds.make("CartPole-v1")
    rec = VideoRecorder(env)
    env.reset()
    rec.capture_frame()
    rec.close()
    assert not rec.empty
    assert not rec.broken
    assert os.path.exists(rec.path)
    f = open(rec.path)
    assert os.fstat(f.fileno()).st_size > 100
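The two tests above only capture a single frame; below is a short sketch of recording an actual rollout with the same VideoRecorder API. The import path assumes the gym_wmgds fork mirrors gym's module layout, and the output path is arbitrary, so treat both as assumptions rather than project code:

import os
import gym_wmgds
# Assumed import path (mirrors gym.wrappers.monitoring.video_recorder):
from gym_wmgds.wrappers.monitoring.video_recorder import VideoRecorder

env = gym_wmgds.make("CartPole-v1")
rec = VideoRecorder(env, path="/tmp/cartpole_rollout.mp4")
env.reset()
for _ in range(50):
    _, _, done, _ = env.step(env.action_space.sample())
    rec.capture_frame()   # grabs a rendered frame into the video
    if done:
        env.reset()
rec.close()               # finalizes the file at rec.path
env.close()
assert os.path.exists(rec.path)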
Example #5
#!/usr/bin/env python
from __future__ import print_function

import sys, gym_wmgds, time

#
# Test yourself as a learning agent! Pass environment name as a command-line argument, for example:
#
# python keyboard_agent.py SpaceInvadersNoFrameskip-v4
#

env = gym_wmgds.make('LunarLander-v2' if len(sys.argv) < 2 else sys.argv[1])

if not hasattr(env.action_space, 'n'):
    raise Exception('Keyboard agent only supports discrete action spaces')
ACTIONS = env.action_space.n
SKIP_CONTROL = 0  # Reuse the previous control decision SKIP_CONTROL times;
                  # this lets you test how much frame-skip is still playable.

human_agent_action = 0
human_wants_restart = False
human_sets_pause = False


def key_press(key, mod):
    global human_agent_action, human_wants_restart, human_sets_pause
    if key == 0xff0d: human_wants_restart = True
    if key == 32: human_sets_pause = not human_sets_pause
    a = int(key - ord('0'))
    if a <= 0 or a >= ACTIONS: return
    human_agent_action = a
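The excerpt stops at the key-press handler. In gym's stock keyboard_agent.py the matching release handler and the wiring of both handlers to the render window look roughly like the following; this is a sketch that assumes a pyglet-based viewer (classic-control/Box2D style), not a quote of the file above:

def key_release(key, mod):
    global human_agent_action
    a = int(key - ord('0'))
    if a <= 0 or a >= ACTIONS:
        return
    if human_agent_action == a:
        human_agent_action = 0

env.render()
# Attach the handlers to the pyglet window created by render() (assumed viewer API).
env.unwrapped.viewer.window.on_key_press = key_press
env.unwrapped.viewer.window.on_key_release = key_release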
Example #6
File: main4.py Project: ozcell/olbr
def init(config):
    
    if config['resume'] != '':
        resume_path = config['resume']
        saver = Saver(config)
        config, start_episode, save_dict = saver.resume_ckpt()
        config['resume'] = resume_path
    else:
        start_episode = 0
        
    #hyperparameters
    ENV_NAME = config['env_id'] #'simple_spread'
    SEED = config['random_seed'] # 1

    GAMMA = config['gamma'] # 0.95
    TAU = config['tau'] # 0.01

    ACTOR_LR = config['plcy_lr'] # 0.01
    CRITIC_LR = config['crtc_lr'] # 0.01

    MEM_SIZE = config['buffer_length'] # 1000000 

    REGULARIZATION = config['regularization'] # True
    NORMALIZED_REWARDS = config['reward_normalization'] # True

    env = gym.make(ENV_NAME)
    env.seed(SEED)

    observation_space = env.observation_space.spaces['observation'].shape[0] + env.observation_space.spaces['desired_goal'].shape[0]
    action_space = env.action_space
    if env.action_space.low[0] == -1 and env.action_space.high[0] == 1:
        OUT_FUNC = K.tanh 
    elif env.action_space.low[0] == 0 and env.action_space.high[0] == 1:
        OUT_FUNC = K.sigmoid
    else:
        OUT_FUNC = K.sigmoid

    K.manual_seed(SEED)
    np.random.seed(SEED)
    
    MODEL = DDPG

    if config['verbose'] > 1:
        # utils
        summaries = (Summarizer(config['dir_summary_train'], config['port'], config['resume']),
                    Summarizer(config['dir_summary_test'], config['port'], config['resume']))
        saver = Saver(config)
    else:
        summaries = None
        saver = None

    #exploration initialization
    noise = Noise(action_space.shape[0], sigma=0.2, eps=0.3)
    #noise = OUNoise(action_space.shape[0])

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)) # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space, action_space, optimizer, 
                  Actor, Critic, loss_func, GAMMA, TAU, out_func=OUT_FUNC,
                  discrete=False, regularization=REGULARIZATION, normalized_rewards=NORMALIZED_REWARDS)
    
    if config['resume'] != '':
        for i, param in enumerate(save_dict['model_params']):
            model.entities[i].load_state_dict(param)
    
    #memory initialization
    memory = ReplayMemory(MEM_SIZE)

    normalizer = (Normalizer(), None)

    experiment_args = (env, memory, noise, config, summaries, saver, start_episode, normalizer)
          
    return model, experiment_args
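A minimal sketch of how a training script would consume this init(): only the unpacking order is taken from the experiment_args tuple above; the config dict and the final training call are placeholders, not the project's actual entry point.

model, experiment_args = init(config)   # config: the experiment's parameter dict
(env, memory, noise, config, summaries,
 saver, start_episode, normalizer) = experiment_args
# e.g. resume/continue training from start_episode with a (hypothetical) loop:
# train(model, env, memory, noise, normalizer, summaries, saver, start_episode)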
Example #7
def init(config,
         agent='robot',
         her=False,
         object_Qfunc=None,
         backward_dyn=None,
         object_policy=None,
         reward_fun=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']

    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        env = gym.make(ENV_NAME,
                       n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'],
                       observe_obj_grp=config['observe_obj_grp'])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        env = gym.make(ENV_NAME, obj_action_type=config['obj_action_type'])
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    else:
        env = gym.make(ENV_NAME)

    def her_reward_fun(ag_2, g, info):  # vectorized
        return env.compute_reward(achieved_goal=ag_2,
                                  desired_goal=g,
                                  info=info)

    env.seed(SEED)
    K.manual_seed(SEED)
    np.random.seed(SEED)

    #if config['obj_action_type'] == 'all':
    #    n_actions = config['max_nb_objects'] * 7 + 4
    #elif config['obj_action_type'] == 'slide_only':
    #    n_actions = config['max_nb_objects'] * 3 + 4
    #elif config['obj_action_type'] == 'rotation_only':
    #    n_actions = config['max_nb_objects'] * 4 + 4

    observation_space = env.observation_space.spaces['observation'].shape[
        1] + env.observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    OUT_FUNC = K.tanh
    if config['agent_alg'] == 'DDPG_BD':
        MODEL = DDPG_BD
        from olbr.replay_buffer import ReplayBuffer
        from olbr.her_sampler import make_sample_her_transitions
    elif config['agent_alg'] == 'MADDPG_BD':
        MODEL = MADDPG_BD
        from olbr.replay_buffer import ReplayBuffer_v2 as ReplayBuffer
        from olbr.her_sampler import make_sample_her_transitions_v2 as make_sample_her_transitions

    #exploration initialization
    if agent == 'robot':
        agent_id = 0
        noise = (Noise(action_space[0].shape[0], sigma=0.2, eps=0.3),
                 Noise(action_space[1].shape[0], sigma=0.2, eps=0.3))

        env._max_episode_steps *= config['max_nb_objects']
    elif agent == 'object':
        agent_id = 1
        #noise = Noise(action_space[1].shape[0], sigma=0.05, eps=0.1)
        noise = Noise(action_space[1].shape[0], sigma=0.2, eps=0.3)

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space,
                  action_space,
                  optimizer,
                  Actor,
                  Critic,
                  loss_func,
                  GAMMA,
                  TAU,
                  out_func=OUT_FUNC,
                  discrete=False,
                  regularization=REGULARIZATION,
                  normalized_rewards=NORMALIZED_REWARDS,
                  agent_id=agent_id,
                  object_Qfunc=object_Qfunc,
                  backward_dyn=backward_dyn,
                  object_policy=object_policy,
                  reward_fun=reward_fun,
                  masked_with_r=config['masked_with_r'])
    normalizer = [Normalizer(), Normalizer()]

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions(
            'future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions(
            'none', 4, her_reward_fun)

    buffer_shapes = {
        'o': (env._max_episode_steps,
              env.observation_space.spaces['observation'].shape[1] * 2),
        'ag': (env._max_episode_steps,
               env.observation_space.spaces['achieved_goal'].shape[0]),
        'g': (env._max_episode_steps,
              env.observation_space.spaces['desired_goal'].shape[0]),
        'u': (env._max_episode_steps - 1, action_space[2].shape[0])
    }
    memory = (ReplayBuffer(buffer_shapes, MEM_SIZE, env._max_episode_steps,
                           sample_her_transitions),
              ReplayBuffer(buffer_shapes, MEM_SIZE, env._max_episode_steps,
                           sample_her_transitions))

    experiment_args = (env, memory, noise, config, normalizer, agent_id)

    print('singleseeding')

    return model, experiment_args
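These init() variants all build the same triple of Box action spaces: robot-only, objects-only, and the combined joint action. A quick dimensional sanity check of that split, using a hypothetical config of 2 objects with a 3-dimensional obj_action_type (the 4 robot actions correspond to the Fetch arm's 3 Cartesian deltas plus the gripper):

import gym  # only gym.spaces.Box is needed here

n_rob_actions = 4                      # Fetch arm: 3 Cartesian deltas + 1 gripper
n_actions = 2 * 3 + n_rob_actions      # max_nb_objects * len(obj_action_type) + robot
action_space = (gym.spaces.Box(-1., 1., shape=(n_rob_actions,), dtype='float32'),
                gym.spaces.Box(-1., 1., shape=(n_actions - n_rob_actions,), dtype='float32'),
                gym.spaces.Box(-1., 1., shape=(n_actions,), dtype='float32'))
assert action_space[0].shape[0] + action_space[1].shape[0] == action_space[2].shape[0]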
Example #8
def init(config,
         agent='robot',
         her=False,
         object_Qfunc=None,
         backward_dyn=None,
         object_policy=None,
         reward_fun=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']
    N_ENVS = config['n_envs']

    def make_env(env_id, i_env, env_type='Fetch', stack_prob=None):
        def _f():
            if env_type == 'Fetch':
                env = gym.make(env_id,
                               n_objects=config['max_nb_objects'],
                               obj_action_type=config['obj_action_type'],
                               observe_obj_grp=config['observe_obj_grp'],
                               obj_range=config['obj_range'],
                               change_stack_order=config['change_stack_order'])
            elif env_type == 'Hand':
                env = gym.make(env_id,
                               obj_action_type=config['obj_action_type'])
            elif env_type == 'Others':
                env = gym.make(env_id)

            #env._max_episode_steps *= config['max_nb_objects']
            keys = env.observation_space.spaces.keys()
            env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
            env.seed(SEED + 10 * i_env)
            if stack_prob is not None:
                env.unwrapped.stack_prob = stack_prob
            return env

        return _f

    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME and 'Stack' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'Fetch', config['train_stack_prob'])
            for i_env in range(N_ENVS)
        ])
        envs_test = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'Fetch', config['test_stack_prob'])
            for i_env in range(N_ENVS)
        ])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(N_ENVS)])
        envs_test = None
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             obj_action_type=config['obj_action_type'])
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Hand') for i_env in range(N_ENVS)])
        envs_test = None
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    else:
        dummy_env = gym.make(ENV_NAME)
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Others') for i_env in range(N_ENVS)])
        envs_test = None

    def her_reward_fun_sparse(ag_2, g, info):  # vectorized
        return dummy_env.compute_reward(achieved_goal=ag_2,
                                        desired_goal=g,
                                        info=info)

    def her_reward_fun_step(ag_2, g, info):
        goal_a = ag_2
        goal_b = g
        assert goal_a.shape == goal_b.shape
        goal_a = goal_a.reshape(-1, dummy_env.env.n_objects, 3)
        goal_b = goal_b.reshape(-1, dummy_env.env.n_objects, 3)
        d = np.linalg.norm(goal_a - goal_b, axis=-1)
        return -(d > dummy_env.env.distance_threshold).astype(
            np.float32).sum(-1)

    if config['use_step_reward_fun']:
        her_reward_fun = her_reward_fun_step
        print('using step reward')
    else:
        her_reward_fun = her_reward_fun_sparse
        print('using sparse reward')

    if config['change_stack_order']:
        print('changing the stacking order')

    K.manual_seed(SEED)
    np.random.seed(SEED)

    observation_space = dummy_env.observation_space.spaces['observation'].shape[
        1] + dummy_env.observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    clip_Q_neg = config['clip_Q_neg'] if config['clip_Q_neg'] < 0 else None
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    OUT_FUNC = K.tanh
    if config['agent_alg'] == 'DDPG_BD':
        MODEL = DDPG_BD
        from sldr.replay_buffer import ReplayBuffer
        from sldr.her_sampler import make_sample_her_transitions
    elif config['agent_alg'] == 'MADDPG_BD':
        MODEL = MADDPG_BD
        from sldr.replay_buffer import ReplayBuffer_v2 as ReplayBuffer
        from sldr.her_sampler import make_sample_her_transitions_v2 as make_sample_her_transitions

    #exploration initialization
    agent_id = 0
    noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3)
    config['episode_length'] = dummy_env._max_episode_steps
    config['observation_space'] = dummy_env.observation_space

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space,
                  action_space,
                  optimizer,
                  Actor,
                  Critic,
                  loss_func,
                  GAMMA,
                  TAU,
                  out_func=OUT_FUNC,
                  discrete=False,
                  regularization=REGULARIZATION,
                  normalized_rewards=NORMALIZED_REWARDS,
                  agent_id=agent_id,
                  object_Qfunc=object_Qfunc,
                  backward_dyn=backward_dyn,
                  object_policy=object_policy,
                  reward_fun=reward_fun,
                  n_objects=config['max_nb_objects'],
                  clip_Q_neg=clip_Q_neg)
    normalizer = [Normalizer(), Normalizer()]

    for _ in range(1):
        state_all = dummy_env.reset()
        for _ in range(config['episode_length']):

            model.to_cpu()

            obs = [
                K.tensor(obs, dtype=K.float32).unsqueeze(0)
                for obs in state_all['observation']
            ]
            goal = K.tensor(state_all['desired_goal'],
                            dtype=K.float32).unsqueeze(0)

            # Observation normalization
            obs_goal = []
            obs_goal.append(K.cat([obs[0], goal], dim=-1))
            if normalizer[0] is not None:
                obs_goal[0] = normalizer[0].preprocess_with_update(obs_goal[0])

            action = model.select_action(obs_goal[0],
                                         noise).cpu().numpy().squeeze(0)

            action_to_env = np.zeros_like(dummy_env.action_space.sample())
            action_to_env[0:action.shape[0]] = action

            next_state_all, _, _, _ = dummy_env.step(action_to_env)

            # Move to the next state
            state_all = next_state_all

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions(
            'future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions(
            'none', 4, her_reward_fun)

    buffer_shapes = {
        'o': (config['episode_length'],
              dummy_env.observation_space.spaces['observation'].shape[1] * 2),
        'ag': (config['episode_length'],
               dummy_env.observation_space.spaces['achieved_goal'].shape[0]),
        'g': (config['episode_length'],
              dummy_env.observation_space.spaces['desired_goal'].shape[0]),
        'u': (config['episode_length'] - 1, action_space[2].shape[0])
    }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, config['episode_length'],
                          sample_her_transitions)

    experiment_args = ((envs, envs_test), memory, noise, config, normalizer,
                       agent_id)

    return model, experiment_args
Example #9
def init(config,
         agent='robot',
         her=False,
         reward_fun=None,
         obj_traj=None,
         obj_mean=None,
         obj_std=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']
    N_ENVS = config['n_envs']

    def make_env(env_id, i_env, env_type='Fetch', stack_prob=None):
        def _f():
            if env_type == 'Fetch':
                env = gym.make(env_id,
                               n_objects=config['max_nb_objects'],
                               obj_action_type=config['obj_action_type'],
                               observe_obj_grp=config['observe_obj_grp'],
                               obj_range=config['obj_range'])
            elif env_type == 'FetchStack':
                env = gym.make(env_id,
                               n_objects=config['max_nb_objects'],
                               obj_action_type=config['obj_action_type'],
                               observe_obj_grp=config['observe_obj_grp'],
                               obj_range=config['obj_range'],
                               change_stack_order=config['change_stack_order'])
            elif env_type == 'Hand':
                env = gym.make(env_id,
                               obj_action_type=config['obj_action_type'])
            elif env_type == 'FetchMaRobSeq':
                env = gym.make(env_id,
                               n_objects=config['max_nb_objects'],
                               obj_action_type=config['obj_action_type'],
                               observe_obj_grp=config['observe_obj_grp'],
                               obj_range=np.array([0.15, 0.60]),
                               widerangeobj=True)
            elif env_type == 'FetchMaRobSeqTest':
                env = gym.make(env_id,
                               n_objects=config['max_nb_objects'],
                               obj_action_type=config['obj_action_type'],
                               observe_obj_grp=config['observe_obj_grp'],
                               obj_range=config['obj_range'],
                               widerangeobj=False)
            elif env_type == 'Others':
                env = gym.make(env_id)

            #env._max_episode_steps *= config['max_nb_objects']
            keys = env.observation_space.spaces.keys()
            env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
            env.seed(SEED + 10 * i_env)
            if stack_prob is not None:
                env.unwrapped.stack_prob = stack_prob
            return env

        return _f

    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME and 'Stack' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchStack', config['train_stack_prob'])
            for i_env in range(N_ENVS)
        ])
        envs_test = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchStack', config['test_stack_prob'])
            for i_env in range(N_ENVS)
        ])
        envs_render = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchStack', config['test_stack_prob'])
            for i_env in range(1)
        ])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(1)])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             obj_action_type=config['obj_action_type'])
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Hand') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Hand') for i_env in range(1)])
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'MaRobLong' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(1)])
        n_rob_actions = 4 * 2
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'MaRobSeq' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME,
                             n_objects=config['max_nb_objects'],
                             obj_action_type=config['obj_action_type'],
                             observe_obj_grp=config['observe_obj_grp'],
                             obj_range=config['obj_range'])
        envs = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchMaRobSeq')
            for i_env in range(N_ENVS)
        ])
        envs_test = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchMaRobSeqTest')
            for i_env in range(N_ENVS)
        ])
        envs_render = SubprocVecEnv([
            make_env(ENV_NAME, i_env, 'FetchMaRobSeqTest')
            for i_env in range(1)
        ])
        n_rob_actions = 4 * 2
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    else:
        dummy_env = gym.make(ENV_NAME)
        envs = SubprocVecEnv(
            [make_env(ENV_NAME, i_env, 'Others') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = None

    def her_reward_fun(ag_2, g, info):  # vectorized
        return dummy_env.compute_reward(achieved_goal=ag_2,
                                        desired_goal=g,
                                        info=info).reshape(-1, 1)

    K.manual_seed(SEED)
    np.random.seed(SEED)

    observation_space = (
        dummy_env.observation_space.spaces['observation'].shape[1] * 2 +
        dummy_env.observation_space.spaces['desired_goal'].shape[0],
        dummy_env.observation_space.spaces['observation'].shape[1] +
        dummy_env.observation_space.spaces['desired_goal'].shape[0])
    action_space = (gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    clip_Q_neg = config['clip_Q_neg'] if config['clip_Q_neg'] < 0 else None
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    OUT_FUNC = K.tanh
    if config['agent_alg'] == 'DDPG_BD':
        from olbr.algorithms.ddpg_q_schedule import DDPG_BD
    elif config['agent_alg'] == 'MADDPG_BD':
        from olbr.algorithms.maddpg_q_schedule import DDPG_BD
    MODEL = DDPG_BD

    #exploration initialization
    noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3)
    config['episode_length'] = dummy_env._max_episode_steps
    config['observation_space'] = dummy_env.observation_space

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(
        observation_space,
        action_space,
        optimizer,
        Actor,
        Critic,
        loss_func,
        GAMMA,
        TAU,
        out_func=OUT_FUNC,
        discrete=False,
        regularization=REGULARIZATION,
        normalized_rewards=NORMALIZED_REWARDS,
        reward_fun=reward_fun,
        clip_Q_neg=clip_Q_neg,
        nb_critics=config['max_nb_objects']  #or fixing to 3
    )

    model.n_objects = config['max_nb_objects']

    class NormalizerObj(object):
        def __init__(self, mean, std):
            self.mean = mean
            self.std = std

        def process(self, achieved, desired, step):
            achieved_out = achieved - K.tensor(
                self.mean[0], dtype=achieved.dtype, device=achieved.device)
            achieved_out /= K.tensor(self.std[0],
                                     dtype=achieved.dtype,
                                     device=achieved.device)

            desired_out = desired - K.tensor(
                self.mean[1], dtype=desired.dtype, device=desired.device)
            desired_out /= K.tensor(self.std[1],
                                    dtype=desired.dtype,
                                    device=desired.device)

            step_out = step - K.tensor(
                self.mean[2], dtype=desired.dtype, device=desired.device)
            step_out /= K.tensor(self.std[2],
                                 dtype=desired.dtype,
                                 device=desired.device)

            return achieved_out, desired_out, step_out

    normalizer = [Normalizer(), Normalizer(), NormalizerObj(obj_mean, obj_std)]

    model.obj_traj = obj_traj.to('cuda')
    model.obj_traj.eval()

    for _ in range(1):
        state_all = dummy_env.reset()
        for i_step in range(config['episode_length']):

            model.to_cpu()

            obs = [
                K.tensor(obs, dtype=K.float32).unsqueeze(0)
                for obs in state_all['observation']
            ]
            goal = K.tensor(state_all['desired_goal'],
                            dtype=K.float32).unsqueeze(0)
            if i_step == 0:
                objtraj_goal = goal

            # Observation normalization
            obs_goal = []
            obs_goal.append(K.cat([obs[0], obs[1], objtraj_goal], dim=-1))
            if normalizer[0] is not None:
                obs_goal[0] = normalizer[0].preprocess_with_update(obs_goal[0])

            if config['agent_alg'] == 'DDPG_BD':
                action = model.select_action(obs_goal[0],
                                             noise).cpu().numpy().squeeze(0)
            elif config['agent_alg'] == 'MADDPG_BD':
                action = model.select_action(
                    obs_goal[0], noise,
                    goal_size=goal.shape[1]).cpu().numpy().squeeze(0)
            action_to_env = np.zeros_like(dummy_env.action_space.sample())
            action_to_env[0:action.shape[0]] = action

            next_state_all, _, _, _ = dummy_env.step(action_to_env)

            # Move to the next state
            state_all = next_state_all

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions(
            'future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions(
            'none', 4, her_reward_fun)

    buffer_shapes = {
        'o': (config['episode_length'],
              dummy_env.observation_space.spaces['observation'].shape[1] * 3),
        'ag': (config['episode_length'],
               dummy_env.observation_space.spaces['achieved_goal'].shape[0]),
        'g': (config['episode_length'],
              dummy_env.observation_space.spaces['desired_goal'].shape[0]),
        'u': (config['episode_length'] - 1, action_space[2].shape[0])
    }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, config['episode_length'],
                          sample_her_transitions)

    experiment_args = ((envs, envs_test, envs_render), memory, noise, config,
                       normalizer, None)

    print("0.20 - 0.25 - boundary_sample iff original_goal update norm")

    return model, experiment_args
Example #10
#!/usr/bin/env python3
import argparse
import gym_wmgds

parser = argparse.ArgumentParser(
    description='Renders a gym_wmgds environment for quick inspection.')
parser.add_argument(
    'env_id',
    type=str,
    help='the ID of the environment to be rendered (e.g. HalfCheetah-v1)')
parser.add_argument('--step', type=int, default=1)
args = parser.parse_args()

env = gym_wmgds.make(args.env_id)
env.reset()

step = 0
while True:
    if args.step:
        env.step(env.action_space.sample())
    env.render()
    if step % 10 == 0:
        env.reset()
    step += 1
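Per the argparse definition above, the script takes a positional env_id and an optional --step flag: e.g. python render_env.py HalfCheetah-v1 steps with random actions while rendering, and passing --step 0 only renders and periodically resets (the script filename here is hypothetical).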
Example #11
File: play.py Project: ozcell/gym_wmgds
        num_plots = len(self.plot_names)
        self.fig, self.ax = plt.subplots(num_plots)
        if num_plots == 1:
            self.ax = [self.ax]
        for axis, name in zip(self.ax, plot_names):
            axis.set_title(name)
        self.t = 0
        self.cur_plot = [None for _ in range(num_plots)]
        self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)]

    def callback(self, obs_t, obs_tp1, action, rew, done, info):
        points = self.data_callback(obs_t, obs_tp1, action, rew, done, info)
        for point, data_series in zip(points, self.data):
            data_series.append(point)
        self.t += 1

        xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t

        for i, plot in enumerate(self.cur_plot):
            if plot is not None:
                plot.remove()
            self.cur_plot[i] = self.ax[i].scatter(range(xmin, xmax),
                                                  list(self.data[i]))
            self.ax[i].set_xlim(xmin, xmax)
        plt.pause(0.000001)


if __name__ == '__main__':
    env = gym_wmgds.make("MontezumaRevengeNoFrameskip-v4")
    play(env, zoom=4, fps=60)
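The excerpt begins mid-class: its methods reference self.plot_names, self.data_callback and self.horizon_timesteps that an earlier constructor must have set. A sketch of the constructor head this fragment presupposes (gym's play.py names the class PlayPlot; the exact signature here is an assumption, and the attribute names are taken from the methods above):

class PlayPlot(object):
    def __init__(self, callback, horizon_timesteps, plot_names):
        self.data_callback = callback               # used by callback() above
        self.horizon_timesteps = horizon_timesteps  # sliding window for the x-axis
        self.plot_names = plot_names                # one subplot title per data series
        # ...continues with the figure/axis setup shown at the top of the excerpt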
Example #12
def init(config,
         agent='robot',
         her=False,
         object_Qfunc=None,
         backward_dyn=None,
         object_policy=None,
         reward_fun=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']

    if (ENV_NAME
            == 'FetchStackMulti-v1') or (ENV_NAME == 'FetchPushMulti-v1') or (
                ENV_NAME == 'FetchPickAndPlaceMulti-v1'):
        env = gym.make(ENV_NAME,
                       n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'],
                       observe_obj_grp=config['observe_obj_grp'])
    else:
        env = gym.make(ENV_NAME)

    def her_reward_fun(ag_2, g, info):  # vectorized
        return env.compute_reward(achieved_goal=ag_2,
                                  desired_goal=g,
                                  info=info)

    env.seed(SEED)
    K.manual_seed(SEED)
    np.random.seed(SEED)

    #if config['obj_action_type'] == 'all':
    #    n_actions = config['max_nb_objects'] * 7 + 4
    #elif config['obj_action_type'] == 'slide_only':
    #    n_actions = config['max_nb_objects'] * 3 + 4
    #elif config['obj_action_type'] == 'rotation_only':
    #    n_actions = config['max_nb_objects'] * 4 + 4
    n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + 4

    observation_space = env.observation_space.spaces['observation'].shape[
        1] + env.observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1., 1., shape=(4, ), dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - 4, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    if config['agent_alg'] == 'DDPG_BD':
        MODEL = DDPG_BD
        OUT_FUNC = K.tanh
        from olbr.agents.basic import Actor
        from olbr.replay_buffer import ReplayBuffer
        from olbr.her_sampler import make_sample_her_transitions
    elif config['agent_alg'] == 'MADDPG_BD':
        MODEL = MADDPG_BD
        OUT_FUNC = K.tanh
        from olbr.agents.basic import Actor
        from olbr.replay_buffer import ReplayBuffer_v2 as ReplayBuffer
        from olbr.her_sampler import make_sample_her_transitions_v2 as make_sample_her_transitions
    elif config['agent_alg'] == 'PPO_BD':
        MODEL = PPO_BD
        OUT_FUNC = 'linear'
        from olbr.agents.basic import ActorStoch as Actor
        from olbr.replay_buffer import RolloutStorage as ReplayBuffer

    #exploration initialization
    if agent == 'robot':
        agent_id = 0
        if config['agent_alg'] == 'PPO_BD':
            noise = True
        else:
            noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3)
    elif agent == 'object':
        agent_id = 1
        #noise = Noise(action_space[1].shape[0], sigma=0.05, eps=0.1)
        noise = Noise(action_space[1].shape[0], sigma=0.2, eps=0.3)

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    if config['agent_alg'] == 'PPO_BD':
        model = MODEL(observation_space,
                      action_space,
                      optimizer,
                      Actor,
                      Critic,
                      config['clip_param'],
                      config['ppo_epoch'],
                      config['n_batches'],
                      config['value_loss_coef'],
                      config['entropy_coef'],
                      eps=config['eps'],
                      max_grad_norm=config['max_grad_norm'],
                      use_clipped_value_loss=True,
                      out_func=OUT_FUNC,
                      discrete=False,
                      agent_id=agent_id,
                      object_Qfunc=object_Qfunc,
                      backward_dyn=backward_dyn,
                      object_policy=object_policy,
                      reward_fun=reward_fun,
                      masked_with_r=config['masked_with_r'])
    else:
        model = MODEL(observation_space,
                      action_space,
                      optimizer,
                      Actor,
                      Critic,
                      loss_func,
                      GAMMA,
                      TAU,
                      out_func=OUT_FUNC,
                      discrete=False,
                      regularization=REGULARIZATION,
                      normalized_rewards=NORMALIZED_REWARDS,
                      agent_id=agent_id,
                      object_Qfunc=object_Qfunc,
                      backward_dyn=backward_dyn,
                      object_policy=object_policy,
                      reward_fun=reward_fun,
                      masked_with_r=config['masked_with_r'])

    normalizer = [Normalizer(), Normalizer()]

    #memory initialization
    if config['agent_alg'] == 'PPO_BD':
        memory = ReplayBuffer(env._max_episode_steps - 1, config['n_rollouts'],
                              (observation_space, ), action_space[0])
    else:
        if her:
            sample_her_transitions = make_sample_her_transitions(
                'future', 4, her_reward_fun)
        else:
            sample_her_transitions = make_sample_her_transitions(
                'none', 4, her_reward_fun)

        buffer_shapes = {
            'o': (env._max_episode_steps,
                  env.observation_space.spaces['observation'].shape[1] * 2),
            'ag': (env._max_episode_steps,
                   env.observation_space.spaces['achieved_goal'].shape[0]),
            'g': (env._max_episode_steps,
                  env.observation_space.spaces['desired_goal'].shape[0]),
            'u': (env._max_episode_steps - 1, action_space[2].shape[0]),
            'r': (env._max_episode_steps - 1, 1)
        }
        memory = ReplayBuffer(buffer_shapes, MEM_SIZE, env._max_episode_steps,
                              sample_her_transitions)

    experiment_args = (env, memory, noise, config, normalizer, agent_id)

    print('clipped between -1 and 0, and masked with abs(r), and + r')

    return model, experiment_args
Example #13
def init(config,
         agent='robot',
         her=False,
         object_Qfunc=None,
         backward_dyn=None,
         object_policy=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']

    if (ENV_NAME == 'FetchStackMulti-v1') or (ENV_NAME == 'FetchPushMulti-v1'):
        env = gym.make(ENV_NAME,
                       n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'])
    else:
        env = gym.make(ENV_NAME)

    def reward_fun(ag_2, g, info):  # vectorized
        return env.compute_reward(achieved_goal=ag_2,
                                  desired_goal=g,
                                  info=info)

    env.seed(SEED)
    K.manual_seed(SEED)
    np.random.seed(SEED)

    if config['obj_action_type'] == 'all':
        n_actions = config['max_nb_objects'] * 7 + 4
    elif config['obj_action_type'] == 'slide_only':
        n_actions = config['max_nb_objects'] * 3 + 4
    elif config['obj_action_type'] == 'rotation_only':
        n_actions = config['max_nb_objects'] * 4 + 4

    observation_space = env.observation_space.spaces['observation'].shape[
        1] + env.observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1., 1., shape=(4, ), dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - 4, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    OUT_FUNC = K.tanh
    MODEL = DDPG_BD

    #exploration initialization
    if agent == 'robot':
        agent_id = 0
        noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3)
    elif agent == 'object':
        agent_id = 1
        noise = Noise(action_space[1].shape[0], sigma=0.05, eps=0.1)

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space,
                  action_space,
                  optimizer,
                  Actor,
                  Critic,
                  loss_func,
                  GAMMA,
                  TAU,
                  out_func=OUT_FUNC,
                  discrete=False,
                  regularization=REGULARIZATION,
                  normalized_rewards=NORMALIZED_REWARDS,
                  agent_id=agent_id,
                  object_Qfunc=object_Qfunc,
                  backward_dyn=backward_dyn,
                  object_policy=object_policy)
    normalizer = [Normalizer(), Normalizer()]

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions(
            'future', 4, reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions(
            'none', 4, reward_fun)

    buffer_shapes = {
        'o': (env._max_episode_steps,
              env.observation_space.spaces['observation'].shape[1] * 2),
        'ag': (env._max_episode_steps,
               env.observation_space.spaces['achieved_goal'].shape[0]),
        'g': (env._max_episode_steps,
              env.observation_space.spaces['desired_goal'].shape[0]),
        'u': (env._max_episode_steps - 1, action_space[2].shape[0])
    }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, env._max_episode_steps,
                          sample_her_transitions)

    experiment_args = (env, memory, noise, config, normalizer, agent_id)

    return model, experiment_args
Example #14
def init(config, agent='robot', her=False, object_Qfunc=None, 
                                           backward_dyn=None, 
                                           object_policy=None, 
                                           reward_fun=None,
                                           rnd_models=None):
        
    #hyperparameters
    ENV_NAME = config['env_id'] 
    SEED = config['random_seed']
    N_ENVS = config['n_envs']

    env = []
    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        for i_env in range(N_ENVS):
            env.append(gym.make(ENV_NAME, n_objects=config['max_nb_objects'], 
                                          obj_action_type=config['obj_action_type'], 
                                          observe_obj_grp=config['observe_obj_grp'],
                                          obj_range=config['obj_range']))
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        for i_env in range(N_ENVS):
            env.append(gym.make(ENV_NAME, obj_action_type=config['obj_action_type']))
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    else:
        for i_env in range(N_ENVS):
            env.append(gym.make(ENV_NAME))
    
    for i_env in range(N_ENVS):
        env[i_env].seed(SEED+10*i_env)
    K.manual_seed(SEED)
    np.random.seed(SEED)

    observation_space = env[0].observation_space.spaces['observation'].shape[1] + env[0].observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1., 1., shape=(n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions-n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions,), dtype='float32'))

    GAMMA = config['gamma'] 
    TAU = config['tau'] 
    ACTOR_LR = config['plcy_lr'] 
    CRITIC_LR = config['crtc_lr'] 

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    MODEL = PPO_BD
    OUT_FUNC = 'linear'

    #exploration initialization
    noise = True
    env[0]._max_episode_steps *= config['max_nb_objects']

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)) # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss          
    model = MODEL(observation_space, action_space, optimizer, Actor, Critic, 
            config['clip_param'], config['ppo_epoch'], config['n_batches'], config['value_loss_coef'], config['entropy_coef'],
            eps=config['eps'], max_grad_norm=config['max_grad_norm'], use_clipped_value_loss=True,
            out_func=OUT_FUNC, discrete=False, 
            object_Qfunc=object_Qfunc, backward_dyn=backward_dyn, 
            object_policy=object_policy, reward_fun=reward_fun, masked_with_r=config['masked_with_r'],
            rnd_models=rnd_models, pred_th=config['pred_th'])
    normalizer = [Normalizer(), Normalizer()]

    #memory initialization
    memory = ReplayBuffer(env[0]._max_episode_steps-1, config['n_rollouts'], (observation_space,), action_space[0])

    running_rintr_mean = RunningMean()

    experiment_args = (env, memory, noise, config, normalizer, running_rintr_mean)
          
    return model, experiment_args
Example #15
def init(config,
         agent='robot',
         her=False,
         object_Qfunc=None,
         backward_dyn=None,
         object_policy=None,
         reward_fun=None):

    #hyperparameters
    ENV_NAME = config['env_id']
    SEED = config['random_seed']
    N_ENVS = config['n_envs']

    env = []
    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        for i_env in range(N_ENVS):
            env.append(
                gym.make(ENV_NAME,
                         n_objects=config['max_nb_objects'],
                         obj_action_type=config['obj_action_type'],
                         observe_obj_grp=config['observe_obj_grp'],
                         obj_range=config['obj_range']))
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(
            config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        for i_env in range(N_ENVS):
            env.append(
                gym.make(ENV_NAME, obj_action_type=config['obj_action_type']))
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    else:
        for i_env in range(N_ENVS):
            env.append(gym.make(ENV_NAME))

    def her_reward_fun(ag_2, g, info):  # vectorized
        return env[0].compute_reward(achieved_goal=ag_2,
                                     desired_goal=g,
                                     info=info)

    for i_env in range(N_ENVS):
        env[i_env].seed(SEED + 10 * i_env)
    K.manual_seed(SEED)
    np.random.seed(SEED)

    observation_space = env[0].observation_space.spaces['observation'].shape[
        1] + env[0].observation_space.spaces['desired_goal'].shape[0]
    action_space = (gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - n_rob_actions, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))

    GAMMA = config['gamma']
    TAU = config['tau']
    ACTOR_LR = config['plcy_lr']
    CRITIC_LR = config['crtc_lr']

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    if config['agent_alg'] == 'PPO_BD':
        MODEL = PPO_BD
        OUT_FUNC = 'linear'
        from olbr.replay_buffer import ReplayBuffer
        from olbr.her_sampler import make_sample_her_transitions
        from olbr.replay_buffer import RolloutStorage as RolloutStorage

    #exploration initialization
    env[0]._max_episode_steps *= config['max_nb_objects']
    noise = (True, Noise(action_space[1].shape[0], sigma=0.2, eps=0.3))

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)
                 )  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space,
                  action_space,
                  optimizer,
                  Actor,
                  Critic,
                  config['clip_param'],
                  config['ppo_epoch'],
                  config['n_batches'],
                  config['value_loss_coef'],
                  config['entropy_coef'],
                  eps=config['eps'],
                  max_grad_norm=config['max_grad_norm'],
                  use_clipped_value_loss=True,
                  out_func=OUT_FUNC,
                  discrete=False,
                  agent_id=0,
                  object_Qfunc=object_Qfunc,
                  backward_dyn=backward_dyn,
                  object_policy=object_policy,
                  reward_fun=reward_fun,
                  masked_with_r=config['masked_with_r'])
    normalizer = [Normalizer(), Normalizer()]

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions(
            'future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions(
            'none', 4, her_reward_fun)

    buffer_shapes = {
        'o': (env[0]._max_episode_steps,
              env[0].observation_space.spaces['observation'].shape[1] * 2),
        'ag': (env[0]._max_episode_steps,
               env[0].observation_space.spaces['achieved_goal'].shape[0]),
        'g': (env[0]._max_episode_steps,
              env[0].observation_space.spaces['desired_goal'].shape[0]),
        'u': (env[0]._max_episode_steps - 1, action_space[2].shape[0])
    }
    memory = (RolloutStorage(env[0]._max_episode_steps - 1,
                             config['n_rollouts'], (observation_space, ),
                             action_space[0]),
              ReplayBuffer(buffer_shapes, MEM_SIZE, env[0]._max_episode_steps,
                           sample_her_transitions))

    experiment_args = (env, memory, noise, config, normalizer, 0)

    return model, experiment_args
Example #16
def init(config, agent='robot', her=False, 
                                reward_fun=None, 
                                obj_traj=None,
                                obj_mean=None,
                                obj_std=None):
        
    #hyperparameters
    ENV_NAME = config['env_id'] 
    SEED = config['random_seed']
    N_ENVS = config['n_envs']

    def make_env(env_id, i_env, env_type='Fetch', stack_prob=None):
        def _f():
            if env_type == 'Fetch':
                env = gym.make(env_id, n_objects=config['max_nb_objects'], 
                                    obj_action_type=config['obj_action_type'], 
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range']
                                    )
            elif env_type == 'FetchStack':
                env = gym.make(env_id, n_objects=config['max_nb_objects'], 
                                    obj_action_type=config['obj_action_type'], 
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range'],
                                    change_stack_order=config['change_stack_order']
                                    )
            elif env_type == 'Hand':
                env = gym.make(env_id, obj_action_type=config['obj_action_type'])
            elif env_type == 'Others':
                env = gym.make(env_id)
            

            #env._max_episode_steps *= config['max_nb_objects']
            keys = env.observation_space.spaces.keys()
            env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
            env.seed(SEED+10*i_env)
            if stack_prob is not None:
                env.unwrapped.stack_prob = stack_prob
            return env
        return _f  

    if 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME and 'Stack' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, n_objects=config['max_nb_objects'], 
                                    obj_action_type=config['obj_action_type'], 
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'FetchStack', config['train_stack_prob']) for i_env in range(N_ENVS)])
        envs_test = SubprocVecEnv([make_env(ENV_NAME, i_env, 'FetchStack', config['test_stack_prob']) for i_env in range(N_ENVS)])
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'FetchStack', config['test_stack_prob']) for i_env in range(1)])         
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, n_objects=config['max_nb_objects'], 
                                    obj_action_type=config['obj_action_type'], 
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(1)])
        n_rob_actions = 4
        n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + n_rob_actions
    elif 'HandManipulate' in ENV_NAME and 'Multi' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, obj_action_type=config['obj_action_type'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Hand') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Hand') for i_env in range(1)])
        n_rob_actions = 20
        n_actions = 1 * len(config['obj_action_type']) + n_rob_actions
    elif 'Fetch' in ENV_NAME and 'MaRob' in ENV_NAME:
        dummy_env = gym.make(ENV_NAME, n_objects=config['max_nb_objects'], 
                                    obj_action_type=config['obj_action_type'], 
                                    observe_obj_grp=config['observe_obj_grp'],
                                    obj_range=config['obj_range'])
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Fetch') for i_env in range(1)])
        n_rob_actions = 4 * 2
        n_actions = config['max_nb_objects'] * len(config['obj_action_type']) + n_rob_actions
    else:
        dummy_env = gym.make(ENV_NAME)
        envs = SubprocVecEnv([make_env(ENV_NAME, i_env, 'Others') for i_env in range(N_ENVS)])
        envs_test = None
        envs_render = None

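    # Vectorized HER reward with one column per critic: the first column is the
    # env's compute_reward on the robot goal slice, the remaining columns are
    # per-object rewards (compute_reward again, or a plain distance-threshold
    # check when use_step_reward_fun is set).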
    def make_her_reward_fun(nb_critics, use_step_reward_fun=False):

        def _her_reward_fun(ag_2, g, info):  # vectorized
            goal_len = ag_2.shape[1]//nb_critics
            
            rew = dummy_env.compute_reward(achieved_goal=ag_2[:,0:goal_len], desired_goal=g[:,0:goal_len], info=info)
            all_rew = rew.copy()[:, np.newaxis]

            for i_reward in range(1,nb_critics):
                if not use_step_reward_fun:
                    obj_rew = dummy_env.compute_reward(achieved_goal=ag_2[:,goal_len*i_reward:goal_len*(i_reward+1)], 
                                                    desired_goal=g[:,goal_len*i_reward:goal_len*(i_reward+1)], info=info)
                else:
                    goal_a = ag_2[:,goal_len*i_reward:goal_len*(i_reward+1)].reshape(-1,dummy_env.env.n_objects,3)
                    goal_b = g[:,goal_len*i_reward:goal_len*(i_reward+1)].reshape(-1,dummy_env.env.n_objects,3)
                    d = np.linalg.norm(goal_a - goal_b, axis=-1)
                    #obj_rew = - (d > dummy_env.env.distance_threshold).astype(np.float32).sum(-1)
                    obj_rew = - (d > dummy_env.env.distance_threshold).astype(np.float32)
                all_rew = np.concatenate((all_rew, obj_rew.copy()[:, np.newaxis]), axis=-1)
                #all_rew = np.concatenate((all_rew, obj_rew.copy()), axis=-1)
            return all_rew
        
        return _her_reward_fun

    her_reward_fun = make_her_reward_fun(2, config['use_step_reward_fun'])

    K.manual_seed(SEED)
    np.random.seed(SEED)

    observation_space = (dummy_env.observation_space.spaces['observation'].shape[1]*2 + dummy_env.observation_space.spaces['desired_goal'].shape[0]*2, 
                        dummy_env.observation_space.spaces['observation'].shape[1]*1 + dummy_env.observation_space.spaces['desired_goal'].shape[0]*2)
    action_space = (gym.spaces.Box(-1., 1., shape=(n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions-n_rob_actions,), dtype='float32'),
                    gym.spaces.Box(-1., 1., shape=(n_actions,), dtype='float32'))

    GAMMA = config['gamma']
    clip_Q_neg = config['clip_Q_neg'] if config['clip_Q_neg'] < 0 else None
    TAU = config['tau'] 
    ACTOR_LR = config['plcy_lr'] 
    CRITIC_LR = config['crtc_lr'] 

    MEM_SIZE = config['buffer_length']

    REGULARIZATION = config['regularization']
    NORMALIZED_REWARDS = config['reward_normalization']

    OUT_FUNC = K.tanh 
    MODEL = DDPG_BD

    #exploration initialization
    noise = Noise(action_space[0].shape[0], sigma=0.2, eps=0.3) 
    config['episode_length'] = dummy_env._max_episode_steps
    config['observation_space'] = dummy_env.observation_space

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR)) # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space, action_space, optimizer, 
                  Actor, Critic, loss_func, GAMMA, TAU, out_func=OUT_FUNC, discrete=False, 
                  regularization=REGULARIZATION, normalized_rewards=NORMALIZED_REWARDS,
                  reward_fun=reward_fun, clip_Q_neg=clip_Q_neg, nb_critics=config['max_nb_objects']+1
                  )

    model.n_objects = config['max_nb_objects']

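    # Small helper that whitens achieved/desired goals with the pre-computed
    # object-trajectory statistics (obj_mean, obj_std) passed into init().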
    class NormalizerObj(object):
        def __init__(self, mean, std):
            self.mean = mean
            self.std = std

        def process(self, achieved, desired):
            achieved_out = achieved - K.tensor(self.mean[0], dtype=achieved.dtype, device=achieved.device)
            achieved_out /= K.tensor(self.std[0], dtype=achieved.dtype, device=achieved.device)

            desired_out = desired - K.tensor(self.mean[1], dtype=desired.dtype, device=desired.device)
            desired_out /= K.tensor(self.std[1], dtype=desired.dtype, device=desired.device)

            return achieved_out, desired_out

    normalizer = [Normalizer(), Normalizer(), NormalizerObj(obj_mean, obj_std)]

    model.obj_traj = obj_traj.to('cpu')
    model.obj_traj.eval()

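    # Roll out one episode in the dummy env so that the observation normalizer
    # (normalizer[0]) accumulates running statistics before training starts.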
    for _ in range(1):
        state_all = dummy_env.reset()
        for i_step in range(config['episode_length']):

            model.to_cpu()

            obs = [K.tensor(obs, dtype=K.float32).unsqueeze(0) for obs in state_all['observation']]
            goal = K.tensor(state_all['desired_goal'], dtype=K.float32).unsqueeze(0)
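            # Every objtraj_goal_horizon steps, query the frozen object-trajectory
            # model for per-object sub-goals between the current achieved goal and
            # the desired goal (both normalized with the object statistics first).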
            if i_step%config['objtraj_goal_horizon'] == 0:
                achieved_goal = K.tensor(state_all['achieved_goal'], dtype=K.float32).unsqueeze(0)

                objtraj_goal = []
                goal_len_per_obj = goal.shape[1]//model.n_objects
                for i_object in range(model.n_objects):
                    
                    achieved_goal_per_obj = achieved_goal[:,i_object*goal_len_per_obj:(i_object+1)*goal_len_per_obj]
                    goal_per_obj = goal[:,i_object*goal_len_per_obj:(i_object+1)*goal_len_per_obj]

                    normed_achieved_goal_per_obj, normed_goal_per_obj = normalizer[2].process(achieved_goal_per_obj, goal_per_obj)
                    with K.no_grad():
                        objtraj_goal_per_obj = model.obj_traj(normed_achieved_goal_per_obj, normed_goal_per_obj)

                    objtraj_goal.append(objtraj_goal_per_obj)
                objtraj_goal = K.cat(objtraj_goal, dim=-1)

            # Observation normalization
            obs_goal = []
            obs_goal.append(K.cat([obs[0], obs[1], goal, objtraj_goal], dim=-1))
            if normalizer[0] is not None:
                obs_goal[0] = normalizer[0].preprocess_with_update(obs_goal[0])

            if config['agent_alg'] == 'DDPG_BD':
                action = model.select_action(obs_goal[0], noise).cpu().numpy().squeeze(0)
            elif config['agent_alg'] == 'MADDPG_BD':
                action = model.select_action(obs_goal[0], noise, goal_size=goal.shape[1]).cpu().numpy().squeeze(0)
            action_to_env = np.zeros_like(dummy_env.action_space.sample())
            action_to_env[0:action.shape[0]] = action

            next_state_all, _, _, _ = dummy_env.step(action_to_env)

            # Move to the next state
            state_all = next_state_all

    #memory initialization
    if her:
        sample_her_transitions = make_sample_her_transitions('future', 4, her_reward_fun)
    else:
        sample_her_transitions = make_sample_her_transitions('none', 4, her_reward_fun)

    buffer_shapes = {
        'o' : (config['episode_length'], dummy_env.observation_space.spaces['observation'].shape[1]*3),
        'ag' : (config['episode_length'], dummy_env.observation_space.spaces['achieved_goal'].shape[0]*2),
        'g' : (config['episode_length'], dummy_env.observation_space.spaces['desired_goal'].shape[0]*2),
        'u' : (config['episode_length']-1, action_space[2].shape[0])
        }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, config['episode_length'], sample_her_transitions)

    experiment_args = ((envs, envs_test, envs_render), memory, noise, config, normalizer, None)
          
    return model, experiment_args
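
A minimal usage sketch, not part of the original example: it only shows how the
values returned by this init() unpack, assuming a populated config dict and a
pre-trained obj_traj model (with its obj_mean/obj_std statistics) already exist.

# Hypothetical driver code; config, obj_traj, obj_mean and obj_std are assumed.
model, experiment_args = init(config, agent='robot', her=True,
                              reward_fun=None, obj_traj=obj_traj,
                              obj_mean=obj_mean, obj_std=obj_std)
(envs, envs_test, envs_render), memory, noise, config, normalizer, _ = experiment_args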
Example #17
def init(config):

    if config['resume'] != '':
        resume_path = config['resume']
        saver = Saver(config)
        config, start_episode, save_dict = saver.resume_ckpt()
        config['resume'] = resume_path
    else:
        start_episode = 0

    #hyperparameters
    ENV_NAME = config['env_id']  #'simple_spread'
    SEED = config['random_seed']  # 1

    GAMMA = config['gamma']  # 0.95
    TAU = config['tau']  # 0.01

    ACTOR_LR = config['plcy_lr']  # 0.01
    CRITIC_LR = config['crtc_lr']  # 0.01

    MEM_SIZE = config['buffer_length']  # 1000000

    REGULARIZATION = config['regularization']  # True
    NORMALIZED_REWARDS = config['reward_normalization']  # True

    if (ENV_NAME == 'FetchStackMulti-v1') or (ENV_NAME == 'FetchPushMulti-v1'):
        env = gym.make(ENV_NAME,
                       n_objects=config['max_nb_objects'],
                       obj_action_type=config['obj_action_type'])
    else:
        env = gym.make(ENV_NAME)
    env.seed(SEED)

    if config['obj_action_type'] == 'all':
        n_actions = config['max_nb_objects'] * 7 + 4
    elif config['obj_action_type'] == 'slide_only':
        n_actions = config['max_nb_objects'] * 3 + 4
    elif config['obj_action_type'] == 'rotation_only':
        n_actions = config['max_nb_objects'] * 4 + 4

    observation_space = (env.observation_space.spaces['observation'].shape[1] +
                         env.observation_space.spaces['desired_goal'].shape[0])
    action_space = (gym.spaces.Box(-1., 1., shape=(4, ), dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions - 4, ),
                                   dtype='float32'),
                    gym.spaces.Box(-1.,
                                   1.,
                                   shape=(n_actions, ),
                                   dtype='float32'))
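    # Choose the actor's output nonlinearity from the env's action range.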
    if env.action_space.low[0] == -1 and env.action_space.high[0] == 1:
        OUT_FUNC = K.tanh
    elif env.action_space.low[0] == 0 and env.action_space.high[0] == 1:
        OUT_FUNC = K.sigmoid
    else:
        OUT_FUNC = K.sigmoid

    K.manual_seed(SEED)
    np.random.seed(SEED)

    if config['agent_alg'] == 'MADDPG':
        MODEL = MADDPG
    elif config['agent_alg'] == 'DDPG':
        MODEL = DDPG
    elif config['agent_alg'] == 'MADDPG_R':
        MODEL = MADDPG_R
    elif config['agent_alg'] == 'MADDPG_RAE':
        MODEL = MADDPG_RAE

    if config['verbose'] > 1:
        # utils
        summaries = (Summarizer(config['dir_summary_train'], config['port'],
                                config['resume']),
                     Summarizer(config['dir_summary_test'], config['port'],
                                config['resume']))
        saver = Saver(config)
    else:
        summaries = None
        saver = None

    #exploration initialization
    noise = (Noise(action_space[0].shape[0], sigma=0.2, eps=0.3),
             Noise(action_space[1].shape[0], sigma=0.05, eps=0.1))
    #noise = OUNoise(action_space.shape[0])

    #model initialization
    optimizer = (optim.Adam, (ACTOR_LR, CRITIC_LR))  # optimiser func, (actor_lr, critic_lr)
    loss_func = F.mse_loss
    model = MODEL(observation_space,
                  action_space,
                  optimizer,
                  Actor,
                  Critic,
                  loss_func,
                  GAMMA,
                  TAU,
                  out_func=OUT_FUNC,
                  discrete=False,
                  regularization=REGULARIZATION,
                  normalized_rewards=NORMALIZED_REWARDS)

    if config['resume'] != '':
        for i, param in enumerate(save_dict['model_params']):
            model.entities[i].load_state_dict(param)

    #memory initialization
    #memory = ReplayMemory(MEM_SIZE)
    def reward_fun(ag_2, g, info):  # vectorized
        return env.compute_reward(achieved_goal=ag_2,
                                  desired_goal=g,
                                  info=info)

    sample_her_transitions = make_sample_her_transitions(
        'future', 4, reward_fun)
    buffer_shapes = {
        'o': (env._max_episode_steps,
              env.observation_space.spaces['observation'].shape[1] * 2),
        'ag': (env._max_episode_steps,
               env.observation_space.spaces['achieved_goal'].shape[0]),
        'g': (env._max_episode_steps,
              env.observation_space.spaces['desired_goal'].shape[0]),
        'u': (env._max_episode_steps - 1, action_space[2].shape[0])
    }
    memory = ReplayBuffer(buffer_shapes, MEM_SIZE, env._max_episode_steps,
                          sample_her_transitions)

    normalizer = (Normalizer(), Normalizer())

    experiment_args = (env, memory, noise, config, summaries, saver,
                       start_episode, normalizer)

    return model, experiment_args
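
A minimal usage sketch, not part of the original example, showing how the
returned tuple unpacks before training; config is assumed to be prepared
elsewhere.

# Hypothetical driver code; config is assumed to exist.
model, experiment_args = init(config)
(env, memory, noise, config, summaries, saver,
 start_episode, normalizer) = experiment_args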
Example #18
File: cem.py  Project: ozcell/gym_wmgds
def do_rollout(agent, env, num_steps, render=False):
    # Function header reconstructed for readability (assumed from the upstream
    # cem.py example); the original excerpt started inside the rollout loop.
    total_rew = 0
    ob = env.reset()
    for t in range(num_steps):
        a = agent.act(ob)
        (ob, reward, done, _info) = env.step(a)
        total_rew += reward
        if render and t % 3 == 0: env.render()
        if done: break
    return total_rew, t + 1


if __name__ == '__main__':
    logger.set_level(logger.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('--display', action='store_true')
    parser.add_argument('target', nargs="?", default="CartPole-v0")
    args = parser.parse_args()

    env = gym_wmgds.make(args.target)
    env.seed(0)
    np.random.seed(0)
    params = dict(n_iter=10, batch_size=25, elite_frac=0.2)
    num_steps = 200

    # You provide the directory to write to (can be an existing
    # directory, but can't contain previous monitor results. You can
    # also dump to a tempdir if you'd like: tempfile.mkdtemp().
    outdir = '/tmp/cem-agent-results'
    env = wrappers.Monitor(env, outdir, force=True)

    # Prepare snapshotting
    # ----------------------------------------
    def writefile(fname, s):
        with open(path.join(outdir, fname), 'w') as fh: