Example 1
    def test_buffer(self):
        data = \
            {AgentKey(0, '0-1'): AgentReplayFrame([2, 1, 2, 2, 3], [0, 1, 0], 3, False, [3, 1, 1, 2, 3]),
             AgentKey(0, '0-2'): AgentReplayFrame([1, 1, 3, 2, 1], [0, 1, 0], 4, False, [2, 1, 1, 2, 2]),
             AgentKey(1, '0-1'): AgentReplayFrame([2, 0, 3, 1, 2], [0, 1], 5, False, [3, 0, 1, 3, 4])}

        max_steps = 4
        buffer = ReplayBuffer(max_steps)
        for i in range(5):
            buffer.push(data)
            self.assertEqual(buffer.length(), min(i + 1, max_steps))

        sample: List[Dict[AgentKey,
                          AgentReplayFrame]] = buffer.sample(2,
                                                             norm_rews=False)
        for s in sample:
            for k, v in s.items():
                self.assertEqual(v.reward, data[k].reward)

        sample: List[Dict[AgentKey,
                          AgentReplayFrame]] = buffer.sample(2, norm_rews=True)
        for s in sample:
            for k, v in s.items():
                self.assertEqual(v.reward, 0)

        avg_rewards = buffer.get_average_rewards(3)
        for k, v in avg_rewards.items():
            self.assertEqual(v, data[k].reward)
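
For reference, a minimal sketch of the buffer interface this test exercises; the field names, the per-key reward normalization, and the sampling details below are assumptions inferred from the assertions, not the project's actual implementation:

from collections import deque, namedtuple
from typing import Dict, List
import random

import numpy as np

AgentKey = namedtuple('AgentKey', ['agent_id', 'episode_id'])
AgentReplayFrame = namedtuple(
    'AgentReplayFrame', ['obs', 'action', 'reward', 'done', 'next_obs'])


class ReplayBuffer:
    """Ring buffer of per-step dicts mapping AgentKey -> AgentReplayFrame."""

    def __init__(self, max_steps: int):
        self._steps = deque(maxlen=max_steps)

    def push(self, frame_dict: Dict[AgentKey, AgentReplayFrame]) -> None:
        self._steps.append(frame_dict)

    def length(self) -> int:
        return len(self._steps)

    def sample(self, n: int, norm_rews: bool = False
               ) -> List[Dict[AgentKey, AgentReplayFrame]]:
        picked = random.sample(list(self._steps), n)
        if not norm_rews:
            return picked
        # Normalize each agent's rewards over the whole buffer; a constant
        # reward stream therefore normalizes to exactly 0.
        per_key = {}
        for step in self._steps:
            for k, v in step.items():
                per_key.setdefault(k, []).append(v.reward)
        stats = {k: (np.mean(r), np.std(r)) for k, r in per_key.items()}
        normed = []
        for step in picked:
            normed.append({
                k: v._replace(reward=0.0 if stats[k][1] == 0
                              else (v.reward - stats[k][0]) / stats[k][1])
                for k, v in step.items()})
        return normed

    def get_average_rewards(self, n: int) -> Dict[AgentKey, float]:
        recent = list(self._steps)[-n:]
        keys = {k for step in recent for k in step}
        return {k: float(np.mean([s[k].reward for s in recent if k in s]))
                for k in keys}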
Example 2
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    #logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
                            config.discrete_action)

    # NOTE: this block appears to be leftover scaffolding: agent_init_params,
    # num_in_pol, num_out_pol, num_in_critic and the other hyperparameters are
    # not defined in this function, and init_dict is never used. The intended
    # check is presumably on the env id rather than on the env object itself.
    if config.env_id == 'simple_reference':
        for i in range(2):
            agent_init_params.append({'num_in_pol': num_in_pol,
                                      'num_out_pol': num_out_pol,
                                      'num_in_critic': num_in_critic})
        init_dict = {'gamma': gamma, 'tau': tau, 'lr': lr,
                     'hidden_dim': hidden_dim,
                     'alg_types': alg_types,
                     'agent_init_params': agent_init_params,
                     'discrete_action': discrete_action}

    maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)

    replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0

    episode_average_rewards = []
    hundred_episode_average_rewards = []

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):

        if ep_i % 100 == 0 and ep_i > 0:
            hundred_episode_average_rewards.append(np.mean(episode_average_rewards))
            print('Mean reward over last 100 episodes at episode', ep_i, '=',
                  hundred_episode_average_rewards[-1])
            print('Agent Actions =', torch_agent_actions)
            episode_average_rewards = []
        '''
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        '''
        obs = env.reset()

        rewards_for_this_episode=[]
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

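        # Linearly decay exploration noise from init_noise_scale down to
        # final_noise_scale over the first n_exploration_eps episodes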
        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(maddpg.nagents)]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)

            rewards_for_this_episode.append(np.mean(rewards))

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
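            # Train once roughly every steps_per_update environment steps
            # (t advances by n_rollout_threads per step), provided the buffer
            # already holds at least one full batch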
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i)#, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
            
            if ep_i > 10000:
                print('Goal Color =', torch_obs[0])
                print('Communication =', agent_actions[0])

                env.render()
                time.sleep(0.01)


        if ep_i>100000:
            import ipdb
            ipdb.set_trace()

        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        
        episode_average_rewards.append(np.sum(rewards_for_this_episode))
        #for a_i, a_ep_rew in enumerate(ep_rews):
            #logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    x = 100 * np.arange(1, len(hundred_episode_average_rewards) + 1)
    plt.plot(x, hundred_episode_average_rewards)
    plt.xlabel('Episode Number')
    plt.ylabel('Average Reward for 100 episodes')
    plt.title('Speaker Discrete and Mover Continuous')
    plt.savefig('plot.png')
    plt.show()

    maddpg.save(run_dir / 'model.pt')
    env.close()
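
The run-directory auto-numbering block at the top of run() above reappears nearly verbatim in most of the examples below; a small helper along these lines could replace it (get_next_run_dir is a hypothetical name, not part of any of these projects):

from pathlib import Path


def get_next_run_dir(model_dir: Path) -> Path:
    """Return model_dir / 'runN', where N is one past the highest existing run."""
    if not model_dir.exists():
        return model_dir / 'run1'
    run_nums = [int(str(folder.name).split('run')[1])
                for folder in model_dir.iterdir()
                if str(folder.name).startswith('run')]
    next_num = max(run_nums) + 1 if run_nums else 1
    return model_dir / ('run%i' % next_num)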
Example 3
def run(config):
    # Get paths for saving logs and model
    run_dir, model_cp_path, log_dir = get_paths(config)
    print("Saving model in dir", run_dir)

    # Init summary writer
    logger = SummaryWriter(str(log_dir))

    # Load scenario config
    sce_conf = load_scenario_config(config, run_dir)

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)

    env = make_parallel_env(config.env_path, config.n_rollout_threads,
                            config.seed, config.discrete_action, sce_conf)

    maddpg = MADDPG.init_from_env(env,
                                  agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  gamma=config.gamma,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim,
                                  shared_params=config.shared_params)

    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])

    t = 0
    for ep_i in tqdm(range(0, config.n_episodes, config.n_rollout_threads)):
        #print("Episodes %i-%i of %i" % (ep_i + 1,
        #                                ep_i + 1 + config.n_rollout_threads,
        #                                config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
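            # end the episode early as soon as the first rollout thread's
            # first agent reports done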
            if dones[0, 0]:
                break
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                              a_ep_rew / config.n_rollout_threads, ep_i)
        # Save ep number
        with open(str(log_dir / 'ep_nb.txt'), 'w') as f:
            f.write(str(ep_i))

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(model_cp_path)

    maddpg.save(model_cp_path)
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
    print("Model saved in dir", run_dir)
Example 4
File: main.py Project: xuezzee/-
def run(config):
    scores_window = deque(maxlen=100)

    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)

    # transport configuration
    name = 'Materials Transport'
    conf = {
        'n_player': 2,  # number of players
        'board_width': 11,  # map width
        'board_height': 11,  # map height
        'n_cell_type': 5,  # number of cell types
        'materials': 4,  # number of material pickup/drop-off points
        'cars': 2,  # number of cars
        'planes': 0,  # number of planes
        'barriers': 12,  # number of fixed obstacles
        'max_step': 500,  # maximum number of steps
        'game_name': name,  # game name
        'K': 5,  # refresh the material counts at the pickup/drop-off points every K rounds
        'map_path': 'env/map.txt',  # path to the initial map
        'cell_range': 6,  # value range of each dimension within a cell (tuple; a single int is converted to a tuple automatically)
        'ob_board_width': None,  # grid width observed by each agent (tuple); None means same as the actual grid
        'ob_board_height': None,  # grid height observed by each agent (tuple); None means same as the actual grid
        'ob_cell_range': None,  # per-agent value range of each dimension within a cell (2-D tuple); None means same as the actual grid
    }

    env = make_parallel_env_transport(config.env_id, conf,
                                      config.n_rollout_threads, config.seed,
                                      config.discrete_action)

    maddpg = MADDPG.init_from_env(env,
                                  agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)
    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        score = 0
        # print("Episodes %i-%i of %i" % (ep_i + 1,
        #                                 ep_i + 1 + config.n_rollout_threads,
        #                                 config.n_episodes))

        obs = env.reset()  # TODO: TO CHECK
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            # print('step', et_i)
            # env.render()
            # rearrange observations to be per agent, and convert to torch Variable
            # print('step', et_i)
            # print(maddpg.nagents)
            torch_obs = [
                Variable(
                    torch.Tensor(np.vstack(obs[:, i])),  # stack the matrices vertically
                    requires_grad=False) for i in range(maddpg.nagents)
            ]

            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=False)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            ############################################
            # add
            # actions = actions.astype(int)
            ############################################
            # add: the first two joint actions (hard-coded one-hot actions for
            # the two scripted players)
            joint_action = []

            for i in range(2):
                player = []
                for j in range(1):
                    each = [0] * 11
                    # idx = np.random.randint(11)
                    each[3] = 1
                    player.append(each)
                joint_action.append(player)
            # append the learned agents' actions as integer one-hot lists
            for m in range(2):
                joint_action.append([actions[0][m].astype(int).tolist()])

            next_obs, rewards, dones, infos = env.step(joint_action)

            #################################
            agents_action = actions[0]
            #################################

            replay_buffer.push(obs, agents_action, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')

            score += rewards[0][0]

        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

        scores_window.append(score)
        reward_epi = np.mean(scores_window)
        reward_epi_var = np.var(scores_window)
        logger.add_scalar('results/completion_window', reward_epi, ep_i)
        logger.add_scalar('results/completion_window_var', reward_epi_var, ep_i)
        print(
            '\r Episode {}\t Average Reward: {:.3f}\t Var Reward: {:.3f} \t '.
            format(ep_i, reward_epi, reward_epi_var))

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
Example 5
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)
    # model = AttentionSAC.init_from_env(env,
    #                                    tau=config.tau,
    #                                    pi_lr=config.pi_lr,
    #                                    q_lr=config.q_lr,
    #                                    gamma=config.gamma,
    #                                    pol_hidden_dim=config.pol_hidden_dim,
    #                                    critic_hidden_dim=config.critic_hidden_dim,
    #                                    attend_heads=config.attend_heads,
    #                                    reward_scale=config.reward_scale)

    # Model used to test with adversarial agent 
    # model= AttentionSAC.init_from_save ("C:\\Users\\HP\\Desktop\\NTU\\FYP\\FYP Code\\MAAC\\Output\\run140\\model.pt")
    # print("Model instantiated")

    # Model used to test without adversarial agent 
    model = AttentionSAC.init_from_save(
        "C:\\Users\\HP\\Desktop\\NTU\\FYP\\FYP Code\\MAAC\\Output\\run148\\model.pt")
    print("Model instantiated")

    replay_buffer = ReplayBuffer(config.buffer_length, model.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0

    row_list = []

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        obs = env.reset()
        model.prep_rollouts(device='cpu')

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(model.nagents)]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            # print (rewards)
            # print (dones[0])
            # env.render('human')
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if config.use_gpu:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config.num_updates):
                    sample = replay_buffer.sample(config.batch_size,
                                                  to_gpu=config.use_gpu)
                    #print(sample)
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')

            if dones[0][0]:
                print("Breaking the episode at timestep", et_i)
                break

        # et_i is the index of the last timestep run; +1 converts it to the
        # number of timesteps in this episode
        et_i += 1

        row_list.append((ep_i + 1, et_i))

        ep_rews = replay_buffer.get_average_rewards(
            et_i * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                              a_ep_rew * et_i, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    with open('Timesteps_vs_Episodes.csv', 'w', newline='') as file:
         writer = csv.writer(file)
         writer.writerow(["Ep No", "Number of Timesteps"])
         for row in row_list:
            writer.writerow(row)

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
Example 6
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    # if not model_dir.exists():
    #     run_num = 1
    # else:
    #     exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
    #                      model_dir.iterdir() if
    #                      str(folder.name).startswith('run')]
    #     if len(exst_run_nums) == 0:
    #         run_num = 1
    #     else:
    #         run_num = max(exst_run_nums) + 1
    run_num = 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir, exist_ok=True)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)
    model = AttentionSAC.init_from_env(env,
                                       tau=config.tau,
                                       pi_lr=config.pi_lr,
                                       q_lr=config.q_lr,
                                       gamma=config.gamma,
                                       pol_hidden_dim=config.pol_hidden_dim,
                                       critic_hidden_dim=config.critic_hidden_dim,
                                       attend_heads=config.attend_heads,
                                       reward_scale=config.reward_scale)
    replay_buffer = ReplayBuffer(config.buffer_length, model.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        obs = env.reset()
        model.prep_rollouts(device='cpu')

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(model.nagents)]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if config.use_gpu:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config.num_updates):
                    sample = replay_buffer.sample(config.batch_size,
                                                  to_gpu=config.use_gpu)
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                              a_ep_rew * config.episode_length, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
Example 7
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads,
                            config.seed, config.discrete_action)

    ##################### INITIALIZE FROM SAVED? ###########################
    # init_from_saved and model_path are assumed to be defined at module level;
    # fall back to a freshly initialized model if no saved model path is given.
    if init_from_saved and model_path is not None:
        maddpg = MADDPG.init_from_save(model_path)
        print("Initialized from saved model")
    # -------------------------------------------------------------------- #
    else:
        maddpg = MADDPG.init_from_env(env,
                                      agent_alg=config.agent_alg,
                                      adversary_alg=config.adversary_alg,
                                      tau=config.tau,
                                      lr=config.lr,
                                      hidden_dim=config.hidden_dim)
    # used for learning (updates)
    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])

    # This is just to store the global rewards and not for updating the policies
    g_storage_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])

    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions, maddpg)
            '''
            Reward Shaping using D++, D.
            The rewards now contain global as well as shaped rewards
            Keep the global for logging, and use the shaped rewards for updates
            '''
            # Choose which reward to use
            use_dpp = True

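            # env.step above returns, per thread and per agent, a pair of
            # rewards: index 0 is the global reward, index 1 the shaped
            # (difference) reward; split them below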
            # DIFFERENCE REWARDS
            d_rewards = []
            for n in range(maddpg.nagents):
                d_rewards.append([rewards[0][n][1]])
            d_rewards = [d_rewards]
            d_rewards = np.array(d_rewards)

            # GLOBAL REWARDS
            g_rewards = []
            for n in range(maddpg.nagents):
                g_rewards.append([rewards[0][n][0]])
            g_rewards = [g_rewards]
            g_rewards = np.array(g_rewards)

            if use_dpp:
                rewards = d_rewards
            else:
                rewards = g_rewards
            # ----------------------------------------------------------- #
            # Buffer used for updates
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            # push global rewards into g_replay_buffer for plotting
            g_storage_buffer.push(obs, agent_actions, g_rewards, next_obs,
                                  dones)

            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        # Take out global reward from g_storage_buffer
        ep_rews = g_storage_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)

        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
Example 8
        noisy_agent_actions = []
        for i in range(len(agent_actions)):
            # uniform noise added to the action values before re-one-hotting
            noise = np.random.rand(agent_actions[i].shape[0],
                                   agent_actions[i].shape[1])
            tmp = agent_actions[i] * 0
            # np.argmax flattens its input; this only works because the
            # leading (thread) dimension is 1 here
            tmp_action = np.argmax(agent_actions[i] + noise * 5)
            tmp[0][tmp_action] = 1.0
            noisy_agent_actions.append(tmp)

        # NOTE: the un-noised agent_actions (not noisy_agent_actions) are what
        # get stored in the buffer below
        replay_buffer.push(obs, agent_actions, rewards, next_obs,
                           dones)  # Here pushing observations
        obs = next_obs
        t += n_rollout_threads

        if (len(replay_buffer) >= batch_size
                and (t % steps_per_update) < n_rollout_threads):
            maddpg.prep_training(device='cpu')  # If use GPU, here change
            for u_i in range(n_rollout_threads):
                for a_i in range(maddpg.nagents):
                    sample = replay_buffer.sample(batch_size, to_gpu=USE_CUDA)
                    maddpg.update(sample, a_i, logger=logger)
                maddpg.update_all_targets()
            maddpg.prep_rollouts(device='cpu')

    # Logging part
    ep_rews = replay_buffer.get_average_rewards(episode_length *
                                                n_rollout_threads)
    for a_i, a_ep_rew in enumerate(ep_rews):
        logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
Example 9
def run(config):
    # Make directory to store the results
    model_dir = Path('./models')/config.env_id/config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)

    # initialize tensorboard summary writer
    logger = SummaryWriter(str(log_dir))

    # use provided seed
    torch.manual_seed(config.seed)
    np.random.seed(config.seed)

    # IDK how helpful this is
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)

    env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
                            config.discrete_action)

    maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim,
                                  )
    if not rnn:    # TODO: this might break. code might not be modular (yet). Code works with RNN
        replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                     [obsp.shape[0] for obsp in env.observation_space],
                                     [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                      for acsp in env.action_space])
    else:
        # replay buffer obs space size is increased
        rnn_replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                     [obsp.shape[0]*history_steps for obsp in env.observation_space],
                                     [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                      for acsp in env.action_space])

        # This is just to store the global rewards and not for updating the policies
        g_storage_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                        [obsp.shape[0]*history_steps for obsp in env.observation_space],
                                        [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                         for acsp in env.action_space])

    t = 0
    #####################################################################################################
    #                                       START EPISODES                                              #
    #####################################################################################################
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))

        # List of Observations for each of the agents
        # E.g., For simple_spread, shape is {1,3,18}
        obs = env.reset()

        # For RNN history buffer. I know this is not modular.
        obs_tminus_0 = copy(obs)
        obs_tminus_1 = copy(obs)
        obs_tminus_2 = copy(obs)
        obs_tminus_3 = copy(obs)
        obs_tminus_4 = copy(obs)
        obs_tminus_5 = copy(obs)

        # # for 3 time-steps
        # obs_history = np.empty([1,3,54])
        # next_obs_history = np.empty([1,3,54])

        # For 6 time-steps (18*6 = 108)
        obs_history = np.empty([1, 3, 108])
        next_obs_history = np.empty([1, 3, 108])

        maddpg.prep_rollouts(device='cpu')

        # Exploration percentage remaining. IDK if this is a standard way of doing it however.
        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        ##################################################################################################
        #                                       START TIME-STEPS                                         #
        ##################################################################################################

        for et_i in range(config.episode_length):

            # Populate current history
            for a in range(3):  # env.nagents
                obs_history[0][a][:] = np.concatenate((obs_tminus_0[0][a][:], obs_tminus_1[0][a][:], obs_tminus_2[0][a][:],
                                                      obs_tminus_3[0][a][:], obs_tminus_4[0][a][:], obs_tminus_5[0][a][:]))
                # Now, temp has history of 6 timesteps for each agent

            if not rnn:    # TODO: This might break. Code works with RNN. !RNN not tested.
                # rearrange observations to be per agent, and convert to torch Variable
                torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                      requires_grad=False)
                             for i in range(maddpg.nagents)]

                # get actions (from learning algorithm) as torch Variables. For simple_spread this is discrete[5]
                torch_agent_actions = maddpg.step(torch_obs, explore=True)

            else:
                # rearrange histories to be per agent, and convert to torch Variable
                rnn_torch_obs = [Variable(torch.Tensor(np.vstack(obs_history[:, i])),
                                      requires_grad=False)
                             for i in range(maddpg.nagents)]
                # TODO: for RNN, actions should condition on history (DONE)
                torch_agent_actions = maddpg.step(rnn_torch_obs, explore=True)


            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]    # print(torch_agent_actions[0].data)
            # rearrange actions to be per environment. For single thread, it wont really matter.
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)

            ############### WHICH REWARD TO USE ##############
            # the rewards now contain global as well as difference rewards
            # Keep the global for logging, and difference for updates

            use_diff_reward = False    #TODO: THIS IS THE TYPE OF REWARD YOU USE

            # DIFFERENCE REWARDS
            d_rewards = []
            for n in range(maddpg.nagents):
                d_rewards.append([rewards[0][n][1]])
            d_rewards = [d_rewards]
            d_rewards = np.array(d_rewards)

            # GLOBAL REWARDS
            g_rewards = []
            for n in range(maddpg.nagents):
                g_rewards.append([rewards[0][n][0]])
            g_rewards = [g_rewards]
            g_rewards = np.array(g_rewards)

            # replace "reward" with the reward that you want to use
            if use_diff_reward:
                rewards = d_rewards
            else:
                rewards = g_rewards

            # Create history for next state
            '''
            history is [t, t-1, t-2]
            history[0] is because [0] is for one thread
            '''
            for a in range(3):      # env.nagents
                next_obs_history[0][a][:] = np.concatenate((next_obs[0][a][:], obs_tminus_0[0][a][:], obs_tminus_1[0][a][:],
                                                            obs_tminus_2[0][a][:], obs_tminus_3[0][a][:], obs_tminus_4[0][a][:]))
                    # Now, next_obs_history has history of 6 timesteps for each agent the next state

            # for RNN, replay buffer needs to store for e.g., states=[obs_t-2, obs_t-1, obs_t]
            if not rnn:
                replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
                obs = next_obs
            else:
                # Buffer used for updates
                rnn_replay_buffer.push(obs_history, agent_actions, rewards, next_obs_history, dones)
                # push global rewards into g_replay_buffer
                g_storage_buffer.push(obs_history, agent_actions, g_rewards, next_obs_history, dones)

            # Update histories
            obs_tminus_5 = copy(obs_tminus_4)
            obs_tminus_4 = copy(obs_tminus_3)
            obs_tminus_3 = copy(obs_tminus_2)

            obs_tminus_2 = copy(obs_tminus_1)
            obs_tminus_1 = copy(obs_tminus_0)
            obs_tminus_0 = copy(next_obs)

            t += config.n_rollout_threads
            if (len(rnn_replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = rnn_replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        # For plotting, use global reward achieved using difference rewards
        ep_rews = g_storage_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()

    print()
Example 10
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    #log_dir = run_dir / 'logs'
    os.makedirs(run_dir)
    #logger = SummaryWriter(str(log_dir))

    # Initialization of evaluation metrics
    collisions = [0]
    success_nums = [0]
    ccr_activates = [0]
    final_ep_rewards = []  # sum of rewards for training curve
    final_ep_collisions = []
    final_ep_activates = []
    final_ep_success_nums = []

    torch.manual_seed(run_num)
    np.random.seed(run_num)

    env = make_env(config.env_id, discrete_action=True)
    num_agents = env.n
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)

    # if config.emergency:
    #     env.switch_emergency()

    model = AttentionSAC.init_from_env(
        env,
        tau=config.tau,
        pi_lr=config.pi_lr,
        q_lr=config.q_lr,
        gamma=config.gamma,
        pol_hidden_dim=config.pol_hidden_dim,
        critic_hidden_dim=config.critic_hidden_dim,
        attend_heads=config.attend_heads,
        reward_scale=config.reward_scale)

    replay_buffer = ReplayBuffer(
        config.buffer_length, model.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0

    #### remove all tensorboard methods, replace with print and pickle

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        #print("Episodes %i-%i of %i" % (ep_i + 1,
        #                                ep_i + 1 + config.n_rollout_threads,
        #                                config.n_episodes))
        if config.emergency:
            env.switch_emergency()
        obs = env.reset()
        model.prep_rollouts(device='cpu')

        t_start = time.time()

        prev_obs = None
        act_n_t_minus_1 = None

        for et_i in range(config.episode_length):
            if config.CCR:
                if act_n_t_minus_1:
                    target_obs_n, _, _, _ = env.oracle_step(act_n_t_minus_1)
                    diff_state = obs[:, :, :4] - target_obs_n[:, :, :4]  # 12x4x4

                    if config.env_id == 'wall' or config.env_id == 'strong_wind' or config.env_id == 'wall_expos':
                        diff_obs = obs[:, :, -(model.nagents + 8 + 1)]
                    elif config.env_id == 'turbulence':
                        diff_obs = obs[:, :, -(model.nagents + 2 + 1)]
                    else:
                        assert (False)

                    emerg_n = np.sum(diff_state**2, axis=-1) + diff_obs  # 12x4

                    env.oracle_update()

                    # obs: 12x4x20
                    # emerg_n: 12x4
                    for agent_i in range(model.nagents):
                        for agent_j in range(model.nagents):
                            #print(obs[:, agent_i, -agent_j])
                            #print(emerg_n[:, agent_j])
                            obs[:, agent_i, -agent_j] = emerg_n[:, agent_j]
                            #print(obs[:, agent_i, -agent_j])
                            #print(emerg_n[:, agent_j])
            # collect experience
            if prev_obs is not None:
                replay_buffer.push(prev_obs, agent_actions, rewards, obs,
                                   dones)

            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(model.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)

            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]

            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]

            next_obs, rewards, dones, infos = env.step(actions)

            if config.CCR:
                if act_n_t_minus_1:
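                    # intrinsic reward: change in distance between agents i and
                    # j, normalized by the current distance and weighted by the
                    # difference in their emergency signals (emerg_n)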
                    for i in range(model.nagents):
                        for j in range(model.nagents):
                            # ccr_activates[-1] += 1
                            intrinsic_reward = np.linalg.norm(
                                next_obs[:, i, 2:4] - obs[:, j, 2:4],
                                axis=-1) - np.linalg.norm(
                                    obs[:, i, 2:4] - obs[:, j, 2:4], axis=-1)
                            intrinsic_reward /= (1 + np.linalg.norm(
                                obs[:, i, 2:4] - obs[:, j, 2:4], axis=-1))
                            intrinsic_reward *= (emerg_n[:, j] - emerg_n[:, i])
                            rewards[:, i] += 10 * intrinsic_reward / np.sqrt(
                                num_agents)
                            """
                            if (len(episode_rewards) == 2 or len(episode_rewards) == 2000 or len(episode_rewards) == 5000) and episode_step % 5 == 0:
                                Ls[i].append('      intrinsic reward = ' + str(intrinsic_reward) + '\n')
                            """
                            # if i == j: continue
                            # emerg_invalid = ~((emerg_n[:,j] > emerg_n[:,i]) & (emerg_n[:,j] > 0))
                            # ccr_activates[-1] += (~emerg_invalid).sum()
                            # intrinsic_reward = np.linalg.norm(next_obs[:,i,2:4] - obs[:,j,2:4], axis=-1) - np.linalg.norm(obs[:,i,2:4] - obs[:,j,2:4], axis=-1)
                            # intrinsic_reward[emerg_invalid] = 0
                            # rewards[:,i] += 10 * intrinsic_reward

                act_n_t_minus_1 = actions

            prev_obs = obs

            obs = next_obs

            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if config.use_gpu:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config.num_updates):
                    sample = replay_buffer.sample(config.batch_size,
                                                  to_gpu=config.use_gpu)
                    model.update_critic(sample, logger=None)
                    model.update_policies(sample, logger=None)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')

        ls_num_collision = env.get_collision_and_zero_out()

        collisions.append(np.array(
            ls_num_collision).mean())  # might need to convert to np.int

        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        ep_rews = np.array(ep_rews).mean()
        # save model, display training output

        print(
            "episodes: {}, mean episode reward: {}, mean number of collisions with wall: {}, ccr activates: {}, success numbers: {}, time: {}"
            .format(ep_i, ep_rews, np.mean(collisions[-config.save_rate:]),
                    np.mean(ccr_activates[-config.save_rate:]),
                    np.mean(success_nums[-config.save_rate:]),
                    round(time.time() - t_start, 3)))

        # Keep track of final episode reward
        final_ep_rewards.append(ep_rews)
        # final_ep_activates.append(np.mean(ccr_activates[-config.save_rate:]))
        final_ep_collisions.append(np.mean(collisions[-config.save_rate:]))
        final_ep_success_nums.append(np.mean(success_nums[-config.save_rate:]))
        if ep_i % config.save_rate == 0:
            # step=12 assumes config.n_rollout_threads == 12, so that x_axis
            # lines up with the per-iteration entries appended above
            x_axis = np.arange(0, ep_i + 1, step=12)
            # plot reward data
            rew_file_name = run_dir / 'rewards.png'

            plt.plot(x_axis, final_ep_rewards)
            plt.xlabel('training episode')
            plt.ylabel('reward')
            #plt.legend()
            plt.savefig(rew_file_name)

            plt.clf()

            collision_file_name = run_dir / 'collisions.png'

            plt.plot(x_axis, final_ep_collisions)
            plt.xlabel('training episode')
            plt.ylabel('number of collisions')
            #plt.legend()
            plt.savefig(collision_file_name)

            plt.clf()

            # activates_file_name = run_dir / 'activates.png'

            # plt.plot(x_axis, final_ep_activates)
            # plt.xlabel('training episode')
            # plt.ylabel('CCR activates')
            # #plt.legend()
            # plt.savefig(activates_file_name)

            # plt.clf()

            success_file_name = run_dir / 'successes.png'

            plt.plot(x_axis, final_ep_success_nums)
            plt.xlabel('training episode')
            plt.ylabel('success numbers')
            #plt.legend()
            plt.savefig(success_file_name)

            plt.clf()

            rew_file_name = run_dir / 'rewards.pkl'
            collision_file_name = run_dir / 'collisions.pkl'
            success_nums_file_name = run_dir / 'success_nums.pkl'
            # activates_file_name = run_dir / 'activates.pkl'

            with open(rew_file_name, 'wb') as fp:
                pickle.dump(final_ep_rewards, fp)
            with open(collision_file_name, 'wb') as fp:
                pickle.dump(final_ep_collisions, fp)

            # with open(activates_file_name, 'wb') as fp:
            #     pickle.dump(final_ep_activates, fp)

            with open(success_nums_file_name, 'wb') as fp:
                pickle.dump(final_ep_success_nums, fp)

            plt.clf()

        if ep_i % config.save_interval < config.n_rollout_threads:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                  (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    model.save(run_dir / 'model.pt')
    env.close()
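
# A minimal standalone sketch (not taken from the example above) of the update trigger these
# training loops share: t grows by n_rollout_threads per environment step, so the check
# (t % steps_per_update) < n_rollout_threads fires roughly once every steps_per_update steps,
# provided the buffer already holds a full batch. The helper name and numbers are illustrative.

def should_update(t, buffer_len, batch_size, steps_per_update, n_rollout_threads):
    """True when enough data is buffered and the step counter just crossed an update boundary."""
    return buffer_len >= batch_size and (t % steps_per_update) < n_rollout_threads

n_rollout_threads, steps_per_update = 4, 100
t, update_steps = 0, []
for _ in range(100):                      # 100 environment steps, 4 parallel threads each
    t += n_rollout_threads
    if should_update(t, buffer_len=1024, batch_size=1024,
                     steps_per_update=steps_per_update,
                     n_rollout_threads=n_rollout_threads):
        update_steps.append(t)
print(update_steps)                       # -> [100, 200, 300, 400]
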
Example n. 11
def run(config):
    USE_CUDA = False
    if config.gpu:
        if torch.cuda.is_available():
            USE_CUDA = True
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))
    
#     model_run = 'run%i' % max(exst_run_nums)
#     model_path = model_dir / model_run / 'model.pt'

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num,
                            config.n_controlled_lagents, config.n_controlled_ragents, config.reward_type, config.render)
    model = AttentionSAC.init_from_env(env,
                                       tau=config.tau,
                                       pi_lr=config.pi_lr,
                                       q_lr=config.q_lr,
                                       gamma=config.gamma,
                                       pol_hidden_dim=config.pol_hidden_dim,
                                       critic_hidden_dim=config.critic_hidden_dim,
                                       attend_heads=config.attend_heads,
                                       reward_scale=config.reward_scale)
    
#     model = AttentionSAC.init_from_save_(model_path, load_critic=False, gpu=USE_CUDA)
    
    replay_buffer = ReplayBuffer(config.buffer_length, model.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    best_rewards = 0
    t = 0
    num_episodes = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        
        if ep_i % (config.epoch_size * config.n_rollout_threads) == 0:
            stat = dict()
            stat['epoch'] = int(ep_i / (config.epoch_size * config.n_rollout_threads) + 1)
            
        obs = env.reset()
        model.prep_rollouts(device='cpu')
        
        s = dict()
        s['dones'] = [0 for i in range(config.n_rollout_threads)]
        s['num_episodes'] = [0 for i in range(config.n_rollout_threads)]
        s['reward'] = [0 for i in range(config.n_rollout_threads)]
        s['success'] = [0 for i in range(config.n_rollout_threads)]
        s['steps_taken'] = [0 for i in range(config.n_rollout_threads)]
        s['reward_buffer'] = [0 for i in range(config.n_rollout_threads)]
        s['steps_buffer'] = [0 for i in range(config.n_rollout_threads)]

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(model.nagents)]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config.num_updates):
                    sample = replay_buffer.sample(config.batch_size,
                                                  to_gpu=USE_CUDA)
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')
                
            for i in range(config.n_rollout_threads):
                s['reward'][i] += np.mean(rewards[i])
                s['steps_taken'][i] += 1
                if dones[i][0]:
                    s['dones'][i] += 1
                    s['num_episodes'][i] += 1
                    s['reward_buffer'][i] = s['reward'][i]
                    s['steps_buffer'][i] = s['steps_taken'][i]
                    if infos[i]['score_reward'] == 1:
                        s['success'][i] += 1
                if et_i == config.episode_length-1:
                    if not dones[i][0]:
                        if s['dones'][i] > 0:
                            s['reward'][i] = s['reward_buffer'][i]
                            s['steps_taken'][i] = s['steps_buffer'][i]
                        else:
                            s['num_episodes'][i] += 1
                            
        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        global_ep_rews = 0
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalars('agent%i/rewards' % a_i, {'mean_episode_rewards': a_ep_rew}, ep_i)
            global_ep_rews += a_ep_rew / (config.n_controlled_lagents + config.n_controlled_ragents)
        logger.add_scalars('global', {'global_rewards': global_ep_rews}, ep_i)
        
        if global_ep_rews > 0.007:
            model.save(run_dir / ('model_ep%i.pt' % ep_i))
#             print('model saved at ep%i' % ep_i)   
#             print('saved model reward: ', global_ep_rews)
        
        if global_ep_rews > best_rewards:
            best_rewards = global_ep_rews
            if best_rewards > 0.005:
                model.save(run_dir / ('best_model_ep%i.pt' % ep_i))
#                 print('best model saved at ep%i' % ep_i)
#                 print('best global reward: ', best_rewards)
                
#         if ep_i%500 == 0:
#             print('episode: ', ep_i)
#             print('global reward: ', global_ep_rews)
#             print('best global reward: ', best_rewards)

        if ep_i % config.save_interval < config.n_rollout_threads:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            model.save(run_dir / 'model.pt')
            
        # An "exact" episode is a real episode in the game, as opposed to one iteration of the training loop
        # Mean exact-episode statistics are computed only from completed exact episodes
        # We accumulate them over each epoch and report them once per epoch
        # (config.epoch_size * config.n_rollout_threads) is the number of training episodes an epoch contains
        # The mean exact-episode statistics (Reward, Steps-Taken, Success) are used for
        # visualization and comparison

        stat['num_episodes'] = stat.get('num_episodes', 0) + np.sum(s['num_episodes'])
        stat['reward'] = stat.get('reward', 0) + np.sum(s['reward'])
        stat['success'] = stat.get('success', 0) + np.sum(s['success'])
        stat['steps_taken'] = stat.get('steps_taken', 0) + np.sum(s['steps_taken'])

        if (ep_i+config.n_rollout_threads) % (config.epoch_size * config.n_rollout_threads) == 0:
            num_episodes += stat['num_episodes']
            print('Epoch {}'.format(stat['epoch']))
            print('Episode: {}'.format(num_episodes))
            print('Reward: {}'.format(stat['reward']/stat['num_episodes']))
            print('Success: {:.2f}'.format(stat['success']/stat['num_episodes']))
            print('Steps-Taken: {:.2f}'.format(stat['steps_taken']/stat['num_episodes']))

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
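
# A minimal standalone sketch of the epoch-level bookkeeping described in the comments of the
# example above: per-thread episode counters are summed into a running `stat` dict, and once
# per epoch the means over completed ("exact") episodes are reported. All names and numbers
# below are illustrative, not taken from the example.

import numpy as np

def accumulate_epoch_stats(stat, s):
    """Fold one training episode's per-thread counters s into the running epoch dict stat."""
    for key in ('num_episodes', 'reward', 'success', 'steps_taken'):
        stat[key] = stat.get(key, 0) + np.sum(s[key])
    return stat

stat = {'epoch': 1}
episodes = [  # two training episodes, each run on two rollout threads
    {'num_episodes': [1, 1], 'reward': [0.4, 0.6], 'success': [1, 0], 'steps_taken': [90, 120]},
    {'num_episodes': [2, 1], 'reward': [0.9, 0.3], 'success': [1, 1], 'steps_taken': [200, 80]},
]
for s in episodes:
    accumulate_epoch_stats(stat, s)

n = stat['num_episodes']                                      # 5 exact episodes in this epoch
print('Reward: {:.2f}'.format(stat['reward'] / n))            # 0.44
print('Success: {:.2f}'.format(stat['success'] / n))          # 0.60
print('Steps-Taken: {:.2f}'.format(stat['steps_taken'] / n))  # 98.00
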
Example n. 12
def run(config):
    # model_dir = Path('./models') / config.env_id / config.model_name
    # if not model_dir.exists():
    run_num = 10
    # else:
    #    exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
    #                     model_dir.iterdir() if
    #                     str(folder.name).startswith('run')]
    #    if len(exst_run_nums) == 0:
    #        run_num = 1
    #    else:
    #        run_num = max(exst_run_nums) + 1
    # curr_run = 'run%i' % run_num
    # run_dir = model_dir / curr_run
    # log_dir = run_dir / 'logs'
    log_dir = 'checkpoints0605_4/'
    run_dir = log_dir + 'logs/'
    # os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    # change env
    env = SCraftAdapter(map_name='3m', seed=123,step_mul=8, difficulty='7', game_version='latest', replay_dir="replay/")
    maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)
    replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                 [obsp for obsp in env.observation_space],
                                 [acsp for acsp in env.action_space])
    t = 0

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        stop = False
        obs = env.reset()
        # if stop:
        #     stop = False
        #     obs = env.reset()
        # else:
        #     obs=env._get_obs()

        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(
            config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        episode_length = 0
        while not stop:
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(maddpg.nagents)]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)


            actions = []
            agent_actions = np.zeros([len(env.action_space), env.action_space[0]])
            # mask out unavailable actions for each agent
            avail_actions = np.array(env.get_avail_actions())
            for agent_i in range(len(torch_agent_actions)):
                avail_agent = env.get_avail_agent_actions(agent_i)
                masked_action = [0 if avail_agent[i] == 0
                                 else torch_agent_actions[agent_i].data.numpy()[0][i]
                                 for i in range(len(avail_agent))]
                # pick the best available action
                actions.append(np.argmax(masked_action))
                # re-encode the chosen action as one-hot for the replay buffer
                agent_actions[agent_i][actions[agent_i]] = 1


            # torch_agent_actions=[(if agent_avail_actions  for action in ac.data.numpy()) for ac in torch_agent_actions]
            # convert actions to numpy arrays

            # rearrange actions to be per environment

            # actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)

            stop = dones[0][0]
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones, avail_actions)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                    (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
            episode_length += 1
        ep_rews = replay_buffer.get_average_rewards(
            episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

    #     if ep_i % config.save_interval < config.n_rollout_threads:
    #         os.makedirs(run_dir / 'incremental', exist_ok=True)
    #         maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
    #         maddpg.save(run_dir / 'model.pt')
    #
    # maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(os.path.join(log_dir, 'summary.json'))
    logger.close()
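
# A minimal standalone sketch of the action masking performed in the example above for the
# StarCraft adapter: unavailable actions are zeroed out of the policy output, the best
# remaining action is chosen with argmax, and the choice is re-encoded as a one-hot vector for
# the replay buffer. The policy output and availability mask below are made up for illustration.

import numpy as np

policy_out = np.array([0.05, 0.40, 0.10, 0.30, 0.15])   # per-action scores from the policy
avail = np.array([1, 0, 1, 1, 0])                        # 1 = action currently available

masked = np.where(avail == 1, policy_out, 0.0)           # zero out unavailable actions
chosen = int(np.argmax(masked))                          # best available action
one_hot = np.zeros_like(policy_out)
one_hot[chosen] = 1.0                                    # what gets stored in the buffer
print(chosen, one_hot)                                   # 3 [0. 0. 0. 1. 0.]
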
Example n. 13
def run(config):
    # Make directory to store the results
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)

    # initialize tensorboard summary writer
    logger = SummaryWriter(str(log_dir))

    # use provided seed
    torch.manual_seed(config.seed)
    np.random.seed(config.seed)

    # IDK how helpful this is
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)

    env = make_parallel_env(config.env_id, config.n_rollout_threads,
                            config.seed, config.discrete_action)

    maddpg = MADDPG.init_from_env(env,
                                  agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)
    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])

    t = 0
    global_reward_list = []
    # START EPISODES
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))

        # List of Observations for each of the agents
        # E.g., For simple_spread, shape is {1,3,18}
        obs = env.reset()
        maddpg.prep_rollouts(device='cpu')

        # Exploration percentage remaining. IDK if this is a standard way of doing it however.
        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        # START TIME-STEPS
        episode_reward = 0
        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions (from learning algorithm) as torch Variables. For simple_spread this is discrete[5]
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # print(torch_agent_actions[0].data)
            # rearrange actions to be per environment. For single thread, it wont really matter.
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            #print(rewards[0][0])
            episode_reward += rewards[0][0]

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')

        global_reward_list.append(episode_reward / (config.episode_length))
        #print(global_reward_list)
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')
    with open("DIFF_rewards.txt", "wb") as fp_:  # Pickling
        pickle.dump(global_reward_list, fp_)
    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
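
# A minimal standalone sketch of the linear exploration-noise schedule shared by the MADDPG
# examples above: the noise scale decays linearly from init_noise_scale to final_noise_scale
# over the first n_exploration_eps episodes and then stays constant. The parameter values
# below are placeholders, not the configuration of any particular example.

def noise_scale(ep_i, n_exploration_eps=25000, init_noise_scale=0.3, final_noise_scale=0.0):
    explr_pct_remaining = max(0, n_exploration_eps - ep_i) / n_exploration_eps
    return final_noise_scale + (init_noise_scale - final_noise_scale) * explr_pct_remaining

print([round(noise_scale(ep), 3) for ep in (0, 12500, 25000, 40000)])   # [0.3, 0.15, 0.0, 0.0]
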
Example n. 14
File: MAAC.py Project: leehe228/TIL
def run(config):
    model_dir = Path('./models') / config["env_id"] / config["model_name"]
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config["env_id"], config["n_rollout_threads"],
                            run_num)
    model = AttentionSAC.init_from_env(
        env,
        tau=config["tau"],
        pi_lr=config["pi_lr"],
        q_lr=config["q_lr"],
        gamma=config["gamma"],
        pol_hidden_dim=config["pol_hidden_dim"],
        critic_hidden_dim=config["critic_hidden_dim"],
        attend_heads=config["attend_heads"],
        reward_scale=config["reward_scale"])
    replay_buffer = ReplayBuffer(config["buffer_length"], model.nagents,
                                 [115 for _ in range(11)],
                                 [19 for _ in range(11)])
    t = 0
    for ep_i in range(0, config["n_episodes"], config["n_rollout_threads"]):
        print("Episodes %i-%i of %i" %
              (ep_i + 1, ep_i + 1 + config["n_rollout_threads"],
               config["n_episodes"]))
        obs = env.reset()
        model.prep_rollouts(device='cpu')

        done = [False]
        et_i = 0

        while not any(done):
            et_i += 1
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(model.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config["n_rollout_threads"])]

            actions_list = []
            for a in actions:
                temp = []
                for b in a:
                    temp.append(np.argmax(b))
                actions_list.append(temp)

            next_obs, rewards, done, infos = env.step(actions_list)

            dones = [done for _ in range(11)]

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config["n_rollout_threads"]
            if (len(replay_buffer) >= config["batch_size"]
                    and (t % config["steps_per_update"]) <
                    config["n_rollout_threads"]):
                if config["use_gpu"]:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config["num_updates"]):
                    sample = replay_buffer.sample(config["batch_size"],
                                                  to_gpu=config["use_gpu"])
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')

            print("ep_i : {} | et_i : {}".format(ep_i, et_i), end='\r')

        ep_rews = replay_buffer.get_average_rewards(
            config["episode_length"] * config["n_rollout_threads"])

        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                              a_ep_rew * config["episode_length"], ep_i)

        if ep_i % config["save_interval"] < config["n_rollout_threads"]:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                  (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
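
# A minimal standalone sketch of the logging rescale used in the example above:
# get_average_rewards returns a mean per-step reward over the last
# episode_length * n_rollout_threads transitions, so multiplying it by episode_length gives an
# approximate per-episode return for the scalar plots. The numbers below are illustrative.

import numpy as np

episode_length = 4
recent_rewards = np.array([0.0, 0.5, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0])  # one agent, last 8 steps
mean_step_reward = recent_rewards.mean()                  # what get_average_rewards reports
approx_episode_return = mean_step_reward * episode_length
print(mean_step_reward, approx_episode_return)            # 0.25 1.0
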
Example n. 15
def run(config):
    model_dir = Path('./models') / config["env_id"] / config["model_name"]
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config["n_rollout_threads"], run_num)
    model = AttentionSAC.init_from_env(
        env,
        tau=config["tau"],
        pi_lr=config["pi_lr"],
        q_lr=config["q_lr"],
        gamma=config["gamma"],
        pol_hidden_dim=config["pol_hidden_dim"],
        critic_hidden_dim=config["critic_hidden_dim"],
        attend_heads=config["attend_heads"],
        reward_scale=config["reward_scale"])
    # (** EDITED **) Set Replay Buffer
    # set up the buffer dimensions by iterating over the shapes of env.action_space and env.observation_space
    replay_buffer = ReplayBuffer(config["buffer_length"], model.nagents,
                                 [115 for _ in range(model.nagents)],
                                 [19 for _ in range(model.nagents)])
    t = 0
    for ep_i in range(0, config["n_episodes"], config["n_rollout_threads"]):
        print("Episodes %i-%i of %i" %
              (ep_i + 1, ep_i + 1 + config["n_rollout_threads"],
               config["n_episodes"]))

        obs = env.reset()
        model.prep_rollouts(device='cpu')

        for et_i in range(config["episode_length"]):
            print("episode : {} | step : {}".format(ep_i, et_i), end='\r')
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(model.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config["n_rollout_threads"])]

            # Reform Actions list to fit on Football Env
            # The Google Football environment takes integer action ids, not one-hot encoded action lists
            actions_list = [[np.argmax(b) for b in a] for a in actions]

            # Step
            next_obs, rewards, dones, infos = env.step(actions_list)

            # Prevention of divergence
            # Without this, training diverges (NaN) and cannot proceed
            rewards = rewards - 0.000001

            # Reform Done Flag list
            # Rebuild the done flag list to match the replay buffer's expected shape
            dones = (np.array([dones for _ in range(model.nagents)])).T

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config["n_rollout_threads"]
            if (len(replay_buffer) >= config["batch_size"]
                    and (t % config["steps_per_update"]) <
                    config["n_rollout_threads"]):
                if config["use_gpu"]:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config["num_updates"]):
                    sample = replay_buffer.sample(config["batch_size"],
                                                  to_gpu=config["use_gpu"])
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(
            config["episode_length"] * config["n_rollout_threads"])
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                              a_ep_rew * config["episode_length"], ep_i)

        if ep_i % config["save_interval"] < config["n_rollout_threads"]:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                  (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
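
# A minimal standalone sketch of the two reshaping steps commented in the example above: the
# one-hot policy outputs become integer action ids via argmax, and the per-thread done flags
# are replicated per agent so the replay buffer sees one flag per agent. Shapes and values here
# are illustrative.

import numpy as np

nagents = 3
actions = [[np.eye(5)[a] for a in (1, 4, 0)],            # thread 0: one-hot actions per agent
           [np.eye(5)[a] for a in (2, 2, 3)]]            # thread 1
actions_list = [[int(np.argmax(b)) for b in a] for a in actions]
print(actions_list)                                      # [[1, 4, 0], [2, 2, 3]]

dones = np.array([False, True])                          # one flag per rollout thread
dones_per_agent = np.array([dones for _ in range(nagents)]).T
print(dones_per_agent.shape)                             # (2, 3) = (n_rollout_threads, nagents)
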
Example n. 16
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads,
                            config.seed, config.discrete_action)

    if isinstance(env.action_space[0], Box):
        discr_act = False
        get_shape = lambda x: x.shape[0]
    else:  # Discrete
        discr_act = True
        get_shape = lambda x: x.n
    num_out_pol = get_shape(env.action_space[0])

    agent_init_params = {
        'num_in_pol': env.observation_space[0].shape[0],
        'num_out_pol': num_out_pol,
        'num_vars': len(env.agent_types)
    }
    maddpg = MADDPG(agent_init_params,
                    nagents=len(env.agent_types),
                    tau=config.tau,
                    lr=config.lr,
                    hidden_dim=config.hidden_dim,
                    discrete_action=discr_act)

    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ], config.hidden_dim * (maddpg.nagents - 1))
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        rnn_hidden = (
            torch.zeros(1,
                        config.n_rollout_threads * maddpg.nagents * (maddpg.nagents - 1),
                        config.hidden_dim),
            torch.zeros(1,
                        config.n_rollout_threads * maddpg.nagents * (maddpg.nagents - 1),
                        config.hidden_dim))

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions, new_rnn_hidden = maddpg.step(torch_obs,
                                                              rnn_hidden,
                                                              explore=True)
            hid_to_store = (
                rnn_hidden[0].detach().contiguous().view(
                    config.n_rollout_threads, maddpg.nagents, -1),
                rnn_hidden[1].detach().contiguous().view(
                    config.n_rollout_threads, maddpg.nagents, -1))
            next_hid_to_store = (
                new_rnn_hidden[0].detach().contiguous().view(
                    config.n_rollout_threads, maddpg.nagents, -1),
                new_rnn_hidden[1].detach().contiguous().view(
                    config.n_rollout_threads, maddpg.nagents, -1))

            # convert actions to numpy arrays
            agent_actions = [
                ac.data.numpy() for ac in torch_agent_actions.cpu()
            ]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, hid_to_store, agent_actions, rewards,
                               next_obs, next_hid_to_store, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                sample = replay_buffer.sample(config.batch_size,
                                              to_gpu=USE_CUDA)
                maddpg.update(sample, ep_i)
                maddpg.update_all_targets()
            rnn_hidden = new_rnn_hidden
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)
            print("Episode %i, reward for %i is " % (ep_i + 1, a_i), a_ep_rew)

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
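
# A minimal standalone sketch of the observation/action dimension lists these examples pass to
# ReplayBuffer: a Box space contributes its vector length, a Discrete space contributes its
# number of actions. The gym spaces below are constructed only for illustration and assume the
# same gym package the examples import Box from.

import numpy as np
from gym.spaces import Box, Discrete

observation_space = [Box(low=-np.inf, high=np.inf, shape=(18,)),
                     Box(low=-np.inf, high=np.inf, shape=(16,))]
action_space = [Discrete(5), Box(low=-1.0, high=1.0, shape=(2,))]

obs_dims = [obsp.shape[0] for obsp in observation_space]
ac_dims = [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
           for acsp in action_space]
print(obs_dims, ac_dims)                                 # [18, 16] [5, 2]
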
Example n. 17
File: train.py Project: Makiah/MAAC
def run(halite_env: BaseEnv, load_latest: bool=False):
    config = halite_env.config

    model_path, run_num, run_dir, log_dir = run_setup(config.model_name, get_latest_model=load_latest)

    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)

    # Build MAAC model
    if model_path is None:
        model = AttentionSAC(halite_env.agent_type_topologies,
                             tau=config.tau,
                             pi_lr=config.pi_lr,
                             q_lr=config.q_lr,
                             gamma=config.gamma,
                             pol_hidden_dim=config.pol_hidden_dim,
                             critic_hidden_dim=config.critic_hidden_dim,
                             attend_heads=config.attend_heads,
                             reward_scale=config.reward_scale)
    else:
        model = AttentionSAC.init_from_save(model_path, load_critic=True)

    # Build replay buffer
    replay_buffer = ReplayBuffer(config.buffer_length)

    prev_time = time.perf_counter()

    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        curr_time = time.perf_counter()
        print("Episodes %i-%i of %i (%is)" % (ep_i + 1,
                                              ep_i + 1 + config.n_rollout_threads,
                                              config.n_episodes,
                                              (curr_time - prev_time)))
        model.prep_rollouts(device='cpu')

        game_reward = halite_env.simulate(lambda o: model.step(o, explore=True), replay_buffer)

        t += config.n_rollout_threads
        if (replay_buffer.length() >= config.batch_size and
            (t % config.games_per_update) < config.n_rollout_threads):
            print("Training")
            if config.use_gpu:
                model.prep_training(device='gpu')
            else:
                model.prep_training(device='cpu')
            for u_i in range(config.num_updates):
                sample: List[Dict[AgentKey, AgentReplayFrame]] = replay_buffer.sample(config.batch_size)
                # print("Original sample size", len(sample))
                # print("Preprocessing to batch structure")
                sample: Dict[AgentKey, BatchedAgentReplayFrame] = preprocess_to_batch(sample, to_gpu=config.use_gpu)
                # print("Filtered sample size", len(sample))
                # if len(sample) < 5:
                #     print("Sample size keys:", sample.keys())
                # print("Updating model critic")
                model.update_critic(sample, logger=logger)
                # print("Updating model policies")
                model.update_policies(sample, logger=logger)
                model.update_all_targets()
            model.prep_rollouts(device='cpu')

        ep_rews = replay_buffer.get_average_rewards(config.episode_length * config.n_rollout_threads)
        for k, v in ep_rews.items():
            logger.add_scalar('agent%s/mean_episode_rewards' % str(k), v, ep_i)

        logger.add_scalar("global_env_rewards", game_reward, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            print("Saving")
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            model.save(run_dir / 'model.pt')
            print("run_dir", run_dir)

        prev_time = curr_time

    model.save(run_dir / 'model.pt')
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
Example n. 18
def run(config):
    model_dir = Path('./models') / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)

    env = gym.make("intersection-multiagent-v0")

    maddpg = MADDPG.init_from_env(env,
                                  agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)

    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0
    delay_step = config.delay_step
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        agent_obs = []
        for i in range(4):
            agent_obs.append(
                np.array([
                    obs[i % 4], obs[(i + 1) % 4], obs[(i + 2) % 4],
                    obs[(i + 3) % 4]
                ]).flatten())
        obs = np.array([agent_obs])
        zero_agent_actions = [1, 1, 1, 1]
        last_agent_actions = [zero_agent_actions for _ in range(delay_step)]

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                torch.FloatTensor(np.vstack(obs[:, i]))
                for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            #             print(obs)
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # print(agent_actions)
            # rearrange actions to be per environment
            if delay_step == 0:
                actions = [np.argmax(agent_actions[i][0]) for i in range(4)]
            else:
                future_actions = [
                    np.argmax(agent_actions[i][0]) for i in range(4)
                ]
                actions = last_agent_actions[0]
                last_agent_actions = last_agent_actions[1:]
                last_agent_actions.append(future_actions)
            next_obs, rewards, dones, infos = env.step(actions)
            #             print(rewards)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            if dones[0][0]:
                break

            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):

                    for a_i in range(
                            maddpg.nagents):  #do not update the runner
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            # logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
            logger.add_scalars('agent%i/mean_episode_rewards' % a_i,
                               {'reward': a_ep_rew}, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
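
# A minimal standalone sketch of the action-delay queue used in the example above: with
# delay_step > 0 the action executed now is the one chosen delay_step steps earlier, while the
# freshly chosen action joins the back of the queue. The placeholder actions and values are
# illustrative.

delay_step = 2
zero_agent_actions = [1, 1, 1, 1]                        # placeholder actions for the first steps
last_agent_actions = [zero_agent_actions for _ in range(delay_step)]

for step, future_actions in enumerate([[0, 2, 3, 1], [3, 3, 0, 2], [1, 0, 2, 2]]):
    actions = last_agent_actions[0]                      # chosen delay_step steps ago
    last_agent_actions = last_agent_actions[1:]          # drop the executed entry
    last_agent_actions.append(future_actions)            # newly chosen action waits its turn
    print(step, actions)
# 0 [1, 1, 1, 1]
# 1 [1, 1, 1, 1]
# 2 [0, 2, 3, 1]
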
Example n. 19
def run(config):
    device = torch.device('cuda' if USE_CUDA else 'cpu')
    print('Using device:', device)
    if device.type == 'cuda':
        print(torch.cuda.get_device_name(0))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
        print('Cached:   ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')

    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    print(str(log_dir))
    logger = SummaryWriter(str(log_dir))
    #logger = None

    with open(run_dir / "hyperparameters.txt", "w+") as f:
        f.write(str(config))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
                            config.discrete_action, config.benchmark)
    maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim, 
                                  stochastic = config.stochastic, 
                                  commonCritic = config.commonCritic, gasil = config.gasil, dlr = config.dlr, lambda_disc = config.lambda_disc,
                                  batch_size_disc = config.batch_size_disc, dynamic=config.dynamic)
    replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    expert_replay_buffer = PriorityReplayBuffer(config.expert_buffer_length, config.episode_length, maddpg.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0
    agent_info = [[[] for i in range(config.n_rollout_threads)]]
    reward_info = []
    total_returns = []
    eval_trajectories = []
    expert_average_returns = []
    trajectories = []
    durations = []
    start_time = time.time()
    expert_trajectories = []
    evaluation_rewards = []
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        if ep_i%100 == 0:
            mins = (time.time() - start_time)/60
            durations.append(mins)
            print(mins, "minutes")
            start_time = time.time()

        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()
        current_episode = [[] for i in range(config.n_rollout_threads)]
        current_trajectory = [[] for i in range(config.n_rollout_threads)]
        current_entities = []
        total_dense = None
        if config.store_traj:
            cur_state_ent = env.getStateEntities()
            for i in range(config.n_rollout_threads):
                current_entities.append(cur_state_ent[i])
           
            cur_state = env.getState()
            for i in range(config.n_rollout_threads):
                current_trajectory[i].append(cur_state[i])
        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(maddpg.nagents)]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)

            if config.store_traj:
                cur_state = env.getState()
                for i in range(config.n_rollout_threads):
                    current_trajectory[i].append(cur_state[i])

            for i in range(config.n_rollout_threads):
                current_episode[i].append([obs[i], actions[i]])
            
            if config.benchmark:
                #Fix this
                for i, info in enumerate(infos):
                    agent_info[-1][i].append(info['n'])

            if et_i == 0:
                total_dense = rewards
            else:
                total_dense = total_dense + rewards

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads and
                ((expert_replay_buffer.num_traj*config.episode_length >= config.batch_size_disc) == (maddpg.gasil))):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                if maddpg.gasil:
                    for update_i in range(config.num_disc_updates):
                        sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
                        
                        sample_expert = expert_replay_buffer.sample(config.batch_size_disc,
                                                    to_gpu=USE_CUDA)
                        maddpg.gasil_disc_update(sample_normal, sample_expert, 0, logger=logger, num_disc_permutations = config.num_disc_permutations)

                    for update_i in range(config.num_AC_updates):
                        sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
                        maddpg.gasil_AC_update(sample_normal, 0, episode_num = ep_i, logger=logger, num_AC_permutations = config.num_AC_permutations) 
                else:
                    for update_i in range(config.num_AC_updates):
                        sample_normal = replay_buffer.sample(config.batch_size,to_gpu=USE_CUDA, norm_rews = False)
                        maddpg.update(sample_normal, 0, logger=logger, num_AC_permutations = config.num_AC_permutations)
                maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        total_returns.append(total_dense)
        if maddpg.gasil:
            expert_replay_buffer.push(current_episode, total_dense, config.n_rollout_threads, current_entities, current_trajectory, config.store_traj)
            expert_average_returns.append(expert_replay_buffer.get_average_return())
        
        if config.store_traj:
            for i in range(config.n_rollout_threads):
                trajectories.append([current_entities[i], current_trajectory[i]])

        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalars('agent%i/rew' % a_i,
                               {'mean_episode_rewards': a_ep_rew},
                               ep_i)
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
        
        #save mean episode rewards
        #save benchmarking data
        agent_info.append([[] for i in range(config.n_rollout_threads)])
        reward_info.append(ep_rews)
        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')
            #save the trajectories in the expert replay buffer 
            trajec = expert_replay_buffer.get_trajectories()
            if config.store_traj:
                expert_trajectories.append(trajec)
        
        if ep_i % config.eval_interval < config.n_rollout_threads:
            current_eval = []
            current_trajectories = []

            for ep_i_eval in range(0, config.n_eval_episodes, config.n_rollout_threads):
                obs = env.reset()
                total_eval = None
                maddpg.prep_rollouts(device='cpu')

                if config.store_traj:
                    current_trajectory = [[] for i in range(config.n_rollout_threads)]
                    current_entities = []
                    cur_state_ent = env.getStateEntities()
                    for i in range(config.n_rollout_threads):
                        current_entities.append(cur_state_ent[i])

                    cur_state = env.getState()
                    for i in range(config.n_rollout_threads):
                        current_trajectory[i].append(cur_state[i])

                for et_i in range(config.episode_length):
                    torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                        requires_grad=False)
                                for i in range(maddpg.nagents)]
                    torch_agent_actions = maddpg.step(torch_obs, explore=False)
                    agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
                    actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
                    next_obs, rewards, dones, infos = env.step(actions)
                    if config.store_traj:
                        cur_state = env.getState()
                        for i in range(config.n_rollout_threads):
                            current_trajectory[i].append(cur_state[i])

                    
                    if et_i == 0:
                        total_eval = rewards
                    else:
                        total_eval = total_eval + rewards
                    obs = next_obs
                current_eval.append(total_eval)
                if config.store_traj:
                    for i in range(config.n_rollout_threads):
                        current_trajectories.append([current_entities[i], current_trajectory[i]])
            
            if config.store_traj:
                eval_trajectories.append(current_trajectories)
            evaluation_rewards.append(current_eval)
Example n. 20
def run(config):
    model_dir = Path('./models') / config.env_name / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1

    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    os.system("cp shape.txt {}".format(run_dir))
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)

    # number of threads used for training
    if not config.use_cuda:
        torch.set_num_threads(config.n_training_threads)

    # parallel worker processes for environment sampling

    env = make_parallel_env(config.num_agents, config.n_rollout_threads,
                            run_num, config.shape_file)
    maddpg = MADDPG.init_from_env(env=env,
                                  agent_alg=config.agent_alg,
                                  cripple_alg=config.cripple_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim,
                                  discrete_action=config.discrete_action)
    # alternatively, resume training from a saved model:
    # maddpg = MADDPG.init_from_save(model_dir / 'run1' / 'model.pt')

    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])

    t = 0
    a_loss = []
    c_loss = []
    rewss = []

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))

        obs = env.reset()

        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')  # show for the first time

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        #if config.display:
        #    for env_show in env.envs:
        #        env_show.render('human', close=False)

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)

            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]

            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]

            #actions = [np.array([i.tolist().index(1.0) for i in action]) for action in actions_one_hot]

            # scale the second action component by pi before stepping the environment
            for env_actions in actions:
                for agent_action in env_actions:
                    agent_action[1] *= np.pi

            next_obs, rewards, dones, infos = env.step(actions)

            #print(len(agent_actions),len(next_obs))
            #if config.display:
            #    for env_show in env.envs:
            #        env_show.render('human', close=False)

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                #print(t)
                if config.use_cuda:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=config.use_cuda,
                                                      norm_rews=True)
                        maddpg.update(sample,
                                      a_i,
                                      logger=logger,
                                      actor_loss_list=a_loss,
                                      critic_loss_list=c_loss)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        rewss.append(ep_rews)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)
            # print('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(str(run_dir / 'incremental'), exist_ok=True)
            maddpg.save(
                str(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1))))
            maddpg.save(str(run_dir / 'model.pt'))
    maddpg.save(str(run_dir / 'model.pt'))
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
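
Every example repeats the same bookkeeping for picking the next free runN folder under ./models/<env_id>/<model_name>. A small helper capturing just that logic might look like the sketch below (next_run_dir is a name invented for illustration; these scripts inline the logic instead):

from pathlib import Path

def next_run_dir(model_dir: Path) -> Path:
    """Return model_dir/run<k+1>, where k is the highest existing run number."""
    if not model_dir.exists():
        return model_dir / 'run1'
    run_nums = [int(p.name.split('run')[1])
                for p in model_dir.iterdir()
                if p.name.startswith('run')]
    return model_dir / ('run%i' % (max(run_nums) + 1 if run_nums else 1))

# usage: run_dir = next_run_dir(Path('./models') / 'simple_spread' / 'maddpg')
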
Esempio n. 21
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads,
                            config.seed, config.discrete_action)

    if config.load_adv:
        model_path = (Path('./models') / config.env_id / config.model_name /
                      ('run%i' % config.run_num))
        model_path = model_path / 'model.pt'
        maddpg = MADDPG.init_from_env_with_runner_delay_unaware(
            env,
            agent_alg=config.agent_alg,
            adversary_alg=config.adversary_alg,
            tau=config.tau,
            lr=config.lr,
            hidden_dim=config.hidden_dim,
            file_name=model_path)
    else:
        maddpg = MADDPG.init_from_env(env,
                                      agent_alg=config.agent_alg,
                                      adversary_alg=config.adversary_alg,
                                      tau=config.tau,
                                      lr=config.lr,
                                      hidden_dim=config.hidden_dim)

    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0
    delay_step = config.delay_step
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))
        obs = env.reset()
        maddpg.prep_rollouts(device='gpu')

        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        if config.env_id == 'simple_speaker_listener':
            zero_agent_actions = [
                np.array([[0, 0, 0]]),
                np.array([[0, 0, 0, 0, 0]])
            ]
        elif config.env_id == 'simple_spread':
            zero_agent_actions = [
                np.array([[0.0, 0.0, 0.0, 0.0, 0.0]])
                for _ in range(maddpg.nagents)
            ]
        elif config.env_id == 'simple_tag':
            zero_agent_actions = [
                np.array([0.0, 0.0]) for _ in range(maddpg.nagents)
            ]
        last_agent_actions = [zero_agent_actions for _ in range(delay_step)]

        for et_i in range(config.episode_length):
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            if config.load_adv:
                if delay_step == 0:
                    actions = [[ac[i] for ac in agent_actions]
                               for i in range(config.n_rollout_threads)]
                else:
                    agent_actions_tmp = [[
                        ac[i] for ac in agent_actions
                    ] for i in range(config.n_rollout_threads)][0][:]
                    actions = last_agent_actions[0]
                    actions.append(agent_actions_tmp[-1])
                    last_agent_actions = last_agent_actions[1:]
                    last_agent_actions.append(agent_actions_tmp[:2])
                actions = [actions]
                next_obs, rewards, dones, infos = env.step(
                    copy.deepcopy(actions))

            else:
                if delay_step == 0:
                    actions = [[ac[i] for ac in agent_actions]
                               for i in range(config.n_rollout_threads)]
                else:
                    actions = [[ac[i] for ac in last_agent_actions[0]]
                               for i in range(config.n_rollout_threads)]
                    last_agent_actions.pop(0)
                    last_agent_actions.append(agent_actions)

                next_obs, rewards, dones, infos = env.step(
                    copy.deepcopy(actions))
            # print('1', obs, agent_actions, rewards, next_obs, dones)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)

            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    if config.load_adv:
                        for a_i in range(maddpg.nagents -
                                         1):  #do not update the runner
                            sample = replay_buffer.sample(config.batch_size,
                                                          to_gpu=USE_CUDA)
                            maddpg.update(sample, a_i, logger=logger)
    #                     maddpg.update_all_targets()
                        maddpg.update_adversaries()
                    else:
                        for a_i in range(
                                maddpg.nagents):  #do not update the runner
                            sample = replay_buffer.sample(config.batch_size,
                                                          to_gpu=USE_CUDA)
                            maddpg.update(sample, a_i, logger=logger)
                        maddpg.update_all_targets()
                maddpg.prep_rollouts(device='gpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalars('agent%i/mean_episode_rewards' % a_i,
                               {'reward': a_ep_rew}, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
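
The delay_step handling above keeps a FIFO of previously chosen joint actions and always executes the oldest entry, so the environment sees actions that are delay_step steps stale. Stripped of the MADDPG specifics, the queue behaves roughly as in this sketch (the scalar actions are placeholders):

from collections import deque

delay_step = 2
# pre-fill the queue so the first delay_step steps fall back to a neutral action
queue = deque([0.0] * delay_step)

for t in range(6):
    new_action = float(t)        # action the policy picks at step t
    executed = queue.popleft()   # action actually sent to the environment
    queue.append(new_action)
    # from step delay_step onwards, `executed` is the action chosen at t - delay_step
    print(t, executed)
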
Esempio n. 22
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads,
                            config.seed, config.discrete_action)
    maddpg = MADDPG.init_from_env(env,
                                  agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim,
                                  noisy_sharing=True,
                                  noisy_SNR=config.noisy_SNR,
                                  game_id=config.env_id,
                                  est_ac=config.est_action)
    replay_buffer = ReplayBuffer(
        config.buffer_length, maddpg.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0
    print(
        '#########################################################################'
    )
    print('Adversary using: ', config.adversary_alg, 'Good agent using: ',
          config.agent_alg, '\n')
    print('Noisy SNR is: ', config.noisy_SNR)
    print(
        '#########################################################################'
    )
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')
        if ep_i % 5000 == 0:
            maddpg.lr *= 0.5
        explr_pct_remaining = max(
            0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale +
                           (config.init_noise_scale -
                            config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(maddpg.nagents)
            ]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()

                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew,
                              ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            print("Episodes %i-%i of %i, rewards are: \n" %
                  (ep_i + 1, ep_i + 1 + config.n_rollout_threads,
                   config.n_episodes))
            for a_i, a_ep_rew in enumerate(ep_rews):
                print('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                   (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

        # *** perform validation every 1000 episodes. i.e. run N=10 times without exploration ***
        if ep_i % config.validate_every_n_eps == config.validate_every_n_eps - 1:
            # assumes only a single env is running
            episodes_stats = []
            info_for_one_env_among_timesteps = []
            print('*' * 10, 'Validation BEGINS', '*' * 10)
            for valid_et_i in range(config.run_n_eps_in_validation):
                obs = env.reset()
                maddpg.prep_rollouts(device='cpu')
                explr_pct_remaining = max(0, config.n_exploration_eps -
                                          ep_i) / config.n_exploration_eps
                maddpg.scale_noise(
                    config.final_noise_scale +
                    (config.init_noise_scale - config.final_noise_scale) *
                    explr_pct_remaining)
                maddpg.reset_noise()

                curr_episode_stats = []
                for et_i in range(config.episode_length):
                    # rearrange observations to be per agent, and convert to torch Variable
                    torch_obs = [
                        Variable(torch.Tensor(np.vstack(obs[:, i])),
                                 requires_grad=False)
                        for i in range(maddpg.nagents)
                    ]
                    # get actions as torch Variables
                    torch_agent_actions = maddpg.step(torch_obs, explore=False)
                    # convert actions to numpy arrays
                    agent_actions = [
                        ac.data.numpy() for ac in torch_agent_actions
                    ]
                    # rearrange actions to be per environment
                    actions = [[ac[i] for ac in agent_actions]
                               for i in range(config.n_rollout_threads)]
                    next_obs, rewards, dones, infos = env.step(actions)

                    info_for_one_env_among_timesteps.append(infos[0]['n'])

                    curr_episode_stats.append(infos[0]['n'])

                    obs = next_obs
                episodes_stats.append(curr_episode_stats)

            print('Summary statistics:')
            if config.env_id == 'simple_tag':
                # avg_collisions = sum(map(sum,info_for_one_env_among_timesteps))/config.run_n_eps_in_validation
                episodes_stats = np.array(episodes_stats)
                # print(episodes_stats.shape)
                # validation logging
                with open(f'{config.model_name}.log', 'a') as valid_logfile:
                    valid_logwriter = csv.writer(valid_logfile, delimiter=' ')
                    valid_logwriter.writerow(
                        np.sum(episodes_stats, axis=(1, 2)).tolist())
                avg_collisions = np.sum(
                    episodes_stats) / episodes_stats.shape[0]
                print(f'Avg of collisions: {avg_collisions}')

            elif config.env_id == 'simple_speaker_listener':
                for i, stat in enumerate(info_for_one_env_among_timesteps):
                    print(f'step {i}: {stat}')
            else:
                raise NotImplementedError
            print('*' * 10, 'Validation ENDS', '*' * 10)

        # *** END of VALIDATION ***

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
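
Like most of these scripts, the one above anneals exploration noise linearly: the scale starts at init_noise_scale and decays to final_noise_scale over n_exploration_eps episodes, then stays at the final value. The schedule in isolation is just the sketch below (noise_scale is a standalone function written here for illustration, and the default numbers are placeholders rather than values taken from the config):

def noise_scale(ep_i, n_exploration_eps=25000,
                init_noise_scale=0.3, final_noise_scale=0.0):
    """Linear decay from init_noise_scale to final_noise_scale over n_exploration_eps."""
    explr_pct_remaining = max(0, n_exploration_eps - ep_i) / n_exploration_eps
    return final_noise_scale + (init_noise_scale -
                                final_noise_scale) * explr_pct_remaining

# e.g. noise_scale(0) == 0.3, noise_scale(12500) == 0.15, noise_scale(30000) == 0.0
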
Esempio n. 23
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1

    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)
    model = make_model(env, config)
    replay_buffer = ReplayBuffer(config.buffer_length, model.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])

    recent_reliable_obs = [[None for i in range(model.nagents)] for e in range(config.n_rollout_threads)]

    print("Start train Agents...")

    t = 0
    steps, avg_ep_rew = 0, 0
    t_start = time.time()

    each_rws = []
    large_rws = []
    small_rws = []

    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        model.prep_rollouts(device='cpu')
        obs, validity = env.reset()
        obs = get_reliable_obs(model, obs, recent_reliable_obs, validity)

        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])), requires_grad=False)
                         for i in range(model.nagents)]

            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)

            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]

            next_obs, rewards, dones, infos, next_validity = env.step(actions)

            next_obs = get_reliable_obs(model, next_obs, recent_reliable_obs, next_validity)

            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            validity = next_validity
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                    (t % config.steps_per_update) < config.n_rollout_threads):

                if config.use_gpu:
                    model.prep_training(device='gpu')
                else:
                    model.prep_training(device='cpu')
                for u_i in range(config.num_updates):
                    sample = replay_buffer.sample(config.batch_size,
                                                  to_gpu=config.use_gpu)
                    model.update_critic(sample, logger=logger)
                    model.update_policies(sample, logger=logger)
                    model.update_all_targets()
                model.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)

        steps += 1

        for a_i, a_ep_rew in enumerate(ep_rews):
            avg_ep_rew += a_ep_rew
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
        each_rws.append(ep_rews[0])
        small_rws.append(sum(ep_rews))
        large_rws.append(sum(ep_rews) * config.episode_length)
        logger.add_scalar('large_rewards', large_rws[-1], ep_i)
        logger.add_scalar('small_rewards', small_rws[-1], ep_i)

        if ep_i > 1 and (ep_i + 1) % config.save_interval < config.n_rollout_threads:
            print("Episodes %i of %i" % (ep_i + config.n_rollout_threads,
                                         config.n_episodes), end=' ')
            print('mean_episode_rewards: %f, time: %f' % (
                avg_ep_rew / steps * config.episode_length, round(time.time() - t_start, 3)))
            t_start = time.time()
            steps, avg_ep_rew = 0, 0
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            if (ep_i + 1) % (config.save_interval * 5) < config.n_rollout_threads:
                model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
                model.save(run_dir / 'model.pt')

    large_rew_file_name = log_dir / (config.model_name + '_large_rewards.pkl')
    with open(large_rew_file_name, 'wb') as fp:
        pickle.dump(large_rws, fp)

    small_rew_file_name = log_dir / (config.model_name + '_small_rewards.pkl')
    with open(small_rew_file_name, 'wb') as fp:
        pickle.dump(small_rws, fp)

    each_rew_file_name = log_dir / (config.model_name + '_each_rewards.pkl')
    with open(each_rew_file_name, 'wb') as fp:
        pickle.dump(each_rws, fp)

    model.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()

    print("Agents train completion!\n")
Esempio n. 24
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        run_num = 1
    else:
        exst_run_nums = [
            int(str(folder.name).split('run')[1])
            for folder in model_dir.iterdir()
            if str(folder.name).startswith('run')
        ]
        if len(exst_run_nums) == 0:
            run_num = 1
        else:
            run_num = max(exst_run_nums) + 1
    curr_run = 'run%i' % run_num
    run_dir = model_dir / curr_run

    torch.manual_seed(run_num)
    np.random.seed(run_num)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)
    envActionSpace = env.action_space
    envObservationSpace = env.observation_space

    model = AttentionSAC.init_from_env(
        envActionSpace,
        envObservationSpace,
        tau=config.tau,
        pi_lr=config.pi_lr,
        q_lr=config.q_lr,
        gamma=config.gamma,
        pol_hidden_dim=config.pol_hidden_dim,  #128
        critic_hidden_dim=config.critic_hidden_dim,  #128
        attend_heads=config.attend_heads,  #4
        reward_scale=config.reward_scale)
    replay_buffer = ReplayBuffer(
        config.buffer_length, model.nagents,
        [obsp.shape[0] for obsp in env.observation_space], [
            acsp.shape[0] if isinstance(acsp, Box) else acsp.n
            for acsp in env.action_space
        ])
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):  #12
        print(
            "Episodes %i-%i of %i" %
            (ep_i + 1, ep_i + 1 + config.n_rollout_threads, config.n_episodes))
        obs = env.reset()
        model.prep_rollouts(device='cpu')

        for et_i in range(config.episode_length):  #25
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])),
                         requires_grad=False) for i in range(model.nagents)
            ]

            # get actions as torch Variables
            torch_agent_actions = model.step(torch_obs, explore=True)

            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]

            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions]
                       for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads
                ):  # 100 steps across rollouts -> 4 updates
                model.prep_training(device='cpu')

                for u_i in range(config.num_updates):  #4
                    sample = replay_buffer.sample(config.batch_size)
                    model.update_critic(sample)
                    model.update_policies(sample)
                    model.update_all_targets()

                model.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(config.episode_length *
                                                    config.n_rollout_threads)

        if ep_i % config.save_interval < config.n_rollout_threads:
            model.prep_rollouts(device='cpu')
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            model.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                  (ep_i + 1)))
            model.save(run_dir / 'model.pt')

    model.save(run_dir / 'model.pt')
    env.close()
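
The per-agent/per-environment reshuffling that every rollout loop repeats — np.vstack(obs[:, i]) to batch one agent's observations across threads, then [[ac[i] for ac in agent_actions] for i in range(n_rollout_threads)] to regroup actions per environment — can be checked in isolation. A shape-only sketch with made-up dimensions:

import numpy as np
import torch

n_threads, n_agents, obs_dim, act_dim = 4, 3, 10, 5

# observations as the vectorized env returns them: obs[thread][agent] is a 1-D array
obs = np.empty((n_threads, n_agents), dtype=object)
for e in range(n_threads):
    for a in range(n_agents):
        obs[e, a] = np.random.randn(obs_dim).astype(np.float32)

# batch each agent's observations across threads -> one (n_threads, obs_dim) tensor per agent
torch_obs = [torch.as_tensor(np.vstack(obs[:, i])) for i in range(n_agents)]
assert torch_obs[0].shape == (n_threads, obs_dim)

# per-agent action batches as the policies would return them
agent_actions = [np.random.randn(n_threads, act_dim) for _ in range(n_agents)]
# regroup per environment: actions[e][a] is agent a's action in rollout thread e
actions = [[ac[e] for ac in agent_actions] for e in range(n_threads)]
assert len(actions) == n_threads and len(actions[0]) == n_agents
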
Esempio n. 25
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
                            config.discrete_action)
    
#     model_path = (Path('./models') / config.env_id / config.model_name /
#                   ('run%i' % config.run_num))
#     model_path = model_path / 'model.pt'
#     maddpg = MADDPG.init_runner_from_save(model_path)
    maddpg = MADDPG.init_from_env_with_delay(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim,
                                  delay_step = 1)
    delay_step = 1
    replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                 [obsp.shape[0] + delay_step*2 for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        
        zero_agent_actions = [np.array([0.0, 0.0]) for _ in range(maddpg.nagents)]
        last_agent_actions = [zero_agent_actions for _ in range(delay_step)]
        for a_i, agent_obs in enumerate(obs[0]):
            for _ in range(len(last_agent_actions)):
                obs[0][a_i] = np.append(agent_obs, last_agent_actions[_][a_i])
                
        for et_i in range(config.episode_length):
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(maddpg.nagents)]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            if delay_step == 0:
                actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            else:
                agent_actions_tmp = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)][0]
                # execute the oldest queued joint action and enqueue the new one
                actions = last_agent_actions[0]
                last_agent_actions = last_agent_actions[1:]
                last_agent_actions.append(agent_actions_tmp)
            actions = [actions]
            next_obs, rewards, dones, infos = env.step(actions)
            # append the queued (delayed) actions to each agent's next observation
            for a_i, agent_obs in enumerate(next_obs[0]):
                for _ in range(len(last_agent_actions)):
                    if a_i == 2:
                        next_obs[0][a_i] = np.append(agent_obs, 4*last_agent_actions[_][a_i])
                    else:
                        next_obs[0][a_i] = np.append(agent_obs, 3*last_agent_actions[_][a_i])
            agent_actions[0] = agent_actions[0]*3
            agent_actions[1] = agent_actions[1]*3
            agent_actions[2] = agent_actions[2]*4
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)

            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents - 1): #do not update the runner
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
#                     maddpg.update_all_targets()
                    maddpg.update_adversaries()
                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            # logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)
            logger.add_scalars('agent%i/mean_episode_rewards' % a_i, {'reward': a_ep_rew}, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
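
The snippet above pads each agent's observation with the actions still waiting in the delay queue (np.append per agent), which is also why its ReplayBuffer is built with obsp.shape[0] + delay_step*2 dimensions. On its own the augmentation is plain concatenation, roughly as sketched here with invented sizes:

import numpy as np

obs_dim, act_dim, delay_step, n_agents = 8, 2, 1, 3

agent_obs = [np.random.randn(obs_dim) for _ in range(n_agents)]
# one queued (not yet executed) joint action per delay step
queued_actions = [[np.zeros(act_dim) for _ in range(n_agents)]
                  for _ in range(delay_step)]

augmented = []
for a_i, o in enumerate(agent_obs):
    for step_actions in queued_actions:
        o = np.append(o, step_actions[a_i])   # concatenate the pending action
    augmented.append(o)

# each observation grows by act_dim per delay step, which is what the
# "obsp.shape[0] + delay_step*2" buffer dimensions above account for
assert augmented[0].shape == (obs_dim + delay_step * act_dim,)
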
Esempio n. 26
0
def run(config):
    model_dir = Path('./models') / config.env_id / config.model_name
    if not model_dir.exists():
        curr_run = 'run1'
    else:
        exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
                         model_dir.iterdir() if
                         str(folder.name).startswith('run')]
        if len(exst_run_nums) == 0:
            curr_run = 'run1'
        else:
            curr_run = 'run%i' % (max(exst_run_nums) + 1)
    run_dir = model_dir / curr_run
    log_dir = run_dir / 'logs'
    os.makedirs(log_dir)
    logger = SummaryWriter(str(log_dir))

    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    if not USE_CUDA:
        torch.set_num_threads(config.n_training_threads)
    env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
                            config.discrete_action)
    maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
                                  adversary_alg=config.adversary_alg,
                                  tau=config.tau,
                                  lr=config.lr,
                                  hidden_dim=config.hidden_dim)
    replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
                                 [obsp.shape[0] for obsp in env.observation_space],
                                 [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
                                  for acsp in env.action_space])
    t = 0
    for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
        print("Episodes %i-%i of %i" % (ep_i + 1,
                                        ep_i + 1 + config.n_rollout_threads,
                                        config.n_episodes))
        obs = env.reset()
        # obs.shape = (n_rollout_threads, nagent)(nobs), nobs differs per agent so not tensor
        maddpg.prep_rollouts(device='cpu')

        explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
        maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
        maddpg.reset_noise()

        for et_i in range(config.episode_length):
            env.render()
            # rearrange observations to be per agent, and convert to torch Variable
            torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
                                  requires_grad=False)
                         for i in range(maddpg.nagents)]
            # get actions as torch Variables
            torch_agent_actions = maddpg.step(torch_obs, explore=True)
            # convert actions to numpy arrays
            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            # rearrange actions to be per environment
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)
            replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)
            obs = next_obs
            t += config.n_rollout_threads
            if (len(replay_buffer) >= config.batch_size and
                (t % config.steps_per_update) < config.n_rollout_threads):
                if USE_CUDA:
                    maddpg.prep_training(device='gpu')
                else:
                    maddpg.prep_training(device='cpu')
                for u_i in range(config.n_rollout_threads):
                    for a_i in range(maddpg.nagents):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=USE_CUDA)
                        maddpg.update(sample, a_i, logger=logger)
                    maddpg.update_all_targets()
                maddpg.prep_rollouts(device='cpu')
        ep_rews = replay_buffer.get_average_rewards(
            config.episode_length * config.n_rollout_threads)
        for a_i, a_ep_rew in enumerate(ep_rews):
            logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

        if ep_i % config.save_interval < config.n_rollout_threads:
            os.makedirs(run_dir / 'incremental', exist_ok=True)
            maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
            maddpg.save(run_dir / 'model.pt')

    maddpg.save(run_dir / 'model.pt')
    env.close()
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
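
All of these scripts construct the ReplayBuffer from two lists: one observation dimension and one action dimension per agent, where Box action spaces contribute shape[0] and discrete ones contribute n. The dimension bookkeeping alone looks like this sketch (gym's space classes are assumed; the example spaces are invented):

import numpy as np
from gym.spaces import Box, Discrete

observation_space = [Box(low=-np.inf, high=np.inf, shape=(18,), dtype=np.float32),
                     Box(low=-np.inf, high=np.inf, shape=(16,), dtype=np.float32)]
action_space = [Box(low=-1.0, high=1.0, shape=(5,), dtype=np.float32),
                Discrete(5)]

obs_dims = [obsp.shape[0] for obsp in observation_space]
ac_dims = [acsp.shape[0] if isinstance(acsp, Box) else acsp.n
           for acsp in action_space]

print(obs_dims, ac_dims)   # [18, 16] [5, 5]
# these two lists are what ReplayBuffer(config.buffer_length, nagents, ...) receives above
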
Esempio n. 27
0
def run(config):
    device = torch.device(
        'cuda:' + str(config.gpu) if torch.cuda.is_available() else 'cpu')
    model_dir = Path('./runs') / config.store_result_dir

    train_loader, train_drugs, train_Y = preprocess(config.dataset, config)

    print("number of data")
    print(len(train_loader))
    for it, original_pair in enumerate(train_loader):
        if not model_dir.exists():
            run_num = 1
        else:
            exst_run_nums = [
                int(str(folder.name).split('run')[1])
                for folder in model_dir.iterdir()
                if str(folder.name).startswith('run')
            ]
            if len(exst_run_nums) == 0:
                run_num = 1
            else:
                run_num = max(exst_run_nums) + 1
        curr_run = 'run%i' % run_num
        run_dir = model_dir / curr_run
        log_dir = run_dir / 'logs'
        os.makedirs(log_dir)
        logger = SummaryWriter(str(log_dir))

        torch.manual_seed(run_num)
        np.random.seed(run_num)

        print('Run pair number ', str(it))
        Hyperparams = Args()
        BasePath = './runs/' + config.store_result_dir
        writer = SummaryWriter(BasePath + '/plots')

        original_drug_smile = train_drugs[it]
        original_target_aff = train_Y[it]
        original_drug = original_pair
        original_target = original_pair.target[0]

        print('Original target:')
        print(original_target)
        print('Original molecule:')
        print(original_drug_smile)

        model_to_explain = mol_utils.get_graphdta_dgn().to(device)
        pred_aff, drug_original_encoding, prot_original_encoding = model_to_explain(
            original_drug.to(device),
            seq_cat(original_target).to(device))
        atoms_ = np.unique([
            x.GetSymbol()
            for x in Chem.MolFromSmiles(original_drug_smile).GetAtoms()
        ])
        cof = [1.0, 0.05, 0.01, 0.05]
        env = make_parallel_env(original_drug_smile, original_target,
                                Hyperparams, atoms_, model_to_explain,
                                original_drug, original_target_aff, pred_aff,
                                device, cof)
        model = AttentionSAC.init_from_env(
            env,
            tau=config.tau,
            pi_lr=config.pi_lr,
            q_lr=config.q_lr,
            gamma=config.gamma,
            pol_hidden_dim=config.pol_hidden_dim,
            critic_hidden_dim=config.critic_hidden_dim,
            attend_heads=config.attend_heads,
            reward_scale=config.reward_scale)
        replay_buffer = ReplayBuffer(
            config.buffer_length, model.nagents,
            [obsp[0] for obsp in env.observation_space],
            [acsp for acsp in env.action_space])

        if not os.path.isdir(BasePath + "/counterfacts"):
            os.makedirs(BasePath + "/counterfacts")
        mol_utils.TopKCounterfactualsDTA.init(original_drug_smile, it,
                                              BasePath + "/counterfacts")

        t = 0
        episode_length = 1
        trg = trange(0, config.n_episodes, config.n_rollout_threads)
        for ep_i in trg:
            obs = env.reset()
            model.prep_rollouts(device='cpu')

            for et_i in range(episode_length):
                # rearrange observations to be per agent, and convert to torch Variable
                torch_obs = [
                    Variable(torch.Tensor(np.vstack(obs[:, i])),
                             requires_grad=False) for i in range(model.nagents)
                ]
                # get actions as torch Variables
                torch_agent_actions = model.step(torch_obs, explore=True)
                # convert actions to numpy arrays
                agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
                # rearrange actions to be per environment
                actions = [[ac[i] for ac in agent_actions]
                           for i in range(config.n_rollout_threads)]
                next_obs, results, dones, action_drug, action_prot = env.step(
                    actions)
                drug_reward, loss_, gain, drug_sim, prot_sim, qed = results[0][0]
                prot_reward, loss_, gain, drug_sim, prot_sim, qed = results[0][1]

                writer.add_scalar('DTA/Reward', drug_reward, ep_i)
                writer.add_scalar('DTA/Distance', loss_, ep_i)
                writer.add_scalar('DTA/Drug Similarity', drug_sim, ep_i)
                writer.add_scalar('DTA/Drug QED', qed, ep_i)
                writer.add_scalar('DTA/Protein Similarity', prot_sim, ep_i)

                pair_reward = []
                pair_reward.append(drug_reward)
                pair_reward.append(prot_reward)
                rewards = np.array([pair_reward])
                replay_buffer.push(obs, agent_actions, rewards, next_obs,
                                   dones)
                obs = next_obs
                t += 1
                if (len(replay_buffer) >= config.batch_size
                        and (t % config.steps_per_update) < 1):
                    if config.use_gpu:
                        model.prep_training(device='gpu')
                    else:
                        model.prep_training(device='cpu')
                    for u_i in range(config.num_updates):
                        sample = replay_buffer.sample(config.batch_size,
                                                      to_gpu=config.use_gpu)
                        model.update_critic(sample, logger=logger)
                        model.update_policies(sample, logger=logger)
                        model.update_all_targets()
                    model.prep_rollouts(device='cpu')
                if np.all(dones):
                    mutate_position = [
                        i for i in range(len(original_target))
                        if original_target[i] != action_prot[i]
                    ]
                    trg.set_postfix(Reward=drug_reward,
                                    DrugSim=drug_sim,
                                    TargetSim=prot_sim,
                                    SMILES=action_drug,
                                    TargetMutatePosition=mutate_position,
                                    refresh=True)
                    mol_utils.TopKCounterfactualsDTA.insert({
                        'smiles': action_drug,
                        'protein': action_prot,
                        'drug_reward': drug_reward,
                        'protein_reward': prot_reward,
                        'loss': loss_,
                        'gain': gain,
                        'drug sim': drug_sim,
                        'drug qed': qed,
                        'prot sim': prot_sim,
                        'mutate position': mutate_position
                    })
            ep_rews = replay_buffer.get_average_rewards(episode_length * 1)
            for a_i, a_ep_rew in enumerate(ep_rews):
                logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                                  a_ep_rew * episode_length, ep_i)

            if ep_i % config.save_interval < config.n_rollout_threads:
                model.prep_rollouts(device='cpu')
                os.makedirs(run_dir / 'incremental', exist_ok=True)
                model.save(run_dir / 'incremental' / ('model_ep%i.pt' %
                                                      (ep_i + 1)))
                model.save(run_dir / 'model.pt')

        model.save(run_dir / 'model.pt')
        env.close()
        logger.export_scalars_to_json(str(log_dir / 'summary.json'))
        logger.close()
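
Most examples close an episode by reading per-agent average rewards from the buffer and writing one scalar per agent to TensorBoard, then exporting the scalars to JSON at the end of training. Decoupled from the training code, that logging step is roughly the sketch below (tensorboardX's SummaryWriter is assumed, since export_scalars_to_json comes from that package rather than torch.utils.tensorboard; the reward values are placeholders):

from tensorboardX import SummaryWriter

log_dir = './logs_demo'
logger = SummaryWriter(log_dir)

ep_rews = [-1.23, -0.87, -1.05]   # placeholder per-agent mean episode rewards
ep_i = 0
for a_i, a_ep_rew in enumerate(ep_rews):
    logger.add_scalar('agent%i/mean_episode_rewards' % a_i, a_ep_rew, ep_i)

logger.export_scalars_to_json(log_dir + '/summary.json')
logger.close()
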