Example #1
# Dependencies used below; ActorCritic, env_wrapper and ensure_shared_grads
# come from the surrounding project and are not shown here. The code uses the
# pre-0.4 PyTorch Variable API.
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable


def train(rank, args, shared_model, optimizer=None):

    # Defined but not used below; the inverse and forward ICM losses are
    # computed manually in the update step.
    mse_loss = torch.nn.MSELoss()
    nll_loss = torch.nn.NLLLoss()

    torch.manual_seed(args.seed + rank)

    env = env_wrapper.create_doom(args.record, outdir=args.outdir)
    num_outputs = env.action_space.n
    model = ActorCritic(env.observation_space.shape[0], env.action_space)

    if optimizer is None:
        optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)

    model.train()

    state = env.reset()
    state = torch.from_numpy(state)
    done = True

    episode_length = 0

    while True:
        episode_length += 1
        # Sync with the shared model
        model.load_state_dict(shared_model.state_dict())
        if done:
            cx = Variable(torch.zeros(1, 256))
            hx = Variable(torch.zeros(1, 256))
        else:
            cx = Variable(cx.data)
            hx = Variable(hx.data)
        values = []
        log_probs = []
        rewards = []
        entropies = []

        inverses = []
        forwards = []
        actions = []
        vec_st1s = []

        for step in range(args.num_steps):
            value, logit, (hx, cx) = model(
                (Variable(state.unsqueeze(0)), (hx, cx)), icm=False)
            s_t = state
            prob = F.softmax(logit)
            log_prob = F.log_softmax(logit)
            entropy = -(log_prob * prob).sum(1)
            entropies.append(entropy)

            action = prob.multinomial().data
            log_prob = log_prob.gather(1, Variable(action))

            oh_action = torch.Tensor(1, num_outputs)
            oh_action.zero_()
            oh_action.scatter_(1, action, 1)
            oh_action = Variable(oh_action)
            a_t = oh_action
            actions.append(oh_action)

            state, reward, done, _ = env.step(action.numpy()[0][0])
            state = torch.from_numpy(state)

            done = done or episode_length >= args.max_episode_length
            reward = max(min(reward, 1), -1)
            s_t1 = state
            vec_st1, inverse, forward = model(
                (Variable(s_t.unsqueeze(0)), Variable(s_t1.unsqueeze(0)), a_t),
                icm=True)

            reward_intrinsic = args.eta * (
                (vec_st1 - forward).pow(2)).sum(1) / 2.
            #reward_intrinsic = args.eta * ((vec_st1 - forward).pow(2)).sum(1).sqrt() / 2.
            reward_intrinsic = reward_intrinsic.data.numpy()[0][0]
            reward += reward_intrinsic

            if done:
                episode_length = 0
                state = env.reset()
                state = torch.from_numpy(state)
            values.append(value)
            log_probs.append(log_prob)
            rewards.append(reward)
            vec_st1s.append(vec_st1)
            inverses.append(inverse)
            forwards.append(forward)

            if done:
                break

        R = torch.zeros(1, 1)
        if not done:
            value, _, _ = model((Variable(state.unsqueeze(0)), (hx, cx)),
                                icm=False)
            R = value.data

        values.append(Variable(R))
        policy_loss = 0
        value_loss = 0
        inverse_loss = 0
        forward_loss = 0

        R = Variable(R)
        gae = torch.zeros(1, 1)
        for i in reversed(range(len(rewards))):
            R = args.gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)

            # Generalized Advantage Estimation
            delta_t = rewards[i] + args.gamma * \
                values[i + 1].data - values[i].data
            gae = gae * args.gamma * args.tau + delta_t

            policy_loss = policy_loss - \
                log_probs[i] * Variable(gae) - 0.01 * entropies[i]

            cross_entropy = -(actions[i] *
                              torch.log(inverses[i] + 1e-15)).sum(1)
            inverse_loss = inverse_loss + cross_entropy
            forward_err = forwards[i] - vec_st1s[i]
            forward_loss = forward_loss + 0.5 * (forward_err.pow(2)).sum(1)

        optimizer.zero_grad()

        ((1 - args.beta) * inverse_loss +
         args.beta * forward_loss).backward(retain_graph=True)
        (args.lmbda * (policy_loss + 0.5 * value_loss)).backward()

        #(((1-args.beta) * inverse_loss + args.beta * forward_loss) + args.lmbda * (policy_loss + 0.5 * value_loss)).backward()

        torch.nn.utils.clip_grad_norm(model.parameters(), 40)

        ensure_shared_grads(model, shared_model)
        optimizer.step()
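
The loop above calls ensure_shared_grads, which is not part of this example. A minimal sketch of that helper as it typically appears in A3C implementations (the project's own version may differ):

def ensure_shared_grads(model, shared_model):
    # Make the shared model's .grad tensors reference the local worker's
    # gradients (no copy); once they are set, there is nothing left to do.
    for param, shared_param in zip(model.parameters(),
                                   shared_model.parameters()):
        if shared_param.grad is not None:
            return
        shared_param._grad = param.grad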
Example #2
parser.add_argument('--beta', type=float, default=0.2, metavar='B',
                    help='balance between the inverse and forward losses')
parser.add_argument('--lmbda', type=float, default=0.1, metavar='L',
                    help='lambda: balance between the A3C and ICM losses')

parser.add_argument('--outdir', default="../output", help='Output log directory')
parser.add_argument('--record', action='store_true', help="Record the policy running video")



if __name__ == '__main__':
    args = parser.parse_args()
    torch.manual_seed(args.seed)

    #env = create_atari_env(args.env_name)
    env = env_wrapper.create_doom(args.record, outdir=args.outdir)
    shared_model = ActorCritic(
        env.observation_space.shape[0], env.action_space).cuda()
    shared_model.share_memory()

    if args.no_shared:
        optimizer = None
    else:
        optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
        optimizer.share_memory()

    # Debug mode: run a single training worker in the current process
    # instead of spawning separate processes (see the sketch below).
    debug = True

    if debug:
        train(0, args, shared_model, optimizer)
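
With debug set to True the snippet trains in a single process. A hedged sketch of the usual multi-process A3C launch that would replace the debug branch, assuming an args.num_processes flag (not shown in the parser fragment above):

import torch.multiprocessing as mp

# One evaluation process plus args.num_processes training workers, all
# reading and writing the same shared_model and shared optimizer.
processes = []
p = mp.Process(target=test, args=(args.num_processes, args, shared_model))
p.start()
processes.append(p)
for rank in range(args.num_processes):
    p = mp.Process(target=train, args=(rank, args, shared_model, optimizer))
    p.start()
    processes.append(p)
for p in processes:
    p.join()

Note that sharing a CUDA-resident model across processes, as in the main block above, would also require the 'spawn' start method (mp.set_start_method('spawn')).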
Example #3
# Dependencies used below; ActorCritic and env_wrapper come from the
# surrounding project and are not shown here.
import pickle
import time
from collections import deque

import torch
import torch.nn.functional as F
from torch.autograd import Variable


def test(rank, args, shared_model):
    torch.manual_seed(args.seed + rank)
    env = env_wrapper.create_doom(args.record, outdir=args.outdir)

    model = ActorCritic(env.observation_space.shape[0], env.action_space)

    model.eval()
    state = env.reset()
    state = torch.from_numpy(state)
    reward_sum = 0
    done = True

    start_time = time.time()

    # a quick hack to prevent the agent from getting stuck
    actions = deque(maxlen=2100)
    episode_length = 0
    result = []

    while True:
        episode_length += 1
        # Sync with the shared model
        if done:
            model.load_state_dict(shared_model.state_dict())
            cx = Variable(torch.zeros(1, 256), volatile=True)
            hx = Variable(torch.zeros(1, 256), volatile=True)
        else:
            cx = Variable(cx.data, volatile=True)
            hx = Variable(hx.data, volatile=True)

        value, logit, (hx, cx) = model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)), icm=False)

        prob = F.softmax(logit)
        action = prob.max(1)[1].data.numpy()

        state, reward, done, _ = env.step(action[0, 0])
        state = torch.from_numpy(state)

        done = done or episode_length >= args.max_episode_length
        reward_sum += reward

        # a quick hack to prevent the agent from getting stuck
        actions.append(action[0, 0])
        if actions.count(actions[0]) == actions.maxlen:
            done = True

        if done:
            end_time = time.time()
            print("Time {}, episode reward {}, episode length {}".format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(end_time - start_time)), reward_sum,
                episode_length))
            result.append((reward_sum, end_time - start_time))
            # Pickle requires a binary-mode file handle.
            with open('output/result.pickle', 'wb') as f:
                pickle.dump(result, f)
            torch.save(model.state_dict(), 'output/{}.pth'.format(
                (end_time - start_time)))

            reward_sum = 0
            episode_length = 0
            actions.clear()
            state = env.reset()
            state = torch.from_numpy(state)
            time.sleep(60)
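
Each finished episode dumps the network weights to output/<elapsed-seconds>.pth. A minimal sketch of reloading one of those checkpoints for later evaluation; the filename is a placeholder and args is assumed to be the parsed arguments from Example #2:

# Rebuild the network with the same constructor arguments used above,
# then load a saved checkpoint (the filename below is a placeholder).
env = env_wrapper.create_doom(False, outdir=args.outdir)
model = ActorCritic(env.observation_space.shape[0], env.action_space)
model.load_state_dict(torch.load('output/123.456.pth'))
model.eval()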