Example #1
def train(rank,
          scene_scope,
          task_scope,
          args,
          shared_model,
          counter,
          lock,
          optimizer=None):
    torch.manual_seed(args.seed + rank)

    #env = create_atari_env(args.env_name)
    #env.seed(args.seed + rank)

    env = Environment({
        'scene_name': scene_scope,
        'terminal_state_id': int(task_scope)
    })

    model = ActorCriticFFNetwork(ACTION_SIZE)

    if optimizer is None:
        # TODO: Discount learning rate based on episode length
        optimizer = my_optim.SharedRMSprop(shared_model.parameters(),
                                           lr=args.lr,
                                           alpha=args.alpha,
                                           eps=args.eps)
        optimizer.share_memory()

    model.train()

    env.reset()
    state = torch.from_numpy(env.s_t)
    done = True

    episode_length = 0
    for i in range(int(args.max_episode_length)):
        # Sync with the shared model
        model.load_state_dict(shared_model.state_dict())
        '''
        if done:
            cx = Variable(torch.zeros(1, 256))
            hx = Variable(torch.zeros(1, 256))
        else:
            cx = Variable(cx.data)
            hx = Variable(hx.data)
        '''

        values = []
        log_probs = []
        rewards = []
        entropies = []

        for step in range(args.num_steps):
            print('Thread:', rank, 'step:', step, 'epoch:', i)
            episode_length += 1
            logit, value = model(env.s_t, env.target)
            prob = F.softmax(logit, dim=1)
            log_prob = F.log_softmax(logit, dim=1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies.append(entropy)

            action = prob.multinomial(num_samples=1).data
            log_prob = log_prob.gather(1, Variable(action))

            # Environment.step expects a scalar action index
            # (the test example passes action[0, 0]).
            env.step(action[0, 0])
            #state, reward, done, _ = env.step(action.numpy())
            env.update()
            state = env.s_t
            reward = env.reward
            done = env.terminal

            done = done or episode_length >= args.max_episode_length
            reward = max(min(reward, 1), -1)

            with lock:
                if counter.value % 1000 == 0:
                    print('Now saving data. Please wait.')
                    torch.save(shared_model.state_dict(),
                               CHECKPOINT_DIR + '/' + 'checkpoint.pth.tar')
                counter.value += 1

            if done:
                # counter is already advanced once per step under the lock
                # above, so it is not incremented again here.
                episode_length = 0
                if env.terminal:
                    print('Task completed')
                env.reset()
                state = env.s_t

            state = torch.from_numpy(state)
            values.append(value)
            log_probs.append(log_prob)
            rewards.append(reward)

            if done:
                break

        R = torch.zeros(1, 1)
        if not done:
            _, value = model(env.s_t, env.target)
            R = value.data

        values.append(Variable(R))
        policy_loss = 0
        value_loss = 0
        R = Variable(R)
        gae = torch.zeros(1, 1)
        # Walk the rollout backwards; use `t` rather than `i` so the outer
        # episode counter is not shadowed.
        for t in reversed(range(len(rewards))):
            R = args.gamma * R + rewards[t]
            advantage = R - values[t]
            value_loss = value_loss + 0.5 * advantage.pow(2)

            # Generalized Advantage Estimation:
            #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
            #   gae_t   = delta_t + gamma * tau * gae_{t+1}
            delta_t = rewards[t] + args.gamma * \
                values[t + 1].data - values[t].data
            gae = gae * args.gamma * args.tau + delta_t

            policy_loss = policy_loss - \
                log_probs[t] * Variable(gae) - args.entropy_coef * entropies[t]

        optimizer.zero_grad()

        (policy_loss + args.value_loss_coef * value_loss).backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

        ensure_shared_grads(model, shared_model)
        optimizer.step()
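
# The training loop above calls `ensure_shared_grads(model, shared_model)`,
# which is not shown in this listing. Below is a minimal sketch in the usual
# pytorch-a3c style, assuming the standard behaviour: attach the worker's
# gradients to the shared model exactly once per update.
def ensure_shared_grads(model, shared_model):
    for param, shared_param in zip(model.parameters(),
                                   shared_model.parameters()):
        if shared_param.grad is not None:
            # Gradients were already attached for this step.
            return
        shared_param._grad = param.grad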
Example #2
def test(rank, scene_scope, task_scope, args, shared_model, counter):
    torch.manual_seed(args.seed + rank)
    
    env = Environment({
        'scene_name': scene_scope,
        'terminal_state_id': int(task_scope)
    })
    
    model = ActorCriticFFNetwork(ACTION_SIZE)

    model.eval()

    height, width, layers = env.observation.shape
    # fourcc -1: let OpenCV choose a codec (platform-dependent)
    video = cv2.VideoWriter('video/' + task_scope + '.mp4', -1, 1,
                            (width, height))

    env.reset()
    state = torch.from_numpy(env.s_t)
    reward_sum = 0
    done = True

    start_time = time.time()

    # a quick hack to prevent the agent from getting stuck
    actions = deque(maxlen=100)
    episode_length = 0

    img = cv2.cvtColor(env.observation, cv2.COLOR_BGR2RGB)
    video.write(img)
    for i in range(100):
        episode_length += 1
        # Sync with the shared model
        if done:
            model.load_state_dict(shared_model.state_dict())

        logit, value = model(env.s_t, env.target)
        prob = F.softmax(logit, dim=1)
        action = prob.max(1, keepdim=True)[1].data.numpy()
        env.step(action[0, 0])
        env.update()        
        img = cv2.cvtColor(env.observation, cv2.COLOR_BGR2RGB)
        video.write(img)
        
        reward = env.reward
        state = env.s_t
        done = env.terminal
        print(env.terminal_state_id, env.current_state_id)
        done = done or episode_length >= args.max_episode_length
        reward_sum += reward

        # a quick hack to prevent the agent from getting stuck
        actions.append(action[0, 0])
        if actions.count(actions[0]) == actions.maxlen:
            done = True

        if done:
            print("Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}".format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - start_time)),
                counter.value, counter.value / (time.time() - start_time),
                reward_sum, episode_length))
            reward_sum = 0
            episode_length = 0
            actions.clear()
            env.reset()
            state = env.s_t
            break

        state = torch.from_numpy(state)
    cv2.destroyAllWindows()
    video.release()
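
# `Environment` is imported from elsewhere in the project. Judging only by how
# it is used in these examples, it is assumed to expose roughly the interface
# sketched below; the attribute and method names come from the calls above,
# and everything else is a placeholder rather than the real class.
class EnvironmentInterface:
    s_t: "numpy.ndarray"          # current state representation fed to the model
    target: "numpy.ndarray"       # target embedding for target-driven navigation
    observation: "numpy.ndarray"  # raw RGB frame (H, W, 3), used for the video
    reward: float                 # reward of the last transition
    terminal: bool                # True once the terminal state is reached
    current_state_id: int
    terminal_state_id: int

    def reset(self): ...          # start a new episode
    def step(self, action): ...   # queue an action (scalar index)
    def update(self): ...         # commit the step; refresh s_t / reward / terminal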
Example #3
    ''' 
    p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
    p.start()
    processes.append(p)
    '''
    branches = []
    for scene in scene_scopes:
        for task in list_of_tasks[scene]:
            branches.append((scene, task))
    NUM_TASKS = len(branches)

    checkpoint_path = os.path.join(CHECKPOINT_DIR, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_path,
                                map_location=lambda storage, loc: storage)
        # set global step
        shared_model.load_state_dict(checkpoint)
        print("Model loaded")
    else:
        print("Could not find old checkpoint")

    for rank in range(0, args.num_processes):
        scene, task = branches[rank % NUM_TASKS]
        p = mp.Process(target=test,
                       args=(rank, scene, task, args, shared_model, counter))
        #p = mp.Process(target=train, args=(rank ,scene, task, args, shared_model, counter, lock, optimizer))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
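
    # This example is a fragment from the launcher script: `shared_model`,
    # `counter`, `lock`, `optimizer` and `processes` are created earlier in
    # that script. A minimal sketch of that setup, assuming the usual
    # pytorch-a3c conventions (the exact original code is not in this listing):
    #
    #     shared_model = ActorCriticFFNetwork(ACTION_SIZE)
    #     shared_model.share_memory()           # weights visible to all workers
    #     optimizer = my_optim.SharedRMSprop(shared_model.parameters(),
    #                                        lr=args.lr, alpha=args.alpha,
    #                                        eps=args.eps)
    #     optimizer.share_memory()
    #     counter = mp.Value('i', 0)            # shared global step counter
    #     lock = mp.Lock()
    #     processes = []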