Example #1
import os

import numpy as np
from tqdm import tqdm

# encoder, device, opt, and the helpers to_device / check_shape / set_mute
# come from the surrounding script.
train_samples = iter(train_loader)  # used below; assumes a train_loader symmetric to eval_loader
eval_samples = iter(eval_loader)

num_points = 400
file_dir = 'ours_fix_alpha_' + str(num_points)
os.makedirs(file_dir, exist_ok=True)

# Preallocate one row per sample: num_points batches from each of the
# training and evaluation sets.
feature_list = np.zeros((num_points * opt.batch_size * 2, opt.vector_dim))

for total_step in tqdm(range(num_points)):
    batch = next(train_samples)
    to_device(batch, device)
    feature = encoder(batch['img'], batch['v_0'])
    check_shape(feature, 'feature')
    set_mute(True)
    # Fill this batch's rows of the preallocated feature matrix.
    feature_list[total_step * opt.batch_size:(total_step + 1) *
                 opt.batch_size] = feature.detach().cpu().numpy()
label = [0] * (num_points * opt.batch_size)  # 0 marks training samples

for total_step in tqdm(range(num_points, 2 * num_points)):
    batch = next(eval_samples)
    to_device(batch, device)
    feature = encoder(batch['img'], batch['v_0'])
    check_shape(feature, 'feature')
    set_mute(True)
    feature_list[total_step * opt.batch_size:(total_step + 1) *
                 opt.batch_size] = feature.detach().cpu().numpy()
label.extend([1] * (num_points * opt.batch_size))  # 1 marks evaluation samples

np_feature_list = np.array(feature_list)
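
The preallocated features and 0/1 train/eval labels are set up for a 2-D embedding plot. A minimal sketch of that downstream step using scikit-learn's t-SNE; the plot styling and the save path under file_dir are illustrative assumptions, not part of the original:

from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

# Project the high-dimensional features to 2-D to compare the
# train (label 0) and eval (label 1) feature distributions.
embedded = TSNE(n_components=2).fit_transform(np_feature_list)
labels = np.array(label)

plt.scatter(embedded[labels == 0, 0], embedded[labels == 0, 1], s=2, label='train')
plt.scatter(embedded[labels == 1, 0], embedded[labels == 1, 1], s=2, label='eval')
plt.legend()
plt.savefig(os.path.join(file_dir, 'tsne.png'))  # file_dir created above; assumed output location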
Example #2
import numpy as np
import torch


def eval_decision(logger, total_steps, algorithm='WCM'):
    """Optimize a trajectory against the model ensemble and log errors.

    Relies on module-level state from the surrounding script: eval_samples,
    device, model_cluster, opt, and the interpolation helper.
    """
    batch = next(eval_samples)
    to_device(batch, device)
    check_shape(batch)
    set_mute(True)

    # Start the plan at the decoder's base-distribution mean and optimize it directly.
    x = model_cluster[0]._decoder._base_dist.mean.clone().detach().view(-1, opt.points_num, 2)
    x.requires_grad = True
    # One latent code per ensemble member, conditioned on the current observation.
    zs = [model._params(
        velocity=batch['v_0'].view(-1, 1),
        visual_features=batch['img'],
    ) for model in model_cluster]

    optimizer = torch.optim.Adam(params=[x], lr=1e-1)

    x_best = x.detach().clone()
    loss_best = torch.full((), 1000.0, device=x.device)
    for _ in range(50):
        optimizer.zero_grad()
        y, _ = model_cluster[0]._decoder._forward(x=x, z=zs[0])

        # Score the candidate plan under every ensemble member.
        imitation_posteriors = []
        for model, z in zip(model_cluster, zs):
            _, log_prob, logabsdet = model._decoder._inverse(y=y, z=z)
            imitation_posteriors.append(torch.mean(log_prob - logabsdet))

        imitation_posteriors = torch.stack(imitation_posteriors, dim=0)

        if algorithm == "WCM":
            loss, _ = torch.min(-imitation_posteriors, dim=0)
        elif algorithm == "BCM":
            loss, _ = torch.max(-imitation_posteriors, dim=0)
        else:
            loss = torch.mean(-imitation_posteriors, dim=0)

        loss.backward(retain_graph=True)
        optimizer.step()
        # Track the best plan seen; detach so the snapshot drops the autograd graph.
        if loss < loss_best:
            x_best = x.detach().clone()
            loss_best = loss.detach().clone()

    # Decode the best plan and compare against ground truth, both scaled to meters.
    plan, _ = model_cluster[0]._decoder._forward(x=x_best, z=zs[0])
    xy = plan.detach().cpu().numpy()[0] * opt.max_dist
    real_xy = batch['xy'].view(-1, 2).detach().cpu().numpy() * opt.max_dist

    fake_x = xy[:, 0]
    fake_y = xy[:, 1]
    real_x = real_xy[:, 0]
    real_y = real_xy[:, 1]
    time = batch['t'].detach().cpu().numpy()[0] * opt.max_t
    # time_list = [0.0, 0.75, 1.5, 2.25]  # oxford training times
    time_list = [0.0000, 0.1875, 0.3750, 0.5625, 0.7500, 0.9375,
                 1.1250, 1.3125, 1.5001, 1.6875, 1.8750, 2.0625,
                 2.2501, 2.4376, 2.6250, 2.8126]
    # Resample the predicted trajectory at the ground-truth timestamps.
    xs = []
    ys = []
    for t in time:
        x, y = interpolation(time_list, fake_x, fake_y, t)
        xs.append(x)
        ys.append(y)
    fake_x = np.array(xs)
    fake_y = np.array(ys)
    # Optional plotting of real vs. predicted trajectories:
    # max_x = 30.
    # max_y = 30.
    # fig = plt.figure(figsize=(7, 7))
    # ax1 = fig.add_subplot(111)
    # ax1.plot(real_x, real_y, label='real-trajectory', color='b', linewidth=3, linestyle='--')
    # ax1.plot(fake_x, fake_y, label='fake-trajectory', color='r', linewidth=3)
    # ax1.set_xlabel('Forward/(m)')
    # ax1.set_ylabel('Sideways/(m)')
    # ax1.set_xlim([0., max_x])
    # ax1.set_ylim([-max_y/2, max_y/2])
    # plt.legend(loc='lower right')
    # plt.savefig('result/output/%s/' % opt.dataset_name + '/' + str(total_steps) + '.png')
    # plt.close('all')
    # Displacement errors against the ground-truth trajectory.
    ex = np.mean(np.abs(fake_x - real_x))
    ey = np.mean(np.abs(fake_y - real_y))
    fde = np.hypot(fake_x - real_x, fake_y - real_y)[-1]   # final displacement error
    ade = np.mean(np.hypot(fake_x - real_x, fake_y - real_y))  # average displacement error

    logger.add_scalar('eval/ex', ex.item(), total_steps)
    logger.add_scalar('eval/ey', ey.item(), total_steps)
    logger.add_scalar('eval/fde', fde.item(), total_steps)
    logger.add_scalar('eval/ade', ade.item(), total_steps)
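
The interpolation helper called above is not included in this snippet. A minimal linear-interpolation sketch matching the call signature (time_list, xs, ys, t) -> (x, y); the body is an assumption based on the call site, and the original implementation may differ:

def interpolation(time_list, xs, ys, t):
    # Clamp the query time into the sampled range, then linearly
    # interpolate each coordinate independently.
    t = np.clip(t, time_list[0], time_list[-1])
    x = np.interp(t, time_list, xs)
    y = np.interp(t, time_list, ys)
    return x, y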