Example #1
def evaluate(args, loader, generator, num_samples):
    # Best-of-num_samples evaluation: sample num_samples trajectories per
    # batch and keep the minimum ADE/FDE per sequence.
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                pred_traj_fake_rel = generator(
                    obs_traj, obs_traj_rel, seq_start_end
                )
                pred_traj_fake = relative_to_abs(
                    pred_traj_fake_rel, obs_traj[-1]
                )
                ade.append(displacement_error(
                    pred_traj_fake, pred_traj_gt, mode='raw'
                ))
                fde.append(final_displacement_error(
                    pred_traj_fake[-1], pred_traj_gt[-1], mode='raw'
                ))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        return ade, fde
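
The evaluate function above depends on an evaluate_helper that reduces the per-sample errors to a best-of-num_samples score per sequence. A minimal sketch of such a helper, assuming each entry of ade/fde is a 'raw' tensor of shape (num_peds,) as produced above (an illustrative reconstruction, not necessarily the exact helper used by this codebase):

import torch

def evaluate_helper(error, seq_start_end):
    # error: list of num_samples tensors, each of shape (num_peds,)
    error = torch.stack(error, dim=1)  # (num_peds, num_samples)
    total = 0
    for start, end in seq_start_end:
        _error = error[start.item():end.item()]  # peds in one sequence
        _error = torch.sum(_error, dim=0)        # total error per sample
        total += torch.min(_error)               # keep the best sample
    return total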
Example #2
    def predict(self, obs_traj, pred_traj_gt, obs_traj_rel, seq_start_end):
        pred_traj_fake_rel = self.generator(
            obs_traj, obs_traj_rel, seq_start_end
        )

        pred_traj_fake = relative_to_abs(
            pred_traj_fake_rel, obs_traj[-1]
        )

        ade = cal_ade(pred_traj_gt, pred_traj_fake)
        fde = cal_fde(pred_traj_gt, pred_traj_fake)

        pred_traj_fake = pred_traj_fake.detach().cpu().numpy()
        ade = ade.detach().cpu().numpy()
        fde = fde.detach().cpu().numpy()

        return pred_traj_fake, ade, fde
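
Every example converts the generator's relative displacements back to absolute coordinates via relative_to_abs, seeded with the last observed position obs_traj[-1]. A plausible sketch, assuming the (seq_len, batch, 2) layout implied by that indexing:

import torch

def relative_to_abs(rel_traj, start_pos):
    # rel_traj: (seq_len, batch, 2) relative displacements
    # start_pos: (batch, 2) last observed absolute positions
    rel_traj = rel_traj.permute(1, 0, 2)              # (batch, seq_len, 2)
    displacement = torch.cumsum(rel_traj, dim=1)      # running offsets
    abs_traj = displacement + start_pos.unsqueeze(1)  # shift to absolute
    return abs_traj.permute(1, 0, 2)                  # back to (seq_len, batch, 2)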
Example #3
def discriminator_step(args, batch, generator, discriminator, d_loss_fn,
                       optimizer_d):
    if args.use_gpu == 1:
        batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
     loss_mask, seq_start_end) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)

    pred_traj_fake_rel = generator_out
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

    traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
    traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
    scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)

    # Adversarial loss on real vs. fake trajectories
    data_loss = d_loss_fn(scores_real, scores_fake)
    losses['D_data_loss'] = data_loss.item()
    loss += data_loss
    losses['D_total_loss'] = loss.item()

    writer.add_scalar('D_data_loss',
                      losses['D_data_loss'],
                      global_step=global_step)
    writer.add_scalar('D_total_loss',
                      losses['D_total_loss'],
                      global_step=global_step)

    optimizer_d.zero_grad()
    loss.backward()
    if args.clipping_threshold_d > 0:
        nn.utils.clip_grad_norm_(discriminator.parameters(),
                                 args.clipping_threshold_d)
    optimizer_d.step()

    return losses
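
d_loss_fn is injected by the caller; for a vanilla GAN it would be binary cross-entropy pushing real scores toward 1 and fake scores toward 0. A minimal sketch under that assumption (the caller's actual loss may differ, e.g. with label smoothing or a gradient penalty):

import torch
import torch.nn.functional as F

def gan_d_loss(scores_real, scores_fake):
    # Discriminator: classify real trajectories as 1, generated ones as 0.
    loss_real = F.binary_cross_entropy_with_logits(
        scores_real, torch.ones_like(scores_real))
    loss_fake = F.binary_cross_entropy_with_logits(
        scores_fake, torch.zeros_like(scores_fake))
    return loss_real + loss_fake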
Example #4
    def check_accuracy(self, loader_type='test', loader=None, limit=False):
        if loader_type == 'spec':
            if loader is None:
                raise ValueError('loader is not defined')
        elif loader_type == 'test':
            loader = self.test_loader
        else:
            loader = self.loader

        args = self.args
        metrics = {}
        disp_error, f_disp_error = [], []
        total_traj = 0
        loss_mask_sum = 0
        with torch.no_grad():
            for batch in loader:
                if args.use_gpu == 1:
                    batch = [tensor.cuda() for tensor in batch]
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
                 non_linear_ped, loss_mask, seq_start_end) = batch
                linear_ped = 1 - non_linear_ped
                loss_mask = loss_mask[:, args.obs_len:]

                pred_traj_fake_rel = self.generator(
                    obs_traj, obs_traj_rel, seq_start_end
                )
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

                ade = cal_ade(pred_traj_gt, pred_traj_fake)
                fde = cal_fde(pred_traj_gt, pred_traj_fake)

                disp_error.append(ade.item())
                f_disp_error.append(fde.item())

                total_traj += pred_traj_gt.size(1)
                if limit and total_traj >= args.num_samples_check:
                    break

        metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
        metrics['fde'] = sum(f_disp_error) / total_traj

        return metrics
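
cal_ade and cal_fde are not shown here; given the normalisation at the end of check_accuracy (division by total_traj * pred_len and by total_traj), they presumably return summed Euclidean errors. A sketch consistent with that, assuming (pred_len, batch, 2) tensors:

import torch

def cal_ade(pred_traj_gt, pred_traj_fake):
    # Summed Euclidean error over all timesteps and pedestrians.
    return torch.norm(pred_traj_fake - pred_traj_gt, dim=2).sum()

def cal_fde(pred_traj_gt, pred_traj_fake):
    # Summed Euclidean error at the final timestep.
    return torch.norm(pred_traj_fake[-1] - pred_traj_gt[-1], dim=1).sum()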
Example #5
    def infer(self):
        with torch.no_grad():
            for batch in self.loader:
                if self.use_cuda == 1:
                    batch = [tensor.cuda() for tensor in batch]
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
                 non_linear_ped, loss_mask, seq_start_end) = batch

                # pred_traj_fake_rel: (pred_len, batch, 2), e.g. [8, 4, 2]
                pred_traj_fake_rel = self.generator(
                    obs_traj, obs_traj_rel, seq_start_end
                )

                pred_traj_fake = relative_to_abs(
                    pred_traj_fake_rel, obs_traj[-1]
                )

                obs_traj = obs_traj.cpu().numpy()
                pred_traj_fake = pred_traj_fake.cpu().numpy()
                pred_traj_gt = pred_traj_gt.cpu().numpy()

                # Only the first batch is processed; return immediately.
                return obs_traj, pred_traj_fake, pred_traj_gt
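
Because infer returns inside the loop, a caller receives NumPy arrays for the first batch only. A hypothetical usage (model stands for any object exposing this method), computing a mean displacement error directly in NumPy:

import numpy as np

obs_traj, pred_traj_fake, pred_traj_gt = model.infer()
# Each array has shape (seq_len, num_peds, 2).
ade = np.linalg.norm(pred_traj_fake - pred_traj_gt, axis=2).mean()
print('mean displacement error over first batch: %.4f' % ade)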
Example #6
def check_accuracy(args,
                   loader,
                   generator,
                   discriminator,
                   d_loss_fn,
                   lr_scheduler_g,
                   lr_scheduler_d,
                   limit=False,
                   is_train=False):
    d_losses = []
    metrics = {}
    # Note: ([], ) * n would bind the same list object n times, so the
    # accumulators must be created separately.
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    loss_mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            if args.use_gpu == 1:
                batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch
            linear_ped = 1 - non_linear_ped
            loss_mask = loss_mask[:, args.obs_len:]

            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                           seq_start_end)
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

            g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
                pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                pred_traj_fake_rel, loss_mask)
            ade = cal_ade(pred_traj_gt, pred_traj_fake)

            fde = cal_fde(pred_traj_gt, pred_traj_fake)

            traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
            traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
            traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel],
                                      dim=0)

            scores_fake = discriminator(traj_fake, traj_fake_rel,
                                        seq_start_end)
            scores_real = discriminator(traj_real, traj_real_rel,
                                        seq_start_end)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            f_disp_error.append(fde.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()
            if limit and total_traj >= args.num_samples_check:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj

    if is_train:
        writer.add_scalar('train_d_loss', metrics['d_loss'], global_step=t)
        writer.add_scalar('train_g_l2_loss_abs',
                          metrics['g_l2_loss_abs'],
                          global_step=t)
        writer.add_scalar('train_g_l2_loss_rel',
                          metrics['g_l2_loss_rel'],
                          global_step=t)
        writer.add_scalar('train_ade', metrics['ade'], global_step=t)
        writer.add_scalar('train_fde', metrics['fde'], global_step=t)
    else:
        writer.add_scalar('val_d_loss', metrics['d_loss'], global_step=t)
        writer.add_scalar('val_g_l2_loss_abs',
                          metrics['g_l2_loss_abs'],
                          global_step=t)
        writer.add_scalar('val_g_l2_loss_rel',
                          metrics['g_l2_loss_rel'],
                          global_step=t)
        writer.add_scalar('val_ade', metrics['ade'], global_step=t)
        writer.add_scalar('val_fde', metrics['fde'], global_step=t)

        if args.lr_scheduler and t > 2000:
            lr_scheduler_d.step(metrics['ade'])
            lr_scheduler_g.step(metrics['ade'])

    generator.train()
    return metrics
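
cal_l2_losses bundles the masked L2 losses in absolute and relative coordinates. A sketch of what it might look like, assuming the same l2_loss helper that generator_step below calls with mode='raw', here used with a 'sum' reduction:

def cal_l2_losses(pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                  pred_traj_fake_rel, loss_mask):
    # Masked squared error in absolute and relative coordinates.
    g_l2_loss_abs = l2_loss(pred_traj_fake, pred_traj_gt,
                            loss_mask, mode='sum')
    g_l2_loss_rel = l2_loss(pred_traj_fake_rel, pred_traj_gt_rel,
                            loss_mask, mode='sum')
    return g_l2_loss_abs, g_l2_loss_rel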
Example #7
def generator_step(args, batch, generator, discriminator, g_loss_fn,
                   optimizer_g):
    if args.use_gpu == 1:
        batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
     loss_mask, seq_start_end) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    loss_mask = loss_mask[:, args.obs_len:]

    for _ in range(args.best_k):
        generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if args.l2_loss_weight > 0:
            g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(
                pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='raw'))

    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if args.l2_loss_weight > 0:
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
                loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel

        writer.add_scalar('G_l2_loss_rel',
                          losses['G_l2_loss_rel'],
                          global_step=t)

    if args.discriminator_weight > 0:
        traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
        traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)

        scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
        discriminator_loss = g_loss_fn(scores_fake)

        loss += args.discriminator_weight * discriminator_loss
        losses['G_discriminator_loss'] = discriminator_loss.item()
        writer.add_scalar('G_discriminator_loss',
                          losses['G_discriminator_loss'],
                          global_step=t)

    losses['G_total_loss'] = loss.item()
    writer.add_scalar('G_total_loss', losses['G_total_loss'], global_step=t)

    optimizer_g.zero_grad()
    loss.backward()
    if args.clipping_threshold_g > 0:
        nn.utils.clip_grad_norm_(generator.parameters(),
                                 args.clipping_threshold_g)
    optimizer_g.step()

    return losses
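
g_loss_fn scores only the generated trajectories; for a vanilla GAN generator the target is 1, i.e. the generator is rewarded for fooling the discriminator. A minimal sketch under that assumption:

import torch
import torch.nn.functional as F

def gan_g_loss(scores_fake):
    # Generator: make the discriminator label fakes as real.
    return F.binary_cross_entropy_with_logits(
        scores_fake, torch.ones_like(scores_fake))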