Example #1
def evaluate_baseline(args, loader, model, num_samples):
    ade_outer, fde_outer, miss_rate_outer, mean_l2_outer, best_l2_outer, max_l2_outer = [], [], [], [], [], []
    total_traj = 0
    threshold = 3
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, maps, dnames) = batch

            ade, fde, l2, losses = [], [], [], []
            total_traj += pred_traj_gt.size(1)

            for idx in range(num_samples):
                if args.model == 'vrnn':
                    kld_loss, nll_loss, _, h = model(obs_traj_rel.cuda(),
                                                     obs_traj[0])
                    loss = kld_loss + nll_loss
                elif args.model == 'rnn':
                    loss, _, h = model(obs_traj_rel.cuda())

                sample_traj_rel = model.sample(args.pred_len,
                                               obs_traj_rel.size(1),
                                               obs_traj[-1], dnames, h)
                sample_traj = relative_to_abs(sample_traj_rel, obs_traj[-1])
                ade.append(
                    displacement_error(sample_traj,
                                       pred_traj_gt.cpu(),
                                       mode='raw'))
                fde.append(
                    final_displacement_error(sample_traj[-1],
                                             pred_traj_gt[-1].cpu(),
                                             mode='raw'))
                # sample_traj is already absolute; wrapping it in
                # relative_to_abs again would cumulatively sum coordinates.
                l2.append(
                    l2_loss(sample_traj, pred_traj_gt.cpu(),
                            loss_mask[:, args.obs_len:]))
                losses.append(loss)

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            miss_rate_outer.append(miss_rate(losses, threshold))
            mean_l2_outer.append(torch.mean(torch.stack(l2)))
            best_l2_outer.append(torch.min(torch.stack(l2)))  # best = lowest L2
            max_l2_outer.append(torch.max(torch.stack(l2)))   # worst = highest L2

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        m_rate = sum(miss_rate_outer) / total_traj
        mean_l2 = sum(mean_l2_outer) / total_traj
        best_l2 = sum(best_l2_outer) / total_traj
        max_l2 = sum(max_l2_outer) / total_traj

    return ade, fde, m_rate, mean_l2, best_l2, max_l2
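
The helpers used above (relative_to_abs, evaluate_helper, displacement_error, l2_loss, miss_rate) are external to this snippet. A minimal sketch of the first two, assuming Social-GAN-style utilities; treat the signatures as assumptions about the surrounding codebase, not its actual implementation:

import torch

# relative_to_abs: integrate per-step displacements (seq_len, batch, 2) back
# into absolute coordinates, offset by the last observed position (batch, 2).
def relative_to_abs(rel_traj, start_pos):
    displacement = torch.cumsum(rel_traj, dim=0)
    return displacement + start_pos.unsqueeze(0)

# evaluate_helper: for each scene [start, end), sum the raw per-pedestrian
# errors and keep the minimum over the num_samples sampled predictions.
def evaluate_helper(error, seq_start_end):
    error = torch.stack(error, dim=1)  # (num_peds, num_samples)
    total = 0
    for start, end in seq_start_end:
        scene_error = error[start.item():end.item()].sum(dim=0)
        total += torch.min(scene_error)
    return total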
Example #2
def evaluate_graph_sways(args, loader, model, num_samples, epoch):
    ade_outer, fde_outer = [], []
    total_traj = 0
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             seq_start_end, maps, dnames) = batch

            if args.adj_type == 0:
                adj_out = compute_adjs(args, seq_start_end)
            elif args.adj_type == 1:
                adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                               pred_traj_gt)
            elif args.adj_type == 2:
                adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                              pred_traj_gt)

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)
            kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                                  adj_out.cuda(),
                                                  seq_start_end.cuda(),
                                                  obs_traj[0],
                                                  maps[:args.obs_len], epoch)
            for idx in range(num_samples):
                sample_traj_rel = model.sample(args.pred_len,
                                               seq_start_end.cuda(), False,
                                               maps[args.obs_len - 1:],
                                               obs_traj[-1], dnames, h).cpu()
                sample_traj = relative_to_abs(sample_traj_rel, obs_traj[-1])
                ade.append(
                    displacement_error(sample_traj,
                                       pred_traj_gt.cpu(),
                                       mode='raw'))
                fde.append(
                    final_displacement_error(sample_traj[-1],
                                             pred_traj_gt[-1].cpu(),
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj

    return ade, fde
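
displacement_error and final_displacement_error are likewise imported from elsewhere. A plausible sketch in the style of Social-GAN's losses.py (the consider_ped mask is an assumption): ADE sums the pointwise L2 distance over all predicted steps, FDE measures it at the final step only, and mode='raw' returns one value per pedestrian so evaluate_helper can take per-scene minima.

import torch

def displacement_error(pred_traj, pred_traj_gt, consider_ped=None, mode='sum'):
    # pred_traj, pred_traj_gt: (seq_len, batch, 2)
    loss = torch.sqrt(((pred_traj_gt - pred_traj) ** 2).sum(dim=2)).sum(dim=0)
    if consider_ped is not None:
        loss = loss * consider_ped  # zero out pedestrians outside the subset
    return loss if mode == 'raw' else torch.sum(loss)

def final_displacement_error(pred_pos, pred_pos_gt, consider_ped=None,
                             mode='sum'):
    # pred_pos, pred_pos_gt: (batch, 2) positions at the final time step
    loss = torch.sqrt(((pred_pos_gt - pred_pos) ** 2).sum(dim=1))
    if consider_ped is not None:
        loss = loss * consider_ped
    return loss if mode == 'raw' else torch.sum(loss)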
Example #3
def train(epoch, train_loader, optimizer, model, args, writer, beta_vals):
    train_loss = 0
    mean_kld_loss, mean_nll_loss, mean_ade_loss, mean_kld_hm = 0, 0, 0, 0
    # ([], ) * 3 would bind all three names to the *same* list, so appends to
    # disp_error_l/_nl would also land in disp_error; create distinct lists.
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    metrics = {}

    model.train()

    beta = beta_vals[epoch]

    for batch_idx, batch in enumerate(train_loader):
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, seq_start_end,
         maps, dnames) = batch

        if args.adj_type == 0:
            adj_out = compute_adjs(args, seq_start_end)
        elif args.adj_type == 1:
            adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                           pred_traj_gt)
        elif args.adj_type == 2:
            adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                          pred_traj_gt)

        # Forward + backward + optimize
        optimizer.zero_grad()

        kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                              adj_out.cuda(),
                                              seq_start_end.cuda(),
                                              obs_traj[0], maps[:args.obs_len],
                                              epoch)

        v_losses = []
        if args.v_loss:
            h_samples = torch.cat(args.k_vloss * [h], 1)
            pred_traj_rel = model.sample(args.pred_len, seq_start_end.cuda(),
                                         True, maps[args.obs_len - 1:],
                                         obs_traj[-1], dnames, h_samples)
            pred_traj_rel = torch.stack(
                torch.chunk(pred_traj_rel, args.k_vloss, dim=1))
            for k in range(0, args.k_vloss):
                pred_traj_abs = relative_to_abs(pred_traj_rel[k], obs_traj[-1])
                ade_loss = displacement_error(
                    pred_traj_abs, pred_traj_gt) / obs_traj_rel.size(1)
                v_losses.append(ade_loss)

            ade_min = min(v_losses).cuda()
            mean_ade_loss += ade_min.item()
            loss = beta * kld_loss + nll_loss + ade_min + kld_hm
        else:
            loss = beta * kld_loss + nll_loss + kld_hm

        mean_kld_loss += kld_loss.item()
        mean_nll_loss += nll_loss.item()
        mean_kld_hm += kld_hm.item()

        loss.backward()

        # Clipping gradients
        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        train_loss += loss.item()

        # Printing
        if batch_idx % args.print_every == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\t KLD Loss: {:.6f} \t NLL Loss: {:.6f} \t KLD_hm: {:.6f}'
                .format(epoch, batch_idx * obs_traj.size(1),  # len(batch) would count tuple fields, not pedestrians
                        len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), kld_loss.item(),
                        nll_loss.item(), kld_hm.item()))

            with torch.no_grad():
                pred_traj_sampled_rel = model.sample(args.pred_len,
                                                     seq_start_end.cuda(),
                                                     False,
                                                     maps[args.obs_len - 1:],
                                                     obs_traj[-1], dnames,
                                                     h).cpu()
            pred_traj_sampled = relative_to_abs(pred_traj_sampled_rel,
                                                obs_traj[-1])

            ade, ade_l, ade_nl = cal_ade(pred_traj_sampled,
                                         pred_traj_gt,
                                         linear_ped=None,
                                         non_linear_ped=None)
            fde, fde_l, fde_nl = cal_fde(pred_traj_sampled,
                                         pred_traj_gt,
                                         linear_ped=None,
                                         non_linear_ped=None)

            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            total_traj += pred_traj_gt.size(1)

            # Plot samples
            # Input observations (obs_len, x_len)
            start, end = seq_start_end[0][0], seq_start_end[0][1]
            input_a = obs_traj[:, start:end, :].data
            # Ground truth (pred_len, x_len)
            gt = pred_traj_gt[:, start:end, :].data
            out_a = pred_traj_sampled[:, start:end, :].data

            gt_r = np.insert(np.asarray(gt.cpu()),
                             0,
                             np.asarray(input_a[-1].unsqueeze(0).cpu()),
                             axis=0)
            out_a_r = np.insert(np.asarray(out_a.cpu()),
                                0,
                                np.asarray(input_a[-1].unsqueeze(0).cpu()),
                                axis=0)

            img2 = draw_all_trj_seq(np.asarray(input_a.cpu()), gt_r, out_a_r,
                                    args)
            writer.add_figure('Generated_samples_in_absolute_coordinates',
                              img2, epoch)

            metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
            metrics['ade_l'] = sum(disp_error_l) / (total_traj * args.pred_len)
            metrics['ade_nl'] = sum(disp_error_nl) / (total_traj *
                                                      args.pred_len)
            metrics['fde'] = sum(f_disp_error) / total_traj
            metrics['fde_l'] = sum(f_disp_error_l) / total_traj
            metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj

            writer.add_scalar('ade', metrics['ade'], epoch)
            writer.add_scalar('fde', metrics['fde'], epoch)

    mean_kld_loss /= len(train_loader)
    mean_nll_loss /= len(train_loader)
    mean_ade_loss /= len(train_loader)
    mean_kld_hm /= len(train_loader)

    writer.add_scalar('train_mean_kld_loss', mean_kld_loss, epoch)
    writer.add_scalar('train_mean_nll_loss', mean_nll_loss, epoch)
    if args.v_loss:
        writer.add_scalar('train_mean_ade_loss', mean_ade_loss, epoch)
    if args.use_hm:
        writer.add_scalar('train_mean_kld_hm', mean_kld_hm, epoch)

    writer.add_scalar('loss_train', train_loss / len(train_loader), epoch)
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader)))
    print(metrics)
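
beta_vals is indexed by epoch to anneal the KL term of the ELBO. The schedule itself is not shown; a minimal sketch of a linear warm-up that would fit this usage (the warm-up length is an assumption):

import numpy as np

# Hypothetical KL-annealing schedule: beta ramps linearly from 0 to 1 over the
# first `warmup` epochs, then stays at 1 for the remainder of training.
def make_beta_vals(num_epochs, warmup=10):
    return np.minimum(1.0, np.arange(num_epochs, dtype=np.float64) / warmup)

beta_vals = make_beta_vals(num_epochs=200)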
Example #4
def test(epoch, test_loader, model, writer, beta_vals):
    """Use test data to evaluate likelihood of the model"""
    mean_kld_loss, mean_nll_loss, mean_ade_loss, mean_kld_hm = 0, 0, 0, 0
    # distinct lists; ([], ) * 3 would alias one list three times
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    metrics = {}

    model.eval()

    beta = beta_vals[epoch]

    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             seq_start_end, maps, dnames) = batch

            if args.adj_type == 0:
                adj_out = compute_adjs(args, seq_start_end)
            elif args.adj_type == 1:
                adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                               pred_traj_gt)
            elif args.adj_type == 2:
                adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                              pred_traj_gt)

            kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                                  adj_out.cuda(),
                                                  seq_start_end.cuda(),
                                                  obs_traj[0],
                                                  maps[:args.obs_len], epoch)

            mean_kld_loss += beta * kld_loss.item()
            mean_nll_loss += nll_loss.item()
            mean_kld_hm += kld_hm.item()

            v_losses = []
            if args.v_loss:
                h_samples = torch.cat(args.k_vloss * [h], 1)
                pred_traj_rel = model.sample(args.pred_len,
                                             seq_start_end.cuda(), True,
                                             maps[args.obs_len - 1:],
                                             obs_traj[-1], dnames, h_samples)
                pred_traj_rel = torch.stack(
                    torch.chunk(pred_traj_rel, args.k_vloss, dim=1))
                for k in range(0, args.k_vloss):
                    pred_traj_abs = relative_to_abs(pred_traj_rel[k],
                                                    obs_traj[-1])
                    ade_loss = displacement_error(
                        pred_traj_abs, pred_traj_gt) / obs_traj_rel.size(1)
                    v_losses.append(ade_loss)
                ade_min = min(v_losses).cuda()
                mean_ade_loss += ade_min.item()

            if i % args.print_every == 0:
                pred_traj_sampled_rel = model.sample(args.pred_len,
                                                     seq_start_end.cuda(),
                                                     False,
                                                     maps[args.obs_len - 1:],
                                                     obs_traj[-1], dnames,
                                                     h).cpu()
                pred_traj_sampled = relative_to_abs(pred_traj_sampled_rel,
                                                    obs_traj[-1])

                ade, ade_l, ade_nl = cal_ade(pred_traj_sampled,
                                             pred_traj_gt,
                                             linear_ped=None,
                                             non_linear_ped=None)
                fde, fde_l, fde_nl = cal_fde(pred_traj_sampled,
                                             pred_traj_gt,
                                             linear_ped=None,
                                             non_linear_ped=None)

                disp_error.append(ade.item())
                disp_error_l.append(ade_l.item())
                disp_error_nl.append(ade_nl.item())
                f_disp_error.append(fde.item())
                f_disp_error_l.append(fde_l.item())
                f_disp_error_nl.append(fde_nl.item())

                total_traj += pred_traj_gt.size(1)

                metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
                metrics['ade_l'] = sum(disp_error_l) / (total_traj *
                                                        args.pred_len)
                metrics['ade_nl'] = sum(disp_error_nl) / (total_traj *
                                                          args.pred_len)
                metrics['fde'] = sum(f_disp_error) / total_traj
                metrics['fde_l'] = sum(f_disp_error_l) / total_traj
                metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj

                writer.add_scalar('ade', metrics['ade'], epoch)
                writer.add_scalar('fde', metrics['fde'], epoch)

        mean_kld_loss /= len(test_loader)
        mean_nll_loss /= len(test_loader)
        mean_ade_loss /= len(test_loader)
        mean_kld_hm /= len(test_loader)

        writer.add_scalar('test_mean_kld_loss', mean_kld_loss, epoch)
        writer.add_scalar('test_mean_nll_loss', mean_nll_loss, epoch)
        if args.v_loss:
            writer.add_scalar('test_mean_ade_loss', mean_ade_loss, epoch)
        if args.use_hm:
            writer.add_scalar('test_mean_kld_hm', mean_kld_hm, epoch)
        writer.add_scalar(
            'loss_test',
            mean_kld_loss + mean_nll_loss + mean_ade_loss + mean_kld_hm, epoch)

        print(
            '====> Test set loss: KLD Loss = {:.4f}, NLL Loss = {:.4f}, ADE = {:.4f}, KLD_HM = {:.4f} '
            .format(mean_kld_loss, mean_nll_loss, mean_ade_loss, mean_kld_hm))
        print(metrics)
Example #5
def train(epoch, train_loader, optimizer, model, args, writer, beta_vals):
    train_loss = 0
    loss_mask_sum = 0
    # distinct lists; ([],) * 3 would alias one list three times
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    l2_losses_abs, l2_losses_rel = [], []
    metrics = {}

    model.train()

    for batch_idx, batch in enumerate(train_loader):
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
         loss_mask, seq_start_end, maps) = batch

        loss_mask = loss_mask[:, args.obs_len:]
        linear_ped = 1 - non_linear_ped

        # Forward + backward + optimize
        optimizer.zero_grad()
        model = model.to(device)
        kld_loss, nll_loss, (x_list, mean_list), h = model(obs_traj_rel.cuda(), obs_traj[0])
        mean_list[0] = mean_list[0].cuda()
        beta = beta_vals[epoch]

        v_losses = []
        if args.v_loss:
            for i in range(0, args.k_vloss):
                pred_traj_rel = model.sample(args.pred_len, obs_traj_rel.size(1), h)
                pred_traj_abs = relative_to_abs(pred_traj_rel, obs_traj[-1])
                ade_loss = displacement_error(pred_traj_abs, pred_traj_gt) / obs_traj_rel.size(1)
                v_losses.append(ade_loss)
            ade_min = min(v_losses)
            loss = beta * kld_loss + nll_loss + ade_min
        else:
            loss = beta * kld_loss + nll_loss

        loss.backward()

        # Clipping gradients
        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        train_loss += loss.item()

        # Printing
        if batch_idx % args.print_every == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\t KLD Loss: {:.6f} \t NLL Loss: {:.6f}'.format(
                epoch, batch_idx * obs_traj.size(1), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader),
                       kld_loss.item(),
                       nll_loss.item()))

            pred_traj_sampled_rel = model.sample(args.pred_len, obs_traj_rel.size(1), h)
            pred_traj_sampled = relative_to_abs(pred_traj_sampled_rel, obs_traj[-1])

            ade, ade_l, ade_nl = cal_ade(pred_traj_sampled, pred_traj_gt, linear_ped, non_linear_ped)
            fde, fde_l, fde_nl = cal_fde(pred_traj_sampled, pred_traj_gt, linear_ped, non_linear_ped)
            l2_loss_abs, l2_loss_rel = cal_l2_losses(pred_traj_gt, pred_traj_gt_rel, pred_traj_sampled_rel,
                                                     pred_traj_sampled, loss_mask)

            l2_losses_abs.append(l2_loss_abs.item())
            l2_losses_rel.append(l2_loss_rel.item())
            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()

            # Plot samples
            # Input observations (obs_len, x_len)
            start, end = seq_start_end[0][0], seq_start_end[0][1]
            input_a = obs_traj[:, start:end, :].data
            # Ground truth (pred_len, x_len)
            gt = pred_traj_gt[:, start:end, :].data
            out_a = pred_traj_sampled[:, start:end, :].data

            gt_r = np.insert(np.asarray(gt.cpu()), 0, np.asarray(input_a[-1].unsqueeze(0).cpu()), axis=0)
            out_a_r = np.insert(np.asarray(out_a.cpu()), 0, np.asarray(input_a[-1].unsqueeze(0).cpu()), axis=0)

            img2 = draw_all_trj_seq(np.asarray(input_a.cpu()), gt_r, out_a_r, args)
            writer.add_figure('Generated_samples_in_absolute_coordinates', img2, epoch)

            metrics['l2_loss_abs'] = sum(l2_losses_abs) / loss_mask_sum
            metrics['l2_loss_rel'] = sum(l2_losses_rel) / loss_mask_sum

            metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
            metrics['fde'] = sum(f_disp_error) / total_traj

            writer.add_scalar('ade', metrics['ade'], epoch)
            writer.add_scalar('fde', metrics['fde'], epoch)

    writer.add_scalar('loss_train', train_loss / len(train_loader.dataset), epoch)
    print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset)))
    print(metrics)
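
cal_ade and cal_fde are thin wrappers that are not shown. A sketch consistent with how they are called here, reusing the displacement_error helpers sketched after Example #2 (an assumption about the codebase; with linear_ped/non_linear_ped set to None, the masked variants simply repeat the unmasked value):

def cal_ade(pred_traj_gt, pred_traj, linear_ped=None, non_linear_ped=None):
    ade = displacement_error(pred_traj, pred_traj_gt)
    ade_l = displacement_error(pred_traj, pred_traj_gt, linear_ped)
    ade_nl = displacement_error(pred_traj, pred_traj_gt, non_linear_ped)
    return ade, ade_l, ade_nl

def cal_fde(pred_traj_gt, pred_traj, linear_ped=None, non_linear_ped=None):
    fde = final_displacement_error(pred_traj[-1], pred_traj_gt[-1])
    fde_l = final_displacement_error(pred_traj[-1], pred_traj_gt[-1],
                                     linear_ped)
    fde_nl = final_displacement_error(pred_traj[-1], pred_traj_gt[-1],
                                      non_linear_ped)
    return fde, fde_l, fde_nl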
Example #6
def test(epoch, test_loader, model, writer, beta_vals):
    """Use test data to evaluate likelihood of the model"""
    mean_kld_loss, mean_nll_loss = 0, 0
    loss_mask_sum = 0
    # distinct lists; ([],) * 3 would alias one list three times
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    l2_losses_abs, l2_losses_rel = [], []
    metrics = {}

    model.eval()
    beta = beta_vals[epoch]

    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
             loss_mask, seq_start_end, maps) = batch

            loss_mask = loss_mask[:, args.obs_len:]
            linear_ped = 1 - non_linear_ped

            model = model.to(device)
            kld_loss, nll_loss, _, h = model(obs_traj_rel.cuda(), obs_traj[0])
            mean_kld_loss += beta * kld_loss.item()

            v_losses = []
            if args.v_loss:
                for j in range(0, args.k_vloss):
                    pred_traj_rel = model.sample(args.pred_len, obs_traj_rel.size(1), h)
                    pred_traj_abs = relative_to_abs(pred_traj_rel, obs_traj[-1])
                    ade_loss = displacement_error(pred_traj_abs, pred_traj_gt) / obs_traj_rel.size(1)
                    v_losses.append(ade_loss)
                ade_min = min(v_losses)
                mean_nll_loss += (nll_loss.item() + ade_min.item())
            else:
                mean_nll_loss += nll_loss.item()

            if i % args.print_every == 0:
                pred_traj_sampled_rel = model.sample(args.pred_len, obs_traj_rel.size(1), h)
                pred_traj_sampled = relative_to_abs(pred_traj_sampled_rel, obs_traj[-1])

                ade, ade_l, ade_nl = cal_ade(pred_traj_sampled, pred_traj_gt, linear_ped, non_linear_ped)
                fde, fde_l, fde_nl = cal_fde(pred_traj_sampled, pred_traj_gt, linear_ped, non_linear_ped)
                l2_loss_abs, l2_loss_rel = cal_l2_losses(pred_traj_gt, pred_traj_gt_rel, pred_traj_sampled_rel,
                                                         pred_traj_sampled, loss_mask)

                l2_losses_abs.append(l2_loss_abs.item())
                l2_losses_rel.append(l2_loss_rel.item())
                disp_error.append(ade.item())
                disp_error_l.append(ade_l.item())
                disp_error_nl.append(ade_nl.item())
                f_disp_error.append(fde.item())
                f_disp_error_l.append(fde_l.item())
                f_disp_error_nl.append(fde_nl.item())

                loss_mask_sum += torch.numel(loss_mask.data)
                total_traj += pred_traj_gt.size(1)
                total_traj_l += torch.sum(linear_ped).item()
                total_traj_nl += torch.sum(non_linear_ped).item()

                metrics['l2_loss_abs'] = sum(l2_losses_abs) / loss_mask_sum
                metrics['l2_loss_rel'] = sum(l2_losses_rel) / loss_mask_sum

                metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
                metrics['fde'] = sum(f_disp_error) / total_traj
                writer.add_scalar('ade', metrics['ade'], epoch)
                writer.add_scalar('fde', metrics['fde'], epoch)

        mean_kld_loss /= len(test_loader.dataset)
        mean_nll_loss /= len(test_loader.dataset)
        writer.add_scalar('loss_test', mean_kld_loss + mean_nll_loss, epoch)
        print('====> Test set loss: KLD Loss = {:.4f}, NLL Loss = {:.4f} '.format(mean_kld_loss, mean_nll_loss))
        print(metrics)
Example #7
def check_accuracy_graph_sways(args, loader, model, epoch, limit=False):
    losses = []
    val_loss = 0
    metrics = {}
    disp_error = []
    f_disp_error = []
    total_traj = 0
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             seq_start_end, maps, dnames) = batch

            if args.adj_type == 0:
                adj_out = compute_adjs(args, seq_start_end)
            elif args.adj_type == 1:
                adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                               pred_traj_gt)
            elif args.adj_type == 2:
                adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                              pred_traj_gt)

            kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                                  adj_out.cuda(),
                                                  seq_start_end.cuda(),
                                                  obs_traj[0],
                                                  maps[:args.obs_len], epoch)
            loss = kld_loss + nll_loss + kld_hm
            val_loss += loss.item()
            pred_traj_rel = model.sample(args.pred_len, seq_start_end.cuda(),
                                         False, maps[args.obs_len - 1:],
                                         obs_traj[-1], dnames, h).cpu()
            pred_traj = relative_to_abs(pred_traj_rel, obs_traj[-1])

            ade, ade_l, ade_nl = cal_ade(pred_traj_gt,
                                         pred_traj,
                                         linear_ped=None,
                                         non_linear_ped=None)
            fde, fde_l, fde_nl = cal_fde(pred_traj_gt,
                                         pred_traj,
                                         linear_ped=None,
                                         non_linear_ped=None)

            losses.append(loss.item())
            disp_error.append(ade.item())

            f_disp_error.append(fde.item())
            total_traj += pred_traj_gt.size(1)
            if limit and total_traj >= args.num_samples_check:
                break

    metrics['loss'] = sum(losses) / len(losses)
    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj

    metrics['ade_l'] = 0
    metrics['fde_l'] = 0

    metrics['ade_nl'] = 0
    metrics['fde_nl'] = 0

    model.train()
    return metrics, val_loss / len(loader)
Example #8
def check_accuracy_graph(args, loader, model, epoch, limit=False):
    losses = []
    val_loss = 0
    metrics = {}
    # distinct lists; ([], ) * n would alias one list n times
    l2_losses_abs, l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    loss_mask_sum = 0
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, maps, dnames) = batch

            linear_ped = 1 - non_linear_ped
            loss_mask = loss_mask[:, args.obs_len:]

            if args.adj_type == 0:
                adj_out = compute_adjs(args, seq_start_end)
            elif args.adj_type == 1:
                adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                               pred_traj_gt)
            elif args.adj_type == 2:
                adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                              pred_traj_gt)

            kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                                  adj_out.cuda(),
                                                  seq_start_end.cuda(),
                                                  obs_traj[0],
                                                  maps[:args.obs_len], epoch)
            loss = kld_loss + nll_loss + kld_hm
            val_loss += loss.item()
            pred_traj_rel = model.sample(args.pred_len, seq_start_end.cuda(),
                                         False, maps[args.obs_len - 1:],
                                         obs_traj[-1], dnames, h).cpu()
            pred_traj = relative_to_abs(pred_traj_rel, obs_traj[-1])

            l2_loss_abs, l2_loss_rel = cal_l2_losses(pred_traj_gt,
                                                     pred_traj_gt_rel,
                                                     pred_traj, pred_traj_rel,
                                                     loss_mask)
            ade, ade_l, ade_nl = cal_ade(pred_traj_gt, pred_traj, linear_ped,
                                         non_linear_ped)
            fde, fde_l, fde_nl = cal_fde(pred_traj_gt, pred_traj, linear_ped,
                                         non_linear_ped)

            losses.append(loss.item())
            l2_losses_abs.append(l2_loss_abs.item())
            l2_losses_rel.append(l2_loss_rel.item())
            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()
            if limit and total_traj >= args.num_samples_check:
                break

    metrics['loss'] = sum(losses) / len(losses)
    metrics['l2_loss_abs'] = sum(l2_losses_abs) / loss_mask_sum
    metrics['l2_loss_rel'] = sum(l2_losses_rel) / loss_mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj
    if total_traj_l != 0:
        metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
        metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
    else:
        metrics['ade_l'] = 0
        metrics['fde_l'] = 0
    if total_traj_nl != 0:
        metrics['ade_nl'] = sum(disp_error_nl) / (total_traj_nl *
                                                  args.pred_len)
        metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
    else:
        metrics['ade_nl'] = 0
        metrics['fde_nl'] = 0

    model.train()
    return metrics, val_loss / len(loader)
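
cal_l2_losses and the masked l2_loss it wraps are also external. A sketch under the same Social-GAN-style assumptions, where loss_mask has shape (batch, pred_len) and zeroes out invalid time steps per pedestrian (the argument order follows the calls in Examples #8 and #10):

import torch

def l2_loss(pred_traj, pred_traj_gt, loss_mask, mode='sum'):
    # pred_traj, pred_traj_gt: (seq_len, batch, 2); loss_mask: (batch, seq_len)
    loss = loss_mask.permute(1, 0).unsqueeze(2) * (pred_traj_gt - pred_traj) ** 2
    if mode == 'sum':
        return torch.sum(loss)
    return loss.sum(dim=2).sum(dim=0)  # 'raw': one value per pedestrian

def cal_l2_losses(pred_traj_gt, pred_traj_gt_rel, pred_traj, pred_traj_rel,
                  loss_mask):
    loss_abs = l2_loss(pred_traj, pred_traj_gt, loss_mask, mode='sum')
    loss_rel = l2_loss(pred_traj_rel, pred_traj_gt_rel, loss_mask, mode='sum')
    return loss_abs, loss_rel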
Example #9
def evaluate_graph(args, loader, model, num_samples, epoch):
    ade_outer, fde_outer, miss_rate_outer, mean_l2_outer, best_l2_outer, max_l2_outer = [], [], [], [], [], []
    mean_l2_graph = []
    total_traj = 0
    threshold = 3
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, maps, dnames) = batch

            if args.adj_type == 0:
                adj_out = compute_adjs(args, seq_start_end)
            elif args.adj_type == 1:
                adj_out = compute_adjs_distsim(args, seq_start_end, obs_traj,
                                               pred_traj_gt)
            elif args.adj_type == 2:
                adj_out = compute_adjs_knnsim(args, seq_start_end, obs_traj,
                                              pred_traj_gt)

            ade, fde, l2, losses = [], [], [], []
            l2_graph = []
            total_traj += pred_traj_gt.size(1)

            kld_loss, nll_loss, kld_hm, h = model(obs_traj_rel.cuda(),
                                                  adj_out.cuda(),
                                                  seq_start_end.cuda(),
                                                  obs_traj[0],
                                                  maps[:args.obs_len], epoch)

            for idx in range(num_samples):
                sample_traj_rel = model.sample(args.pred_len,
                                               seq_start_end.cuda(), False,
                                               maps[args.obs_len - 1:],
                                               obs_traj[-1], dnames, h).cpu()
                sample_traj = relative_to_abs(sample_traj_rel, obs_traj[-1])
                ade.append(
                    displacement_error(sample_traj,
                                       pred_traj_gt.cpu(),
                                       mode='raw'))
                fde.append(
                    final_displacement_error(sample_traj[-1],
                                             pred_traj_gt[-1].cpu(),
                                             mode='raw'))
                # sample_traj is already absolute; wrapping it in
                # relative_to_abs again would cumulatively sum coordinates.
                l2.append(
                    l2_loss(sample_traj, pred_traj_gt.cpu(),
                            loss_mask[:, args.obs_len:]))
                loss = kld_loss + nll_loss + kld_hm
                losses.append(loss)

                l2_graph.append(l2_error_graph(sample_traj,
                                               pred_traj_gt.cpu()))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)
            l2_sum = evaluate_helper_l2(l2_graph, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            miss_rate_outer.append(miss_rate(losses, threshold))
            mean_l2_outer.append(torch.mean(torch.stack(l2)))
            best_l2_outer.append(torch.min(torch.stack(l2)))  # best = lowest L2
            max_l2_outer.append(torch.max(torch.stack(l2)))   # worst = highest L2

            mean_l2_graph.append(l2_sum)

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        m_rate = sum(miss_rate_outer) / total_traj
        mean_l2 = sum(mean_l2_outer) / total_traj
        best_l2 = sum(best_l2_outer) / total_traj
        max_l2 = sum(max_l2_outer) / total_traj

        l2_graph_steps = sum(mean_l2_graph) / total_traj

        mean_velocity1d, mean_velocity1d_v2, mean_acceleration1d = linear_velocity_acceleration_1D(
            l2_graph_steps)

    return ade, fde, m_rate, mean_l2, best_l2, max_l2
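
miss_rate is fed the per-sample training losses rather than distances, so its exact definition cannot be recovered from this snippet. A hypothetical threshold counter that is at least consistent with the call site (the caller normalizes by total_traj afterwards):

import torch

# Purely an assumption; the real miss_rate is not shown. Counts how many of
# the num_samples scalar losses exceed the threshold.
def miss_rate(losses, threshold):
    stacked = torch.stack([l.detach().cpu() for l in losses])
    return (stacked > threshold).sum().item()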
Example #10
def check_accuracy_baseline(args, loader, model, limit=False):
    losses = []
    metrics = {}
    val_loss = 0
    # distinct lists; ([], ) * n would alias one list n times
    l2_losses_abs, l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    loss_mask_sum = 0
    model.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, maps, dnames) = batch

            linear_ped = 1 - non_linear_ped
            loss_mask = loss_mask[:, args.obs_len:]

            if args.model == 'vrnn':
                kld_loss, nll_loss, _, h = model(obs_traj_rel.cuda(),
                                                 obs_traj[0])
                loss = kld_loss + nll_loss
            elif args.model == 'rnn':
                loss, _, h = model(obs_traj_rel.cuda())

            val_loss += loss.item()

            pred_traj_rel = model.sample(args.pred_len, obs_traj_rel.size(1),
                                         obs_traj[-1], dnames, h)
            pred_traj = relative_to_abs(pred_traj_rel, obs_traj[-1])

            l2_loss_abs, l2_loss_rel = cal_l2_losses(pred_traj_gt,
                                                     pred_traj_gt_rel,
                                                     pred_traj, pred_traj_rel,
                                                     loss_mask)
            ade, ade_l, ade_nl = cal_ade(pred_traj_gt, pred_traj, linear_ped,
                                         non_linear_ped)
            fde, fde_l, fde_nl = cal_fde(pred_traj_gt, pred_traj, linear_ped,
                                         non_linear_ped)

            losses.append(loss.item())
            l2_losses_abs.append(l2_loss_abs.item())
            l2_losses_rel.append(l2_loss_rel.item())
            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()
            if limit and total_traj >= args.num_samples_check:
                break

    metrics['loss'] = sum(losses) / len(losses)
    metrics['l2_loss_abs'] = sum(l2_losses_abs) / loss_mask_sum
    metrics['l2_loss_rel'] = sum(l2_losses_rel) / loss_mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj
    if total_traj_l != 0:
        metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
        metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
    else:
        metrics['ade_l'] = 0
        metrics['fde_l'] = 0
    if total_traj_nl != 0:
        metrics['ade_nl'] = sum(disp_error_nl) / (total_traj_nl *
                                                  args.pred_len)
        metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
    else:
        metrics['ade_nl'] = 0
        metrics['fde_nl'] = 0

    model.train()
    return metrics, val_loss / len(loader)
Example #11
    def sample_likelihood(self, seq_len, seq_start_end, maps, obs_traj_last, h,
                          dnames, pred_traj_gt_rel):

        sample = []
        nll_loss = 0
        # Cell state for the LSTM branch below; the original snippet read `c`
        # before ever assigning it.
        c = torch.zeros_like(h)
        lm_gt = torch.zeros((obs_traj_last.shape[0], 5, 5))
        for t in range(seq_len):
            if self.use_hm and self.conditional:
                if t == 0:
                    lmg = torch.from_numpy(np.stack(maps[t, :, 2])).type(
                        torch.float32).cuda()
                    lm_gt = lmg / (torch.sum(lmg, dim=[1, 2], keepdim=True) +
                                   1e-8)
                sample_t, dec_logvar_t, phi_z_t = self._generate_sample_hm_logvar(
                    h, lm_gt)
            else:
                sample_t, dec_logvar_t, phi_z_t = self._generate_sample_logvar(
                    h)

            phi_x_t = self.phi_x(sample_t.cuda())
            sample.append(sample_t)

            nll_loss_t = self._nll_gauss(sample_t, dec_logvar_t,
                                         pred_traj_gt_rel[t])
            nll_loss += (nll_loss_t / (sample_t.shape[0] * sample_t.shape[1]))

            sample_abs_t = relative_to_abs(sample_t.unsqueeze(0),
                                           obs_traj_last).squeeze(0)

            if self.use_hm and self.conditional:
                sample_abs_t_numpy = np.asarray(sample_abs_t.detach().cpu())
                lm_gt_list = []
                for idx in range(len(dnames)):
                    global_hm = np.load(self.hm_path + "/" + dnames[idx] +
                                        "_local_hm.npy",
                                        allow_pickle=True)

                    abs_cord_np = np.asarray(global_hm[:, 0].tolist()).astype(
                        np.float32)
                    deltas_np = np.asarray(global_hm[:, 1].tolist()).astype(
                        np.float32)
                    lm_np = np.asarray(global_hm[:, 2].tolist()).astype(
                        np.float32)

                    # np.int was removed from NumPy; use an explicit dtype.
                    cond1 = (abs_cord_np[..., 0] <
                             sample_abs_t_numpy[idx, 0:1]).astype(np.int64)
                    cond2 = (sample_abs_t_numpy[idx, 0:1] <=
                             abs_cord_np[..., 0] +
                             deltas_np[..., 0]).astype(np.int64)
                    cond3 = (abs_cord_np[..., 1] <
                             sample_abs_t_numpy[idx, 1:2]).astype(np.int64)
                    cond4 = (sample_abs_t_numpy[idx, 1:2] <=
                             abs_cord_np[..., 1] +
                             deltas_np[..., 1]).astype(np.int64)
                    cond = cond1 * cond2 * cond3 * cond4

                    lm_gt_list.append(
                        (cond[..., np.newaxis, np.newaxis] * lm_np).sum(0))
                lm_gt = np.stack(lm_gt_list)
                lm_gt = torch.from_numpy(lm_gt).type(torch.float32).cuda()
                lm_gt = lm_gt / (torch.sum(lm_gt, dim=[1, 2], keepdim=True) +
                                 1e-8)

            if self.adj_type == 0:
                adj_pred_t = torch.ones(
                    (sample_abs_t.shape[0], sample_abs_t.shape[0]))
            elif self.adj_type == 1:
                adj_pred_t = compute_adjs_distsim_pred(self.sigma,
                                                       seq_start_end,
                                                       sample_abs_t)
            elif self.adj_type == 2:
                adj_pred_t = compute_adjs_knnsim_pred(self.top_k_neigh,
                                                      seq_start_end,
                                                      sample_abs_t)

            # recurrence
            if self.rnn_type == 'gru':
                _, h = self.rnn(
                    torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
            elif self.rnn_type == 'lstm':
                _, (h, c) = self.rnn(
                    torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), (h, c))

            h_g = self.graph(h.squeeze(0), adj_pred_t.cuda()).unsqueeze(0)
            h = self.lg(torch.cat((h, h_g), 2))

        sample = torch.stack(sample)
        nll_loss = nll_loss / seq_len
        return sample, nll_loss
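
_nll_gauss is called above as (mean, logvar, target). A standard Gaussian negative log-likelihood with a log-variance parameterization that matches this call order (an assumption about the model class; shown as a free function for brevity):

import math
import torch

# Elementwise Gaussian NLL, summed over all elements; the second argument is
# interpreted as a log-variance.
def _nll_gauss(mean, logvar, x):
    return 0.5 * torch.sum(
        logvar + math.log(2 * math.pi) + (x - mean).pow(2) / logvar.exp())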