Example #1
def cal_fde(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped):
    fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1])
    fde_l = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1],
                                     linear_ped)
    fde_nl = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1],
                                      non_linear_ped)
    return fde, fde_l, fde_nl
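These examples all build on the same two metric helpers. Below is a minimal sketch of `displacement_error` (ADE, summed over the predicted horizon) and `final_displacement_error` (FDE, at the last predicted step) in the usual Social-GAN-style layout (trajectories of shape `seq_len x num_peds x 2`). It is a reference sketch only; some examples further down (e.g. #14) call variants with extra keyword arguments such as `mask`, which this sketch does not model.

# Hedged sketch of the metric helpers assumed by these examples
# (Social-GAN-style layout: trajectories are seq_len x num_peds x 2).
import torch

def displacement_error(pred_traj, pred_traj_gt, consider_ped=None, mode='sum'):
    # L2 distance accumulated over all predicted timesteps, per pedestrian.
    diff = pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)
    loss = torch.sqrt((diff ** 2).sum(dim=2)).sum(dim=1)   # num_peds
    if consider_ped is not None:
        loss = loss * consider_ped
    if mode == 'raw':
        return loss               # one value per pedestrian
    return torch.sum(loss)        # scalar over the batch

def final_displacement_error(pred_pos, pred_pos_gt, consider_ped=None, mode='sum'):
    # L2 distance at the final predicted position only.
    loss = torch.sqrt(((pred_pos_gt - pred_pos) ** 2).sum(dim=1))
    if consider_ped is not None:
        loss = loss * consider_ped
    if mode == 'raw':
        return loss
    return torch.sum(loss)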
Example #2
def evaluate(args, loader, generator):
    trajs = []
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            total_traj += pred_traj_gt.size(1)

            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                           seq_start_end)
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

            trajs.append([
                obs_traj.cpu().numpy(),
                pred_traj_fake.cpu().numpy(),
                pred_traj_gt.cpu().numpy(),
                seq_start_end.cpu().numpy()
            ])
            ade_traj = displacement_error(pred_traj_fake,
                                          pred_traj_gt,
                                          mode='sum')

            fde_traj = final_displacement_error(pred_traj_fake[-1],
                                                pred_traj_gt[-1],
                                                mode='sum')

            ade_outer.append(ade_traj)
            fde_outer.append(fde_traj)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        return ade, fde, trajs
Example #3
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde
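`evaluate_helper` implements the best-of-K selection used above: for each scene (delimited by `seq_start_end`) it keeps the sample with the smallest summed error. A minimal sketch, assuming `error` is a list of per-pedestrian 'raw' error tensors, one entry per sample:

import torch

def evaluate_helper(error, seq_start_end):
    # error: list (len = num_samples) of tensors with one value per pedestrian.
    sum_ = 0
    error = torch.stack(error, dim=1)            # num_peds x num_samples
    for (start, end) in seq_start_end:
        start, end = start.item(), end.item()
        _error = error[start:end]                # pedestrians of one scene
        _error = torch.sum(_error, dim=0)        # total error per sample
        sum_ += torch.min(_error)                # best sample for this scene
    return sum_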
Example #4
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():
        ade_error, fde_error = [], []

        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            # print(len(batch))
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             obs_team_vec, obs_pos_vec, pred_team_vec, pred_pos_vec,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                # pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end) #regressor
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end, obs_team_vec,
                                               obs_pos_vec)  # generator

                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            # ade_error.append(ade.item())
            # fde_error.append(fde.item())

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)

        # print(len(ade_error),len(ade_error[0]))
        # print(ade_error)

        # print(sum(ade_error),sum(sum(ade_error)))
        # print(total_traj,args.pred_len)
        # ade_=sum(sum(ade_error)) / (total_traj * args.pred_len)
        # fde_=sum(sum(fde_error))/ (total_traj * args.pred_len)
        #
        # print('ADE',ade_)
        # print('FDE',fde_)

        return ade, fde
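All of these examples convert the generator's relative displacements back to absolute coordinates with `relative_to_abs`. A minimal sketch, assuming the usual layout (`rel_traj`: `seq_len x num_peds x 2`, `start_pos`: `num_peds x 2`):

import torch

def relative_to_abs(rel_traj, start_pos):
    # Cumulatively sum the per-step displacements and shift by the last observed position.
    rel_traj = rel_traj.permute(1, 0, 2)                        # num_peds x seq_len x 2
    abs_traj = torch.cumsum(rel_traj, dim=1) + start_pos.unsqueeze(1)
    return abs_traj.permute(1, 0, 2)                            # seq_len x num_peds x 2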
Example #5
def evaluate(args, loader, generatorSO, generatorST, discriminator,
             num_samples):
    ade_outer, fde_outer, kde_outer = [], [], []
    total_traj = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                noise_input, noise_shape = generatorSO(obs_traj, obs_traj_rel,
                                                       seq_start_end)
                with torch.enable_grad():
                    z_noise = MALA_corrected_sampler(generatorST,
                                                     discriminator, args,
                                                     noise_shape, noise_input,
                                                     seq_start_end, obs_traj,
                                                     obs_traj_rel)
                decoder_h = torch.cat([noise_input, z_noise], dim=1)
                decoder_h = torch.unsqueeze(decoder_h, 0)
                generator_out = generatorST(decoder_h, seq_start_end, obs_traj,
                                            obs_traj_rel)
                pred_traj_fake_rel = generator_out
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)
            kde_sum = compute_kde_nll(pred_traj_fake, pred_traj_gt)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            kde_outer.append(kde_sum)

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        kde_nll = sum(kde_outer) / len(kde_outer)
        return ade, fde, kde_nll
Example #6
def cal_fde(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped, loss_mask):
    # pred_traj_gt      frames x obj x 2

    # select the right last timestamp for FDE computation, i.e., not select the last frame if masked out
    pred_traj_last = []
    gt_traj_last = []
    num_objects = pred_traj_gt.size(1)
    for obj_tmp in range(num_objects):
        loss_mask_tmp = loss_mask[obj_tmp]         # seq_len
        good_index = torch.nonzero(loss_mask_tmp)
        if torch.nonzero(loss_mask_tmp).size(0) == 0:
            pred_traj_last.append(torch.zeros(2).cuda())
            gt_traj_last.append(torch.zeros(2).cuda())
        else:
            last_index = torch.max(good_index)
            pred_traj_last.append(pred_traj_fake[last_index, obj_tmp, :])
            gt_traj_last.append(pred_traj_gt[last_index, obj_tmp, :])
    gt_traj_last   = torch.stack(gt_traj_last, dim=0)       # num_obj x 2
    pred_traj_last = torch.stack(pred_traj_last, dim=0)     # num_obj x 2

    fde = final_displacement_error(pred_traj_last, gt_traj_last)
    fde_l = final_displacement_error(pred_traj_last, gt_traj_last, linear_ped)
    fde_nl = final_displacement_error(pred_traj_last, gt_traj_last, non_linear_ped)
    return fde, fde_l, fde_nl
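A small, self-contained usage sketch for the masked `cal_fde` above, with made-up shapes (12 predicted frames, 3 objects). It assumes `final_displacement_error` from the metrics module is in scope; the tensors live on the CPU and no object is fully masked, so the `.cuda()` fallback branch inside the function is never reached.

# Hypothetical usage of cal_fde with dummy data.
import torch

pred_len, num_obj = 12, 3
pred_traj_gt = torch.randn(pred_len, num_obj, 2)
pred_traj_fake = pred_traj_gt + 0.1 * torch.randn(pred_len, num_obj, 2)

linear_ped = torch.tensor([1.0, 0.0, 1.0])     # which objects count as "linear"
non_linear_ped = 1.0 - linear_ped
loss_mask = torch.ones(num_obj, pred_len)      # objects x seq_len, all frames valid
loss_mask[1, -4:] = 0                          # object 1 is padded for the last 4 frames

fde, fde_l, fde_nl = cal_fde(pred_traj_gt, pred_traj_fake,
                             linear_ped, non_linear_ped, loss_mask)
print(fde.item(), fde_l.item(), fde_nl.item())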
Example #7
def evaluate(args, loader, generatorSO, generatorST, discriminator,
             num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    count_left = 0
    count_mid = 0
    count_right = 0
    count = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                # TODO: replace the generator
                # (the single commented-out call below was replaced by the five lines that follow)
                #generator_out, z_noise = generator(obs_traj, obs_traj_rel, seq_start_end)
                noise_input, noise_shape = generatorSO(obs_traj, obs_traj_rel,
                                                       seq_start_end)
                with torch.enable_grad():
                    z_noise = MALA_corrected_sampler(generatorST,
                                                     discriminator, args,
                                                     noise_shape, noise_input,
                                                     seq_start_end, obs_traj,
                                                     obs_traj_rel)
                decoder_h = torch.cat([noise_input, z_noise], dim=1)
                decoder_h = torch.unsqueeze(decoder_h, 0)
                generator_out = generatorST(decoder_h, seq_start_end, obs_traj,
                                            obs_traj_rel)
                pred_traj_fake_rel = generator_out
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                #---------------previous_tra,groundtruth_tra,predicted_tra--------------
                window_num = obs_traj.size(1)

                for i in range(window_num):

                    print("i:", i)

                    #-----------------------------------------------------
                    for ji in range(num_samples):

                        #----------pred_visualization-----------------------------------------------------------

                        predicted_tra = pred_traj_fake[:, i, :]
                        predicted_tra_x = predicted_tra[:, 0]
                        predicted_tra_y = predicted_tra[:, 1]
                        predicted_tra_x = predicted_tra_x.cpu()
                        predicted_tra_y = predicted_tra_y.cpu()
                        predicted_tra_x = predicted_tra_x.numpy()
                        predicted_tra_y = predicted_tra_y.numpy()

                        pred_x = predicted_tra_x[-1]
                        pred_y = predicted_tra_y[-1]
                        pred_last_pos = Point(pred_x, pred_y)
                        p1 = Point(-8, 8)
                        p2 = Point(8, 8)
                        left = Getlen(p1, pred_last_pos)
                        left = left.getlen()
                        right = Getlen(p2, pred_last_pos)
                        right = right.getlen()

                        if left >= right:
                            count_right += 1
                        else:
                            count_left += 1

            #-----------------------------------------------------------------------------------------

                print("第%d轮loader结束" % count)
                count += 1
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde, count_left, count_mid, count_right
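Examples #7 and #12 decide whether a predicted endpoint drifts left or right by comparing its distance to two fixed reference points. `Point` and `Getlen` are not shown in the snippets; a plausible minimal version consistent with how they are called:

import math

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

class Getlen:
    """Euclidean distance between two Points, matching the Getlen(p1, p2).getlen() usage above."""
    def __init__(self, p1, p2):
        self.length = math.hypot(p1.x - p2.x, p1.y - p2.y)

    def getlen(self):
        return self.length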
Example #8
# print(datasetss.obs_traj_rel[1:3].permute(2,0,1))
# print(datasetss.obs_traj[1:3].permute(2,0,1)[0])

abs_traj = relative_to_abs(predict.permute(2, 0, 1), test.pred_traj.permute(2, 0, 1)[0])
pred_traj = abs_traj.permute(1, 0, 2).numpy()
# print(pred_traj[1:4])
# print(datasetss.obs_traj[1:3])
# print(a)
# print(a.permute(1,0,2))

for i in range(110,115):
    plt.plot(pred_traj[i][:,0], pred_traj[i][:,1], "r+")
    plt.plot(test_x_abs[i][:,0], test_x_abs[i][:,1], "b+")
    plt.plot(test_y_abs[i][:,0], test_y_abs[i][:,1], "g+")

plt.show()
pred_traj_tensor = torch.from_numpy(pred_traj)
gt_tensor = torch.from_numpy(test_y_abs)
ade = displacement_error(pred_traj_tensor.permute(1, 0, 2),
                         gt_tensor.permute(1, 0, 2), mode="sum")
# print(ade)
ade_res = ade[0].detach() / (len(pred_traj) * 12)
print(ade_res)
fde = final_displacement_error(pred_traj_tensor.permute(1, 0, 2)[-1],
                               gt_tensor.permute(1, 0, 2)[-1], mode="sum")
fde_res = fde[0].detach() / len(pred_traj)
# print(predict.permute(2,0,1))
# print(test.pred_traj.permute(2,0,1))
print(fde_res)

print(test_y_abs.shape)
print(pred_traj_tensor.shape)
Example #9
def evaluate(args, loader, generator, num_samples, collisionThreshold):
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():

        testSetStatistics = {}

        collisionStatistics = {}

        poolingStatistics = collections.Counter(), collections.Counter(
        ), collections.Counter(), collections.Counter()

        for batch in loader:

            if evalArgs.use_gpu:
                batch = [tensor.cuda() for tensor in batch]
            else:
                batch = [tensor.cpu() for tensor in batch]

            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            def updateTestSetStatistics():
                dirOfCurrentTestSet = loader.dataset.data_dir

                if (dirOfCurrentTestSet not in testSetStatistics):
                    testSetStatistics[dirOfCurrentTestSet] = (
                        0, collections.Counter(), 0)

                currNumOfScenes, pedestriansPerScene, currNumOfBatches = testSetStatistics[
                    dirOfCurrentTestSet]

                newNumOfScenes = currNumOfScenes + len(seq_start_end)
                newNumOfBatches = currNumOfBatches + 1

                for start, end in seq_start_end:
                    start = start.item()
                    end = end.item()
                    numPedestriansInScene = end - start

                    pedestriansPerScene[numPedestriansInScene] += 1

                testSetStatistics[dirOfCurrentTestSet] = (newNumOfScenes,
                                                          pedestriansPerScene,
                                                          newNumOfBatches)

            updateTestSetStatistics()

            ade, fde, poolStats = [], [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):

                pred_traj_fake_rel, currPoolingStatistics = generator(
                    obs_traj, obs_traj_rel, seq_start_end)

                poolingStatistics = tuple(
                    oldStats + newStats for oldStats, newStats in zip(
                        poolingStatistics, currPoolingStatistics))

                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])

                start, end = seq_start_end[1]
                exampleSituation = (obs_traj[:, start:end, :],
                                    pred_traj_fake[:, start:end, :],
                                    pred_traj_gt[:, start:end, :])

                def updateCollisionStatistics():
                    allCoordOfFrame0 = pred_traj_fake[0]
                    allCoordFrame0Situation0 = allCoordOfFrame0

                    for _, (start, end) in enumerate(seq_start_end):
                        start = start.item()
                        end = end.item()

                        totalNumOfCollisions = 0
                        for currFrame in pred_traj_fake:
                            currPedestrians = currFrame[start:end]
                            currPedestrians = np.asarray(currPedestrians)

                            pedestrianDistances = cdist(
                                currPedestrians, currPedestrians)

                            upperTriangle = sum([
                                pedestrianDistances[i][j]
                                for i in range(1, len(pedestrianDistances))
                                for j in range(i)
                            ])
                            lowerTriangle = sum([
                                pedestrianDistances[i][j]
                                for i in range(len(pedestrianDistances))
                                for j in range(i + 1, len(pedestrianDistances))
                            ])
                            assert upperTriangle - lowerTriangle < .000001, 'UpperSum = {}, LowerSum = {}'.format(
                                upperTriangle, lowerTriangle)

                            numCollisions = [
                                pedestrianDistances[i][j] <= collisionThreshold
                                for i in range(1, len(pedestrianDistances))
                                for j in range(i)
                            ].count(True)
                            totalNumOfCollisions += numCollisions

                        dirOfCurrentTestSet = loader.dataset.data_dir
                        if (dirOfCurrentTestSet not in collisionStatistics):
                            collisionStatistics[dirOfCurrentTestSet] = (0, 0,
                                                                        [])

                        currNumOfCollisions, currTotalNumOfSituations, currCollisionSituations = collisionStatistics[
                            dirOfCurrentTestSet]
                        newNumOfCollisions = currNumOfCollisions + totalNumOfCollisions

                        if (newNumOfCollisions > currNumOfCollisions):
                            currSituation = (obs_traj[:, start:end, :],
                                             pred_traj_fake[:, start:end, :],
                                             pred_traj_gt[:, start:end, :])
                            currCollisionSituations.append(currSituation)

                        collisionStatistics[dirOfCurrentTestSet] = (
                            newNumOfCollisions, currTotalNumOfSituations + 1,
                            currCollisionSituations)

                updateCollisionStatistics()

                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde, testSetStatistics, poolingStatistics, collisionStatistics
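The per-frame collision check inside `updateCollisionStatistics` builds the full pairwise distance matrix and then counts lower-triangle entries with nested Python list comprehensions (plus a redundant upper/lower-triangle assertion). The same count can be sketched more compactly, roughly like this:

import numpy as np
from scipy.spatial.distance import cdist

def count_collisions(frame_positions, collision_threshold):
    # frame_positions: num_peds x 2 array of positions in a single frame.
    dists = cdist(frame_positions, frame_positions)
    i, j = np.triu_indices(len(frame_positions), k=1)   # each unordered pair once
    return int(np.sum(dists[i, j] <= collision_threshold))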
Example #10
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    i = 0
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.75, 0.75])
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            i = i + 1
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            # print("Trajs: ")
            # print(obs_traj[:,0,:])
            # print(obs_traj_rel[:,0,:])
            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)
            print(num_samples)
            for rr in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                pred_traj_fake_plot = pred_traj_fake.permute(1, 0, 2)
                pred_traj_fake_plot_single = pred_traj_fake_plot[
                    0, :, :].cpu().numpy()
                # print(pred_traj_fake_plot_single.shape)
                if i < 25:
                    pred_traj_fake_permuted = pred_traj_fake.permute(1, 0, 2)
                    pred_traj_gt_permuted = pred_traj_gt.permute(1, 0, 2)
                    obs_traj_permuted = obs_traj.permute(1, 0, 2)

                    # if k == 0:
                    #     view_traj(ax, pred_traj_fake_permuted[0,:,:], pred_traj_gt_permuted[0,:,:], obs_traj_permuted[0,:,:], args, all_three=True)
                    # else:
                    yy = 0
                    view_traj(ax, pred_traj_fake_permuted[yy, :, :],
                              pred_traj_gt_permuted[yy, :, :],
                              obs_traj_permuted[yy, :, :], args)

                # plt.plot(pred_traj_fake_plot_single[:,0], pred_traj_fake_plot_single[:,1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            # plt.xlim((0,17))
            # plt.ylim((-10,10))
            # plt.legend()
            if i < 25:
                plt.show()
                plt.cla()

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde
Example #11
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    fig, ax = plt.subplots(num='3d time-surface', figsize=(16, 12))

    topleft = (103.80, 1.21)
    bottomright = (103.86, 1.16)

    def plotrectangle(ax, topleft, bottomright):
        x1 = topleft[0]
        y1 = topleft[1]
        x2 = bottomright[0]
        y2 = bottomright[1]
        bordercol = 'black'
        borderalpha = 0.6
        ax.plot([x1, x2], [y1, y1], color=bordercol, alpha=borderalpha)
        ax.plot([x1, x2], [y2, y2], color=bordercol, alpha=borderalpha)
        ax.plot([x1, x1], [y1, y2], color=bordercol, alpha=borderalpha)
        ax.plot([x2, x2], [y1, y2], color=bordercol, alpha=borderalpha)

    plotrectangle(ax, topleft, bottomright)

    cols = ['b', 'b', 'b', 'b']
    colid = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)
            np_obs_traj = obs_traj.cpu().detach().numpy()
            np_pred_traj_gt = pred_traj_gt.cpu().detach().numpy()
            print("np_pred_traj_gt.shape", np_pred_traj_gt.shape)
            print("np_obs_traj[-1,:,:].shape", np_obs_traj[-1:, :, :].shape)
            np_pred_traj_gt = np.concatenate(
                (np_obs_traj[-1:, :, :], np_pred_traj_gt), axis=0)
            # np_pred_traj_gt reshape(,33,2)
            plottraj(ax, np_obs_traj, 'obs_traj', cols[colid], alpha=1.0)
            colid = (colid + 1) % 4
            plottraj(ax, np_pred_traj_gt, 'pred_traj_gt', 'g', alpha=1.0)

            for _ in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))
                np_pred_traj_fake = pred_traj_fake.cpu().detach().numpy()
                np_pred_traj_fake = np.concatenate(
                    (np_obs_traj[-1:, :, :], np_pred_traj_fake), axis=0)
                plottraj(ax, np_pred_traj_fake, 'pred_traj_fake', 'r')
            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            break
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        print("ade ", ade)
        plt.legend(bbox_to_anchor=(1.0, 1.1),
                   loc='upper left',
                   title="legend",
                   borderaxespad=0.)
        # ax.autoscale(False)
        ax.set_aspect('equal')
        # plt.show()
        plt.savefig('img' + global_model_path + '.png')
        return ade, fde
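`plottraj` is not defined in this snippet; judging from its call sites it draws one polyline per pedestrian from an array of shape `seq_len x num_peds x 2`. A hypothetical minimal version:

import matplotlib.pyplot as plt

def plottraj(ax, traj, label, color, alpha=0.6):
    # traj: numpy array of shape seq_len x num_peds x 2; label only the first line
    # so the legend does not repeat per pedestrian.
    for ped in range(traj.shape[1]):
        ax.plot(traj[:, ped, 0], traj[:, ped, 1],
                color=color, alpha=alpha,
                label=label if ped == 0 else None)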
Example #12
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0
    count_left = 0
    count_mid = 0
    count_right = 0
    count = 0
    with torch.no_grad():
        for batch in loader:

            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            #---------------previous_tra,groundtruth_tra,predicted_tra--------------
            window_num = obs_traj.size(1)

            for i in range(window_num):
                print(i)
                #-----------------------------------------------------
                for ji in range(num_samples):

                    pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                                   seq_start_end)

                    pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                     obs_traj[-1])
                    #----------visualization-----------------------------------------------------------

                    predicted_tra = pred_traj_fake[:, i, :]
                    predicted_tra_x = predicted_tra[:, 0]
                    predicted_tra_y = predicted_tra[:, 1]
                    predicted_tra_x = predicted_tra_x.cpu()
                    predicted_tra_y = predicted_tra_y.cpu()
                    predicted_tra_x = predicted_tra_x.numpy()
                    predicted_tra_y = predicted_tra_y.numpy()

                    pred_x = predicted_tra_x[-1]
                    pred_y = predicted_tra_y[-1]
                    pred_last_pos = Point(pred_x, pred_y)
                    p1 = Point(-8, 16)
                    p2 = Point(8, 16)
                    left = Getlen(p1, pred_last_pos)
                    left = left.getlen()
                    right = Getlen(p2, pred_last_pos)
                    right = right.getlen()
                    #ipdb.set_trace()
                    if -1.5 < pred_x < 1.8:
                        count_mid += 1
                    else:
                        if left >= right:
                            count_right += 1
                        else:
                            count_left += 1

            #----------------------------------------------------------------------------------
                    ade.append(
                        displacement_error(pred_traj_fake,
                                           pred_traj_gt,
                                           mode='raw'))
                    fde.append(
                        final_displacement_error(pred_traj_fake[-1],
                                                 pred_traj_gt[-1],
                                                 mode='raw'))

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
            print("第%d轮batch结束" % count)
            count += 1
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)

        return ade, fde, count_left, count_mid, count_right
Example #13
def evaluate(args, loader, generator, num_samples, plot=True):
    ade_outer, fde_outer = [], []
    total_traj = 0
    count = 0
    with torch.no_grad():
        for batch in loader:
            count += 1
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))
                # add plot module
                if plot:
                    _plot_dir = '../saves/'
                    if not os.path.exists(_plot_dir):
                        os.makedirs(_plot_dir)

                    fig = plt.figure()

                    whole_traj_fake = torch.cat([obs_traj, pred_traj_fake],
                                                dim=0)
                    whole_traj_fake = whole_traj_fake[:, 0, :]
                    whole_traj_gt = torch.cat([obs_traj, pred_traj_gt], dim=0)
                    whole_traj_gt = whole_traj_gt[:, seq_start_end[0][0]:
                                                  seq_start_end[0][1], :]

                    y_upper_limit = max([
                        torch.max(whole_traj_fake[:, 1]).data,
                        torch.max(whole_traj_gt[:, :, 1]).data
                    ]) + 1.

                    y_lower_limit = min([
                        torch.min(whole_traj_fake[:, 1]).data,
                        torch.min(whole_traj_gt[:, :, 1]).data
                    ]) - 1.

                    x_upper_limit = max([
                        torch.max(whole_traj_fake[:, 0]).data,
                        torch.max(whole_traj_gt[:, :, 0]).data
                    ]) + 1.

                    x_lower_limit = min([
                        torch.min(whole_traj_fake[:, 0]).data,
                        torch.min(whole_traj_gt[:, :, 0]).data
                    ]) - 1.

                    def plot_time_step(i):
                        fig, ax = plt.subplots()
                        # ax.plot(goal_point[0].cpu().numpy(), goal_point[1].cpu().numpy(), 'gx')
                        # plot last three point
                        gt_points_x = whole_traj_gt[max(i - 2, 0):i + 1, :,
                                                    0].cpu().numpy().flatten()
                        gt_points_y = whole_traj_gt[max(i - 2, 0):i + 1, :,
                                                    1].cpu().numpy().flatten()
                        ax.plot(gt_points_x, gt_points_y, 'b.')

                        fake_points_x = whole_traj_fake[max(i - 2, 0):i + 1,
                                                        0].cpu().numpy()
                        fake_points_y = whole_traj_fake[max(i - 2, 0):i + 1,
                                                        1].cpu().numpy()
                        if i >= args.obs_len:
                            ax.plot(fake_points_x, fake_points_y, 'r*')
                        else:
                            ax.plot(fake_points_x, fake_points_y, 'g.')

                        ax.set_ylim(y_lower_limit.cpu(), y_upper_limit.cpu())
                        ax.set_xlim(x_lower_limit.cpu(), x_upper_limit.cpu())

                        fig.canvas.draw()
                        image = np.frombuffer(fig.canvas.tostring_rgb(),
                                              dtype='uint8')
                        image = image.reshape(
                            fig.canvas.get_width_height()[::-1] + (3, ))
                        plt.close(fig)

                        return image

                    imageio.mimsave(_plot_dir + str(count) + '.gif', [
                        plot_time_step(i)
                        for i in range(args.obs_len + args.pred_len)
                    ],
                                    fps=2)

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde
Example #14
def evaluate(args, loader, generator, num_samples, path):
    # ade_outer, fde_outer = [], []
    ade_all, fde_all = AverageMeter(), AverageMeter()
    total_obj = 0
    pred_len = args.pred_len
    dataset_name = args.dataset_name
    obj_class = dataset_name.split('_')[1][:3]

    save_dir, _, _ = fileparts(path)
    save_dir = os.path.join(save_dir, 'results_%s' % get_timestring())
    mkdir_if_missing(save_dir)
    result_file_single = os.path.join(save_dir, 'results.json')
    result_dict = dict()
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, id_frame) = batch
            # obs_traj          frames x objects x 2
            # pred_traj_gt      frames x objects x 2
            # seq_start_end    start, end of ped index in each timestamp, used for pooling at every timestamp
            # id_frame          2frames x objects x 3
            # loss_mask         objects x 2frames

            num_obs = obs_traj.size(0)
            num_objects = obs_traj.size(1)
            id_frame_pred = id_frame[num_obs:]  # frames x obj x 3
            loss_mask_pred = loss_mask[:, num_obs:]  # objects x seq_len

            ade, fde = [], []
            for sample_index in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(
                    pred_traj_fake_rel, obs_traj[-1])  # frames x objects x 2

                # save results
                for object_index in range(num_objects):
                    id_frame_tmp = id_frame_pred[:, object_index, :]
                    frame = int(id_frame_tmp[0, 0].item())

                    # seqname should be the same across frames
                    seq = np.unique(id_frame_tmp[:, -1].cpu().clone().numpy())
                    assert len(seq) == 1, 'error'
                    seqname = int2seqname(seq[0])  # AIODrive only

                    # seqname should be the same across frames
                    ID = np.unique(id_frame_tmp[:, 1].cpu().clone().numpy())
                    assert len(ID) == 1, 'error'
                    ID = int(ID[0])

                    # saving to individual frames
                    final_results = torch.cat([
                        id_frame_tmp[:, :2], pred_traj_fake[:, object_index, :]
                    ],
                                              axis=-1).cpu().clone().numpy()
                    save_path = os.path.join(
                        save_dir, seqname, 'frame_%06d' % (frame),
                        'sample_%03d' % sample_index + '.txt')
                    mkdir_if_missing(save_path)
                    with open(save_path, 'a') as f:
                        np.savetxt(f, final_results, fmt="%.3f")

                    # saving to a single file, result format
                    # {seqname1: {frame1: {sample1: {ID1: {state: N x 2, prob: 1}}}, seqname2, ...}
                    if seqname not in result_dict.keys():
                        result_dict[seqname] = dict()
                    if frame not in result_dict[seqname].keys():
                        result_dict[seqname][frame] = dict()
                    if sample_index not in result_dict[seqname][frame].keys():
                        result_dict[seqname][frame][sample_index] = dict()
                    if ID not in result_dict[seqname][frame][
                            sample_index].keys():
                        result_dict[seqname][frame][sample_index][ID] = dict()
                    result_dict[seqname][frame][sample_index][
                        ID]['state'] = pred_traj_fake[:, object_index, :].cpu(
                        ).clone().numpy().tolist()
                    result_dict[seqname][frame][sample_index][ID]['prob'] = 1.0

                # compute ADE
                ade_tmp = displacement_error(
                    pred_traj_fake,
                    pred_traj_gt,
                    mode='raw',
                    mask=loss_mask_pred
                )  # list of ade for each object in the batch
                ade.append(ade_tmp)  # list of error for all samples

                # select the right last timestamp for FDE computation, i.e., not select the last frame if masked out
                pred_traj_last = []
                gt_traj_last = []
                for obj_tmp in range(num_objects):
                    loss_mask_tmp = loss_mask_pred[obj_tmp]  # seq_len
                    good_index = torch.nonzero(loss_mask_tmp)
                    if torch.nonzero(loss_mask_tmp).size(0) == 0:
                        pred_traj_last.append(torch.zeros(2).cuda() / 0)
                        gt_traj_last.append(torch.zeros(2).cuda() / 0)
                    else:
                        last_index = torch.max(good_index)
                        pred_traj_last.append(pred_traj_fake[last_index,
                                                             obj_tmp, :])
                        gt_traj_last.append(pred_traj_gt[last_index,
                                                         obj_tmp, :])
                gt_traj_last = torch.stack(gt_traj_last, dim=0)  # num_obj x 2
                pred_traj_last = torch.stack(pred_traj_last,
                                             dim=0)  # num_obj x 2

                # compute FDE
                fde_tmp = final_displacement_error(pred_traj_last,
                                                   gt_traj_last,
                                                   mode='raw')
                fde.append(fde_tmp)  # list of error for all samples

            # select the one sample with the minimum errors, remove nan
            num_invalid = torch.sum(torch.isnan(ade_tmp))
            num_valid = pred_traj_gt.size(1) - num_invalid
            total_obj += num_valid  # only add No.obj if it is valid, not all future frames are padded
            ade_ave, num_obj = best_of_K(ade, seq_start_end, err_type='ADE')
            fde_ave, num_obj = best_of_K(fde, seq_start_end, err_type='FDE')
            ade_all.update(ade_ave, n=num_obj)
            fde_all.update(fde_ave, n=num_obj)

        actual_len = pred_len * args.skip
        final_dict = {actual_len: {obj_class: result_dict}}
        with open(result_file_single, 'w') as outfile:
            json.dump(final_dict, outfile)

        return ade_all.avg, fde_all.avg
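Example #14 accumulates ADE/FDE with an `AverageMeter` instead of summing lists. A standard implementation consistent with the `.update(val, n)` / `.avg` usage above (the repository's own class may differ in detail):

class AverageMeter:
    """Running weighted average over a stream of values."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # val is the average over n items; accumulate the corresponding total.
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / self.count if self.count else 0.0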
Example #15
def evaluate(args, loader, generator, num_samples):
    ade_outer, fde_outer = [], []
    total_traj = 0

    # plt 2021-02
    plt.figure(num='global map', figsize=(8, 10))
    plt.axis('equal')
    plt.xlabel("x")
    plt.ylabel("y")
    plt.title('Global')
    plt.pause(1)

    with torch.no_grad():  # don't compute gradients
        for batch in loader:
            # batch = [tensor.cuda() for tensor in batch]
            # obs_traj[num, x_size, y_size]; [:, 0, 0] and [:, 0, 1] hold the coordinates of one trajectory
            batch = [tensor for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                # run TrajectoryGenerator; output is the predicted future trajectory in relative coordinates
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))

                # plt 2021-02
                plt.pause(1)
                plt.cla()

                center_point = obs_traj[-1, 0:2, 0]
                plt_axis = [
                    center_point[0] - 15, center_point[0] + 15,
                    center_point[1] - 15, center_point[1] + 15
                ]
                for index in range(seq_start_end[0, 0], seq_start_end[0, 1]):
                    # obs_traj
                    plt.plot(obs_traj[:, index, 0],
                             obs_traj[:, index, 1],
                             c='gray',
                             linestyle='--')

                    # pred_traj_fake
                    plt.plot(pred_traj_fake[:, index, 0],
                             pred_traj_fake[:, index, 1],
                             c='blue',
                             linestyle='-.')

                    # pred_traj_gt
                    plt.plot(pred_traj_gt[:, index, 0],
                             pred_traj_gt[:, index, 1],
                             c='black',
                             linestyle='--')

                plt.axis('equal')
                plt.axis(plt_axis)

            ade_sum = evaluate_helper(ade, seq_start_end)
            fde_sum = evaluate_helper(fde, seq_start_end)

            ade_outer.append(ade_sum)
            fde_outer.append(fde_sum)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde
Example #16
         seq_start_end) = batch

        pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end,
                                       obs_team_vec, obs_pos_vec)  # generator
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        # print(seq_start_end)
        start, end = seq_start_end[0][0], seq_start_end[0][1]
        if end - start != 11:
            continue

        ade = displacement_error(pred_traj_fake, pred_traj_gt,
                                 mode='raw')  # batch*11

        fde = final_displacement_error(pred_traj_fake[-1],
                                       pred_traj_gt[-1],
                                       mode='raw')  # batch*11

        obs_traj_list.append(obs_traj.cpu().numpy().reshape(
            8, -1, 11, 2))  # obs_len * (batch*11) * 2
        pred_traj_gt_list.append(pred_traj_gt.cpu().numpy().reshape(
            8, -1, 11, 2))
        pred_traj_fake_list.append(pred_traj_fake.cpu().numpy().reshape(
            8, -1, 11, 2))
        pos_vec_list.append(obs_pos_vec.cpu().numpy().reshape(8, -1, 11, 4))
        team_vec_list.append(obs_team_vec.cpu().numpy().reshape(8, -1, 11, 3))

        seq_start_end_list.append(seq_start_end.cpu().numpy())  # Batch * 2

        ade_list.append(ade.cpu().numpy().reshape(-1, 11))
        fde_list.append(fde.cpu().numpy().reshape(-1, 11))