Beispiel #1
0
def evaluate(args, loader, generator):
    """Draw ``args.num_samples`` trajectory samples per batch and collect
    the absolute predicted trajectories.

    Returns:
        list: one absolute-trajectory tensor per (batch, sample) pair.
    """
    predictions = []
    total_traj = 0  # pedestrians seen so far (kept for parity; not returned)
    with torch.no_grad():
        for batch in loader:
            # Move every tensor of the mini-batch onto the GPU, then unpack.
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = [
                t.cuda() for t in batch]

            total_traj += pred_traj_gt.size(1)

            last_obs_pos = obs_traj[-1]
            for _ in range(args.num_samples):
                # Generator emits relative displacements; keep only the
                # trailing ``pred_len`` prediction steps.
                rel_out = generator(obs_traj_rel, obs_traj,
                                    seq_start_end, 0, 3)
                rel_out = rel_out[-args.pred_len:]
                # Integrate displacements from the last observed position.
                predictions.append(relative_to_abs(rel_out, last_obs_pos))

    return predictions
Beispiel #2
0
def evaluate(args, loader, generator, num_samples):
    """Compute ADE/FDE over the whole loader with ``num_samples`` draws
    per batch, reduced per sequence by ``evaluate_helper``.

    Returns:
        tuple: (ade, fde) averaged per pedestrian; ADE is additionally
        averaged over the ``args.pred_len`` prediction steps.
    """
    total_traj = 0
    ade_outer = []
    fde_outer = []
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = [
                t.cuda() for t in batch]

            total_traj += pred_traj_gt.size(1)

            ade = []
            fde = []
            for _ in range(num_samples):
                # Sample one set of relative displacements and convert to
                # absolute coordinates anchored at the last observed position.
                fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end)
                fake_abs = relative_to_abs(fake_rel, obs_traj[-1])
                ade.append(
                    displacement_error(fake_abs, pred_traj_gt, mode='raw'))
                fde.append(
                    final_displacement_error(fake_abs[-1], pred_traj_gt[-1],
                                             mode='raw'))

            # Reduce the per-sample raw errors per sequence.
            ade_outer.append(evaluate_helper(ade, seq_start_end))
            fde_outer.append(evaluate_helper(fde, seq_start_end))

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        return ade, fde
Beispiel #3
0
def discriminator_step(args, batch, generator, discriminator, d_loss_fn,
                       optimizer_d):
    """Run one optimizer step for the discriminator on a single batch.

    Returns:
        dict: scalar losses for logging ('D_data_loss', 'D_total_loss').
    """
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
     loss_mask, seq_start_end) = [t.cuda() for t in batch]

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    # Sample a fake future and anchor it at the last observed position.
    pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end)
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

    # Full (observed + future) trajectories, real and fake, in both
    # absolute and relative coordinates, concatenated along time (dim 0).
    traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
    traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
    scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)

    # Compute loss with optional gradient penalty.
    data_loss = d_loss_fn(scores_real, scores_fake)
    losses['D_data_loss'] = data_loss.item()
    loss = loss + data_loss
    losses['D_total_loss'] = loss.item()

    optimizer_d.zero_grad()
    loss.backward()
    if args.clipping_threshold_d > 0:
        nn.utils.clip_grad_norm_(discriminator.parameters(),
                                 args.clipping_threshold_d)
    optimizer_d.step()

    return losses
Beispiel #4
0
def discriminator_step(batch, generator, discriminator, d_loss_fn,
                       optimizer_d):
    """Run one discriminator optimization step on a single batch.

    Variant where ``obs_traj`` / ``obs_traj_rel`` are 4-D and index 2 is
    sliced at 0 below — presumably the primary pedestrian among stacked
    neighbours (TODO confirm against the dataloader) — and scene features
    arrive as ``vgg_list``.

    Returns:
        dict: scalar losses for logging ('D_data_loss', 'D_total_loss').
    """

    batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, vgg_list) = batch
    losses = {}
    # Scalar accumulator on the same device/dtype as the ground truth.
    loss = torch.zeros(1).to(pred_traj_gt)

    generator_out = generator(obs_traj, obs_traj_rel, vgg_list)

    pred_traj_fake_rel = generator_out
    # Anchor the relative prediction at the last observed position of slice 0.
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1, :, 0, :])

    # Concatenate observed and predicted parts along time (dim 0), for the
    # real and fake variants, in absolute and relative coordinates.
    traj_real = torch.cat([obs_traj[:, :, 0, :], pred_traj_gt], dim=0)
    traj_real_rel = torch.cat([obs_traj_rel[:, :, 0, :], pred_traj_gt_rel],
                              dim=0)
    traj_fake = torch.cat([obs_traj[:, :, 0, :], pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel[:, :, 0, :], pred_traj_fake_rel],
                              dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel)
    scores_real = discriminator(traj_real, traj_real_rel)

    data_loss = d_loss_fn(scores_real, scores_fake)
    losses['D_data_loss'] = data_loss.item()
    loss += data_loss
    losses['D_total_loss'] = loss.item()

    optimizer_d.zero_grad()
    loss.backward()
    optimizer_d.step()
    return losses
Beispiel #5
0
def evaluate(loader, generator):
    """Compute ADE/FDE drawing ``NUM_SAMPLES`` trajectories per batch.

    Returns:
        tuple: (ade, fde) averaged per pedestrian; ADE additionally per
        ``PRED_LEN`` prediction step.
    """
    total_traj = 0
    ade_outer, fde_outer = [], []
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             vgg_list) = [t.cuda() for t in batch]

            total_traj += pred_traj_gt.size(1)
            # Last observed absolute position of slice 0 anchors the samples.
            last_pos = obs_traj[-1, :, 0, :]

            ade, fde = [], []
            for _ in range(NUM_SAMPLES):
                fake_rel = generator(obs_traj, obs_traj_rel, vgg_list)
                fake_abs = relative_to_abs(fake_rel, last_pos)
                ade.append(
                    displacement_error(fake_abs, pred_traj_gt, mode='raw'))
                fde.append(
                    final_displacement_error(fake_abs[-1], pred_traj_gt[-1],
                                             mode='raw'))

            ade_outer.append(evaluate_helper(ade))
            fde_outer.append(evaluate_helper(fde))

        ade = sum(ade_outer) / (total_traj * PRED_LEN)
        fde = sum(fde_outer) / total_traj
        return ade, fde
    def forward(self,
                obs_traj,
                obs_traj_rel,
                seq_start_end,
                obs_ped_speed,
                pred_ped_speed,
                pred_traj,
                train_or_test,
                speed_to_add,
                user_noise=None):
        """Generate relative future trajectories for every pedestrian.

        Args:
            obs_traj: absolute observed trajectories (time-major).
            obs_traj_rel: observed relative displacements.
            seq_start_end: (start, end) index pairs delimiting each scene.
            obs_ped_speed: per-step speed features for the observed part.
            pred_ped_speed: per-step speed features for the prediction part.
            pred_traj: ground-truth future (not used directly in this body).
            train_or_test: 1 selects the simulated-speed test path.
            speed_to_add: speed offset forwarded to pooling and decoder.
            user_noise: optional externally supplied noise (unused here).

        Returns:
            Tensor: predicted relative displacements (time-major).
        """
        batch = obs_traj_rel.size(1)
        final_encoder_h = self.encoder(obs_traj_rel, obs_ped_speed)
        if POOLING_TYPE:
            if train_or_test == 1:
                # NOTE(review): uses the global SPEED_TO_ADD here rather than
                # the ``speed_to_add`` argument — confirm this is intentional.
                simulated_ped_speed = speed_control(pred_ped_speed[0, :, :],
                                                    SPEED_TO_ADD,
                                                    seq_start_end)
                next_speed = simulated_ped_speed
            else:
                next_speed = pred_ped_speed[0, :, :]
            sspm = self.social_speed_pooling(final_encoder_h, seq_start_end,
                                             train_or_test, speed_to_add,
                                             obs_traj[-1, :, :], next_speed)
            # Concatenate encoder state with the social-speed pooling vector.
            mlp_decoder_context_input = torch.cat(
                [final_encoder_h.view(-1, self.h_dim), sspm], dim=1)
        else:
            mlp_decoder_context_input = final_encoder_h.view(-1, self.h_dim)

        noise_input = self.mlp_decoder_context(mlp_decoder_context_input)

        # Noise-augmented hidden state seeds the decoder; cell state is zeros.
        decoder_h = self.add_noise(noise_input, seq_start_end).unsqueeze(dim=0)
        if USE_GPU:
            decoder_c = torch.zeros(self.num_layers, batch, self.h_dim).cuda()
        else:
            decoder_c = torch.zeros(self.num_layers, batch, self.h_dim)

        state_tuple = (decoder_h, decoder_c)

        decoder_out = self.decoder(obs_traj[-1], obs_traj_rel[-1], state_tuple,
                                   seq_start_end, speed_to_add, pred_ped_speed,
                                   train_or_test)
        pred_traj_fake_rel, final_decoder_h = decoder_out

        # LOGGING THE OUTPUT OF ALL SEQUENCES TO TEST THE SPEED AND TRAJECTORIES
        if train_or_test == 1:
            # NOTE(review): ``simulated_trajectories`` and ``speed_added`` are
            # built but never used or returned — dead code kept for parity.
            simulated_trajectories = []
            for _, (start, end) in enumerate(seq_start_end):
                start = start.item()
                end = end.item()
                obs_test_traj = obs_traj[:, start:end, :]
                pred_test_traj_rel = pred_traj_fake_rel[:, start:end, :]
                pred_test_traj = relative_to_abs(pred_test_traj_rel,
                                                 obs_test_traj[-1])
                speed_added = pred_ped_speed[0, start:end, :]
                simulated_trajectories.append(pred_test_traj)
        return pred_traj_fake_rel
Beispiel #7
0
def generator_step(args, batch, generator, discriminator, g_loss_fn,
                   optimizer_g, discriminator_wight):
    """Run one generator update: best-of-``args.best_k`` variety L2 loss
    plus a weighted adversarial loss.

    Args:
        args: config namespace (reads use_gpu, obs_len, best_k,
            l2_loss_weight, clipping_threshold_g).
        batch: dataloader batch of seven tensors (see unpacking below).
        generator: trajectory generator network.
        discriminator: discriminator network.
        g_loss_fn: adversarial loss on the fake scores.
        optimizer_g: generator optimizer.
        discriminator_wight: weight on the adversarial term.
            NOTE(review): name is a typo for "weight" — kept because
            renaming would break keyword callers.

    Returns:
        dict: scalar losses for logging.
    """
    if args.use_gpu == 1:
        batch = [tensor.cuda() for tensor in batch]
    else:
        batch = [tensor for tensor in batch]
    # batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
     loss_mask, seq_start_end) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    # Only the prediction part of the mask is relevant to the L2 loss.
    loss_mask = loss_mask[:, args.obs_len:]

    for _ in range(args.best_k):
        generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if args.l2_loss_weight > 0:
            g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(
                pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='raw'))

    # Variety loss: for each scene keep the minimum L2 over the k samples.
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if args.l2_loss_weight > 0:
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
                loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel

    # Adversarial term is evaluated on the last sampled trajectory.
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += (discriminator_wight * discriminator_loss)
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    if args.clipping_threshold_g > 0:
        nn.utils.clip_grad_norm_(generator.parameters(),
                                 args.clipping_threshold_g)
    optimizer_g.step()

    return losses
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    """Run one generator update (similar to the Social GAN code):
    best-of-``BEST_K`` variety L2 loss plus an adversarial loss.

    Globals used: USE_GPU, OBS_LEN, BEST_K, L2_LOSS_WEIGHT, TRAIN_METRIC,
    SPEED_TO_ADD.

    Returns:
        dict: scalar losses for logging.
    """
    if USE_GPU:
        batch = [tensor.cuda() for tensor in batch]
    else:
        batch = [tensor for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
     seq_start_end, obs_ped_speed, pred_ped_speed) = batch

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    # Only the prediction part of the mask matters for the L2 loss.
    loss_mask = loss_mask[:, OBS_LEN:]

    for _ in range(BEST_K):
        generator_out = generator(obs_traj, obs_traj_rel, seq_start_end,
                                  obs_ped_speed, pred_ped_speed, pred_traj_gt,
                                  TRAIN_METRIC, SPEED_TO_ADD)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if L2_LOSS_WEIGHT > 0:
            g_l2_loss_rel.append(L2_LOSS_WEIGHT * l2_loss(
                pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='raw'))

    # Variety loss: for each scene keep the minimum L2 over the K samples.
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if L2_LOSS_WEIGHT > 0:
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
                loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel
    # Adversarial term is evaluated on the last sampled trajectory.
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed,
                                seq_start_end)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Beispiel #9
0
def evaluate(args, loader, generator, num_samples):
    """Evaluate ADE/FDE on CPU and collect raw trajectories as numpy arrays.

    NOTE(review): each of the ``num_samples`` draws contributes a summed
    error to ``ade_outer``/``fde_outer`` (the min-over-samples variant is
    commented out below), so for num_samples > 1 the returned metrics are
    scaled by the number of samples — confirm this is intended.

    Returns:
        tuple: (ade, fde, trajs) where trajs is a list of
        [obs, pred_fake, pred_gt, seq_start_end] numpy arrays, one entry
        per (batch, sample) pair.
    """
    ade_outer, fde_outer = [], []
    total_traj = 0
    trajs = []
    with torch.no_grad():
        for batch in loader:
            # batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch
            # Generator inputs are cast to float32; metrics use float64.
            obs_traj = obs_traj.float()
            obs_traj_rel = obs_traj_rel.float()

            pred_traj_gt = pred_traj_gt.double()
            ade, fde = [], []
            total_traj += pred_traj_gt.size(1)

            for _ in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                pred_traj_fake = pred_traj_fake.double()
                # Keep a CPU/numpy copy of everything for offline analysis.
                trajs.append([
                    obs_traj.cpu().numpy(),
                    pred_traj_fake.cpu().numpy(),
                    pred_traj_gt.cpu().numpy(),
                    seq_start_end.cpu().numpy()
                ])

                ade_traj = displacement_error(pred_traj_fake,
                                              pred_traj_gt,
                                              mode='sum')
                fde_traj = final_displacement_error(pred_traj_fake[-1],
                                                    pred_traj_gt[-1],
                                                    mode='sum')

                # ade.append(displacement_error(
                #     pred_traj_fake, pred_traj_gt, mode='sum'
                # ))
                # fde.append(final_displacement_error(
                #     pred_traj_fake[-1], pred_traj_gt[-1], mode='sum'
                # ))

                # ade_sum = evaluate_helper(ade, seq_start_end)
                # fde_sum = evaluate_helper(fde, seq_start_end)

                # ade_outer.append(ade_sum)
                # fde_outer.append(fde_sum)
                ade_outer.append(ade_traj)
                fde_outer.append(fde_traj)
        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / (total_traj)
        return ade, fde, trajs
Beispiel #10
0
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    """Run one generator update for the graph-based (V/A adjacency) variant.

    Samples ``BEST_K`` futures, keeps the minimum L2 (variety loss), then
    adds the adversarial loss from the discriminator. Layout notes below
    use T=time, V=pedestrians, C=coords, N=batch.

    Returns:
        dict: scalar losses for logging.
    """

    batch = [tensor.cuda() for tensor in batch]
    # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, vgg_list) = batch
    # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel) = batch
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, n_l, l_m, V_obs,
     A_obs, V_pre, A_pre, vgg_list) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    g_l2_loss_rel = []
    for _ in range(BEST_K):
        # generator_out = generator(obs_traj, obs_traj_rel, vgg_list)
        generator_out = generator(obs_traj, obs_traj_rel, V_obs, A_obs,
                                  vgg_list)  # generated displacements
        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                         obs_traj[0, :, :, -1])  # 12*3*2 TVC

        g_l2_loss_rel.append(
            l2_loss(  # L2 between generated and true displacements, n*1
                pred_traj_fake_rel,  # T V C
                pred_traj_gt_rel,  # N V C T
                mode='raw'))
    # K trajectories sampled; their losses are reduced below, shape (K, V).
    npeds = obs_traj.size(1)  # V
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)  # scalar accumulator
    g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)  # stack -> (V, K)
    _g_l2_loss_rel = torch.sum(g_l2_loss_rel, dim=0)  # sum over peds -> (K,)

    _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / (npeds * PRED_LEN
                                                  )  # best sample, averaged
    g_l2_loss_sum_rel += _g_l2_loss_rel
    losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
    loss += g_l2_loss_sum_rel  # variety (trajectory) loss
    pred_traj_fake = pred_traj_fake.permute(1, 2, 0)  # TVC -> VCT
    pred_traj_fake_rel = pred_traj_fake_rel.permute(1, 2, 0)
    traj_fake = torch.cat([obs_traj[0], pred_traj_fake], dim=2)  # VCT, T=20
    traj_fake_rel = torch.cat([obs_traj_rel[0], pred_traj_fake_rel], dim=2)

    scores_fake = discriminator(traj_fake, traj_fake_rel)  # fake-traj scores
    discriminator_loss = g_loss_fn(scores_fake)
    loss += discriminator_loss  # add the adversarial loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Beispiel #11
0
def evaluate(loader, generator, num_samples):
    """Evaluate ADE/FDE with ``num_samples`` draws per batch, and optionally
    dump simulated trajectories for speed verification / visualization.

    Globals used: USE_GPU, TEST_METRIC, SPEED_TO_ADD, PRED_LEN,
    VERIFY_OUTPUT_SPEED, ANIMATED_VISUALIZATION_CHECK.

    Returns:
        tuple: (ade, fde) averaged over all pedestrians; ADE additionally
        over the PRED_LEN prediction steps.
    """
    ade_outer, fde_outer, simulated_output, total_traj, sequences = [], [], [], [], []
    with torch.no_grad():
        for batch in loader:
            if USE_GPU:
                batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
             seq_start_end, obs_ped_speed, pred_ped_speed) = batch

            ade, fde, sim_op = [], [], []
            total_traj.append(pred_traj_gt.size(1))

            for _ in range(num_samples):
                # BUGFIX: the original ``if TEST_METRIC: ... else: ...`` ran
                # the exact same generator call in both branches; the dead
                # conditional is collapsed into a single call.
                pred_traj_fake_rel = generator(
                    obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed,
                    pred_ped_speed, pred_traj_gt, TEST_METRIC, SPEED_TO_ADD)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                ade.append(displacement_error(pred_traj_fake, pred_traj_gt,
                                              mode='raw'))
                fde.append(final_displacement_error(pred_traj_fake[-1],
                                                    pred_traj_gt[-1],
                                                    mode='raw'))
                sim_op.append(pred_traj_fake)

                # Per-scene pedestrian counts (one entry per scene per sample).
                for _, (start, end) in enumerate(seq_start_end):
                    num_ped = end - start
                    sequences.append(num_ped)

            ade_outer.append(
                evaluate_helper(torch.stack(ade, dim=1), seq_start_end))
            fde_outer.append(
                evaluate_helper(torch.stack(fde, dim=1), seq_start_end))
            simulated_output.append(torch.cat(sim_op, dim=0))

        ade = sum(ade_outer) / (sum(total_traj) * PRED_LEN)
        fde = sum(fde_outer) / (sum(total_traj))
        simulated_traj_for_visualization = torch.cat(simulated_output, dim=1)
        sequences = torch.cumsum(torch.stack(sequences, dim=0), dim=0)

        if TEST_METRIC and VERIFY_OUTPUT_SPEED:
            # The speed can be verified for different sequences and this method runs for n number of batches.
            verify_speed(simulated_traj_for_visualization, sequences)

        if ANIMATED_VISUALIZATION_CHECK:
            # Trajectories at User-defined speed for Visualization
            with open('SimulatedTraj.pkl', 'wb') as f:
                pickle.dump(simulated_traj_for_visualization, f,
                            pickle.HIGHEST_PROTOCOL)
            # Sequence list file used for Visualization
            with open('Sequences.pkl', 'wb') as f:
                pickle.dump(sequences, f, pickle.HIGHEST_PROTOCOL)
        return ade, fde
Beispiel #12
0
def discriminator_step(batch, generator, discriminator, d_loss_fn, optimizer_d):
    """Run one discriminator update for the (optionally multi-conditional)
    model.

    When ``MULTI_CONDITIONAL_MODEL`` is set, the batch additionally carries
    per-pedestrian label tensors that are fed to both the generator and the
    discriminator.

    Returns:
        dict: scalar losses for logging ('D_data_loss', 'D_total_loss').
    """
    if USE_GPU:
        batch = [tensor.cuda() for tensor in batch]
    else:
        batch = [tensor for tensor in batch]
    if MULTI_CONDITIONAL_MODEL:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed, obs_label, pred_label,
         obs_obj_rel_speed) = batch
        generator_out, _ = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                  pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=obs_label, pred_label=pred_label)
    else:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed, obs_obj_rel_speed) = batch
        generator_out, _ = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                  pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=None, pred_label=None)

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    pred_traj_fake_rel = generator_out
    # Anchor the sampled displacements at the last observed position.
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

    # Full-length (observed + future) sequences, real vs. fake, plus the
    # concatenated speed features.
    traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
    traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)
    if MULTI_CONDITIONAL_MODEL:
        label_info = torch.cat([obs_label, pred_label], dim=0)
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=label_info)
        scores_real = discriminator(traj_real, traj_real_rel, ped_speed, label=label_info)
    else:
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=None)
        scores_real = discriminator(traj_real, traj_real_rel, ped_speed, label=None)

    data_loss = d_loss_fn(scores_real, scores_fake)
    losses['D_data_loss'] = data_loss.item()
    loss += data_loss
    losses['D_total_loss'] = loss.item()

    optimizer_d.zero_grad()
    loss.backward()
    optimizer_d.step()

    return losses
Beispiel #13
0
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    """Run one generator update: best-of-``BEST_K`` variety L2 loss plus an
    adversarial loss from the discriminator.

    Returns:
        dict: scalar losses for logging.
    """
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
     vgg_list) = [t.cuda() for t in batch]

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    # Sample BEST_K futures, recording the raw L2 loss of each.
    sample_losses = []
    last_pos = obs_traj[-1, :, 0, :]
    for _ in range(BEST_K):
        pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, vgg_list)
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, last_pos)
        sample_losses.append(
            l2_loss(pred_traj_fake_rel, pred_traj_gt_rel, mode='raw'))

    # Variety loss: keep only the best (minimum) of the K samples,
    # normalized by pedestrians * prediction length.
    npeds = obs_traj.size(1)
    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    stacked = torch.stack(sample_losses, dim=1)
    per_sample = torch.sum(stacked, dim=0)
    g_l2_loss_sum_rel += torch.min(per_sample) / (npeds * PRED_LEN)
    losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
    loss += g_l2_loss_sum_rel

    # Adversarial term uses the last sampled trajectory.
    traj_fake = torch.cat([obs_traj[:, :, 0, :], pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel[:, :, 0, :], pred_traj_fake_rel],
                              dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Beispiel #14
0
def discriminator_step(batch, generator, discriminator, d_loss_fn,
                       optimizer_d):
    """Run one discriminator update for the graph-based (V/A adjacency)
    variant.

    Layout notes: T=time, V=pedestrians, C=coords, N=batch; the
    discriminator consumes (V, C, T) sequences.

    Returns:
        dict: scalar losses for logging ('D_data_loss', 'D_total_loss').
    """

    batch = [tensor.cuda() for tensor in batch]
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, n_l, l_m, V_obs,
     A_obs, V_pre, A_pre, vgg_list) = batch
    # (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel) = batch
    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    # generator_out = generator(obs_traj, obs_traj_rel, vgg_list)
    generator_out = generator(obs_traj, obs_traj_rel, V_obs, A_obs, vgg_list)
    pred_traj_fake_rel = generator_out
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[0, :, :,
                                                                  -1])  # V C

    traj_real = torch.cat([obs_traj[0], pred_traj_gt[0]],
                          dim=2)  # 1*3*2*20 trajectory sequence

    traj_real_rel = torch.cat([obs_traj_rel[0], pred_traj_gt_rel[0]],
                              dim=2)  # 1*3*2*20 displacement sequence N V C T
    pred_traj_fake = pred_traj_fake.permute(1, 2, 0)  # T V C -> V C T
    # pred_traj_fake=pred_traj_fake.unsqueeze(dim=0)
    pred_traj_fake_rel = pred_traj_fake_rel.permute(1, 2, 0)  # T V C -> V C T
    # pred_traj_fake_rel=pred_traj_fake_rel.unsqueeze(dim=0)
    traj_fake = torch.cat([obs_traj[0], pred_traj_fake],
                          dim=2)  # observed + predicted, V C T
    traj_fake_rel = torch.cat([obs_traj_rel[0], pred_traj_fake_rel],
                              dim=2)  # observed + predicted offsets, V C T

    scores_fake = discriminator(traj_fake, traj_fake_rel)  # fake scores, V C T input
    scores_real = discriminator(traj_real, traj_real_rel)  # input V C T, output V 1

    data_loss = d_loss_fn(scores_real, scores_fake)  # BCE
    losses['D_data_loss'] = data_loss.item()
    loss += data_loss
    losses['D_total_loss'] = loss.item()

    optimizer_d.zero_grad()
    loss.backward()
    optimizer_d.step()
    return losses
def discriminator_step(batch, generator, discriminator, d_loss_fn,
                       optimizer_d):
    """Run one discriminator update (mirrors the Social GAN training step)
    for the speed-conditioned model.

    Returns:
        dict: scalar losses for logging ('D_data_loss', 'D_total_loss').
    """
    tensors = [t.cuda() for t in batch] if USE_GPU else list(batch)
    (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
     seq_start_end, obs_ped_speed, pred_ped_speed) = tensors

    losses = {}
    loss = torch.zeros(1).to(pred_traj_gt)

    # One sampled future, anchored at the final observed position.
    pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, seq_start_end,
                                   obs_ped_speed, pred_ped_speed,
                                   pred_traj_gt, TRAIN_METRIC, SPEED_TO_ADD)
    pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

    # Full-length (observed + future) sequences for the discriminator,
    # real vs. fake, plus concatenated speed features.
    traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
    traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)

    scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed,
                                seq_start_end)
    scores_real = discriminator(traj_real, traj_real_rel, ped_speed,
                                seq_start_end)

    data_loss = d_loss_fn(scores_real, scores_fake)
    losses['D_data_loss'] = data_loss.item()
    loss = loss + data_loss
    losses['D_total_loss'] = loss.item()

    optimizer_d.zero_grad()
    loss.backward()
    optimizer_d.step()

    return losses
Beispiel #16
0
def validate(args, model, val_loader, epoch, writer):
    """Run the model over the validation set and log per-pedestrian ADE/FDE.

    Returns:
        float: average ADE over the validation set.
    """
    ade = utils.AverageMeter("ADE", ":.6f")
    fde = utils.AverageMeter("FDE", ":.6f")
    progress = utils.ProgressMeter(len(val_loader), [ade, fde],
                                   prefix="Test: ")

    model.eval()
    with torch.no_grad():
        for i, batch in enumerate(val_loader):
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = [
                t.cuda() for t in batch]

            loss_mask = loss_mask[:, args.obs_len:]
            pred_traj_fake_rel = model(obs_traj_rel, obs_traj, seq_start_end)

            # Keep only the prediction steps and convert to absolute coords.
            pred_part = pred_traj_fake_rel[-args.pred_len:]
            pred_traj_fake = relative_to_abs(pred_part, obs_traj[-1])

            num_peds = obs_traj.shape[1]
            ade_, fde_ = cal_ade_fde(pred_traj_gt, pred_traj_fake)
            ade_ = ade_ / (num_peds * args.pred_len)
            fde_ = fde_ / num_peds
            ade.update(ade_, num_peds)
            fde.update(fde_, num_peds)

            if i % args.print_every == 0:
                progress.display(i)

        logging.info(" * ADE  {ade.avg:.3f} FDE  {fde.avg:.3f}".format(
            ade=ade, fde=fde))
        writer.add_scalar("val_ade", ade.avg, epoch)
    return ade.avg
Beispiel #17
0
def evaluate(args, loader, generator):
    """Compute ADE/FDE with ``args.num_samples`` draws per batch, reducing
    per sequence via ``evaluate_helper`` on ``cal_ade_fde`` outputs.

    Returns:
        tuple: (ade, fde) averaged per pedestrian; ADE additionally per
        ``args.pred_len`` prediction step.
    """
    ade_outer, fde_outer = [], []
    total_traj = 0
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = [
                t.cuda() for t in batch]

            total_traj += pred_traj_gt.size(1)
            last_pos = obs_traj[-1]

            ade, fde = [], []
            for _ in range(args.num_samples):
                # The generator returns displacements for the whole
                # sequence; keep only the trailing pred_len steps.
                rel = generator(obs_traj_rel, obs_traj, seq_start_end, 0, 3)
                rel = rel[-args.pred_len:]
                fake = relative_to_abs(rel, last_pos)
                ade_, fde_ = cal_ade_fde(pred_traj_gt, fake)
                ade.append(ade_)
                fde.append(fde_)

            ade_outer.append(evaluate_helper(ade, seq_start_end))
            fde_outer.append(evaluate_helper(fde, seq_start_end))

        ade = sum(ade_outer) / (total_traj * args.pred_len)
        fde = sum(fde_outer) / total_traj
        return ade, fde
Beispiel #18
0
def check_accuracy(args, loader, generator, limit=False):
    """Compute validation metrics for ``generator`` over ``loader``.

    Metrics include masked L2 losses (absolute and relative coordinates)
    and ADE/FDE, overall and split into linear / non-linear pedestrians.

    Args:
        args: namespace providing ``obs_len``, ``pred_len`` and
            ``num_samples_check``.
        loader: iterator yielding SGAN-style batch tuples.
        generator: trajectory generator; switched to eval mode for the
            pass and restored to train mode before returning.
        limit: if True, stop once ``args.num_samples_check`` trajectories
            have been processed.

    Returns:
        dict mapping metric names to floats.
    """
    metrics = {}
    # Bug fix: the former ``([], ) * 2`` / ``([], ) * 3`` initialisers
    # repeat a reference to a SINGLE list, so every accumulator aliased
    # the same object and unrelated metrics were summed together.
    # Each accumulator must be an independent list.
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    loss_mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch

            linear_ped = 1 - non_linear_ped
            # Only the prediction horizon of the mask matters here.
            loss_mask = loss_mask[:, args.obs_len:]
            obs_traj = obs_traj.float()
            obs_traj_rel = obs_traj_rel.float()
            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                           seq_start_end)
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

            g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
                pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                pred_traj_fake_rel, loss_mask)
            ade, ade_l, ade_nl = cal_ade(pred_traj_gt, pred_traj_fake,
                                         linear_ped, non_linear_ped)

            fde, fde_l, fde_nl = cal_fde(pred_traj_gt, pred_traj_fake,
                                         linear_ped, non_linear_ped)

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()
            # Optionally cap the evaluation cost.
            if limit and total_traj >= args.num_samples_check:
                break

    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj
    # Guard against empty linear / non-linear subsets.
    if total_traj_l != 0:
        metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
        metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
    else:
        metrics['ade_l'] = 0
        metrics['fde_l'] = 0
    if total_traj_nl != 0:
        metrics['ade_nl'] = sum(disp_error_nl) / (total_traj_nl *
                                                  args.pred_len)
        metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
    else:
        metrics['ade_nl'] = 0
        metrics['fde_nl'] = 0

    generator.train()
    return metrics
def get_trajs(frame, step=10):
    '''
    Build observation / ground-truth trajectories around a given frame and
    run every loaded model on the observed part.

    :param frame: last observed frame
    :param step: step between each frame
    :returns None if no prediction can be made, or trajs_, a dictionary containing trajectories for each pedestrian

    Relies on module-level globals: ``data`` (frameID/pedID/x/y table),
    ``obs_len``, ``pred_len``, ``models`` and the per-model extra inputs
    ``dat_seg``/``dat``/``dat_segnet_full``/``dat_segnet`` (presumably
    scene/segmentation features -- confirm at their definition site).
    '''

    trajs_ = {}

    # -1 because we include in selection
    seq_range = [frame - (obs_len - 1) * step, frame + pred_len * step]
    print("seq_range",seq_range)
    obs_range = [frame - (obs_len - 1) * step, frame]
    print("obs_range",obs_range)
    # NOTE(review): ``inclusive=True`` is deprecated in pandas >= 1.3 and
    # removed in 2.0 (the replacement is inclusive="both") -- confirm the
    # pinned pandas version.
    raw_obs_seq = data.loc[data["frameID"].between(obs_range[0], obs_range[1], inclusive=True)]
    raw_pred_seq = data.loc[data["frameID"].between(obs_range[1] + step, seq_range[1], inclusive=True)]
    peds_in_seq = raw_obs_seq.pedID.unique()

    # Per-pedestrian (x, y) series, laid out as (n_peds, 2, obs_len).
    curr_seq = np.zeros((len(peds_in_seq), 2, obs_len))
    curr_seq_rel = np.zeros((len(peds_in_seq), 2, obs_len))
    id_list = []
    considered_ped = 0

    for ped_id in peds_in_seq:
        obs_ped_seq = raw_obs_seq.loc[raw_obs_seq.pedID == ped_id]
        # seq has to have at least obs_len length
        if len(obs_ped_seq.frameID) == obs_len:
            id_list.append(ped_id)

            pred_ped_seq = raw_pred_seq.loc[raw_pred_seq.pedID == ped_id]
            trajs_[ped_id] = {}

            # (2, obs_len) absolute coordinates and frame-to-frame deltas;
            # the first relative step is left at zero.
            obs_traj = obs_ped_seq[["x", "y"]].values.transpose()
            obs_traj_rel = np.zeros(obs_traj.shape)
            obs_traj_rel[:, 1:] = obs_traj[:, 1:] - obs_traj[:, :-1]

            curr_seq[considered_ped, :, 0:obs_len] = obs_traj
            curr_seq_rel[considered_ped, :, 0:obs_len] = obs_traj_rel

            trajs_[ped_id]["obs"] = obs_traj.transpose()
            trajs_[ped_id]["pred_gt"] = pred_ped_seq[["x", "y"]].values

            considered_ped += 1
    print(considered_ped)
    if considered_ped > 0:
        # Permute to the (seq_len, n_peds, 2) layout the models expect.
        obs_list_tensor = torch.from_numpy(curr_seq[:considered_ped, :]).permute(2, 0, 1).float()#.cuda().float()
        obs_list_rel_tensor = torch.from_numpy(curr_seq_rel[:considered_ped, :]).permute(2, 0, 1).float()#.cuda().float()
        # One sequence spanning all considered pedestrians.
        seq_start_end_tensor = torch.tensor([[0, considered_ped]])

        # Each model variant takes a different extra input; every output is
        # converted back to absolute coordinates, reordered to
        # (n_peds, seq_len, 2), and stored under "pred_<model_name>".
        for model_name, model in models.items():
            if model_name=='rnn':
                pred_rel = model(obs_list_tensor, obs_list_rel_tensor, seq_start_end_tensor,dat_seg)
                pred_abs = relative_to_abs(pred_rel, obs_list_tensor[-1]).detach().cpu().numpy()
                pred_abs_reorder = np.swapaxes(pred_abs, 0, 1)
                key = "pred_" + model_name
                for i in range(considered_ped):
                    ped_id = id_list[i]
                    trajs_[ped_id][key] = pred_abs_reorder[i]
            
            if model_name=='vgg':
                pred_rel = model(obs_list_tensor, obs_list_rel_tensor, seq_start_end_tensor,dat)
                pred_abs = relative_to_abs(pred_rel, obs_list_tensor[-1]).detach().cpu().numpy()
                pred_abs_reorder = np.swapaxes(pred_abs, 0, 1)
                key = "pred_" + model_name
                for k in range(considered_ped):
                     ped_id = id_list[k]
                     trajs_[ped_id][key] = pred_abs_reorder[k]
            if model_name == 'sgan':
                pred_rel = model(obs_list_tensor, obs_list_rel_tensor, seq_start_end_tensor)
                pred_abs = relative_to_abs(pred_rel, obs_list_tensor[-1]).detach().cpu().numpy()
                pred_abs_reorder = np.swapaxes(pred_abs, 0, 1)
                key = "pred_" + model_name
                for k in range(considered_ped):
                    ped_id = id_list[k]
                    trajs_[ped_id][key] = pred_abs_reorder[k]
            if model_name == 'segnet_full':
                pred_rel = model(obs_list_tensor, obs_list_rel_tensor, seq_start_end_tensor,dat_segnet_full)
                pred_abs = relative_to_abs(pred_rel, obs_list_tensor[-1]).detach().cpu().numpy()
                pred_abs_reorder = np.swapaxes(pred_abs, 0, 1)
                key = "pred_" + model_name
                for k in range(considered_ped):
                    ped_id = id_list[k]
                    trajs_[ped_id][key] = pred_abs_reorder[k]

            if model_name == 'segnet':
                pred_rel = model(obs_list_tensor, obs_list_rel_tensor, seq_start_end_tensor, dat_segnet)
                pred_abs = relative_to_abs(pred_rel, obs_list_tensor[-1]).detach().cpu().numpy()
                pred_abs_reorder = np.swapaxes(pred_abs, 0, 1)
                key = "pred_" + model_name
                for k in range(considered_ped):
                    ped_id = id_list[k]
                    trajs_[ped_id][key] = pred_abs_reorder[k]

        return trajs_

    else:
        return None
def check_accuracy(loader, generator, discriminator, d_loss_fn):
    """Evaluate generator and discriminator on ``loader``.

    Computes masked L2 losses, ADE/FDE, speed errors (MSAE/FSE) and the
    discriminator loss.  ``generator`` is switched to eval mode for the
    pass and restored to train mode before returning.

    Relies on module-level config: USE_GPU, OBS_LEN, TRAIN_METRIC,
    SPEED_TO_ADD, NUM_SAMPLE_CHECK, PRED_LEN.

    Returns:
        dict mapping metric names to floats.
    """
    d_losses = []
    metrics = {}
    # Bug fix: ``([], ) * 2`` repeats a reference to ONE list, so both
    # names aliased the same accumulator and abs/rel L2 losses were mixed
    # together.  Use two independent lists.
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, f_disp_error, mean_speed_disp_error, final_speed_disp_error = [], [], [], []
    total_traj = 0
    loss_mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            if USE_GPU:
                batch = [tensor.cuda() for tensor in batch]
            else:
                batch = [tensor for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask,
             seq_start_end, obs_ped_speed, pred_ped_speed) = batch

            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                           seq_start_end, obs_ped_speed,
                                           pred_ped_speed, pred_traj_gt,
                                           TRAIN_METRIC, SPEED_TO_ADD)
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
            # Only the prediction horizon of the mask is relevant.
            loss_mask = loss_mask[:, OBS_LEN:]

            g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
                pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                pred_traj_fake_rel, loss_mask)
            ade = displacement_error(pred_traj_gt, pred_traj_fake)
            fde = final_displacement_error(pred_traj_gt, pred_traj_fake)

            # Prepend the last observed position so speeds can be derived
            # from consecutive predicted positions.
            last_pos = obs_traj[-1]
            traj_for_speed_cal = torch.cat(
                [last_pos.unsqueeze(dim=0), pred_traj_fake], dim=0)
            msae = cal_msae(pred_ped_speed, traj_for_speed_cal)
            fse = cal_fse(pred_ped_speed[-1], pred_traj_fake)

            # Full (observed + predicted) trajectories for the critic.
            traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
            traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
            traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel],
                                      dim=0)
            ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)

            scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed,
                                        seq_start_end)
            scores_real = discriminator(traj_real, traj_real_rel, ped_speed,
                                        seq_start_end)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            f_disp_error.append(fde.item())
            mean_speed_disp_error.append(msae.item())
            final_speed_disp_error.append(fse.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            # Cap the evaluation cost at NUM_SAMPLE_CHECK trajectories.
            if total_traj >= NUM_SAMPLE_CHECK:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum
    metrics['ade'] = sum(disp_error) / (total_traj * PRED_LEN)
    metrics['fde'] = sum(f_disp_error) / total_traj
    metrics['msae'] = sum(mean_speed_disp_error) / (total_traj * PRED_LEN)
    metrics['fse'] = sum(final_speed_disp_error) / total_traj

    generator.train()
    return metrics
def evaluate(loader, generator, num_samples, speed_regressor):
    """Multi-sample evaluation with speed conditioning.

    Draws ``num_samples`` trajectories per batch, keeps the best sample per
    sequence (min-ADE / min-FDE via ``evaluate_helper``) and additionally
    reports the collision percentage (and, in simulation mode, verifies the
    user-defined speed) on the stitched best trajectories.

    Relies on module-level config: USE_GPU, MULTI_CONDITIONAL_MODEL,
    SINGLE_CONDITIONAL_MODEL, TEST_METRIC, PRED_LEN.

    Returns:
        (ade, fde, collision_percentage)
    """
    ade_outer, fde_outer, simulated_output, total_traj, sequences, labels, observed_traj = [], [], [], [], [], [], []
    with torch.no_grad():
        for batch in loader:
            if USE_GPU:
                batch = [tensor.cuda() for tensor in batch]
            else:
                batch = [tensor for tensor in batch]
            # Multi-conditional batches carry extra per-step labels.
            if MULTI_CONDITIONAL_MODEL:
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
                 loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed,
                 obs_label, pred_label, obs_obj_rel_speed) = batch
            else:
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
                 loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed,
                 obs_obj_rel_speed) = batch

            ade, fde, traj_op, traj_obs = [], [], [], []
            total_traj.append(pred_traj_gt.size(1))
            sequences.append(seq_start_end)
            if MULTI_CONDITIONAL_MODEL:
                labels.append(torch.cat([obs_label, pred_label], dim=0))

            for _ in range(num_samples):
                if TEST_METRIC == 1:  # USED DURING PREDICTION ENVIRONMENT
                    # Prediction mode is a two-pass scheme: first run the
                    # generator to obtain the encoder state, regress future
                    # speeds from it, then condition a second generator
                    # pass on the regressed speeds.
                    if MULTI_CONDITIONAL_MODEL:
                        _, final_enc_h = generator(obs_traj,
                                                   obs_traj_rel,
                                                   seq_start_end,
                                                   obs_ped_speed,
                                                   pred_ped_speed,
                                                   pred_traj_gt,
                                                   0,
                                                   None,
                                                   obs_obj_rel_speed,
                                                   obs_label=obs_label,
                                                   pred_label=pred_label)
                        fake_speed = speed_regressor(obs_ped_speed,
                                                     final_enc_h)
                        pred_traj_fake_rel, _ = generator(
                            obs_traj,
                            obs_traj_rel,
                            seq_start_end,
                            obs_ped_speed,
                            pred_ped_speed,
                            pred_traj_gt,
                            TEST_METRIC,
                            fake_speed,
                            obs_obj_rel_speed,
                            obs_label=obs_label,
                            pred_label=pred_label)
                    else:
                        _, final_enc_h = generator(obs_traj,
                                                   obs_traj_rel,
                                                   seq_start_end,
                                                   obs_ped_speed,
                                                   pred_ped_speed,
                                                   pred_traj_gt,
                                                   0,
                                                   None,
                                                   obs_obj_rel_speed,
                                                   obs_label=None,
                                                   pred_label=None)
                        fake_speed = speed_regressor(obs_ped_speed,
                                                     final_enc_h)
                        pred_traj_fake_rel, _ = generator(obs_traj,
                                                          obs_traj_rel,
                                                          seq_start_end,
                                                          obs_ped_speed,
                                                          pred_ped_speed,
                                                          pred_traj_gt,
                                                          TEST_METRIC,
                                                          fake_speed,
                                                          obs_obj_rel_speed,
                                                          obs_label=None,
                                                          pred_label=None)
                elif TEST_METRIC == 2:  # Used during Simulation environment
                    # Simulation mode: single pass, no regressed speed.
                    if MULTI_CONDITIONAL_MODEL:
                        pred_traj_fake_rel, _ = generator(
                            obs_traj,
                            obs_traj_rel,
                            seq_start_end,
                            obs_ped_speed,
                            pred_ped_speed,
                            pred_traj_gt,
                            TEST_METRIC,
                            None,
                            obs_obj_rel_speed,
                            obs_label=obs_label,
                            pred_label=pred_label)
                    else:
                        pred_traj_fake_rel, _ = generator(obs_traj,
                                                          obs_traj_rel,
                                                          seq_start_end,
                                                          obs_ped_speed,
                                                          pred_ped_speed,
                                                          pred_traj_gt,
                                                          TEST_METRIC,
                                                          None,
                                                          obs_obj_rel_speed,
                                                          obs_label=None,
                                                          pred_label=None)

                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])
                # Raw (unsummed) errors so evaluate_helper can pick the
                # best sample per sequence afterwards.
                ade.append(
                    displacement_error(pred_traj_fake,
                                       pred_traj_gt,
                                       mode='raw'))
                fde.append(
                    final_displacement_error(pred_traj_fake[-1],
                                             pred_traj_gt[-1],
                                             mode='raw'))
                traj_op.append(pred_traj_fake.unsqueeze(dim=0))
                traj_obs.append(obs_traj.unsqueeze(dim=0))

            best_traj, min_ade_error = evaluate_helper(
                torch.stack(ade, dim=1), torch.cat(traj_op, dim=0),
                seq_start_end)
            # All samples share the same observation; keep one copy.
            staked_obs = torch.cat(traj_obs, dim=0)
            obs = staked_obs[0]
            observed_traj.append(obs)
            _, min_fde_error = evaluate_helper(torch.stack(fde, dim=1),
                                               torch.cat(traj_op, dim=0),
                                               seq_start_end)
            ade_outer.append(min_ade_error)
            fde_outer.append(min_fde_error)
            simulated_output.append(best_traj)

        ade = sum(ade_outer) / (sum(total_traj) * PRED_LEN)
        fde = sum(fde_outer) / (sum(total_traj))
        simulated_traj = torch.cat(simulated_output, dim=1)
        total_obs = torch.cat(observed_traj, dim=1).permute(1, 0, 2)
        if MULTI_CONDITIONAL_MODEL:
            all_labels = torch.cat(labels, dim=1)
        # Re-base each batch's (start, end) pairs by the cumulative number
        # of pedestrians in all previous batches so the concatenated
        # trajectories can be indexed globally.
        last_items_in_sequences = []
        curr_sequences = []
        i = 0
        for sequence_list in sequences:
            last_sequence = sequence_list[-1]
            if i > 0:
                last_items_sum = sum(last_items_in_sequences)
                curr_sequences.append(last_items_sum + sequence_list)
            last_items_in_sequences.append(last_sequence[1])
            # NOTE(review): ``i`` is only incremented on the first
            # iteration; later iterations take the ``i > 0`` branch above.
            # Works, but fragile.
            if i == 0:
                curr_sequences.append(sequence_list)
                i += 1
                continue

        sequences = torch.cat(curr_sequences, dim=0)
        colpercent = collisionPercentage(simulated_traj, sequences)
        print('Collision Percentage: ', colpercent * 100)

        # The user defined speed is verified by computing inverse sigmoid function on the output speed of the model.
        if TEST_METRIC == 2:
            if SINGLE_CONDITIONAL_MODEL:
                verify_speed(simulated_traj, sequences, labels=None)
            else:
                verify_speed(simulated_traj, sequences, labels=all_labels)

        return ade, fde, colpercent * 100
Beispiel #22
0
def generator_step(batch, generator, discriminator, g_loss_fn, optimizer_g):
    """Run one generator optimisation step on a single batch.

    Combines the SGAN-style variety L2 loss (minimum over BEST_K samples
    per sequence) with the adversarial loss from ``discriminator``, then
    backpropagates and steps ``optimizer_g``.

    Relies on module-level config: USE_GPU, MULTI_CONDITIONAL_MODEL,
    OBS_LEN, BEST_K, TRAIN_METRIC, L2_LOSS_WEIGHT.

    Returns:
        dict of loss-name -> float for logging.
    """
    if USE_GPU:
        batch = [tensor.cuda() for tensor in batch]
    else:
        batch = [tensor for tensor in batch]
    # Multi-conditional batches additionally carry per-step labels.
    if MULTI_CONDITIONAL_MODEL:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed,
        obs_label, pred_label, obs_obj_rel_speed) = batch
    else:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed, pred_ped_speed, obs_obj_rel_speed) = batch

    losses = {}
    # ``.to(tensor)`` adopts the dtype and device of pred_traj_gt.
    loss = torch.zeros(1).to(pred_traj_gt)
    g_l2_loss_rel = []

    # Only the prediction horizon of the mask is used for the L2 loss.
    loss_mask = loss_mask[:, OBS_LEN:]

    # Variety loss: draw BEST_K samples and keep, per sequence, only the
    # best one (minimum summed L2 below).
    for _ in range(BEST_K):
        if MULTI_CONDITIONAL_MODEL:
            generator_out, final_enc_h = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                  pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=obs_label, pred_label=pred_label)
        else:
            generator_out, final_enc_h = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                      pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=None, pred_label=None)

        pred_traj_fake_rel = generator_out
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

        if L2_LOSS_WEIGHT > 0:
            g_l2_loss_rel.append(L2_LOSS_WEIGHT * l2_loss(
                pred_traj_fake_rel,
                pred_traj_gt_rel,
                loss_mask,
                mode='raw'))

    g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
    if L2_LOSS_WEIGHT > 0:
        # Shape (num_peds, BEST_K): per-pedestrian loss for each sample.
        g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
        for start, end in seq_start_end.data:
            _g_l2_loss_rel = g_l2_loss_rel[start:end]
            # Sum over the pedestrians of this sequence, then take the
            # best sample and normalise by the active mask entries.
            _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
            _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(loss_mask[start:end])
            g_l2_loss_sum_rel += _g_l2_loss_rel
        losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
        loss += g_l2_loss_sum_rel
    # Adversarial term: score the LAST sampled trajectory (full observed +
    # predicted sequence) with the discriminator.
    traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
    traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
    ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)
    if MULTI_CONDITIONAL_MODEL:
        label_info = torch.cat([obs_label, pred_label], dim=0)
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=label_info)
    else:
        scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=None)
    discriminator_loss = g_loss_fn(scores_fake)

    loss += discriminator_loss
    losses['G_discriminator_loss'] = discriminator_loss.item()
    losses['G_total_loss'] = loss.item()

    optimizer_g.zero_grad()
    loss.backward()
    optimizer_g.step()

    return losses
Beispiel #23
0
def plot_trajectory(args, loader, generator):
    """Plot observed, ground-truth and best-predicted trajectories.

    For each batch, draws ``args.num_samples`` candidate predictions,
    keeps the per-sequence best (via ``evaluate_helper``), and saves one
    PNG per sequence to ``./traj_fig/pic_<n>.png``.

    Args:
        args: namespace with ``num_samples`` and ``pred_len``.
        loader: batch iterator yielding SGAN-style tuples.
        generator: trajectory generator (CUDA).
    """
    ground_truth_input = []
    all_model_output_traj = []
    ground_truth_output = []
    pic_cnt = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (
                obs_traj,
                pred_traj_gt,
                obs_traj_rel,
                pred_traj_gt_rel,
                non_linear_ped,
                loss_mask,
                seq_start_end,
            ) = batch
            ade = []
            ground_truth_input.append(obs_traj)
            ground_truth_output.append(pred_traj_gt)
            model_output_traj = []
            # Placeholder; replaced below by the best candidate.
            model_output_traj_best = torch.ones_like(pred_traj_gt).cuda()

            for _ in range(args.num_samples):
                pred_traj_fake_rel = generator(
                    obs_traj_rel, obs_traj, seq_start_end, 0, 3
                )
                # Keep only the prediction horizon of the generator output.
                pred_traj_fake_rel = pred_traj_fake_rel[-args.pred_len :]

                pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
                model_output_traj.append(pred_traj_fake)
                ade_, fde_ = cal_ade_fde(pred_traj_gt, pred_traj_fake)
                ade.append(ade_)
            # Select, per sequence, the sample with the lowest ADE.
            model_output_traj_best = evaluate_helper(
                ade, seq_start_end, model_output_traj, model_output_traj_best
            )
            all_model_output_traj.append(model_output_traj_best)

            # One figure per sequence; coordinates are transposed so that
            # each row indexes a pedestrian and each column a time step.
            for (start, end) in seq_start_end:
                plt.figure(figsize=(20,15), dpi=100)
                ground_truth_input_x_piccoor = (
                    obs_traj[:, start:end, :].cpu().numpy()[:, :, 0].T
                )
                ground_truth_input_y_piccoor = (
                    obs_traj[:, start:end, :].cpu().numpy()[:, :, 1].T
                )
                ground_truth_output_x_piccoor = (
                    pred_traj_gt[:, start:end, :].cpu().numpy()[:, :, 0].T
                )
                ground_truth_output_y_piccoor = (
                    pred_traj_gt[:, start:end, :].cpu().numpy()[:, :, 1].T
                )
                model_output_x_piccoor = (
                    model_output_traj_best[:, start:end, :].cpu().numpy()[:, :, 0].T
                )
                model_output_y_piccoor = (
                    model_output_traj_best[:, start:end, :].cpu().numpy()[:, :, 1].T
                )
                for i in range(ground_truth_output_x_piccoor.shape[0]):

                    # Observed part in red, with an arrow showing heading.
                    observed_line = plt.plot(
                        ground_truth_input_x_piccoor[i, :],
                        ground_truth_input_y_piccoor[i, :],
                        "r-",
                        linewidth=4,
                        label="Observed Trajectory",
                    )[0]
                    observed_line.axes.annotate(
                        "",
                        xytext=(
                            ground_truth_input_x_piccoor[i, -2],
                            ground_truth_input_y_piccoor[i, -2],
                        ),
                        xy=(
                            ground_truth_input_x_piccoor[i, -1],
                            ground_truth_input_y_piccoor[i, -1],
                        ),
                        arrowprops=dict(
                            arrowstyle="->", color=observed_line.get_color(), lw=1
                        ),
                        size=20,
                    )
                    # Ground truth in blue; prepend the last observed point
                    # so the line connects to the observation.
                    ground_line = plt.plot(
                        np.append(
                            ground_truth_input_x_piccoor[i, -1],
                            ground_truth_output_x_piccoor[i, :],
                        ),
                        np.append(
                            ground_truth_input_y_piccoor[i, -1],
                            ground_truth_output_y_piccoor[i, :],
                        ),
                        "b-",
                        linewidth=4,
                        label="Ground Truth",
                    )[0]
                    # Best prediction as a dashed yellow line.
                    predict_line = plt.plot(
                        np.append(
                            ground_truth_input_x_piccoor[i, -1],
                            model_output_x_piccoor[i, :],
                        ),
                        np.append(
                            ground_truth_input_y_piccoor[i, -1],
                            model_output_y_piccoor[i, :],
                        ),
                        color="#ffff00",
                        ls="--",
                        linewidth=4,
                        label="Predicted Trajectory",
                    )[0]

                #plt.axis("off")
                plt.savefig(
                    "./traj_fig/pic_{}.png".format(pic_cnt)
                )
                plt.close()
                pic_cnt += 1
Beispiel #24
0
def check_accuracy(loader, generator, discriminator, d_loss_fn, speed_regressor):
    d_losses = []
    metrics = {}
    g_l2_losses_abs, g_l2_losses_rel = ([],) * 2
    disp_error, f_disp_error, mean_speed_disp_error, final_speed_disp_error = [], [], [], []
    total_traj = 0
    loss_mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            if USE_GPU:
                batch = [tensor.cuda() for tensor in batch]
            else:
                batch = [tensor for tensor in batch]
            if MULTI_CONDITIONAL_MODEL:
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed,
                 pred_ped_speed, obs_label, pred_label, obs_obj_rel_speed) = batch
            else:
                (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, loss_mask, seq_start_end, obs_ped_speed,
                 pred_ped_speed, obs_obj_rel_speed) = batch

            if MULTI_CONDITIONAL_MODEL:
                pred_traj_fake_rel, final_enc_h = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                  pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=obs_label, pred_label=pred_label)
            else:
                pred_traj_fake_rel, final_enc_h = generator(obs_traj, obs_traj_rel, seq_start_end, obs_ped_speed, pred_ped_speed,
                                      pred_traj_gt, TRAIN_METRIC, None, obs_obj_rel_speed, obs_label=None, pred_label=None)

            fake_ped_speed = speed_regressor(obs_ped_speed, final_enc_h)

            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
            loss_mask = loss_mask[:, OBS_LEN:]

            g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
                pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                pred_traj_fake_rel, loss_mask
            )

            abs_speed_los = cal_mae_speed_loss(pred_ped_speed, fake_ped_speed)
            ade = displacement_error(pred_traj_gt, pred_traj_fake)
            fde = final_displacement_error(pred_traj_gt, pred_traj_fake)

            traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
            traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
            traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
            ped_speed = torch.cat([obs_ped_speed, pred_ped_speed], dim=0)
            if MULTI_CONDITIONAL_MODEL:
                label_info = torch.cat([obs_label, pred_label], dim=0)
                scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=label_info)
                scores_real = discriminator(traj_real, traj_real_rel, ped_speed, label=label_info)
            else:
                scores_fake = discriminator(traj_fake, traj_fake_rel, ped_speed, label=None)
                scores_real = discriminator(traj_real, traj_real_rel, ped_speed, label=None)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            f_disp_error.append(fde.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            total_traj += pred_traj_gt.size(1)
            if total_traj >= NUM_SAMPLE_CHECK:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum
    metrics['ade'] = sum(disp_error) / (total_traj * PRED_LEN)
    metrics['fde'] = sum(f_disp_error) / total_traj

    generator.train()
    return metrics
Beispiel #25
0
def check_accuracy(args, loader, generator, discriminator, d_loss_fn,
                   N_VALID_EXAMPLES):
    """Evaluate generator and discriminator on up to N_VALID_EXAMPLES samples.

    Runs the generator in eval mode under ``torch.no_grad()``, accumulating
    discriminator loss, generator L2 losses (absolute and relative), and
    displacement errors (ADE/FDE), then restores train mode.

    Args:
        args: namespace providing ``use_gpu``, ``obs_len``, ``batch_size``
            and ``pred_len``.
        loader: iterable of batches of the 7-tuple
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end).
        generator: model mapping (obs_traj, obs_traj_rel, seq_start_end)
            to relative predicted trajectories.
        discriminator: model scoring (traj, traj_rel, seq_start_end).
        d_loss_fn: callable(scores_real, scores_fake) -> discriminator loss.
        N_VALID_EXAMPLES: stop once roughly this many examples are consumed.

    Returns:
        dict with keys 'd_loss', 'g_l2_loss_abs', 'g_l2_loss_rel',
        'ade', 'fde'.
    """
    d_losses = []
    metrics = {}
    # BUG FIX: the original used ``a, b = ([], ) * 2`` which binds every
    # name to the SAME list object, so appends to one accumulator showed
    # up in all of them and the averaged metrics were wrong. Use distinct
    # lists instead.
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
    total_traj, total_traj_l, total_traj_nl = 0, 0, 0
    loss_mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch_idx, batch in enumerate(loader):
            if args.use_gpu == 1:
                batch = [tensor.cuda() for tensor in batch]
            # (No copy needed on the CPU path; the original's
            # ``[tensor for tensor in batch]`` was a no-op shallow copy.)
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch
            linear_ped = 1 - non_linear_ped
            # Only the prediction horizon of the mask is relevant here.
            loss_mask = loss_mask[:, args.obs_len:]

            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                           seq_start_end)
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])

            g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
                pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
                pred_traj_fake_rel, loss_mask)
            ade, ade_l, ade_nl = cal_ade(pred_traj_gt, pred_traj_fake,
                                         linear_ped, non_linear_ped)
            fde, fde_l, fde_nl = cal_fde(pred_traj_gt, pred_traj_fake,
                                         linear_ped, non_linear_ped)

            # Full real/fake trajectories (observed + predicted) for the
            # discriminator; concatenation is along the time dimension.
            traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
            traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
            traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel],
                                      dim=0)

            scores_fake = discriminator(traj_fake, traj_fake_rel,
                                        seq_start_end)
            scores_real = discriminator(traj_real, traj_real_rel,
                                        seq_start_end)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            disp_error_l.append(ade_l.item())
            disp_error_nl.append(ade_nl.item())
            f_disp_error.append(fde.item())
            f_disp_error_l.append(fde_l.item())
            f_disp_error_nl.append(fde_nl.item())

            loss_mask_sum += torch.numel(loss_mask.data)
            # dim 1 of pred_traj_gt is the pedestrian/trajectory count
            # — assumed (time, ped, xy) layout; TODO confirm against loader.
            total_traj += pred_traj_gt.size(1)
            total_traj_l += torch.sum(linear_ped).item()
            total_traj_nl += torch.sum(non_linear_ped).item()

            if batch_idx * args.batch_size >= N_VALID_EXAMPLES:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
    metrics['fde'] = sum(f_disp_error) / total_traj

    generator.train()
    return metrics
Beispiel #26
0
def check_accuracy(loader, generator, discriminator, d_loss_fn, limit=False):
    """Evaluate a graph-based generator/discriminator pair on ``loader``.

    Runs in eval mode under ``torch.no_grad()``, accumulating discriminator
    loss, generator L2 losses, ADE and FDE, then restores train mode.

    Args:
        loader: iterable of 11-tuples
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             n_l, l_m, V_obs, A_obs, V_pre, A_pre, vgg_list).
        generator: model mapping (obs_traj, obs_traj_rel, V_obs, A_obs,
            vgg_list) to relative predictions (T, V, C layout — presumed
            from the inline comments; verify against the model).
        discriminator: model scoring (traj, traj_rel).
        d_loss_fn: callable(scores_real, scores_fake) -> discriminator loss.
        limit: if True, stop once ``total_traj`` reaches the module-level
            ``NUM_SAMPLES_CHECK``.

    Returns:
        dict with keys 'd_loss', 'g_l2_loss_abs', 'g_l2_loss_rel',
        'ade', 'fde'.
    """
    d_losses = []
    metrics = {}
    # BUG FIX: the original ``([], ) * 2`` unpacking aliased both names to
    # one list, so abs and rel L2 losses were appended into the same
    # accumulator and both reported averages were wrong.
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error = []    # per-batch ADE sums
    f_disp_error = []  # per-batch FDE sums
    total_traj = 0

    mask_sum = 0
    generator.eval()
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, n_l, l_m,
             V_obs, A_obs, V_pre, A_pre, vgg_list) = batch
            pred_traj_fake_rel = generator(obs_traj, obs_traj_rel, V_obs,
                                           A_obs, vgg_list)  # T V C
            # Last observed absolute position is obs_traj[0, :, :, -1]
            # (N V C T indexing — presumed from usage; TODO confirm).
            pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                             obs_traj[0, :, :, -1])

            g_l2_loss_abs = l2_loss(pred_traj_fake, pred_traj_gt, mode='sum')
            g_l2_loss_rel = l2_loss(pred_traj_fake_rel,
                                    pred_traj_gt_rel,
                                    mode='sum')

            ade = displacement_error(pred_traj_fake, pred_traj_gt)
            fde = final_displacement_error(pred_traj_fake[-1],
                                           pred_traj_gt[0, :, :, -1])

            # Full trajectories (observed + predicted) for the
            # discriminator; obs tensors are sliced to drop the extra axis.
            traj_real = torch.cat([obs_traj[:, :, 0, :], pred_traj_gt], dim=0)
            traj_real_rel = torch.cat(
                [obs_traj_rel[:, :, 0, :], pred_traj_gt_rel], dim=0)
            traj_fake = torch.cat([obs_traj[:, :, 0, :], pred_traj_fake],
                                  dim=0)
            traj_fake_rel = torch.cat(
                [obs_traj_rel[:, :, 0, :], pred_traj_fake_rel], dim=0)

            scores_fake = discriminator(traj_fake, traj_fake_rel)
            scores_real = discriminator(traj_real, traj_real_rel)

            d_loss = d_loss_fn(scores_real, scores_fake)
            d_losses.append(d_loss.item())

            g_l2_losses_abs.append(g_l2_loss_abs.item())
            g_l2_losses_rel.append(g_l2_loss_rel.item())
            disp_error.append(ade.item())
            f_disp_error.append(fde.item())

            # Normalizer for the summed L2 losses: pedestrians x horizon.
            mask_sum += (pred_traj_gt.size(1) * PRED_LEN)
            total_traj += pred_traj_gt.size(1)
            if limit and total_traj >= NUM_SAMPLES_CHECK:
                break

    metrics['d_loss'] = sum(d_losses) / len(d_losses)
    metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / mask_sum
    metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / mask_sum

    metrics['ade'] = sum(disp_error) / (total_traj * PRED_LEN)
    metrics['fde'] = sum(f_disp_error) / total_traj
    generator.train()
    return metrics