Example #1
import numpy as np
import torch

import utils  # project-local module providing calc_mse and calc_fde


def test(model, device, test_loader, loss_function):
    '''
    Evaluates DTP.
    args:
        model: DTP as defined in the DynamicTrajectoryPredictor class
        device: GPU or CPU
        test_loader: Dataloader that produces stacks of optical flow images
        loss_function: e.g. MSE
    returns:
        MSE and FDE at horizons of 5, 10 and 15 frames into the future,
        plus the outputs and targets 15 frames into the future
    '''
    model.eval()
    test_loss = 0
    all_outputs_5 = np.array([])
    all_targets_5 = np.array([])
    all_outputs_10 = np.array([])
    all_targets_10 = np.array([])
    all_outputs_15 = np.array([])
    all_targets_15 = np.array([])

    with torch.no_grad():
        for batch_idx, data in enumerate(test_loader):
            flow, targets = data['flow_stack'].to(device), data['labels'].to(
                device)
            flow = flow.float()
            targets = targets.float()

            output = model(flow)

            test_loss += loss_function(output,
                                       targets).item()  # sum up batch loss

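            # The 30-dim prediction is treated as two 15-frame blocks
            # (columns 0-14 and 15-29, presumably x- and y-displacements);
            # keep the first n of each block to score the 5-, 10- and
            # 15-frame horizons.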
            output_5 = torch.cat((output[:, 0:5], output[:, 15:20]), dim=1)
            output_10 = torch.cat((output[:, 0:10], output[:, 15:25]), dim=1)
            output_15 = output

            targets_5 = torch.cat((targets[:, 0:5], targets[:, 15:20]), dim=1)
            targets_10 = torch.cat((targets[:, 0:10], targets[:, 15:25]),
                                   dim=1)
            targets_15 = targets

            all_outputs_5 = np.append(all_outputs_5,
                                      output_5.detach().cpu().numpy())
            all_targets_5 = np.append(all_targets_5,
                                      targets_5.detach().cpu().numpy())
            all_outputs_10 = np.append(all_outputs_10,
                                       output_10.detach().cpu().numpy())
            all_targets_10 = np.append(all_targets_10,
                                       targets_10.detach().cpu().numpy())
            all_outputs_15 = np.append(all_outputs_15,
                                       output_15.detach().cpu().numpy())
            all_targets_15 = np.append(all_targets_15,
                                       targets_15.detach().cpu().numpy())

    MSE_5 = utils.calc_mse(all_outputs_5, all_targets_5)
    FDE_5 = utils.calc_fde(all_outputs_5, all_targets_5, n=5)
    MSE_10 = utils.calc_mse(all_outputs_10, all_targets_10)
    FDE_10 = utils.calc_fde(all_outputs_10, all_targets_10, n=10)
    MSE_15 = utils.calc_mse(all_outputs_15, all_targets_15)
    FDE_15 = utils.calc_fde(all_outputs_15, all_targets_15, n=15)

    print(
        'Validation: \t\t\t\tMSE@5: {:.0f} \tFDE@5: {:.0f} \tMSE@10: {:.0f} \tFDE@10: {:.0f} \tMSE@15: {:.0f} \tFDE@15: {:.0f}'
        .format(MSE_5, FDE_5, MSE_10, FDE_10, MSE_15, FDE_15))
    return MSE_5, FDE_5, MSE_10, FDE_10, MSE_15, FDE_15, all_outputs_15, all_targets_15
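For context, here is a minimal sketch of how this evaluation routine might be called. The DynamicTrajectoryPredictor constructor arguments, the checkpoint path, and the FlowStackDataset class are illustrative assumptions, not part of the snippet above; the only contract test() relies on is a dataloader that yields dicts with 'flow_stack' and 'labels' tensors.

# Minimal usage sketch (not from the original project): the constructor call,
# checkpoint path and FlowStackDataset class are placeholders.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = DynamicTrajectoryPredictor().to(device)         # hypothetical constructor args
model.load_state_dict(torch.load('dtp_weights.pt',      # hypothetical checkpoint path
                                 map_location=device))

val_loader = DataLoader(FlowStackDataset(split='val'),  # hypothetical dataset class
                        batch_size=64, shuffle=False)

MSE_5, FDE_5, MSE_10, FDE_10, MSE_15, FDE_15, outputs, targets = test(
    model, device, val_loader, loss_function=nn.MSELoss())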
Example #2
import numpy as np
import torch

import utils  # project-local module providing calc_mse and calc_fde


def train(model, device, train_loader, optimizer, epoch, loss_function):
    '''
    Trains DTP for one epoch.
    args:
        model: DTP as defined in the DynamicTrajectoryPredictor class
        device: GPU or CPU
        train_loader: Dataloader that produces stacks of optical flow images
        optimizer: e.g. Adam
        epoch: Current epoch (for printing progress)
        loss_function: e.g. MSE
    '''
    model.train()
    all_outputs_5 = np.array([])
    all_targets_5 = np.array([])
    all_outputs_10 = np.array([])
    all_targets_10 = np.array([])
    all_outputs_15 = np.array([])
    all_targets_15 = np.array([])
    for batch_idx, data in enumerate(train_loader):
        if batch_idx % 50 == 0:
            print('Batch ', batch_idx, ' of ', len(train_loader))

        flow, targets = data['flow_stack'].to(device), data['labels'].to(
            device)

        targets = targets.float()

        optimizer.zero_grad()

        flow = flow.float()

        output = model(flow)

        loss = loss_function(output, targets)

        loss.backward()
        optimizer.step()

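        # As in test(): split the 30-dim prediction into two 15-frame blocks
        # and keep the first n of each to track the 5-, 10- and 15-frame horizons.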
        output_5 = torch.cat((output[:, 0:5], output[:, 15:20]), dim=1)
        output_10 = torch.cat((output[:, 0:10], output[:, 15:25]), dim=1)
        output_15 = output

        targets_5 = torch.cat((targets[:, 0:5], targets[:, 15:20]), dim=1)
        targets_10 = torch.cat((targets[:, 0:10], targets[:, 15:25]), dim=1)
        targets_15 = targets

        all_outputs_5 = np.append(all_outputs_5,
                                  output_5.detach().cpu().numpy())
        all_targets_5 = np.append(all_targets_5,
                                  targets_5.detach().cpu().numpy())
        all_outputs_10 = np.append(all_outputs_10,
                                   output_10.detach().cpu().numpy())
        all_targets_10 = np.append(all_targets_10,
                                   targets_10.detach().cpu().numpy())
        all_outputs_15 = np.append(all_outputs_15,
                                   output_15.detach().cpu().numpy())
        all_targets_15 = np.append(all_targets_15,
                                   targets_15.detach().cpu().numpy())

    MSE_5 = utils.calc_mse(all_outputs_5, all_targets_5)
    FDE_5 = utils.calc_fde(all_outputs_5, all_targets_5, n=5)
    MSE_10 = utils.calc_mse(all_outputs_10, all_targets_10)
    FDE_10 = utils.calc_fde(all_outputs_10, all_targets_10, n=10)
    MSE_15 = utils.calc_mse(all_outputs_15, all_targets_15)
    FDE_15 = utils.calc_fde(all_outputs_15, all_targets_15, n=15)

    # flow still holds the final batch, so batch_idx * len(flow) approximates
    # the number of samples processed this epoch.
    print(
        'Train Epoch: {} [{}/{} ({:.0f}%)] \tMSE@5: {:.0f} \tFDE@5: {:.0f} \tMSE@10: {:.0f} \tFDE@10: {:.0f} \tMSE@15: {:.0f} \tFDE@15: {:.0f}'
        .format(epoch, batch_idx * len(flow), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), MSE_5, FDE_5, MSE_10,
                FDE_10, MSE_15, FDE_15))
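
For context, here is a minimal sketch of an end-to-end loop that wires train() and test() together. The FlowStackDataset class, batch size, learning rate and epoch count are illustrative assumptions rather than values from the original project.

# Minimal end-to-end sketch (not from the original project): dataset class,
# batch size, learning rate and epoch count are illustrative assumptions.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = DynamicTrajectoryPredictor().to(device)             # hypothetical constructor args
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # assumed learning rate
loss_function = nn.MSELoss()

train_loader = DataLoader(FlowStackDataset(split='train'),  # hypothetical dataset class
                          batch_size=64, shuffle=True)
val_loader = DataLoader(FlowStackDataset(split='val'),
                        batch_size=64, shuffle=False)

for epoch in range(1, 11):  # assumed number of epochs
    train(model, device, train_loader, optimizer, epoch, loss_function)
    test(model, device, val_loader, loss_function)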