Example #1
import time

import numpy as np
import torch

# filter_pred, compute_matches, get_bev, plot_bev and plot_label_map are
# project-local helpers (post-processing, matching and visualization).


def eval_one(net,
             loss_fn,
             config,
             loader,
             image_id,
             device,
             plot=False,
             verbose=False):
    # Fetch one frame: BEV input tensor, label map and ground-truth box list.
    input, label_map, image_id = loader.dataset[image_id]
    input = input.to(device)
    label_map, label_list = loader.dataset.get_label(image_id)
    loader.dataset.reg_target_transform(label_map)
    # HWC numpy label map -> 1xCxHxW tensor on the target device.
    label_map = torch.from_numpy(label_map).permute(2, 0,
                                                    1).unsqueeze_(0).to(device)

    # Forward Pass
    t_start = time.time()
    pred = net(input.unsqueeze(0))
    t_forward = time.time() - t_start

    loss, cls_loss, loc_loss = loss_fn(pred, label_map)
    pred.squeeze_(0)
    cls_pred = pred[0, ...]  # channel 0 is the classification score map

    if verbose:
        print("Forward pass time:", t_forward)

    # Filter Predictions
    t_start = time.time()
    corners, scores = filter_pred(config, pred)
    t_post = time.time() - t_start

    if verbose:
        print("Non max suppression time:", t_post)

    gt_boxes = np.array(label_list)
    gt_match, pred_match, overlaps = compute_matches(gt_boxes,
                                                     corners,
                                                     scores,
                                                     iou_threshold=0.5)

    num_gt = len(label_list)
    num_pred = len(scores)
    input_np = input.cpu().permute(1, 2, 0).numpy()
    pred_image = get_bev(input_np, corners)

    if plot:
        # Visualization
        plot_bev(input_np, label_list, window_name='GT')
        plot_bev(input_np, corners, window_name='Prediction')
        plot_label_map(cls_pred.cpu().numpy())

    return (num_gt, num_pred, scores, pred_image, pred_match, loss.item(),
            t_forward, t_post)
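
A minimal driver sketch for eval_one on a single validation frame. The net, loss_fn, config and val_loader objects are assumptions here (in practice they come from the project's experiment setup), and since eval_one does not disable gradients itself, the caller should:

# Hypothetical usage of eval_one; net, loss_fn, config and val_loader are
# assumed to be built by the surrounding experiment code.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.to(device).eval()
with torch.no_grad():
    num_gt, num_pred, scores, pred_image, pred_match, loss, t_fwd, t_post = \
        eval_one(net, loss_fn, config, val_loader, image_id=0,
                 device=device, plot=False, verbose=True)
print("%d predictions vs %d ground-truth boxes, loss=%.4f"
      % (num_pred, num_gt, loss))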
Example #2
    # Requires `import time` and `import torch` at module level; self.net,
    # self.config and self.preprocess are set up by the enclosing class.
    def __call__(self, velo, path):
        # Preprocess the point cloud into a BEV tensor, run the network
        # without gradients, then filter raw output into corners and scores.
        t_start = time.time()
        bev = self.preprocess(velo, path)
        t_pre = time.time()
        with torch.no_grad():
            pred = self.net(bev.unsqueeze(0)).squeeze_(0)

        t_m = time.time()
        corners, scores = filter_pred(self.config, pred)
        input_np = bev.permute(1, 2, 0).cpu().numpy()

        t_post = time.time()
        pred_bev = get_bev(input_np, corners)

        # Elapsed seconds: [preprocess, forward pass, prediction filtering].
        t_s = [t_pre - t_start, t_m - t_pre, t_post - t_m]
        return t_s, corners, scores, pred_bev
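
A hedged usage sketch for this wrapper. The instance name detector, the enclosing class, and the KITTI-style .bin point-cloud layout are all assumptions based on the method's signature; only the (velo, path) call itself comes from the code above:

# Hypothetical driver; `detector` is an instance of the class that owns
# __call__ above, constructed with its net and config elsewhere.
import numpy as np

scan = 'path/to/000000.bin'
velo = np.fromfile(scan, dtype=np.float32).reshape(-1, 4)  # assumed KITTI layout
t_s, corners, scores, pred_bev = detector(velo, scan)
print("preprocess/forward/filtering seconds:", t_s)
print("detections kept after filtering:", len(scores))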
Example #3
import random
import time
from multiprocessing import Pool

import numpy as np
import torch

# filter_pred, compute_matches, get_bev and compute_ap are project-local
# helpers (post-processing, box matching and metric computation).


def eval_batch(config, net, loss_fn, loader, device, eval_range='all'):
    net.eval()
    # Decode raw network output into boxes (unwrap DataParallel if needed).
    if config['mGPUs']:
        net.module.set_decode(True)
    else:
        net.set_decode(True)

    cls_loss = 0
    loc_loss = 0
    all_scores = []
    all_matches = []
    log_images = []
    gts = 0
    preds = 0
    t_fwd = 0
    t_nms = 0

    # Randomly pick 10 dataset indices whose predictions get logged as images.
    log_img_list = random.sample(range(len(loader.dataset)), 10)

    with torch.no_grad():
        for i, data in enumerate(loader):
            input, label_map, image_id = data
            input = input.to(device)
            label_map = label_map.to(device)
            tac = time.time()
            predictions = net(input)
            t_fwd += time.time() - tac  # accumulate forward-pass time only once
            loss, cls, loc = loss_fn(predictions, label_map)
            cls_loss += cls
            loc_loss += loc

            toc = time.time()
            # Post-process in a worker pool; processes=1 runs the batch
            # serially, a larger value post-processes frames in parallel.
            predictions = list(torch.split(predictions.cpu(), 1, dim=0))
            batch_size = len(predictions)
            with Pool(processes=1) as pool:
                preds_filtered = pool.starmap(filter_pred,
                                              [(config, pred)
                                               for pred in predictions])
            t_nms += (time.time() - toc)
            args = []
            for j in range(batch_size):
                _, label_list = loader.dataset.get_label(image_id[j].item())
                corners, scores = preds_filtered[j]
                gts += len(label_list)
                preds += len(scores)
                all_scores.extend(list(scores))
                if image_id[j] in log_img_list:
                    input_np = input[j].cpu().permute(1, 2, 0).numpy()
                    pred_image = get_bev(input_np, corners)
                    log_images.append(pred_image)

                arg = (np.array(label_list), corners, scores)
                args.append(arg)

            # Match predictions to ground truth in a worker pool
            with Pool(processes=1) as pool:
                matches = pool.starmap(compute_matches, args)

            for j in range(batch_size):
                all_matches.extend(list(matches[j][1]))

    # Sort all matches by descending confidence score for the AP computation.
    all_scores = np.array(all_scores)
    all_matches = np.array(all_matches)
    sort_ids = np.argsort(all_scores)
    all_matches = all_matches[sort_ids[::-1]]

    metrics = {}
    AP, precisions, recalls, precision, recall = compute_ap(
        all_matches, gts, preds)
    metrics['AP'] = AP
    metrics['Precision'] = precision
    metrics['Recall'] = recall
    metrics['Forward Pass Time'] = t_fwd / len(loader.dataset)
    metrics['Postprocess Time'] = t_nms / len(loader.dataset)

    cls_loss = cls_loss / len(loader)
    loc_loss = loc_loss / len(loader)
    metrics['loss'] = cls_loss + loc_loss

    return metrics, precisions, recalls, log_images
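
compute_ap is project-local. As a reference for what it plausibly computes from the inputs prepared above, here is a minimal sketch of the standard VOC-style average precision over a match array sorted by descending score, where entries >= 0 mark predictions matched to a ground-truth box and -1 marks false positives. The convention is an assumption modeled on the Mask R-CNN-style compute_matches used here, not the project's exact implementation:

import numpy as np

def compute_ap_sketch(pred_match, num_gt):
    # Cumulative true positives over predictions sorted by descending score.
    tp = np.cumsum(pred_match > -1).astype(np.float32)
    precisions = tp / (np.arange(len(pred_match)) + 1)
    recalls = tp / num_gt
    # Pad the curve and make precision monotonically non-increasing.
    precisions = np.concatenate([[0.0], precisions, [0.0]])
    recalls = np.concatenate([[0.0], recalls, [1.0]])
    for i in range(len(precisions) - 2, -1, -1):
        precisions[i] = max(precisions[i], precisions[i + 1])
    # Sum precision over the recall steps (area under the PR curve).
    idx = np.where(recalls[1:] != recalls[:-1])[0] + 1
    ap = np.sum((recalls[idx] - recalls[idx - 1]) * precisions[idx])
    return ap, precisions, recalls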