Example #1

if __name__ == "__main__":
    import os.path as osp
    import numpy as np
    import torch
    import opts
    # loadpreds_if_exists, loadgts, _get_bboxsize, calc_dists and
    # calc_metrics are helpers defined elsewhere in this repository
    args = opts.argparser()
    dataset = args.data.split('/')[-1]
    save_dir = osp.join(args.checkpoint, dataset)
    print("save directory: " + save_dir)
    preds = torch.from_numpy(loadpreds_if_exists(osp.join(save_dir, 'preds_valid.mat')))
    gts, _ = loadgts(args.data, args.pointType)
    # normalize each sample's landmark error by its ground-truth bbox size
    norm = np.ones(preds.size(0))
    for i, gt in enumerate(gts):
        norm[i] = _get_bboxsize(gt)

    if dataset == 'LS3D-W' or dataset == '300VW-3D':
        for i in range(3):
            if dataset == 'LS3D-W':
                category = {'0': 'Easy', '1': 'Medium', '2': 'Hard'}[str(i)]
                l, f = 2400*i, 2400*(i+1)
            else:
                category = {'0': 'Category A', '1': 'Category B', '2': 'Category C'}[str(i)]
                # 62643 + 32872 = 95515, so Category B must end where Category C
                # starts; `None` keeps the final sample, which an end index of -1
                # would silently drop
                l, f = {0: [0, 62643], 1: [62643, 62643 + 32872], 2: [95515, None]}[i]
            # LS3D-W landmarks are 0-indexed, so shift the predictions by one
            dist = calc_dists(preds[l:f] - 1., gts[l:f], norm[l:f])
            auc = calc_metrics(dist, save_dir, category)
            print("FINAL: Mean Error: {}. AUC: {} of {} subset".format(round(torch.mean(dist) * 100., 2), auc, category))
    else:
        dists = calc_dists(preds, gts, norm)
        auc = calc_metrics(dists, save_dir)
        print("FINAL: Mean Error: {}. AUC: {}".format(round(torch.mean(dists) * 100., 2), auc))
Example #2
# Relies on module-level names: time, torch, matplotlib.pyplot as plt,
# Bar (progress bar), the repo helpers AverageMeter, accuracy, final_preds,
# calc_metrics, shufflelr, flip_back and batch_with_heatmap, plus the
# globals `idx` (landmark indices) and `args` (parsed CLI options).
def validate(loader, model, criterion, netType, debug, flip):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    end = time.time()

    # per-image landmark predictions, filled in as each batch completes
    predictions = torch.Tensor(len(loader.dataset), 68, 2)

    model.eval()
    gt_win, pred_win = None, None
    bar = Bar('Validating', max=len(loader))
    all_dists = torch.zeros((68, len(loader.dataset)))
    for i, (inputs, target, meta) in enumerate(loader):
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # `async` is a reserved word on Python 3.7+; use non_blocking instead
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # test-time flip augmentation: run a horizontally mirrored copy
            # through the model and accumulate its un-flipped heatmaps
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(shufflelr(
                    inputs.clone().numpy())).float().cuda())
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # intermediate supervision: sum the loss over every stack's output
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc, batch_dists = accuracy(score_map, target.cpu(), idx, thr=0.07)
        all_dists[:, i * args.val_batch:(i + 1) * args.val_batch] = batch_dists

        # map heatmap coordinates back to the original image frame
        preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
        for n in range(score_map.size(0)):
            predictions[meta['index'][n], :, :] = preds[n, :, :]

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # .item() replaces the old 0-dim indexing `loss.data[0]`
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()

    bar.finish()
    mean_error = torch.mean(all_dists)
    auc = calc_metrics(all_dists)  # AUC of the predicted maps against the targets
    print("=> Mean Error: {:.2f}, AUC@0.07: {} based on maps".format(
        mean_error * 100., auc))
    return losses.avg, acces.avg, predictions, auc
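`calc_metrics` is likewise repo-local. Judging from the `thr=0.07` passed to `accuracy` and the `AUC@0.07` label in the log line, it plausibly computes the area under the cumulative error distribution (CED) curve up to an error threshold of 0.07. The following is a minimal sketch under that assumption; the real helper also accepts a save directory and a category label (see Example #1) and presumably writes the CED plot to disk:

import numpy as np

def calc_metrics(dists, threshold=0.07, step=0.0001):
    # AUC of the cumulative error distribution up to `threshold`,
    # normalized so a perfect model scores 1.0. `dists` is any
    # array-like of normalized per-landmark errors.
    errors = np.asarray(dists).ravel()
    xs = np.arange(0.0, threshold + step, step)
    ced = np.array([(errors <= x).mean() for x in xs])
    return np.trapz(ced, xs) / threshold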