Example no. 1
0
    def forward_pass(self, img):
        """Run the pose model on a single image and return keypoint picks.

        Args:
            img: input image tensor of shape (C, H, W) -- assumed from the
                indexing below; confirm against the caller.

        Returns:
            (finalpoints, finalpointers): list of joint indices and the
            matching list of [x, y] coordinates for every keypoint whose
            confidence exceeds 0.4.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        pointers = []  # entries: [x, y, joint_index, confidence]

        # center and scale for cropping (200-pixel reference convention)
        c = [img.shape[1] / 2, img.shape[2] / 2]
        s = float(img.shape[1] / 200.0)

        img = crop(self.img_path, img, c, s, [self.inp_res, self.inp_res])

        # add a batch dimension and run the network
        img = np.reshape(img, (1, img.shape[0], img.shape[1], img.shape[2]))
        img = img.to(device, non_blocking=True)
        output = self.model(img)

        # stacked models return a list; the last entry is the final heatmap
        score_map = output[-1].cpu() if isinstance(output,
                                                   list) else output.cpu()
        preds, vals = final_preds(score_map, [c], [s], [64, 64])
        coords = np.squeeze(preds)

        for m in range(len(coords)):
            val = vals[0][m].detach().numpy()
            print("val", val)
            if val > 0.4:  # threshold for confidence score
                x = coords[m][0].cpu().detach().numpy()
                y = coords[m][1].cpu().detach().numpy()
                # BUG FIX: the original tested a bare generator expression
                # (always truthy), compared a 2-element list against
                # 3-element entries (always unequal), and compared the
                # confidence against the joint *index*.  Keep only the
                # highest-confidence entry per (x, y) location instead.
                existing = next(
                    (p for p in pointers if [p[0], p[1]] == [x, y]), None)
                if existing is None:
                    pointers.append([x, y, m, val])
                elif val > existing[3]:
                    pointers.remove(existing)
                    pointers.append([x, y, m, val])

        finalpoints = [p[2] for p in pointers]
        finalpointers = [[p[0], p[1]] for p in pointers]

        return finalpoints, finalpointers
Example no. 2
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate `model` on `val_loader` and collect per-sample predictions.

    Relies on module-level names: `device`, `idx`, `AverageMeter`, `Bar`,
    `fliplr`, `flip_back`, `accuracy`, `final_preds`, `batch_with_heatmap`,
    `plt`.

    Args:
        val_loader: DataLoader yielding (input, target, meta) batches.
        model: pose network; may return a list of intermediate outputs.
        criterion: loss taking (output, target, target_weight).
        num_classes: number of joints per sample.
        debug: if True, display ground-truth/prediction heatmap overlays.
        flip: if True, add predictions from the horizontally flipped input
            (test-time augmentation).

    Returns:
        (avg_loss, avg_acc, predictions) with `predictions` of shape
        (len(dataset), num_classes, 2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # one (x, y) coordinate pair per joint per dataset sample
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output; stacked-hourglass models return a list whose
            # last entry is the final prediction
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()
            if flip:
                # test-time augmentation: add the un-flipped prediction of
                # the mirrored input
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output
                # intermediate supervision: sum the loss over every stack
                loss = sum(criterion(o, target, target_weight)
                           for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            # `idx` is a module-level joint-subset selector -- TODO confirm
            acc = accuracy(score_map, target.cpu(), idx)

            # map heatmap argmaxes back to original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return losses.avg, acces.avg, predictions
Example no. 3
0
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True, _logger=None):
    """Evaluate `model` on `val_loader`, scoring accuracy with `UniLoss`.

    Fixes vs. the original: `target.cuda(async=True)` is a SyntaxError on
    Python >= 3.7 (`async` is a reserved keyword) and `volatile=True`
    Variables were removed from PyTorch; both are replaced with
    `non_blocking=True` and a `torch.no_grad()` context.  `_logger` is now
    guarded, since its default is None.

    Returns:
        (avg_loss * 100, avg_acc * 100, predictions) with `predictions` of
        shape (len(dataset), num_classes, 2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # one (x, y) coordinate pair per joint per dataset sample
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)
    autoloss = models.loss.UniLoss(valid=True)
    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():  # replaces the removed volatile=True mechanism
        for i, (inputs, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)
            input_var = inputs.cuda()
            target_var = target

            # compute output (list of per-stack heatmaps)
            output = model(input_var)
            score_map = output[-1].data.cpu()
            if flip:
                # test-time augmentation with the mirrored input
                flip_input_var = torch.from_numpy(
                    fliplr(inputs.clone().numpy())).float().cuda()
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu())
                score_map += flip_output

            # intermediate supervision: sum the loss over every stack
            loss = 0
            for o in output:
                loss += criterion(o, target_var)
            _, acc, _ = autoloss(output[-1], meta)

            # map heatmap argmaxes back to original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                loss=losses.avg * 100,
                acc=acces.avg * 100)
            if _logger is not None:  # default is None; guard before logging
                _logger.info(bar.suffix)

    bar.finish()
    return losses.avg * 100, acces.avg * 100, predictions
Example no. 4
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             idx,
             save_result_dir,
             meta_dir,
             anno_type,
             flip=True,
             evaluate=False,
             scales=(0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6),
             multi_scale=False,
             save_heatmap=False):
    """Evaluate `model`, optionally merging multi-scale predictions and
    saving per-image results/heatmaps to `save_result_dir`.

    Fixes vs. the original: `target.cuda(async=True)` is a SyntaxError on
    Python >= 3.7 (replaced by `non_blocking=True`); the mutable default
    `scales=[...]` is now a tuple; deprecated `torch.autograd.Variable`
    wrappers are dropped (tensors are Variables in modern PyTorch).

    Relies on module-level names: `AverageMeter`, `Bar`, `fliplr`,
    `flip_back`, `accuracy`, `pck_threshold`, `final_preds`,
    `multi_scale_merge`, `align_back`, `sample_with_heatmap`.

    Returns:
        (avg_loss, avg_acc) when annotations are available, else (0, 0).
    """
    anno_type = anno_type[0].lower()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    num_scales = len(scales)

    # switch to evaluate mode
    model.eval()

    # per-channel mean used to un-normalize images for visualization
    meanstd_file = '../datasets/arm/mean.pth.tar'
    meanstd = torch.load(meanstd_file)
    mean = meanstd['mean']

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target, meta) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if anno_type != 'none':
            target = target.cuda(non_blocking=True)
            target_var = target

        input_var = inputs.cuda()

        with torch.no_grad():
            # compute output (list of per-stack heatmaps)
            output = model(input_var)

            score_map = output[-1].data.cpu()
            if flip:
                # average with the prediction on the mirrored input
                flip_input_var = torch.from_numpy(
                    fliplr(inputs.clone().numpy())).float().cuda()
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu(),
                                        meta_dir=meta_dir[0])
                score_map += flip_output
                score_map /= 2

            if anno_type != 'none':
                # intermediate supervision: sum loss over all stacks.
                # `pck_threshold` is a module-level constant -- TODO confirm
                loss = 0
                for o in output:
                    loss += criterion(o, target_var)
                acc = accuracy(score_map, target.cpu(), idx, pck_threshold)

        if multi_scale:
            # merge the `num_scales` consecutive entries belonging to the
            # same image back into a single prediction
            new_scales = []
            new_res = []
            new_score_map = []
            new_inp = []
            img_name = []
            confidence = []
            new_center = []

            num_imgs = score_map.size(0) // num_scales
            for n in range(num_imgs):
                score_map_merged, res, conf = multi_scale_merge(
                    score_map[num_scales * n:num_scales * (n + 1)].numpy(),
                    meta['scale'][num_scales * n:num_scales * (n + 1)])
                inp_merged, _, _ = multi_scale_merge(
                    inputs[num_scales * n:num_scales * (n + 1)].numpy(),
                    meta['scale'][num_scales * n:num_scales * (n + 1)])
                new_score_map.append(score_map_merged)
                new_scales.append(meta['scale'][num_scales * (n + 1) - 1])
                new_center.append(meta['center'][num_scales * n])
                new_res.append(res)
                new_inp.append(inp_merged)
                img_name.append(meta['img_name'][num_scales * n])
                confidence.append(conf)

            if len(new_score_map) > 1:
                # stack back to 4-dim
                score_map = torch.tensor(np.stack(new_score_map))
                inputs = torch.tensor(np.stack(new_inp))
            else:
                score_map = torch.tensor(
                    np.expand_dims(new_score_map[0], axis=0))
                inputs = torch.tensor(np.expand_dims(new_inp[0], axis=0))

        else:
            img_name = []
            confidence = []
            for n in range(score_map.size(0)):
                img_name.append(meta['img_name'][n])
                # per-joint confidence = heatmap peak value
                confidence.append(
                    np.amax(score_map[n].numpy(), axis=(1, 2)).tolist())

        # generate predictions in original-image coordinates
        if multi_scale:
            preds = final_preds(score_map, new_center, new_scales, new_res[0])
        else:
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])

        if evaluate:
            # save keypoints + confidences as one JSON per image
            for n in range(score_map.size(0)):
                with open(
                        os.path.join(save_result_dir, 'preds',
                                     img_name[n] + '.json'), 'w') as f:
                    obj = {
                        'd2_key': preds[n].numpy().tolist(),
                        'score': confidence[n]
                    }
                    json.dump(obj, f)

            for n in range(score_map.size(0)):
                inp = inputs[n]
                pred = score_map[n]
                # un-normalize the image for visualization
                for t, m in zip(inp, mean):
                    t.add_(m)
                # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2;
                # confirm the pinned scipy version or migrate to imageio.
                scipy.misc.imsave(
                    os.path.join(save_result_dir, 'visualization',
                                 '{}.jpg'.format(img_name[n])),
                    sample_with_heatmap(inp, pred))

                if save_heatmap:
                    score_map_original_size = align_back(
                        score_map[n], meta['center'][n],
                        meta['scale'][len(scales) * n - 1],
                        meta['original_size'][n])
                    np.save(
                        os.path.join(save_result_dir, 'heatmaps',
                                     '{}.npy'.format(img_name[n])),
                        score_map_original_size)

        if anno_type != 'none':
            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(val_loader),
            data=data_time.val,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()

    bar.finish()

    if anno_type != 'none':
        return losses.avg, acces.avg
    else:
        return 0, 0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             args,
             flip=False,
             test_batch=6):
    """Evaluate `model` on `val_loader`, also printing per-joint PCK.

    Fixes vs. the original: the final `return _` referenced a name that is
    never defined in this function (NameError).  The first slot is kept as
    None for arity compatibility with sibling validate() functions that
    return a loss there.

    Relies on module-level names: `device`, `idx1`, `idx2`, `AverageMeter`,
    `Bar`, `fliplr`, `flip_back`, `accuracy_2animal`, `final_preds`.

    Returns:
        (None, avg_acc, predictions) with `predictions` of shape
        (len(dataset), num_classes, 2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    acces = AverageMeter()

    # running PCK sums and visible-joint counts, per joint
    pck_score = np.zeros(num_classes)
    pck_count = np.zeros(num_classes)

    # one (x, y) coordinate pair per joint per dataset sample
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            if args.arch == 'hg':
                output = model(input)
            elif args.arch == 'hg_multitask':
                output, _ = model(input)
            else:
                raise Exception("unspecified arch")

            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()

            if flip:
                # test-time augmentation with the mirrored input
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            acc, _ = accuracy_2animal(score_map, target.cpu(), idx1, idx2)

            # accumulate per joint [email protected]; acc[0] is the mean, acc[j+1]
            # the per-joint score (-1 marks a joint absent from the batch)
            for j in range(num_classes):
                if acc[j + 1] > -1:
                    pck_score[j] += acc[j + 1].numpy()
                    pck_count[j] += 1

            # map heatmap argmaxes back to original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                acc=acces.avg)
            bar.next()

        bar.finish()

    # NOTE(review): divides by zero (numpy warning -> nan/inf) for any joint
    # never seen during evaluation -- confirm this cannot happen upstream.
    for j in range(num_classes):
        pck_score[j] /= float(pck_count[j])

    print("\nper joint [email protected]:")
    print(list(pck_score))

    return None, acces.avg, predictions
Example no. 6
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate `model` on a loader that also yields image paths, and
    optionally dump debug visualizations.

    Relies on module-level names: `device`, `AverageMeter`, `Bar`, `cv2`,
    `fliplr`, `flip_back`, `accuracy`, `final_preds`, `batch_with_heatmap`,
    `plt`.

    Returns:
        (avg_loss, avg_acc, predictions) with `predictions` of shape
        (len(dataset), num_classes, 2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # one (x, y) coordinate pair per joint per dataset sample
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta, img_path) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # NOTE(review): passed empty to accuracy() below -- presumably
            # "use all joints"; confirm against accuracy()'s signature.
            indexes = []

            input = input.to(device, non_blocking=True)

            # `img_path` is a 1-element batch; str() wraps it in
            # brackets/quotes, which the slice strips off again
            path = str(img_path)
            path = path[3:-2]
            image = cv2.imread(path)

            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output; stacked models return a list whose last entry
            # is the final heatmap
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()

            if flip:
                # test-time augmentation with the mirrored input
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output
                # intermediate supervision: sum the loss over every stack
                loss = sum(criterion(o, target, target_weight)
                           for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            # map heatmap argmaxes back to original-image coordinates
            preds, vals = final_preds(score_map, meta['center'], meta['scale'],
                                      [64, 64])

            acc = accuracy(score_map, target.cpu(), indexes)

            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                # NOTE(review): hard-coded absolute output path -- should be
                # made configurable.
                cv2.imwrite(
                    '/home/shantam/Documents/Programs/pytorch-pose/example/predictions/pred'
                    + str(i) + '.png', image)

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return losses.avg, acces.avg, predictions
Example no. 7
0
def prediction_check(previous_inp,
                     previous_kpts,
                     inp,
                     model,
                     dataset,
                     device,
                     num_transform=5,
                     num_kpts=18,
                     lambda_decay=0.9):
    """Generate pseudo-label keypoints for `inp` via consistency checks.

    Averages the model's heatmaps over `num_transform` random
    scale/rotation augmentations (each combined with a horizontal flip),
    then, when a previous frame is given, cross-checks the result against
    Lucas-Kanade optical flow propagated from the previous keypoints.

    Args:
        previous_inp: previous frame tensor (3x256x256) or None.
        previous_kpts: previous keypoints, shape (18, 3) as (x, y, conf).
        inp: current frame tensor, 3x256x256.
        model: network called as model(img, 1, return_domain=False),
            returning (out, out_refine).
        dataset: provides `mean` / `std` for color normalization.
        device: torch device used for the flipped forward pass.
        num_transform: number of random augmentations to average over.
        num_kpts: number of keypoints.  NOTE(review): several literals
            below are hard-coded to 18 -- confirm before changing this.
        lambda_decay: decay factor that favors flow-propagated keypoints
            over low-confidence model predictions.

    Returns:
        (target, generated_kpts): heatmap target from `kpts_to_heatmap`
        and the (num_kpts, 3) array of (x, y, confidence).
    """

    # equivariant consistency
    animal_mean = dataset.mean
    animal_std = dataset.std
    # base scale for the 200-pixel crop convention, plus augmentation ranges
    s0 = 256 / 200.0
    sf = 0.25
    rf = 30
    c = torch.Tensor((128, 128))
    preds_all = np.zeros((num_transform, num_kpts, 2))  # (x, y)

    confidence = np.ones(18)
    # accumulator for heatmaps mapped back to the un-augmented frame
    score_map_avg = np.zeros((1, 18, 64, 64))
    for i in range(num_transform):

        img = inp.clone()
        if i == 0:
            # first pass: identity transform (no scale jitter, no rotation)
            s = s0
            rot = 0
        else:
            # random scale in [s0*(1-sf), s0*(1+sf)], rotation clamped to
            # [-2*rf, 2*rf] degrees
            s = s0 * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            rot = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0]

        img = crop_ori(img, c, s, [256, 256], rot)
        img = color_normalize(img, animal_mean, animal_std)

        model_out, model_out_refine = model(img.unsqueeze(0),
                                            1,
                                            return_domain=False)
        score_map = model_out_refine[0].cpu()
        feat_map = score_map.squeeze(0).detach().cpu().numpy()

        # with flip
        flip_input = torch.from_numpy(fliplr(
            img.clone().cpu().numpy())).float().to(device)
        flip_output, flip_output_refine = model(flip_input.unsqueeze(0),
                                                1,
                                                return_domain=False)
        flip_output_re = flip_back(flip_output_refine[0].detach().cpu(),
                                   'real_animal')
        # average the direct and flipped predictions
        feat_map += flip_output_re.squeeze(0).numpy()
        feat_map /= 2

        # rotate and scale score_map back
        for j in range(feat_map.shape[0]):
            feat_map_j = feat_map[j]
            # undo the rotation about the 64x64 heatmap center
            M = cv2.getRotationMatrix2D((32, 32), -rot, 1)
            feat_map_j = cv2.warpAffine(feat_map_j, M, (64, 64))
            # undo the scale jitter (s*200/256 maps back to the base crop)
            feat_map_j = cv2.resize(feat_map_j,
                                    None,
                                    fx=s * 200.0 / 256.0,
                                    fy=s * 200.0 / 256.0,
                                    interpolation=cv2.INTER_LINEAR)

            # paste / crop the rescaled map back into the 64x64 accumulator
            if feat_map_j.shape[0] < 64:
                start = 32 - feat_map_j.shape[0] // 2
                end = start + feat_map_j.shape[0]
                score_map_avg[0][j][start:end, start:end] += feat_map_j
            else:
                start = feat_map_j.shape[0] // 2 - 32
                end = feat_map_j.shape[0] // 2 + 32
                score_map_avg[0][j] += feat_map_j[start:end, start:end]

    score_map_avg = score_map_avg / num_transform
    # per-joint confidence = peak of the averaged heatmap
    confidence_score = np.max(score_map_avg, axis=(0, 2, 3))

    confidence = confidence_score.astype(np.float32)
    score_map_avg = torch.Tensor(score_map_avg)

    preds = final_preds(score_map_avg, [c], [s0], [64, 64])
    preds = preds.squeeze(0)
    pts = preds.clone().cpu().numpy()

    generated_kpts = np.zeros((num_kpts, 3)).astype(np.float32)
    generated_kpts[:, :2] = pts
    generated_kpts[:, 2] = confidence

    # temporal consistency
    if previous_inp is not None:
        lk_params = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # un-normalize both frames back to displayable uint8 images
        current_img = (inp.clone().cpu() +
                       animal_mean.view(3, 1, 1)).numpy().transpose(1, 2, 0)
        previous_img = (previous_inp.clone().cpu() +
                        animal_mean.view(3, 1, 1)).numpy().transpose(1, 2, 0)
        current_frame = (current_img * 255).astype(np.uint8)
        previous_frame = (previous_img * 255).astype(np.uint8)
        current_frame_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        previous_frame_gray = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)

        previous_preds = previous_kpts[:, :2].reshape(18, 1,
                                                      2).astype(np.float32)
        # flow preds (18,1,2)
        flow_preds, st, err = cv2.calcOpticalFlowPyrLK(previous_frame_gray,
                                                       current_frame_gray,
                                                       previous_preds, None,
                                                       **lk_params)
        flow_preds = flow_preds.reshape(18, 2)
        previous_preds = previous_preds.reshape(18, 2)
        flow_confidence = previous_kpts[:, 2].reshape(18, 1)

        # caculate kpts dist to check flow confidence: a jump larger than
        # 15px between frames invalidates the flow track for that joint
        for j in range(18):
            if np.linalg.norm(flow_preds[j] - previous_preds[j]) > 15:
                flow_confidence[j] = 0
        # combine flow_preds (flow_preds, confidence) with generated preds (generated_kpts, confidence)
        for j in range(18):
            if flow_confidence[j] > 0:
                if (confidence[j] / flow_confidence[j]) < lambda_decay:
                    generated_kpts[j, :2] = flow_preds[j, :2]
                    generated_kpts[j, 2] = flow_confidence[j] * lambda_decay
    target = kpts_to_heatmap(generated_kpts)
    return target, generated_kpts
Example no. 8
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             args,
             flip=False,
             test_batch=6):
    """Evaluate the domain-adaptation model and print per-joint/part PCK.

    Fixes vs. the original: the final `return _` referenced a name that is
    never defined in this function (NameError).  The first slot is kept as
    None for arity compatibility with sibling validate() functions that
    return a loss there.

    Relies on module-level names: `device`, `idx1`, `idx2`, `AverageMeter`,
    `Bar`, `fliplr`, `flip_back`, `accuracy_2animal`, `final_preds`.

    Returns:
        (None, avg_acc, predictions) with `predictions` of shape
        (len(dataset), num_classes, 2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    acces = AverageMeter()

    # running PCK sums and visible-joint counts, per joint
    pck_score = np.zeros(num_classes)
    pck_count = np.zeros(num_classes)

    # one (x, y) coordinate pair per joint per dataset sample
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)

            # refined branch output is used for scoring
            output, output_refine = model(input, 1, return_domain=False)
            score_map = output_refine[0].cpu()

            if flip:
                # test-time augmentation with the mirrored input
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                _, flip_output_refine = model(flip_input,
                                              1,
                                              return_domain=False)
                flip_output = flip_output_refine[0].cpu()
                flip_output = flip_back(flip_output, 'real_animal')
                score_map += flip_output

            acc, _ = accuracy_2animal(score_map, target.cpu(), idx1, idx2)
            # accumulate per joint [email protected]; acc[0] is the mean, acc[j+1]
            # the per-joint score (-1 marks a joint absent from the batch)
            for j in range(num_classes):
                if acc[j + 1] > -1:
                    pck_score[j] += acc[j + 1].numpy()
                    pck_count[j] += 1

            # map heatmap argmaxes back to original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])

            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                acc=acces.avg)
            bar.next()

        bar.finish()

    # NOTE(review): divides by zero (numpy warning -> nan/inf) for any joint
    # never seen during evaluation -- confirm this cannot happen upstream.
    for j in range(num_classes):
        pck_score[j] /= float(pck_count[j])
    print("\nper joint [email protected]:")
    print('Animal: {}, total number of joints: {}'.format(
        args.animal, pck_count.sum()))
    print(list(pck_score))

    # aggregate joints into semantic body parts for reporting
    parts = {
        'eye': [0, 1],
        'chin': [2],
        'hoof': [3, 4, 5, 6],
        'hip': [7],
        'knee': [8, 9, 10, 11],
        'shoulder': [12, 13],
        'elbow': [14, 15, 16, 17]
    }
    for p in parts.keys():
        part = parts[p]
        score = 0.
        count = 0.
        for joint in part:
            score += pck_score[joint] * pck_count[joint]
            count += pck_count[joint]
        print('\n Joint {}: {} '.format(p, score / count))

    return None, acces.avg, predictions
def validate(val_loader,
             model,
             criterion,
             debug=False,
             flip=True,
             test_batch=6,
             njoints=68):
    """Evaluate a landmark model on ``val_loader``.

    Runs the model over the whole loader (optionally averaging with a
    horizontally-flipped forward pass), records loss/accuracy, and collects
    per-sample interocular distances used for the mean-error and AUC metrics.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network producing heatmaps (single tensor or list of stages).
        criterion: loss taking ``(output, target, target_weight, len(idx))``.
        debug: if True, plot ground-truth vs. predicted heatmaps per batch.
        flip: if True, add the un-flipped heatmaps of a mirrored input.
        test_batch: batch size, used to slot interocular distances per batch.
        njoints: number of keypoints (rows of ``predictions``).

    Returns:
        (avg loss, avg accuracy, predictions tensor [N, njoints, 2],
         auc, mean interocular error)
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    # One (x, y) row per joint for every sample in the dataset.
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    # Per-joint, per-sample distances; columns are filled batch by batch.
    interocular_dists = torch.zeros((njoints, val_loader.dataset.__len__()))

    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(input)
            # Stacked-hourglass style models return one heatmap per stage;
            # only the last stage is scored.
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            if flip:
                # NOTE(review): `.numpy()` on `input.clone()` assumes the
                # tensor is on CPU (it was moved to `device` above) — this
                # raises if `device` is CUDA; confirm intended device.
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                # Un-mirror the flipped heatmaps, then average by summation
                # (accuracy/argmax are scale-invariant).
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output) == list:  # multiple output
                # Intermediate supervision: sum the loss over all stages.
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight, len(idx))
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))

            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            # Slot this batch's distances into the global per-sample table.
            interocular_dists[:, i * test_batch:(i + 1) *
                              test_batch] = batch_interocular_dists

            # generate predictions
            # Map 64x64 heatmap peaks back to original image coordinates.
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
        # Select only the joints listed in the (1-based) global `idx` and
        # average their distances, ignoring the -1 "not visible" sentinel.
        idx_array = np.array(idx) - 1
        interocular_dists_pickup = interocular_dists[idx_array, :]
        mean_error = torch.mean(
            interocular_dists_pickup[interocular_dists_pickup != -1])
        auc = calc_metrics(interocular_dists,
                           idx)  # this is auc of predicted maps and target.
        #print("=> Mean Error: {:.8f}, [email protected]: {:.8f} based on maps".format(mean_error, auc))
    return losses.avg, acces.avg, predictions, auc, mean_error
Esempio n. 10
0
def validate(unit_path, model, criterion, num_classes, debug=False, flip=True):
    """Evaluate the model on a single custom image.

    Opens ``unit_path`` only to derive a center/scale, then loads a
    hard-coded custom image through ``customMpiiObject`` and runs one
    forward pass.

    Args:
        unit_path: path of the image used to compute center and scale.
        model: heatmap network (single tensor or list-of-stages output).
        criterion: unused here (loss computation is commented out).
        num_classes: number of joints in the prediction tensor.
        debug, flip: accepted for interface parity; not used.

    Returns:
        (avg loss — always 0 since no loss is recorded, avg accuracy,
         predicted joint coordinates)
    """
    losses = AverageMeter()
    acces = AverageMeter()

    dimensions = [0, 0]
    scale_factor = 0
    with Image.open(unit_path) as img:
        # Materialize as a list: a bare `map` object prints as
        # `<map object ...>` and can only be consumed once.
        dimensions = list(map(lambda x: x / 2, img.size))
        scale_factor = (img.size[1]) / 200.0
        print(dimensions)
        print(scale_factor)

    # Custom edits [for testing]
    # dimensions = [459.0, 335.0]
    scale_factor = 6.0969 + 3
    # predictions
    predictions = torch.Tensor(1, num_classes, 2)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        input_temp, target_temp, meta = customMpiiObject.get_image_data(
            './data/custom/images/015620151.jpg', dimensions, scale_factor)

        input = torch.zeros([1, 3, 256, 256])
        target = torch.zeros([1, 16, 64, 64])
        target_weight = torch.zeros([1, 16, 1])

        input[0, :, :, :] = input_temp.to(device, non_blocking=True)
        # Fix: copy the loaded target heatmaps; the original assigned the
        # zero-filled `target` into itself, leaving the target all zeros.
        target[0, :, :, :] = target_temp.to(device, non_blocking=True)
        target_weight[0, :, :] = meta['target_weight'].to(device,
                                                          non_blocking=True)

        # compute output
        output = model(input)
        score_map = output[-1].cpu() if type(output) == list else output.cpu()
        # if type(output) == list:  # multiple output
        #     loss = 0
        #     for o in output:
        #         loss += criterion(o, target, target_weight)
        #     output = output[-1]
        # else:  # single output
        #     loss = criterion(output, target, target_weight)

        acc = accuracy(score_map, target.cpu(), idx)

        # final_preds expects a batched [1, 2] center tensor.
        meta_center_temp = torch.Tensor(meta['center'])
        meta['center'] = torch.zeros([1, 2])
        meta['center'][0, :] = meta_center_temp

        # generate predictions
        preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
        # for n in range(score_map.size(0)):
        #     predictions[meta['index'][n], :, :] = preds[n, :, :]
        predictions = preds
        acces.update(acc[0], input.size(0))

        # measure accuracy and record loss

    return losses.avg, acces.avg, predictions
Esempio n. 11
0
    def validate(self):
        """Evaluate ``self.netG`` over ``self.val_loader`` (MPII samples only).

        Skips non-MPII batches, optionally averages with a flipped forward
        pass, and fills a per-sample prediction tensor.

        Returns:
            (avg loss, avg accuracy, predictions tensor [N, num_classes, 2])
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        acces = AverageMeter()

        predictions = torch.Tensor(self.val_loader.dataset.__len__(),
                                   self.num_classes, 2)

        self.netG.eval()

        gt_win, pred_win = None, None
        end = time.time()
        bar = Bar('Eval ', max=len(self.val_loader))
        with torch.no_grad():
            for i, (input, target, meta, mpii) in enumerate(self.val_loader):
                if mpii == False:
                    continue
                data_time.update(time.time() - end)

                input = input.to(self.device, non_blocking=True)
                target = target.to(self.device, non_blocking=True)
                target_weight = meta['target_weight'].to(self.device,
                                                         non_blocking=True)

                output = self.netG(input)
                score_map = output[-1].cpu() if type(
                    output) == list else output.cpu()
                if self.flip:
                    # Fix: the original assigned the bare `torch.from_numpy`
                    # function to flip_input instead of calling it, feeding a
                    # function object into the network. Mirror the input the
                    # same way the sibling validate() functions do.
                    flip_input = torch.from_numpy(fliplr(
                        input.clone().numpy())).float().to(self.device)
                    flip_output = self.netG(flip_input)
                    flip_output = flip_output[-1].cpu() if type(
                        flip_output) == list else flip_output.cpu()
                    flip_output = flip_back(flip_output)
                    score_map += flip_output

                if type(output) == list:
                    # Intermediate supervision: sum loss over all stages.
                    loss = 0
                    for o in output:
                        loss += self.criterion(o, target, target_weight)
                    output = output[-1]
                else:
                    loss = self.criterion(output, target, target_weight)

                acc = accuracy(score_map, target.cpu(), self.idx)

                preds = final_preds(score_map, meta['center'], meta['scale'],
                                    [64, 64])
                for n in range(score_map.size(0)):
                    predictions[meta['index'][n], :, :] = preds[n, :, :]

                if self.debug:
                    gt_batch_img = batch_with_heatmap(input, target)
                    pred_batch_img = batch_with_heatmap(input, score_map)
                    if not gt_win or not pred_win:
                        plt.subplot(121)
                        gt_win = plt.imshow(gt_batch_img)
                        plt.subplot(122)
                        pred_win = plt.imshow(pred_batch_img)
                    else:
                        gt_win.set_data(gt_batch_img)
                        pred_win.set_data(pred_batch_img)
                    plt.pause(.05)
                    plt.draw()

                # Fix: `loss.item` (without parentheses) passed the bound
                # method object; call it to record the scalar loss value.
                losses.update(loss.item(), input.size(0))
                acces.update(acc[0], input.size(0))

                batch_time.update(time.time() - end)
                end = time.time()

                bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                    batch=i + 1,
                    size=len(self.val_loader),
                    data=data_time.val,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    acc=acces.avg)

                bar.next()

            bar.finish()
        return losses.avg, acces.avg, predictions
def validate(val_loader,
             model,
             criterion,
             flip=True,
             test_batch=6,
             njoints=18):
    """Evaluate an animal-pose model on ``val_loader``.

    Reorders tiger keypoint channels to the horse joint layout before
    scoring, so both species share one evaluation path.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: heatmap network (single tensor or list-of-stages output).
        criterion: loss taking ``(output, target, target_weight, len(idx))``.
        flip: if True, average with an un-flipped mirrored forward pass.
        test_batch: accepted for interface parity; unused here.
        njoints: number of keypoints for the (unused) prediction buffer.

    Returns:
        (avg loss, avg accuracy)

    Raises:
        Exception: if the module-level ``global_animal`` is neither
            'horse' nor 'tiger'.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    # NOTE(review): allocated but never filled or returned (the fill loop
    # below is commented out).
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            if global_animal == 'horse':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
            elif global_animal == 'tiger':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
                # Permute tiger joint channels (1-based index list converted
                # to 0-based) so they line up with the horse joint ordering.
                target = target[:,
                                np.array([
                                    1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13,
                                    14, 9, 10, 11, 12
                                ]) - 1, :, :]
                target_weight = target_weight[:,
                                              np.array([
                                                  1, 2, 3, 4, 5, 6, 7, 8, 15,
                                                  16, 17, 18, 13, 14, 9, 10,
                                                  11, 12
                                              ]) - 1, :]
            else:
                raise Exception('please add new animal category')

            # compute output
            output = model(input)
            # Only the last hourglass stage is scored.
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            if flip:
                # NOTE(review): `.numpy()` assumes `input` is on CPU even
                # though it was moved to `device` above — raises on CUDA;
                # confirm intended device.
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output) == list:  # multiple output
                # Intermediate supervision: sum the loss over all stages.
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight, len(idx))
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))

            acc, _ = accuracy(score_map, target.cpu(), idx)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            #for n in range(score_map.size(0)):
            #    predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return losses.avg, acces.avg
Esempio n. 13
0
def main(args):
    """Run single-image keypoint inference and display the detections.

    Loads a hard-coded image, crops it to 256x256 around its center,
    restores a checkpointed hourglass model, thresholds the predicted
    joints by confidence, and draws them in an OpenCV window.

    Args:
        args: parsed CLI namespace providing ``arch``, ``stacks``,
            ``blocks``, ``resnet_layers``, ``solver``, ``lr``, ``momentum``
            and ``weight_decay``.
    """
    img_path = "/home/shantam/Documents/Programs/hourglasstensorlfow/images/cropped0.jpg"
    img = load_image(img_path)
    # Center and scale follow the MPII convention (scale = height / 200).
    c = [img.shape[1] / 2, img.shape[2] / 2]
    s = float(img.shape[1] / 200.0)

    img = crop(img_path, img, c, s, [256, 256])
    trans = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    #img = trans(img)
    img = img.unsqueeze(0)
    print(img.shape)

    njoints = 24

    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints,
                                       resnet_layers=args.resnet_layers)

    model = torch.nn.DataParallel(model).to(device)
    criterion = losses.JointsMSELoss().to(device)

    if args.solver == 'rms':
        print("done")
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )

    checkpoint = torch.load(
        "/home/shantam/Documents/Programs/pytorch-pose/checkpoint/mpii/hg_fullset/checkpoint.pth.tar"
    )
    args.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    img = img.to(device, non_blocking=True)
    output = model(img)

    #print (len(output))

    # Score only the last hourglass stage.
    score_map = output[-1].cpu() if type(output) == list else output.cpu()
    preds, vals = final_preds(score_map, [c], [s], [64, 64])

    image = cv2.imread(img_path)
    coords = np.squeeze(preds)

    for m in range(0, len(coords)):
        val = vals[0][m].detach().numpy()
        print(val)
        if val > 0.25:  #threshold for confidence score
            x, y = coords[m][0].cpu().detach().numpy(), coords[m][1].cpu(
            ).detach().numpy()
            print(x, y)
            # Fix: cv2.circle requires integer pixel coordinates; the
            # numpy float values raised a type error in OpenCV.
            cv2.circle(image, (int(x), int(y)), 1, (0, 0, 255), -1)

    while True:
        cv2.imshow("dec", image)
        cv2.waitKey(10)
Esempio n. 14
0
def validate(val_loader,
             model,
             criterion,
             criterion_seg,
             debug=False,
             flip=True,
             test_batch=6,
             njoints=68):
    """Evaluate a joint keypoint + segmentation model.

    Scores heatmap predictions (with optional flip averaging) against
    ``criterion`` and segmentation logits against ``criterion_seg``,
    accumulating intersection/union for a mean-IoU metric.

    Args:
        val_loader: DataLoader yielding ``(input, target, target_seg, meta)``.
        model: network returning ``(keypoint_output, seg_output)``.
        criterion: keypoint loss ``(output, target, target_weight, len(idx))``.
        criterion_seg: segmentation loss ``(logits, target_seg)``.
        debug: if True, plot ground-truth vs. predicted heatmaps per batch.
        flip: if True, average with an un-flipped mirrored forward pass.
        test_batch: accepted for interface parity; unused here.
        njoints: number of keypoints (rows of ``predictions``).

    Returns:
        (avg keypoint loss, avg accuracy, predictions [N, njoints, 2],
         mean IoU * 100)
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_kpt = AverageMeter()
    losses_seg = AverageMeter()
    acces = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))

    interocular_dists = torch.zeros((njoints, val_loader.dataset.__len__()))

    with torch.no_grad():
        for i, (input, target, target_seg, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input, target, target_seg = input.to(device), target.to(
                device, non_blocking=True), target_seg.to(device)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output_kpt, output_seg = model(input)
            score_map = output_kpt[-1].cpu() if type(
                output_kpt) == list else output_kpt.cpu()

            if flip:
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output_kpt) == list:  # multiple output
                # Intermediate supervision: sum both losses over all stages.
                loss_kpt = 0
                loss_seg = 0
                for (o, o_seg) in zip(output_kpt, output_seg):
                    loss_kpt += criterion(o, target, target_weight, len(idx))
                    loss_seg += criterion_seg(o_seg, target_seg)
                output = output_kpt[-1]
                output_seg = output_seg[-1]
            else:  # single output
                loss_kpt = criterion(output_kpt, target, target_weight,
                                     len(idx))
                # Fix: was `criterion(output_seg, target_seg)` — the keypoint
                # criterion (4-arg MSE) instead of the segmentation loss.
                loss_seg = criterion_seg(output_seg, target_seg)

            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            # Class with the highest logit becomes the predicted mask.
            _, pred_seg = torch.max(output_seg, 1)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses_kpt.update(loss_kpt.item(), input.size(0))
            losses_seg.update(loss_seg.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            inter, union = inter_and_union(
                pred_seg.data.cpu().numpy().astype(np.uint8),
                target_seg.data.cpu().numpy().astype(np.uint8))
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Epsilon guards against division by zero for absent classes.
            iou = inter_meter.sum / (union_meter.sum + 1e-10)

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_kpt: {loss_kpt:.8f} | Loss_seg: {loss_seg:.8f} | Acc: {acc: .8f} | IOU: {iou:.2f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss_kpt=losses_kpt.avg,
                loss_seg=losses_seg.avg,
                acc=acces.avg,
                iou=iou.mean() * 100)
            bar.next()

        bar.finish()
        print(iou)
    return losses_kpt.avg, acces.avg, predictions, iou.mean() * 100
Esempio n. 15
0
def validate(val_loader,
             model,
             criterion,
             num_classes,
             debug=False,
             flip=True):
    """Evaluate a two-headed (hourglass + embedding) model.

    Sums the criterion over all intermediate hourglass stages against
    ``target`` and adds the embedding head's loss against ``target2``.

    Modernized from pre-0.4 PyTorch: ``.cuda(async=True)`` is a syntax
    error on Python >= 3.7 (``async`` is a reserved word) and is now
    ``non_blocking=True``; ``Variable(..., volatile=True)`` is replaced by
    ``torch.no_grad()``; ``loss.data[0]`` by ``loss.item()``.

    Args:
        val_loader: DataLoader yielding ``(inputs, target, target2, meta)``.
        model: network whose last two outputs are hourglass and embedding maps.
        criterion: per-stage loss ``(output, target)``.
        num_classes: number of joints (rows of ``predictions``).
        debug: if True, plot ground-truth vs. predicted heatmaps per batch.
        flip: accepted for interface parity; unused here.

    Returns:
        (avg loss, avg accuracy, predictions tensor [N, num_classes, 2])
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():  # replaces the removed `volatile=True` Variables
        for i, (inputs, target, target2, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)
            target2 = target2.cuda(non_blocking=True)

            input_var = inputs.cuda()
            target_var = target
            target2_var = target2

            # compute output
            output = model(input_var)
            score_map_hg = output[-2].data.cpu()
            score_map_emb = output[-1].data.cpu()

            score_map_emb2 = score_map_emb.cuda(non_blocking=True)

            # Sum the criterion over all hourglass stages, then add the
            # embedding head's loss against its own target.
            loss = criterion(output[0], target_var)
            for j in range(1, (len(output) - 1)):
                loss += criterion(output[j], target_var)
            loss += criterion(output[-1], target2_var)
            acc = accuracy(score_map_emb2, target2, idx)

            # generate predictions
            #print(np.shape(predictions))

            preds = final_preds(score_map_emb, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map_emb.size(0)):
                # NOTE(review): every other joint channel is kept
                # (embedding output appears to interleave) — confirm.
                predictions[meta['index'][n], :, :] = preds[n, ::2, :]
                #predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map_emb)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

    bar.finish()
    return losses.avg, acces.avg, predictions
Esempio n. 16
0
def myvalidate(model, criterion, num_classes, debug=False, flip=True):
    """Run inference on a fixed folder of local test images.

    Loads ``img_num`` images from a hard-coded folder, crops each around a
    hard-coded center/scale, normalizes with the MPII mean/std, and runs
    the model one image at a time.

    Fix: removed three leftover ``set_trace()`` breakpoints that halted
    every non-interactive run.

    Args:
        model: heatmap network (single tensor or list-of-stages output).
        criterion: unused; accepted for interface parity.
        num_classes: number of joints (rows of ``predictions``).
        debug: if True, save heatmap overlays to a hard-coded folder.
        flip: accepted for interface parity; flip averaging is commented out.

    Returns:
        Predicted joint coordinates, shape [img_num, num_classes, 2].
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    # NOTE: losses/acces are never updated here; the progress bar shows
    # their initial averages.
    losses = AverageMeter()
    acces = AverageMeter()

    img_folder = '/data3/wzwu/dataset/my'
    img_num = 1
    r = 0
    # Hard-coded crop center; `center2` is the batched form final_preds needs.
    center1 = torch.Tensor([1281, 2169])
    center2 = torch.Tensor([[1281, 2169]])
    scale = torch.Tensor([10.0])
    inp_res = 256
    meanstd_file = './data/mpii/mean.pth.tar'
    if isfile(meanstd_file):
        meanstd = torch.load(meanstd_file)
        mean = meanstd['mean']
        std = meanstd['std']

    input_list = []
    for i in range(img_num):
        img_name = str(i) + '.jpg'
        img_path = os.path.join(img_folder, img_name)
        print('img_path')
        print(img_path)
        img = load_image(img_path)
        inp = crop(img, center1, scale, [inp_res, inp_res], rot=r)
        inp = color_normalize(inp, mean, std)
        input_list.append(inp)

    # predictions
    predictions = torch.Tensor(img_num, num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=img_num)
    with torch.no_grad():
        for i, input in enumerate(input_list):
            # Add the batch dimension expected by the model.
            s0, s1, s2 = input.size()
            input = input.view(1, s0, s1, s2)
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if type(output) == list else output.cpu()
            #if flip:
            #    flip_input = torch.from_numpy(fliplr(input.clone().numpy())).float().to(device)
            #    flip_output = model(flip_input)
            #    flip_output = flip_output[-1].cpu() if type(flip_output) == list else flip_output.cpu()
            #    flip_output = flip_back(flip_output)
            #    score_map += flip_output

            # generate predictions
            preds = final_preds(score_map, center2, scale, [64, 64])
            print('preds')
            print(preds)
            print('predictions')
            print(predictions)
            for n in range(score_map.size(0)):
                predictions[i, :, :] = preds[n, :, :]

            if debug:
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    #plt.subplot(121)
                    #plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                plt.savefig('/data3/wzwu/test/' + str(i) + '.png')

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=img_num,
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()

        bar.finish()
    return predictions