# Example #1
# 0
def validate(val_loader, net, epoch, visualizer, idx, num_classes):
    """Run one validation epoch with horizontal-flip test-time augmentation.

    Averages the network's final-stage heatmaps over the original and the
    horizontally flipped image, accumulates the per-stage MSE loss and PCKh
    accuracy, and collects per-sample keypoint predictions.

    Args:
        val_loader: DataLoader yielding (img, heatmap, center, scale, rot,
            grnd_pts, normalizer, index) batches.
        net: network returning a list of per-stage heatmap tensors.
        epoch: current epoch number (used for logging only).
        visualizer: logger exposing print_log(...).
        idx: joint indices passed to the PCKh accuracy measure.
        num_classes: number of keypoints per sample.

    Returns:
        Tuple of (average loss, average PCKh at original resolution,
        predictions tensor of shape len(dataset) x num_classes x 2).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    pckhs = AverageMeter()
    pckhs_origin_res = AverageMeter()
    # One (x, y) prediction per keypoint per dataset sample.
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    net.eval()

    end = time.time()
    # torch.no_grad() replaces the removed Variable(volatile=True) API and
    # keeps autograd from building a graph during evaluation.
    with torch.no_grad():
        for i, (img, heatmap, center, scale, rot, grnd_pts, normalizer,
                index) in enumerate(val_loader):
            # `async` is a reserved word in Python >= 3.7; `non_blocking`
            # is the supported spelling of the same flag.
            heatmap = heatmap.cuda(non_blocking=True)

            # Per-stage mean squared error, summed across all stage outputs.
            output1 = net(img)
            loss = 0
            for per_out in output1:
                tmp_loss = (per_out - heatmap) ** 2
                loss = loss + tmp_loss.sum() / tmp_loss.numel()

            # Horizontal-flip TTA: run the flipped image and map the
            # resulting heatmaps back before averaging with the first pass.
            img_flip = torch.from_numpy(img.numpy()[:, :, :, ::-1].copy())
            output2 = net(img_flip)
            output2 = HumanAug.flip_channels(output2[-1].cpu().data)
            output2 = HumanAug.shuffle_channels_for_horizontal_flipping(output2)
            output = (output1[-1].cpu().data + output2) / 2

            # Accuracy both in heatmap space and at original resolution.
            pckh = Evaluation.accuracy(output, heatmap.cpu(), idx)
            pckhs.update(pckh[0])
            pckh_origin_res = Evaluation.accuracy_origin_res(
                output, center, scale, [64, 64], grnd_pts, normalizer, rot)
            pckhs_origin_res.update(pckh_origin_res[0])

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # .item() replaces the removed 0-dim indexing loss.data[0].
            losses.update(loss.item())
            loss_dict = OrderedDict([('loss', losses.avg), ('pckh', pckhs.avg),
                                     ('pckh_origin_res', pckhs_origin_res.avg)])
            visualizer.print_log(epoch, i, len(val_loader), value1=loss_dict)

            # Scatter this batch's predictions into the dataset-wide tensor.
            preds = Evaluation.final_preds(output, center, scale, [64, 64], rot)
            for n in range(output.size(0)):
                predictions[index[n], :, :] = preds[n, :, :]

    return losses.avg, pckhs_origin_res.avg, predictions
def validate(val_loader, net, epoch, visualizer, num_classes, flip_index):
    """Run one validation epoch for face-landmark regression.

    Computes the per-stage MSE loss of the predicted heatmaps and the
    per-image RMSE of the decoded landmarks at original image resolution.

    NOTE(review): this definition shadows the `validate` defined earlier in
    this module — only this one is visible to importers. `flip_index` is
    unused in the active code path (flip TTA is disabled here).

    Args:
        val_loader: DataLoader yielding (img, heatmap, pts, index, center,
            scale) batches.
        net: network returning a list of per-stage heatmap tensors.
        epoch: current epoch number (used for logging only).
        visualizer: logger exposing print_log(...).
        num_classes: number of landmarks per sample.
        flip_index: channel permutation for horizontal flipping (unused).

    Returns:
        Tuple of (average loss, average RMSE, predictions tensor of shape
        len(dataset) x num_classes x 2).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    rmses = AverageMeter()
    # One (x, y) prediction per landmark per dataset sample.
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    net.eval()

    end = time.time()
    # torch.no_grad() replaces the removed Variable(volatile=True) API and
    # keeps autograd from building a graph during evaluation.
    with torch.no_grad():
        for i, (img, heatmap, pts, index, center, scale) in enumerate(val_loader):
            # `async` is a reserved word in Python >= 3.7; `non_blocking`
            # is the supported spelling of the same flag.
            heatmap = heatmap.cuda(non_blocking=True)

            # Per-stage mean squared error, summed across all stage outputs.
            output1 = net(img)
            loss = 0
            for per_out in output1:
                tmp_loss = (per_out - heatmap) ** 2
                loss = loss + tmp_loss.sum() / tmp_loss.numel()

            # Evaluate on the final stage's heatmaps only.
            output = output1[-1].data.cpu()
            preds = Evaluation.final_preds(output, center, scale, [64, 64])
            rmse = np.sum(FaceAcc.per_image_rmse(
                preds.numpy(), pts.numpy())) / img.size(0)
            rmses.update(rmse, img.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # .item() replaces the removed 0-dim indexing loss.data[0].
            losses.update(loss.item())
            loss_dict = OrderedDict([('loss', losses.avg), ('rmse', rmses.avg)])
            visualizer.print_log(epoch,
                                 i,
                                 len(val_loader),
                                 batch_time.avg,
                                 value1=loss_dict)

            # Scatter this batch's predictions into the dataset-wide tensor.
            for n in range(output.size(0)):
                predictions[index[n], :, :] = preds[n, :, :]

    return losses.avg, rmses.avg, predictions