def check_is_2nd_error(det, GT, tracks, GTracks, selects, shape):
    det = normalize_points(shape.tolist(), det[:2, selects])
    GT = normalize_points(shape.tolist(), GT[:2, selects])
    error_det, xis = get_error(det, GT, None, 0.005)
    tracks = [normalize_points(shape.tolist(), x[:2, selects]) for x in tracks]
    GTracks = [
        normalize_points(shape.tolist(), x[:2, selects]) for x in GTracks
    ]
    error_A, xis_A = get_error(tracks[0], GTracks[0], None, 0.05)
    error_B, xis_B = get_error(tracks[1], GTracks[1], None, 0.05)
    for i, (er_det, er_A, er_B) in enumerate(zip(error_det, error_A, error_B)):
        if (er_det < 0.005) and (er_A > 0.05 or er_B > 0.05):
            return selects[i].item(), True
    return -1, False
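# normalize_points / denormalize_points are defined elsewhere in this repository;
# the sketch below only illustrates the convention assumed by the functions in
# these examples, i.e. mapping (x, y) pixel coordinates of an (H, W) image into
# [-1, 1] and back.  It is not the repository implementation.
import torch

def _normalize_points_sketch(shape, points):
    H, W = shape                        # image height and width
    out = points.clone().float()        # points : 2 x N tensor of (x, y)
    out[0] = out[0] / (W - 1) * 2 - 1   # x row -> [-1, 1]
    out[1] = out[1] / (H - 1) * 2 - 1   # y row -> [-1, 1]
    return out

def _denormalize_points_sketch(shape, points):
    H, W = shape
    out = points.clone().float()
    out[0] = (out[0] + 1) / 2 * (W - 1)
    out[1] = (out[1] + 1) / 2 * (H - 1)
    return out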
def check_is_1st_error(detections, GT_points, selects, shape):
    detections = [normalize_points(shape.tolist(), x[:2]) for x in detections]
    GT_points = [normalize_points(shape.tolist(), x[:2]) for x in GT_points]
    detections = [x[:, selects] for x in detections]
    GT_points = [x[:, selects] for x in GT_points]

    all_errors = []
    for det, gt in zip(detections, GT_points):
        #error, xis = get_error(det, gt, None, 0.05)
        is_errors = get_error_v2(det, gt, None, 0.05)
        all_errors.append(is_errors)
    all_errors = np.array(all_errors)
    temp = all_errors.sum(0) > 0
    return all_errors.any(), (temp.sum(), temp.size)
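# get_error / get_error_v2 are not shown in this excerpt; the sketch below is a
# purely illustrative guess at their contract, based only on how they are used
# above: get_error returns per-point distances (plus an auxiliary output), while
# get_error_v2 returns per-point boolean error flags for a given threshold.
import torch

def _get_error_sketch(pred, gt, mask=None, threshold=0.05):
    dists = torch.norm(pred - gt, dim=0)   # Euclidean distance for each point
    return dists, dists < threshold

def _get_error_v2_sketch(pred, gt, mask=None, threshold=0.05):
    dists = torch.norm(pred - gt, dim=0)
    return dists > threshold               # True where the point is in error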
def eval_robust_heatmap(detector, xloader, print_freq, logger):
    batch_time, NUM_PTS = AverageMeter(), xloader.dataset.NUM_PTS
    Preds, GT_locs, Distances = [], [], []
    eval_meta, end = Eval_Meta(), time.time()

    with torch.no_grad():
        detector.eval()
        for i, (inputs, heatmaps, masks, norm_points, thetas, data_index,
                nopoints, xshapes) in enumerate(xloader):
            data_index = data_index.squeeze(1).tolist()
            batch_size, iters, C, H, W = inputs.size()
            for ibatch in range(batch_size):
                xinputs, xpoints, xthetas = inputs[ibatch], norm_points[
                    ibatch].permute(0, 2, 1).contiguous(), thetas[ibatch]
                batch_features, batch_heatmaps, batch_locs, batch_scos = detector(
                    xinputs.cuda(non_blocking=True))
                batch_locs = batch_locs.cpu()[:, :-1]
                all_locs = []
                for _iter in range(iters):
                    _locs = normalize_points((H, W),
                                             batch_locs[_iter].permute(1, 0))
                    xlocs = torch.cat((_locs, torch.ones(1, NUM_PTS)), dim=0)
                    nlocs = torch.mm(xthetas[_iter, :2], xlocs)
                    rlocs = denormalize_points(xshapes[ibatch].tolist(), nlocs)
                    rlocs = torch.cat(
                        (rlocs.permute(1, 0), xpoints[_iter, :, 2:]), dim=1)
                    all_locs.append(rlocs.clone())
                GT_loc = xloader.dataset.labels[
                    data_index[ibatch]].get_points()
                norm_distance = xloader.dataset.get_normalization_distance(
                    data_index[ibatch])
                # save the results
                eval_meta.append((sum(all_locs) / len(all_locs)).numpy().T,
                                 GT_loc.numpy(),
                                 xloader.dataset.datas[data_index[ibatch]],
                                 norm_distance)
                Distances.append(norm_distance)
                Preds.append(all_locs)
                GT_locs.append(GT_loc.permute(1, 0))
            # compute time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_freq == 0 or i + 1 == len(xloader):
                last_time = convert_secs2time(
                    batch_time.avg * (len(xloader) - i - 1), True)
                logger.log(
                    ' -->>[Robust HEATMAP-based Evaluation] [{:03d}/{:03d}] Time : {:}'
                    .format(i, len(xloader), last_time))
    # evaluate the results
    errors, valids = calculate_robust(Preds, GT_locs, Distances, NUM_PTS)
    return errors, valids, eval_meta
Example #4
def apply_affine2point(points, theta, shape):
    assert points.size(0) == 3, "invalid points shape : {:}".format(points.size())
    with torch.no_grad():
        ok_points = points[2, :] == 1
        assert torch.sum(ok_points).item() > 0, "there is no visible point"
        points[:2, :] = normalize_points(shape, points[:2, :])

        norm_trans_points = ok_points.unsqueeze(0).repeat(3, 1).float()

        trans_points, _ = torch.solve(points[:, ok_points], theta)  # torch.gesv was deprecated in favor of torch.solve (same argument order)

        norm_trans_points[:, ok_points] = trans_points

    return norm_trans_points
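# A hypothetical usage sketch for apply_affine2point (the values are invented for
# illustration): points are a 3 x N tensor whose last row is a 0/1 visibility
# flag, theta is a 3 x 3 affine matrix, and shape is the (H, W) image size.
import torch

example_points = torch.tensor([[30.0, 60.0, 90.0],    # x coordinates
                               [40.0, 80.0, 20.0],    # y coordinates
                               [1.0,  1.0,  0.0]])    # the third point is invisible
identity_theta = torch.eye(3)
normed = apply_affine2point(example_points.clone(), identity_theta, (120, 96))
# with an identity theta the visible columns simply hold the normalized homogeneous
# coordinates, while invisible columns stay at the zeros copied from the mask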
Example #5
def main(use_gray, transform_strs):
    if not use_gray:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # the identity normalization below overrides the ImageNet statistics, so the
        # saved debug images keep their original colors
        normalize = transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
        color = (102, 255, 102)
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
        # likewise overridden with an identity normalization for debugging
        normalize = transforms.Normalize(mean=[0], std=[1])
        color = (255, )

    debug_dir = '{:}/cache/gray-{:}'.format(this_dir, use_gray)
    if not os.path.isdir(debug_dir):
        os.makedirs(debug_dir)

    transform_funcs = [transforms.ToTensor(), normalize
                       ] + get_transforms(transform_strs)
    transform = transforms.Compose(transform_funcs)

    shape = (300, 200)
    images, labels, boxes = get_list()
    for image, label, box in zip(images, labels, boxes):
        imgx = datasets.pil_loader(image, use_gray)
        np_points, _ = datasets.anno_parser(label, 68)
        meta = Point_Meta(68, np_points, box, image, 'face68')
        I, L, theta = transform(imgx, meta)
        points = torch.Tensor(L.get_points(True))
        points = normalize_points((I.size(1), I.size(2)), points)
        name = Path(image).name
        image = get_image_from_affine(I, theta, shape)
        points = torch.cat((points, torch.ones((1, points.shape[1]))), dim=0)
        # new_points, LU = torch.gesv(points, theta)
        new_points, _ = torch.solve(points, theta)

        PImage = draw_image_by_points(image,
                                      new_points[:2, :],
                                      2,
                                      color,
                                      False,
                                      False,
                                      True,
                                      draw_idx=True)

        save_name = os.path.join(debug_dir,
                                 '{:}-{:}'.format(transform_strs, name))
        PImage.save(save_name)
Example #6
def evaluate(args):
    if args.cuda:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    else:
        print('Use the CPU mode')

    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    last_info = Path(args.model)
    assert last_info.exists(), 'The model path {:} does not exist'.format(
        last_info)
    last_info = torch.load(last_info, map_location=torch.device('cpu'))
    snapshot = last_info['last_checkpoint']
    assert snapshot.exists(), 'The checkpoint snapshot {:} does not exist'.format(
        snapshot)
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    snapshot = torch.load(snapshot, map_location=torch.device('cpu'))

    param = snapshot['args']
    # General Data Augmentation
    if not param.use_gray:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform  = transforms.Compose2V([transforms.ToTensor(), normalize, \
                                            transforms.PreCrop(param.pre_crop_expand), \
                                            transforms.CenterCrop(param.crop_max)])

    model_config = load_configure(param.model_config, None)
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample,
                      param.heatmap_type, (120, 96), param.use_gray, None,
                      param.data_indicator)
    #dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (param.height,param.width), param.use_gray, None, param.data_indicator)
    dataset.reset(param.num_pts)
    net = obtain_pro_model(model_config, param.num_pts + 1, param.sigma,
                           param.use_gray)
    net.load_state_dict(remove_module_dict(snapshot['state_dict']))
    if args.cuda: net = net.cuda()
    print('Processing the input face image.')
    face_meta = PointMeta(dataset.NUM_PTS, None, args.face, args.image,
                          'BASE-EVAL')
    face_img = pil_loader(args.image, dataset.use_gray)
    affineImage, heatmaps, mask, norm_trans_points, transthetas, _, _, _, shape = dataset._process_(
        face_img, face_meta, -1)

    #import cv2; cv2.imwrite('temp.png', transforms.ToPILImage(normalize, False)(affineImage))
    # network forward
    with torch.no_grad():
        if args.cuda: inputs = affineImage.unsqueeze(0).cuda()
        else: inputs = affineImage.unsqueeze(0)

        _, _, batch_locs, batch_scos = net(inputs)
        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        (batch_size, C, H, W), num_pts = inputs.size(), param.num_pts
        locations, scores = batch_locs[0, :-1, :], batch_scos[:, :-1]
        norm_locs = normalize_points((H, W), locations.transpose(1, 0))
        norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        transtheta = transthetas[:2, :]
        norm_locs = torch.mm(transtheta, norm_locs)
        real_locs = denormalize_points(shape.tolist(), norm_locs)
        real_locs = torch.cat((real_locs, scores), dim=0)
    print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
    for i in range(param.num_pts):
        point = real_locs[:, i]
        print(
            'the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f}), score = {:.2f}'.
            format(i, param.num_pts, float(point[0]), float(point[1]),
                   float(point[2])))

    if args.save:
        resize = 512
        image = draw_image_by_points(args.image, real_locs, 2, (255, 0, 0),
                                     args.face, resize)
        image.save(args.save)
        print('save the visualization results into {:}'.format(args.save))
    else:
        print('ignore the visualization procedure')
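# The same normalize -> affine -> denormalize chain appears in several functions in
# these examples; the helper below is only a compact restatement (a sketch relying
# on the repository's normalize_points / denormalize_points) of how heatmap-grid
# locations are mapped back to coordinates in the original image.
import torch

def _recover_real_locs_sketch(locations, transtheta, image_shape, H, W):
    # locations   : num_pts x 2 tensor of (x, y) in the network-input frame (H, W)
    # transtheta  : 2 x 3 affine matrix undoing the pre-crop / center-crop
    # image_shape : [height, width] of the original image
    num_pts = locations.size(0)
    norm_locs = normalize_points((H, W), locations.transpose(1, 0))
    norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
    norm_locs = torch.mm(transtheta, norm_locs)
    return denormalize_points(image_shape, norm_locs)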
Example #7
def basic_main_heatmap(args, loader, net, criterion, optimizer, epoch_str,
                       logger, opt_config, mode):
    assert mode == 'train' or mode == 'test', 'invalid mode : {:}'.format(mode)
    args = copy.deepcopy(args)
    batch_time, data_time, forward_time, eval_time = AverageMeter(
    ), AverageMeter(), AverageMeter(), AverageMeter()
    visible_points, losses = AverageMeter(), AverageMeter()
    eval_meta = Eval_Meta()
    cpu = torch.device('cpu')

    if args.debug:
        save_dir = Path(
            args.save_path) / 'DEBUG' / ('{:}-'.format(mode) + epoch_str)
    else:
        save_dir = None

    # switch to train mode
    if mode == 'train':
        logger.log('basic-main-V2 : training mode.')
        print_freq = args.print_freq
        net.train()
        criterion.train()
    else:
        logger.log('basic-main-V2 : evaluation mode.')
        print_freq = args.print_freq_eval
        net.eval()
        criterion.eval()

    end = time.time()
    for i, (inputs, targets, masks, normpoints, transthetas, meanthetas,
            image_index, nopoints, shapes) in enumerate(loader):
        # inputs : Batch, Channel, Height, Width

        # information
        image_index = image_index.squeeze(1).tolist()
        (batch_size, C, H, W), num_pts = inputs.size(), args.num_pts
        visible_point_num = float(np.sum(
            masks.numpy()[:, :-1, :, :])) / batch_size
        visible_points.update(visible_point_num, batch_size)
        annotated_num = batch_size - sum(nopoints)

        det_masks = (1 - nopoints).view(batch_size, 1, 1, 1) * masks
        det_masks = det_masks.cuda(non_blocking=True)
        nopoints = nopoints.squeeze(1).tolist()
        targets = targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # batch_heatmaps is a list for stage-predictions, each element should be [Batch, C, H, W]
        batch_features, batch_heatmaps, batch_locs, batch_scos = net(inputs)
        forward_time.update(time.time() - end)

        loss, each_stage_loss_value = compute_stage_loss(
            criterion, targets, batch_heatmaps, det_masks)

        # measure accuracy and record loss
        losses.update(loss.item(), batch_size)

        # compute gradient and do SGD step
        if mode == 'train':  # training mode
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        eval_time.update(time.time() - end)

        with torch.no_grad():
            batch_locs, batch_scos = batch_locs.detach().to(
                cpu), batch_scos.detach().to(cpu)
            # evaluate the training data
            for ibatch, (imgidx,
                         nopoint) in enumerate(zip(image_index, nopoints)):
                locations = batch_locs[ibatch, :-1, :]
                norm_locs = normalize_points((H, W), locations.transpose(1, 0))
                norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)),
                                      dim=0)
                transtheta = transthetas[ibatch][:2, :]
                norm_locs = torch.mm(transtheta, norm_locs)
                real_locs = denormalize_points(shapes[ibatch].tolist(),
                                               norm_locs)
                real_locs = torch.cat(
                    (real_locs, batch_scos[ibatch, :-1].view(1, -1)), dim=0)
                #real_locs = torch.cat((real_locs, torch.ones(1, num_pts)), dim=0)
                image_path = loader.dataset.datas[imgidx]
                normDistance = loader.dataset.NormDistances[imgidx]

                if nopoint == 1: xpoints = None
                else:
                    xpoints = loader.dataset.labels[imgidx].get_points().numpy(
                    )
                eval_meta.append(real_locs.numpy(), xpoints, image_path,
                                 normDistance)
                if save_dir:
                    pro_debug_save(save_dir,
                                   Path(image_path).name, inputs[ibatch],
                                   targets[ibatch], normpoints[ibatch],
                                   meanthetas[ibatch],
                                   batch_heatmaps[-1][ibatch],
                                   args.tensor2imageF)

        # measure elapsed time
        batch_time.update(time.time() - end)
        last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1),
                                      True)
        end = time.time()

        if i % print_freq == 0 or i + 1 == len(loader):
            logger.log(' -->>[{:}]: [{:}][{:03d}/{:03d}] '
                      'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                      'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                      'Forward {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                      'Loss {loss.val:7.4f} ({loss.avg:7.4f})  '.format(
                          mode, epoch_str, i, len(loader), batch_time=batch_time,
                          data_time=data_time, forward_time=forward_time, loss=losses)
                        + last_time + show_stage_loss(each_stage_loss_value) \
                        + ' In={:} Tar={:}'.format(list(inputs.size()), list(targets.size())) \
                        + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg))
    nme, _, _ = eval_meta.compute_mse(loader.dataset.dataset_name, logger)
    return losses.avg, eval_meta, nme
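# compute_stage_loss is defined elsewhere in the repository; the sketch below is a
# plausible reconstruction (illustrative only, not the actual implementation) of a
# masked loss summed over the list of stage-wise heatmap predictions, returning the
# total plus the per-stage values that show_stage_loss prints.
def _compute_stage_loss_sketch(criterion, targets, stage_heatmaps, masks):
    total, per_stage = 0, []
    for heatmaps in stage_heatmaps:
        stage_loss = criterion(heatmaps * masks, targets * masks)
        total = total + stage_loss
        per_stage.append(stage_loss.item())
    return total, per_stage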
Example #8
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    temporal_main, eval_all = procedures['{:}-train'.format(
        args.procedure)], procedures['{:}-test'.format(args.procedure)]

    logger = prepare_logger(args)

    # General Data Augmentation
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(
        transforms, args)
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    assert (args.scale_min +
            args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(
                args.scale_min, args.scale_max)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    sbr_config = load_configure(args.sbr_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, args.sigma, shape))
    logger.log('--> SBR Configuration : {:}\n'.format(sbr_config))

    # Training Dataset
    train_data   = VDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, \
                              args.data_indicator, sbr_config, transforms.ToPILImage(normalize, 'cv2gray'))
    train_data.load_list(args.train_lists, args.num_pts, args.boxindicator,
                         args.normalizeL, True)

    # Evaluation Dataloader
    assert len(
        args.eval_ilists) == 1, 'invalid length of eval_ilists : {:}'.format(
            len(args.eval_ilists))
    eval_data = IDataset(eval_transform, args.sigma, model_config.downsample,
                         args.heatmap_type, shape, args.use_gray,
                         args.mean_point, args.data_indicator)
    eval_data.load_list(args.eval_ilists[0], args.num_pts, args.boxindicator,
                        args.normalizeL, True)
    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(
            args.num_pts)
        if train_data is not None: train_data = convert68to49(train_data)
        eval_data = convert68to49(eval_data)
        args.num_pts = 49

    # define the temporal model (accelerated SBR)
    net = obtain_pro_temporal(model_config, sbr_config, args.num_pts,
                              args.sigma, args.use_gray)
    assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    logger.log('Evaluate-data : {:}'.format(eval_data))

    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    optimizer, scheduler, criterion = obtain_optimizer(net.parameters(),
                                                       opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    try:
        last_checkpoint = load_checkpoint(args.init_model)
        checkpoint = remove_module_dict(last_checkpoint['state_dict'], False)
        net.module.detector.load_state_dict(checkpoint)
    except:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['state_dict'])

    detector = torch.nn.DataParallel(net.module.detector)
    logger.log("=> initialize the detector : {:}".format(args.init_model))

    net.eval()
    detector.eval()

    logger.log('SBR Config : {:}'.format(sbr_config))
    save_xdir = logger.path('meta')
    type_error = 0
    random.seed(111)
    index_list = list(range(len(train_data)))
    random.shuffle(index_list)
    #selected_list = index_list[: min(200, len(index_list))]

    selected_list = [
        7260, 11506, 39952, 75196, 51614, 41061, 37747, 41355, 47875
    ]
    for iidx, i in enumerate(selected_list):
        frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images = train_data[
            i]

        frames, Fflows, Bflows, is_images = frames.unsqueeze(
            0), Fflows.unsqueeze(0), Bflows.unsqueeze(0), is_images.unsqueeze(
                0)
        # batch_heatmaps is a list for stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        if args.procedure == 'heatmap':
            batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck = net(
                frames, Fflows, Bflows, is_images)
        else:
            batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(
                frames, Fflows, Bflows, is_images)

        (batch_size, frame_length, C, H,
         W), num_pts, annotate_index = frames.size(
         ), args.num_pts, train_data.video_L
        batch_locs = batch_locs.cpu()[:, :, :num_pts]
        video_mask = masks.unsqueeze(0)[:, :num_pts]
        batch_past2now = batch_past2now.cpu()[:, :, :num_pts]
        batch_future2now = batch_future2now.cpu()[:, :, :num_pts]
        batch_FBcheck = batch_FBcheck[:, :num_pts].cpu()
        FB_check_oks = FB_communication(criterion, batch_locs, batch_past2now,
                                        batch_future2now, batch_FBcheck,
                                        video_mask, sbr_config)

        # locations
        norm_past_det_locs = torch.cat(
            (batch_locs[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_noww_det_locs = torch.cat(
            (batch_locs[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_det_locs = torch.cat(
            (batch_locs[0, annotate_index + 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_locs = torch.cat(
            (batch_past2now[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_past_locs = torch.cat(
            (batch_future2now[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        transtheta = transthetas[:2, :]
        norm_past_det_locs = torch.mm(transtheta, norm_past_det_locs)
        norm_noww_det_locs = torch.mm(transtheta, norm_noww_det_locs)
        norm_next_det_locs = torch.mm(transtheta, norm_next_det_locs)
        norm_next_locs = torch.mm(transtheta, norm_next_locs)
        norm_past_locs = torch.mm(transtheta, norm_past_locs)
        real_past_det_locs = denormalize_points(shapes.tolist(),
                                                norm_past_det_locs)
        real_noww_det_locs = denormalize_points(shapes.tolist(),
                                                norm_noww_det_locs)
        real_next_det_locs = denormalize_points(shapes.tolist(),
                                                norm_next_det_locs)
        real_next_locs = denormalize_points(shapes.tolist(), norm_next_locs)
        real_past_locs = denormalize_points(shapes.tolist(), norm_past_locs)
        gt_noww_points = train_data.labels[image_index.item()].get_points()

        FB_check_oks = FB_check_oks[:num_pts].squeeze()
        #import pdb; pdb.set_trace()
        if FB_check_oks.sum().item() > 2:
            point_index = FB_check_oks.nonzero().squeeze().tolist()
            something_wrong = False
            for pidx in point_index:
                real_now_det_loc = real_noww_det_locs[:, pidx]
                real_pst_det_loc = real_past_det_locs[:, pidx]
                real_net_det_loc = real_next_det_locs[:, pidx]
                real_nex_loc = real_next_locs[:, pidx]
                real_pst_loc = real_past_locs[:, pidx]
                grdt_now_loc = gt_noww_points[:2, pidx]
                #if torch.abs(real_now_loc - grdt_now_loc).max() > 5:
                #  something_wrong = True
                #if torch.abs(real_nex_loc - grdt_nex_loc).max() > 5:
                #  something_wrong = True
            #if something_wrong == True:
            if True:  # debugging: visualize every sample instead of only the flagged ones
                [image_past, image_noww,
                 image_next] = train_data.datas[image_index.item()]
                try:
                    crop_box = train_data.labels[
                        image_index.item()].get_box().tolist()
                    #crop_box = [crop_box[0]-20, crop_box[1]-20, crop_box[2]+20, crop_box[3]+20]
                except:
                    crop_box = False

                RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
                colors = [
                    GREEN if _i in point_index else RED
                    for _i in range(num_pts)
                ]
                if crop_box != False or True:  # always true here: images are drawn regardless of crop_box
                    I_past_det = draw_image_by_points(image_past,
                                                      real_past_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_noww_det = draw_image_by_points(image_noww,
                                                      real_noww_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_next_det = draw_image_by_points(image_next,
                                                      real_next_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_next = draw_image_by_points(image_next,
                                                  real_next_locs[:], 3, colors,
                                                  crop_box, (400, 500))
                    I_past = draw_image_by_points(image_past,
                                                  real_past_locs[:], 3, colors,
                                                  crop_box, (400, 500))

                    I_past.save(
                        str(save_xdir / '{:05d}-v1-a-pastt.png'.format(i)))
                    I_noww_det.save(
                        str(save_xdir / '{:05d}-v1-b-curre.png'.format(i)))
                    I_next.save(
                        str(save_xdir / '{:05d}-v1-c-nextt.png'.format(i)))

                    I_past_det.save(
                        str(save_xdir / '{:05d}-v1-det-a-past.png'.format(i)))
                    I_noww_det.save(
                        str(save_xdir / '{:05d}-v1-det-b-curr.png'.format(i)))
                    I_next_det.save(
                        str(save_xdir / '{:05d}-v1-det-c-next.png'.format(i)))

                #[image_past, image_noww, image_next] = train_data.datas[image_index.item()]
                #image_noww = draw_image_by_points(image_noww, real_noww_locs[:], 2, colors, False, False)
                #image_next = draw_image_by_points(image_next, real_next_locs[:], 2, colors, False, False)
                #image_past = draw_image_by_points(image_past, real_past_locs[:], 2, colors, False, False)
                #image_noww.save( str(save_xdir / '{:05d}-v2-b-curre.png'.format(i)) )
                #image_next.save( str(save_xdir / '{:05d}-v2-c-nextt.png'.format(i)) )
                #image_past.save( str(save_xdir / '{:05d}-v2-a-pastt.png'.format(i)) )
                #type_error += 1
        logger.log(
            'Handle {:05d}/{:05d} :: {:05d}, ok-points={:.3f}, wrong data={:}'.
            format(iidx, len(selected_list), i,
                   FB_check_oks.float().mean().item(), type_error))

    save_xx_dir = save_xdir.parent / 'image-data'
    save_xx_dir.mkdir(parents=True, exist_ok=True)
    selected_list = [100, 115, 200, 300, 400] + list(range(200, 220))
    for iidx, i in enumerate(selected_list):
        inputs, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes = eval_data[
            i]
        inputs = inputs.unsqueeze(0)
        (batch_size, C, H, W), num_pts = inputs.size(), args.num_pts
        _, _, batch_locs, batch_scos = detector(inputs)  # inputs

        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        norm_locs = normalize_points((H, W),
                                     batch_locs[0, :num_pts].transpose(1, 0))
        norm_det_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        norm_det_locs = torch.mm(transthetas[:2, :], norm_det_locs)
        real_det_locs = denormalize_points(shapes.tolist(), norm_det_locs)
        gt_now_points = eval_data.labels[image_index.item()].get_points()
        image_now = eval_data.datas[image_index.item()]
        crop_box = eval_data.labels[image_index.item()].get_box().tolist()

        RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
        Gcolors = [GREEN for _ in range(num_pts)]
        points = torch.cat((real_det_locs, gt_now_points[:2]), dim=1)
        colors = [GREEN
                  for _ in range(num_pts)] + [BLUE for _ in range(num_pts)]
        image = draw_image_by_points(image_now, real_det_locs, 3, Gcolors,
                                     crop_box, (400, 500))
        image.save(str(save_xx_dir / '{:05d}-crop.png'.format(i)))
        image = draw_image_by_points(image_now, points, 3, colors, False,
                                     False)
        #image  = draw_image_by_points(image_now, real_det_locs, 3, colors , False, False)
        image.save(str(save_xx_dir / '{:05d}-orig.png'.format(i)))
    logger.log('Finish drawing : {:}'.format(save_xdir))
    logger.log('Finish drawing : {:}'.format(save_xx_dir))
    logger.close()
Example #9
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)

    logger = prepare_logger(args)

    checkpoint = load_checkpoint(args.init_model)
    xargs = checkpoint['args']
    logger.log('Previous args : {:}'.format(xargs))

    # General Data Augmentation
    if not xargs.use_gray:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform  = transforms.Compose2V([transforms.ToTensor(), normalize, \
                                                transforms.PreCrop(xargs.pre_crop_expand), \
                                                transforms.CenterCrop(xargs.crop_max)])

    # Model Configure Load
    model_config = load_configure(xargs.model_config, logger)
    shape = (xargs.height, xargs.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, xargs.sigma, shape))

    # Evaluation Dataloader
    eval_loaders = []
    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = EvalDataset(eval_transform, xargs.sigma,
                                     model_config.downsample,
                                     xargs.heatmap_type, shape, xargs.use_gray,
                                     xargs.data_indicator)
            eval_idata.load_list(eval_ilist, args.num_pts, xargs.boxindicator,
                                 xargs.normalizeL, True)
            eval_iloader = torch.utils.data.DataLoader(
                eval_idata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_iloader, False))
    if args.eval_vlists is not None:
        for eval_vlist in args.eval_vlists:
            eval_vdata = EvalDataset(eval_transform, xargs.sigma,
                                     model_config.downsample,
                                     xargs.heatmap_type, shape, xargs.use_gray,
                                     xargs.data_indicator)
            eval_vdata.load_list(eval_vlist, args.num_pts, xargs.boxindicator,
                                 xargs.normalizeL, True)
            eval_vloader = torch.utils.data.DataLoader(
                eval_vdata,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)
            eval_loaders.append((eval_vloader, True))

    # define the detector
    detector = obtain_pro_model(model_config, xargs.num_pts, xargs.sigma,
                                xargs.use_gray)
    assert model_config.downsample == detector.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, detector.downsample)
    logger.log("=> detector :\n {:}".format(detector))
    logger.log("=> Net-Parameters : {:} MB".format(
        count_parameters_in_MB(detector)))
    logger.log('=> Eval-Transform : {:}'.format(eval_transform))

    detector = detector.cuda()
    net = torch.nn.DataParallel(detector)
    net.eval()
    net.load_state_dict(checkpoint['detector'])
    cpu = torch.device('cpu')

    assert len(args.use_stable) == 2

    for iLOADER, (loader, is_video) in enumerate(eval_loaders):
        logger.log(
            '{:} The [{:2d}/{:2d}]-th test set [{:}] = {:} with {:} batches.'.
            format(time_string(), iLOADER, len(eval_loaders),
                   'video' if is_video else 'image', loader.dataset,
                   len(loader)))
        with torch.no_grad():
            all_points, all_results, all_image_ps = [], [], []
            for i, (inputs, targets, masks, normpoints, transthetas,
                    image_index, nopoints, shapes) in enumerate(loader):
                image_index = image_index.squeeze(1).tolist()
                (batch_size, C, H, W), num_pts = inputs.size(), xargs.num_pts
                # batch_heatmaps is a list for stage-predictions, each element should be [Batch, C, H, W]
                if xargs.procedure == 'heatmap':
                    batch_features, batch_heatmaps, batch_locs, batch_scos = net(
                        inputs)
                    batch_locs = batch_locs[:, :-1, :]
                else:
                    batch_locs = net(inputs)
                batch_locs = batch_locs.detach().to(cpu)
                # evaluate the training data
                for ibatch, (imgidx,
                             nopoint) in enumerate(zip(image_index, nopoints)):
                    if xargs.procedure == 'heatmap':
                        norm_locs = normalize_points(
                            (H, W), batch_locs[ibatch].transpose(1, 0))
                        norm_locs = torch.cat(
                            (norm_locs, torch.ones(1, num_pts)), dim=0)
                    else:
                        norm_locs = torch.cat((batch_locs[ibatch].permute(
                            1, 0), torch.ones(1, num_pts)),
                                              dim=0)
                    transtheta = transthetas[ibatch][:2, :]
                    norm_locs = torch.mm(transtheta, norm_locs)
                    real_locs = denormalize_points(shapes[ibatch].tolist(),
                                                   norm_locs)
                    #real_locs  = torch.cat((real_locs, batch_scos[ibatch].permute(1,0)), dim=0)
                    real_locs = torch.cat((real_locs, torch.ones(1, num_pts)),
                                          dim=0)
                    xpoints = loader.dataset.labels[imgidx].get_points().numpy(
                    )
                    image_path = loader.dataset.datas[imgidx]
                    # put into the list
                    all_points.append(torch.from_numpy(xpoints))
                    all_results.append(real_locs)
                    all_image_ps.append(image_path)
            total = len(all_points)
            logger.log(
                '{:} The [{:2d}/{:2d}]-th test set finishes evaluation : {:} frames/images'
                .format(time_string(), iLOADER, len(eval_loaders), total))
        """
    if args.use_stable[0] > 0:
      save_dir = Path( osp.join(args.save_path, '{:}-X-{:03d}'.format(args.model_name, iLOADER)) )
      save_dir.mkdir(parents=True, exist_ok=True)
      wrap_parallel = WrapParallel(save_dir, all_image_ps, all_results, all_points, 180, (255, 0, 0))
      wrap_loader   = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
      for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
      cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
      logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
      os.system( cmd )

    if args.use_stable[1] > 0:
      save_dir = Path( osp.join(args.save_path, '{:}-Y-{:03d}'.format(args.model_name, iLOADER)) )
      save_dir.mkdir(parents=True, exist_ok=True)
      Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
      new_preds = fc_solve(Xgts, Xpredictions, is_cuda=True)
      wrap_parallel = WrapParallel(save_dir, all_image_ps, new_preds, all_points, 180, (0, 0, 255))
      wrap_loader   = torch.utils.data.DataLoader(wrap_parallel, batch_size=args.workers, shuffle=False, num_workers=args.workers, pin_memory=True)
      for iL, INDEXES in enumerate(wrap_loader): _ = INDEXES
      cmd = 'ffmpeg -y -i {:}/%06d.png -framerate 30 {:}.avi'.format(save_dir, save_dir)
      logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
      os.system( cmd )
    """
        Xpredictions, Xgts = torch.stack(all_results), torch.stack(all_points)
        save_path = Path(
            osp.join(args.save_path,
                     '{:}-result-{:03d}.pth'.format(args.model_name, iLOADER)))
        torch.save(
            {
                'paths': all_image_ps,
                'ground-truths': Xgts,
                'predictions': all_results
            }, save_path)
        logger.log('{:} save into {:}'.format(time_string(), save_path))
        if False:
            new_preds = fc_solve_v2(Xgts, Xpredictions, is_cuda=True)
            # create the dir
            save_dir = Path(
                osp.join(args.save_path,
                         '{:}-T-{:03d}'.format(args.model_name, iLOADER)))
            save_dir.mkdir(parents=True, exist_ok=True)
            wrap_parallel = WrapParallelV2(save_dir, all_image_ps, Xgts,
                                           all_results, new_preds, all_points,
                                           180, [args.model_name, 'SRT'])
            wrap_parallel[0]
            wrap_loader = torch.utils.data.DataLoader(wrap_parallel,
                                                      batch_size=args.workers,
                                                      shuffle=False,
                                                      num_workers=args.workers,
                                                      pin_memory=True)
            for iL, INDEXES in enumerate(wrap_loader):
                _ = INDEXES
            cmd = 'ffmpeg -y -i {:}/%06d.png -vb 5000k {:}.avi'.format(
                save_dir, save_dir)
            logger.log('{:} possible >>>>> : {:}'.format(time_string(), cmd))
            os.system(cmd)

    logger.close()
    return