def multiview_debug_save_v2(save_dir, base, names, images, points, rev_points):
    save_dir.mkdir(parents=True, exist_ok=True)
    for index, (name, image) in enumerate(zip(names, images)):
        _points = points[index].transpose(1, 0)
        pil_img = draw_image_by_points(image, _points, 2, (255, ), False,
                                       False, False)
        pil_img.save(str(save_dir / '{:}-trans-x-{:}'.format(base, name)))
        _points = rev_points[index].transpose(1, 0)
        pil_img = draw_image_by_points(image, _points, 2, (0, ), False, False,
                                       False)
        pil_img.save(str(save_dir / '{:}-trans-p-{:}'.format(base, name)))
def multiview_debug_save(save_dir, base, image_paths, points, rev_points):
    save_dir.mkdir(parents=True, exist_ok=True)
    images = [pil_loader(x) for x in image_paths]
    names = [Path(x).name for x in image_paths]
    for index, (name, image) in enumerate(zip(names, images)):
        _points = points[index].transpose(1, 0)
        pil_img = draw_image_by_points(image, _points, 2, (102, 255, 102),
                                       False, False, False)
        pil_img.save(str(save_dir / '{:}-ori-x-{:}'.format(base, name)))
        _points = rev_points[index].transpose(1, 0)
        pil_img = draw_image_by_points(image, _points, 2, (30, 144, 255),
                                       False, False, False)
        pil_img.save(str(save_dir / '{:}-ori-p-{:}'.format(base, name)))
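
Every snippet on this page revolves around draw_image_by_points; its real implementation lives in the source repository, but the call sites above pin down its shape: an image (or image path), a 2xN or 3xN point array, a point radius, one color or a per-point color list, a crop box (or False), and a resize target (or False). A minimal PIL-based stand-in, inferred from those calls and offered only as a sketch:

# Hypothetical stand-in for draw_image_by_points, reconstructed from the call
# sites on this page -- not the library's actual code.
from PIL import Image, ImageDraw

def draw_image_by_points_sketch(image, points, radius, color,
                                crop_box=False, resize=False):
    if isinstance(image, str):                 # accept a path or a PIL image
        image = Image.open(image).convert('RGB')
    else:
        image = image.copy()
    draw = ImageDraw.Draw(image)
    num_pts = points.shape[1]
    colors = color if isinstance(color, list) else [color] * num_pts
    for i in range(num_pts):                   # rows 0/1 are (x, y) pixel coords
        x, y = float(points[0, i]), float(points[1, i])
        draw.ellipse((x - radius, y - radius, x + radius, y + radius),
                     fill=colors[i])
    if crop_box:                               # crop to the face box after drawing
        image = image.crop(tuple(int(v) for v in crop_box))
    if resize:                                 # int -> square, else a (W, H) tuple
        size = (resize, resize) if isinstance(resize, int) else tuple(resize)
        image = image.resize(size, Image.BILINEAR)
    return image
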
Example #3
def main(save_dir, meta, mindex, maximum):
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    assert osp.isfile(meta), 'invalid meta file : {:}'.format(meta)
    checkpoint = torch.load(meta)
    xmeta = checkpoint[mindex]
    RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)

    random.seed(111)
    index_list = list(range(len(xmeta)))
    random.shuffle(index_list)

    for i in range(0, min(maximum, len(xmeta))):
        index = index_list[i]
        image, predicts, gts = xmeta[index]
        crop_box = get_box(gts)
        num_pts = predicts.shape[1]
        predicts, gts = torch.Tensor(predicts), torch.Tensor(gts)
        available = gts[2, :] == 1
        predicts, gts = predicts[:2, available], gts[:2, available]

        colors = [BLUE for _ in range(available.sum().item())
                  ] + [GREEN for _ in range(available.sum().item())]
        points = torch.cat((gts, predicts), dim=1)

        image = draw_image_by_points(image, points, 3, colors, crop_box,
                                     (400, 500))
        image.save('{:}/image-{:05d}.png'.format(save_dir, index))
    print('save into {:}'.format(save_dir))
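
The filtering step above is worth isolating: row 2 of the 3xN annotation is a visibility flag, and ground truth plus predictions are concatenated column-wise so that a single draw call renders both, with a parallel color list (BLUE for the ground-truth block, GREEN for the predictions). A self-contained sketch of that pattern:

# Visibility-mask pattern from the loop above, in isolation.
import torch

gts = torch.tensor([[10., 20., 30.],     # x
                    [15., 25., 35.],     # y
                    [1.,  0.,  1.]])     # visibility flag per landmark
predicts = torch.rand(3, 3) * 40

available = gts[2, :] == 1                       # boolean mask over landmarks
gts_vis, pred_vis = gts[:2, available], predicts[:2, available]
points = torch.cat((gts_vis, pred_vis), dim=1)   # GT columns first, then preds
n = int(available.sum().item())
colors = [(0, 0, 255)] * n + [(0, 255, 0)] * n   # BLUE for GT, GREEN for preds
print(points.shape, len(colors))                 # torch.Size([2, 4]) 4
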
Example #4
def evaluate(args):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled   = True
  torch.backends.cudnn.benchmark = True

  print ('The image is {:}'.format(args.image))
  print ('The model is {:}'.format(args.model))
  snapshot = Path(args.model)
  assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
  print ('The face bounding box is {:}'.format(args.face))
  assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
  snapshot = torch.load(snapshot)

  # General Data Augmentation
  mean_fill   = tuple( [int(x*255) for x in [0.485, 0.456, 0.406] ] )
  normalize   = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

  param = snapshot['args']
  eval_transform  = transforms.Compose([transforms.PreCrop(param.pre_crop_expand), transforms.TrainScale2WH((param.crop_width, param.crop_height)), transforms.ToTensor(), normalize])
  model_config = load_configure(param.model_config, None)
  dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
  dataset.reset(param.num_pts)
  
  net = obtain_model(model_config, param.num_pts + 1)
  net = net.cuda()
  weights = remove_module_dict(snapshot['detector'])
  net.load_state_dict(weights)
  print ('Prepare input data')
  [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
  inputs = image.unsqueeze(0).cuda()
  # network forward
  with torch.no_grad():
    batch_heatmaps, batch_locs, batch_scos = net(inputs)
  # obtain the locations on the image in the original size
  cpu = torch.device('cpu')
  np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
  locations, scores = np_batch_locs[0,:-1,:], np.expand_dims(np_batch_scos[0,:-1], -1)

  scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2) , cropped_size[1] * 1. / inputs.size(-1)

  locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
  prediction = np.concatenate((locations, scores), axis=1).transpose(1,0)

  print ('the coordinates for {:} facial landmarks:'.format(param.num_pts))
  for i in range(param.num_pts):
    point = prediction[:, i]
    print ('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))

  if args.save:
    resize = 512
    image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0), args.face, resize)
    image.save(args.save)
    print ('save the visualization results into {:}'.format(args.save))
  else:
    print ('ignore the visualization procedure')
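
The coordinate mapping at the end of evaluate is easy to get backwards, so here it is as a worked example. The four-element cropped_size is indexed as [crop_h, crop_w, x_offset, y_offset] (that layout is inferred from how the snippet uses it): landmark locations come out in network-input pixels, get scaled into the crop, then shifted by the crop's offset in the original image.

# Worked version of the location-rescaling step above (assumed layout:
# cropped_size = [crop_h, crop_w, x_offset, y_offset]).
import numpy as np

cropped_size = np.array([250., 200., 40., 60.])   # crop HxW and its offset
input_h, input_w = 256, 256                       # network input resolution
locations = np.array([[128., 128.],               # (x, y) per landmark, in
                      [64., 192.]])               # network-input pixels

scale_h = cropped_size[0] / input_h
scale_w = cropped_size[1] / input_w
x_orig = locations[:, 0] * scale_w + cropped_size[2]
y_orig = locations[:, 1] * scale_h + cropped_size[3]
print(np.stack([x_orig, y_orig], axis=1))
# [[140.   185. ]
#  [ 90.   247.5]]
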
Example #6
def __getitem__(self, index):
    assert 0 <= index < self.length, 'Invalid index : {:}'.format(index)
    image_path, points = self.all_image_ps[index], self.all_results[index]
    ctr_x, ctr_y = self.get_center(index)
    W, H = self.crop_size, self.crop_size
    image = draw_image_by_points(
        image_path, points, 2, self.color,
        [ctr_x - W, ctr_y - H, ctr_x + W, ctr_y + H], False)
    image.save(str(self.save_dir / image_path.split('/')[-1]))
    return index
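
Returning only the index while saving the visualization inside __getitem__ is a deliberate trick: wrap the dataset in a DataLoader and its workers parallelize the drawing and disk I/O. A minimal runnable sketch of the pattern (with file writes standing in for the image saves):

# Save-as-side-effect pattern: __getitem__ does the work and returns the index.
from pathlib import Path
from torch.utils.data import DataLoader, Dataset

class SideEffectDataset(Dataset):
    def __init__(self, out_dir, length):
        self.out_dir, self.length = Path(out_dir), length
        self.out_dir.mkdir(parents=True, exist_ok=True)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        # stand-in for drawing and saving an image
        (self.out_dir / '{:05d}.txt'.format(index)).write_text(str(index))
        return index

if __name__ == '__main__':
    loader = DataLoader(SideEffectDataset('cache/demo', 100),
                        batch_size=25, num_workers=4)
    for _ in loader:
        pass  # every item was already written by the workers in __getitem__
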
Example #7
def evaluate(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    print('The image is {:}'.format(args.image))
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)

    # General Data Augmentation
    # (this snippet assumes dataset, net, and param are defined elsewhere in the module)
    print('Prepare input data')
    [image, _, _, _, _, _,
     cropped_size], meta = dataset.prepare_input(args.image, args.face)
    # network forward
    with torch.no_grad():
        inputs = image.unsqueeze(0).cuda()
        batch_heatmaps, batch_locs, batch_scos = net(inputs)
        flops, params = get_model_infos(net, inputs.shape)
        print('IN-shape : {:}, FLOPs : {:} MB, Params : {:} MB'.format(
            list(inputs.shape), flops, params))
    # obtain the locations on the image in the original size
    cpu = torch.device('cpu')
    np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(
        cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
    locations, scores = np_batch_locs[0, :-1, :], np.expand_dims(
        np_batch_scos[0, :-1], -1)

    scale_h, scale_w = cropped_size[0] * 1. / \
        inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)

    locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + \
        cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
    prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

    print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
    for i in range(param.num_pts):
        point = prediction[:, i]
        print('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.
              format(i, param.num_pts, float(point[0]), float(point[1]),
                     float(point[2])))

    if args.save:
        resize = 0
        image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0),
                                     args.face, resize)
        image.save(args.save)
        print('save the visualization results into {:}'.format(args.save))
    else:
        print('ignore the visualization procedure')
Example #8
def main(use_gray, transform_strs):
    if not use_gray:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # note: the next line overrides the ImageNet normalization above,
        # effectively disabling normalization for this debug visualization
        normalize = transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
        color = (102, 255, 102)
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
        normalize = transforms.Normalize(mean=[0], std=[1])  # overrides the above
        color = (255, )

    debug_dir = '{:}/cache/gray-{:}'.format(this_dir, use_gray)
    if not os.path.isdir(debug_dir):
        os.makedirs(debug_dir)

    transform_funcs = [transforms.ToTensor(), normalize
                       ] + get_transforms(transform_strs)
    transform = transforms.Compose(transform_funcs)

    shape = (300, 200)
    images, labels, boxes = get_list()
    for image, label, box in zip(images, labels, boxes):
        imgx = datasets.pil_loader(image, use_gray)
        np_points, _ = datasets.anno_parser(label, 68)
        meta = Point_Meta(68, np_points, box, image, 'face68')
        I, L, theta = transform(imgx, meta)
        points = torch.Tensor(L.get_points(True))
        points = normalize_points((I.size(1), I.size(2)), points)
        name = Path(image).name
        image = get_image_from_affine(I, theta, shape)
        points = torch.cat((points, torch.ones((1, points.shape[1]))), dim=0)
        # torch.solve(B, A) solves A @ X = B (torch.gesv is the older API);
        # on PyTorch >= 1.13 this becomes torch.linalg.solve(theta, points)
        # new_points, LU = torch.gesv(points, theta)
        new_points, _ = torch.solve(points, theta)

        PImage = draw_image_by_points(image,
                                      new_points[:2, :],
                                      2,
                                      color,
                                      False,
                                      False,
                                      True,
                                      draw_idx=True)

        save_name = os.path.join(debug_dir,
                                 '{:}-{:}'.format(transform_strs, name))
        PImage.save(save_name)
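
The torch.solve call above inverts the affine warp: points is 3xN in homogeneous form, and solving theta @ X = points pushes the points through the inverse of theta. torch.solve(B, A) was removed in PyTorch 1.13; the same algebra with the current API, as a standalone sketch:

# Inverse-affine step in isolation; torch.linalg.solve(A, B) solves A @ X = B.
import torch

theta = torch.tensor([[0.5, 0.0, 0.1],    # a 3x3 affine in homogeneous form
                      [0.0, 0.5, -0.2],
                      [0.0, 0.0, 1.0]])
pts = torch.tensor([[10., 20.],           # 2xN points ...
                    [30., 40.]])
pts_h = torch.cat((pts, torch.ones(1, pts.shape[1])), dim=0)  # ... made 3xN

mapped = theta @ pts_h                            # forward map
recovered = torch.linalg.solve(theta, mapped)     # undo it
assert torch.allclose(recovered, pts_h)
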
def pro_debug_save(save_dir, name, image, heatmap, normpoint, meantheta,
                   predmap, recover):
    name, ext = name.rsplit('.', 1)  # split on the last dot only
    save_dir.mkdir(parents=True, exist_ok=True)
    C, H, W = image.size()
    oriimage = recover(image)
    oriimage.save(str(save_dir / '{:}-ori.{:}'.format(name, ext)))
    if C == 1: color = (255, )
    else: color = (102, 255, 102)
    ptsimage = draw_image_by_points(oriimage, normpoint, 2, color, False,
                                    False, True)
    ptsimage.save(str(save_dir / '{:}-pts.{:}'.format(name, ext)))
    meanI = affine2image(image, meantheta, (H, W))
    meanimg = recover(meanI)
    meanimg.save(str(save_dir / '{:}-tomean.{:}'.format(name, ext)))

    _save_heatmap(oriimage, heatmap, save_dir, name, ext, 'GT')
    _save_heatmap(oriimage, predmap, save_dir, name, ext, 'PD')
def visualize(args):

  print ('The result file is {:}'.format(args.meta))
  print ('The save path is {:}'.format(args.save))
  meta = Path(args.meta)
  save = Path(args.save)
  assert meta.exists(), 'The meta file {:} does not exist'.format(meta)
  xmeta = Eval_Meta()
  xmeta.load(meta)
  print ('this meta file has {:} predictions'.format(len(xmeta)))
  if not save.exists(): os.makedirs( args.save )
  for i in range(len(xmeta)):
    image, prediction = xmeta.image_lists[i], xmeta.predictions[i]
    name = osp.basename(image)
    image = draw_image_by_points(image, prediction, 2, (255, 0, 0), False, False)
    path = save / name
    image.save(path)
    print ('{:03d}-th image is saved into {:}'.format(i, path))
Exemple #11
0
def visualize(args):

  print ('The result file is {:}'.format(args.meta))
  print ('The save path is {:}'.format(args.save))
  meta = Path(args.meta)
  save = Path(args.save)
  assert meta.exists(), 'The meta file {:} does not exist'.format(meta)
  xmeta = Eval_Meta()
  xmeta.load(meta)
  print ('this meta file has {:} predictions'.format(len(xmeta)))
  if not save.exists(): os.makedirs( args.save )
  for i in range(len(xmeta)):
    image, prediction = xmeta.image_lists[i], xmeta.predictions[i]
    name = osp.basename(image)
    image = draw_image_by_points(image, prediction, 6, (255, 0, 0), False, False)
    path = save / name
    image.save(path)
    print ('{:03d}-th image is saved into {:}'.format(i, path))
def visualize(args):

    print('The result file is {:}'.format(args.meta))
    print('The save path is {:}'.format(args.save))
    meta = Path(args.meta)
    save = Path(args.save)
    assert meta.exists(), 'The meta file {:} does not exist'.format(meta)
    eval_metas = torch.load(meta)
    print('There are {:} evaluation results and use {:}.'.format(
        len(eval_metas), args.idx))
    xmeta = eval_metas[args.idx]
    print('this meta file has {:} predictions'.format(len(xmeta)))
    if not save.exists(): save.mkdir(parents=True, exist_ok=True)
    for i in range(len(xmeta)):
        image, prediction = xmeta.image_lists[i], xmeta.predictions[i]
        name = osp.basename(image)
        image = draw_image_by_points(image, prediction, 2, (255, 0, 0), False,
                                     False)
        path = save / name
        image.save(path)
        print('[{:02d}] {:03d}/{:03d}-th image is saved into {:}'.format(
            args.idx, i, len(xmeta), path))
def evaluate(args):
    if args.cuda:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    else:
        print('Use the CPU mode')

    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    last_info = Path(args.model)
    assert last_info.exists(), 'The model path {:} does not exist'.format(
        last_info)
    last_info = torch.load(last_info, map_location=torch.device('cpu'))
    snapshot = last_info['last_checkpoint']
    assert snapshot.exists(), 'The model path {:} does not exist'.format(
        snapshot)
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    snapshot = torch.load(snapshot, map_location=torch.device('cpu'))

    param = snapshot['args']
    # General Data Augmentation
    if not param.use_gray:
        mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        mean_fill = (0.5, )
        normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
    eval_transform  = transforms.Compose2V([transforms.ToTensor(), normalize, \
                                            transforms.PreCrop(param.pre_crop_expand), \
                                            transforms.CenterCrop(param.crop_max)])

    model_config = load_configure(param.model_config, None)
    dataset = Dataset(eval_transform, param.sigma, model_config.downsample,
                      param.heatmap_type, (120, 96), param.use_gray, None,
                      param.data_indicator)
    #dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (param.height,param.width), param.use_gray, None, param.data_indicator)
    dataset.reset(param.num_pts)
    net = obtain_pro_model(model_config, param.num_pts + 1, param.sigma,
                           param.use_gray)
    net.load_state_dict(remove_module_dict(snapshot['state_dict']))
    if args.cuda: net = net.cuda()
    print('Processing the input face image.')
    face_meta = PointMeta(dataset.NUM_PTS, None, args.face, args.image,
                          'BASE-EVAL')
    face_img = pil_loader(args.image, dataset.use_gray)
    affineImage, heatmaps, mask, norm_trans_points, transthetas, _, _, _, shape = dataset._process_(
        face_img, face_meta, -1)

    #import cv2; cv2.imwrite('temp.png', transforms.ToPILImage(normalize, False)(affineImage))
    # network forward
    with torch.no_grad():
        if args.cuda: inputs = affineImage.unsqueeze(0).cuda()
        else: inputs = affineImage.unsqueeze(0)

        _, _, batch_locs, batch_scos = net(inputs)
        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        (batch_size, C, H, W), num_pts = inputs.size(), param.num_pts
        locations, scores = batch_locs[0, :-1, :], batch_scos[:, :-1]
        norm_locs = normalize_points((H, W), locations.transpose(1, 0))
        norm_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        transtheta = transthetas[:2, :]
        norm_locs = torch.mm(transtheta, norm_locs)
        real_locs = denormalize_points(shape.tolist(), norm_locs)
        real_locs = torch.cat((real_locs, scores), dim=0)
    print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
    for i in range(param.num_pts):
        point = real_locs[:, i]
        print(
            'the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f}), score = {:.2f}'.
            format(i, param.num_pts, float(point[0]), float(point[1]),
                   float(point[2])))

    if args.save:
        resize = 512
        image = draw_image_by_points(args.image, real_locs, 2, (255, 0, 0),
                                     args.face, resize)
        image.save(args.save)
        print('save the visualization results into {:}'.format(args.save))
    else:
        print('ignore the visualization procedure')
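
normalize_points and denormalize_points convert between pixel coordinates and the normalized space the affine thetas operate in. The helpers below sketch the convention these snippets appear to assume ([-1, 1] per axis, matching affine_grid-style thetas); the real implementations live in the source repository.

# Sketch of the assumed pixel <-> normalized coordinate convention.
import torch

def normalize_points_sketch(shape, points):
    H, W = shape                              # points: 2xN pixel coordinates
    norm = points.clone().float()
    norm[0] = points[0] / (W - 1) * 2 - 1     # x -> [-1, 1]
    norm[1] = points[1] / (H - 1) * 2 - 1     # y -> [-1, 1]
    return norm

def denormalize_points_sketch(shape, norm):
    H, W = shape
    points = norm.clone()
    points[0] = (norm[0] + 1) / 2 * (W - 1)   # back to pixel x
    points[1] = (norm[1] + 1) / 2 * (H - 1)   # back to pixel y
    return points

pts = torch.tensor([[0., 199.], [0., 99.]])   # corners of a 100x200 image
back = denormalize_points_sketch(
    (100, 200), normalize_points_sketch((100, 200), pts))
assert torch.allclose(back, pts)
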
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    temporal_main, eval_all = procedures['{:}-train'.format(
        args.procedure)], procedures['{:}-test'.format(args.procedure)]

    logger = prepare_logger(args)

    # General Data Augmentation
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(
        transforms, args)
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    assert (args.scale_min +
            args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(
                args.scale_min, args.scale_max)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    sbr_config = load_configure(args.sbr_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, args.sigma, shape))
    logger.log('--> SBR Configuration : {:}\n'.format(sbr_config))

    # Training Dataset
    train_data   = VDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, \
                              args.data_indicator, sbr_config, transforms.ToPILImage(normalize, 'cv2gray'))
    train_data.load_list(args.train_lists, args.num_pts, args.boxindicator,
                         args.normalizeL, True)
    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(
            args.num_pts)
        if train_data is not None: train_data = convert68to49(train_data)
        args.num_pts = 49

    # define the temporal model (accelerated SBR)
    net = obtain_pro_temporal(model_config, sbr_config, args.num_pts,
                              args.sigma, args.use_gray)
    assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))

    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    optimizer, scheduler, criterion = obtain_optimizer(net.parameters(),
                                                       opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    try:
        last_checkpoint = load_checkpoint(args.init_model)
        checkpoint = remove_module_dict(last_checkpoint['state_dict'], False)
        net.module.detector.load_state_dict(checkpoint)
    except Exception:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['state_dict'])

    detector = torch.nn.DataParallel(net.module.detector)
    logger.log("=> initialize the detector : {:}".format(args.init_model))

    net.eval()
    detector.eval()

    logger.log('SBR Config : {:}'.format(sbr_config))
    save_xdir = logger.path('meta')
    random.seed(111)
    index_list = list(range(len(train_data)))
    random.shuffle(index_list)
    #selected_list = index_list[: min(200, len(index_list))]
    #selected_list = [7260, 11506, 39952, 75196, 51614, 41061, 37747, 41355]
    #for iidx, i in enumerate(selected_list):
    index_list.remove(47875)
    selected_list = [47875] + index_list
    save_xdir = logger.path('meta')

    type_error_1, type_error_2, type_error, misses = 0, 0, 0, 0
    type_error_pts, total_pts = 0, 0
    for iidx, i in enumerate(selected_list):
        frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images = train_data[
            i]

        frames, Fflows, Bflows, is_images = frames.unsqueeze(
            0), Fflows.unsqueeze(0), Bflows.unsqueeze(0), is_images.unsqueeze(
                0)
        # batch_heatmaps is a list for stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        with torch.no_grad():
            if args.procedure == 'heatmap':
                batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck = net(
                    frames, Fflows, Bflows, is_images)
            else:
                batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(
                    frames, Fflows, Bflows, is_images)

        (batch_size, frame_length, C, H,
         W), num_pts, annotate_index = frames.size(
         ), args.num_pts, train_data.video_L
        batch_locs = batch_locs.cpu()[:, :, :num_pts]
        video_mask = masks.unsqueeze(0)[:, :num_pts]
        batch_past2now = batch_past2now.cpu()[:, :, :num_pts]
        batch_future2now = batch_future2now.cpu()[:, :, :num_pts]
        batch_FBcheck = batch_FBcheck[:, :num_pts].cpu()
        FB_check_oks = FB_communication(criterion, batch_locs, batch_past2now,
                                        batch_future2now, batch_FBcheck,
                                        video_mask, sbr_config)

        # locations
        norm_past_det_locs = torch.cat(
            (batch_locs[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_noww_det_locs = torch.cat(
            (batch_locs[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_det_locs = torch.cat(
            (batch_locs[0, annotate_index + 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_locs = torch.cat(
            (batch_past2now[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_past_locs = torch.cat(
            (batch_future2now[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        transtheta = transthetas[:2, :]
        norm_past_det_locs = torch.mm(transtheta, norm_past_det_locs)
        norm_noww_det_locs = torch.mm(transtheta, norm_noww_det_locs)
        norm_next_det_locs = torch.mm(transtheta, norm_next_det_locs)
        norm_next_locs = torch.mm(transtheta, norm_next_locs)
        norm_past_locs = torch.mm(transtheta, norm_past_locs)
        real_past_det_locs = denormalize_points(shapes.tolist(),
                                                norm_past_det_locs)
        real_noww_det_locs = denormalize_points(shapes.tolist(),
                                                norm_noww_det_locs)
        real_next_det_locs = denormalize_points(shapes.tolist(),
                                                norm_next_det_locs)
        real_next_locs = denormalize_points(shapes.tolist(), norm_next_locs)
        real_past_locs = denormalize_points(shapes.tolist(), norm_past_locs)
        gt_noww_points = train_data.labels[image_index.item()].get_points()
        gt_past_points = train_data.find_index(
            train_data.datas[image_index.item()][annotate_index - 1])
        gt_next_points = train_data.find_index(
            train_data.datas[image_index.item()][annotate_index + 1])

        FB_check_oks = FB_check_oks[:num_pts].squeeze()
        if FB_check_oks.sum().item() > 2:
            # type 1 error : detection at both (t) and (t-1) is wrong, while pass the check
            is_type_1, (T_wrong, T_total) = check_is_1st_error(
                [real_past_det_locs, real_noww_det_locs, real_next_det_locs],
                [gt_past_points, gt_noww_points, gt_next_points], FB_check_oks,
                shapes)
            # type 2 error : detection at frame t is ok, while tracking are wrong and frame at (t-1) is wrong:
            spec_index, is_type_2 = check_is_2nd_error(
                real_noww_det_locs, gt_noww_points,
                [real_past_locs, real_next_locs],
                [gt_past_points, gt_next_points], FB_check_oks, shapes)
            type_error_1 += is_type_1
            type_error_2 += is_type_2
            type_error += is_type_1 or is_type_2
            type_error_pts, total_pts = type_error_pts + T_wrong, total_pts + T_total
            if is_type_2:
                RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
                [image_past, image_noww,
                 image_next] = train_data.datas[image_index.item()]
                crop_box = train_data.labels[
                    image_index.item()].get_box().tolist()
                point_index = FB_check_oks.nonzero().squeeze().tolist()
                colors = [
                    GREEN if _i in point_index else RED
                    for _i in range(num_pts)
                ] + [BLUE for _i in range(num_pts)]

                I_past_det = draw_image_by_points(
                    image_past,
                    torch.cat((real_past_det_locs, gt_past_points[:2]), dim=1),
                    3, colors, crop_box, (400, 500))
                I_noww_det = draw_image_by_points(
                    image_noww,
                    torch.cat((real_noww_det_locs, gt_noww_points[:2]), dim=1),
                    3, colors, crop_box, (400, 500))
                I_next_det = draw_image_by_points(
                    image_next,
                    torch.cat((real_next_det_locs, gt_next_points[:2]), dim=1),
                    3, colors, crop_box, (400, 500))
                I_past = draw_image_by_points(
                    image_past,
                    torch.cat((real_past_locs, gt_past_points[:2]), dim=1), 3,
                    colors, crop_box, (400, 500))
                I_next = draw_image_by_points(
                    image_next,
                    torch.cat((real_next_locs, gt_next_points[:2]), dim=1), 3,
                    colors, crop_box, (400, 500))
                ###
                I_past.save(str(save_xdir / '{:05d}-v1-a-pastt.png'.format(i)))
                I_noww_det.save(
                    str(save_xdir / '{:05d}-v1-b-curre.png'.format(i)))
                I_next.save(str(save_xdir / '{:05d}-v1-c-nextt.png'.format(i)))

                I_past_det.save(
                    str(save_xdir / '{:05d}-v1-det-a-past.png'.format(i)))
                I_noww_det.save(
                    str(save_xdir / '{:05d}-v1-det-b-curr.png'.format(i)))
                I_next_det.save(
                    str(save_xdir / '{:05d}-v1-det-c-next.png'.format(i)))

                logger.log('TYPE-ERROR : {:}, landmark-index : {:}'.format(
                    i, spec_index))
        else:
            misses += 1
        string = 'Handle {:05d}/{:05d} :: {:05d}'.format(
            iidx, len(selected_list), i)
        string += ', error-1 : {:} ({:.2f}%), error-2 : {:} ({:.2f}%)'.format(
            type_error_1, type_error_1 * 100.0 / (iidx + 1), type_error_2,
            type_error_2 * 100.0 / (iidx + 1))
        string += ', error : {:} ({:.2f}%), miss : {:}'.format(
            type_error, type_error * 100.0 / (iidx + 1), misses)
        string += ', final-error : {:05d} / {:05d} = {:.2f}%'.format(
            type_error_pts, total_pts, type_error_pts * 100.0 / total_pts)
        logger.log(string)
Example #15
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.set_num_threads(args.workers)
    print('Training Base Detector : prepare_seed : {:}'.format(args.rand_seed))
    prepare_seed(args.rand_seed)
    temporal_main, eval_all = procedures['{:}-train'.format(
        args.procedure)], procedures['{:}-test'.format(args.procedure)]

    logger = prepare_logger(args)

    # General Data Augmentation
    normalize, train_transform, eval_transform, robust_transform = prepare_data_augmentation(
        transforms, args)
    recover = transforms.ToPILImage(normalize)
    args.tensor2imageF = recover
    assert (args.scale_min +
            args.scale_max) / 2 == 1, 'The scale is not ok : {:} ~ {:}'.format(
                args.scale_min, args.scale_max)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    sbr_config = load_configure(args.sbr_config, logger)
    shape = (args.height, args.width)
    logger.log('--> {:}\n--> Sigma : {:}, Shape : {:}'.format(
        model_config, args.sigma, shape))
    logger.log('--> SBR Configuration : {:}\n'.format(sbr_config))

    # Training Dataset
    train_data   = VDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, shape, args.use_gray, args.mean_point, \
                              args.data_indicator, sbr_config, transforms.ToPILImage(normalize, 'cv2gray'))
    train_data.load_list(args.train_lists, args.num_pts, args.boxindicator,
                         args.normalizeL, True)

    # Evaluation Dataloader
    assert len(
        args.eval_ilists) == 1, 'invalid length of eval_ilists : {:}'.format(
            len(args.eval_ilists))
    eval_data = IDataset(eval_transform, args.sigma, model_config.downsample,
                         args.heatmap_type, shape, args.use_gray,
                         args.mean_point, args.data_indicator)
    eval_data.load_list(args.eval_ilists[0], args.num_pts, args.boxindicator,
                        args.normalizeL, True)
    if args.x68to49:
        assert args.num_pts == 68, 'args.num_pts is not 68 vs. {:}'.format(
            args.num_pts)
        if train_data is not None: train_data = convert68to49(train_data)
        eval_data = convert68to49(eval_data)
        args.num_pts = 49

    # define the temporal model (accelerated SBR)
    net = obtain_pro_temporal(model_config, sbr_config, args.num_pts,
                              args.sigma, args.use_gray)
    assert model_config.downsample == net.downsample, 'downsample is not correct : {:} vs {:}'.format(
        model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    logger.log('Evaluate-data : {:}'.format(eval_data))

    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    optimizer, scheduler, criterion = obtain_optimizer(net.parameters(),
                                                       opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    try:
        last_checkpoint = load_checkpoint(args.init_model)
        checkpoint = remove_module_dict(last_checkpoint['state_dict'], False)
        net.module.detector.load_state_dict(checkpoint)
    except Exception:
        last_checkpoint = load_checkpoint(args.init_model)
        net.load_state_dict(last_checkpoint['state_dict'])

    detector = torch.nn.DataParallel(net.module.detector)
    logger.log("=> initialize the detector : {:}".format(args.init_model))

    net.eval()
    detector.eval()

    logger.log('SBR Config : {:}'.format(sbr_config))
    save_xdir = logger.path('meta')
    type_error = 0
    random.seed(111)
    index_list = list(range(len(train_data)))
    random.shuffle(index_list)
    #selected_list = index_list[: min(200, len(index_list))]

    selected_list = [
        7260, 11506, 39952, 75196, 51614, 41061, 37747, 41355, 47875
    ]
    for iidx, i in enumerate(selected_list):
        frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images = train_data[
            i]

        frames, Fflows, Bflows, is_images = frames.unsqueeze(
            0), Fflows.unsqueeze(0), Bflows.unsqueeze(0), is_images.unsqueeze(
                0)
        # batch_heatmaps is a list for stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        if args.procedure == 'heatmap':
            batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck = net(
                frames, Fflows, Bflows, is_images)
        else:
            batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(
                frames, Fflows, Bflows, is_images)

        (batch_size, frame_length, C, H,
         W), num_pts, annotate_index = frames.size(
         ), args.num_pts, train_data.video_L
        batch_locs = batch_locs.cpu()[:, :, :num_pts]
        video_mask = masks.unsqueeze(0)[:, :num_pts]
        batch_past2now = batch_past2now.cpu()[:, :, :num_pts]
        batch_future2now = batch_future2now.cpu()[:, :, :num_pts]
        batch_FBcheck = batch_FBcheck[:, :num_pts].cpu()
        FB_check_oks = FB_communication(criterion, batch_locs, batch_past2now,
                                        batch_future2now, batch_FBcheck,
                                        video_mask, sbr_config)

        # locations
        norm_past_det_locs = torch.cat(
            (batch_locs[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_noww_det_locs = torch.cat(
            (batch_locs[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_det_locs = torch.cat(
            (batch_locs[0, annotate_index + 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_next_locs = torch.cat(
            (batch_past2now[0, annotate_index, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        norm_past_locs = torch.cat(
            (batch_future2now[0, annotate_index - 1, :num_pts].permute(
                1, 0), torch.ones(1, num_pts)),
            dim=0)
        transtheta = transthetas[:2, :]
        norm_past_det_locs = torch.mm(transtheta, norm_past_det_locs)
        norm_noww_det_locs = torch.mm(transtheta, norm_noww_det_locs)
        norm_next_det_locs = torch.mm(transtheta, norm_next_det_locs)
        norm_next_locs = torch.mm(transtheta, norm_next_locs)
        norm_past_locs = torch.mm(transtheta, norm_past_locs)
        real_past_det_locs = denormalize_points(shapes.tolist(),
                                                norm_past_det_locs)
        real_noww_det_locs = denormalize_points(shapes.tolist(),
                                                norm_noww_det_locs)
        real_next_det_locs = denormalize_points(shapes.tolist(),
                                                norm_next_det_locs)
        real_next_locs = denormalize_points(shapes.tolist(), norm_next_locs)
        real_past_locs = denormalize_points(shapes.tolist(), norm_past_locs)
        gt_noww_points = train_data.labels[image_index.item()].get_points()

        FB_check_oks = FB_check_oks[:num_pts].squeeze()
        if FB_check_oks.sum().item() > 2:
            point_index = FB_check_oks.nonzero().squeeze().tolist()
            something_wrong = False
            for pidx in point_index:
                real_now_det_loc = real_noww_det_locs[:, pidx]
                real_pst_det_loc = real_past_det_locs[:, pidx]
                real_net_det_loc = real_next_det_locs[:, pidx]
                real_nex_loc = real_next_locs[:, pidx]
                real_pst_loc = real_past_locs[:, pidx]
                grdt_now_loc = gt_noww_points[:2, pidx]
                #if torch.abs(real_now_loc - grdt_now_loc).max() > 5:
                #  something_wrong = True
                #if torch.abs(real_nex_loc - grdt_nex_loc).max() > 5:
                #  something_wrong = True
            #if something_wrong == True:
            if True:
                [image_past, image_noww,
                 image_next] = train_data.datas[image_index.item()]
                try:
                    crop_box = train_data.labels[
                        image_index.item()].get_box().tolist()
                    #crop_box = [crop_box[0]-20, crop_box[1]-20, crop_box[2]+20, crop_box[3]+20]
                except Exception:
                    crop_box = False

                RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
                colors = [
                    GREEN if _i in point_index else RED
                    for _i in range(num_pts)
                ]
                if crop_box != False or True:  # note: 'or True' makes this always run
                    I_past_det = draw_image_by_points(image_past,
                                                      real_past_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_noww_det = draw_image_by_points(image_noww,
                                                      real_noww_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_next_det = draw_image_by_points(image_next,
                                                      real_next_det_locs[:], 3,
                                                      colors, crop_box,
                                                      (400, 500))
                    I_next = draw_image_by_points(image_next,
                                                  real_next_locs[:], 3, colors,
                                                  crop_box, (400, 500))
                    I_past = draw_image_by_points(image_past,
                                                  real_past_locs[:], 3, colors,
                                                  crop_box, (400, 500))

                    I_past.save(
                        str(save_xdir / '{:05d}-v1-a-pastt.png'.format(i)))
                    I_noww_det.save(
                        str(save_xdir / '{:05d}-v1-b-curre.png'.format(i)))
                    I_next.save(
                        str(save_xdir / '{:05d}-v1-c-nextt.png'.format(i)))

                    I_past_det.save(
                        str(save_xdir / '{:05d}-v1-det-a-past.png'.format(i)))
                    I_noww_det.save(
                        str(save_xdir / '{:05d}-v1-det-b-curr.png'.format(i)))
                    I_next_det.save(
                        str(save_xdir / '{:05d}-v1-det-c-next.png'.format(i)))

                #[image_past, image_noww, image_next] = train_data.datas[image_index.item()]
                #image_noww = draw_image_by_points(image_noww, real_noww_locs[:], 2, colors, False, False)
                #image_next = draw_image_by_points(image_next, real_next_locs[:], 2, colors, False, False)
                #image_past = draw_image_by_points(image_past, real_past_locs[:], 2, colors, False, False)
                #image_noww.save( str(save_xdir / '{:05d}-v2-b-curre.png'.format(i)) )
                #image_next.save( str(save_xdir / '{:05d}-v2-c-nextt.png'.format(i)) )
                #image_past.save( str(save_xdir / '{:05d}-v2-a-pastt.png'.format(i)) )
                #type_error += 1
        logger.log(
            'Handle {:05d}/{:05d} :: {:05d}, ok-points={:.3f}, wrong data={:}'.
            format(iidx, len(selected_list), i,
                   FB_check_oks.float().mean().item(), type_error))

    save_xx_dir = save_xdir.parent / 'image-data'
    save_xx_dir.mkdir(parents=True, exist_ok=True)
    selected_list = [100, 115, 200, 300, 400] + list(range(200, 220))
    for iidx, i in enumerate(selected_list):
        inputs, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes = eval_data[
            i]
        inputs = inputs.unsqueeze(0)
        (batch_size, C, H, W), num_pts = inputs.size(), args.num_pts
        _, _, batch_locs, batch_scos = detector(inputs)  # inputs

        batch_locs, batch_scos = batch_locs.cpu(), batch_scos.cpu()
        norm_locs = normalize_points((H, W),
                                     batch_locs[0, :num_pts].transpose(1, 0))
        norm_det_locs = torch.cat((norm_locs, torch.ones(1, num_pts)), dim=0)
        norm_det_locs = torch.mm(transthetas[:2, :], norm_det_locs)
        real_det_locs = denormalize_points(shapes.tolist(), norm_det_locs)
        gt_now_points = eval_data.labels[image_index.item()].get_points()
        image_now = eval_data.datas[image_index.item()]
        crop_box = eval_data.labels[image_index.item()].get_box().tolist()

        RED, GREEN, BLUE = (255, 0, 0), (0, 255, 0), (0, 0, 255)
        Gcolors = [GREEN for _ in range(num_pts)]
        points = torch.cat((real_det_locs, gt_now_points[:2]), dim=1)
        colors = [GREEN
                  for _ in range(num_pts)] + [BLUE for _ in range(num_pts)]
        image = draw_image_by_points(image_now, real_det_locs, 3, Gcolors,
                                     crop_box, (400, 500))
        image.save(str(save_xx_dir / '{:05d}-crop.png'.format(i)))
        image = draw_image_by_points(image_now, points, 3, colors, False,
                                     False)
        #image  = draw_image_by_points(image_now, real_det_locs, 3, colors , False, False)
        image.save(str(save_xx_dir / '{:05d}-orig.png'.format(i)))
    logger.log('Finish drawing : {:}'.format(save_xdir))
    logger.log('Finish drawing : {:}'.format(save_xx_dir))
    logger.close()