Example #1
def save_to_file_trainval(save_dir, trains, valids):
  # Serialize MPII train/val person records into .pth files and accumulate
  # normalized landmark coordinates for a mean-shape visualization.
  # NOTE: `image_dir` is assumed to be defined at module level.
  save_dir = Path(save_dir)
  save_dir.mkdir(parents=True, exist_ok=True)

  ## Train
  mean_landmark = [[] for _ in range(16)]  # MPII annotates 16 body joints
  TDatas, OKs = [], []
  for index, DX in enumerate(trains):
    image_path = osp.join(image_dir, DX['name'])
    assert osp.isfile(image_path), '{:} does not exist'.format(image_path)
    for person in DX['persons']:
      data = get_person(person, image_path)
      TDatas.append( data )
      ok = check_in_image(data, 'TRAIN-{:}'.format(index))
      OKs.append( ok )
      # calculate means
      box, landmarks = data['box-default'], data['points']
      for idx in range(landmarks.shape[1]):
        if int(landmarks[2, idx] + 0.5) == 0: continue  # skip invisible joints
        x, y = float(landmarks[0,idx]-box[0]), float(landmarks[1,idx]-box[1])
        x, y = normalize_L(x, box[2]-box[0]), normalize_L(y, box[3]-box[1])
        mean_landmark[idx].append( (x,y) )
  torch.save(TDatas, save_dir / 'train.pth')
  print ('Training has {:} persons; {:.2f}% of them are not fully inside their image.'.format(len(TDatas), 100 - np.array(OKs).mean() * 100))

  # Validation
  VDatas, OKs = [], []
  for index, DX in enumerate(valids):
    image_path = osp.join(image_dir, DX['name'])
    assert osp.isfile(image_path), '{:} does not exist'.format(image_path)
    for person in DX['persons']:
      data = get_person(person, image_path)
      VDatas.append( data )
      ok = check_in_image(data, 'VALID-{:}'.format(index))
      OKs.append( ok )
      # calculate means
      box, landmarks = data['box-default'], data['points']
      for idx in range(landmarks.shape[1]):
        if int(landmarks[2, idx] + 0.5) == 0: continue  # skip invisible joints
        x, y = float(landmarks[0,idx]-box[0]), float(landmarks[1,idx]-box[1])
        x, y = normalize_L(x, box[2]-box[0]), normalize_L(y, box[3]-box[1])
        mean_landmark[idx].append( (x,y) )
  print ('Validation has {:} persons; {:.2f}% of them are not fully inside their image.'.format(len(VDatas), 100 - np.array(OKs).mean() * 100))

  torch.save(VDatas, save_dir / 'valid.pth')
  
  torch.save(TDatas + VDatas, save_dir / 'trainval.pth')

  mean_landmark = [np.array(x) for x in mean_landmark]
  mean_landmark = [np.mean(x, axis=0)  for x in mean_landmark]
  mean_landmark = np.array(mean_landmark)
  image = draw_points(mean_landmark, 600, 500, True)
  image.save(osp.join(save_dir, 'MPII-trainval.png'))
  torch.save({'default': mean_landmark}, osp.join(save_dir, 'MPII-trainval-mean.pth'))
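
A hedged usage sketch for the function above (paths are placeholders; `trains`/`valids` are assumed to be lists of MPII annotation dicts, each carrying the 'name' and 'persons' keys read in the loops):

# Hypothetical invocation; `image_dir` must exist at module level (see note in the function).
image_dir = '/datasets/MPII/images'                       # placeholder path
trains, valids = torch.load('/datasets/MPII/splits.pth')  # assumed pre-built split file
save_to_file_trainval('./lists/MPII', trains, valids)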
Example #2
def stm_main_heatmap(args, loader, net, criterion, optimizer, epoch_str,
                     logger, opt_config, stm_config, use_stm, mode):
    assert mode in ('train', 'test'), 'invalid mode : {:}'.format(mode)
    args = copy.deepcopy(args)
    batch_time, data_time, forward_time, eval_time = \
        AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    visible_points, DetLosses, TemporalLosses, MultiviewLosses, TotalLosses = \
        AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    alk_points, a3d_points = AverageMeter(), AverageMeter()
    annotate_index = loader.dataset.video_L
    eval_meta = Eval_Meta()
    cpu = torch.device('cpu')

    if args.debug:
        save_dir = Path(
            args.save_path) / 'DEBUG' / ('{:}-'.format(mode) + epoch_str)
    else:
        save_dir = None

    # switch to train mode
    if mode == 'train':
        logger.log('STM-Main-REG : training : {:} .. STM = {:}'.format(
            stm_config, use_stm))
        print_freq = args.print_freq
        net.train()
        criterion.train()
    else:
        logger.log('STM-Main-REG : evaluation mode.')
        print_freq = args.print_freq_eval
        net.eval()
        criterion.eval()

    i_batch_size, v_batch_size, m_batch_size = args.i_batch_size, args.v_batch_size, args.m_batch_size
    iv_size = i_batch_size + v_batch_size
    end = time.time()
    for i, (frames, Fflows, Bflows, targets, masks, normpoints, transthetas,
            MV_Tensors, MV_Thetas, MV_Shapes, MV_KRT, torch_is_3D, torch_is_images,
            image_index, nopoints, shapes, MultiViewPaths) in enumerate(loader):
        # frames : IBatch+VBatch+MBatch, Frame, Channel, Height, Width
        # Fflows : IBatch+VBatch+MBatch, Frame-1, Height, Width, 2
        # Bflows : IBatch+VBatch+MBatch, Frame-1, Height, Width, 2

        # information
        MV_Mask = masks[iv_size:]
        # keep only the image+video part of the batch for the 2-D streams
        frames, Fflows, Bflows = frames[:iv_size], Fflows[:iv_size], Bflows[:iv_size]
        targets, masks = targets[:iv_size], masks[:iv_size]
        normpoints, transthetas = normpoints[:iv_size], transthetas[:iv_size]
        nopoints, shapes, torch_is_images = \
            nopoints[:iv_size], shapes[:iv_size], torch_is_images[:iv_size]
        # the multiview part occupies the tail of the batch
        MV_Tensors, MV_Thetas, MV_Shapes, MV_KRT, torch_is_3D = \
            MV_Tensors[iv_size:], MV_Thetas[iv_size:], MV_Shapes[iv_size:], MV_KRT[iv_size:], torch_is_3D[iv_size:]
        assert torch.sum(torch_is_images[:i_batch_size]).item() == i_batch_size, \
            'Image Check Fail : {:} vs. {:}'.format(torch_is_images[:i_batch_size], i_batch_size)
        assert v_batch_size == 0 or torch.sum(torch_is_images[i_batch_size:]).item() == 0, \
            'Video Check Fail : {:} vs. {:}'.format(torch_is_images[i_batch_size:], v_batch_size)
        assert torch_is_3D.sum().item() == m_batch_size, \
            'Multiview Check Fail : {:} vs. {:}'.format(torch_is_3D, m_batch_size)
        image_index = image_index.squeeze(1).tolist()
        (batch_size, frame_length, C, H, W) = frames.size()
        num_pts, num_views = args.num_pts, stm_config.max_views
        visible_point_num = float(np.sum(masks.numpy()[:, :-1, :, :])) / batch_size
        visible_points.update(visible_point_num, batch_size)

        normpoints = normpoints.permute(0, 2, 1)
        target_heats = targets.cuda(non_blocking=True)
        target_points = normpoints[:, :, :2].contiguous().cuda(non_blocking=True)
        target_scores = normpoints[:, :, 2:].contiguous().cuda(non_blocking=True)
        det_masks = (1 - nopoints).view(batch_size, 1, 1, 1) * masks
        have_det_loss = det_masks.sum().item() > 0
        det_masks = det_masks.cuda(non_blocking=True)
        nopoints = nopoints.squeeze(1).tolist()

        # measure data loading time
        data_time.update(time.time() - end)

        # batch_heatmaps is a list for stage-predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        batch_heatmaps, batch_locs, batch_scos, batch_past2now, batch_future2now, batch_FBcheck, multiview_heatmaps, multiview_locs = net(
            frames, Fflows, Bflows, MV_Tensors, torch_is_images)
        annot_heatmaps = [x[:, annotate_index] for x in batch_heatmaps]
        forward_time.update(time.time() - end)

        # detection loss
        if have_det_loss:
            det_loss, each_stage_loss_value = compute_stage_loss(
                criterion, target_heats, annot_heatmaps, det_masks)
            DetLosses.update(det_loss.item(), batch_size)
            each_stage_loss_value = show_stage_loss(each_stage_loss_value)
        else:
            det_loss, each_stage_loss_value = 0, 'no-det-loss'

        # temporal loss
        if use_stm[0]:
            video_batch_locs = batch_locs[i_batch_size:, :, :num_pts]
            video_past2now = batch_past2now[i_batch_size:, :, :num_pts]
            video_future2now = batch_future2now[i_batch_size:, :, :num_pts]
            video_FBcheck = batch_FBcheck[i_batch_size:, :num_pts]
            video_mask = masks[i_batch_size:, :num_pts].contiguous().cuda(
                non_blocking=True)
            video_heatmaps = [
                x[i_batch_size:, :, :num_pts] for x in batch_heatmaps
            ]
            sbr_loss, available_nums, sbr_loss_string = calculate_temporal_loss(
                criterion, video_heatmaps, video_batch_locs, video_past2now,
                video_future2now, video_FBcheck, video_mask, stm_config)
            alk_points.update(
                float(available_nums) / v_batch_size, v_batch_size)
            if available_nums > stm_config.available_sbr_thresh:
                TemporalLosses.update(sbr_loss.item(), v_batch_size)
            else:
                sbr_loss, sbr_loss_string = 0, 'non-sbr-loss'
        else:
            sbr_loss, sbr_loss_string = 0, 'non-sbr-loss'

        # multiview loss
        if use_stm[1]:
            MV_Mask_G = MV_Mask[:, :-1].view(
                m_batch_size, 1, -1, 1).contiguous().cuda(non_blocking=True)
            MV_Thetas_G = MV_Thetas.to(multiview_locs.device)
            MV_Shapes_G = MV_Shapes.to(multiview_locs.device).view(
                m_batch_size, num_views, 1, 2)
            MV_KRT_G = MV_KRT.to(multiview_locs.device)
            mv_norm_locs_trs = torch.cat(
                (multiview_locs[:, :, :num_pts].permute(0, 1, 3, 2),
                 torch.ones(m_batch_size,
                            num_views,
                            1,
                            num_pts,
                            device=multiview_locs.device)),
                dim=2)
            mv_norm_locs_ori = torch.matmul(MV_Thetas_G[:, :, :2],
                                            mv_norm_locs_trs)
            mv_norm_locs_ori = mv_norm_locs_ori.permute(0, 1, 3, 2)
            mv_real_locs_ori = denormalize_L(mv_norm_locs_ori, MV_Shapes_G)
            mv_3D_locs_ori = TriangulateDLT_BatchCam(MV_KRT_G,
                                                     mv_real_locs_ori)
            mv_proj_locs_ori = ProjectKRT_Batch(
                MV_KRT_G, mv_3D_locs_ori.view(m_batch_size, 1, num_pts, 3))
            mv_pnorm_locs_ori = normalize_L(mv_proj_locs_ori, MV_Shapes_G)
            mv_pnorm_locs_trs = convert_theta(mv_pnorm_locs_ori, MV_Thetas_G)
            MV_locs = multiview_locs[:, :, :num_pts].contiguous()
            MV_heatmaps = [x[:, :, :num_pts] for x in multiview_heatmaps]

            if args.debug:
                with torch.no_grad():
                    for ims in range(m_batch_size):
                        x_index = image_index[iv_size + ims]
                        x_paths = [
                            xlist[iv_size + ims] for xlist in MultiViewPaths
                        ]
                        x_mv_locs, p_mv_locs = mv_real_locs_ori[
                            ims], mv_proj_locs_ori[ims]
                        multiview_debug_save(save_dir, '{:}'.format(x_index),
                                             x_paths,
                                             x_mv_locs.cpu().numpy(),
                                             p_mv_locs.cpu().numpy())
                        y_mv_locs = denormalize_points_batch((H, W),
                                                             MV_locs[ims])
                        q_mv_locs = denormalize_points_batch(
                            (H, W), mv_pnorm_locs_trs[ims])
                        temp_tensors = MV_Tensors[ims]
                        temp_images = [
                            args.tensor2imageF(x) for x in temp_tensors
                        ]
                        temp_names = [Path(x).name for x in x_paths]
                        multiview_debug_save_v2(save_dir,
                                                '{:}'.format(x_index),
                                                temp_names, temp_images,
                                                y_mv_locs.cpu().numpy(),
                                                q_mv_locs.cpu().numpy())

            stm_loss, available_nums = calculate_multiview_loss(
                criterion, MV_heatmaps, MV_locs, mv_pnorm_locs_trs, MV_Mask_G,
                stm_config)
            a3d_points.update(
                float(available_nums) / m_batch_size, m_batch_size)
            if available_nums > stm_config.available_stm_thresh:
                MultiviewLosses.update(stm_loss.item(), m_batch_size)
            else:
                stm_loss = 0
        else:
            stm_loss = 0

        # measure accuracy and record loss
        if use_stm[0]:
            total_loss = det_loss + sbr_loss * stm_config.sbr_weights + stm_loss * stm_config.stm_weights
        else:
            total_loss = det_loss + stm_loss * stm_config.stm_weights
        if isinstance(total_loss, numbers.Number):
            warnings.warn(
                'The {:}-th iteration has no detection loss and no lk loss'.
                format(i))
        else:
            TotalLosses.update(total_loss.item(), batch_size)
            # compute gradient and do SGD step
            if mode == 'train':  # training mode
                optimizer.zero_grad()
                total_loss.backward()
                optimizer.step()

        eval_time.update(time.time() - end)

        with torch.no_grad():
            batch_locs = batch_locs.detach().to(cpu)[:, annotate_index, :num_pts]
            batch_scos = batch_scos.detach().to(cpu)[:, annotate_index, :num_pts]
            # evaluate the training data
            for ibatch in range(iv_size):
                imgidx, nopoint = image_index[ibatch], nopoints[ibatch]
                if nopoint == 1: continue
                norm_locs = torch.cat(
                    (batch_locs[ibatch].permute(1, 0), torch.ones(1, num_pts)),
                    dim=0)
                transtheta = transthetas[ibatch][:2, :]
                norm_locs = torch.mm(transtheta, norm_locs)
                real_locs = denormalize_points(shapes[ibatch].tolist(),
                                               norm_locs)
                real_locs = torch.cat(
                    (real_locs, batch_scos[ibatch].view(1, num_pts)), dim=0)

                image_path = loader.dataset.datas[imgidx][annotate_index]
                normDistce = loader.dataset.NormDistances[imgidx]
                xpoints = loader.dataset.labels[imgidx].get_points()
                eval_meta.append(real_locs.numpy(), xpoints.numpy(),
                                 image_path, normDistce)
                if save_dir:
                    # NOTE: the original referenced an undefined `meanthetas`;
                    # `transthetas` is the per-sample transform available in this scope.
                    pro_debug_save(save_dir, Path(image_path).name,
                                   frames[ibatch, annotate_index], targets[ibatch],
                                   normpoints[ibatch], transthetas[ibatch],
                                   batch_heatmaps[-1][ibatch, annotate_index],
                                   args.tensor2imageF)

        # measure elapsed time
        batch_time.update(time.time() - end)
        last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1),
                                      True)
        end = time.time()

        if i % print_freq == 0 or i + 1 == len(loader):
            logger.log(' -->>[{:}]: [{:}][{:03d}/{:03d}] '
                      'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                      'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                      'F-time {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                      'Det {dloss.val:7.4f} ({dloss.avg:7.4f}) '
                      'SBR {sloss.val:7.6f} ({sloss.avg:7.6f}) '
                      'STM {mloss.val:7.6f} ({mloss.avg:7.6f}) '
                      'Loss {loss.val:7.4f} ({loss.avg:7.4f})  '.format(
                          mode, epoch_str, i, len(loader), batch_time=batch_time,
                          data_time=data_time, forward_time=forward_time, \
                          dloss=DetLosses, sloss=TemporalLosses, mloss=MultiviewLosses, loss=TotalLosses)
                        + last_time + each_stage_loss_value \
                        + ' I={:}'.format(list(frames.size())) \
                        + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg) \
                        + ' Ava-PTS : {:.1f} ({:.1f})'.format(alk_points.val, alk_points.avg) \
                        + ' A3D-PTS : {:.1f} ({:.1f})'.format(a3d_points.val, a3d_points.avg) )
            if args.debug:
                logger.log('  -->>Indexes : {:}'.format(image_index))
    nme, _, _ = eval_meta.compute_mse(loader.dataset.dataset_name, logger)
    return TotalLosses.avg, nme
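
A hedged sketch of how this routine might be driven per epoch (names like `train_loader`, `eval_loader`, and `opt_config.epochs` are assumptions, not part of the source; `use_stm` is a pair of flags enabling the temporal and multiview losses respectively):

# Hypothetical epoch loop around stm_main_heatmap.
for epoch in range(opt_config.epochs):                    # assumed attribute
    epoch_str = 'epoch-{:03d}'.format(epoch)
    train_loss, train_nme = stm_main_heatmap(
        args, train_loader, net, criterion, optimizer, epoch_str,
        logger, opt_config, stm_config, use_stm=(True, True), mode='train')
    _, eval_nme = stm_main_heatmap(
        args, eval_loader, net, criterion, optimizer, epoch_str,
        logger, opt_config, stm_config, use_stm=(False, False), mode='test')
    logger.log('epoch {:}: train NME {:.3f} / eval NME {:.3f}'.format(epoch, train_nme, eval_nme))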
Example #3
def generate_300w_list(root, save_dir, box_data, SUFFIXS):
  # Build the 300-W train/common/challenge/full lists plus a mean-shape file.
  assert osp.isdir(root), '{} is not a dir'.format(root)
  #assert osp.isdir(save_dir), '{} is not dir'.format(save_dir)
  if not osp.isdir(save_dir): os.makedirs(save_dir)
  train_length, common_length, challenge_length = 3148, 554, 135
  subsets = ['afw', 'helen', 'ibug', 'lfpw']
  dir_lists = [osp.join(root, subset) for subset in subsets]
  imagelist, num_image = load_list_from_folders(dir_lists, ext_filter=['png', 'jpg', 'jpeg'], depth=3)

  train_set, common_set, challenge_set = [], [], []
  for image_path in imagelist:
    name, ext = osp.splitext(image_path)
    anno_path = name + '.pts'
    assert osp.isfile(anno_path), 'annotation for : {} does not exist'.format(image_path)
    if name.find('ibug') > 0:
      challenge_set.append( (image_path, anno_path) )
    elif name.find('afw') > 0:
      train_set.append( (image_path, anno_path) )
    elif name.find('helen') > 0 or name.find('lfpw') > 0:
      if name.find('trainset') > 0:
        train_set.append( (image_path, anno_path) )
      elif name.find('testset') > 0:
        common_set.append( (image_path, anno_path) )
      else:
        raise Exception('Unknown name : {}'.format(name))
    else:
      raise Exception('Unknown name : {}'.format(name))
  assert len(train_set) == train_length, 'The length is not right for train : {} vs {}'.format(len(train_set), train_length)
  assert len(common_set) == common_length, 'The length is not right for common : {} vs {}'.format(len(common_set), common_length)
  assert len(challenge_set) == challenge_length, 'The length is not right for challenge : {} vs {}'.format(len(challenge_set), challenge_length)

  mean_landmark = {SUFFIX : [[] for _ in range(68)] for SUFFIX in SUFFIXS}

  trainData = []#OrderedDict()
  for cpair in train_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0],
            'name'  : get_name(cpair[0])}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
      for idx in range(68):
        if int(landmarks[0][2, idx] + 0.5) == 0: continue  # skip invisible points
        x, y = float(landmarks[0][0,idx]-box[0]), float(landmarks[0][1,idx]-box[1])
        x, y = normalize_L(x, box[2]-box[0]), normalize_L(y, box[3]-box[1])
        #x, y = x / (box[2]-box[0]), y / (box[3]-box[1])
        mean_landmark[SUFFIX][idx].append( (x,y) )
    data['box-default']    = data['box-GTB']
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    trainData.append( data )
  torch.save(trainData, osp.join(save_dir, '300w.train.pth'))
  for SUFFIX in SUFFIXS:
    allp = mean_landmark[SUFFIX]
    allp = np.array(allp)
    mean_landmark[SUFFIX] = np.mean(allp, axis=1)
    mean_landmark[SUFFIX] = mean_landmark[SUFFIX] * 0.9
    image = draw_points(mean_landmark[SUFFIX], 600, 500, True)
    image.save(osp.join(save_dir, '300w.train-{:}.png'.format(SUFFIX)))
  mean_landmark['default'] = mean_landmark['DET']
  torch.save(mean_landmark, osp.join(save_dir, '300w.train-mean.pth'))
  print ('Training Set   : {:5d} facial images.'.format(len(trainData)))


  commonData = []#OrderedDict()
  for cpair in common_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0]}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
    data['box-default']    = data['box-GTB']
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    commonData.append( data )
    #commonData[cpair[0]] = data
  torch.save(commonData, osp.join(save_dir, '300w.test-common.pth'))
  print ('Common-Test    : {:5d} facial images.'.format(len(commonData)))

  challengeData = [] #OrderedDict()
  for cpair in challenge_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0]}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
    data['box-default']    = data['box-GTB']
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    challengeData.append( data )
  torch.save(challengeData, osp.join(save_dir, '300w.test-challenge.pth'))
  print ('Challenge-Test : {:5d} facial images.'.format(len(challengeData)))

  fullset = copy.deepcopy(commonData) + copy.deepcopy(challengeData)
  #fullset.update( challengeData )
  torch.save(fullset, osp.join(save_dir, '300w.test-full.pth'))
  print ('Full-Test      : {:5d} facial images.'.format(len(fullset)))

  print ('Save all dataset files into {:}'.format(save_dir))

  """
Example #4
def load_video_dir(root, dirs, save_dir, save_name, subsave):
  # Gather per-video frame/annotation pairs into .pth files (with and without
  # landmark points) and accumulate a normalized mean landmark shape.
  save_path = osp.join(save_dir, save_name + '.pth')
  nopoints_save_path = osp.join(save_dir, save_name + '-no-points.pth')
  sub_save_dir = Path(save_dir) / subsave
  sub_save_dir.mkdir(parents=True, exist_ok=True)

  Datas, total_frames = [], 0
  mean_landmark = {'GTL' : [[] for _ in range(68)]}

  #for idx, cdir in enumerate(dirs):
  for idx in tqdm( range(len(dirs)) ):
    cdir       = dirs[idx]
    annot_path = osp.join(root, cdir, 'annot')
    frame_path = osp.join(root, cdir, 'extraction')
    all_frames = glob.glob( osp.join(frame_path, '*.png') )
    all_annots = glob.glob( osp.join(annot_path, '*.pts') )
    assert len(all_frames) == len(all_annots), 'The length is not right for {:} : {:} vs {:}'.format(cdir, len(all_frames), len(all_annots))
    all_frames, all_annots = sorted(all_frames), sorted(all_annots)
    total_frames += len(all_frames)

    XXDatas = []
    for frame, annot in zip(all_frames, all_annots):  # the enumerate index shadowed the outer `idx` and was unused
      basename_f = osp.basename(frame)
      basename_a = osp.basename(annot)
      assert basename_a[:6] == basename_f[:6], 'The name of {:} is not right with {:}'.format(frame, annot)
      landmarks = datasets.dataset_utils.anno_parser(annot, 68)
  
      data = {'points': landmarks[0],
              'name'  : get_name(frame)}
      box  = return_box(annot)
      data['box-GTL'] = box
      data['box-DET'] = box
      data['box-GTB'] = box
      data['box-default'] = box
      data['normalizeL-default'] = None
      for pidx in range(68):  # distinct name: `idx` indexes the video directory above
        if int(landmarks[0][2, pidx] + 0.5) == 0: continue  # skip invisible points
        x, y = float(landmarks[0][0,pidx]-box[0]), float(landmarks[0][1,pidx]-box[1])
        x, y = normalize_L(x, box[2]-box[0]), normalize_L(y, box[3]-box[1])
        mean_landmark['GTL'][pidx].append( (x,y) )

      Previous, Next = get_offset_frame(frame)
      data['previous_frame'] = Previous
      data['current_frame']  = frame
      data['next_frame']     = Next
      #Datas[frame] = data
      XXDatas.append( data )
    torch.save(XXDatas, str(sub_save_dir / (cdir + '.pth')))
    Datas = Datas + XXDatas
  print ('--->>> {:} : {:} datas with {:} frames'.format(save_path, len(Datas), total_frames))
  torch.save(Datas, save_path)
  for data in Datas:
    data['points'] = None
  torch.save(Datas, nopoints_save_path)
  print ('--->>> save no-point data into : {:}'.format(nopoints_save_path))

  mean_landmark['GTL'] = np.mean( np.array(mean_landmark['GTL']), axis=1)
  mean_landmark['GTL'] = mean_landmark['GTL'] * 0.9
  image = draw_points(mean_landmark['GTL'], 600, 500)
  image.save(osp.join(save_dir, save_name + '-GTL.png'))
  torch.save(mean_landmark, osp.join(save_dir, save_name + '-mean.pth'))
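
A hedged usage sketch for a 300-VW-style layout (paths are placeholders; each entry of `dirs` is expected to contain 'annot' and 'extraction' sub-directories):

import os
# Hypothetical invocation; one sub-directory per video under root.
root = '/datasets/300VW'
dirs = sorted(os.listdir(root))
load_video_dir(root, dirs, './lists/300VW', 'all-videos', 'per-video')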
Example #5
def save_to_list_file(allfaces, lst_file, image_style_dir, face_indexes,
                      use_front, USE_BOXES):
    if face_indexes is not None:
        save_faces = []
        for index in face_indexes:
            face = allfaces[index]
            if not use_front or face.check_front():
                save_faces.append(face)
    else:
        save_faces = allfaces
    print('Prepare to save {:05} face images into {:}'.format(
        len(save_faces), lst_file))

    mean_landmark = {SUFFIX: [[] for i in range(19)] for SUFFIX in USE_BOXES}

    Datas = []
    for index, face in enumerate(save_faces):
        image_path = face.image_path
        image_path = osp.join(image_style_dir, image_path)

        landmarks = face.landmarks.T.copy()
        try:
            assert osp.isfile(image_path), \
                'The image [{:}/{:}] {:} does not exist'.format(
                    index, len(save_faces), image_path)
        except AssertionError:
            # some image extensions are incorrect in the annotation list
            image_path = image_path.replace('.png', '.jpg')
            assert osp.isfile(image_path), \
                'The image [{:}/{:}] {:} does not exist'.format(
                    index, len(save_faces), image_path)
        data = {'points': landmarks, 'name': get_name(image_path)}

        for SUFFIX in USE_BOXES:
            box, face_size = face.get_face_size(SUFFIX)
            data['box-{:}'.format(SUFFIX)] = box
            data['face-size-{:}'.format(SUFFIX)] = face_size
            for idx in range(19):
                if int(landmarks[2, idx] + 0.5) == 0: continue  # skip invisible points
                x = float(landmarks[0, idx] - box[0])
                y = float(landmarks[1, idx] - box[1])
                x = normalize_L(x, box[2] - box[0])
                y = normalize_L(y, box[3] - box[1])
                mean_landmark[SUFFIX][idx].append([x, y])
            data['normalizeL-{:}'.format(SUFFIX)] = face_size
        data['box-default'] = data['box-GTL']
        data['normalizeL-default'] = data['normalizeL-GTL']
        data['face-size-default'] = data['face-size-GTL']
        data['previous_frame'] = None
        data['current_frame'] = image_path
        data['next_frame'] = None
        Datas.append(data)  #[image_path] = data
    torch.save(Datas, lst_file + '.pth')

    for SUFFIX in USE_BOXES:
        alls = []
        for idx in range(19):
            allp = mean_landmark[SUFFIX][idx]
            allp = np.array(allp)
            pts = np.mean(allp, axis=0)
            alls.append(pts)
        alls = np.array(alls)
        mean_landmark[SUFFIX] = alls * 0.9
        image = draw_points(mean_landmark[SUFFIX], 600, 500, True)
        image.save(lst_file + '-{:}.png'.format(SUFFIX))
    mean_landmark['default'] = mean_landmark['GTL']
    torch.save(mean_landmark, lst_file + '-mean.pth')
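
A hedged usage sketch (the `allfaces` list would come from an upstream AFLW-style parser; paths are placeholders; USE_BOXES must include 'GTL' because the default entries are copied from the GTL ones):

# Hypothetical invocation; face_indexes=None keeps every parsed face.
save_to_list_file(allfaces, './lists/aflw-train', '/datasets/AFLW/images',
                  face_indexes=None, use_front=False, USE_BOXES=['GTL', 'GTB'])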
Example #6
def Generate_WFLW_LIST(root, save_dir):
    assert osp.isdir(root), '{:} is not dir'.format(root)
    #assert osp.isdir(save_dir), '{} is not dir'.format(save_dir)
    if not osp.isdir(save_dir): os.makedirs(save_dir)
    image_dir = osp.join(root, 'WFLW_images')

    train_list = osp.join(root, 'WFLW_annotations',
                          'list_98pt_rect_attr_train_test',
                          'list_98pt_rect_attr_train.txt')
    with open(train_list) as f:
        content = f.readlines()
        train_faces = [FACE(x.strip()) for x in content]
    assert len(train_faces) == 7500, \
        'invalid number of training faces : {:}'.format(len(train_faces))

    SUFFIXS = ['GTB', 'GTL']
    mean_landmark = {SUFFIX: [[] for i in range(98)] for SUFFIX in SUFFIXS}

    trainData = []  # OrderedDict()
    for face in train_faces:
        landmarks = face.landmarks
        data = {'points': landmarks.T, 'name': face.name}
        for SUFFIX in SUFFIXS:
            box = return_box(landmarks, face.xbox, SUFFIX)
            data['box-{:}'.format(SUFFIX)] = box
            for idx in range(98):
                if int(landmarks[idx, 2] + 0.5) == 0: continue  # skip invisible points
                x = float(landmarks[idx, 0] - box[0])
                y = float(landmarks[idx, 1] - box[1])
                x = normalize_L(x, box[2] - box[0])
                y = normalize_L(y, box[3] - box[1])
                mean_landmark[SUFFIX][idx].append((x, y))
        data['box-default'] = data['box-GTB']
        data['previous_frame'] = None
        data['current_frame'] = osp.join(image_dir, face.name)
        data['next_frame'] = None
        assert osp.isfile(data['current_frame'])
        trainData.append(data)
    torch.save(trainData, osp.join(save_dir, 'train.pth'))
    for SUFFIX in SUFFIXS:
        allp = mean_landmark[SUFFIX]
        allp = np.array(allp)
        mean_landmark[SUFFIX] = np.mean(allp, axis=1)
        mean_landmark[SUFFIX] = mean_landmark[SUFFIX] * 0.9
        image = draw_points(mean_landmark[SUFFIX], 600, 500, True)
        image.save(osp.join(save_dir, 'train-{:}.png'.format(SUFFIX)))
    mean_landmark['default'] = mean_landmark['GTB']
    torch.save(mean_landmark, osp.join(save_dir, 'train-mean.pth'))
    print('Training Set   : {:5d} facial images.'.format(len(trainData)))

    test_list = osp.join(root, 'WFLW_annotations',
                         'list_98pt_rect_attr_train_test',
                         'list_98pt_rect_attr_test.txt')
    with open(test_list) as f:
        content = f.readlines()
        test_faces = [FACE(x.strip()) for x in content]
    assert len(test_faces) == 2500, \
        'invalid number of test faces : {:}'.format(len(test_faces))
    testData = []
    for face in test_faces:
        landmarks = face.landmarks
        data = {'points': landmarks.T, 'name': face.name}
        for SUFFIX in SUFFIXS:
            box = return_box(landmarks, face.xbox, SUFFIX)
            data['box-{:}'.format(SUFFIX)] = box
        data['box-default'] = data['box-GTB']
        data['previous_frame'] = None
        data['current_frame'] = osp.join(image_dir, face.name)
        data['next_frame'] = None
        assert osp.isfile(data['current_frame'])
        testData.append(data)
    torch.save(testData, osp.join(save_dir, 'test.pth'))
    print('Test Set       : {:5d} facial images.'.format(len(testData)))
    print('Save all dataset files into {:}'.format(save_dir))
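
A hedged usage sketch (the root path is a placeholder; the function expects the official WFLW layout with WFLW_images/ and WFLW_annotations/ sub-directories):

# Hypothetical invocation.
Generate_WFLW_LIST('/datasets/WFLW', './lists/WFLW')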