Code example #1
def generate(demo_dir, list_dir, savename, check):
  """Build and serialize the per-frame annotation list for a demo video.

  Args:
    demo_dir : directory scanned (depth 1) for the demo PNG frames.
    list_dir : output directory; created if it does not exist.
    savename : file name for the serialized list (written via torch.save).
    check    : expected number of images; asserted against the scan result.
  """
  imagelist, num_image = load_list_from_folders(demo_dir, ext_filter=['png'], depth=1)
  # BUG FIX: the original message said "vs." but formatted only num_image;
  # report both the found and the expected count.
  assert num_image == check, 'The number of images is not right : {:} vs {:}'.format(num_image, check)
  if not osp.isdir(list_dir): os.makedirs(list_dir)

  # Fixed crop rectangle and inner padding applied to every demo frame.
  gap, x1, y1, x2, y2 = 5, 5, 5, 450, 680

  imagelist.sort()

  DemoData = []
  for idx, image in enumerate(imagelist):
    # Keep a two-frame margin at both ends so idx-1 and idx+1 always exist.
    if idx < 2 or idx + 2 >= len(imagelist): continue
    data = {'points' : None,
            'box'    : [gap, gap, x2-x1-gap, y2-y1-gap],
            'box-DET': [gap, gap, x2-x1-gap, y2-y1-gap],
            'box-default': [gap, gap, x2-x1-gap, y2-y1-gap],
            # geometric size of the padded box, used as a normalization length
            'normalizeL-default': math.sqrt( (x2-x1-2*gap) * (y2-y1-2*gap) ),
            'name'   : 'demo'}
    data['previous_frame'] = imagelist[idx-1]
    data['current_frame']  = imagelist[idx]
    data['next_frame']     = imagelist[idx+1]
    DemoData.append( data )
  savepath = osp.join(list_dir, savename)
  torch.save(DemoData, savepath)
  print('there are {:} images for the demo video sequence, and save them into {:}.'.format(num_image, savepath))
Code example #2
def _write_pair_list(save_path, pairs, box_data, SUFFIX):
  """Write one '<image> <annotation> <box>' line per (image, annotation) pair."""
  with open(save_path, 'w') as txtfile:
    for image_path, anno_path in pairs:
      box_str = return_box(image_path, anno_path, box_data, SUFFIX)
      txtfile.write('{} {} {}\n'.format(image_path, anno_path, box_str))


def generate_300w_list(root, save_dir, box_data, SUFFIX):
  """Generate the 300-W train / common / challenge / full list files.

  Args:
    root     : dataset root containing the afw/helen/ibug/lfpw folders.
    save_dir : output directory; created if it does not exist.
    box_data : box lookup structure, passed through to return_box.
    SUFFIX   : box-type suffix, used for return_box and the output file names.
  """
  assert osp.isdir(root), '{} is not dir'.format(root)
  if not osp.isdir(save_dir): os.makedirs(save_dir)
  # Canonical 300-W split sizes, used as a sanity check below.
  train_length, common_length, challenge_length = 3148, 554, 135
  subsets = ['afw', 'helen', 'ibug', 'lfpw']
  dir_lists = [osp.join(root, subset) for subset in subsets]
  imagelist, num_image = load_list_from_folders(dir_lists, ext_filter=['png', 'jpg', 'jpeg'], depth=3)

  train_set, common_set, challenge_set = [], [], []
  for image_path in imagelist:
    name, ext = osp.splitext(image_path)
    anno_path = name + '.pts'
    assert osp.isfile(anno_path), 'annotation for : {} does not exist'.format(image_path)
    # Route each image into its split from the folder names embedded in its path.
    if name.find('ibug') > 0:
      challenge_set.append( (image_path, anno_path) )
    elif name.find('afw') > 0:
      train_set.append( (image_path, anno_path) )
    elif name.find('helen') > 0 or name.find('lfpw') > 0:
      if name.find('trainset') > 0:
        train_set.append( (image_path, anno_path) )
      elif name.find('testset') > 0:
        common_set.append( (image_path, anno_path) )
      else:
        raise Exception('Unknow name : {}'.format(name))
    else:
      raise Exception('Unknow name : {}'.format(name))
  assert len(train_set) == train_length, 'The length is not right for train : {} vs {}'.format(len(train_set), train_length)
  assert len(common_set) == common_length, 'The length is not right for common : {} vs {}'.format(len(common_set), common_length)
  # BUG FIX: the original message reported the common-set numbers here.
  assert len(challenge_set) == challenge_length, 'The length is not right for challenge : {} vs {}'.format(len(challenge_set), challenge_length)

  # The four lists only differ in which pairs they contain; the redundant
  # txtfile.close() calls after each 'with' block were dropped.
  _write_pair_list(osp.join(save_dir, '300w.train.' + SUFFIX), train_set, box_data, SUFFIX)
  _write_pair_list(osp.join(save_dir, '300w.test.common.' + SUFFIX), common_set, box_data, SUFFIX)
  _write_pair_list(osp.join(save_dir, '300w.test.challenge.' + SUFFIX), challenge_set, box_data, SUFFIX)
  _write_pair_list(osp.join(save_dir, '300w.test.full.' + SUFFIX), common_set + challenge_set, box_data, SUFFIX)
Code example #3
def make_300W_train_list(root, box_data, output_file_name):
  """Write one '<image> <pts annotation> <box>' line per 300-W image found under root."""
  folders = [osp.join(root, name) for name in ['afw', 'helen', 'ibug', 'lfpw']]
  images, _ = load_list_from_folders(folders, ext_filter=['png', 'jpg', 'jpeg'], depth=3)

  with open(output_file_name, 'w') as handle:
    for img in images:
      # the annotation lives next to the image, with a .pts extension
      pts = osp.splitext(img)[0] + '.pts'
      handle.write('{} {} {}\n'.format(img, pts, return_box(img, pts, box_data)))
Code example #4
def generate(demo_dir, list_dir, savename, check):
    """Write a plain-text annotation list for a demo video sequence.

    Each usable frame produces one line '<image> none <box>' where the box
    is a fixed crop rectangle shared by all frames.

    Args:
        demo_dir : directory scanned (depth 1) for the demo PNG frames.
        list_dir : output directory; created if it does not exist.
        savename : name of the text file to write inside list_dir.
        check    : expected number of images; asserted against the scan result.
    """
    imagelist, num_image = load_list_from_folders(demo_dir,
                                                  ext_filter=['png'],
                                                  depth=1)
    # BUG FIX: the original message said "vs." but formatted only num_image;
    # report both the found and the expected count.
    assert num_image == check, 'The number of images is not right : {:} vs {:}'.format(
        num_image, check)
    if not osp.isdir(list_dir): os.makedirs(list_dir)

    # Fixed crop rectangle and inner padding applied to every demo frame.
    gap, x1, y1, x2, y2 = 5, 5, 5, 450, 680

    imagelist.sort()

    # The box string does not depend on the frame, so build it once.
    box_str = '{:.1f} {:.1f} {:.1f} {:.1f}'.format(gap, gap, x2 - x1 - gap,
                                                   y2 - y1 - gap)
    # Context manager guarantees the file is closed even if a write fails;
    # the per-line flush() was dropped — close() flushes once at the end.
    with open(osp.join(list_dir, savename), 'w') as txtfile:
        for idx, image in enumerate(imagelist):
            # Keep a two-frame margin at both ends of the sequence.
            if idx < 2 or idx + 2 >= len(imagelist): continue
            txtfile.write('{:} {:} {:}\n'.format(image, 'none', box_str))
    print('there are {:} images for the demo video sequence'.format(num_image))
Code example #5
def generate_300w_list(root, save_dir, box_data, SUFFIXS):
  """Serialize the 300-W splits (train / common / challenge / full) as .pth files.

  For every image a dict is built holding its 68-point annotation, one box
  per suffix in SUFFIXS, and previous/current/next frame fields (None for
  still images).  The train pass additionally accumulates a mean landmark
  layout per suffix and saves a visualization of it.

  Args:
    root     : dataset root containing the afw/helen/ibug/lfpw folders.
    save_dir : output directory; created if it does not exist.
    box_data : box lookup structure, passed through to return_box.
    SUFFIXS  : iterable of box-type suffixes.
               NOTE(review): the code indexes 'box-GTB' and mean_landmark['DET'],
               so SUFFIXS must contain 'GTB' and 'DET' — confirm at call sites.
  """
  assert osp.isdir(root), '{} is not dir'.format(root)
  if not osp.isdir(save_dir): os.makedirs(save_dir)
  # Canonical 300-W split sizes, used as a sanity check below.
  train_length, common_length, challenge_length = 3148, 554, 135
  subsets = ['afw', 'helen', 'ibug', 'lfpw']
  dir_lists = [osp.join(root, subset) for subset in subsets]
  imagelist, num_image = load_list_from_folders(dir_lists, ext_filter=['png', 'jpg', 'jpeg'], depth=3)

  train_set, common_set, challenge_set = [], [], []
  for image_path in imagelist:
    name, ext = osp.splitext(image_path)
    anno_path = name + '.pts'
    assert osp.isfile(anno_path), 'annotation for : {} does not exist'.format(image_path)
    # Route each image into its split from the folder names embedded in its path.
    if name.find('ibug') > 0:
      challenge_set.append( (image_path, anno_path) )
    elif name.find('afw') > 0:
      train_set.append( (image_path, anno_path) )
    elif name.find('helen') > 0 or name.find('lfpw') > 0:
      if name.find('trainset') > 0:
        train_set.append( (image_path, anno_path) )
      elif name.find('testset') > 0:
        common_set.append( (image_path, anno_path) )
      else:
        raise Exception('Unknow name : {}'.format(name))
    else:
      raise Exception('Unknow name : {}'.format(name))
  assert len(train_set) == train_length, 'The length is not right for train : {} vs {}'.format(len(train_set), train_length)
  assert len(common_set) == common_length, 'The length is not right for common : {} vs {}'.format(len(common_set), common_length)
  # BUG FIX: the original message reported the common-set numbers here.
  assert len(challenge_set) == challenge_length, 'The length is not right for challenge : {} vs {}'.format(len(challenge_set), challenge_length)

  # Per suffix: 68 lists of normalized (x, y) landmark positions.
  mean_landmark = {SUFFIX : [[]for i in range(68)] for SUFFIX in SUFFIXS}

  trainData = []
  for cpair in train_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0],
            'name'  : get_name(cpair[0])}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
      for idx in range(68):
        # row 2 of the landmark array flags visibility; skip invisible points
        if int(landmarks[0][2, idx] + 0.5) == 0: continue
        # translate into the box frame, then normalize by the box extent
        x, y = float(landmarks[0][0,idx]-box[0]), float(landmarks[0][1,idx]-box[1])
        x, y = normalize_L(x, box[2]-box[0]), normalize_L(y, box[3]-box[1])
        mean_landmark[SUFFIX][idx].append( (x,y) )
    # assumes 'GTB' is one of SUFFIXS (see docstring)
    data['box-default']    = data['box-GTB']
    # still images: no temporal neighbours
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    trainData.append( data )
  torch.save(trainData, osp.join(save_dir, '300w.train.pth'))
  for SUFFIX in SUFFIXS:
    # average the per-landmark samples; shrink by 0.9 before visualization
    allp = mean_landmark[SUFFIX]
    allp = np.array(allp)
    mean_landmark[SUFFIX] = np.mean(allp, axis=1)
    mean_landmark[SUFFIX] = mean_landmark[SUFFIX] * 0.9
    image = draw_points(mean_landmark[SUFFIX], 600, 500, True)
    image.save(osp.join(save_dir, '300w.train-{:}.png'.format(SUFFIX)))
  # assumes 'DET' is one of SUFFIXS (see docstring)
  mean_landmark['default'] = mean_landmark['DET']
  torch.save(mean_landmark, osp.join(save_dir, '300w.train-mean.pth'))
  print ('Training Set   : {:5d} facial images.'.format(len(trainData)))


  commonData = []
  for cpair in common_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0]}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
    data['box-default']    = data['box-GTB']
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    commonData.append( data )
  torch.save(commonData, osp.join(save_dir, '300w.test-common.pth'))
  print ('Common-Test    : {:5d} facial images.'.format(len(commonData)))

  challengeData = []
  for cpair in challenge_set:
    landmarks = datasets.dataset_utils.anno_parser(cpair[1], 68)
    data = {'points': landmarks[0]}
    for SUFFIX in SUFFIXS:
      box = return_box(cpair[0], cpair[1], box_data, SUFFIX)
      data['box-{:}'.format(SUFFIX)] = box
    data['box-default']    = data['box-GTB']
    data['previous_frame'] = None
    data['current_frame']  = cpair[0]
    data['next_frame']     = None
    challengeData.append( data )
  torch.save(challengeData, osp.join(save_dir, '300w.test-challenge.pth'))
  print ('Challenge-Test : {:5d} facial images.'.format(len(challengeData)))

  # deep-copy so mutating the full set later cannot alias the saved splits
  fullset = copy.deepcopy(commonData) + copy.deepcopy(challengeData)
  torch.save(fullset, osp.join(save_dir, '300w.test-full.pth'))
  print ('Full-Test      : {:5d} facial images.'.format(len(fullset)))

  print ('Save all dataset files into {:}'.format(save_dir))

Code example #6
File: generate_300W.py  Project: guyafeng/SAN
def generage_300w_list(root, save_dir, box_data, SUFFIX):
    """Generate the 300-W train / common / challenge / full / all list files.

    Also scans the 300-W indoor/outdoor folders and checks they hold 300
    images each; those lists are used only for that validation here.

    NOTE(review): the function name carries a typo ('generage'); it is kept
    so existing callers are not broken.

    Args:
        root     : dataset root containing afw/helen/ibug/lfpw and 300W/.
        save_dir : output directory; created if it does not exist.
        box_data : box lookup structure, passed through to return_box.
        SUFFIX   : box-type suffix, used for return_box and the file names.
    """
    assert osp.isdir(root), '{} is not dir'.format(root)
    if not osp.isdir(save_dir): os.makedirs(save_dir)
    # Canonical 300-W split sizes, used as a sanity check below.
    train_length, common_length, challeng_length = 3148, 554, 135
    subsets = ['afw', 'helen', 'ibug', 'lfpw']
    dir_lists = [osp.join(root, subset) for subset in subsets]
    imagelist, num_image = load_list_from_folders(
        dir_lists, ext_filter=['png', 'jpg', 'jpeg'], depth=3)

    # Validation only: the 300-W indoor/outdoor sets must contain 300 each.
    indoor, indoor_num = load_list_from_folders(
        [osp.join(root, '300W', '01_Indoor')],
        ext_filter=['png', 'jpg', 'jpeg'],
        depth=3)
    otdoor, otdoor_num = load_list_from_folders(
        [osp.join(root, '300W', '02_Outdoor')],
        ext_filter=['png', 'jpg', 'jpeg'],
        depth=3)
    assert indoor_num == 300 and otdoor_num == 300, 'The number of images are not right for 300-W-IO: {} & {}'.format(
        indoor_num, otdoor_num)

    train_set, common_set, challeng_set = [], [], []
    for image_path in imagelist:
        name, ext = osp.splitext(image_path)
        anno_path = name + '.pts'
        assert osp.isfile(
            anno_path), 'annotation {} for : {} does not exist'.format(
                image_path, anno_path)
        # Route each image into its split from the folder names in its path.
        if name.find('ibug') > 0:
            challeng_set.append((image_path, anno_path))
        elif name.find('afw') > 0:
            train_set.append((image_path, anno_path))
        elif name.find('helen') > 0 or name.find('lfpw') > 0:
            if name.find('trainset') > 0:
                train_set.append((image_path, anno_path))
            elif name.find('testset') > 0:
                common_set.append((image_path, anno_path))
            else:
                raise Exception('Unknow name : {}'.format(name))
        else:
            raise Exception('Unknow name : {}'.format(name))
    assert len(
        train_set
    ) == train_length, 'The length is not right for train : {} vs {}'.format(
        len(train_set), train_length)
    assert len(
        common_set
    ) == common_length, 'The length is not right for common : {} vs {}'.format(
        len(common_set), common_length)
    # BUG FIX: the original message reported the common-set numbers here.
    assert len(
        challeng_set
    ) == challeng_length, 'The length is not right for challeng : {} vs {}'.format(
        len(challeng_set), challeng_length)

    # all_lines mirrors everything written, so the '.all' file is the
    # concatenation of train + common + challenge + full.
    # (The redundant txtfile.close() calls after each 'with' were dropped.)
    all_lines = []
    with open(osp.join(save_dir, '300w.train.' + SUFFIX), 'w') as txtfile:
        for cpair in train_set:
            box_str = return_box(cpair[0], cpair[1], box_data, SUFFIX)
            txtfile.write('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
            all_lines.append('{} {} {}\n'.format(cpair[0], cpair[1], box_str))

    with open(osp.join(save_dir, '300w.test.common.' + SUFFIX),
              'w') as txtfile:
        for cpair in common_set:
            box_str = return_box(cpair[0], cpair[1], box_data, SUFFIX)
            txtfile.write('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
            all_lines.append('{} {} {}\n'.format(cpair[0], cpair[1], box_str))

    with open(osp.join(save_dir, '300w.test.challenge.' + SUFFIX),
              'w') as txtfile:
        for cpair in challeng_set:
            box_str = return_box(cpair[0], cpair[1], box_data, SUFFIX)
            txtfile.write('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
            all_lines.append('{} {} {}\n'.format(cpair[0], cpair[1], box_str))

    # 'full' = common followed by challenge
    with open(osp.join(save_dir, '300w.test.full.' + SUFFIX), 'w') as txtfile:
        for cpair in common_set:
            box_str = return_box(cpair[0], cpair[1], box_data, SUFFIX)
            txtfile.write('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
            all_lines.append('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
        for cpair in challeng_set:
            box_str = return_box(cpair[0], cpair[1], box_data, SUFFIX)
            txtfile.write('{} {} {}\n'.format(cpair[0], cpair[1], box_str))
            all_lines.append('{} {} {}\n'.format(cpair[0], cpair[1], box_str))

    with open(osp.join(save_dir, '300w.all.' + SUFFIX), 'w') as txtfile:
        for line in all_lines:
            txtfile.write('{}'.format(line))