import numpy as np

def anno_parser_v0(anno_path, num_pts):
  '''
  parse the annotation for the 300-W dataset, which has a fixed format for .pts files
  return:
    pts: 3 x num_pts (x, y, occlusion)
  '''
  data, num_lines = load_txt_file(anno_path)
  assert data[0].find('version: ') == 0, 'version is not correct'
  assert data[1].find('n_points: ') == 0, 'number of points in second line is not correct'
  assert data[2] == '{' and data[-1] == '}', 'start and end symbols are not correct'

  assert data[0] == 'version: 1' or data[0] == 'version: 1.0', 'The version is wrong : {}'.format(data[0])
  n_points = int(data[1][len('n_points: '):])

  assert num_lines == n_points + 4, 'number of lines is not correct'    # 4 lines of general information: version, n_points, start and end symbols
  assert num_pts == n_points, 'number of points is not correct'

  # read point coordinates
  pts = np.zeros((3, n_points), dtype='float32')
  line_offset = 3    # the first point starts at the fourth line
  point_set = set()
  for point_index in range(n_points):
    try:
      pts_list = data[point_index + line_offset].split(' ')       # x y format
      if len(pts_list) > 2:    # handle the edge case where additional whitespace follows the point coordinates
        pts_list = remove_item_from_list(pts_list, '')
      pts[0, point_index] = float(pts_list[0])
      pts[1, point_index] = float(pts_list[1])
      pts[2, point_index] = float(1)      # occlusion flag, 0: occluded, 1: visible; set to 1 for all points since 300-W provides no visibility information
      point_set.add(point_index)
    except ValueError:
      print('error in loading points in %s' % anno_path)
  return pts, point_set
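
# The parsers in this listing rely on two helpers that are not shown here. The sketches below
# are assumptions about their behavior, inferred from the call sites: load_txt_file returns
# the stripped, non-empty lines of a text file together with their count, and
# remove_item_from_list drops every occurrence of an item (here, the empty strings produced
# by repeated whitespace) from a list.

def load_txt_file(file_path):
  # assumed helper: read a text file and return (lines, number_of_lines)
  with open(file_path, 'r') as f:
    data = [line.strip('\r\n') for line in f.readlines()]
  data = [line for line in data if len(line) > 0]
  return data, len(data)

def remove_item_from_list(items, target):
  # assumed helper: return a copy of the list with every occurrence of target removed
  return [item for item in items if item != target]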
def anno_parser_v1(anno_path, NUM_PTS, one_base=True):
  '''
  parse the annotation for the MUGSY-Full-Face dataset, which has a fixed format for .pts files
  return: pts: 3 x num_pts (x, y, occlusion)
  '''
  data, n_points = load_txt_file(anno_path)
  assert n_points <= NUM_PTS, '{} has {} points'.format(anno_path, n_points)
  # read point coordinates
  pts = np.zeros((3, NUM_PTS), dtype='float32')
  point_set = set()
  for line in data:
    try:
      idx, point_x, point_y, occlusion = line.split(' ')
      idx, point_x, point_y, occlusion = int(idx), float(point_x), float(point_y), occlusion == 'True'
      if not one_base: idx = idx + 1    # convert zero-based indices to one-based
      assert idx >= 1 and idx <= NUM_PTS, 'Wrong idx of points : {:02d}-th in {:s}'.format(idx, anno_path)
      pts[0, idx - 1] = point_x
      pts[1, idx - 1] = point_y
      pts[2, idx - 1] = float(occlusion)
      point_set.add(idx)
    except ValueError:
      raise Exception('error in loading points in {}'.format(anno_path))
  return pts, point_set
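
# A minimal usage sketch for anno_parser_v1, assuming each line of a MUGSY-style .pts file
# reads "<index> <x> <y> <True|False>" with one-based indices and the last field marking
# visibility. The file name 'example_v1.pts' and the 3-point layout are purely illustrative.
with open('example_v1.pts', 'w') as f:
  f.write('1 102.5 87.3 True\n')
  f.write('2 110.1 90.8 False\n')
  f.write('3 95.0 120.4 True\n')
pts, point_set = anno_parser_v1('example_v1.pts', NUM_PTS=3, one_base=True)
print(pts.shape)          # (3, 3)
print(sorted(point_set))  # [1, 2, 3]
print(pts[2])             # [1. 0. 1.] -- the occlusion/visibility row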
def anno_parser_v2(anno_path, NUM_PTS):
  '''
  parse the annotation for the MUGSY-Full-Face dataset, which has a fixed format for .pts files
  return: pts: 3 x num_pts (x, y, occlusion)
  '''
  data, n_points = load_txt_file(anno_path)
  assert n_points == NUM_PTS, '{:} has {:} points'.format(anno_path, n_points)
  # read point coordinates
  pts = np.zeros((3, NUM_PTS), dtype='float32')
  point_set = set()
  for line in data:
    idx, point_x, point_y, annotated = line.split(' ')
    assert annotated == 'True' or annotated == 'False', 'invalid annotated : {:}'.format(annotated)
    idx, point_x, point_y, annotated = int(idx), float(point_x), float(point_y), annotated == 'True'
    assert idx >= 0 and idx < NUM_PTS, 'Wrong idx of points : {:02d}-th in {:s}'.format(idx, anno_path)
    if point_x > 0 and point_y > 0 and annotated:
      pts[0, idx] = point_x
      pts[1, idx] = point_y
      pts[2, idx] = True
    else:
      pts[2, idx] = False
    if annotated: point_set.add(idx)
  return pts, point_set
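
# anno_parser_v2 differs from anno_parser_v1 in three ways: indices are zero-based, the file
# must contain exactly NUM_PTS lines, and only points annotated at positive coordinates get a
# non-zero occlusion flag (point_set tracks every annotated index). A minimal sketch, assuming
# the illustrative file name 'example_v2.pts' and a 3-point layout:
with open('example_v2.pts', 'w') as f:
  f.write('0 102.5 87.3 True\n')
  f.write('1 -1.0 -1.0 False\n')
  f.write('2 95.0 120.4 True\n')
pts, point_set = anno_parser_v2('example_v2.pts', NUM_PTS=3)
print(sorted(point_set))  # [0, 2] -- index 1 is not annotated
print(pts[:, 1])          # [0. 0. 0.] -- the unannotated point stays at zero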
def anno_parser(anno_path, num_pts):
  data, num_lines = load_txt_file(anno_path)
  if data[0].find('version: ') == 0:  # 300-W
    return anno_parser_v0(anno_path, num_pts)
  else:
    return anno_parser_v1(anno_path, num_pts)
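
# The dispatcher chooses a parser from the first line of the file: 300-W .pts files start with
# a 'version:' header, and anything else falls through to the MUGSY format of anno_parser_v1.
# A minimal sketch, assuming the illustrative file name 'example_300w.pts' and a 3-point
# layout instead of the 68 points of the real 300-W annotations:
with open('example_300w.pts', 'w') as f:
  f.write('version: 1\n')
  f.write('n_points: 3\n')
  f.write('{\n')
  f.write('102.5 87.3\n')
  f.write('110.1 90.8\n')
  f.write('95.0 120.4\n')
  f.write('}\n')
pts, point_set = anno_parser('example_300w.pts', num_pts=3)
print(pts.shape)  # (3, 3)
print(pts[2])     # [1. 1. 1.] -- 300-W gives no visibility, so every point is marked visible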