def main(mtcnn_model_dir, save_dir=None):
    if save_dir is None:
        save_dir = './result_after_align'

    if not osp.exists(save_dir):
        print('mkdir for aligned faces, aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp = open('./dataset-gender-all.json', 'r')
    all_lines = fp.readlines()

    # resume from line 25499 of the list
    count = 25499
    for line in all_lines[25499:]:
        count = count + 1
        print count

        data = json.loads(line)
        image_url = data["url"]
        image_bbox = data["label"]["detect"]["general_d"]["bbox"]
        # "label":{"detect":{"general_d":{"bbox":[{"class":"male","pts":
        print image_url
        print image_bbox
        # print len(image_bbox)

        for idx in range(len(image_bbox)):
            if image_bbox[idx] == []:
                continue

            img = io.imread(image_url)
            if len(img.shape) != 3:
                # skip non-RGB (e.g. grayscale) images
                continue

            file_idx = "%06d" % count
            print file_idx
            filename = str(file_idx) + '_' + str(idx) + '.json'
            fp_rlt = open(osp.join(save_dir, filename), 'w')
            item = {}

            print('===> Processing image: ' + filename)
            per_bbox = image_bbox[idx]["pts"]
            print "bbox:", per_bbox
            # opposite corners of the labeled box -> [x1, y1, x2, y2]
            GT_RECT = [per_bbox[0][0], per_bbox[0][1],
                       per_bbox[2][0], per_bbox[2][1]]
            print "GT:", GT_RECT

            boxes, points = aligner.align_face(img, [GT_RECT])
            box = boxes[0]
            pts = points[0]

            item['url'] = image_url
            item['class'] = image_bbox[idx]["class"]
            item['detect'] = {'rect': GT_RECT, 'pts': pts}
            print "item:", item

            json.dump(item, fp_rlt, indent=4)
            fp_rlt.close()
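The loop above assumes each line of dataset-gender-all.json is a standalone JSON object. Reconstructed from the fields the code accesses, one input line might look roughly like the sketch below; the URL and coordinates are made up, "pts" holds the four corners of the box, and the real schema may contain extra fields.

import json

# Hypothetical example of one input line; the first and third corners of
# "pts" are used above as [x1, y1, x2, y2].
example_line = {
    "url": "http://example.com/imgs/000123.jpg",
    "label": {"detect": {"general_d": {"bbox": [
        {"class": "male",
         "pts": [[34, 50], [180, 50], [180, 230], [34, 230]]}
    ]}}}
}
print(json.dumps(example_line))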
class FaceAligner:
    def __init__(self, caffe_model_path=None):
        self.aligner = None
        if caffe_model_path:
            self.aligner = MtcnnAligner(caffe_model_path)

    def align_face(self, img, face_rects):
        if isinstance(img, str):
            img = cv2.imread(img)

        regressed_rects, facial_points = self.aligner.align_face(
            img, face_rects)

        return (regressed_rects, facial_points)

    def get_face_chips(self, img, face_rects, facial_points=None):
        if facial_points is None:
            if self.aligner is None:
                raise Exception('FaceAligner.aligner is not initialized')

            rects, facial_points = self.aligner.align_face(img, face_rects)

        reference_5pts = None
        output_size = (96, 112)  # (w, h) not (h, w)

        face_chips = []
        for facial_5pts in facial_points:
            facial_5pts = np.reshape(facial_5pts, (2, -1))
            dst_img = warp_and_crop_face(img, facial_5pts, reference_5pts,
                                         output_size)
            face_chips.append(dst_img)

        return face_chips
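A minimal usage sketch for the FaceAligner wrapper above. The model directory, image path, and rectangle are placeholder assumptions (the other scripts here pass a Caffe model directory such as "../../model" and [x1, y1, x2, y2] rects); this is illustrative, not part of the original code.

import cv2

# Placeholder inputs -- adjust to a real model directory, image and rect.
aligner = FaceAligner('../../model')
img = cv2.imread('test_img.jpg')
face_rects = [[50, 60, 180, 220]]  # one [x1, y1, x2, y2] face rect

# Regressed boxes and 5-point landmarks for each input rect.
rects, landmarks = aligner.align_face(img, face_rects)

# 96x112 aligned face chips, ready to save or feed to a recognizer.
chips = aligner.get_face_chips(img, face_rects)
for i, chip in enumerate(chips):
    cv2.imwrite('face_chip_%d.jpg' % i, chip)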
def main(img_path, face_rects, save_dir=None, save_img=True, show_img=True):
    if save_dir is None:
        save_dir = './fa_test_rlt'

    save_json = 'mtcnn_align_rlt.json'
    caffe_model_path = "../../model"

    if not osp.exists(save_dir):
        os.makedirs(save_dir)

    fp_rlt = open(osp.join(save_dir, save_json), 'w')
    results = []

    print '===> Processing image: ' + img_path
    img = cv2.imread(img_path)

    aligner = MtcnnAligner(caffe_model_path, False)

    rlt = {}
    rlt["filename"] = img_path
    rlt["faces"] = []
    rlt['face_count'] = 0

    t1 = time.clock()
    bboxes, points = aligner.align_face(img, face_rects)
    t2 = time.clock()

    n_boxes = len(face_rects)
    print("-->Alignment cost %f seconds, processed %d face rects, avg time: %f seconds"
          % ((t2 - t1), n_boxes, (t2 - t1) / n_boxes))

    if bboxes is not None and len(bboxes) > 0:
        for (box, pts) in zip(bboxes, points):
            # box = box.tolist()
            # pts = pts.tolist()
            tmp = {'rect': box[0:4],
                   'score': box[4],
                   'pts': pts}
            rlt['faces'].append(tmp)
        rlt['face_count'] = len(bboxes)

    rlt['message'] = 'success'
    results.append(rlt)

    if save_img or show_img:
        draw_faces(img, bboxes, points)

    if save_img:
        save_name = osp.join(save_dir, osp.basename(img_path))
        cv2.imwrite(save_name, img)

    if show_img:
        cv2.imshow('img', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    json.dump(results, fp_rlt, indent=4)
    fp_rlt.close()
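A hedged example of how this test entry point might be driven. The image path and rectangle below are made-up placeholders; the rect follows the [x1, y1, x2, y2] convention used by the other scripts in this repo.

if __name__ == '__main__':
    # Placeholder test image and ground-truth face rect.
    test_img = './test_imgs/girl.jpg'
    test_rects = [[91, 57, 173, 158]]  # [x1, y1, x2, y2]
    main(test_img, test_rects,
         save_dir='./fa_test_rlt', save_img=True, show_img=False)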
def main(face_json_file, mtcnn_model_dir, save_dir=None):
    if not osp.exists(save_dir):
        print('mkdir for aligned faces, aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    if not osp.exists(aligned_save_dir):
        print('mkdir for aligned faces, aligned images dir: ', aligned_save_dir)
        os.makedirs(aligned_save_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    # root_dir = '/disk2/du/face-asian/'
    # path_walk = os.walk(root_dir)
    fp = open('./face_asian_list.txt', 'r')
    all_lines = fp.readlines()
    print len(all_lines)

    # resume from line 180000 and stop at line 360000 of the list
    count = 180001
    for line in all_lines[180000:360000]:
        print line
        print count
        count = count + 1

        line = line.strip()  # drop the trailing line ending

        # the parent folder name is used as the sub-directory to save into
        save_path = line.split('/')[-2]
        print osp.join(save_dir, save_path)
        if not osp.exists(osp.join(save_dir, save_path)):
            os.makedirs(osp.join(save_dir, save_path))

        save_img_fn = line.split('/')[-2]
        save_fn = aligned_save_dir + '/' + save_img_fn + '/' + line.split('/')[-1]
        print save_fn
        if not osp.exists(osp.join(aligned_save_dir, save_img_fn)):
            os.makedirs(osp.join(aligned_save_dir, save_img_fn))

        filename = osp.splitext(line.split('/')[-1])[0] + '.json'
        print filename
        fp_rlt = open(osp.join(save_dir, save_path, filename), 'w')
        item = {}

        print('===> Processing image: ' + save_path + '/' + filename)
        image = cv2.imread(line)
        print image.shape

        # use a fixed 256x256 rect as the face region
        GT_RECT = [0, 0, 256, 256]
        boxes, points = aligner.align_face(image, [GT_RECT])
        box = boxes[0]
        pts = points[0]

        facial5points = np.reshape(pts, (2, -1))
        dst_img = warp_and_crop_face(image, facial5points, reference_5pts,
                                     output_size)
        cv2.imwrite(save_fn, dst_img)

        tmp = {'rect': box[0:4],
               'score': box[4],
               'pts': pts}
        item['faces'] = tmp
        item['shape'] = image.shape

        json.dump(item, fp_rlt, indent=4)
        fp_rlt.close()
def __init__(self, caffe_model_path=None):
    self.aligner = None
    if caffe_model_path:
        self.aligner = MtcnnAligner(caffe_model_path)
def main(args):
    rect_root_dir = args.rect_root_dir
    save_dir = args.save_dir
    mtcnn_model_dir = args.mtcnn_model_dir
    img_root_dir = args.image_root_dir
    gpu_id = args.gpu_id

    MAX_LINE = 500

    rect_list = get_rect_list(rect_root_dir)
    print('all %d images' % (len(rect_list)))

    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('makedirs for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('makedirs for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('makedirs for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, True, gpu_id=gpu_id)

    for i in range(len(rect_list)):
        with open(osp.join(rect_root_dir, rect_list[i]), 'r') as f:
            contents = json.load(f)

        img_name = contents['filename']
        face_count = contents['face_count']
        if face_count == 0:
            print('no face detected: %s' % img_name)
            continue

        img = cv2.imread(img_name)
        img_h = img.shape[0]
        img_w = img.shape[1]

        # limit the longer side to MAX_LINE, keeping the aspect ratio
        max_line = 0
        resize_scale = 0
        new_img_h = img_h
        new_img_w = img_w
        if img_h >= img_w:
            max_line = img_h
            if max_line > MAX_LINE:
                resize_scale = max_line / MAX_LINE
                new_img_h = MAX_LINE
                new_img_w = img_w / resize_scale
        if img_w > img_h:
            max_line = img_w
            if max_line > MAX_LINE:
                resize_scale = max_line / MAX_LINE
                new_img_w = MAX_LINE
                new_img_h = img_h / resize_scale
        print 'image.shape:', img.shape
        img = cv2.resize(img, (new_img_w, new_img_h))

        img_center_x = img_w / 2
        img_center_y = img_h / 2

        # choose the face closest to the image center
        center_face_index = 0
        min_dist = 9999999
        gt_rect = ''
        for j in range(face_count):
            gt_rect = contents['faces'][j]['rect']
            face_center_x = int(gt_rect[0]) + int(gt_rect[2] / 2)
            face_center_y = int(gt_rect[1]) + int(gt_rect[3] / 2)
            dist = math.sqrt(
                math.pow(face_center_x - img_center_x, 2) +
                math.pow(face_center_y - img_center_y, 2))
            if dist < min_dist:
                min_dist = dist
                center_face_index = j
        gt_rect = contents['faces'][center_face_index]['rect']

        boxes, points = aligner.align_face(img, [gt_rect])
        box = boxes[0]
        pts = points[0]

        spl = img_name.split('/')
        base_name = spl[-1]
        save_img_subdir = osp.join(save_aligned_dir, spl[-2])
        if not osp.exists(save_img_subdir):
            os.makedirs(save_img_subdir)
        save_img_fn = osp.join(save_img_subdir, base_name)
        print('save_img_fn: %s' % save_img_fn)

        facial5points = np.reshape(pts, (2, -1))
        dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                     output_size)
        cv2.imwrite(save_img_fn, dst_img)
def main(nsplits, split_id, list_file, img_root_dir, mtcnn_model_dir,
         save_dir=None, rects_fn=None):
    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('mkdir for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('mkdir for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('mkdir for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir)

    fp = open(list_file, 'r')
    all_lines = fp.readlines()
    fp.close()

    rects_list = load_rect_list(rects_fn)

    total_line_cnt = len(all_lines)
    print('--->%d imgs in total' % total_line_cnt)

    if nsplits < 2:
        if split_id > 0:
            print('===> Will only process first %d imgs' % split_id)
            start_line = 0
            end_line = split_id
        else:
            print('===> Will process all of the images')
            start_line = 0
            end_line = total_line_cnt
    else:
        assert (split_id < nsplits)
        lines_per_split = float(total_line_cnt) / nsplits
        start_line = int(lines_per_split * split_id)
        end_line = int(lines_per_split * (split_id + 1))
        if end_line + 1 >= total_line_cnt:
            end_line = total_line_cnt

    print('===> Will only process imgs in the range [%d, %d)' %
          (start_line, end_line))

    count = start_line
    for line in all_lines[start_line:end_line]:
        line = line.strip()
        print count
        count = count + 1

        img_fn = osp.join(img_root_dir, line)
        print('===> Processing img: ' + img_fn)
        img = cv2.imread(img_fn)
        ht = img.shape[0]
        wd = img.shape[1]
        print 'image.shape:', img.shape

        spl = osp.split(line)
        sub_dir = osp.split(spl[0])[1]
        print 'sub_dir: ', sub_dir

        if CHINESE_2_PINYIN:
            sub_dir = pinyin.get(sub_dir, format="strip")
            # replace the dot sign in names
            sub_dir = sub_dir.replace(u'\xb7', '-').encode('utf-8')

        base_name = osp.splitext(spl[1])[0]

        save_img_subdir = osp.join(save_aligned_dir, sub_dir)
        if not osp.exists(save_img_subdir):
            os.mkdir(save_img_subdir)

        save_rect_subdir = osp.join(save_rects_dir, sub_dir)
        if not osp.exists(save_rect_subdir):
            os.mkdir(save_rect_subdir)
        # print pts

        save_rects_fn = osp.join(save_rect_subdir, base_name + '.txt')
        fp_rect = open(save_rects_fn, 'w')

        rect = get_rects_for_image(rects_list, base_name)
        boxes, points = aligner.align_face(img, [rect])

        nfaces = len(boxes)
        fp_rect.write('%d\n' % nfaces)

        for i in range(nfaces):
            box = boxes[i]
            pts = points[i]

            if i:
                save_img_fn = osp.join(save_img_subdir,
                                       base_name + '_%d.jpg' % (i + 1))
            else:
                save_img_fn = osp.join(save_img_subdir, base_name + '.jpg')

            facial5points = np.reshape(pts, (2, -1))
            dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                         output_size)
            cv2.imwrite(save_img_fn, dst_img)
            print 'aligned face saved into: ', save_img_fn

            # write the box followed by the 5 landmark (x, y) pairs
            for it in box:
                fp_rect.write('%5.2f\t' % it)
            fp_rect.write('\n')
            for k in range(5):
                fp_rect.write('%5.2f\t%5.2f\n' %
                              (facial5points[0][k], facial5points[1][k]))
        fp_rect.close()
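Several of the scripts here (this one and the two main() variants further below) share the same nsplits/split_id partitioning so that a long image list can be split across parallel jobs. A small self-contained illustration of the ranges it produces, with made-up values:

# Illustrative only: partition a 10-line list into 3 splits the same way
# main() computes [start_line, end_line) above.
total_line_cnt = 10
nsplits = 3
for split_id in range(nsplits):
    lines_per_split = float(total_line_cnt) / nsplits
    start_line = int(lines_per_split * split_id)
    end_line = int(lines_per_split * (split_id + 1))
    if end_line + 1 >= total_line_cnt:
        end_line = total_line_cnt
    print('split %d -> [%d, %d)' % (split_id, start_line, end_line))
# prints: split 0 -> [0, 3), split 1 -> [3, 6), split 2 -> [6, 10)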
def main(args):
    rect_root_dir = args.rect_root_dir
    save_dir = args.save_dir
    mtcnn_model_dir = args.mtcnn_model_dir
    img_root_dir = args.image_root_dir
    gpu_id = args.gpu_id

    rect_list = get_rect_list(rect_root_dir)

    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('makedirs for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('makedirs for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('makedirs for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, True, gpu_id=gpu_id)

    for i in range(len(rect_list)):
        with open(osp.join(rect_root_dir, rect_list[i]), 'r') as f:
            lines = f.readlines()

        for line in lines:
            # first field of each CSV line is the image path; convert
            # Windows-style separators
            img_name = line.split(',')[0].replace('\\', '/')
            gt_rect = get_gt_rect(line)
            if gt_rect is None:
                print('Failed to get_gt_rect(), skip to next image')
                continue

            img_fn = osp.join(img_root_dir, img_name)
            print('===>Processing img: ' + img_fn)
            img = cv2.imread(img_fn)
            ht = img.shape[0]
            wd = img.shape[1]
            print 'image.shape:', img.shape

            boxes, points = aligner.align_face(img, [gt_rect])
            box = boxes[0]
            pts = points[0]

            spl = img_name.split('/')
            sub_dir = ''
            for j in range(len(spl) - 1):
                sub_dir = osp.join(sub_dir, spl[j])
            base_name = spl[-1]

            save_img_subdir = osp.join(save_aligned_dir, sub_dir)
            if not osp.exists(save_img_subdir):
                os.makedirs(save_img_subdir)
            save_img_fn = osp.join(save_img_subdir, base_name)

            facial5points = np.reshape(pts, (2, -1))
            dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                         output_size)
            cv2.imwrite(save_img_fn, dst_img)
def main(face_json_file, mtcnn_model_dir, save_dir=None):
    if not osp.exists(save_dir):
        print('mkdir for aligned faces, aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    if not osp.exists(aligned_save_dir):
        print('mkdir for aligned faces, aligned images dir: ', aligned_save_dir)
        os.makedirs(aligned_save_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp = open('./identity_json_list_new.json', 'r')
    all_lines = fp.readlines()
    print len(all_lines)

    # resume from line 3432946 and stop at line 3600000 of the list
    count = 3432946
    for line in all_lines[count:3600000]:
        print count
        # print line
        count = count + 1

        data = json.loads(line)
        image_url = data[u'url']
        gt = data[u'label'][u'detect'][u'general_d'][u'bbox'][0][u'pts']
        print image_url

        # save results under <identity>/<sub-folder>, taken from the URL path
        save_path = osp.join(data[u'url'].split('/')[-3],
                             data[u'url'].split('/')[-2])
        if not osp.exists(osp.join(save_dir, save_path)):
            os.makedirs(osp.join(save_dir, save_path))

        save_img_fn = osp.join(data[u'url'].split('/')[-3],
                               data[u'url'].split('/')[-2])
        save_fn = aligned_save_dir + '/' + save_img_fn + '/' + \
            data[u'url'].split('/')[-1]
        if not osp.exists(osp.join(aligned_save_dir, save_img_fn)):
            os.makedirs(osp.join(aligned_save_dir, save_img_fn))

        filename = data[u'url'].split('/')[-1][:-4] + '.json'
        print filename
        fp_rlt = open(osp.join(save_dir, save_path, filename), 'w')
        item = {}
        item[u'url'] = image_url

        try:
            # skimage.io returns RGB; swap channels so OpenCV sees BGR
            imageRGB = io.imread(image_url)
            image = cv2.cvtColor(imageRGB, cv2.COLOR_RGB2BGR)
            print image.shape

            # opposite corners of the labeled box -> [x1, y1, x2, y2]
            GT_RECT = [gt[0][0], gt[0][1], gt[2][0], gt[2][1]]

            boxes, points = aligner.align_face(image, [GT_RECT])
            box = boxes[0]
            pts = points[0]

            facial5points = np.reshape(pts, (2, -1))
            dst_img = warp_and_crop_face(image, facial5points, reference_5pts,
                                         output_size)
            cv2.imwrite(save_fn, dst_img)

            print('===> Processing image: ' + save_path + '/' + filename)
            tmp = {'rect': box[0:4], 'score': box[4], 'gt': gt, 'pts': pts}
            item['faces'] = tmp
            item['id'] = data[u'url'].split('/')[-2]
            item['shape'] = image.shape
        except:
            # skip images that fail to download or align
            continue

        json.dump(item, fp_rlt, indent=4)
        fp_rlt.close()
def main(face_json_file, img_root_dir, mtcnn_model_dir, save_dir=None):
    # GT_RECT, overlap_thresh, log_fn1/log_fn2 and get_gt_overlap() come
    # from module-level definitions.
    if save_dir is None:
        save_dir = './fd_json_add_missed'

    fp_in = open(face_json_file, 'r')
    img_list = json.load(fp_in)
    fp_in.close()

    if not osp.exists(img_root_dir):
        print('ERROR: Cannot find image root dir: ' + img_root_dir)
        return

    if not osp.exists(save_dir):
        print('mkdir for aligned faces, aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    json_basename = osp.basename(face_json_file)
    splits = osp.splitext(json_basename)
    new_json_fn = splits[0] + '_add_missed' + splits[1]

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp_log1 = open(osp.join(save_dir, log_fn1), 'w')
    fp_log2 = open(osp.join(save_dir, log_fn2), 'w')
    fp_rlt = open(osp.join(save_dir, new_json_fn), 'w')

    missed_count1 = 0
    missed_count2 = 0

    for item in img_list:
        err_msg = ''

        if 'filename' not in item:
            err_msg = "'filename' not in item, break..."
            print(err_msg)
            fp_log2.write(err_msg + '\n')
            break

        img_fn = osp.join(img_root_dir, item['filename'])
        save_fn = osp.join(save_dir, item['filename'])
        save_fn_dir = osp.dirname(save_fn)

        overlap_thresh_0 = overlap_thresh
        # Tom_Brady_0002 is special because the face in the image is very small
        if 'Tom_Brady_0002' in img_fn:
            overlap_thresh_0 = 0.25

        print('===> Processing image: ' + img_fn)

        if 'faces' not in item:
            err_msg = "'faces' not in item"
            fp_log2.write(item['filename'] + ': ' + err_msg + '\n')
            continue
        elif 'face_count' not in item:
            err_msg = "'face_count' not in item"
            fp_log2.write(item['filename'] + ': ' + err_msg + '\n')
            continue

        nfaces = item['face_count']
        if nfaces < 1:
            # no detection at all: align from the ground-truth rect instead
            missed_count1 += 1
            fp_log2.write(item['filename'] + ': ' +
                          "item['face_count'] < 1" + '\n')

            img = cv2.imread(img_fn)
            boxes, points = aligner.align_face(img, [GT_RECT])

            item['face_count'] = 1
            box = boxes[0]
            pts = points[0]
            tmp = {'rect': box[0:4], 'score': box[4], 'pts': pts}
            item['faces'].append(tmp)
            item['message'] = 'success'
            item['used_gt'] = 1
            fp_log2.write('-->faces added by aligner:\n{}\n'.format(tmp))
            continue

        overlaps = get_gt_overlap(item['faces'])
        max_overlap_idx = overlaps.argmax()

        if overlaps[max_overlap_idx] >= overlap_thresh_0:
            fp_log1.write(item['filename'] + ': ' + " max_overlap_idx=" +
                          str(max_overlap_idx) + '\n')
        else:
            # no detection overlaps the ground truth enough: add one from GT
            missed_count2 += 1
            fp_log2.write(item['filename'] + ': ' +
                          "no faces have overlap>={} with groundtruth".format(
                              overlap_thresh_0) + '\n')
            fp_log2.write("--> max_overlap_idx = {}\n".format(max_overlap_idx))
            fp_log2.write("--> overlaps = {}\n".format(overlaps))

            img = cv2.imread(img_fn)
            boxes, points = aligner.align_face(img, [GT_RECT])

            item['face_count'] += 1
            box = boxes[0]
            pts = points[0]
            tmp = {'rect': box[0:4], 'score': box[4], 'pts': pts}
            item['faces'].append(tmp)
            item['used_gt'] = 1
            fp_log2.write('-->faces added by aligner:\n{}\n'.format(tmp))

    fp_log2.write("\n==>Images with missed faces: {}\n".format(
        missed_count1 + missed_count2))
    fp_log2.write(
        "\t{} missed because of no detection\n".format(missed_count1))
    fp_log2.write(
        "\t{} missed because of max_overlap<thresh\n".format(missed_count2))

    json.dump(img_list, fp_rlt, indent=2)
    fp_rlt.close()
    fp_log1.close()
    fp_log2.close()
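get_gt_overlap() and GT_RECT are defined elsewhere in this script. As a rough, hedged sketch of what such an overlap helper could compute, assuming GT_RECT and each face['rect'] are [x1, y1, x2, y2] boxes (the real implementation may use a different overlap measure and is not reproduced here):

import numpy as np

def iou_with_gt(faces, gt_rect):
    """Return an array of IoU values between each face['rect'] and gt_rect."""
    gx1, gy1, gx2, gy2 = gt_rect
    gt_area = max(0.0, gx2 - gx1) * max(0.0, gy2 - gy1)
    overlaps = []
    for face in faces:
        x1, y1, x2, y2 = face['rect']
        # intersection rectangle
        ix1, iy1 = max(x1, gx1), max(y1, gy1)
        ix2, iy2 = min(x2, gx2), min(y2, gy2)
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        area = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        union = area + gt_area - inter
        overlaps.append(inter / union if union > 0 else 0.0)
    return np.array(overlaps)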
def __init__(self, caffe_model_path):
    self.aligner = MtcnnAligner(caffe_model_path)
def main(img_list_file, root_dir, mtcnn_model_dir, save_dir=None):
    if not save_dir:
        save_dir = './aligned_images'

    if not osp.exists(save_dir):
        print('mkdir for aligned faces, aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    aligned_save_dir = osp.join(save_dir, 'aligned_faces')
    if not osp.exists(aligned_save_dir):
        print('mkdir for aligned faces, aligned images dir: ',
              aligned_save_dir)
        os.makedirs(aligned_save_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp = open(img_list_file, 'r')
    fn_rlt = osp.join(save_dir, 'fd_rlt.json')
    fp_rlt = open(fn_rlt, 'w')
    fp_rlt.write('[\n')

    count = 0
    for line in fp:
        print line
        # each line is "<relative image path> <id number>"
        line_split = line.split()
        img_fn = line_split[0]
        id_num = line_split[1]
        img_fn_split = img_fn.split('/')
        img_full_fn = osp.join(root_dir, img_fn)
        print 'process image: ', img_full_fn, " id_num: ", id_num

        # write a separator before every item except the first
        if count:
            fp_rlt.write(',\n')
        count = count + 1
        print 'count: ', count

        save_subdir = osp.join(aligned_save_dir, img_fn_split[-2])
        save_img_fn = osp.join(save_subdir, img_fn_split[-1])
        if not osp.exists(save_subdir):
            os.makedirs(save_subdir)

        image = cv2.imread(img_full_fn)
        print image.shape

        boxes, points = aligner.align_face(image, [GT_RECT])
        box = boxes[0]
        pts = points[0]

        facial5points = np.reshape(pts, (2, -1))
        dst_img = warp_and_crop_face(image, facial5points, reference_5pts,
                                     output_size)
        cv2.imwrite(save_img_fn, dst_img)

        item = {
            'filename': img_fn,
            'face_count': 1,
        }
        tmp = {'rect': box[0:4],
               'score': box[4],
               'pts': pts,
               'id': id_num}
        item['faces'] = [tmp]
        item['shape'] = image.shape

        json_str = json.dumps(item, indent=2)
        fp_rlt.write(json_str + '\n')
        fp_rlt.flush()

    fp_rlt.write(']\n')
    fp_rlt.close()
    fp.close()
def main(args):
    rect_root_dir = args.rect_root_dir
    save_dir = args.save_dir
    mtcnn_model_dir = args.mtcnn_model_dir
    img_root_dir = args.image_root_dir
    gpu_id = args.gpu_id

    rect_list = get_rect_list(rect_root_dir)
    print('all %d images' % (len(rect_list)))

    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('makedirs for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('makedirs for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('makedirs for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, True, gpu_id=gpu_id)

    for i in range(len(rect_list)):
        with open(osp.join(rect_root_dir, rect_list[i]), 'r') as f:
            contents = json.load(f)

        img_name = contents['filename']
        gt_rect = contents['faces'][0]['rect']
        if gt_rect is None:
            print('Failed to get gt rect, skip to next image')
            continue

        img = cv2.imread(img_name)
        ht = img.shape[0]
        wd = img.shape[1]
        print 'image.shape:', img.shape

        boxes, points = aligner.align_face(img, [gt_rect])
        box = boxes[0]
        pts = points[0]

        spl = img_name.split('/')
        base_name = spl[-1]
        save_img_subdir = osp.join(save_aligned_dir, spl[-2])
        if not osp.exists(save_img_subdir):
            os.makedirs(save_img_subdir)
        save_img_fn = osp.join(save_img_subdir, base_name)
        print('save_img_fn: %s' % save_img_fn)

        facial5points = np.reshape(pts, (2, -1))
        dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                     output_size)
        cv2.imwrite(save_img_fn, dst_img)
def main(args):
    save_dir = args.save_dir
    list_file = args.image_list
    nsplits = args.nsplits
    split_id = args.split_id
    mtcnn_model_dir = args.mtcnn_model_dir
    img_root_dir = args.image_root_dir
    rect_root_dir = args.rect_root_dir
    gpu_id = args.gpu_id

    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('makedirs for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('makedirs for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('makedirs for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, True, gpu_id=gpu_id)

    fp = open(list_file, 'r')
    all_lines = fp.readlines()
    fp.close()

    total_line_cnt = len(all_lines)
    print('--->%d imgs in total' % total_line_cnt)

    if nsplits < 2:
        if split_id > 0:
            print('===> Will only process first %d imgs' % split_id)
            start_line = 0
            end_line = split_id
        else:
            print('===> Will process all of the images')
            start_line = 0
            end_line = total_line_cnt
    else:
        assert (split_id < nsplits)
        lines_per_split = float(total_line_cnt) / nsplits
        start_line = int(lines_per_split * split_id)
        end_line = int(lines_per_split * (split_id + 1))
        if end_line + 1 >= total_line_cnt:
            end_line = total_line_cnt

    print('===> Will only process imgs in the range [%d, %d)' %
          (start_line, end_line))

    count = start_line
    all_lines = all_lines[start_line:end_line]
    for line in all_lines:
        line = line.strip()
        print '%d\n' % count
        count = count + 1

        # img_fn = osp.join(img_root_dir, line)
        img_fn = line
        print('===> Processing img: ' + img_fn)
        img = cv2.imread(img_fn)
        ht = img.shape[0]
        wd = img.shape[1]
        print 'image.shape:', img.shape

        rect_fn = osp.join(rect_root_dir,
                           rename_with_folderName(line) + '.json')
        # osp.join(rect_root_dir, line[0:-4] + '.json')
        gt_rect = get_gt_rect(rect_fn)
        if gt_rect is None:
            print('Failed to get_gt_rect(), skip to next image')
            continue
        # print 'face rect: ', gt

        boxes, points = aligner.align_face(img, [gt_rect])
        assert len(boxes) == len(points)

        for i in range(len(boxes)):
            box = boxes[i]
            pts = points[i]

            spl = osp.split(line)
            sub_dir = spl[0]
            base_name = spl[-1]

            save_img_subdir = osp.join(save_aligned_dir, sub_dir)
            if not osp.exists(save_img_subdir):
                os.makedirs(save_img_subdir)

            save_rect_subdir = osp.join(save_rects_dir, sub_dir)
            if not osp.exists(save_rect_subdir):
                os.makedirs(save_rect_subdir)
            # print pts

            save_img_fn = osp.join(save_img_subdir, base_name)
            facial5points = np.reshape(pts, (2, -1))
            dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                         output_size)
            cv2.imwrite(save_img_fn, dst_img)

            save_rect_fn = osp.join(save_rect_subdir,
                                    osp.splitext(base_name)[0] + '.txt')
            fp_rect = open(save_rect_fn, 'w')
            for it in box:
                fp_rect.write('%5.2f\t' % it)
            fp_rect.write('\n')
            for k in range(5):
                fp_rect.write('%5.2f\t%5.2f\n' %
                              (facial5points[0][k], facial5points[1][k]))
            fp_rect.close()
def main(nsplits, split_id, list_file, img_root_dir, mtcnn_model_dir,
         save_dir=None):
    if not save_dir:
        save_dir = './aligned_root_dir'
    if not osp.exists(save_dir):
        print('makedirs for aligned root dir: ', save_dir)
        os.makedirs(save_dir)

    save_aligned_dir = osp.join(save_dir, 'aligned_imgs')
    if not osp.exists(save_aligned_dir):
        print('makedirs for aligned/cropped face imgs: ', save_aligned_dir)
        os.makedirs(save_aligned_dir)

    save_rects_dir = osp.join(save_dir, 'face_rects')
    if not osp.exists(save_rects_dir):
        print('makedirs for face rects/landmarks: ', save_rects_dir)
        os.makedirs(save_rects_dir)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp = open(list_file, 'r')
    all_lines = fp.readlines()
    fp.close()

    total_line_cnt = len(all_lines)
    print('--->%d imgs in total' % total_line_cnt)

    if nsplits < 2:
        if split_id > 0:
            print('===> Will only process first %d imgs' % split_id)
            start_line = 0
            end_line = split_id
        else:
            print('===> Will process all of the images')
            start_line = 0
            end_line = total_line_cnt
    else:
        assert (split_id < nsplits)
        lines_per_split = float(total_line_cnt) / nsplits
        start_line = int(lines_per_split * split_id)
        end_line = int(lines_per_split * (split_id + 1))
        if end_line + 1 >= total_line_cnt:
            end_line = total_line_cnt

    print('===> Will only process imgs in the range [%d, %d)' %
          (start_line, end_line))

    count = start_line
    for line in all_lines[start_line:end_line]:
        line = line.strip()
        print count
        count = count + 1

        img_fn = osp.join(img_root_dir, line)
        print('===> Processing img: ' + img_fn)
        img = cv2.imread(img_fn)
        ht = img.shape[0]
        wd = img.shape[1]
        print 'image.shape:', img.shape

        # GT_RECT = [0, 0, img.shape[0], img.shape[1]]
        # GT_RECT = [int(wd * 0.25), int(ht * 0.25),
        #            int(wd * 0.75), int(ht * 0.72)]
        GT_RECT = get_gt_rect(img)
        # print 'face rect: ', gt

        boxes, points = aligner.align_face(img, [GT_RECT])
        box = boxes[0]
        pts = points[0]

        spl = osp.split(line)
        sub_dir = spl[0]
        base_name = spl[-1]

        save_img_subdir = osp.join(save_aligned_dir, sub_dir)
        if not osp.exists(save_img_subdir):
            os.makedirs(save_img_subdir)

        save_rect_subdir = osp.join(save_rects_dir, sub_dir)
        if not osp.exists(save_rect_subdir):
            os.makedirs(save_rect_subdir)
        # print pts

        save_img_fn = osp.join(save_img_subdir, base_name)
        facial5points = np.reshape(pts, (2, -1))
        dst_img = warp_and_crop_face(img, facial5points, reference_5pts,
                                     output_size)
        cv2.imwrite(save_img_fn, dst_img)

        save_rect_fn = osp.join(save_rect_subdir,
                                osp.splitext(base_name)[0] + '.txt')
        fp_rect = open(save_rect_fn, 'w')
        for it in box:
            fp_rect.write('%5.2f\t' % it)
        fp_rect.write('\n')
        for i in range(5):
            fp_rect.write('%5.2f\t%5.2f\n' %
                          (facial5points[0][i], facial5points[1][i]))
        fp_rect.close()
def __init__(self, caffe_model_path=None, gpu_id=0):
    self.aligner = None
    if caffe_model_path:
        self.aligner = MtcnnAligner(caffe_model_path, gpu_id)
def main(json_file, save_dir=None, save_img=True, show_img=True):
    if not osp.exists(json_file):
        print 'Cannot find json file: ' + json_file
        return

    if save_dir is None:
        save_dir = './fa_facex_rlt'

    save_json = 'mtcnn_align_rlt.json'
    caffe_model_path = "../../model"

    fp_json = open(json_file, 'r')
    facex_response = json.load(fp_json)
    fp_json.close()

    if (not facex_response or not isinstance(facex_response, dict)
            or 'facex_det' not in facex_response):
        print 'Invalid json file: ' + json_file
        return

    facex_det_response = facex_response['facex_det']

    if not osp.exists(save_dir):
        os.makedirs(save_dir)

    fp_rlt = open(osp.join(save_dir, save_json), 'w')
    results = []

    for item in facex_det_response:
        img_path = item['name']
        print '===> Processing image: ' + img_path

        if 'detections' not in item:
            continue

        face_rects = []
        for face in item['detections']:
            face_rects.append(face['pts'])

        img = cv2.imread(img_path)

        aligner = MtcnnAligner(caffe_model_path, False)

        rlt = {}
        rlt["filename"] = img_path
        rlt["faces"] = []
        rlt['face_count'] = 0

        t1 = time.clock()
        bboxes, points = aligner.align_face(img, face_rects)
        t2 = time.clock()

        n_boxes = len(face_rects)
        print("-->Alignment cost %f seconds, processed %d face rects, avg time: %f seconds"
              % ((t2 - t1), n_boxes, (t2 - t1) / n_boxes))

        if bboxes is not None and len(bboxes) > 0:
            for (box, pts) in zip(bboxes, points):
                # box = box.tolist()
                # pts = pts.tolist()
                tmp = {'rect': box[0:4],
                       'score': box[4],
                       'pts': pts}
                rlt['faces'].append(tmp)
            rlt['face_count'] = len(bboxes)

        rlt['message'] = 'success'
        results.append(rlt)

        if save_img or show_img:
            draw_faces(img, bboxes, points)

        if save_img:
            # mirror the last folder of the image path under save_dir
            pdir = save_dir
            dir_name, base_name = osp.split(img_path)
            if dir_name:
                _, pdir = osp.split(dir_name)
                if pdir:
                    pdir = osp.join(save_dir, pdir)
            if not osp.exists(pdir):
                os.makedirs(pdir)
            save_name = osp.join(pdir, osp.basename(img_path))
            cv2.imwrite(save_name, img)

        if show_img:
            cv2.imshow('img', img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

    json.dump(results, fp_rlt, indent=4)
    fp_rlt.close()
def __init__(self, model_path=None, gpu_id=0):
    self.aligner = None
    if model_path:
        self.aligner = MtcnnAligner(model_path, gpu_id=gpu_id)
def main(mtcnn_model_dir, save_dir=None, save_img=None):
    if save_dir is None:
        save_dir = './gender_origin/label'
    if not osp.exists(save_dir):
        os.makedirs(save_dir)

    if save_img is None:
        save_img = './gender_origin/img'
    if not osp.exists(save_img):
        os.makedirs(save_img)

    aligner = MtcnnAligner(mtcnn_model_dir, False)

    fp = open(
        '/disk2/du/mtcnn-caffe-good/mtcnn_aligner/dataset-gender-all.json',
        'r')
    all_lines = fp.readlines()

    count = 23097
    for line in all_lines[23097:]:
        count = count + 1
        print count

        data = json.loads(line)
        image_url = data["url"]
        image_bbox = data["label"]["detect"]["general_d"]["bbox"]

        file_idx = "%06d" % count
        print file_idx
        filename = str(file_idx) + '.json'
        imgname = str(file_idx) + '.jpg'

        urllib.urlretrieve(image_url, osp.join(save_img, imgname))
        print('===> Save image: ' + filename)

        fp_rlt = open(osp.join(save_dir, filename), 'w')
        item = {}
        item['url'] = image_url
        item['imgname'] = imgname

        if image_bbox == []:
            item['detect'] = []
            # continue
        else:
            img = io.imread(image_url)
            if len(img.shape) != 3:
                item['detect'] = []
                # continue
            else:
                item['detect'] = []
                for idx in range(len(image_bbox)):
                    print "len of box", len(image_bbox)
                    per_bbox = image_bbox[idx]["pts"]
                    GT_RECT = [per_bbox[0][0], per_bbox[0][1],
                               per_bbox[2][0], per_bbox[2][1]]
                    boxes, points = aligner.align_face(img, [GT_RECT])
                    pts = points[0]
                    label = image_bbox[idx]["class"]
                    tmp = {'rect': GT_RECT, 'pts': pts, 'class': label}
                    item['detect'].append(tmp)

        print "item:", item
        json.dump(item, fp_rlt, indent=4)
        fp_rlt.close()