def bounding_box_face(img, gpu_mem):
    """Run the MTCNN cascade once on ``img`` and return detections.

    Builds a fresh TF graph/session per call (expensive; see MTCNN_detector
    for the persistent-session variant elsewhere in this file).

    Args:
        img: input image (BGR ndarray as produced by cv2.imread — presumably;
            verify against callers).
        gpu_mem: fraction of GPU memory to allow this session
            (per_process_gpu_memory_fraction).

    Returns:
        (rectangles, points) as produced by detect_face — boxes plus
        landmark coordinates.
    """
    # Detection hyper-parameters (fixed for this helper).
    model_dir = 'assets/save_model/'
    minsize = 20
    factor = 0.709
    threshold = [0.6, 0.7, 0.7]
    file_paths = get_model_filenames(model_dir)
    with tf.device('/gpu:0'):
        with tf.Graph().as_default():
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.per_process_gpu_memory_fraction = gpu_mem
            with tf.Session(config=config) as sess:
                if len(file_paths) == 3:
                    # Three separate per-stage checkpoints: rebuild each
                    # network and restore its variables by name prefix.
                    image_pnet = tf.placeholder(
                        tf.float32, [None, None, None, 3])
                    pnet = PNet({'data': image_pnet}, mode='test')
                    out_tensor_pnet = pnet.get_all_output()
                    image_rnet = tf.placeholder(tf.float32, [None, 24, 24, 3])
                    rnet = RNet({'data': image_rnet}, mode='test')
                    out_tensor_rnet = rnet.get_all_output()
                    image_onet = tf.placeholder(tf.float32, [None, 48, 48, 3])
                    onet = ONet({'data': image_onet}, mode='test')
                    out_tensor_onet = onet.get_all_output()
                    # One Saver per stage, restricted to that stage's
                    # variable scope so each checkpoint restores cleanly.
                    saver_pnet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "pnet/"])
                    saver_rnet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "rnet/"])
                    saver_onet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "onet/"])
                    saver_pnet.restore(sess, file_paths[0])

                    # Closures capture sess + the stage's placeholder/outputs.
                    def pnet_fun(img):
                        return sess.run(out_tensor_pnet,
                                        feed_dict={image_pnet: img})

                    saver_rnet.restore(sess, file_paths[1])

                    def rnet_fun(img):
                        return sess.run(out_tensor_rnet,
                                        feed_dict={image_rnet: img})

                    saver_onet.restore(sess, file_paths[2])

                    def onet_fun(img):
                        return sess.run(out_tensor_onet,
                                        feed_dict={image_onet: img})
                else:
                    # Single meta-graph + checkpoint: import the saved graph
                    # and address its tensors by their recorded names.
                    saver = tf.train.import_meta_graph(file_paths[0])
                    saver.restore(sess, file_paths[1])

                    def pnet_fun(img):
                        return sess.run(
                            ('softmax/Reshape_1:0',
                             'pnet/conv4-2/BiasAdd:0'),
                            feed_dict={'Placeholder:0': img})

                    def rnet_fun(img):
                        return sess.run(
                            ('softmax_1/softmax:0',
                             'rnet/conv5-2/rnet/conv5-2:0'),
                            feed_dict={'Placeholder_1:0': img})

                    def onet_fun(img):
                        return sess.run(
                            ('softmax_2/softmax:0',
                             'onet/conv6-2/onet/conv6-2:0',
                             'onet/conv6-3/onet/conv6-3:0'),
                            feed_dict={'Placeholder_2:0': img})

                rectangles, points = detect_face(img, minsize,
                                                 pnet_fun, rnet_fun, onet_fun,
                                                 threshold, factor)
                tf.reset_default_graph()
                return rectangles, points
class MTCNN_detector:
    """MTCNN face detector backed by one persistent TF session.

    The P-Net/R-Net/O-Net graphs are built and restored once in ``__init__``
    and reused for every detection call; call :meth:`destroy` when done to
    release the session.
    """

    def __init__(self, model_path, gpu_id=0, minsize=40, factor=0.5,
                 threshold=None):
        """Build the three networks and restore their checkpoints.

        Args:
            model_path: directory holding ``pnet/``, ``rnet/`` and ``onet/``
                checkpoint subdirectories.
            gpu_id: GPU device index to place the graph on.
            minsize: minimum face size (pixels) passed to the detector.
            factor: image-pyramid scale factor.
            threshold: per-stage score thresholds; defaults to
                [0.8, 0.8, 0.9]. (Default is None instead of a mutable
                list literal to avoid the shared-default-argument pitfall.)

        Raises:
            Exception: if model_path or any stage subdirectory is missing.
        """
        # Checkpoint prefixes for the three cascade stages.
        rnet_model_path = os.path.join(model_path, "rnet/rnet-3000000")
        pnet_model_path = os.path.join(model_path, "pnet/pnet-3000000")
        onet_model_path = os.path.join(model_path, "onet/onet-500000")
        if not os.path.exists(model_path) or \
                any(not os.path.exists(os.path.join(model_path, sub))
                    for sub in ("rnet", "pnet", "onet")):
            raise Exception("Error when loading {}".format(model_path))

        # Default detection parameters.
        self.minsize = minsize
        self.factor = factor
        self.threshold = [0.8, 0.8, 0.9] if threshold is None else threshold

        # Build and restore the models once; the session stays open.
        with tf.device('/gpu:{}'.format(gpu_id)):
            with tf.Graph().as_default():
                config = tf.ConfigProto(allow_soft_placement=True)
                self.sess = tf.Session(config=config)
                self.pnet_input = tf.placeholder(
                    tf.float32, [None, None, None, 3])
                self.pnet = PNet({'data': self.pnet_input}, mode='test')
                self.pnet_output = self.pnet.get_all_output()
                self.rnet_input = tf.placeholder(
                    tf.float32, [None, 24, 24, 3])
                self.rnet = RNet({'data': self.rnet_input}, mode='test')
                self.rnet_output = self.rnet.get_all_output()
                self.onet_input = tf.placeholder(
                    tf.float32, [None, 48, 48, 3])
                self.onet = ONet({'data': self.onet_input}, mode='test')
                self.onet_output = self.onet.get_all_output()
                # One Saver per stage, restricted to that stage's variable
                # scope so each checkpoint restores only its own weights.
                saver_pnet = tf.train.Saver(
                    [v for v in tf.global_variables()
                     if v.name[0:5] == "pnet/"])
                saver_rnet = tf.train.Saver(
                    [v for v in tf.global_variables()
                     if v.name[0:5] == "rnet/"])
                saver_onet = tf.train.Saver(
                    [v for v in tf.global_variables()
                     if v.name[0:5] == "onet/"])
                saver_pnet.restore(self.sess, pnet_model_path)
                self.pnet_func = lambda img: self.sess.run(
                    self.pnet_output, feed_dict={self.pnet_input: img})
                saver_rnet.restore(self.sess, rnet_model_path)
                self.rnet_func = lambda img: self.sess.run(
                    self.rnet_output, feed_dict={self.rnet_input: img})
                saver_onet.restore(self.sess, onet_model_path)
                self.onet_func = lambda img: self.sess.run(
                    self.onet_output, feed_dict={self.onet_input: img})

    def destroy(self):
        """Close the TensorFlow session held by this detector."""
        self.sess.close()

    def calc_det_result(self, image):
        """Detect faces and return boxes only.

        Returns:
            rects: numpy array of shape [num_face, 5]; each row is
            [left_top_x, left_top_y, right_bottom_x, right_bottom_y,
            confidence].
        """
        rects, _shapes = self.calc_landmark_result(image)
        return rects

    def calc_landmark_result(self, image):
        """Detect faces and their five-point landmarks.

        Returns:
            rectangles: numpy array [num_face, 5]; each row is
                [left_top_x, left_top_y, right_bottom_x, right_bottom_y,
                confidence].
            shapes: numpy array [num_face, 10]; each row is
                [left_eye_x, left_eye_y, right_eye_x, right_eye_y,
                nose_x, nose_y, left_mouthcorner_x, left_mouthcorner_y,
                right_mouthcorner_x, right_mouthcorner_y].
        """
        # TODO(review): timing instrumentation — consider removing or
        # routing through logging in production.
        start = cv2.getTickCount()
        rectangles, shapes = tools.detect_face(
            image, self.minsize, self.pnet_func, self.rnet_func,
            self.onet_func, self.threshold, self.factor)
        # detect_face yields landmarks with faces on the second axis;
        # transpose to [num_face, 10].
        shapes = np.transpose(shapes)
        usetime = (cv2.getTickCount() - start) / cv2.getTickFrequency()
        print("Use time {}s.".format(usetime))
        return rectangles, shapes

    def extract_eye_result(self, shapes):
        """Slice the eye coordinates out of a landmark array.

        Args:
            shapes: numpy array [num_face, 10] from calc_landmark_result.

        Returns:
            numpy array [num_face, 4]:
            [left_eye_x, left_eye_y, right_eye_x, right_eye_y].
        """
        # NOTE(review): asserts vanish under ``python -O``; kept for parity
        # with the original contract checks.
        assert (shapes is not None)
        assert (shapes.shape[0] > 0 and shapes.shape[1] == 10)
        return shapes[:, 0:4]

    def show_result(self, image_path, rectangles, shapes):
        """Draw boxes and landmark points on the image; save as show.jpg."""
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if rectangles.shape[0] != shapes.shape[0]:
            print("Error in show results {} != {}.".format(
                rectangles.shape[0], shapes.shape[0]))
        for rect in rectangles:
            cv2.rectangle(image,
                          (int(round(rect[0])), int(round(rect[1]))),
                          (int(round(rect[2])), int(round(rect[3]))),
                          (255, 255, 0), 2)
        for shape in shapes:
            # ``//`` keeps this an int under Python 3 (``/`` would yield a
            # float and break range()); identical result under Python 2.
            shape_num = len(shape) // 2
            for i in range(shape_num):
                pt = (int(round(shape[2 * i])), int(round(shape[2 * i + 1])))
                cv2.circle(image, pt, 2, (0, 0, 255), 2)
        cv2.imwrite("show.jpg", image)
def main(args):
    """CLI entry point: detect faces in args.image_path, print timings and
    raw detections, draw boxes/landmarks, and optionally save the image.

    Expects args to provide: image_path, model_dir, minsize, threshold,
    factor, save_image, save_name.
    """
    img = cv2.imread(args.image_path)
    file_paths = get_model_filenames(args.model_dir)
    with tf.device('/gpu:0'):
        with tf.Graph().as_default():
            config = tf.ConfigProto(allow_soft_placement=True)
            with tf.Session(config=config) as sess:
                if len(file_paths) == 3:
                    # Three separate per-stage checkpoints: rebuild each
                    # network and restore its variables by name prefix.
                    image_pnet = tf.placeholder(
                        tf.float32, [None, None, None, 3])
                    pnet = PNet({'data': image_pnet}, mode='test')
                    out_tensor_pnet = pnet.get_all_output()
                    image_rnet = tf.placeholder(tf.float32, [None, 24, 24, 3])
                    rnet = RNet({'data': image_rnet}, mode='test')
                    out_tensor_rnet = rnet.get_all_output()
                    image_onet = tf.placeholder(tf.float32, [None, 48, 48, 3])
                    onet = ONet({'data': image_onet}, mode='test')
                    out_tensor_onet = onet.get_all_output()
                    # One Saver per stage, restricted to its variable scope.
                    saver_pnet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "pnet/"])
                    saver_rnet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "rnet/"])
                    saver_onet = tf.train.Saver(
                        [v for v in tf.global_variables()
                         if v.name[0:5] == "onet/"])
                    saver_pnet.restore(sess, file_paths[0])

                    # Closures capture sess + the stage's placeholder/outputs.
                    def pnet_fun(img):
                        return sess.run(out_tensor_pnet,
                                        feed_dict={image_pnet: img})

                    saver_rnet.restore(sess, file_paths[1])

                    def rnet_fun(img):
                        return sess.run(out_tensor_rnet,
                                        feed_dict={image_rnet: img})

                    saver_onet.restore(sess, file_paths[2])

                    def onet_fun(img):
                        return sess.run(out_tensor_onet,
                                        feed_dict={image_onet: img})
                else:
                    # Single meta-graph + checkpoint: import the saved graph
                    # and address its tensors by their recorded names.
                    saver = tf.train.import_meta_graph(file_paths[0])
                    saver.restore(sess, file_paths[1])

                    def pnet_fun(img):
                        return sess.run(
                            ('softmax/Reshape_1:0',
                             'pnet/conv4-2/BiasAdd:0'),
                            feed_dict={'Placeholder:0': img})

                    def rnet_fun(img):
                        return sess.run(
                            ('softmax_1/softmax:0',
                             'rnet/conv5-2/rnet/conv5-2:0'),
                            feed_dict={'Placeholder_1:0': img})

                    def onet_fun(img):
                        return sess.run(
                            ('softmax_2/softmax:0',
                             'onet/conv6-2/onet/conv6-2:0',
                             'onet/conv6-3/onet/conv6-3:0'),
                            feed_dict={'Placeholder_2:0': img})

                # Time the full cascade once and report raw outputs.
                start_time = time.time()
                rectangles, points = detect_face(img, args.minsize,
                                                 pnet_fun, rnet_fun,
                                                 onet_fun, args.threshold,
                                                 args.factor)
                duration = time.time() - start_time
                print(duration)
                print('rectangles->', rectangles)
                print('pts->', points)
                # Landmarks come back with faces on the second axis;
                # transpose so each row holds one face's 10 coordinates.
                points = np.transpose(points)
                for rectangle in rectangles:
                    # rectangle[4] is the confidence score; draw it above
                    # the top-left corner of the box.
                    cv2.putText(img, str(rectangle[4]),
                                (int(rectangle[0]), int(rectangle[1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img,
                                  (int(rectangle[0]), int(rectangle[1])),
                                  (int(rectangle[2]), int(rectangle[3])),
                                  (255, 0, 0), 1)
                for point in points:
                    # 5 landmarks stored as interleaved (x, y) pairs.
                    for i in range(0, 10, 2):
                        cv2.circle(img,
                                   (int(point[i]), int(point[i + 1])),
                                   2, (0, 255, 0))
                cv2.imshow("test", img)
                if args.save_image:
                    cv2.imwrite(args.save_name, img)
                # NOTE(review): windows are only destroyed when 'q' is
                # pressed; any other key leaves the window open.
                if cv2.waitKey(0) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()