def main():
    """Flask view: accept an uploaded image, detect faces, and render search results.

    On POST: decodes the uploaded file, stores it under a content-derived
    directory, runs face detection + vectorization, then queries the index
    for the 10 nearest faces per detected face.  On GET (or an undecodable
    upload) it falls through to the index page.
    """
    if request.method == 'POST':
        imageFile = request.files['file']
        file_bytes = np.asarray(bytearray(imageFile.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imdecode returns None for undecodable data; without this
            # guard hashlib.sha1(image) below would raise TypeError.
            return render_template('index.html')
        # Content-derived key: same image always maps to the same directory.
        uniqueCode = hashlib.sha1(image).hexdigest()[:16]
        basepath = "/home/ubuntu/face-search-engine"
        directory = os.path.join(basepath, 'static/images', str(uniqueCode))
        fileUniqueName = str(uniqueCode) + '.jpg'
        # os.makedirs instead of os.system("mkdir -p ..."): no shell
        # involved, errors are raised instead of silently ignored.
        os.makedirs(directory, exist_ok=True)
        cv2.imwrite(os.path.join(directory, fileUniqueName), image)
        # extract the faces
        face = fdv.Face(os.path.join(directory, fileUniqueName), directory)
        face.detect_face(detector=detector)
        face.write_clip(write_to_file=True)
        face.vectorize(sess=sess, model=model,
                       model_input_size=(IMAGE_SZ, IMAGE_SZ))
        # One clip image per detected face, named 0.png, 1.png, ...
        input_faces = [str(i) + ".png" for i in range(face.num_faces)]
        output_faces = {}
        origin_img = {}
        scores = {}
        for count, feature in enumerate(face.face_features):
            # 512-dim embedding queried against the index service.
            tmp = client.search(feature.reshape(512), K=10)
            out = []
            org_url = []
            score = []
            for f in tmp:
                key = f['key']
                score.append(round(f['score'], 3))
                out.append(key + '.png')
                org_url.append(f['meta'])
            scores[input_faces[count]] = score
            output_faces[input_faces[count]] = out
            origin_img[input_faces[count]] = org_url
        columns = ["Similar Face %s" % (i + 1) for i in range(10)]
        return render_template('analyze.html',
                               directUniqueName=uniqueCode,
                               fileUniqueName=fileUniqueName,
                               column_names=columns,
                               input_faces=input_faces,
                               output_faces=output_faces,
                               output_scores=scores,
                               origin_img=origin_img)
    return render_template('index.html')
def analyze():
    """Flask view: run the face-search pipeline on a fixed test image.

    On POST: detects faces in static/images/test.jpg, vectorizes them, and
    queries the index for the 5 nearest faces per detected face.  Result
    keys of the form "<barcode>_<n>" are mapped back to per-barcode clip
    and origin image paths.  On GET it renders the upload page.
    """
    if request.method == 'POST':
        # current path
        basepath = os.path.dirname(__file__)
        print(basepath)
        path = os.path.join(basepath, 'static/images', 'test.jpg')
        # 30 is presumably the minimum face size in pixels — matches the
        # min_face_size keyword used elsewhere in this project; confirm.
        face = fdv.Face(path, os.path.join(basepath, 'static/images/'), 30)
        face.detect_face()
        face.write_clip(write_to_file=True)
        face.vectorize(sess=sess, model=model,
                       model_input_size=(IMAGE_SZ, IMAGE_SZ))
        # One clip image per detected face, named 0.png, 1.png, ...
        input_faces = [str(i) + ".png" for i in range(face.num_faces)]
        output_faces = {}
        origin_img = {}
        scores = {}
        for count, feature in enumerate(face.face_features):
            tmp = client.search(feature.reshape(512), K=5)
            out = []
            org = []
            score = []
            for f in tmp:
                # Keys look like "<barcode>_<clipIndex>".
                barcode = f['key'].split('_')
                score.append(round(f['score'], 5))
                out.append(barcode[0] + '/' + barcode[1] + '.png')
                org.append(barcode[0] + '/origin.png')
            scores[input_faces[count]] = score
            output_faces[input_faces[count]] = out
            origin_img[input_faces[count]] = org
        return render_template('analyze.html',
                               input_faces=input_faces,
                               output_faces=output_faces,
                               output_scores=scores,
                               origin_img=origin_img)
    return render_template('upload.html')
def process(barcode, url):
    """Fetch the image at *url* and index its faces under a barcode-sharded dir.

    The output directory is data/<barcode[4:6]>/<barcode[6:8]>/<barcode>.
    If the directory already holds an 'info' marker file the barcode was
    fully processed before and is skipped.  On any processing failure the
    directory is renamed with a '_fail' suffix and the error is logged.
    """
    # Construct directory structure
    save_dir = os.path.join('data', barcode[4:6], barcode[6:8], barcode)
    try:
        os.makedirs(save_dir)
    except OSError:
        # Narrowed from a bare except: only directory-creation errors are
        # expected here.  'info' marks a completed run — skip it; a
        # directory without 'info' is a partial run, so reprocess it.
        if os.path.exists(os.path.join(save_dir, 'info')):
            logf.write("%s aleardy exist\n" % save_dir)
            return
    try:
        face = fdv.Face(image_path=url, output_dir=save_dir,
                        is_url=True, key=barcode)
        face.detect_face(detector=detector)
        face.write_clip(write_to_file=True)
        face.write_info("info")
        logf.write("%s done\n" % save_dir)
    except Exception as e:
        print(e)
        # os.rename instead of os.system("mv ..."): no shell involved, so
        # a hostile barcode cannot inject shell commands.
        os.rename(save_dir, save_dir + "_fail")
        logf.write("Failed to process %s (%s): %s\n" % (url, barcode, e))
import tensorflow as tf
import numpy as np

import face_detect_vectorize as fdv
import fawn  # was missing: fawn.Fawn is used below but never imported

# FaceNet input resolution and checkpoint path.
IMAGE_SZ = 160
MODEL_PATH = '20180402-114759/model-20180402-114759'

# Index service endpoint used for nearest-neighbour face search.
client = fawn.Fawn('http://127.0.0.1:8000')

model = fdv.Model(path=MODEL_PATH, image_size=IMAGE_SZ)
config = tf.ConfigProto()
# Grow GPU memory on demand instead of grabbing it all at start-up.
config.gpu_options.allow_growth = True

print("Load faceNet =>>>")
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    model.loader(sess)
    print("Done!\n")

    # Detect faces in the sample image, embed them, and print the top-5
    # nearest neighbours for each detected face.
    face = fdv.Face('sample_input.png', None, 30)
    face.detect_face()
    face.write_clip(write_to_file=False)
    face.vectorize(sess=sess, model=model,
                   model_input_size=(IMAGE_SZ, IMAGE_SZ))
    for feature in face.face_features:
        res = client.search(feature.reshape(512), K=5)
        print(res)
        print("-----------")
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    model.loader(sess)
    print("Done!\n")
    # Process every source image: detect faces, write clips/labels, embed.
    for image in ORIGINAL_IMAGE:
        print('Processing %s ... ' % image)
        faces_info = []
        # One output directory per source image, named after its basename.
        target_dir = IMAGE_OUTPUT_PATH + os.path.splitext(
            os.path.basename(image))[0]
        print("Save to %s" % target_dir)
        # os.makedirs instead of os.system('mkdir -p ...'): no shell
        # involved; exist_ok reproduces the old existence check.
        os.makedirs(target_dir, exist_ok=True)
        try:
            face = fdv.Face(image_path=image, output_dir=target_dir,
                            min_face_size=MIN_FACE_SIZE)
            face.detect_face()
            face.write_origin()
            face.write_clip(write_to_file=True)
            face.write_label()
            face.vectorize(sess=sess, model=model,
                           model_input_size=(IMAGE_SZ, IMAGE_SZ))
            face.write_info("info")
        except Exception as e:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt) and the error is now reported before the
            # batch aborts, as before.
            print(e)
            os.rename(target_dir, target_dir + '_fail')
            sys.exit()
# FaceNet input resolution and checkpoint path.
IMAGE_SZ = 160
MODEL_PATH = '20180402-114759/model-20180402-114759'

# Nearest-neighbour index service and the image to query (first CLI arg).
client = fawn.Fawn('http://127.0.0.1:8888')
SAMPLE = sys.argv[1]

model = fdv.Model(path=MODEL_PATH, image_size=IMAGE_SZ)

# Let the GPU allocator grow on demand rather than reserving everything.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

print("Load faceNet =>>>")
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    model.loader(sess)
    print("Done!\n")

    # Face detector configured for faces of at least 30 px.
    detector = Detector()
    detector.set_min_face_size(30)

    # Detect and embed every face in the sample, then print the 5 nearest
    # indexed faces for each embedding.
    face = fdv.Face(SAMPLE, None)
    face.detect_face(detector=detector)
    face.write_clip(write_to_file=False)
    face.vectorize(sess=sess, model=model,
                   model_input_size=(IMAGE_SZ, IMAGE_SZ))
    for embedding in face.face_features:
        hits = client.search(embedding.reshape(512), K=5)
        print(hits)
        print("-----------")