# Recognize faces in a test image by comparing embeddings against the enrolled
# encodings: a cosine distance below recognition_t counts as a match.
# Assumes cv2, mtcnn, numpy as np, cosine (scipy.spatial.distance), load_model
# (Keras), l2_normalizer and the helpers load_pickle/get_face/get_encode are
# already imported/defined, and that encodings_path, encoder_model and
# test_img_path are set earlier.
recognition_t = 0.3            # maximum cosine distance for a match
required_size = (160, 160)     # input size expected by the face encoder

encoding_dict = load_pickle(encodings_path)
face_detector = mtcnn.MTCNN()
face_encoder = load_model(encoder_model)

img = cv2.imread(test_img_path)
# plt_show(img)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

results = face_detector.detect_faces(img_rgb)
for res in results:
    face, pt_1, pt_2 = get_face(img_rgb, res['box'])
    encode = get_encode(face_encoder, face, required_size)
    encode = l2_normalizer.transform(np.expand_dims(encode, axis=0))[0]

    # Find the closest enrolled encoding within the recognition threshold.
    name = 'unknown'
    distance = float("inf")
    for db_name, db_encode in encoding_dict.items():
        dist = cosine(db_encode, encode)
        if dist < recognition_t and dist < distance:
            name = db_name
            distance = dist

    if name == 'unknown':
        # No match: draw a red box with the 'unknown' label.
        cv2.rectangle(img, pt_1, pt_2, (0, 0, 255), 2)
        cv2.putText(img, name, pt_1, cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
    else:
        # Match: draw a green box around the recognized face.
        cv2.rectangle(img, pt_1, pt_2, (0, 255, 0), 2)
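The listing above relies on a few helpers (load_pickle, get_face, get_encode) and an l2_normalizer that are defined elsewhere in the project and not shown in this section. The following is a minimal sketch of what they could look like, assuming a Keras FaceNet-style encoder fed standardized 160x160 RGB crops; treat the exact preprocessing as an assumption rather than the project's actual code.

import pickle
import cv2
import numpy as np
from sklearn.preprocessing import Normalizer

l2_normalizer = Normalizer('l2')

def load_pickle(path):
    # Load the {name: embedding} dictionary saved during enrollment.
    with open(path, 'rb') as f:
        return pickle.load(f)

def get_face(img, box):
    # Crop the detected face and return it together with the box corners.
    x, y, width, height = box
    x, y = abs(x), abs(y)  # MTCNN can report slightly negative coordinates
    face = img[y:y + height, x:x + width]
    return face, (x, y), (x + width, y + height)

def get_encode(face_encoder, face, size):
    # Standardize the crop and run it through the encoder to get an embedding.
    face = cv2.resize(face, size).astype('float32')
    face = (face - face.mean()) / face.std()
    return face_encoder.predict(np.expand_dims(face, axis=0))[0]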
# Train the convolutional autoencoder and save the result.
# Assumes numpy as np, matplotlib.pyplot as plt, the net object, the utils
# module and the MNIST X_test array are available from earlier.
net.cae_train()
net.cae_save('mnist/net.pkl')
# Plot the loss curve
# net.cae_eval()

# Test the network: encode one digit and decode it again.
imgs = X_test.reshape(-1, 28, 28)
img_small = imgs[30, :, :]
img_en = utils.get_encode(net.cae, img_small)   # encode
img_de = utils.get_decode(net.cae, img_en)      # decode

# Compare: rescale the reconstruction to 8-bit pixel values and display it.
img_pre = np.rint(img_de.reshape(28, 28) * 256).astype(int)
img_pre = np.clip(img_pre, a_min=0, a_max=255)
img_pre = img_pre.astype('uint8')
plt.imshow(img_pre)

def get_picture_array(X, rescale=4):
    # Upscale a 28x28 image by `rescale` for easier side-by-side viewing.
    array = X.reshape(28, 28)
    array = np.clip(array, a_min=0, a_max=255)
    return array.repeat(rescale, axis=0).repeat(rescale, axis=1).astype(np.uint8)

def compare_images(img, img_pre):