    def __init__(self):
        """ initialization
            mtcnn_model:Used to extract face frame, output 5 key points (eyes, mouth, nose tip)  
            facenet_model:Face recognition model, output 128 individual face feature points
        """
        self.mtcnn_model = mtcnn.MTCNN()
        self.threshold = [0.5, 0.6, 0.8]
        self.facenet_model = InceptionResNetV1()
        model_path = 'model/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)
        self.known_face_encodings = []
        self.known_face_names = []

        # Load the face database face_date.pkl
        if os.path.exists('model/face_date.pkl'):
            with open('model/face_date.pkl', 'rb') as fr:
                try:
                    data = pickle.load(fr)
                    self.known_face_encodings = data[0]
                    self.known_face_names = data[1]
                except EOFError:
                    print("face_date.pkl文件为空")
        else:
            with open("model/face_date.pkl", mode='w', encoding='utf-8') as ff:
                pass
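A minimal sketch of how these two models are typically chained to produce an embedding; get_face_encoding and the pixel-scaling constants are assumptions, not part of the original class, and numpy is assumed to be imported as np:

    def get_face_encoding(self, face_array):
        """ Hypothetical helper: face_array is a (1, 160, 160, 3) crop such as
            the output of face_extract() below; returns a 128-d embedding. """
        face_array = (face_array.astype('float32') - 127.5) / 127.5  # assumed FaceNet-style scaling
        embedding = self.facenet_model.predict(face_array)[0]
        return embedding / np.linalg.norm(embedding)  # L2-normalize for distance matching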
def face_extract():
    print('Face The Camera')
    camera = cv2.VideoCapture(0)
    time.sleep(1)

    return_value, image = camera.read()
    camera.release()
    if not return_value:
        raise RuntimeError('Failed to capture an image from the camera')

    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pixels = np.asarray(image)
    detector = mtcnn.MTCNN()
    results = detector.detect_faces(pixels)

    # extract the bounding box from the first face
    x1, y1, width, height = results[0]['box']
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height

    # extract the face
    face = pixels[y1:y2, x1:x2]
    image = Image.fromarray(face)
    image = image.resize((160, 160))
    face_array = np.asarray(image)
    face_array = np.expand_dims(face_array, axis=0)
    return face_array
Example #3
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        frames = VideoMeta(args.video).fps()  # used below to sample one frame per second

        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            if fno % frames != 0:
                continue
            try:
                frame = imutils.resize(frame, width=450)
            except Exception:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = np.dstack([frame, frame, frame])  # restore 3 channels for MTCNN

            if args.detect == "haarcascade":
                results = self.face_cascade.detectMultiScale(frame, 1.3, 5)
                if len(results) != 0:
                    self.crop(frame, results[0],
                              str(output_path / Path(str(fno) + ".jpg")))
            else:
                results = detector.detect_faces(frame)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #4
    def get_frame(self):

        fname = "recognizer/trainingData.yml"
        if not os.path.isfile(fname):
            print("\nPlease train the data first\n")
            return None

        conn = sqlite3.connect('database.db')
        c = conn.cursor()

        # Model For Facial Expression Recognition
        model = FacialExpressionModel("model.json", "model_weights.h5")

        # Model For Face Recognizer
        face_detector = mtcnn.MTCNN()
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read(fname)

        _, img = self.video.read()
        faces = face_detector.detect_faces(img)

        for res in faces:
            x, y, w, h = res["box"]
            x, y = abs(x), abs(y)
            x1, y1 = x+w, y+h
            image = img[y:y1, x:x1]
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Recognizing Face
            ids, conf = recognizer.predict(image)
            c.execute("select roll_no from users where id = (?);", (ids,))
            result = c.fetchall()
            try:
                roll_no = result[0][0]
            except IndexError:
                roll_no = 'Error'

            # LBPH confidence is a distance: lower values mean a better match
            if conf > 50:
                roll_no = "No Match"

            image2 = cv2.resize(image, (48, 48))

            # Predicting Expression
            pred = model.predict_emotion(image2[np.newaxis, :, :, np.newaxis])

            msg = pred + " " + roll_no

            # Mark the Expression if Face is detected
            if roll_no != "Error" and roll_no != "No Match" :
                marked = count(roll_no, pred)
                if(marked):
                    Attendance(roll_no)
                    msg = "MARKED"

            cv2.putText(img, msg, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (255, 255, 0), 2)
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)

        conn.close()
        _, jpeg = cv2.imencode('.jpg', img)
        return jpeg.tobytes()
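Since get_frame() returns raw JPEG bytes, it is typically the backend of an MJPEG stream; a minimal Flask sketch of that usage, assuming the class above is called VideoCamera (the class name and route are assumptions):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # yield each encoded frame as one part of a multipart MJPEG response
    while True:
        frame = camera.get_frame()
        if frame is None:
            break
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')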
Example #5
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        #frames = VideoMeta(args.video).fps()
        #print(meta)
        e = TfPoseEstimator(get_graph_path("mobilenet_thin"),
                            target_size=(432, 368))
        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            #if (fno % frames != 0):
            #   continue
            try:
                # imutils keeps the aspect ratio; width takes precedence when both are given
                frame = imutils.resize(frame, width=432, height=368)
            except Exception:
                break
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #frame = np.dstack([frame, frame, frame])

            humans = e.inference(frame)
            image = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
            print("humans:", humans)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", image)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #6
def Entry(uname, your_roll_no, branch):
    # Connecting database
    conn = sqlite3.connect('database.db')
    if not os.path.exists('dataset'):
        os.makedirs('dataset')
    c = conn.cursor()

    # check for duplicates before opening the camera, so neither the camera
    # nor the database connection is left dangling on an early return
    new_folder = str(your_roll_no) + "-" + str(uname) + "-" + str(branch)
    if os.path.exists("dataset/" + new_folder):
        conn.close()
        return "Student already exists!!!"
    os.mkdir("dataset/" + new_folder)

    face_detector = mtcnn.MTCNN()
    cap = cv2.VideoCapture(0)

    c.execute('INSERT INTO users (roll_no) VALUES (?)', (your_roll_no, ))
    uid = c.lastrowid
    register(uname, your_roll_no, branch)  # register the student once, up front
    sampleNum = 0

    while True:
        ret, img = cap.read()

        # Detecting faces from live Camera and cropping the faces
        faces = face_detector.detect_faces(img)
        for res in faces:
            x, y, w, h = res['box']
            x, y = abs(x), abs(y)
            x1, y1 = x + w, y + h
            sampleNum = sampleNum + 1
            image = img[y:y1, x:x1]
            image = cv2.resize(image, (256, 320))

            # Saving the cropped faces with ID
            cv2.imwrite(
                "dataset/" + new_folder + "/Student." + str(uid) + "." +
                str(sampleNum) + ".jpg", image)
            cv2.rectangle(img, (x, y), (x1, y1), (255, 0, 0), 2)
            cv2.waitKey(20)
        cv2.waitKey(1)
        if sampleNum >= 15:
            break

    cap.release()
    conn.commit()
    conn.close()
    cv2.destroyAllWindows()

    # Training the Dataset
    TrainDataset()

    return "Student Data Registered!!!"
Example #7
def detect_face(img):
    pixels = np.asarray(img)
    detector = mtcnn.MTCNN()
    faces = detector.detect_faces(pixels)
    if len(faces) != 1:
        # bail out instead of indexing into an empty or ambiguous result
        print("Either no or multiple faces were found in the captured image.")
        return None
    x1, y1, width, height = faces[0]['box']
    x1, y1 = abs(x1), abs(y1)  # MTCNN can return slightly negative coordinates
    x2 = x1 + width
    y2 = y1 + height
    face = pixels[y1:y2, x1:x2]
    return face
Example #8
def preprocessing(directory_path, save_path, required_size=(225, 225)):
    detector = dlib.get_frontal_face_detector()
    detecto_mtcnn = mtcnn.MTCNN()
    train_paths = glob(fr"{directory_path}\*")

    if not os.path.exists(f"{save_path}"):
        os.mkdir(fr"{save_path}")
    else:
        pass
    for path in tqdm(train_paths):
        name = path.split("\\")[-1]
        images = glob(f"{path}\\*")
        for image_path in images:
            try:
                temp_path = image_path.split("\\")[-1]
                image = cv2.imread(image_path)
                # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                face_recs = detector(image, 1)
                face = None
                if len(face_recs) == 1:
                    x = face_recs[0].left()
                    y = face_recs[0].top()
                    w = face_recs[0].right() - x
                    h = face_recs[0].bottom() - y
                    face = cv2.resize(image[y:y + h, x:x + w], required_size, interpolation=cv2.INTER_LINEAR)

                else:
                    print(fr"{name}")
                    faces = detecto_mtcnn.detect_faces(image)
                    if faces:
                        # keep the first MTCNN detection as the fallback crop
                        x, y, width, height = faces[0]['box']
                        face = cv2.resize(image[y:y + height, x:x + width], required_size,
                                          interpolation=cv2.INTER_LINEAR)
                os.makedirs(fr"{save_path}\{name}", exist_ok=True)
                if face is not None:
                    cv2.imwrite(fr"{save_path}\{name}\{temp_path}", face)
            except Exception:
                traceback.print_exc()
Example #9
def main():
    ch = 'y'
    detector = mtcnn.MTCNN()
    while(ch=='y' or ch=='Y'):
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            sys.exit('An error occurred while initializing the camera.')
        i=0
        
        name = input('Enter name : ')
        roll = input('Enter roll no. : ')
        year = input('Enter graduation year : ')
        branch = input('Enter branch : ')
        
        isImageDirPresent, path = imagePath(roll)
        if isImageDirPresent=='end':
            continue
        print('Directory created successfully.')
        print('Alright. Good to go. Say cheese...')
        
#       To capture an image, press 'c'. To end capturing, press 'q'.
        while True:
            try:
                ret, frame = cap.read()
                frame = cv2.flip(frame, 1)
                cv2.imshow('Capturing', frame)
                key = cv2.waitKey(1) & 0xff  # poll once per loop so keypresses are not missed
                if key == ord('c'):
                    face = detectFace(frame)  # validate that a face is present
                    filename = os.path.join(path, str(i) + '.jpg')
                    cv2.imwrite(filename, frame)
                    i = i + 1
                elif key == ord('q'):
                    break
            except Exception:
                print("Something went wrong")
        cap.release()
        cv2.destroyAllWindows()
        print('Saved {} images of {}.'.format(i, name))
        print('')
        ch = input('Run again ? (y/n) : ')
        
    print('*******end*******')
Example #10
def face_extract(file, size):
    image = cv2.imread(file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pixels = np.asarray(image)
    detector = mtcnn.MTCNN()
    results = detector.detect_faces(pixels)

    # extract the bounding box from the first face
    x1, y1, width, height = results[0]['box']
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height

    # extract the face
    face = pixels[y1:y2, x1:x2]
    image = Image.fromarray(face)
    image = image.resize(size)
    face_array = np.asarray(image)
    return face_array
Example #11
def collect_data(img):
    detector = mtcnn.MTCNN()
    image_size = detector.image_size
    input_image_size = detector.input_image_size

    bounding_boxes, img = detector.run_mtcnn(img)
    nrof_faces = bounding_boxes.shape[0]

    if nrof_faces > 0:
        det = bounding_boxes[:, 0:4]
        img_size = np.asarray(img.shape)[0:2]

        if nrof_faces > 1:
            bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                           det[:, 1])
            img_center = img_size / 2
            offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                 (det[:, 1] + det[:, 3]) / 2 - img_center[0]])

            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)
            det = det[index, :]

        det = np.squeeze(det)
        bb_temp = np.zeros(4, dtype=np.int32)

        bb_temp[0] = det[0]
        bb_temp[1] = det[1]
        bb_temp[2] = det[2]
        bb_temp[3] = det[3]

        cropped_temp = img[bb_temp[1]:bb_temp[3], bb_temp[0]:bb_temp[2], :]
        cropped_temp = facenet.flip(cropped_temp, False)
        scaled_temp = cv2.resize(cropped_temp, (image_size, image_size),
                                 interpolation=cv2.INTER_LINEAR)  # scipy.misc.imresize was removed in SciPy 1.3
        scaled_temp = cv2.resize(scaled_temp,
                                 (input_image_size, input_image_size),
                                 interpolation=cv2.INTER_CUBIC)
        scaled_temp = facenet.prewhiten(scaled_temp)
        scaled_reshape = scaled_temp.reshape(-1, input_image_size,
                                             input_image_size, 3)

        return scaled_reshape

    return None  # no face detected; avoids returning an unbound variable
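A sketch of what collect_data's output would typically feed into; the placeholder names match the TF1 FaceNet setup shown in Example #29 near the end of this page, and the surrounding session is assumed:

# hypothetical usage, assuming a tf.Session with a loaded FaceNet model
scaled_reshape = collect_data(img)
if scaled_reshape is not None:
    feed_dict = {images_placeholder: scaled_reshape,
                 phase_train_placeholder: False}
    emb = sess.run(embeddings, feed_dict=feed_dict)  # shape: (1, embedding_size)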
Example #12
def extract_face(filename, required_size=(160, 160)):
#     print(filename)
    image = Image.open(filename)
    image = image.convert('RGB')
    
    pixels = np.asarray(image)
    detector = mtcnn.MTCNN()
    faces = detector.detect_faces(pixels)
    if len(faces)==0:
        #print('No faces were detected in the image {}'.format(filename))
        return 'end', 'end'
    x1, y1, width, height = faces[0]['box']
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    face = pixels[y1:y2, x1:x2]
    image = Image.fromarray(face)
    image = image.resize(required_size)
    image.save(filename)
    face_array = np.asarray(image)
    return face_array, 'true'
Example #13
def recognizeEmotion(model):
    pixels = cv2.cvtColor(get_new_img_webcam(), cv2.COLOR_BGR2RGB)
    detector = mtcnn.MTCNN()
    faces = detector.detect_faces(pixels)
    print(faces)
    if not faces:
        raise RuntimeError('No face was detected in the webcam frame')
    x, y, width, height = faces[0]['box']
    face = pixels[y:y + height, x:x + width]
    fig, axs = plt.subplots(1, 2, figsize=(12, 5))
    axs[0].imshow(pixels)
    axs[1].imshow(face)
    plt.show()
    face = Image.fromarray(face, 'RGB')
    face = transforms.Resize((224, 224))(face)
    face = transforms.ToTensor()(face).unsqueeze(1)

    # https://stackoverflow.com/questions/56789038/runtimeerror-given-groups-1-weight-of-size-64-3-3-3-expected-input4-50
    face = face.permute(1, 0, 2, 3)
    output = model(face)
    pred = output.argmax(dim=1, keepdim=True)
    class_ = getClassById(pred)
    return class_
Example #14
def save_img():
    webcam = cv2.VideoCapture(0)
    while True:
        try:
            check, frame = webcam.read()
            cv2.imshow("capturing", frame)
            key = cv2.waitKey(1)
            if key == ord('s'):
                cv2.imshow("captured image", frame)
                pixels = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                detector = mtcnn.MTCNN()
                results = detector.detect_faces(pixels)
                x1, y1, width, height = results[0]['box']
                x1, y1 = abs(x1), abs(y1)
                x2, y2 = x1 + width, y1 + height
                # keep a 15 px margin around the box, clamped to the image bounds
                face = pixels[max(y1 - 15, 0):y2 + 15, max(x1 - 15, 0):x2 + 15]
                image = Image.fromarray(face)
                image = image.resize((96, 96))
                face = np.array(image)
                opencvImage = cv2.cvtColor(face, cv2.COLOR_RGB2BGR)
                n = input("Enter name:")
                cv2.imwrite(filename="./images/" + n + ".jpg", img=opencvImage)
                print("image saved!")
                webcam.release()
                cv2.waitKey(1650)
                cv2.destroyAllWindows()
                break
            elif key == ord('q'):
                print("off...")
                webcam.release()
                cv2.destroyAllWindows()
                break

        except (KeyboardInterrupt):
            print("off...")
            webcam.release()
            cv2.destroyAllWindows()
            break
Example #15
    def _prepare_detector(self):
        """Prepare the MTCNN detector.
        This function should be invoked from a suitable Keras context
        (with controlled TensorFlow Graph and Session), that is
        usually it will be called via :py:meth:`run_tensorflow`.
        """
        # Initialize the MTCNN detector
        detector = mtcnn.MTCNN()

        # The last part of the preparation process of MTCNN is to
        # create the models' (P-net, R-net and O-net) predict
        # functions. It may be possible to achieve this by calling
        # model._make_predict_function() for each of them, but this is
        # discouraged as it uses a private Keras API. The recommended
        # way to create the predict function is to call predict, as
        # the predict function will be automatically compiled on first
        # invocation. Hence we provide some dummy image and invoke
        # predict for all three networks by calling
        # detector.detect_faces().
        image = np.random.randint(0, 255, (200, 200, 3), np.uint8)
        _ = detector.detect_faces(image)
        self._detector = detector
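A quick way to observe the warm-up effect this method hides; a self-contained sketch that only assumes the mtcnn package and numpy:

import time
import numpy as np
import mtcnn

detector = mtcnn.MTCNN()
image = np.random.randint(0, 255, (200, 200, 3), np.uint8)

t0 = time.time(); detector.detect_faces(image); t1 = time.time()
t2 = time.time(); detector.detect_faces(image); t3 = time.time()
print('first call: %.2fs, second call: %.2fs' % (t1 - t0, t3 - t2))
# the first call is typically much slower because the predict functions
# are compiled on first invocation, which is exactly what the warm-up absorbs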
Example #16
def start_camera():
    #cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)

    if vc.isOpened(): # try to get the first frame
        rval, frame = vc.read()
    else:
        rval = False
    detector = mtcnn.MTCNN()  # construct the detector once, not on every frame
    while rval:
        #cv2.imshow("preview", frame)
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        result = detector.detect_faces(frame)
        if len(result) > 0:
            image = crop_image(result, frame)
            image_resize = cv2.resize(image, (224, 224))
            image_reshape = np.reshape(image_resize, [1, 224, 224, 3])
            prediction = model.predict(image_reshape)
            #print(prediction)
        if key == 27: # exit on ESC
            break
    vc.release()
Example #17
def get_faces(file:str):
    """for file (path to file), open video and extract facial locations"""
    
    j = 0
    cap = cv2.VideoCapture(file)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    faces = []
    i = 0
    detector = mtcnn.MTCNN()
    while cap.isOpened():
        ret, frame = cap.read()
        i += 1
        if ret:
            print('file %d frame %1.2f' % (j, i / length))
            f = detector.detect_faces(frame)
            faces.append([x['box'] for x in f])
        else:
            break
    print('writing to ' + file + 'faces.p')
    pickle.dump(faces, open(file+'faces.p', 'wb'))
    cap.release()
    cv2.destroyAllWindows()
Example #18
def extract_face(filename, required_size=None):
    # avoid the mutable-default-argument pitfall (see the demonstration after this function)
    if required_size is None:
        required_size = (160, 160)

    image = Image.open(filename)
    image = image.convert('RGB')

    pixels = np.asarray(image)
    detector = mtcnn.MTCNN()

    results = detector.detect_faces(pixels)

    x1, y1, width, height = results[0]['box']

    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height

    face = pixels[y1:y2, x1:x2]

    image = Image.fromarray(face)
    image = image.resize(required_size)
    return np.asarray(image)
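The required_size=None idiom above sidesteps Python's shared-mutable-default pitfall; a two-call demonstration of why a mutable default would misbehave (a tuple default such as (160, 160) would also be safe, since tuples are immutable):

def resize_to(size=[160, 160]):   # the list is created once, at function definition time
    size.append('!')
    return size

print(resize_to())  # [160, 160, '!']
print(resize_to())  # [160, 160, '!', '!'] -- the same list object, mutated across calls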
Example #19
    def __init__(self, face_cascade_path, eye_cascade_path,
                 shape_predictor_path):

        self.cap = cv2.VideoCapture(0)
        _, self.capture = self.cap.read()

        self.face_cascade = cv2.CascadeClassifier(face_cascade_path)
        self.eye_cascade = cv2.CascadeClassifier(eye_cascade_path)

        self.pose_predictor = dlib.shape_predictor(shape_predictor_path)
        self.mtcnn_detector = mtcnn.MTCNN()
        self.hog_detector = dlib.get_frontal_face_detector()

        self.current_state = {
            "face": (0, 0, 100, 100),
            "right_eye": (0, 0, 10, 10),
            "left_eye": (0, 0, 10, 10),
            "right_pupil": (0, 0),
            "left_pupil": (0, 0),
            "pose": [0 for i in range(0, 68 * 2)]
        }

        cv2.startWindowThread()
Example #20
import cv2
import mtcnn

face_detector = mtcnn.MTCNN(min_face_size=50)
img = cv2.imread('Coffee.jpeg')
conf_t = 0.90

img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = face_detector.detect_faces(img_rgb)

print(results)
for res in results:
    x1, y1, width, height = res['box']
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height

    confidence = res['confidence']
    if confidence < conf_t:
        continue
    key_points = res['keypoints'].values()

    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 0), thickness=6)
    # cv2.putText(img, f'conf: {confidence:.3f}', (x1, y1), cv2.FONT_ITALIC, 1, (0, 0, 255), 2)

    for point in key_points:
        cv2.circle(img, point, 4, (0, 255, 0), thickness=-1)

cv2.imshow('Sameer', img)
cv2.imwrite('Coffee_face_detected.jpeg', img)
cv2.waitKey(0)
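For reference, each entry returned by detect_faces is a dict of the shape sketched below, which is what the loop above unpacks:

import mtcnn
import numpy as np

detector = mtcnn.MTCNN()
results = detector.detect_faces(np.zeros((200, 200, 3), np.uint8))
# each element of results (when a face is found) looks like:
# {'box': [x1, y1, width, height],
#  'confidence': 0.99,
#  'keypoints': {'left_eye': (x, y), 'right_eye': (x, y), 'nose': (x, y),
#                'mouth_left': (x, y), 'mouth_right': (x, y)}}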
Example #21
def load_extractor():
    global EXTRACTOR
    if EXTRACTOR is None:
        EXTRACTOR = mtcnn.MTCNN()
    return EXTRACTOR


Example #22
# End
#-----------------------------------------------------------------------------

# build function of face detection and recognition
#-----------------------------------------------------------------------------
encoder_model = 'facenet_keras.h5'
required_size = (160, 160)  # default size
face_detector = mtcnn.MTCNN()  # set face detector
face_encoder = load_model(encoder_model)


# build a function to generate an embedding from single face
def get_embedding(photo):
    '''
    photo: path to a photo file; only jpg and png formats are supported
    '''
    # get features of face from photo
    img = cv2.imread(photo)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = face_detector.detect_faces(img_rgb)

    # convert the features of face into embedding of model
    embeddings = []
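The snippet above is cut off after embeddings = []; a sketch of how such a function typically continues, written as a separate hypothetical helper that reuses the module-level face_detector, face_encoder and required_size defined above, and assumes numpy is imported as np and FaceNet-style pixel scaling:

def get_embedding_sketch(photo):
    # hypothetical reimplementation, not part of the original snippet
    img = cv2.imread(photo)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = face_detector.detect_faces(img_rgb)
    embeddings = []
    for res in results:
        x1, y1, w, h = res['box']
        x1, y1 = abs(x1), abs(y1)
        face = cv2.resize(img_rgb[y1:y1 + h, x1:x1 + w], required_size)
        face = (face.astype('float32') - 127.5) / 127.5  # assumed scaling
        embeddings.append(face_encoder.predict(np.expand_dims(face, 0))[0])
    return embeddings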
Example #23
import cv2, mtcnn
import numpy as np
detect_net = mtcnn.MTCNN()

imgpath = '../Data/Face_Normalization/'
imglist = '../Data/Face_Normalization/crop_list.txt'
savepath = '../Data/Face_Normalization/'
boxes = detect_net.crop_image(imgpath, imglist, savepath)
Example #24
import numpy as np
import cv2 as cv
import mtcnn
from matplotlib import pyplot as plt

detect_face = mtcnn.MTCNN()
cap = cv.VideoCapture(0)

while True:
    ret, frame = cap.read()

    frame_rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    result = detect_face.detect_faces(frame_rgb)
    print(frame_rgb.shape)

    mask = np.zeros(frame_rgb.shape, np.uint8)

    for res in result:
        x1, y1, w, h = res['box']
        x2, y2 = x1 + w, y1 + h

        print("-------", res)

        cv.rectangle(frame_rgb, (x1, y1), (x2, y2), (0, 0, 255), thickness=2)

        mask[y1:y2, x1:x2, :] = 1

        frame_rgb = frame * mask[:, :, 1, np.newaxis]

    # show the masked frame once per captured frame and let the window refresh
    cv.imshow("Frame", frame_rgb)
    if cv.waitKey(1) & 0xff == 27:  # exit on ESC
        break

cap.release()
cv.destroyAllWindows()
Example #25
 def __init__(self):
     self.model = mtcnn.MTCNN(min_face_size=20)
Example #26
import tensorflow as tf
import mtcnn

export_path = './mtcnn'

model = mtcnn.MTCNN()
print(dir(model))

with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(sess,
                               export_path,
                               inputs={'input_image': model.input},
                               outputs={t.name: t
                                        for t in model.outputs})
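A minimal sketch of loading the exported directory back with the TF1 loader API; simple_save tags the graph with the default 'serve' tag:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, ['serve'], export_path)
    # the graph now contains the exported input/output tensors, addressable by name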
Example #27
def predict(filename, modelname):
    app_root = os.path.dirname(os.path.abspath(__file__))
    app_root = os.path.join(app_root, 'images')
    print('# load the model')
    model = load_model(os.path.join(app_root, 'facenet_keras.h5'))
    print('loaded the model')
    # load image from file
    print('# load the file')
    image = Image.open(os.path.join(app_root, filename))
    print('# opened the file')
    # convert to RGB, if needed
    image = image.convert('RGB')
    # convert to array
    pixels = np.asarray(image)
    os.remove(os.path.join(app_root, filename))
    # create the detector, using default weights
    detector = mtcnn.MTCNN()
    # detect faces in the image
    results = detector.detect_faces(pixels)
    # extract the bounding box from the first face
    faces = list()
    print('# detecting faces')
    for i in range(0, len(results)):
        x1, y1, width, height = results[i]['box']
        # bug fix
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + width, y1 + height
        # extract the face
        face = pixels[y1:y2, x1:x2]
        # resize pixels to the model size
        image = Image.fromarray(face)
        image = image.resize((160, 160))
        face_array = np.asarray(image)
        faces.append(face_array)
        #image.save(str(i)+".jpg")
        #pyplot.subplot(2, 7, i+1)
        #pyplot.axis('off')
        #pyplot.imshow(face_array)
    #pyplot.show()
    facesarray = np.asarray(faces)
    #getembeddings
    newfaceX = list()
    for face_pixels in facesarray:
        embedding = get_embedding(model, face_pixels)
        newfaceX.append(embedding)
    newfaceX = np.asarray(newfaceX)
    #print(newfaceX.shape)
    #normalising
    in_encoder = Normalizer(norm='l2')
    newfaceX = in_encoder.transform(newfaceX)
    loaded_model = pickle.load(
        open(os.path.join(app_root, modelname + '.sav'), 'rb'))
    predictions = loaded_model.predict(newfaceX)
    data = load(os.path.join(app_root, modelname + 'encoder.npz'))
    trainy, testy = data['arr_0'], data['arr_1']
    out_encoder = LabelEncoder()
    out_encoder.fit(trainy)
    trainy = out_encoder.transform(trainy)
    testy = out_encoder.transform(testy)
    predict_names = out_encoder.inverse_transform(predictions)
    return predict_names


#predict('file.jpg','ootypicsmodel.sav')
Example #28
 def __init__(self):
     print("MTCNN version: " + mtcnn.__version__)
     print("FaceDetection class initialized")
     # create the detector, using default weights
     self.detector = mtcnn.MTCNN()
Example #29
import threading
import mtcnn
import tensorflow as tf   # needed for tf.Session and graph lookups below
import facenet            # FaceNet helper module providing load_model
from flask import jsonify
import json
import requests

button_flag  = [1,1,1,1,1,1]
feature_list = []
button_name = ['','evans','hermsworth','jeremy','mark','olsen']


input_img="abc.jpg"
modeldir = './model/20180402-114759.pb'
npy='./npy'

detector = mtcnn.MTCNN()
image_size = detector.image_size
input_image_size = detector.input_image_size

sess = tf.Session()
print('Loading Model')
with sess.as_default():
    facenet.load_model(modeldir)
    
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]


print('Start Recognition')
Example #30
 def __init__(self):
     self.face_encoder, _ = get_network('FaceEncoder', 'test')
     self.detector = mtcnn.MTCNN()
     self.image_cnt = 0