Example #1
import cv2 as cv   # assumed imports, inferred from the aliases used in the body
import cvlib as c

def simpleFaceCancellationII():
    """Blur out faces in the webcam feed, processing every third frame."""
    cap = cv.VideoCapture(0)
    frameCnt = 1
    currentFrame = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if currentFrame <= frameCnt:
            print("Skipping\n")
            currentFrame = currentFrame + 1
            continue
        currentFrame = 0
        faces, confidences = c.detect_face(frame)
        print("Score: " + str(confidences))
        # cvlib returns corner coordinates [startX, startY, endX, endY]
        for (startX, startY, endX, endY) in faces:
            # blur's default border is already BORDER_DEFAULT; the old call
            # passed it as the dst argument by mistake
            frame[startY:endY, startX:endX] = cv.blur(
                frame[startY:endY, startX:endX], (100, 100))
            cv.putText(frame, "Score: " + str(confidences), (startX, startY),
                       cv.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
        cv.imshow('frame', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv.destroyAllWindows()
Example #2
def detect(img):
    # cvlib already returns an empty list when no face is found,
    # so the result can be passed through directly
    face, confidence = cvlib.detect_face(img)
    return face
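
For reference, cvlib's detect_face returns bounding boxes as [startX, startY, endX, endY] corner coordinates plus a parallel list of confidence scores, which is why the examples here slice with img[startY:endY, startX:endX]. A minimal sketch of calling it directly (the image path is hypothetical):

import cv2
import cvlib

img = cv2.imread("people.jpg")  # hypothetical input path

faces, confidences = cvlib.detect_face(img)
for (startX, startY, endX, endY), conf in zip(faces, confidences):
    # boxes are corner points, not x/y/width/height
    cv2.rectangle(img, (startX, startY), (endX, endY), (0, 255, 0), 2)
    print("face at ({}, {}), confidence {:.2f}".format(startX, startY, conf))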
Example #3
def classifayEmotionGender(filename):

    frame = cv2.imread(UPLOAD_FOLDER + filename)

    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    faces, confidence = cv.detect_face(frame)

    for idx, f in enumerate(faces):

        # get corner points of face rectangle
        (startX, startY) = f[0], f[1]
        (endX, endY) = f[2], f[3]

        # draw rectangle over face
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)

        # crop the detected face region
        face_crop = np.copy(frame[startY:endY, startX:endX])

        if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:
            continue
        gender = G.GenderRun(face_crop)
        emotion = E.process_image(face_crop, frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        Y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.putText(frame, "Gender : " + gender, (startX, Y), font, 0.7,
                    (0, 255, 0), 2)
        cv2.putText(frame, "Emotion : " + emotion, (startX, Y - 50), font, 0.7,
                    (0, 255, 0), 2)

    cv2.imwrite(UPLOAD_FOLDER + 'new' + filename, frame)
    new_image_path = File_Path + 'new' + filename  # path to the annotated copy
    return jsonify({"image": new_image_path})
Example #4
def pic(abc):
    path = 'media/media/abc/' + str(abc)
    print(path, type(path))
    img = cv2.imread(os.path.join(settings.BASE_DIR, str(path)))
    #img=cv2.resize(img,(640,640))
    faces, confidences = cv.detect_face(img)
    for face in faces:
        (startX, startY) = face[0], face[1]
        (endX, endY) = face[2], face[3]  # draw rectangle over face
        cv2.rectangle(img, (startX, startY), (endX, endY), (0, 255, 0), 2)
        #cv2.rectangle(img, pt1=(x, y), pt2=(x + w, y + h), color=(255, 0, 0), thickness=5)
        # crop the region of interest (the face area); numpy indexing is [rows, cols] = [y, x]
        roi_gray = img[startY:endY, startX:endX]
        try:
            roi_gray = cv2.cvtColor(roi_gray, cv2.COLOR_BGR2GRAY)
            roi_gray = cv2.resize(roi_gray, (48, 48))
            img_pixels = image.img_to_array(roi_gray)
            img_pixels = np.expand_dims(img_pixels, axis=0)
            img_pixels /= 255
            predictions = model.predict(img_pixels)
            max_index = np.argmax(predictions[0])
            emotions = ('angry', 'disgust', 'fear', 'Happy', 'sad', 'surprise',
                        'normal')
            predicted_emotion = emotions[max_index]
            cv2.putText(img, predicted_emotion, (int(startX), int(startY)),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 5)
        except Exception:
            return False
    cv2.imwrite(os.path.join(settings.BASE_DIR, str(path)), img)
    print("success")
    return True
Example #5
def get_frames_detections(frames):
    detections = []
    for i in frames:
        faces, confidences = cvlib.detect_face(np.asarray(i), threshold=0.2)
        detections.append(faces)

    return detections
Example #6
    def run(self):
        flag = False
        while not flag:
            face, confidence = cv.detect_face(self.cam)
            for idx, f in enumerate(face):
                (startX, startY) = f[0], f[1]
                (endX, endY) = f[2], f[3]

                if 0 <= startX <= self.cam.shape[1] and 0 <= endX <= self.cam.shape[1] and 0 <= startY <= \
                        self.cam.shape[0] and 0 <= endY <= self.cam.shape[0]:

                    face_region = self.cam[startY:endY, startX:endX]

                    face_region1 = cv2.resize(face_region, (224, 224),
                                              interpolation=cv2.INTER_AREA)

                    x = img_to_array(face_region1)
                    x = np.expand_dims(x, axis=0)
                    x = preprocess_input(x)

                    prediction = self.model.predict(x)

                    # classified as not wearing a mask
                    if prediction < 0.5:
                        print("No Mask ({:.2f}%)".format(
                            (1 - prediction[0][0]) * 100))

                    # classified as wearing a mask
                    else:
                        flag = True
                        print("Mask ({:.2f}%)".format(prediction[0][0] * 100))

        self.pro2.start()
Example #7
    def __init__(self):
        # use VideoStream Class variables
        self.videostream = VideoStream()
        self.frame = self.videostream.frame

        # apply face detection
        self.face, self.confidence = cv.detect_face(self.frame)

        # loop through detected faces
        for self.idx, self.f in enumerate(self.face):
            # get the corner point of the rectangle
            self.startX, self.startY = self.f[0], self.f[1]
            self.endX, self.endY = self.f[2], self.f[3]

            cv2.rectangle(self.frame, (self.startX, self.startY), (self.endX, self.endY), (0,255,0), 2)
            self.face_crop = np.copy(self.frame[self.startY:self.endY, self.startX:self.endX])

            if self.face_crop.shape[0] < 10 or self.face_crop.shape[1] < 10:
                continue

            # preprocessing for gender detection model
            self.face_crop = cv2.resize(self.face_crop, (96,96))
            self.face_crop = self.face_crop.astype("float") / 255.0
            self.face_crop = img_to_array(self.face_crop)
            self.face_crop = np.expand_dims(self.face_crop, axis=0)

            GFR()

        print("face_detection working")
Example #8
def detectGender(image, model):
    if image is None:
        print("Could not read input image")
        exit()
    # Detect faces in the image
    face, confidence = cv.detect_face(image)
    classes = ['Male', 'Female']
    # Take the very first face
    if len(face) > 0:
        (startX, startY) = face[0][0], face[0][1]
        (endX, endY) = face[0][2], face[0][3]
    else:
        return "Couldn't detect a face"
    # Crop the face
    croppedFace = np.copy(image[startY:endY, startX:endX])
    # Resize and normalize the face
    croppedFace = cv2.resize(croppedFace, (96, 96))
    croppedFace = croppedFace.astype("float") / 255.0
    croppedFace = img_to_array(croppedFace)
    croppedFace = np.expand_dims(croppedFace, axis=0)
    # Predict the gender
    conf = model.predict(croppedFace)[0]
    # Get the label which has the maximum accuracy
    idx = np.argmax(conf)
    label = classes[idx]
    return label + " | Confidence: " + str(int(
        max(conf[0], conf[1]) * 100)) + "%"
Example #9
def face_blur(img):
    coor, _ = cvlib.detect_face(img)
    # cvlib returns corner coordinates, so unpack as two corner points
    for (x1, y1, x2, y2) in coor:
        roi = img[y1:y2, x1:x2]
        img[y1:y2, x1:x2] = cv2.medianBlur(roi, ksize=151)
    return img
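
A minimal usage sketch for face_blur, assuming the imports its body implies (cv2 and cvlib) and hypothetical file paths:

import cv2

img = cv2.imread("group_photo.jpg")  # hypothetical input path
cv2.imwrite("group_photo_blurred.jpg", face_blur(img))

Note that cv2.medianBlur requires an odd ksize, which the example's 151 satisfies.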
Example #10
def get_single_frame(file_name,output_loc, face = False):
    try:
        os.mkdir(output_loc)
    except OSError:
        pass
    # Start capturing the feed
    cap = cv2.VideoCapture(file_name)
    while cap.isOpened():
        cap.set(cv2.CAP_PROP_POS_MSEC, 1000)
        ret, frame = cap.read()
        if not ret:
            cap.release()
            break
        if face:
            faces, chance = cvlib.detect_face(frame)
            faces[0][1] = np.clip(faces[0][1],0,len(frame))
            faces[0][3] = np.clip(faces[0][3], 0, len(frame))
            faces[0][0] = np.clip(faces[0][0], 0, len(frame[0]))
            faces[0][2] = np.clip(faces[0][2], 0, len(frame[0]))
            print(frame)
            frame = frame[faces[0][1]:faces[0][3],faces[0][0]:faces[0][2]]
            print(frame)
            print(faces,chance)
        # str.rstrip strips a character set, not a suffix, so remove the extension with splitext
        base = os.path.splitext(file_name.split('\\')[-1])[0]
        cv2.imwrite(output_loc + "\\" + base + "_1.jpg", frame)
        # cap.set(cv2.CAP_PROP_POS_MSEC, (500))
        # ret, frame = cap.read()
        # cv2.imwrite(output_loc +"\\"+ file_name.split('\\')[-1].rstrip(".avi") + "_2.jpg", frame)
        # cap.set(cv2.CAP_PROP_POS_MSEC, (1000))  # added this line
        # ret, frame = cap.read()
        # cv2.imwrite(output_loc +"\\"+ file_name.split('\\')[-1].rstrip(".avi") + "_3.jpg", frame)
        # #print(output_loc +"\\"+ file_name.split('\\')[-1].rstrip(".mov") + ".jpg")
        cap.release()
        break
Example #11
 def face_scanner(self):
     while True:
         if len(photo_list) > self.processed:
             image = self.download_photo(photo_list[self.processed]['url'])
             if image is None:
                 self.processed += 1
                 continue
             faces, _ = cv.detect_face(image)
             for f in faces:
                 h, w = image.shape[:2]  # image.shape is (height, width)
                 # clamp the coordinates to the image size
                 x0, x1 = np.clip(f[0:3:2], 0, w)
                 y0, y1 = np.clip(f[1:4:2], 0, h)
                 if x0 == x1 or y0 == y1:  #if the capped coordinates are equal, the image is outside of the bounds; ignore it
                     break
                 gender = self.detect_gender(image, [x0, y0, x1, y1])
                 processed_list[gender] += 1
                 if gender == 'man':
                     running_average_men.append()
                 else:
                     running_average_women.append()
             processed_list['images'].append(
                 photo_list[self.processed]['url'])
             if len(processed_list['images']) > 10:
                 processed_list['images'].pop(0)
             self.processed += 1
         else:
             time.sleep(self.interval)
Example #12
def DetectGender(image):
    face, confidence = cv.detect_face(image)
    classes = ['man', 'woman']
    # loop through detected faces
    for idx, f in enumerate(face):

        # get corner points of face rectangle
        (startX, startY) = f[0], f[1]
        (endX, endY) = f[2], f[3]

        # draw rectangle over face
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)

        # crop the detected face region
        face_crop = np.copy(image[startY:endY, startX:endX])

        # pre-processing for gender detection model
        face_crop = cv2.resize(face_crop, (96, 96))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)

        conf = model.predict(face_crop)[0]
        idx = np.argmax(conf)
        label = classes[idx]
        accuracy = conf[idx] * 100
        # returns for the first detected face only; falls through to None when no face is found
        return label, accuracy
Example #13
 def face_detection(self):
     faces, confidences = cvlib.detect_face(self.frame)
     # loop through detected faces and add bounding box
     for face in faces:
         (startX, startY) = face[0], face[1]
         (endX, endY) = face[2], face[3]  # draw rectangle over face
         cv2.rectangle(self.frame, (startX, startY), (endX, endY), (0, 255, 0), 2)  # display output
Example #14
def classify(frame):
    labels = []
    face, confidence = cv.detect_face(frame)

    for idx, f in enumerate(face):
        (startX, startY) = f[0], f[1]
        (endX, endY) = f[2], f[3]

        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)

        face_crop = np.copy(frame[startY:endY, startX:endX])

        if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:
            continue

        face_crop = cv2.resize(face_crop, (96, 96))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)

        conf = model.predict(face_crop)[0]
        idx = np.argmax(conf)
        label = classes[idx]
        labels.append(label)

    return labels
Example #15
def transformation():
    # if flask.request.content_type != 'image/jpeg':
    #     return flask.Response(response='This predictor only supports JPEG Image', status=415, mimetype='text/plain')

    # Read image
    file = flask.request.files['image']
    fileData = file.read()
    image = cv2.imdecode(np.frombuffer(fileData, np.uint8), cv2.IMREAD_COLOR)

    print(image)

    # Detect face in image
    faces, confidence = cvlib.detect_face(image)
    if not faces:
        return flask.Response(response='No face detected',
                              status=400, mimetype='text/plain')
    # use the first detected face
    face = faces[0]
    (startX, startY) = face[0], face[1]
    (endX, endY) = face[2], face[3]

    # Crop face
    face_crop = np.copy(image[startY:endY, startX:endX])
    # preprocessing for gender detection model
    face_crop = cv2.resize(face_crop, (96, 96))
    face_crop = face_crop.astype("float") / 255.0
    face_crop = img_to_array(face_crop)
    face_crop = np.expand_dims(face_crop, axis=0)

    confidence = InferenceService.predict(face_crop)[0]
    index = np.argmax(confidence)

    return jsonify({
        'predictedClass': classes[index],
        'confidence': str(confidence[index])
    })
Example #16
def detect_face_video(img):
    label_dict={0:'Female',1:'Male'}
    color_dict={0:(0,255,0),1:(0,0,255)}
    #Using OpenCV detect face for face detection available in cvlib library
    faces, confidences = cv.detect_face(img) 
#     face_coord = face_cascade.detectMultiScale(img,scaleFactor = 1.2,minNeighbors = 5)  # This gives us the coordinates of rectangle drawn across the face 
#     for x,y,w,h in face_coord:
    for index,face in enumerate(faces):
        x1,y1 = face[0],face[1]
        x2,y2 = face[2],face[3]
        image = img[y1:y2,x1:x2]
        image_gry = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        resize_img = cv2.resize(image_gry,(200,200))
        norm_img = resize_img/255.0
        reshaped=np.reshape(norm_img,(1,200,200,1))
        #reshaped = np.vstack([reshaped])

        try:
            with tf.device('/device:GPU:5'):
                gender_res = gen_model.predict(reshaped)
        except RuntimeError as e:
            # skip this face if the GPU prediction fails,
            # otherwise gender_res would be undefined below
            print(e)
            continue
        label = 0 if gender_res[0][0] < 0.5 else 1

        cv2.rectangle(img,(x1,y1),(x2,y2),color_dict[label],thickness = 5)
#         cv2.rectangle(img,(x1,y1),(x2,y2),color_dict[label],)
        cv2.putText(img, label_dict[label], (x1, y1-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)
    return img
Example #17
    def predict(self, image_path, confidence=False):
        # read input image
        image = cv2.imread(image_path)

        if image is None:
            print("Could not read input image")
            exit()

        # preprocess image: detect faces (keep the detector output out of the
        # `confidence` flag parameter, which the original code shadowed)
        face, _ = cv.detect_face(image)

        # get corner points of the first face rectangle
        face = face[0]
        (startX, startY) = face[0], face[1]
        (endX, endY) = face[2], face[3]

        # crop the detected face region
        face_crop = np.copy(image[startY:endY, startX:endX])

        # preprocessing for gender detection model
        face_crop = cv2.resize(face_crop, (96, 96))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)

        # apply gender detection on face
        conf = self.model.predict(face_crop)[0]

        # get label with max accuracy
        idx = np.argmax(conf)

        if confidence:
            return conf
        return idx
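
Example #17 returns only the argmax index (or the raw confidence vector when confidence=True), so the caller maps the index to a label. A hedged usage sketch, assuming a two-class man/woman model like the other examples; the GenderPredictor name and image path are hypothetical:

classes = ['man', 'woman']  # assumed label order, matching the other examples

predictor = GenderPredictor()              # hypothetical class exposing predict()
idx = predictor.predict("person.jpg")      # hypothetical image path
conf = predictor.predict("person.jpg", confidence=True)
print("{}: {:.2f}%".format(classes[idx], conf[idx] * 100))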
Example #18
    def detect(self, image):
        h, w = image.shape[:2]
        if self.use_cvlib:
            faces, confidences = cvlib.detect_face(image)
        else:
            # TODO: Make constant image size, and mean value of rgb
            # (104.0, 177.0, 123.0) is the mean value of rgb, this can be use as normalization

            blob = cv2.dnn.blobFromImage(image=cv2.resize(image, (300, 300)),
                                         scalefactor=1,
                                         size=(300, 300),
                                         mean=(104.0, 177.0, 123.0))
            self.detector.setInput(blob)
            detections = self.detector.forward()

            faces = []
            confidences = []

            for i in range(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated with the prediction
                confidence = detections[0, 0, i, 2]

                # ignore detections with low confidence
                if confidence < self.threshold:
                    continue

                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (start_x, start_y, end_x, end_y) = box.astype("int")
                faces.append([start_x, start_y, end_x, end_y])
                confidences.append(confidence)

        # clamp the boxes to the image bounds
        faces = [(max(face[0], 0), max(face[1], 0),
                  min(face[2], w), min(face[3], h)) for face in faces]
        return faces, confidences
Example #19
    def get_frame(self):
        global sample_num
        global captured_num
        global face_num
        ret, frame = self.webcam.read()
        sample_num = sample_num + 1
        if not ret:
            print("Could not read frame")
            exit()

        face, confidence = cv.detect_face(frame)

        if len(confidence) == 1:
            # loop through detected faces
            for idx, f in enumerate(face):

                (startX, startY) = f[0], f[1]
                (endX, endY) = f[2], f[3]

                if sample_num % 8 == 0:
                    captured_num = captured_num + 1
                    print("captured_num= ", captured_num)
                    face_in_img = frame[startY:endY, startX:endX, :]
                    face_in_img = cv2.cvtColor(face_in_img, cv2.COLOR_BGR2GRAY)

                    cv2.imwrite('app/faces/user' + str(captured_num) + '.jpg',
                                face_in_img)
                    face_in_img_flip = cv2.flip(face_in_img, 1)
                    cv2.imwrite(
                        'app/faces/user' + str(captured_num + 100) + '.jpg',
                        face_in_img_flip)

                # stop after collecting 100 training images
                if captured_num == 100:
                    # cv2.putText(frame, "collection complete!", (20,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 2)
                    raise StopIteration
                else:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 255), 2)
                    cv2.putText(frame, str(captured_num + 1), (20, 30),
                                cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2)

        elif len(confidence) > 1:
            image_url = cv2.imread(
                os.path.join(settings.BASE_DIR, 'img/overface.PNG'), -1)
            frame = colletion_overlay(frame, image_url, (150, 400))
        else:
            image_url2 = cv2.imread(
                os.path.join(settings.BASE_DIR, 'img/noface.PNG'), -1)
            frame = colletion_overlay(frame, image_url2, (150, 400))

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
Example #20
    def Read(cls):
        gender ="Unkown"
        age ="Unkown"
        frame = None
        if cls.webcam.isOpened():
            status, frame = cls.webcam.read()
            #cls.width, cls.height = frame.shape[:2]
            # apply face detection
            face, confidence = cv.detect_face(frame)

            # loop through detected faces
            for idx, f in enumerate(face):
                face_index = idx
                # get corner points of face rectangle
                (startX, startY) = f[0], f[1]
                (endX, endY) = f[2], f[3]
                # draw rectangle over face
                cv2.rectangle(frame, (startX, startY), (endX,endY), (0, 255, 0), 2)

                # crop the detected face region
                face_crop = np.copy(frame[startY:endY,startX:endX])
            
                if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:
                    continue
    
                # detect age (blobFromImage resizes the crop itself; the old
                # unassigned cv2.resize call did nothing and was removed)
                blob = cv2.dnn.blobFromImage(face_crop, 1, (224, 224), cls.MODEL_MEAN_VALUES, swapRB=False)
                cls.age_net.setInput(blob)
                age_preds = cls.age_net.forward()
        
                # preprocessing for gender detection model
                face_crop = cv2.resize(face_crop, (96, 96))
                face_crop = face_crop.astype("float") / 255.0
                face_crop = img_to_array(face_crop)
                face_crop = np.expand_dims(face_crop, axis=0)
                
                # apply gender detection on face
                conf = cls.model.predict(face_crop)[0]
        
                # get label with max accuracy
                idx = np.argmax(conf)

                #gender = cls.gender_type[idx]
                argidx = age_preds[0].argmax()
                age = cls.age_list[argidx]

                label = cls.gender_type[idx]
                label = "{}: {:.2f}%, age:{}".format(label, conf[idx] * 100, age )
                Y = startY - 10 if startY - 10 > 10 else startY + 10
                # write label and confidence above face rectangle
                cv2.putText(frame, label, (startX, Y),  cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            #return face_index, gender, age, frame
            return frame
            # display output
            #cv2.imshow("detection", frame)
Example #21
def object_detection(img_ref,
                     threshold=0.75,
                     rect_th=3,
                     text_size=1,
                     text_th=3):
    """
    Main functions gets predictions and creates image.
    """
    #Query database to get image data
    img_str = get_image(img_ref)

    # Open image from string
    img = Image.open(BytesIO(img_str))

    # Run prediction function to get predictions
    boxes, pred_class, object_count, pred_score = predict(img, threshold)

    # Convert image to use in OpenCV
    img = np.asarray(img)
    img = img[:, :, ::-1].copy()

    # Run facial recognition if persons are found in picture
    if "person" in pred_class:
        faces, conf = cv.detect_face(img)
        object_count['faces'] = len(faces)

        for face in faces:
            x1 = face[0]
            y1 = face[1]
            x2 = face[2]
            y2 = face[3]

            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # annotate image with bounding boxes, class predictions, and prediction scores
    for i in range(len(boxes)):
        cv2.rectangle(image,
                      boxes[i][0],
                      boxes[i][1],
                      color=(0, 255, 0),
                      thickness=rect_th)
        cv2.putText(image,
                    pred_class[i] + " " + str(pred_score[i]),
                    boxes[i][0],
                    cv2.FONT_HERSHEY_SIMPLEX,
                    text_size, (0, 255, 0),
                    thickness=text_th)

    # encode the annotated array to JPEG bytes (BytesIO(ndarray).read() would
    # return raw pixel data, not an encoded image)
    ok, buf = cv2.imencode('.jpg', image)

    results = {}

    results['image'] = buf.tobytes()
    results['object_count'] = object_count
    return results
Example #22
 def cvlib(self, image):
     h, w = image.shape[:2]
     faces, confidences = cvlib.detect_face(image)
     # store each detection alongside its confidence
     for face, conf in zip(faces, confidences):
         x1, y1, x2, y2 = face
         self.faces.append([x1, y1, x2, y2])
         self.confidences.append(conf)
Example #23
    def detect(self, image_cv) -> DetectorResponse:

        #TO BE MODIFIED ----------------------
        gender = False
        #TO BE MODIFIED ----------------------

        faces_bbox, faces_conf = cv.detect_face(image_cv)

        bbox = []
        label = []
        conf = []

        for bbox_item, conf_item in zip(faces_bbox, faces_conf):
            startX = bbox_item[0]
            startY = bbox_item[1]
            endX = bbox_item[2]
            endY = bbox_item[3]

            c = float(conf_item)
            l = 'person'
            b = [int(startX), int(startY), int(endX), int(endY)]

            bbox.append(b)
            label.append(l)
            conf.append(c)

        # for faces_item, conf_item in zip(faces, conf):
        #     logger.debug("type={}, confidence={:.2f}%".format(faces_item,conf_item))

        #     (startX, startY) = faces_item[0], faces_item[1]
        #     (endX, endY) = faces_item[2], faces_item[3]

        #     c = float(conf_item)
        #     l = 'face'
        #     b = [int(startX), int(startY), int(endX), int(endY)]

        #     bbox.append(b)
        #     label.append(l)
        #     conf.append(c)

        #     if gender == True:
        #         face_crop = np.copy(image_cv[startY:endY, startX:endX])
        #         (gender_label_arr, gender_confidence_arr) = cv.detect_gender(face_crop)
        #         idx = np.argmax(gender_confidence_arr)

        #         gender_label = gender_label_arr[idx]
        #         gender_confidence = "{:.2f}%".format(gender_confidence_arr[idx] * 100)

        #         #obj['gender'] = gender_label
        #         #obj['gender_confidence'] = gender_confidence

        model_response = DetectorResponse(self.get_model_name())
        for l, c, b in zip(label, conf, bbox):
            model_response.add(b, l, c, self.get_model_name())

        return model_response
Example #24
    def cvlib_face_detector(self, threshold=0.5, gpu=False):
        # pass the caller's threshold through (it was previously hard-coded to 0.5)
        faces, confidences = cv.detect_face(self.rgb_image,
                                            threshold=threshold,
                                            enable_gpu=gpu)
        faces = [[x1, y1, x2, y2] for x1, y1, x2, y2 in faces]
        return np.array(faces)
Example #25
 def ssd(self, pic, padding):
     # detect the first face, crop it with padding, and return a 100x125 grayscale patch
     h = pic.shape[0]
     w = pic.shape[1]
     f, _ = cb.detect_face(pic)
     if len(f) < 1:
         return pic, False
     f = f[0]
     pic = pic[max(0, f[1] - padding):min(h, f[3] + padding),
               max(0, f[0] - padding):min(w, f[2] + padding)]
     pic = cv.resize(pic, (100, 125))
     return cv.cvtColor(pic, cv.COLOR_BGR2GRAY), True
Example #26
    def process_img(self, img):
        faces, confidences = cvlib.detect_face(img)

        if not len(confidences):
            return None

        faceCrop = faces[confidences.index(max(confidences))]
        width = faceCrop[2] - faceCrop[0]
        height = faceCrop[3] - faceCrop[1]

        #"""
        zoom = 0.1
        faceCrop[0] += width * zoom
        faceCrop[1] += height * zoom
        faceCrop[2] -= width * zoom
        faceCrop[3] -= height * zoom
        width = faceCrop[2] - faceCrop[0]
        height = faceCrop[3] - faceCrop[1]

        idealRatio = 3 / 4
        # Too narrow, expand width to .75 height
        if width / height > idealRatio:
            centerX = faceCrop[0] + width / 2
            faceCrop[0] = centerX - height * idealRatio / 2
            faceCrop[2] = centerX + height * idealRatio / 2

        # Too wide, expand height
        else:
            centerV = faceCrop[1] + height / 2
            faceCrop[1] = centerV - width / idealRatio / 2
            faceCrop[3] = centerV + width / idealRatio / 2
        """
        if width > height:
            centerV = faceCrop[1] + height / 2
            faceCrop[1] = centerV - width / 2
            faceCrop[3] = centerV + width / 2
        else:
            centerX = faceCrop[0] + width / 2
            faceCrop[0] = centerX - height / 2
            faceCrop[2] = centerX + height / 2
        """

        if min(faceCrop) < 0 or faceCrop[2] >= len(
                img[0]) or faceCrop[3] >= len(img):
            return None

        faceCrop = [int(dimension) for dimension in faceCrop]
        img = img[faceCrop[1]:faceCrop[3], faceCrop[0]:faceCrop[2]]
        #img = cv2.resize(img, (128, 128))
        #img = cv2.resize(img, (96, 128))
        img = cv2.resize(img, (192, 256))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        return img
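
A usage sketch for process_img, which crops the highest-confidence face with a 10% inward zoom, pads the crop to a 3:4 aspect ratio, and returns a 192x256 grayscale patch (or None when no usable face is found). The instance name and file paths are hypothetical:

import cv2

img = cv2.imread("portrait.jpg")          # hypothetical input path
patch = preprocessor.process_img(img)     # hypothetical instance of the class above
if patch is not None:
    cv2.imwrite("portrait_face_192x256.jpg", patch)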
Example #27
    def get_frame(self):
        global current_time
        global prev_time
        global fps

        ret, frame = self.webcam.read()

        current_time = time.time() - prev_time

        if not ret:
            print("Could not read frame")
            exit()

        face, confidence = cv.detect_face(frame)

        try:
            if len(confidence) >= 1:
                # loop through detected faces
                for idx, f in enumerate(face):

                    (startX, startY) = f[0], f[1]
                    (endX, endY) = f[2], f[3]
                    Y = startY - 10 if startY - 10 > 10 else startY + 10

                    face_in_img = frame[startY:endY, startX:endX, :]
                    face_in_img = cv2.cvtColor(face_in_img, cv2.COLOR_BGR2GRAY)
                    result = model.predict(face_in_img)
                    confidence = int(100 * (1 - (result[1]) / 300))

                    if confidence > 70:
                        cv2.rectangle(frame, (startX, startY), (endX, endY),
                                      (0, 0, 255), 2)

                    else:
                        roi = frame[startY:endY, startX:endX]  # select the region of interest
                        roi = cv2.GaussianBlur(roi, (29, 29),
                                               10)  # blur (mosaic) the region
                        frame[startY:endY, startX:endX] = roi
                    prev_time = time.time()
            else:
                image_url2 = cv2.imread(
                    os.path.join(settings.BASE_DIR, 'img/noface.PNG'), -1)
                frame = colletion_overlay(frame, image_url2, (150, 400))
                pass
        except Exception:
            # cv2.putText(frame, "Face Not Found", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            pass

        fps += 1

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
Example #28
    def Detect(cls, frame, isShowRect = False):
        gender = "unknown"
        age = "unknown"
        # apply face detection
        face, confidence = cv.detect_face(frame)

        # loop through detected faces
        for idx, f in enumerate(face):
            faceIndex = idx
            # get corner points of face rectangle        
            (startX, startY) = f[0], f[1]
            (endX, endY) = f[2], f[3]

            # draw rectangle over face
            cv2.rectangle(frame, (startX,startY), (endX,endY), (0, 255, 0), 2)

            # crop the detected face region
            face_crop = np.copy(frame[startY:endY,startX:endX])
        
            if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:
                continue

            # detect age
            blob = cv2.dnn.blobFromImage(face_crop, 1, (227, 227), cls.MODEL_MEAN_VALUES, swapRB=False)
            cls.age_net.setInput(blob)
            cls.age_preds = cls.age_net.forward()
            argidx = cls.age_preds[0].argmax()
            age = cls.age_list[argidx]

            # preprocessing for gender detection model
            face_crop = cv2.resize(face_crop, (96, 96))
            face_crop = face_crop.astype("float") / 255.0
            face_crop = img_to_array(face_crop)
            face_crop = np.expand_dims(face_crop, axis=0)
        
            # apply gender detection on face
            conf = cls.model.predict(face_crop)[0]
        
            # get label with max accuracy
            idx = np.argmax(conf)
            gender = cls.gender_type[idx]

            if isShowRect:
                label = gender
                label = "{}: {:.2f}%, age:{}".format(label, conf[idx] * 100, age )
                # write label and confidence above face rectangle
                Y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.putText(frame, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        return gender, age
Example #29
def predict(image, model):
    # read input image
    #image = cv2.imread(file)
    # check for a missing image before resizing (cv2.resize would raise on None)
    if image is None:
        print("Could not read input image")
        exit()
    image = cv2.resize(image, (400, 400))

    # detect faces in the image
    face, confidence = cv.detect_face(image)

    classes = ['man', 'woman']

    # loop through detected faces
    for idx, f in enumerate(face):

        # get corner points of face rectangle
        (startX, startY) = f[0], f[1]
        (endX, endY) = f[2], f[3]

        # draw rectangle over face
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)

        # crop the detected face region
        face_crop = np.copy(image[startY:endY, startX:endX])

        # preprocessing for gender detection model
        face_crop = cv2.resize(face_crop, (96, 96))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)

        # apply gender detection on face
        conf = model.predict(face_crop)[0]
        print(conf)
        print(classes)

        # get label with max accuracy
        idx = np.argmax(conf)
        label = classes[idx]

        label = "{}: {:.2f}%".format(label, conf[idx] * 100)

        Y = startY - 10 if startY - 10 > 10 else startY + 10

        # write label and confidence above face rectangle
        cv2.putText(image, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

    return image
Example #30
def checker(link):
    result = 0
    img = io.imread(link, plugin='matplotlib')
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    font = cv2.FONT_HERSHEY_PLAIN

    faces, conf = cvlib.detect_face(img)

    if len(faces) > 0:
        # cvlib returns corner coordinates, so crop with them directly
        # (the old slice img[y:y + h, x:x + w] wrongly treated them as width/height)
        for x1, y1, x2, y2 in faces:
            roi = img[y1:y2, x1:x2]
            label, confidence = cvlib.detect_gender(roi)
            if confidence[0] > confidence[1]:
                gender = label[0]
            else:
                gender = label[1]

            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

            cv2.rectangle(img, (50, 50), (220, 80), (0, 255, 0), -1)
            cv2.putText(img, "Face Detected [+]", (55, 70), font, 1, (0, 0, 0),
                        1, cv2.LINE_AA)

            if gender == "female":

                cv2.rectangle(img, (50, 90), (220, 120), (0, 255, 0), -1)
                cv2.putText(img, gender + " [+]", (55, 110), font, 1,
                            (0, 0, 0), 1, cv2.LINE_AA)
                result = 1
                cv2.rectangle(img, (50, 130), (220, 160), (0, 255, 0), -1)
                cv2.putText(img, " Like [+]  ", (55, 150), font, 1, (0, 0, 0),
                            1, cv2.LINE_AA)
            else:
                cv2.rectangle(img, (50, 90), (220, 120), (0, 0, 255), -1)
                cv2.putText(img, gender + " [+]", (55, 110), font, 1,
                            (0, 0, 0), 1, cv2.LINE_AA)

    else:
        cv2.rectangle(img, (50, 50), (220, 80), (0, 0, 255), -1)
        cv2.putText(img, "Face Not Detected", (55, 70), font, 1,
                    (255, 255, 255), 1, cv2.LINE_AA)
        cv2.rectangle(img, (50, 90), (220, 120), (0, 0, 255), -1)
        cv2.putText(img, "Dislike [-]", (55, 110), font, 1, (255, 255, 255), 1,
                    cv2.LINE_AA)

    cv2.imshow('Tinder Detector', img)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()

    return result
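
Several of the examples above pair detect_face with cvlib.detect_gender, which takes a cropped face and returns parallel label and confidence lists. A minimal standalone sketch (image path hypothetical):

import cv2
import cvlib
import numpy as np

img = cv2.imread("face.jpg")  # hypothetical input path
faces, _ = cvlib.detect_face(img)
for x1, y1, x2, y2 in faces:
    labels, confidences = cvlib.detect_gender(img[y1:y2, x1:x2])
    best = int(np.argmax(confidences))
    print(labels[best], "{:.2f}%".format(confidences[best] * 100))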