Example #1
    def get_gender(image: numpy.ndarray):
        padding = 20
        try:
            face_block = FBHelper.get_face_block(image)

            if face_block:
                startX = max(0, face_block.left() - padding)
                startY = max(0, face_block.top() - padding)

                endX = min(image.shape[1] - 1, face_block.right() + padding)
                endY = min(image.shape[0] - 1, face_block.bottom() + padding)

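                # crop the padded face region for gender classification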
                face_grid = numpy.copy(image[startY:endY, startX:endX])

                label, confidence = cvlib.detect_gender(face_grid)
                idx = numpy.argmax(confidence)

                if label[idx] == 'male':
                    return 'Male'
                else:
                    return 'Female'

        except Exception as e:
            print('Failed to get gender from CVLib. ERROR: {}'.format(e))
Example #2
 def detect_gender(self, image, face_coords):
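     # face_coords is assumed to follow cvlib's [startX, startY, endX, endY]
     # layout; the strided slices below pick out (x0, x1) and (y0, y1)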
     x0, x1 = face_coords[0:3:2]
     y0, y1 = face_coords[1:4:2]
     face_crop = np.copy(image[y0:y1, x0:x1])
     label, confidence = cv.detect_gender(face_crop)
     index = np.argmax(confidence)
     gender = label[index]
     return gender
Example #3
def checker(link):
    result = 0
    img = io.imread(link, plugin='matplotlib')
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    font = cv2.FONT_HERSHEY_PLAIN

    faces, conf = cvlib.detect_face(img)

    if len(faces) > 0:
        for x, y, x2, y2 in faces:
            # cvlib returns corner coordinates [startX, startY, endX, endY],
            # not width/height, so slice and draw with the corners directly
            roi = img[y:y2, x:x2]
            label, confidence = cvlib.detect_gender(roi)
            if confidence[0] > confidence[1]:
                gender = label[0]
            else:
                gender = label[1]

            cv2.rectangle(img, (x, y), (x2, y2), (0, 255, 0), 2)

            cv2.rectangle(img, (50, 50), (220, 80), (0, 255, 0), -1)
            cv2.putText(img, "Face Detected [+]", (55, 70), font, 1, (0, 0, 0),
                        1, cv2.LINE_AA)

            if gender == "female":

                cv2.rectangle(img, (50, 90), (220, 120), (0, 255, 0), -1)
                cv2.putText(img, gender + " [+]", (55, 110), font, 1,
                            (0, 0, 0), 1, cv2.LINE_AA)
                result = 1
                cv2.rectangle(img, (50, 130), (220, 160), (0, 255, 0), -1)
                cv2.putText(img, " Like [+]  ", (55, 150), font, 1, (0, 0, 0),
                            1, cv2.LINE_AA)
            else:
                cv2.rectangle(img, (50, 90), (220, 120), (0, 0, 255), -1)
                cv2.putText(img, gender + " [+]", (55, 110), font, 1,
                            (0, 0, 0), 1, cv2.LINE_AA)

    else:
        cv2.rectangle(img, (50, 50), (220, 80), (0, 0, 255), -1)
        cv2.putText(img, "Face Not Detected", (55, 70), font, 1,
                    (255, 255, 255), 1, cv2.LINE_AA)
        cv2.rectangle(img, (50, 90), (220, 120), (0, 0, 255), -1)
        cv2.putText(img, "Dislike [-]", (55, 110), font, 1, (255, 255, 255), 1,
                    cv2.LINE_AA)

    cv2.imshow('Tinder Detector', img)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()

    return result
Example #4
def genderDetection(img_path, output_folder):

    # create the output folder if it is not present
    if output_folder and not os.path.isdir(output_folder):
        os.mkdir(output_folder)
    # read input image
    img = cv2.imread(img_path)

    # apply face detection
    face, conf = cv.detect_face(img)

    padding = 5

    # loop through detected faces
    for f in face:

        (startX, startY) = max(0, f[0] - padding), max(0, f[1] - padding)
        (endX, endY) = min(img.shape[1] - 1,
                           f[2] + padding), min(img.shape[0] - 1,
                                                f[3] + padding)

        # draw rectangle over face
        cv2.rectangle(img, (startX, startY), (endX, endY), (0, 255, 0), 2)

        face_crop = np.copy(img[startY:endY, startX:endX])

        # apply gender detection
        (label, confidence) = cv.detect_gender(face_crop)

        idx = np.argmax(confidence)
        label = label[idx]
        label = "{}: {:.2f}%".format(label, confidence[idx] * 100)
        Y = startY - 5 if startY - 5 > 5 else startY + 5
        cv2.putText(img, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                    (0, 255, 255), 1)

    # convert BGR back to RGB for matplotlib
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # save the annotated image to the output folder
    if output_folder:
        input_filename, ext = os.path.splitext(os.path.basename(img_path))
        filename = os.path.join(output_folder, input_filename + ext)
        fig = plt.figure()
        plt.imshow(img)
        plt.axis("off")
        plt.savefig(filename, bbox_inches='tight', pad_inches=0)
        plt.close(fig)
Example #5
def gender_detect(img):
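    # note: the return statement sits inside the loop below, so only the
    # first detected face is classified and any remaining faces are ignored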
    image = cv2.imread(img)
    face, conf = cv.detect_face(image)
    padding = 20
    for f in face:
        (startX, startY) = max(0, f[0] - padding), max(0, f[1] - padding)
        (endX, endY) = min(image.shape[1] - 1,
                           f[2] + padding), min(image.shape[0] - 1,
                                                f[3] + padding)
        cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)
        face_crop = np.copy(image[startY:endY, startX:endX])
        (label, confidence) = cv.detect_gender(face_crop)
        idx = np.argmax(confidence)
        conf = np.max(confidence).item()
        label = label[idx]
        return {"conf": conf, "label": label}
Example #6
    def detect(self, f, ext, args):
        fi = f+ext
        fo = f+'-face'+ext
        g.log.debug("Reading {}".format(fi))
        image = cv2.imread(fi)
        faces, conf = cv.detect_face(image)

        detections = []
        # use a distinct loop variable so the filename base `f` is not shadowed
        for box, c in zip(faces, conf):
            c = "{:.2f}%".format(c * 100)

            (startX, startY) = box[0], box[1]
            (endX, endY) = box[2], box[3]
            cv2.rectangle(image, (startX, startY),
                          (endX, endY), (0, 255, 0), 2)
            rect = [int(startX), int(startY), int(endX), int(endY)]

            obj = {
                'type': 'face',
                'confidence': c,
                'box': rect
            }

            if args['gender']:
                face_crop = np.copy(image[startY:endY, startX:endX])
                (gender_label_arr, gender_confidence_arr) = cv.detect_gender(face_crop)
                idx = np.argmax(gender_confidence_arr)
                gender_label = gender_label_arr[idx]
                gender_confidence = "{:.2f}%".format(
                    gender_confidence_arr[idx] * 100)
                obj['gender'] = gender_label
                obj['gender_confidence'] = gender_confidence
                Y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.putText(image, gender_label, (startX, Y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            detections.append(obj)

        if not args['delete']:
            g.log.debug("Writing {}".format(fo))
            cv2.imwrite(fo, image)

        if args['delete']:
            os.remove(fi)

        return detections
Example #7
def imageRecog(url):
    if url.startswith('https'):
        image = url_to_image(url)
    else:
        # cv2.imread already returns a BGR image, so no color conversion is needed
        image = cv2.imread(url)
    bbox, label, conf = cv.detect_common_objects(image)
    list_of_things = []
    list_of_genders = []
    list_of_gender_conf = []
    personInfo = []
    for i in range(len(label)):
        if label[i] not in list_of_things:
            list_of_things.append(label[i])

        if label[i] == 'person':
            label2, confidence = cv.detect_gender(image, False)

            # keep the predicted label in list_of_genders and its score
            # (as a percentage) in list_of_gender_conf
            idx = 0 if confidence[0] > confidence[1] else 1  # 0 = male, 1 = female
            list_of_genders.append(label2[idx])
            list_of_gender_conf.append(int(confidence[idx] * 100))
            personInfo = [list_of_genders, list_of_gender_conf]
    # average confidence across all detected objects (guard the empty case)
    avgConf = sum(conf) / len(conf) if conf else 0
    return list_of_things, avgConf, personInfo
Example #8
def pose_estimation(IMAGE, flag):
    thr = 0.1
    inWidth = 368
    inHeight = 368
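    # the pretrained pose-estimation graph (graph_opt.pb) is assumed to be
    # present in the working directory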
    net = cv.dnn.readNetFromTensorflow("graph_opt.pb")
    cap = cv.VideoCapture(IMAGE)
    cor = color()
    font = cv.FONT_HERSHEY_SCRIPT_SIMPLEX
    frame = None
    status = {'pose': 0}

    while cv.waitKey(1) < 0:
        hasFrame, frame = cap.read()
        try:
            face, conf = cvlib.detect_face(frame)
            padding = 20
            for f in face:
                (startX, startY) = max(0, f[0] - padding), max(0, f[1] - padding)
                (endX, endY) = (min(frame.shape[1] - 1, f[2] + padding),
                                min(frame.shape[0] - 1, f[3] + padding))
                cv.rectangle(frame, (startX, startY), (endX, endY),
                             (0, 255, 0), 2)
                (label, confidence) = cvlib.detect_gender(
                    np.copy(frame[startY:endY, startX:endX]))
                idx = np.argmax(confidence)
                label = label[idx]
                conf = confidence[idx] * 100
                if conf > 90:
                    cv.putText(frame, "{}: {:.2f}%".format(label, conf),
                               (startX, startY -
                                10 if startY - 10 > 10 else startY + 10),
                               cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    if label == 'male' and flag:
                        status['pose'] = 'aprovado'  # approved
                    else:
                        status['pose'] = 'reprovado'  # rejected

                else:
                    print("...")

        except TypeError:
            pass

        if not hasFrame:
            cv.waitKey()
            break

        frameWidth = frame.shape[1]
        frameHeight = frame.shape[0]

        net.setInput(
            cv.dnn.blobFromImage(frame,
                                 1.0, (inWidth, inHeight),
                                 (127.5, 127.5, 127.5),
                                 swapRB=True,
                                 crop=False))
        out = net.forward()
        out = out[:, :19, :, :]
        assert (len(BODY_PARTS) == out.shape[1])

        points = []
        for i in range(len(BODY_PARTS)):
            heatMap = out[0, i, :, :]
            _, conf, _, point = cv.minMaxLoc(heatMap)
            points.append((int((frameWidth * point[0]) / out.shape[3]),
                           int((frameHeight * point[1]) /
                               out.shape[2])) if conf > thr else None)

        for pair in POSE_PAIRS:
            partFrom = pair[0]
            partTo = pair[1]
            assert (partFrom in BODY_PARTS)
            assert (partTo in BODY_PARTS)

            idFrom = BODY_PARTS[partFrom]
            idTo = BODY_PARTS[partTo]

            if points[idFrom] and points[idTo]:
                cv.line(frame, points[idFrom], points[idTo], (cor, cor, cor),
                        3)
                print(idFrom, idTo, points[idFrom], points[idTo])
                cv.ellipse(frame, points[idFrom], (4, 4), 0, 0, 360,
                           (cor, cor, cor), cv.FILLED)
                cv.ellipse(frame, points[idTo], (4, 4), 0, 0, 360,
                           (cor, cor, cor), cv.FILLED)

        t, _ = net.getPerfProfile()
        freq = cv.getTickFrequency() / 1000
        cv.putText(frame, '%.2fms' % (t / freq), (10, 20),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

        #cv.imshow('OpenPose using OpenCV', frame)
        name = "output_" + str(uuid4()) + IMAGE

        cv.imwrite(name, frame)
    return status
Example #9
import cv2
import cvlib as cv
import numpy as np

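# open the default webcam (device index 0)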
webcam = cv2.VideoCapture(0)

padding = 20
while webcam.isOpened():
    status, frame = webcam.read()
    face, confidence = cv.detect_face(frame)
    # each detected face is a box [startX, startY, endX, endY]
    for f in face:
        (startX, startY) = max(0, f[0] - padding), max(0, f[1] - padding)
        (endX, endY) = min(frame.shape[1] - 1,
                           f[2] + padding), min(frame.shape[0] - 1,
                                                f[3] + padding)
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
        face_crop = np.copy(frame[startY:endY, startX:endX])
        (label, confidence) = cv.detect_gender(face_crop)
        idx = np.argmax(confidence)
        label = label[idx]
        label = "{}: {:.2f}%".format(label, confidence[idx] * 100)
        Y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.putText(frame, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)
    cv2.imshow("Real-time gender detection", frame)
    # press "s" to stop
    if cv2.waitKey(1) & 0xFF == ord('s'):
        break
webcam.release()
cv2.destroyAllWindows()

Example #10
import glob

import cv2
import cvlib as cv
import numpy as np

students = glob.glob("./Images/*.jpg")

n = 0

for pics in students:

    try:
        image = cv2.imread(pics)

        label, confidence = cv.detect_gender(image, enable_gpu=True)
        # print(confidence)

        idx = np.argmax(confidence)
        label = label[idx]
        gender_acc = "{}: {:.2f}%".format(label, confidence[idx] * 100)

        # coordinates for the overlay text: place the label near the
        # top-left corner with a small offset
        padding = 10
        startX, startY = padding, padding
        Y = startY - 20 if startY - 10 > 10 else startY + 20
    except Exception as err:
        # skip images that fail to load or classify
        print("Failed to process {}: {}".format(pics, err))
Example #11
import cv2
import cvlib as cv
import numpy as np

img = cv2.imread('sample3.png')

# find faces
faces, confidences = cv.detect_face(img)

for (x, y, x2, y2) in faces:

    # select the face ROI
    face_img = img[y:y2, x:x2]

    # predict gender
    label, confidence = cv.detect_gender(face_img)

    cv2.rectangle(img, (x, y), (x2, y2), (0, 255, 0), 2)

    gender = np.argmax(confidence)
    text = f'{label[gender]}:{confidence[gender]:.1%}'
    cv2.putText(img, text, (x,y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)

# display the image
cv2.imshow('image', img)

key = cv2.waitKey(0)
cv2.destroyAllWindows()

https://github.com/arunponnusamy/cvlib
Example #12
        for f in detectedFaces:

            # Get rectangle coordinates: initial XY and final XY corners
            (initX, initY) = max(0, f[0] - padding), max(0, f[1] - padding)
            (lastX, lastY) = (min(rgbFrame.shape[1] - 1, f[2] + padding),
                              min(rgbFrame.shape[0] - 1, f[3] + padding))

            # Draw a red rectangle around each detected face
            cv2.rectangle(in_buf_array, (initX, initY), (lastX, lastY), (255, 0, 0), 2)


            # Gender analysis
            # Extract detected faces
            extractedGenderFace = np.copy(rgbFrame[initY:lastY, initX:lastX])

            # Detect gender
            (genderDetectionResult, genderDetectionConfidence) = cv.detect_gender(extractedGenderFace)

            # Prepare label and confidence value
            idx = np.argmax(genderDetectionConfidence)
            genderDetectionLabel = genderDetectionResult[idx]
            genderDetectionLabel = str(genderDetectionLabel) + " " + str(int(genderDetectionConfidence[idx] * 100)) + "%"

            # Get detected gender value to send
            genderDetection = genderDetectionLabel

            # Update full detection label
            detectionFrameLabel = "G: " + str(genderDetection)

            # Age detection
            # Extract detected faces
            extractedAgeFace = rgbFrame[initY:lastY, initX:lastX].copy()