Example #1
def crop_face(img):
    # Detect faces; each bounding box is (x1, y1, x2, y2, score).
    bounding_boxes, landmarks = detect_faces(img)
    # Draw the boxes for visualization (the returned image is not used further).
    image = show_bboxes(img, bounding_boxes)
    # Crop the first detected face; PIL's crop() expects (left, upper, right, lower).
    (x1, y1, x2, y2) = bounding_boxes[0][:4]
    img = img.crop((x1, y1, x2, y2))
    img = img.resize((224, 224))
    return img
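A minimal usage sketch for crop_face, assuming detect_faces and show_bboxes come from an MTCNN-style helper module and that boxes are returned as (x1, y1, x2, y2, score); the module name below is a placeholder, not part of the original example.

# Hypothetical usage; 'mtcnn_detector' is a stand-in for wherever detect_faces/show_bboxes live.
from PIL import Image
from mtcnn_detector import detect_faces, show_bboxes

img = Image.open('images/test.jpg')
face = crop_face(img)            # 224x224 crop of the first detected face
face.save('images/face_224.jpg')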
Example #2
def get_image_from_camera():
    capture = cv2.VideoCapture(0)

    while True:
        ret, frame = capture.read()

        if ret:
            # OpenCV frames are BGR; convert to RGB before handing them to PIL / detect_faces.
            cv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(cv_img)
            # Image.ANTIALIAS is an alias of Image.LANCZOS and was removed in Pillow 10.
            image = image.resize((320, 240), Image.ANTIALIAS)
            bounding_boxes, landmarks = detect_faces(image)

            image = show_bboxes(image, bounding_boxes, landmarks)

            # Convert back to BGR so cv2.imshow renders the colors correctly.
            img = cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)
            cv2.imshow("face detect", img)
        if cv2.waitKey(100) & 0xff == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
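Example #2 leans on the same BGR/RGB round trip twice per frame; a small self-contained sketch of those two conversions (the helper names here are illustrative, not from the original snippet):

import cv2
import numpy
from PIL import Image

def bgr_frame_to_pil(frame):
    # OpenCV frames are BGR; PIL and detect_faces expect RGB.
    return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

def pil_to_bgr_frame(image):
    # Back to BGR so cv2.imshow / cv2.imwrite render colors correctly.
    return cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)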
Example #3
def main():
    image = Image.open('images/test.jpg')
    opencv_image = cv2.imread("images/test.jpg")
    print("image size: ", image.size, ", opencv image size: ", opencv_image.shape)
    bounding_boxes, landmarks = detect_faces(image)
    print("bounding_boxes: ", bounding_boxes, ", landmarks: ", landmarks)
    # Crop and align each detected face.
    for bbox, lks in zip(bounding_boxes, landmarks):
        # Crop with a 20-pixel margin around the box (x1, y1, x2, y2).
        cropped = opencv_image[int(bbox[1] - 20):int(bbox[3] + 20), int(bbox[0] - 20):int(bbox[2] + 20)]
        print("cropped size: ", cropped.shape)
        cv2.imshow("Face extract", cropped)

        # Landmarks are laid out as [x1..x5, y1..y5]; indices 0/1 and 5/6 are the eyes.
        # Eye midpoint in the cropped image's coordinates (the crop starts 20 px before the box).
        center_x = (lks[0] + lks[1]) / 2 - (bbox[0] - 20)
        center_y = (lks[5] + lks[6]) / 2 - (bbox[1] - 20)
        eyesCenter = (center_x, center_y)

        # Angle between the eyes, in degrees.
        dy = lks[6] - lks[5]
        dx = lks[1] - lks[0]
        angle = math.degrees(math.atan2(dy, dx))

        # Rotation matrix around the eye midpoint.
        retval = cv2.getRotationMatrix2D(eyesCenter, angle, 1.0)

        print("angle: ", angle, ", retval: ", retval, ", eyesCenter: ", eyesCenter, ", cropped size: ", cropped.shape)
        # Rotate the crop so the eyes lie on a horizontal line.
        dst = cv2.warpAffine(cropped, retval, (cropped.shape[1], cropped.shape[0]))
        print("dst: ", type(dst), "len dst: ", len(dst), "dst shape: ", dst.shape)
        cv2.imshow("Face alignment", dst)

    image = show_bboxes(image, bounding_boxes, landmarks)
    image.show()
    cv2.waitKey(0)
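The alignment in Example #3 amounts to rotating the crop by the inter-eye angle around the eye midpoint. A standalone sketch of that step, assuming the [x1..x5, y1..y5] landmark layout used above and landmarks already expressed in the crop's coordinate system (the helper name is illustrative):

import math
import cv2

def align_by_eyes(face_img, lks):
    # lks: [x1..x5, y1..y5]; indices 0/1 and 5/6 are the two eyes.
    center = ((lks[0] + lks[1]) / 2.0, (lks[5] + lks[6]) / 2.0)
    angle = math.degrees(math.atan2(lks[6] - lks[5], lks[1] - lks[0]))
    rot = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(face_img, rot, (face_img.shape[1], face_img.shape[0]))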
Example #4
def main():
    image = Image.open('images/test3.jpg')
    bounding_boxes, landmarks = detect_faces(image)
    image = show_bboxes(image, bounding_boxes, landmarks)
    image.show()
def main():
    # Sample a video, run face detection on every 5th frame, and write
    # YOLO-style annotation files for frames that also show large foreground motion.
    cap = cv2.VideoCapture(
        '/home/admin/face_detection/dataset/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mp4'
    )

    mog = cv2.createBackgroundSubtractorMOG2(1, 55)

    i = 0
    p = ''  # output-folder suffix; not defined in the original snippet, assumed empty here

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Only process every 5th frame.
        if i % 5:
            i += 1
            continue

        j = str(i)
        cv2.imwrite('image/' + j + '.jpg', frame)

        image1 = Image.open('image/' + j + '.jpg')
        bounding_boxes, landmarks = detect_faces(image1)
        image2 = show_bboxes(image1, bounding_boxes, landmarks)  # drawn copy, not used further

        if len(bounding_boxes) > 0:
            # Foreground mask from the MOG2 background subtractor.
            fgmask = mog.apply(frame, learningRate=1)
            dilate = cv2.dilate(fgmask, numpy.ones((21, 21), numpy.uint8), iterations=1)
            (cnts, _) = cv2.findContours(dilate.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

            ee = open('chayi/' + j + ' chayi' + '.txt', 'a')
            for c in cnts:
                c_area = cv2.contourArea(c)
                if c_area > 3000:
                    ee.write(str(c_area) + '\n')

                    # One YOLO-format line per detected face:
                    # class x_center y_center width height, all normalized to [0, 1].
                    f = open('txt' + p + '/' + j + '.txt', 'a')
                    for b in bounding_boxes:
                        x = ((b[0] + b[2]) / 2) / image1.size[0]
                        y = ((b[1] + b[3]) / 2) / image1.size[1]
                        w = (b[2] - b[0]) / image1.size[0]
                        h = (b[3] - b[1]) / image1.size[1]
                        f.write('0 ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h) + '\n')
                    f.close()
                    # Keep the frame that produced the annotations.
                    image1.save('dataset' + p + '/' + j + '.jpg')
                    break
            ee.close()

        i += 1
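The annotation lines written above follow the YOLO convention: class index, then box center and size, each normalized by the image dimensions. A small sketch of that conversion on its own (the function name is illustrative):

def to_yolo_line(b, img_w, img_h, class_id=0):
    # b is a pixel-space box (x1, y1, x2, y2, ...); all outputs fall in [0, 1].
    x = (b[0] + b[2]) / 2.0 / img_w
    y = (b[1] + b[3]) / 2.0 / img_h
    w = (b[2] - b[0]) / img_w
    h = (b[3] - b[1]) / img_h
    return '%d %f %f %f %f' % (class_id, x, y, w, h)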