Example #1
def generate_dataset(root_face_images_folder_path,
                     mask_images_folder="mask_images"):
    original_images_paths = list(
        pathlib.Path(root_face_images_folder_path).glob('*'))
    mask_images_paths = list(pathlib.Path(mask_images_folder).glob('*'))
    mask_images = [Image.open(path) for path in mask_images_paths]

    targets = {}

    for i, img_path in enumerate(tqdm(original_images_paths)):
        image_name = img_path.name  # file name only; robust to nested source folders
        image = Image.open(img_path).convert("RGB").resize((800, 800))
        detected_faces_boxes, detected_faces_landmarks = detect_faces(
            np.array(image), False)
        if len(detected_faces_boxes) == 0:
            continue

        image.save("resized_images/" + image_name)
        image = apply_face_mask(image, mask_images, detected_faces_landmarks)
        image.save("generated_images/" + image_name)

        targets[image_name] = {
            # convert to plain lists so the boxes can be serialised to JSON below
            "bbox": np.asarray(detected_faces_boxes).tolist(),
        }

        # cv2.imshow("Output", cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
        # cv2.waitKey(0)

    with open("targets.json", "w", encoding="utf8") as outfile:
        json.dump(targets, outfile)
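The generator writes one JSON entry per image, keyed by file name. A minimal sketch of reading the annotations back, assuming the bounding boxes were stored as plain lists as above:

import json

with open("targets.json", encoding="utf8") as infile:
    targets = json.load(infile)

for image_name, annotation in targets.items():
    # each entry holds the face bounding boxes detected for that image
    print(image_name, annotation["bbox"])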
Example #2
    def post(self):
        args = parser.parse_args()
        img_url = args['img']
        response = req.get(img_url)
        img = Image.open(BytesIO(response.content))
        bounding_boxes = detect_faces(
            img)  # detect bboxes and landmarks for all faces in the image
        width, height = img.size
        # wnum: number of horizontal splits (columns); hnum: number of vertical splits (rows)
        wnum = int(args['ysplit'])
        hnum = int(args['xsplit'])
        w_step = width / wnum
        h_step = height / hnum
        flag = np.ones((hnum, wnum))
        val = np.ones((hnum, wnum, 2))
        res = []
        for i in range(0, len(bounding_boxes)):
            y = bounding_boxes[i][0] / (w_step + 0.001)
            x = bounding_boxes[i][1] / (h_step + 0.001)

            flag[int(x)][int(y)] += 1
            val[int(x)][int(y)][0] += bounding_boxes[i][0]
            val[int(x)][int(y)][1] += bounding_boxes[i][1]
        for i in range(0, int(args['nums'])):
            mpos = np.unravel_index(np.argmax(flag), flag.shape)
            res.append([
                val[mpos[0], mpos[1], 1] / np.max(flag) / height,
                val[mpos[0], mpos[1], 0] / np.max(flag) / width
            ])
            flag[mpos[0], mpos[1]] = 0

        return res, 201
Example #3
def align_face(img_path, resize_face_size=112):
    img = Image.open(img_path)
    bounding_boxes, landmarks = detect_faces(img)
    scale = resize_face_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale
    # landmarks[0] holds the 5 x-coordinates followed by the 5 y-coordinates
    facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
    warped_face = warp_and_crop_face(np.array(img), facial5points, reference,
                                     crop_size=(resize_face_size, resize_face_size))
    return warped_face
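A minimal usage sketch for the helper above, assuming PIL is available and that warp_and_crop_face returns the aligned face as a NumPy array (the input and output paths are illustrative):

from PIL import Image

aligned = align_face("sample_face.jpg", resize_face_size=112)  # hypothetical input file
Image.fromarray(aligned).save("sample_face_aligned.jpg")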
Example #4
def image_gaze(img_dir):
    for image in glob.glob(img_dir):

        head, tail = os.path.split(image)
        # stage1
        img = Image.open(image)  # modify the image path to yours
        image = cv2.imread(image, cv2.IMREAD_COLOR)
        bounding_boxes, landmarks = detect_faces(
            img)  # detect bboxes and landmarks for all faces in the image
        h, w = image.shape[:2]
        for i in range(len(bounding_boxes)):
            #imgpts, modelpts, rotate_degree, nose = face_orientation(image, landmarks[i])
            #random_color = tuple(np.random.random_integers(0, 255, size=3))
            cv2.rectangle(
                image, (int(bounding_boxes[i][0]), int(bounding_boxes[i][1])),
                (int(bounding_boxes[i][2]), int(bounding_boxes[i][3])),
                (0, 155, 255), 2)
            headpose_center_cords = normalize_eyeCords(
                int(bounding_boxes[i][0]), int(bounding_boxes[i][1]),
                int(bounding_boxes[i][2]), int(bounding_boxes[i][3]), w, h)
            px, py = detect_target(
                image,
                (headpose_center_cords[0][0], headpose_center_cords[0][1]))
            draw_result(
                image,
                (headpose_center_cords[0][0], headpose_center_cords[0][1]),
                (px, py), [0, 0, 255])
        #stage2
        result = detector.detect_faces(image)

        if result:
            for person in result:
                #imgpts, modelpts, rotate_degree, nose = face_orientation(image,person['keypoints'])
                #cv2.line(image, nose, tuple(imgpts[1].ravel()), (0, 255, 0), 2)  # GREEN
                #cv2.line(image, nose, tuple(imgpts[0].ravel()), (255, 0,), 2)  # BLUE
                #cv2.line(image, nose, tuple(imgpts[2].ravel()), (0, 0, 255), 2)  # RED
                confidence = person['confidence']
                if (confidence > 0.90):
                    bounding_box = person['box']
                    keypoints = person['keypoints']
                    #commented cv2 drawing
                    #cv2.rectangle(image,(bounding_box[0], bounding_box[1]),(bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),(0, 155, 255),2)
                    #cv2.circle(image, (keypoints['left_eye']), 2, (0, 155, 255), 2)
                    #cv2.circle(image, (keypoints['right_eye']), 2, (0, 155, 255), 2)

                    leyecoords, reyecoords = enormalize(
                        keypoints['left_eye'], keypoints['right_eye'], w, h)
                    lpx, lpy = detect_target(
                        image, (leyecoords[0][0], leyecoords[0][1]))
                    rpx, rpy = detect_target(
                        image, (reyecoords[0][0], reyecoords[0][1]))
                    draw_result(image, (leyecoords[0][0], leyecoords[0][1]),
                                (lpx, lpy), [255, 0, 0])
                    draw_result(image, (reyecoords[0][0], reyecoords[0][1]),
                                (rpx, rpy), [0, 255, 255])

        cv2.imwrite("./output/" + tail, image)
        cv2.waitKey(0)
Example #5
def get_central_face_attributes(full_path):
    img = cv.imread(full_path, cv.IMREAD_COLOR)
    bboxes, landmarks = detect_faces(img)

    if len(landmarks) > 0:
        i = select_significant_face(bboxes)
        return [bboxes[i]], [landmarks[i]]

    return None, None
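The helper above delegates the choice of a single face to select_significant_face, whose implementation is not shown here. A plausible sketch, assuming each bounding box is an [x1, y1, x2, y2, score] sequence, is to pick the box with the largest area:

def select_significant_face(bboxes):
    # hypothetical implementation: return the index of the largest box by area;
    # a real version might also weight the detector's confidence score
    areas = [(box[2] - box[0]) * (box[3] - box[1]) for box in bboxes]
    return max(range(len(bboxes)), key=lambda i: areas[i])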
Example #6
def detect():
    if request.method == 'POST':
        print(request)
        if 'file' not in request.files:
            return jsonify({'status': "FAILURE", "message": "No file part found."})
        # keep the upload handling inside the POST branch so GET requests
        # never try to read a missing file part
        file = request.files['file']
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['DATA_FOLDER'], filename))
        return detect_faces(filename)
Example #7
def get_face_attributes(full_path):
    try:
        img = cv.imread(full_path, cv.IMREAD_COLOR)
        bounding_boxes, landmarks = detect_faces(img)

        if len(landmarks) > 0:
            landmarks = [int(round(x)) for x in landmarks[0]]
            return True, landmarks

    except KeyboardInterrupt:
        raise
    except Exception as err:
        print(err)
    return False, None
Example #8
def detect_face():
    filename = 'face.jpg'
    try:
        print(request)
        request_data = request.get_json()
        file = request_data['file']
        starter = file.find(',')
        image_data = file[starter + 1:]
        image_data = bytes(image_data, encoding="ascii")
        im = Image.open(BytesIO(base64.b64decode(image_data)))
        im.save(os.path.join(app.config['DATA_FOLDER'], filename))
    except Exception as e:
        print("Exception while detecting faces: ", e)
        return jsonify({"status": "FAILURE", "message": "Unknown error while getting encoded file."})

    return detect_faces(filename)
Example #9
def take_images():
    # initialize the camera
    cam = cv2.VideoCapture(0)   # 0 -> index of camera
    log.info('Initialized camera capturing')
    captured = True
    while captured:
        captured, img = cam.read()
        time.sleep(sleep_time)
        if captured:
            # log.debug('Image captured')
            if detector.detect_faces(img) or detector.detect_people(img):
                log.info('Detected people')
                cv2.imwrite(hierarchical_file(datetime.now(), detection=True), resize_image(img, people_scale_factor))
            elif random.random() > 0.5:
                cv2.imwrite(hierarchical_file(datetime.now(), detection=False), resize_image(img, no_people_scale_factor))
        else:
            log.error('No image captured')
            sys.exit(1)
Example #10
def get_all_face_attributes(full_path):
    img = cv.imread(full_path, cv.IMREAD_COLOR)
    bounding_boxes, landmarks = detect_faces(img)
    return bounding_boxes, landmarks
Example #11
def predict_image():
    filename = None     # set by the file-upload or URL branch below
    employeeList = []   # collected face records returned at the end

    if request.method == 'POST':
        if request.files.get('image'):
            f = request.files.get('image')
            ext = secure_filename(f.filename).rsplit('.', 1)[-1]
            if ext not in ALLOWED_EXTENSIONS:
                return 'Invalid type of file'
            if f:
                filename = os.path.join(app.config['UPLOAD_FOLDER'],
                                        secure_filename(f.filename))
                f.save(filename)

        elif request.form.get('url'):
            try:
                url = request.form.get('url')
                print(url)
                response = urllib.request.urlopen(url)
                filename = secure_filename(url.split('/')[-1])

                if filename:
                    filename = os.path.join(app.config['UPLOAD_FOLDER'],
                                            filename)
                    # urlopen returns a response object with no save() method;
                    # write the downloaded bytes to disk instead
                    with open(filename, 'wb') as out_file:
                        out_file.write(response.read())
            except Exception:
                print('Cannot read image from url')
        if filename:
            fn = secure_filename(filename)[:-4]
            min_side = 512
            img = cv2.imread(filename)
            size = img.shape
            h, w = size[0], size[1]
            if max(w, h) > min_side:
                img_pad = process_image(img)
            else:
                img_pad = img
            cv2.imwrite(
                os.path.join(app.config['UPLOAD_FOLDER'], f'{fn}_resize.png'),
                img_pad)

            img = Image.open(
                os.path.join(app.config['UPLOAD_FOLDER'], f'{fn}_resize.png'))
            bounding_boxes, landmarks = detect_faces(
                img)  # detect bboxes and landmarks for all faces in the image
            pic_face_detect = show_results(img, bounding_boxes,
                                           landmarks)  # visualize the results
            pic_face_detect.save(
                os.path.join(app.config['UPLOAD_FOLDER'],
                             f'{fn}_landmark.png'))
            crop_size = 224
            scale = crop_size / 112
            reference = get_reference_facial_points(
                default_square=True) * scale
            for i in range(len(landmarks)):
                facial5points = [[landmarks[i][j], landmarks[i][j + 5]]
                                 for j in range(5)]
                warped_face = warp_and_crop_face(np.array(img),
                                                 facial5points,
                                                 reference,
                                                 crop_size=(crop_size,
                                                            crop_size))
                img_warped = Image.fromarray(warped_face)
                img_warped.save(
                    os.path.join(UPLOAD_FOLDER, f'{fn}_{i}_crop.png'))

            # face recognition
            cleb_name = []

            for i in range(len(landmarks)):
                name = model.predict(
                    os.path.join(app.config['UPLOAD_FOLDER'],
                                 f'{fn}_{i}_crop.png'))
                cleb_name.append(name)

            employeeList = []
            for i in range(len(landmarks)):
                # pair each face's landmarks with its own bounding box;
                # boxes are assumed to follow the [x1, y1, x2, y2, score] layout
                box = bounding_boxes[i]
                face = {
                    "bounding_boxes": {
                        "left": box[0],
                        "top": box[1],
                        "right": box[2],
                        "bottom": box[3]
                    },
                    "landmark": landmarks[i],
                    "prediction": cleb_name[i],
                    "success": True
                }
                employeeList.append(face)
    return jsonify(jsanitize(employeeList))
Example #12
def detect_faces(*args, **kwargs):
    """wrapper for detector.detect_faces"""
    with peek("./face.evoLVe.PyTorch/align"), torch.no_grad():
        return detector.detect_faces(*args, **kwargs)
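A brief usage sketch for the wrapper above, assuming the underlying detector accepts a PIL image and returns bounding boxes and landmarks (the file name is illustrative):

from PIL import Image

img = Image.open("sample.jpg")  # hypothetical input image
bounding_boxes, landmarks = detect_faces(img)
print(len(bounding_boxes), "face(s) detected")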
Example #13
    print("Welcome to the Facial Recognition doorbell!\n")

    if not os.path.exists('database.db'):
        print('We have detected this is your first time running this program! We will create a database for you!\n')
        create_database.create_database()

    running = True

    while running:
        user_input = input("You can start the doorbell by typing 1\n"
                           "You can record a new face by typing 2\n"
                           "You can exit by typing 3\n"
                           "---> ")

        if user_input == "1":
            detector.detect_faces()
            cv2.destroyAllWindows()

        elif user_input == "2":
            record_faces.record_faces()
            trainer.train_faces()
            print("Face saved!")

        elif user_input == "3":
            running = False

        else:
            print("Invalid Input! Try again")

    print("Thank you for using the Facial Recognition doorbell!")
Example #14
def detect(photo):
  detection = detector.detect_faces(photo['source'])
  if 'tags' not in detection:
    print(detection, '\nExiting!')
    return None  # sys.exit()
  return detection
Example #15
from PIL import Image
from detector import detect_faces
import os, glob

path = "./data/AgeDB/*.jpg"

for img_file in glob.glob(path):
    im = Image.open(img_file)
    bounding_boxes, landmarks = detect_faces(im)
    print("For file", img_file, "the bounding boxes are:", str(bounding_boxes))
Example #16
    crop_size = args.crop_size  # specify size of aligned faces; align and crop with padding
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    cwd = os.getcwd()  # delete any '.DS_Store' files left in source_root
    os.chdir(source_root)
    os.system("find . -name '*.DS_Store' -type f -delete")
    os.chdir(cwd)

    if not os.path.isdir(dest_root):
        os.mkdir(dest_root)

    for subfolder in tqdm(os.listdir(source_root)):
        if not os.path.isdir(os.path.join(dest_root, subfolder)):
            os.mkdir(os.path.join(dest_root, subfolder))
        for image_name in os.listdir(os.path.join(source_root, subfolder)):
            print("Processing\t{}".format(os.path.join(source_root, subfolder, image_name)))
            img = Image.open(os.path.join(source_root, subfolder, image_name))
            try: # Handle exception
                _, landmarks = detect_faces(img)
            except Exception:
                print("{} is discarded due to exception!".format(os.path.join(source_root, subfolder, image_name)))
                continue
            if len(landmarks) == 0: # If the landmarks cannot be detected, the img will be discarded
                print("{} is discarded due to non-detected landmarks!".format(os.path.join(source_root, subfolder, image_name)))
                continue
            facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
            warped_face = warp_and_crop_face(np.array(img), facial5points, reference, crop_size=(crop_size, crop_size))
            img_warped = Image.fromarray(warped_face)
            img_warped.save(os.path.join(dest_root, subfolder, image_name.split('.')[0] + '.jpg'))
Example #17
    os.chdir(source_root)
    os.system("find . -name '*.DS_Store' -type f -delete")
    os.chdir(cwd)

    if not os.path.isdir(dest_root):
        os.mkdir(dest_root)

    for subfolder in tqdm(os.listdir(source_root)):
        if not os.path.isdir(os.path.join(dest_root, subfolder)):
            os.mkdir(os.path.join(dest_root, subfolder))
        for image_name in os.listdir(os.path.join(source_root, subfolder)):
            if os.path.exists(os.path.join(dest_root, subfolder, image_name)):
                continue
            img = Image.open(os.path.join(source_root, subfolder, image_name))
            try:  # Handle exception
                _, landmarks = detect_faces(img, gpu_id)
            except Exception:
                print("{} is discarded due to exception!".format(
                    os.path.join(source_root, subfolder, image_name)))
                continue
            if len(landmarks) == 0:  # discard the image if no landmarks were detected
                print("{} is discarded due to non-detected landmarks!".format(
                    os.path.join(source_root, subfolder, image_name)))
                continue
            facial5points = [[landmarks[0][j], landmarks[0][j + 5]]
                             for j in range(5)]
            warped_face = warp_and_crop_face(np.array(img),
                                             facial5points,
                                             reference,
                                             crop_size=(crop_size, crop_size))
Example #18
    os.system("find . -name '*.DS_Store' -type f -delete")
    os.system(r"find . -name '*.ipynb_checkpoints' -prune -exec rm -rf {} \;")
    os.chdir(cwd)

    if not os.path.isdir(dest_root):
        os.mkdir(dest_root)

    for subfolder in tqdm(os.listdir(source_root)):
        if not os.path.isdir(os.path.join(dest_root, subfolder)):
            os.mkdir(os.path.join(dest_root, subfolder))
        for image_name in os.listdir(os.path.join(source_root, subfolder)):
            print("Processing\t{}".format(
                os.path.join(source_root, subfolder, image_name)))
            img = Image.open(os.path.join(source_root, subfolder, image_name))
            try:  # Handle exception
                _, landmarks = detect_faces(img)
            except Exception:
                print("{} is discarded due to exception!".format(
                    os.path.join(source_root, subfolder, image_name)))
                continue
            if len(landmarks) == 0:  # discard the image if no landmarks were detected
                print("{} is discarded due to non-detected landmarks!".format(
                    os.path.join(source_root, subfolder, image_name)))
                continue
            facial5points = [[landmarks[0][j], landmarks[0][j + 5]]
                             for j in range(5)]
            warped_face = warp_and_crop_face(np.array(img),
                                             facial5points,
                                             reference,
                                             crop_size=(crop_size, crop_size))
Example #19
uid = uid[0:len(uid)-5] + '@wicknicks'

if uid in cache:
  print(uid, 'has already been trained')
  print('Exiting')
  sys.exit()

print('Tagging', uid)

if len(sys.argv) < 2: sys.exit()
links = open(sys.argv[1], 'r')

count = 0
for img in links.readlines():
  img = img.strip()
  detection = detector.detect_faces(img)
  
  if 'tags' not in detection:
    print(detection)
    sys.exit()

  if len(detection['tags']) != 1:
    print("# of tags != 1:", img, len(detection['tags']))
    print(json.dumps(detection, sort_keys=True, indent=2))
    print()
    sys.exit()
    
  tag = detection['tags'][0]['tid']
  rsp = tagger.save_tag(uid, tag, img)
  count += 1