Example #1
def get_target_face(target_face_path):
    rgb_image = imread(target_face_path)
    ret, _ = detect_face(rgb_image)
    if len(ret) == 0:
        raise Exception('no face in target image')
    # detect_face returns (left, top, right, bottom) boxes; crop the first
    l, t, r, b = ret[0]
    return rgb_image[t:b, l:r]
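A minimal usage sketch, assuming imread returns an RGB ndarray and that detect_face yields (left, top, right, bottom) boxes, as the slicing above implies; the path is a placeholder:

# hypothetical call; the sample path is a placeholder
face_crop = get_target_face('samples/target.jpg')
print(face_crop.shape)  # (height, width, 3) RGB crop of the first face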
Example #2
def get_target_face():
    rgb_image = imread(config.SAMPLE_IMAGE_PATH)
    ret, _ = detect_face(rgb_image)
    if len(ret) == 0:
        raise Exception('no face in sample image')
    l, t, r, b = ret[0]
    return rgb_image[t:b, l:r]
Example #3
def test_live(message):
    # queue the incoming base64 frame, then decode it to an RGB array
    app.queue.put(message['data'])
    img_bytes = base64.b64decode(app.queue.get())
    img_np = np.array(Image.open(io.BytesIO(img_bytes)))
    # detect_face appears to return the processed frame in this example
    img_np = detect_face(img_np)
    # re-encode as JPEG and broadcast the result to all connected clients
    frame = cv2.imencode('.jpg', img_np)[1].tobytes()
    base64_bytes = base64.b64encode(frame)
    base64_string = base64_bytes.decode('utf-8')
    emit('camera_update', {'data': base64_string}, broadcast=True)
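For context, a handler like this would typically be wired to a Flask-SocketIO event. A minimal sketch of that wiring follows; the event name 'live' and the app/queue setup are assumptions, not shown in the source:

import queue

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
app.queue = queue.Queue()  # the handler above pulls frames from app.queue
socketio = SocketIO(app)

# hypothetical event name; the actual registration is not shown above
socketio.on_event('live', test_live)

if __name__ == '__main__':
    socketio.run(app)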
Example #4
def dump_data():
    for person in lists:
        person_images = os.path.join(data_path, person)
        for image_path in glob.glob(os.path.join(person_images, '*.jpg')):
            image = utils.load_rgb_image(image_path)
            ret, _ = detect_face(image)
            # keep only the largest detected face in each image
            maxx = 0
            face = None
            for bb in ret:
                l, t, r, b = bb
                area = (b - t) * (r - l)
                if area > maxx:
                    maxx = area
                    face = image[t:b, l:r]
            if face is None:
                # no face found: drop the image from the dataset
                os.remove(image_path)
                continue
            # cv2.imwrite expects BGR, so reverse the RGB channels
            cv2.imwrite(image_path, face[:, :, ::-1])
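The area comparison above is a common pattern; a hypothetical standalone helper makes the intent explicit, assuming boxes come back as (left, top, right, bottom):

def largest_face(image, boxes):
    # hypothetical helper: return the crop with the largest pixel area
    best, best_area = None, 0
    for l, t, r, b in boxes:
        area = (b - t) * (r - l)
        if area > best_area:
            best, best_area = image[t:b, l:r], area
    return best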
Example #5
    def select_candidates(self, path, cur_video, scene_list, n):
        candidates_list = []
        for scene in scene_list:
            candidates = []
            start = scene[0]
            end = scene[1]
            # stride k so that roughly n frames are sampled from long scenes
            k = (end - start) // n + 1 if end - start > n else 1
            for i in range(start, end + 1, k):
                cand = imagetool.fast_readrgbfile(path + cur_video[i])
                num_of_faces = face_detection.detect_face(cand)
                # keep every frame with a face, plus ~40% of face-less frames
                if num_of_faces >= 1 or random.random() <= 0.4:
                    candidates.append(cand)

            candidates_list.append(candidates)

        return candidates_list
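For intuition about the stride: scenes longer than n frames are subsampled so that roughly n candidates are read. A quick check of the arithmetic with made-up numbers:

# hypothetical numbers: a 100-frame scene sampled toward n = 20 candidates
start, end, n = 0, 100, 20
k = (end - start) // n + 1 if end - start > n else 1
print(k, len(range(start, end + 1, k)))  # 6, so 17 frames actually read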
Example #6
    def extract_keyframe_for_videos(self, path, all_videos, image_selecter):

        videos_scene = []
        for i in range(0, len(all_videos)):
            scene_list = self.find_scene_boundary(path, all_videos[i], 0,
                                                  len(all_videos[i]), 15)
            videos_scene.append(scene_list)
        for group in videos_scene:
            print(group)
        candidates_frames = []
        for i in range(0, len(videos_scene)):
            cur_video = all_videos[i]
            cur_video_scene = videos_scene[i]
            candidates_frames.extend(
                self.select_candidates(path, cur_video, cur_video_scene, 20))

        final_key_frames = []
        for cand in candidates_frames:
            frame_list, _ = image_selecter.select_best_frames(
                cand, 5)  # can optimize with face detection
            # if 5 candidates have faces images, then pick one of them
            face_frames = []
            for frame in frame_list:
                face_nums = face_detection.detect_face(frame)
                if face_nums > 0:
                    face_frames.append(frame)

            # prefer the best frame that contains a face; otherwise fall
            # back to the best frame overall
            if len(face_frames) > 0:
                frame_list, _ = image_selecter.select_best_frames(
                    face_frames, 1)
            else:
                frame_list, _ = image_selecter.select_best_frames(
                    frame_list, 1)

            for frame in frame_list:
                final_key_frames.append(frame)
        res = imagetool.combine_rgbimages(final_key_frames)

        cv2.imshow("before-postprocessing",
                   cv2.cvtColor(res, cv2.COLOR_RGB2BGR))

        videos_scene, final_key_frames = self.video_postprocessing(
            videos_scene, final_key_frames)
        return videos_scene, final_key_frames
Example #7
    def extract_key_framses_from_images(self, root, all_images_path,
                                        image_selecter):
        candidates_images = []
        candidates_name = []
        for path in all_images_path:
            img = imagetool.fast_readrgbfile(root + path)
            hasFace = face_detection.detect_face(img)
            # keep every image with a face, plus ~30% of face-less images
            if hasFace or random.random() < 0.3:
                candidates_images.append(img)
                candidates_name.append(path)

        frame_list, index_list = image_selecter.select_best_frames(
            candidates_images, 5)
        final_images_name = []
        final_images = []
        index_list.sort()
        for i in index_list:
            final_images_name.append(candidates_name[i])
        for img_name in final_images_name:
            final_images.append(imagetool.fast_readrgbfile(root + img_name))
        return final_images_name, final_images
Example #8
def home():
    faces = []
    if request.method == "POST":
        file_one = request.files['file_one']
        file_two = request.files['file_two']

        filename_one = os.path.join(basedir, file_one.filename)
        filename_two = os.path.join(basedir, file_two.filename)

        file_one.save(filename_one)
        file_two.save(filename_two)
        
        # detect on the saved copies (the uploads were written under basedir)
        faces = detect_face(filename_one, filename_two)
        faces_as_html = []

        for _faces in faces:
            for f in _faces:
                faces_as_html.append(serve_pil_image(f))
        faces = faces_as_html

        os.remove(filename_one)
        os.remove(filename_two)

    return render_template("index.html", faces=faces, str=str, enumerate=enumerate)
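serve_pil_image is not defined in this example; one plausible implementation, assuming the template embeds each detected face as an inline data URI:

import base64
import io

def serve_pil_image(img):
    # hypothetical implementation: encode a PIL image as an inline data URI
    buf = io.BytesIO()
    img.save(buf, format='JPEG')
    encoded = base64.b64encode(buf.getvalue()).decode('utf-8')
    return 'data:image/jpeg;base64,' + encoded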
Example #9
def get_annotation_data():
    data = pd.read_csv(config.ANNOTATION_PATH).values
    filenames = data[:, 0]
    labels = data[:, -1]
    return filenames, labels

filenames, labels = get_annotation_data()
features = []
for fname in filenames:
    image_path = os.path.join(config.PUBLIC_TEST_PATH, fname + '.jpg')
    print(image_path)
    image = utils.load_rgb_image(image_path)
    if image is None:
        features.append([])
        continue
    bbs, _ = detect_face(image.copy())
    if len(bbs) == 0:
        print("No face found in: ", image_path)
        features.append([])
    else:
        vecs = []
        for bounding_box in bbs:
            l, t, r, b = bounding_box
            face = image[t:b, l:r]
            face_emb = utils.get_feature_vec(face, fname)
            print(len(face_emb))
            vecs.append(face_emb)
        features.append(vecs)
print(len(features))
with open('data/publictest_facenetfeatures.p', 'wb') as f:
    pickle.dump(features, f)
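The cached features can be read back later the same way; a minimal sketch:

# reload the cached per-image feature lists later
with open('data/publictest_facenetfeatures.p', 'rb') as f:
    features = pickle.load(f)
print(len(features))  # one (possibly empty) list of embeddings per row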
Example #10
import cv2

from face_detection import detect_face

i = 0
cam = cv2.VideoCapture(i)
print('trying slot {}'.format(i))

if not cam.isOpened():
    raise SystemExit('camera is not open')

ret, image = cam.read()

cv2.imshow('image', image)
cv2.waitKey(1)

# time.sleep(3)


while ret:
    ret, image = cam.read()
    if not ret:
        break
    image = detect_face(image)
    cv2.imshow('image', image)
    cv2.waitKey(1)

cam.release()
cv2.destroyAllWindows()

Example #11
    # slic segmentation
    slic_map, mean_color = _segmentation(img, visualise=False)

    saliency_object = saliency.Saliency(img, 3)
    saliency_map = saliency_object.getSaliencyMap()

    # find surf features for superpixels
    b_surf = False
    surf_features = None
    if b_surf:
        surf_features = _find_surf_feaures(img, slic_map)

    # detect faces
    grid_size = (60, 80)
    padding = img.shape[1] // grid_size[1]  # integer padding per grid cell
    faces, frames = fdetect.detect_face(img, padding=padding, visualise=False)

    feature_set, descriptor = image_graph(slic_map,
                                          mean_color,
                                          frames,
                                          grid_size=grid_size,
                                          visualise=True,
                                          b_surf=b_surf,
                                          surf_features=surf_features)

    sal_descriptor, ds_sal_map = get_saliency_based_descriptor(
        saliency_map, frames, grid_size=grid_size, visualise=True)

    plot_map(saliency_map, ds_sal_map)
Example #12
import cv2
import face_detection

vc = cv2.VideoCapture(0)

if vc.isOpened():  # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False

while rval:
    cv2.imshow("face detection", frame)
    rval, frame = vc.read()
    if not rval:
        break
    face_detection.detect_face(frame)
    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        break
vc.release()
cv2.destroyWindow("face detection")
Example #13
        motion_status = 1

        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 3)

    # record the time whenever motion enters or exits the frame
    status_list.append(motion_status)
    status_list = status_list[-2:]

    if status_list[-1] != status_list[-2]:
        motion_times.append(datetime.now())

    # check whether a face is detected while motion is happening
    if detect_face(frame):
        if not face_status:
            # first frame of a new face event: log the time, save a snapshot
            count += 1
            face_times.append(datetime.now())
            cv2.imwrite(filename='./imgs/saved_img' + str(count) + '.jpg',
                        img=frame)
            face_status = True
    else:
        if face_status:
            face_times.append(datetime.now())
        face_status = False

    cv2.imshow("Colour Frame", frame)

    key = cv2.waitKey(100)
    if key == ord('q'):
        break
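Since a timestamp is appended both when motion starts and when it stops, the list can be paired into intervals once the loop ends; a hypothetical post-processing step:

# hypothetical post-processing: pair start/stop timestamps into intervals
motion_intervals = list(zip(motion_times[0::2], motion_times[1::2]))
for started, stopped in motion_intervals:
    print('motion from', started, 'to', stopped)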