예제 #1
0
def run_job(params):
    '''
    Pre-process every image in one source directory.

    params is a sequence of (at least) four items:
        [0] source directory containing the images
        [1] destination root path
        [2] max width: wider images are shrunk to this width after cropping
        [3] target size handed to crop_face
    '''
    src_dir, root_path, max_width, face_size = params[:4]

    # Collect the image paths, and keep only the last path component of the
    # source directory so the output mirrors its folder name under root_path.
    source = Files(src_dir + "/")
    folder_name = src_dir.split("/")[-1]

    for path in source.paths:
        picture = Image(path)

        picture = crop_face(picture, face_size)

        # Shrink oversized crops before saving.
        if picture.width > max_width:
            picture = picture.resize(max_width)

        base_name = path.split("/")[-1]
        picture.save(root_path + folder_name + "/" + base_name)
예제 #2
0
    def monitor(self, interval=0.5, time_span=10, threshold=1e-10, gray_scale=True):
        """Watch the camera and alert when the rolling identity score drops.

        Grabs a frame from ``self.cctv`` every ``interval`` seconds, scores
        the first detected face with ``whoisthis``, and keeps the scores for
        the last ``time_span`` seconds in a rolling window.  When the window
        mean falls below ``threshold``, shows an image, sends an alert
        message, and raises ValueError.

        NOTE(review): the original docstring described the trigger as "fewer
        than threshold * time_span pictures that's me", but the code compares
        the window *mean* against ``threshold`` -- confirm the intended
        semantics.

        Raises:
            ValueError: when the rolling mean drops below ``threshold``.
        """
        # One slot per frame captured inside the rolling window; starts at
        # all-ones so the alarm cannot fire before real scores accumulate.
        lookback = np.ones([int(time_span / interval)])

        for image in self.cctv(interval, gray_scale):
            faces = get_faces(image)
            if faces is None:
                print('No face detected in camera.')
                continue

            # Crop the first detected face.
            # NOTE(review): the (x, w, y, h) argument order is unusual --
            # confirm it matches this project's crop_face signature.
            x, y, w, h = faces[0]
            face = crop_face(image, x, w, y, h)
            # Resize to the input size the identity model expects.
            face = cv2.resize(face, (64, 64))

            # Shift the window left and append the newest score.
            lookback = np.roll(lookback, shift=-1)
            identity = whoisthis(face)
            print(identity)
            lookback[-1] = identity
            if np.mean(lookback) < threshold:
                show_image()
                # Fixed grammar in the alert text ("is look at" -> "is looking at").
                send_message(r'[Alert] Someone is looking at your screen!')
                raise ValueError('This is not me!')
예제 #3
0
def to_hdf5(wrapper, save_as=None):
    if save_as is None:
        save_as = wrapper.dataset_name.replace(' ', '_') + '.h5'
    
    f = None

    print wrapper.dataset_name
 
    try:
        f = tables.openFile(save_as, mode='w')

        train_group = f.createGroup('/', 'train', 'train set')
        test_group = f.createGroup('/', 'test', 'test set')

        img_groups = (train_group, None)

        test_idx = None
        dsets = [range(len(wrapper)), None]

        if wrapper.get_standard_train_test_splits() is not None:
            dsets[0], dsets[1] = wrapper.get_standard_train_test_splits()
            img_groups = (img_groups[0], test_group)

        for some_idx, testing in enumerate(dsets):
            if testing is None: 
                break
            img_group = img_groups[some_idx]
            dset = dsets[some_idx]

            img_table = f.createCArray(img_group, 'img', tables.StringAtom(itemsize=1), shape=(len(dset), 96*96*3))
            label_table = f.createCArray(img_group, 'label', tables.Float64Atom(), shape=(len(dset), len(keypoints_names), 2))

            for i, _ in enumerate(dset):
                img, label = crop_face(PIL.Image.open(wrapper.get_original_image_path(i)),
                                            wrapper.get_bbox(i),
                                            wrapper.get_eyes_location(i),
                                            wrapper.get_keypoints_location(i))
                 
                img_table[i, :] = [x for x in img.tostring()]
                #NOTE: will be stored as cstring, so it is mandatory to store as
                #strings of size 1 (as \0 would break the string)
                for name in keypoints_names:
                    if name in label:
                        point = label[name]
                        label_table[i, keypoints_names.index(name), :] = [point[0], point[1]]
                    else:
                        label_table[i, keypoints_names.index(name), :] = [-1, -1]

    finally:
        if f is not None:
            f.close()
예제 #4
0
def subtract_neutral_face(img_path, dst='Same', flag='diff'):
    """Subtract the neutral face from an expression face and save the result.

    Parameters:
        img_path: path to a jpg image of the expression face.
        dst: destination for the output image.  If 'Same' (note: the check
            is case-sensitive, i.e. capital S -- the original comment said
            'same') the input image is overwritten in place; otherwise the
            output is saved at ``dst`` relative to img_path's folder.
        flag: which output to save --
            'diff': saturated difference between expression and neutral face,
            'thre': fixed binary threshold (>10 -> 255) of that difference,
            'ath' : adaptive Gaussian threshold of that difference.

    Returns:
        The boolean result of cv2.imwrite, or None for an unknown flag.
    """
    folder_path, _ = os.path.split(img_path)

    if dst == 'Same':
        dst_path = img_path
    else:
        dst_path = os.path.join(folder_path, dst)

    # Extract and resize the neutral face.  (Removed the original's unused
    # full cv2.imread of the neutral image -- it wasted a disk read.)
    nf_path = get_neutral_face_path(img_path)
    nf_img_crop = crop_face(nf_path, ret=True)  # returns a 3D np array
    nf_img_crop_rs = cv2.resize(nf_img_crop, (224, 224))

    # Read and resize the expression face (grayscale).
    img = cv2.imread(img_path, 0)
    img_rs = cv2.resize(img, (224, 224))

    # NOTE: cv2.subtract is a *saturated* difference (clipped at 0), not an
    # absolute difference as the original comment claimed.
    diff = cv2.subtract(img_rs, nf_img_crop_rs)
    retval, threshold = cv2.threshold(diff, 10, 255, cv2.THRESH_BINARY)
    ath = cv2.adaptiveThreshold(diff, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 3)

    # Dispatch on the requested output; unknown flags save nothing.
    outputs = {'diff': diff, 'thre': threshold, 'ath': ath}
    if flag in outputs:
        return cv2.imwrite(dst_path, outputs[flag])
    return None
예제 #5
0
def to_hdf5(wrapper, save_as=None):
    """Serialize a face-dataset wrapper into an HDF5 file.

    Creates /train/data/img and /train/label/label tables (and the /test
    equivalents when the wrapper provides a standard train/test split).
    Each image is stored as a raw byte string with its dataset index; each
    keypoint becomes one (idx, name-id, col, row) label row.

    Parameters:
        wrapper: dataset wrapper exposing dataset_name,
            get_original_image_path, get_bbox, get_eyes_location,
            get_keypoints_location and get_standard_train_test_splits.
        save_as: output file name; defaults to the dataset name with
            spaces replaced by underscores, plus '.h5'.
    """
    if save_as is None:
        save_as = wrapper.dataset_name.replace(' ', '_') + '.h5'

    # Fixed keypoint-name -> integer id mapping; names not listed here are
    # stored with id 0 (see the_id below).
    keypoints_to_id = {
        'left_eyebrow_inner_end': 1,
        'left_eyebrow_outer_end': 27,
        'mouth_top_lip_bottom': 2,
        'right_ear_canal': 3,
        'face_right': 52,
        'right_ear_top': 4,
        'right_ear_center': 29,
        'mouth_top_lip': 5,
        'right_eye_center_top': 25,
        'mouth_bottom_lip_top': 6,
        'right_eyebrow_center': 7,
        'face_center': 31,
        'chin_left': 8,
        'right_eyebrow_center_top': 33,
        'face_left': 53,
        'left_eyebrow_center': 34,
        'right_eye_pupil': 35,
        'right_nostril': 42,
        'mouth_left_corner': 37,
        'left_eye_center_bottom': 38,
        'nose_tip': 9,
        'nostrils_center': 18,
        'left_eyebrow_center_top': 10,
        'left_eye_outer_corner': 11,
        'mouth_right_corner': 41,
        'right_eye_inner_corner': 32,
        'left_nostril': 48,
        'right_eye_center': 43,
        'right_ear': 12,
        'left_ear_top': 28,
        'mouth_bottom_lip': 13,
        'left_eye_center': 14,
        'left_mouth_outer_corner': 15,
        'right_eyebrow_outer_end': 45,
        'left_eye_center_top': 16,
        'left_ear_center': 17,
        'left_eye_pupil': 46,
        'left_eyebrow_center_bottom': 39,
        'right_ear_bottom': 36,
        'right_eyebrow_inner_end': 26,
        'right_eye_center_bottom': 20,
        'chin_center': 21,
        'left_eye_inner_corner': 22,
        'mouth_center': 47,
        'right_mouth_outer_corner': 23,
        'left_ear_bottom': 24,
        'nose_center_top': 30,
        'right_eyebrow_center_bottom': 49,
        'left_ear_canal': 50,
        'right_eye_outer_corner': 19,
        'left_ear': 51,
        'chin_right': 44
    }

    # Kept outside the try so the finally clause can close the file only
    # when it was actually opened.
    f = None

    print wrapper.dataset_name

    try:
        f = tables.openFile(save_as, mode='w')

        # Group layout: /{train,test}/{data,label}.
        train_group = f.createGroup('/', 'train', 'train set')
        test_group = f.createGroup('/', 'test', 'test set')
        train_img_group = f.createGroup('/train', 'data', 'data group')
        train_label_group = f.createGroup('/train', 'label', 'label group')
        test_img_group = f.createGroup('/test', 'data', 'data group')
        test_label_group = f.createGroup('/test', 'label', 'label group')

        # Second slot stays None unless a standard split exists, in which
        # case it is filled in below; the loop stops at the first None.
        img_groups = (train_img_group, None)
        label_groups = (train_label_group, None)

        # NOTE(review): test_idx is never used.
        test_idx = None
        dsets = [range(len(wrapper)), None]

        if wrapper.get_standard_train_test_splits() is not None:
            dsets[0], dsets[1] = wrapper.get_standard_train_test_splits()
            img_groups = (img_groups[0], test_img_group)
            label_groups = (label_groups[0], test_label_group)

        for some_idx, testing in enumerate(dsets):
            if testing is None:
                break
            img_group = img_groups[some_idx]
            label_group = label_groups[some_idx]
            dset = dsets[some_idx]

            # ImgStruct / LabelStruct are PyTables row descriptions defined
            # elsewhere in this project.
            img_table = f.createTable(img_group, 'img', ImgStruct,
                                      'image data')
            label_table = f.createTable(label_group, 'label', LabelStruct,
                                        'target data')
            img_row = img_table.row
            label_row = label_table.row

            # dset holds dataset indices (either range(len(wrapper)) or a
            # standard split), so i is used both as the stored idx and for
            # the wrapper lookups.
            for i in dset:
                img, label = crop_face(
                    PIL.Image.open(wrapper.get_original_image_path(i)),
                    wrapper.get_bbox(i), wrapper.get_eyes_location(i),
                    wrapper.get_keypoints_location(i))

                img_row['idx'] = i
                img_row['data'] = img.tostring()
                img_row.append()

                # One label row per keypoint present on this image.
                for name, point in label.iteritems():
                    # Unknown keypoint names fall back to id 0.
                    the_id = 0
                    if name in keypoints_to_id:
                        the_id = keypoints_to_id[name]

                    label_row['idx'] = i
                    label_row['name'] = the_id
                    label_row['col'] = point[0]
                    label_row['row'] = point[1]
                    label_row.append()
            img_table.flush()
            label_table.flush()

    finally:
        if f is not None:
            f.close()
예제 #6
0
def to_hdf5(wrapper, save_as=None):
    """Serialize a face-dataset wrapper into an HDF5 file.

    Builds /train/data/img and /train/label/label tables (plus the /test
    equivalents when the wrapper exposes a standard train/test split).
    Every image is stored as a raw byte string keyed by its dataset index;
    every keypoint yields one (idx, name-id, col, row) label row.
    """
    if save_as is None:
        save_as = wrapper.dataset_name.replace(' ', '_') + '.h5'

    # Fixed keypoint-name -> integer id mapping; unlisted names map to 0.
    keypoints_to_id = {
        'left_eyebrow_inner_end': 1, 'left_eyebrow_outer_end': 27,
        'mouth_top_lip_bottom': 2, 'right_ear_canal': 3,
        'face_right': 52, 'right_ear_top': 4,
        'right_ear_center': 29, 'mouth_top_lip': 5,
        'right_eye_center_top': 25, 'mouth_bottom_lip_top': 6,
        'right_eyebrow_center': 7, 'face_center': 31,
        'chin_left': 8, 'right_eyebrow_center_top': 33,
        'face_left': 53, 'left_eyebrow_center': 34,
        'right_eye_pupil': 35, 'right_nostril': 42,
        'mouth_left_corner': 37, 'left_eye_center_bottom': 38,
        'nose_tip': 9, 'nostrils_center': 18,
        'left_eyebrow_center_top': 10, 'left_eye_outer_corner': 11,
        'mouth_right_corner': 41, 'right_eye_inner_corner': 32,
        'left_nostril': 48, 'right_eye_center': 43,
        'right_ear': 12, 'left_ear_top': 28,
        'mouth_bottom_lip': 13, 'left_eye_center': 14,
        'left_mouth_outer_corner': 15, 'right_eyebrow_outer_end': 45,
        'left_eye_center_top': 16, 'left_ear_center': 17,
        'left_eye_pupil': 46, 'left_eyebrow_center_bottom': 39,
        'right_ear_bottom': 36, 'right_eyebrow_inner_end': 26,
        'right_eye_center_bottom': 20, 'chin_center': 21,
        'left_eye_inner_corner': 22, 'mouth_center': 47,
        'right_mouth_outer_corner': 23, 'left_ear_bottom': 24,
        'nose_center_top': 30, 'right_eyebrow_center_bottom': 49,
        'left_ear_canal': 50, 'right_eye_outer_corner': 19,
        'left_ear': 51, 'chin_right': 44,
    }

    h5file = None

    print(wrapper.dataset_name)

    try:
        h5file = tables.openFile(save_as, mode='w')

        # Group layout: /{train,test}/{data,label}.
        h5file.createGroup('/', 'train', 'train set')
        h5file.createGroup('/', 'test', 'test set')
        train_img_group = h5file.createGroup('/train', 'data', 'data group')
        train_label_group = h5file.createGroup('/train', 'label', 'label group')
        test_img_group = h5file.createGroup('/test', 'data', 'data group')
        test_label_group = h5file.createGroup('/test', 'label', 'label group')

        # The second slot of each list stays None unless a standard split
        # exists; the loop below stops at the first None index set.
        datasets = [range(len(wrapper)), None]
        data_groups = [train_img_group, None]
        target_groups = [train_label_group, None]

        if wrapper.get_standard_train_test_splits() is not None:
            datasets[0], datasets[1] = wrapper.get_standard_train_test_splits()
            data_groups[1] = test_img_group
            target_groups[1] = test_label_group

        for indices, data_group, target_group in zip(datasets, data_groups,
                                                     target_groups):
            if indices is None:
                break

            # ImgStruct / LabelStruct are PyTables row descriptions defined
            # elsewhere in this project.
            img_table = h5file.createTable(data_group, 'img', ImgStruct,
                                           'image data')
            label_table = h5file.createTable(target_group, 'label',
                                             LabelStruct, 'target data')
            img_row = img_table.row
            label_row = label_table.row

            for i in indices:
                img, label = crop_face(
                    PIL.Image.open(wrapper.get_original_image_path(i)),
                    wrapper.get_bbox(i),
                    wrapper.get_eyes_location(i),
                    wrapper.get_keypoints_location(i))

                img_row['idx'] = i
                img_row['data'] = img.tostring()
                img_row.append()

                # One label row per keypoint present on this image.
                for name, point in label.iteritems():
                    label_row['idx'] = i
                    label_row['name'] = keypoints_to_id.get(name, 0)
                    label_row['col'] = point[0]
                    label_row['row'] = point[1]
                    label_row.append()

            img_table.flush()
            label_table.flush()

    finally:
        if h5file is not None:
            h5file.close()
예제 #7
0
import crop_face as cf
import cfal as alf

# Model files for the OpenCV SSD / Caffe face detector.
prototxt = 'ckpt_/deploy.prototxt.txt'
weights = 'ckpt_/res10_300x300_ssd_iter_140000.caffemodel'

# Load the face-detection model.
# NOTE(review): load_detection_model is neither defined nor imported here --
# presumably it comes from one of the modules above; confirm.
face_detection = load_detection_model(prototxt, weights)

filename = "filepath_of_video"

dirName = "dir_name for cropped_images"

# Detect faces in the video and save each cropped face into dirName.
a = cf.crop_face(filename, face_detection, dirName)

# Alternative: detect, align, then save the cropped faces.
# (Uses the `alf` alias imported above; the original comment said `afl`.)
# a = alf.crop_face(filename, face_detection, dirName)

print("Done", a)

예제 #8
0
def to_hdf5(wrapper, save_as=None):
    if save_as is None:
        save_as = wrapper.dataset_name.replace(' ', '_') + '.h5'

    f = None

    print wrapper.dataset_name

    try:
        f = tables.openFile(save_as, mode='w')

        train_group = f.createGroup('/', 'train', 'train set')
        test_group = f.createGroup('/', 'test', 'test set')

        img_groups = (train_group, None)

        test_idx = None
        dsets = [range(len(wrapper)), None]

        if wrapper.get_standard_train_test_splits() is not None:
            dsets[0], dsets[1] = wrapper.get_standard_train_test_splits()
            img_groups = (img_groups[0], test_group)

        for some_idx, testing in enumerate(dsets):
            if testing is None:
                break
            img_group = img_groups[some_idx]
            dset = dsets[some_idx]

            img_table = f.createCArray(img_group,
                                       'img',
                                       tables.StringAtom(itemsize=1),
                                       shape=(len(dset), 96 * 96 * 3))
            label_table = f.createCArray(img_group,
                                         'label',
                                         tables.Float64Atom(),
                                         shape=(len(dset),
                                                len(keypoints_names), 2))

            for i, _ in enumerate(dset):
                img, label = crop_face(
                    PIL.Image.open(wrapper.get_original_image_path(i)),
                    wrapper.get_bbox(i), wrapper.get_eyes_location(i),
                    wrapper.get_keypoints_location(i))

                img_table[i, :] = [x for x in img.tostring()]
                #NOTE: will be stored as cstring, so it is mandatory to store as
                #strings of size 1 (as \0 would break the string)
                for name in keypoints_names:
                    if name in label:
                        point = label[name]
                        label_table[i, keypoints_names.index(name), :] = [
                            point[0], point[1]
                        ]
                    else:
                        label_table[i,
                                    keypoints_names.index(name), :] = [-1, -1]

    finally:
        if f is not None:
            f.close()