import cv2
import dlib


def test_face_jitter():
    """
    This example shows how faces are jittered and augmented to create training
    data for dlib's face recognition model.  It takes an input image,
    disturbs its colors, and applies random translations, rotations, and
    scaling.
    """
    img_path = 'data/running_man.jpg'
    img = cv2.imread(img_path)  # OpenCV loads images in BGR order
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # dlib expects RGB

    # get face
    hog_face_detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')

    rects, scores, idx = hog_face_detector.run(img_rgb, 2, 0)
    faces = dlib.full_object_detections()
    for rect in rects:
        faces.append(shape_predictor(img_rgb, rect))

    face_image = dlib.get_face_chip(img_rgb, faces[0], size=80)

    # jitter face
    jittered_images = dlib.jitter_image(face_image, num_jitters=4, disturb_colors=True)
    for idx, image in enumerate(jittered_images):
        image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV display
        cv2.imshow('jittered_image_{}'.format(idx), image_bgr)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
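In the same spirit, the jittered crops can be written to disk instead of displayed, which is closer to how they would be used as training data. A minimal sketch, assuming a hypothetical output directory and an aligned RGB face chip like face_image above:

import os

import cv2
import dlib


def save_jittered_crops(face_image_rgb, out_dir='jitter_out', num_jitters=10):
    # face_image_rgb is an aligned RGB face chip, e.g. from dlib.get_face_chip.
    os.makedirs(out_dir, exist_ok=True)
    jittered = dlib.jitter_image(face_image_rgb, num_jitters=num_jitters,
                                 disturb_colors=True)
    for i, chip in enumerate(jittered):
        # dlib returns RGB; cv2.imwrite expects BGR, so convert before saving.
        cv2.imwrite(os.path.join(out_dir, 'jitter_{:03d}.png'.format(i)),
                    cv2.cvtColor(chip, cv2.COLOR_RGB2BGR))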
Example 2
def get_jittered_images(image_path, num_jitters=5, disturb_colors=False):
    # FaceDetector and LandmarkDetector are project-specific wrappers around
    # dlib's face detector and shape predictor.
    face_detector = FaceDetector(face_area_threshold=0.0)
    landmark_predictor = LandmarkDetector()

    # Note: cv2.imread returns BGR data despite the variable name.
    rgbImg = cv2.imread(image_path)
    grayImg = None
    if rgbImg is None:
        raise Exception("Image could not be loaded: {}".format(image_path))
    elif rgbImg.shape[2] == 3:
        grayImg = cv2.cvtColor(rgbImg, cv2.COLOR_BGR2GRAY)
    else:
        grayImg = rgbImg

    # Ask the detector to find the bounding boxes of each face.
    face_bb = face_detector.detect_unbounded(grayImg)

    landmarks = landmark_predictor.predict(bounding_box=face_bb[0],
                                           grayImg=grayImg)

    aligned_face = dlib.get_face_chip(cv2.cvtColor(grayImg,
                                                   cv2.COLOR_GRAY2RGB),
                                      landmarks,
                                      size=320,
                                      padding=1.0)

    jittered_images = dlib.jitter_image(aligned_face,
                                        num_jitters=num_jitters,
                                        disturb_colors=disturb_colors)

    return jittered_images
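For comparison, the alignment-plus-jitter pipeline above can be reproduced with dlib alone, without the project-specific FaceDetector and LandmarkDetector wrappers. A minimal sketch, assuming the shape_predictor_5_face_landmarks.dat model file is available locally:

import dlib


def jitter_with_plain_dlib(image_path, num_jitters=5, disturb_colors=False):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')

    img = dlib.load_rgb_image(image_path)   # dlib loads images directly as RGB
    rects = detector(img, 1)                # upsample once to catch smaller faces
    if not rects:
        return []

    landmarks = predictor(img, rects[0])
    aligned = dlib.get_face_chip(img, landmarks, size=320, padding=1.0)
    return dlib.jitter_image(aligned, num_jitters=num_jitters,
                             disturb_colors=disturb_colors)
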
Example 3
    def get_data(self):
        global index
        global path_list
        global g_Lock
        global detect_c
        global Img_Error_list_path
        with g_Lock:
            m_index = index
            global_list_len = len(path_list)
            end = min(m_index + self._batch_size, global_list_len)
            if (m_index == end):
                return None
            m_path_list = path_list[m_index:end]
            index = end
        
        res = {
            "img": np.ndarray(shape=(self._batch_size, self._img_height, self._img_width, self._channels), dtype=np.float32),
            "path_list": [],
            "last_batch": False
        }
        if end == global_list_len:
            res["last_batch"] = True
        
        count = 0
        for i, path in enumerate(m_path_list):
            try:
                img = Image.open(path)
                img = img.convert('RGB')
                crop_img, _ = self.Crop_1_face_no_FD(img, self._img_height, self._padding)

                res["img"][count, :, :, :] = crop_img
                res["path_list"].append(path)
                count += 1

            except Exception:
                # Record unreadable images so the batch can continue without them.
                print("index = ", i + m_index)
                print("load_img_error: ", path)
                with open(Img_Error_list_path, "a") as f:
                    f.write(path + "\n")

        if len(res["path_list"]) == 0:
            return res
        elif (len(res["path_list"]) < self._batch_size):
            res["img"] = res["img"][0:len(res["path_list"])]
			
        detect_c += res["img"].shape[0]

        if self._jitter_count:
            # dlib.jitter_image returns num_jitters copies per crop, so the
            # effective batch size grows by a factor of self._jitter_count.
            list_imgs = []
            for i in range(res["img"].shape[0]):
                list_imgs += dlib.jitter_image(np.uint8(res["img"][i]),
                                               num_jitters=self._jitter_count,
                                               disturb_colors=True)
            res["img"] = np.array(list_imgs, dtype=np.float32)

        
        #res["img"] = pre_process(res["img"])

        return res
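A caller of get_data typically keeps requesting batches until last_batch is set or None is returned. A minimal consumer sketch, assuming a hypothetical loader instance of the class above:

def run_all_batches(loader):
    batches = []
    while True:
        batch = loader.get_data()
        if batch is None:            # another worker already consumed the final slice
            break
        if batch["path_list"]:       # skip batches where every image failed to load
            batches.append(batch["img"])
        if batch["last_batch"]:
            break
    return batches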
Example 4
    def ReadDetectAndEncode(self, imgPath, sess, n_jitters=0):
        # Note: scipy.misc.imread is deprecated; imageio.imread is the usual replacement.
        img = misc.imread(imgPath, mode='RGB')
        bbs, landmarks = detect_face.detect_face(img, self.minsize, self.pnet,
                                                 self.rnet, self.onet,
                                                 self.threshold, self.factor)
        if len(bbs) != 1:
            return []

        img_list = [None]

        prewhitened = facenet.prewhiten(img)
        img_list[0] = prewhitened

        # Fixed normalization
        controlArray = np.expand_dims(np.zeros(1, dtype=np.int32), 1)
        controlArray += np.expand_dims(np.ones(1, dtype=np.int32),
                                       1) * facenet.FIXED_STANDARDIZATION

        # Run forward pass to calculate embeddings
        feed_dict = {
            self.images_placeholder: img_list,
            self.phase_train_placeholder: False,
            self.control_placeholder: controlArray
        }
        img_encoding = sess.run(self.embeddings, feed_dict=feed_dict)

        if n_jitters:
            imgEncodings = img_encoding

            img = dlib.load_rgb_image(imgPath)
            augmented_images = dlib.jitter_image(img, num_jitters=n_jitters)

            for augmented_image in augmented_images:
                prewhitened = facenet.prewhiten(augmented_image)
                img_list[0] = prewhitened

                # Run forward pass to calculate embeddings
                feed_dict = {
                    self.images_placeholder: img_list,
                    self.phase_train_placeholder: False,
                    self.control_placeholder: controlArray
                }
                img_encoding = sess.run(self.embeddings, feed_dict=feed_dict)

                imgEncodings = np.concatenate((imgEncodings, img_encoding),
                                              axis=0)

            return np.average(imgEncodings, axis=0)

        return img_encoding[0]
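The averaged embedding returned above is typically compared to another face's embedding with a Euclidean distance threshold. A minimal sketch with a hypothetical helper; the threshold value is illustrative and must be tuned for the model in use:

import numpy as np


def is_same_person(encoding_a, encoding_b, threshold=1.0):
    # Hypothetical comparison of two embeddings from ReadDetectAndEncode.
    distance = np.linalg.norm(np.asarray(encoding_a) - np.asarray(encoding_b))
    return distance < threshold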
Example 5
    print("Sorry, we could not load '{}' as an image".format(face_file_path))
    exit()

# Convert to RGB since dlib uses RGB images
img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)

# Ask the detector to find the bounding boxes of each face.
dets = detector(img)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

# Get the aligned face image and show it
image = dlib.get_face_chip(img, faces[0], size=320)
cv_bgr_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('image', cv_bgr_img)
cv2.waitKey(0)

# Show 5 jittered images with geometric jitter only (no color disturbance)
jittered_images = dlib.jitter_image(image, num_jitters=5)
show_jittered_images(jittered_images)

# Show 5 jittered images with color disturbance as well (disturb_colors=True)
jittered_images = dlib.jitter_image(image, num_jitters=5, disturb_colors=True)
show_jittered_images(jittered_images)
cv2.destroyAllWindows()
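The show_jittered_images helper called above is not shown in this snippet; for this OpenCV-based variant it might look like the following sketch (window names are illustrative):

def show_jittered_images(jittered_images):
    # Hypothetical helper: dlib returns RGB images, so convert to BGR for cv2.imshow.
    for idx, jittered in enumerate(jittered_images):
        cv2.imshow('jittered_{}'.format(idx), cv2.cvtColor(jittered, cv2.COLOR_RGB2BGR))
    cv2.waitKey(0)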
Example 6
# Load all the models we need: a detector to find the faces and a shape predictor
# to find face landmarks so we can precisely localize the face
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)

# Load the image using dlib
img = dlib.load_rgb_image(face_file_path)

# Ask the detector to find the bounding boxes of each face.
dets = detector(img)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    exit()

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

# Get the aligned face image and show it
image = dlib.get_face_chip(img, faces[0], size=320)
window = dlib.image_window()
window.set_image(image)
dlib.hit_enter_to_continue()

# Show 5 jittered images with geometric jitter only (no color disturbance)
jittered_images = dlib.jitter_image(image, num_jitters=5)
show_jittered_images(window, jittered_images)

# Show 5 jittered images with color disturbance as well (disturb_colors=True)
jittered_images = dlib.jitter_image(image, num_jitters=5, disturb_colors=True)
show_jittered_images(window, jittered_images)
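Here show_jittered_images takes the dlib image_window as its first argument; a sketch consistent with dlib's image_window API might be:

def show_jittered_images(window, jittered_images):
    # Show each jittered image in the dlib window, waiting for Enter between them.
    for img in jittered_images:
        window.set_image(img)
        dlib.hit_enter_to_continue()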
Example 7
    def jitter_face(self, image):
        jittered_images = dlib.jitter_image(image, num_jitters=5)
        for jittered in jittered_images:
            self.show(jittered, 'jitter_face')
            cv2.waitKey(0)
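The self.show call above refers to a display helper defined elsewhere in that class; a hypothetical version using OpenCV might look like this:

    def show(self, image, window_name):
        # Hypothetical helper: dlib returns RGB images, OpenCV expects BGR for display.
        cv2.imshow(window_name, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))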