Example No. 1
import dlib
import numpy as np
# save_img is assumed to come from an image library such as
# tensorflow.keras.preprocessing.image; it is not defined in this snippet.


def get_landmarks(images, labels):

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    landmarks = []

    # One-hot encode the integer labels.
    l = labels.astype(np.int64)
    one_hot_labels = np.zeros((l.size, int(l.max() + 1)))
    one_hot_labels[np.arange(l.size), l] = 1

    for image in images:

        # Round-trip through a temporary file so dlib loads the image
        # in its own grayscale representation.
        save_img("temp.png", image)
        img = dlib.load_grayscale_image("temp.png")

        dets = detector(img, 1)
        if not dets:
            continue  # no face detected; skip to avoid an IndexError
        face_rect = dets[0]

        landmarks.append(
            np.matrix([[p.x, p.y] for p in predictor(img, face_rect).parts()]))

    print(len(one_hot_labels))
    print(len(landmarks))

    np.save("landmarks.npy", landmarks)
    np.save("one_hot_labels.npy", one_hot_labels)
Example No. 2
    def do_training(self):
        trainpath = os.path.join(self.translate_path(self.path), "train.txt")
        with open(trainpath, 'r') as trainfile:
            testdatadef = trainfile.readlines()
        result = []

        images = []
        boxes = []
        for testdata in testdatadef:
            # Each line holds left;top;right;bottom;image filename.
            testvalues = testdata.split(";")
            boxes.append([
                dlib.rectangle(int(testvalues[0]), int(testvalues[1]),
                               int(testvalues[2]), int(testvalues[3]))
            ])
            imagepath = os.path.join(self.translate_path(self.path),
                                     testvalues[4].strip("\n"))
            result.append(testvalues[4])
            images.append(dlib.load_grayscale_image(imagepath))
        options = dlib.simple_object_detector_training_options()
        options.add_left_right_image_flips = True
        options.C = 5
        #        options.be_verbose = False
        # time.clock() was removed in Python 3.8; perf_counter() replaces it.
        start_time = time.perf_counter()
        SimpleHTTPRequestHandler.trained_detector = dlib.train_simple_object_detector(
            images, boxes, options)
        self.step_times.append("Trained HOG + SVM Detection: %.3f s" %
                               (time.perf_counter() - start_time))
        print(type(SimpleHTTPRequestHandler.trained_detector))
        return result
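Given that do_training() splits each line on ';' and passes the first four fields to dlib.rectangle(left, top, right, bottom), a train.txt line presumably looks like this (the coordinates and filename are invented for illustration):

102;34;260;198;face_001.png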
Example No. 3
    def img_path_to_landmarks(self,
                              img_path: str,
                              gray_scale: bool = True,
                              exclude_vector_base: bool = True):
        if gray_scale:
            return self.img_to_landmarks(dlib.load_grayscale_image(img_path),
                                         exclude_vector_base)
        return self.img_to_landmarks(dlib.load_rgb_image(img_path),
                                     exclude_vector_base)
Example No. 4
import dlib
import numpy as np


def hash_image(filename):
    img = dlib.load_grayscale_image(filename)
    img = dlib.convert_image(img, dtype='float32')
    img = dlib.resize_image(img, 75, 75)
    # Flatten to a column vector and roughly zero-center the pixel values.
    img = img.reshape(img.size, 1)
    img = np.asmatrix(img)
    img -= 110

    # Project onto random hyperplanes and keep only the sign bits.
    h = random_projections * img
    h = h > 0
    # tostring() is deprecated in NumPy; tobytes() returns the same buffer.
    return hash(np.packbits(h).tobytes())
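random_projections is a global that the snippet never defines; for this style of locality-sensitive hashing it would be a fixed random matrix with one row per hash bit. A minimal sketch, assuming 64 bits and the 75x75 flattened image above:

import numpy as np

rng = np.random.RandomState(0)  # fixed seed so the same image always hashes the same
random_projections = np.asmatrix(rng.randn(64, 75 * 75).astype('float32'))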
Example No. 5
    def create_hog_image(self, absoluteFile: str, filename: str):
        image = dlib.load_grayscale_image(absoluteFile)
        fd, hog_image = hog(image,
                            orientations=8,
                            pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1),
                            visualize=True,
                            multichannel=False)
        # time.clock() was removed in Python 3.8; perf_counter() replaces it.
        start_time = time.perf_counter()
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                        in_range=(0, 10))
        self.step_times.append("HOG Creation: %.3f s" %
                               (time.perf_counter() - start_time))
        processed_file = self.path_to_processed_file(filename, "hog")
        pyplot.imsave(processed_file, hog_image_rescaled)
Example No. 6
    def do_POST(self):
        print('Post!')

        response = {"drowsy": False}

        ## Save the uploaded video temporarily
        with open("tmp.mpeg", 'wb') as tmpFile:
            tmpFile.write(self.rfile.read(int(self.headers["content-length"])))
        print("Bytes read: ", self.headers["content-length"])
        ## Split video file into frames
        vidObj = cv2.VideoCapture("tmp.mpeg")
        count = 0
        errored = False

        while True:
            success, image = vidObj.read()
            if not success:
                break

            if count % 5 == 0:
                try:
                    cv2.imwrite("tmpFrame.jpg", image)
                    img = dlib.load_grayscale_image("tmpFrame.jpg")
                    appearsDrowsy = TestDetectionServer.detector.areEyesClosed(
                        img)

                    print(appearsDrowsy)
                    print(
                        "current consecutive drowsy frames: ",
                        TestDetectionServer.detector.
                        getNumberConsecutiveDrowsyFrames())
                    if appearsDrowsy is not None:
                        if not appearsDrowsy:
                            TestDetectionServer.detector.resetNumberConsecutiveDrowsyFrames(
                            )

                        if TestDetectionServer.detector.isDrowsy():
                            response["drowsy"] = True
                            break

                    if os.path.exists("tmpFrame.jpg"):
                        os.remove("tmpFrame.jpg")

                except Exception as e:
                    errored = True
                    print(e)
                    break

            count += 1
        print(count)

        ## Remove tmp file
        #if os.path.exists("tmp.mpeg"):
        #    os.remove("tmp.mpeg")

        if errored:
            self._set_headers(500)
        else:
            self._set_headers(200)

        print(response)
        self.wfile.write(self._html(str(response)))
Example No. 7
        # Eye aspect ratio: (A + B) / (2C).  A and B are the vertical
        # eye-landmark distances (computed just above this excerpt) and C is
        # the horizontal one; the ratio drops toward zero as the eye closes.
        C = dist.euclidean(eye[0], eye[3])

        ear = (A + B) / (2.0 * C)

        return ear

    def isDrowsy(self):
        return self.getNumberConsecutiveDrowsyFrames() > \
               self.getMaxDrowsyFramesBeforeSignal()

    def _getMinimumEyeAspectRatio(self):
        return self._minimumEyeAspectRatioBeforeCloseAssumed

    def getMaxDrowsyFramesBeforeSignal(self):
        return self._maxDrowsyFramesBeforeSignal

    def getNumberConsecutiveDrowsyFrames(self):
        return self._consecutiveDrowsyFrames

    def incrementNumberConsecutiveDrowsyFrames(self):
        self._consecutiveDrowsyFrames += 1

    def resetNumberConsecutiveDrowsyFrames(self):
        self._consecutiveDrowsyFrames = 0


if __name__ == "__main__":
    d = DrowsinessDetector()
    img = dlib.load_grayscale_image("testImage.png")
    print(d.areEyesClosed(img))
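The areEyesClosed() method called here and by the server in Example No. 6 is not shown in this excerpt. A minimal, hypothetical sketch of how it could tie the helpers above together (the _detectEyeLandmarks and _eyeAspectRatio names are assumptions, not part of the original class):

    def areEyesClosed(self, img):
        # Hypothetical sketch: locate the eye landmarks, compute the eye
        # aspect ratio, and update the consecutive-drowsy-frame counter.
        eyes = self._detectEyeLandmarks(img)  # assumed helper
        if eyes is None:
            return None  # no face found in this frame
        ear = min(self._eyeAspectRatio(eye) for eye in eyes)  # assumed helper
        if ear < self._getMinimumEyeAspectRatio():
            self.incrementNumberConsecutiveDrowsyFrames()
            return True
        return False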
print("\nSVD outputs:")
print(u)
print(v)
print("\nsingular values: ", w)

# Use the SVD output to get the filter again.  Note that the print shows the same thing as print(filt)
b = v[0] * sqrt(w[0])
print("\nFilter built from SVD outputs:")
print(b.transpose() * b)

# To drive this home a little further, let's run a few tests.  We are going to
# filter this image a few different ways.  All of these different ways of
# filtering the image give the same outputs.  The only difference is whether
# we take advantage of the separability of the filter or not.
img = dlib.load_grayscale_image('images/testing_faces/2007_001430.jpg')

# Do all the filtering with float values
filt = filt.astype('float32')
a = a.astype('float32')
img = img.astype('float32')

# These all output the same things, except the last version is faster.
# Each call returns (filtered_image, valid_area), so the two-pass version
# unpacks the intermediate result before filtering again.
fimg1, valid_area = dlib.spatially_filter_image(img, filt)
tmp_img, valid_area = dlib.spatially_filter_image(img, a)
fimg2, valid_area = dlib.spatially_filter_image(tmp_img, a.transpose().copy())
fimg3, valid_area = dlib.spatially_filter_image_separable(img, a, a)

print("")
print("filter output difference: ", np.max(np.abs(fimg1 - fimg2)))
Example No. 9
        plt.plot(hist)
        plt.xlim([0, 256])
        plt.show()

if __name__ == '__main__':

    fe = Face_Encoding()
    win1 = dlib.image_window()
    win2 = dlib.image_window()
    win3 = dlib.image_window()
    win4 = dlib.image_window()

    for image_path in paths.list_images(
            r"D:\Tuts\DataScience\Python\Datasets\FGNET\Age_Test\Old"):
        image = dlib.load_rgb_image(image_path)
        gray = dlib.load_grayscale_image(image_path)

        #image = cv2.resize(image, (300, 300))
        #gray = cv2.resize(gray, (300, 300))

        descriptor, image = fe._compute_facenet_embedding_dlib(image=image,
                                                               draw=True)

        hist, lbp_image = fe.get_local_binary_pattern(gray=gray)

        hist, hog_image = fe.get_hog(image_path=image_path,
                                     image=None,
                                     multi_channel=True)

        hist, canny = fe.canny_edge_detect_cv2(image=gray,
                                               auto=False)
Example No. 10
    roi_fg = cv2.bitwise_and(eyeOverlay, eyeOverlay, mask=mask)

    # join the roi_bg and roi_fg
    dst = cv2.add(roi_bg, roi_fg)

    # place the joined image, saved to dst back over the original image
    frame[y1:y2, x1:x2] = dst


#loading in all the needed components
target_file = os.getcwd() + "/test.jpg"
my_face = cv2.imread('face.png', -1)
orig_mask = my_face[:, :, 3]
orig_mask_inv = cv2.bitwise_not(orig_mask)
my_face = my_face[:, :, 0:3]
predictor_path = os.getcwd() + "/68point.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
img = dlib.load_grayscale_image(target_file)
img_c = cv2.imread(target_file)
#starting to process image to find faces and key points
dets = detector(img, 1)
for i, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        i, d.left(), d.top(), d.right(), d.bottom()))
    place_eye(img_c, d, my_face, 1.5)
cv2.imwrite('out.jpg', img_c)
cv2.imshow('image', img_c)
cv2.waitKey(0)
Example No. 11

import dlib
import imutils


def dist(a, b):
    # Euclidean distance between two 2-D points.
    return ((a[0] - b[0])**2 + (a[1] - b[1])**2)**0.5


predictor_path = '<dlib predictor_68 path>'
faces_folder_path = '<faces folder path>'

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

for i in range(100):
    # load current image
    img_url = faces_folder_path + '/{0}.jpg'.format(i)
    img = dlib.load_grayscale_image(img_url)

    # do some resizing operations
    if img.shape[1] < 300:
        img = imutils.resize(img, width=img.shape[1] * 2)
    elif img.shape[1] < 450:
        img = imutils.resize(img, width=450)
    else:
        img = imutils.resize(img, width=900)

    # detect faces (threshold lowered to -1 to get more candidate detections)
    dets, scores, idx = detector.run(img, 0, -1)
    if not dets:
        continue  # no face found in this image

    # get list of points for the first detected face
    points = shape_to_list(predictor(img, dets[0]))
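shape_to_list is not defined in this excerpt; it presumably converts dlib's full_object_detection into a plain list of coordinates. A minimal sketch of such a helper:

def shape_to_list(shape):
    # shape.parts() yields dlib.point objects; unpack them into (x, y) tuples.
    return [(p.x, p.y) for p in shape.parts()]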
Example No. 12
# Tell the code how many CPU cores your computer has for the fastest training.
options.num_threads = 4
options.be_verbose = True
options.detection_window_size = 80*80
print(options)
# Finally, note that you don't have to use the XML based input to
# train_simple_object_detector().  If you have already loaded your training
# images and bounding boxes for the objects then you can call it as shown
# below.

imgnames = glob.glob("/home/fast/Automate/20x/nuclei/dlib/train/*.png")
# You just need to put your images into a list.
images = []
boxes = []
for img in imgnames:
    images.append(dlib.load_grayscale_image(img))
    # Each JSON file holds [x, y, width, height] entries, converted here to
    # dlib rectangles with right = x + width and bottom = y + height.
    with open(img[:-4]+'.json', 'r') as f:
        imgboxes = map(lambda x: dlib.rectangle(left=x[0], top=x[1],
                                                right=x[0]+x[2], bottom=x[1]+x[3]),
                       json.load(f))
        boxes.append(list(imgboxes))

print(len(boxes), len(images))

detector = dlib.train_simple_object_detector(images, boxes, options)

# Save the detector to disk so it can be reloaded later.
detector.save('detector.svm')

# Now let's look at the HOG filter we learned.  Since this detector was
# trained on nuclei rather than faces, the filter should resemble a nucleus.
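The saved detector can be reloaded and run on new images. A minimal sketch (the test image path here is hypothetical):

detector = dlib.simple_object_detector("detector.svm")
img = dlib.load_grayscale_image("/home/fast/Automate/20x/nuclei/dlib/test/0.png")
for d in detector(img):
    print("nucleus at", d.left(), d.top(), d.right(), d.bottom())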
Example No. 13
import dlib
import cv2
import os
import numpy as np
def place(target):
    # Stub: the overlay logic itself is not included in this snippet.
    global my_face
# overlaying code found at https://www.codesofinterest.com/2017/07/snapchat-like-filters-with-dlib-opencv-python.html

#loading in all the needed components
my_face = cv2.imread('me.jpg')
target = cv2.imread('test.jpg')
win = dlib.image_window()
predictor_path = os.getcwd()+"/68point.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
img = dlib.load_grayscale_image(os.getcwd()+"/me.jpg")

win.set_image(img)
dlib.hit_enter_to_continue()