Example No. 1
import sys

import dlib

def main():
    if len(sys.argv) < 3:
        print(
            "Give the path to the examples/faces directory as the argument to this "
            "program. For example, if you are in the python_examples folder then "
            "execute this program by running:\n"
            "    ./detect.py ../examples/faces ./detector.svm [-s]")
        exit()

    f = sys.argv[1]
    detector_fp = sys.argv[2]

    s = False
    if len(sys.argv) > 3 and sys.argv[3] == '-s':
        s = True

    dets = handle_locations(f, detector_fp)
    print("Number of detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    if s:
        img = dlib.load_rgb_image(f)  # load the image so it can be displayed
        win = dlib.image_window()
        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
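handle_locations isn't defined in this snippet; a minimal sketch of what it plausibly does, assuming the second argument names a dlib.simple_object_detector file such as the detector.svm in the usage string:

def handle_locations(image_path, detector_path):
    # Hypothetical helper: load the trained detector and run it on the image.
    detector = dlib.simple_object_detector(detector_path)
    img = dlib.load_rgb_image(image_path)
    return detector(img)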
Example No. 2
def show_jittered_images(window, jittered_images):
    '''
        Shows the specified jittered images one by one
    '''
    for img in jittered_images:
        window.set_image(img)
        dlib.hit_enter_to_continue()

def display_landmarks(img, dets, shapes):
    win = dlib.image_window()
    win.clear_overlay()
    win.set_image(img)
    for shape in shapes:
        win.add_overlay(shape)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()
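For context, dlib can produce such jittered copies itself via dlib.jitter_image, mirroring dlib's face_jitter.py example; the image path here is an assumption:

import dlib

img = dlib.load_rgb_image("face.jpg")  # assumed input: an aligned face chip
window = dlib.image_window()
jittered_images = dlib.jitter_image(img, num_jitters=5, disturb_colors=True)
show_jittered_images(window, jittered_images)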
Example No. 4
    def _visualize(self, image, dets, poses, scores):
        print("Number of faces detected: {}".format(len(dets)))
        for i, d in enumerate(dets):
            print("Detection {} score: {:.2f}: Left: {} Top: {} Right: {} Bottom: {}".format(
                i, scores[i], d.left(), d.top(), d.right(), d.bottom()))

        if not self.SERVER_NO_GUI_MODE:
            self._win.clear_overlay()
            self._win.set_image(image)
            for pose in poses:
                self._win.add_overlay(pose)
            self._win.add_overlay(dets)
            dlib.hit_enter_to_continue()
Example No. 5
    def show_learned_hog_filter(self):
        # In a normal application you would load the detectors from disk first, e.g.
        # detector = dlib.simple_object_detector("../data/models/detector.svm")

        # We can look at the HOG filters we learned. Each should resemble the
        # object it was trained to detect.
        win_det = dlib.image_window()
        print("MTB filter")
        win_det.set_image(self.detectors[0])
        dlib.hit_enter_to_continue()
        print("PED filter")
        win_det.set_image(self.detectors[1])
        dlib.hit_enter_to_continue()
Example No. 6
import dlib
from skimage import io

win1 = None
win2 = None

def show_pair(path1, path2):
    global win1, win2
    if win1 is None:
        win1 = dlib.image_window()
    if win2 is None:
        win2 = dlib.image_window()
    img1 = io.imread(path1)
    img2 = io.imread(path2)
    win1.clear_overlay()
    win2.clear_overlay()
    win1.set_image(img1)
    win2.set_image(img2)
    dlib.hit_enter_to_continue()

Example No. 7
def encode(detector, shape_predictor, model, image, win=None):
  """Encodes faces from a single image into a 128 dim descriptor.

  Args:
    detector: dlib face detector object
    shape_predictor: dlib shape predictor object
    model: dlib convnet model
    image: image as numpy array
    win: dlib window object for visualization if VIZ flag == 1

  Returns:
    list of descriptors (np array) for each face detected in image
  """
  # dlib comments:
  # Ask the detector to find the bounding boxes of each face. The 1 in the
  # second argument indicates that we should upsample the image 1 time. This
  # will make everything bigger and allow us to detect more faces.
  dets = detector(image, 1)
  print("Number of faces detected: {}".format(len(dets)))

  descriptors = []
  for k, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
    # Get the landmarks/parts for the face in box d.
    shape = shape_predictor(image, d)
    # Draw the face landmarks on the screen so we can see which face is being processed.

    if win is not None:
      win.clear_overlay()
      win.set_image(image)
      win.add_overlay(d)
      win.add_overlay(shape)
      dlib.hit_enter_to_continue()

    # Compute the 128D vector that describes the face in the image identified by shape
    face_descriptor = model.compute_face_descriptor(image, shape)
    descriptors.append(np.asarray(list(face_descriptor)))

  return descriptors
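A sketch of how encode might be wired up, assuming the standard dlib model files (the file and image names here are assumptions, not part of the snippet):

import dlib
import numpy as np
from skimage import io

detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
model = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

image = io.imread("face.jpg")  # assumed input image
descriptors = encode(detector, shape_predictor, model, image)
# dlib's documentation suggests two descriptors within Euclidean distance 0.6
# usually belong to the same person.
if len(descriptors) >= 2:
    print(np.linalg.norm(descriptors[0] - descriptors[1]))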
def run(predictor_path, faces_folder_path):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    win = dlib.image_window()

    for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)

            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))
            left_eye_idxs = range(42, 48)
            left_eye_pts = np.array([[p.x, p.y] for i, p in enumerate(shape.parts()) if i in left_eye_idxs])
            left_eye = np.mean(left_eye_pts, axis=0)
            right_eye_idxs = range(36, 42)
            right_eye_pts = np.array([[p.x, p.y] for i, p in enumerate(shape.parts()) if i in right_eye_idxs])
            right_eye = np.mean(right_eye_pts, axis=0)

            cv2.circle(img, tuple(left_eye.astype(int)), 3, color=(0, 255, 255))
            cv2.circle(img, tuple(right_eye.astype(int)), 3, color=(0, 255, 255))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)


        win.set_image(img)
        win.add_overlay(dets)
        out_name = os.path.basename(f).replace('.jpg', '_landmarks.jpg')
        cv2.imwrite(os.path.join(faces_folder_path, out_name), img)
        dlib.hit_enter_to_continue()
        win.clear_overlay()
Example No. 9
def DetectFaceInListDlib(frameList, faceDetector = None, skipLength = 2, debug = False):
	'''
	Given a frame list, detect (track) the faces
	Returns subimages of faces after normalization and smoothing the enclosing rectangle
	'''
	if faceDetector is None:
		predictorPath = 'coreData/shape_predictor_68_face_landmarks.dat'
		faceDetector = dlib.get_frontal_face_detector()

	if (debug):
		win = dlib.image_window()
		win.clear_overlay()

	faceList = []
	newFrameList = []
	rowList = []
	colList = []
	detsList = []
	smoothRowSize = []
	smoothColSize = []
	winSize = 20 // skipLength  # integer half-width of the smoothing window

	for i in range(0, frameList.shape[0], skipLength):
		frame = frameList[i]
		dets = faceDetector(frame, 1)
		dets = list(enumerate(dets))
		if (len(dets) != 1):
			continue
		detsList.append(dets)
		newFrameList.append(frame)
		for k, d in dets:
			rowList.append(np.abs(d.left() - d.right()))
			colList.append(np.abs(d.top() - d.bottom()))

	# Smooth the rectangle sizes (running average over a window of ±winSize)
	for i in range(len(rowList)):
		rowAvg = np.mean(rowList[max(0,i-winSize):min(len(rowList),i+winSize)]) + 6
		colAvg = np.mean(colList[max(0,i-winSize):min(len(colList),i+winSize)]) + 6
		smoothRowSize.append(int(round(rowAvg)))
		smoothColSize.append(int(round(colAvg)))

	for i in range(len(detsList)):
		dets = detsList[i]
		frame = newFrameList[i]
		grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		k, d = dets[0]  # exactly one detection per frame (enforced above)
		rowc = (d.left() + d.right()) // 2
		colc = (d.top() + d.bottom()) // 2
		rows = smoothRowSize[i]
		cols = smoothColSize[i]
		# Forcefully make the enclosing box a square
		rows = max(rows, cols)
		cols = max(rows, cols)
		
		faceImg = grayFrame[max(0, colc - cols//2):min(frame.shape[0], colc + cols//2 + 1), max(0, rowc - rows//2):min(frame.shape[1], rowc + rows//2 + 1)]
		# Illumination normalization (histogram equalization)
		faceImg = cv2.equalizeHist(faceImg)

		if debug:
			win.clear_overlay()
			grayFrame[max(0, colc - cols//2):min(frame.shape[0], colc + cols//2 + 1), max(0, rowc - rows//2):min(frame.shape[1], rowc + rows//2 + 1)] = faceImg
			win.set_image(grayFrame)
			dlib.hit_enter_to_continue()
		
		faceImg = cv2.resize(faceImg, (100, 100))
		faceImg = np.array(faceImg)
		faceList.append(faceImg)

	faceList = np.array(faceList)
	return faceList
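A sketch of how this might be driven, assuming the frames come from a video file read with OpenCV (the video path is an assumption):

import cv2
import numpy as np

cap = cv2.VideoCapture("input.mp4")  # assumed video path
frames = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame)
cap.release()

faces = DetectFaceInListDlib(np.array(frames), skipLength=2, debug=False)
print(faces.shape)  # (num_frames_with_one_face, 100, 100)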
# a set of point locations that define the pose of the object.
import glob
import os

import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()  # used below; not defined in this fragment
predictor = dlib.shape_predictor(predictor_path)  # predictor_path is assumed to be set earlier

win = dlib.image_window()  # GUI object

for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):  # glob allows the '*.jpg' wildcard
    print("Processing file: {}".format(f))
    img = io.imread(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets))) # prints the amount of faces detected 
    for k, d in enumerate(dets):   #for (k = 0; k < dets.length; k ++) where d = dets[k]
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(  # prints the corner coordinates 
            k, d.left(), d.top(), d.right(), d.bottom()))                    # of the square around the face 
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d) # d is a detected face / the region we want to get feature points from  
		# shape.part(0) returns the coordinates of the first feature point
        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))
        # Draw the face landmarks on the screen.
        win.add_overlay(shape)

    win.add_overlay(dets)
    dlib.hit_enter_to_continue() # move on to the next image
Example No. 11
import dlib
import cv2 as cv
#cnn_face_detector = dlib.cnn_face_detection_model_v1("./models/dlib/mmod_human_face_detector.dat")
face_detector = dlib.get_frontal_face_detector()  # HOG-based detector, used here instead of the CNN model above
sp = dlib.shape_predictor("./models/dlib/shape_predictor_5_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1(
    "./models/dlib/dlib_face_recognition_resnet_model_v1.dat")
win = dlib.image_window()
img = dlib.load_rgb_image("11.png")
win.clear_overlay()
win.set_image(img)
dets = face_detector(img, 1)
for k, d in enumerate(dets):
    shape = sp(img, d)
    win.clear_overlay()
    win.add_overlay(d)
    win.add_overlay(shape)
    face_descriptor = facerec.compute_face_descriptor(img, shape)
    print(face_descriptor)
    # Alternative: compute the descriptor from a pre-aligned face chip:
    # face_chip = dlib.get_face_chip(img, shape)
    # face_descriptor_from_prealigned_image = facerec.compute_face_descriptor(face_chip)
    # print(face_descriptor_from_prealigned_image)
    dlib.hit_enter_to_continue()
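The 128-D descriptors are compared with Euclidean distance; dlib's model was trained so that two images of the same person typically land within distance 0.6. A minimal comparison sketch (the two descriptors are assumed to come from runs of the loop above):

import numpy as np

def same_person(desc_a, desc_b, threshold=0.6):
    # dlib's resnet face descriptors: distance < 0.6 usually means same identity
    return np.linalg.norm(np.array(desc_a) - np.array(desc_b)) < threshold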
Example No. 12
for i, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        i, d.left(), d.top(), d.right(), d.bottom()))
    shape = predictor(img, d)
    landmarks = np.mat([[p.x, p.y] for p in shape.parts()])
    print(landmarks)
    '''
    Get the x and y coordinates of each landmark from shape.parts() and store
    them in the landmarks matrix (the model extracts 68 landmarks by default,
    so landmarks is a 68x2 matrix).
    '''
    # Coordinates of point 0 and point 1
    print('Part 0: {}, Part 1: {}'.format(shape.part(0), shape.part(1)))
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        '''
        The eyes are landmarks 36-47; this if block marks them out.
        '''
        if 35 < idx < 48:
            cv2.putText(img,
                        str(idx),
                        pos,
                        fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                        fontScale=0.3,
                        color=(0, 255, 0))  # number the landmarks in order, drawn in green

win.clear_overlay()
win.set_image(img)  # load the image into the window
#win.add_overlay(dets)   # the outer red boxes
#win.add_overlay(shape)  # the landmarks, drawn as blue lines

dlib.hit_enter_to_continue()  # wait for Enter (like OpenCV's cv2.waitKey(0)); without it the window closes immediately
Example No. 13
 def recognition(self):
     c_d = dict(zip(self.candidate, self.dist))
     cd_sorted = sorted(c_d.items(), key=lambda d: d[1])
     print("\nThe person is:", cd_sorted[0][0])
     dlib.hit_enter_to_continue()
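For context, a hedged sketch of how self.candidate and self.dist are typically produced upstream (the names and data here are hypothetical; real descriptors would come from dlib's face recognition model):

import numpy as np

# Hypothetical known-face database: one 128-D descriptor per candidate name.
candidates = {"alice": np.zeros(128), "bob": np.ones(128)}
probe = np.full(128, 0.1)  # descriptor of the face to identify

names = list(candidates.keys())
dists = [np.linalg.norm(candidates[n] - probe) for n in names]
# self.candidate = names and self.dist = dists is what recognition() sorts on.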
Example No. 14
 def view_object_detector(self):
     detector = dlib.simple_object_detector(DETECTOR_SVM)
     win_det = dlib.image_window()
     win_det.set_image(detector)
     dlib.hit_enter_to_continue()
Example No. 15
def train(image_folder, append):

    if append == 0:

        # open createXML for the first-time user with all the required params

        cmd = 'createXML.exe -c' + image_folder + '/training.xml ' + image_folder
        run(cmd, 5)

        cmd = 'createXML.exe ' + image_folder + '/training.xml'
        run(cmd, 50)

    # <NOTE> <IN PROGRESS> include code to write new XML to the old XML and use the latter for the training

    elif append == 1:

        # open createXML for the first-time user with all the required params

        cmd = 'createXML.exe -c' + image_folder + '/trainingTemp.xml ' + image_folder
        run(cmd, 5)

        cmd = 'createXML.exe ' + image_folder + '/trainingTemp.xml'
        run(cmd, 50)

        dlib.hit_enter_to_continue()

        # doing all the magic stuff to append the new XML to the old one

        xml1 = image_folder + "/training.xml"
        xml2 = image_folder + "/trainingTemp.xml"

        removeUselessText(xml1)
        removeUselessText(xml2)

        #combineXML(xml1,xml2)
        r = XMLCombiner((xml1, xml2)).combine()

        with open(xml1, "wb") as f:
            f.write(et.tostring(r.getroot()))

        # Reformat the combined XML before training, since the merge may leave improper indentation

    # Set the dlib training options

    options = dlib.simple_object_detector_training_options()

    # Since the objects are left/right symmetric, also train on mirrored
    # copies of the training images, effectively doubling the data.
    options.add_left_right_image_flips = True

    # SVM C parameter; a larger value fits the training data more tightly but risks overfitting
    options.C = 2

    # Tell the code how many CPU cores your computer has for the fastest training.
    options.num_threads = 4
    options.be_verbose = True

    training_xml_path = os.path.join(image_folder, "training.xml")
    #testing_xml_path = os.path.join(image_folder, "testing.xml")

    # Train on the XML file and save the resulting detector as detector.svm

    dlib.train_simple_object_detector(training_xml_path, "detector.svm",
                                      options)

    # Print the accuracy on the training data

    print("\nTraining accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, "detector.svm")))

    # Doing the detection
    detector = dlib.simple_object_detector("detector.svm")

    # Looking at the HOG filter the machine has learned.
    win_det = dlib.image_window()
    win_det.set_image(detector)
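To close the loop, a short sketch of running the freshly trained detector on an image (the test image path is an assumption):

import dlib

detector = dlib.simple_object_detector("detector.svm")
img = dlib.load_rgb_image("test.jpg")  # assumed test image
dets = detector(img)

win = dlib.image_window()
win.set_image(img)
win.add_overlay(dets)
dlib.hit_enter_to_continue()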
Example No. 16
def finding_face_landmark(file_name):
    # You can download the required pre-trained facial landmark model here:
    # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    model = "shape_predictor_68_face_landmarks.dat"

    face_detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(model)

    image_window = dlib.image_window()
    image = io.imread(file_name)

    detected_faces = face_detector(image, 1)

    print("Found {} faces in the image file {}".format(len(detected_faces),
                                                       image))
    if len(detected_faces) != 1:
        print("Expected exactly one face in the photo but found {}. "
              "Please try a different photo.".format(len(detected_faces)))
        return []

    image_window.set_image(image)
    face = detected_faces[0]

    image_window.add_overlay(face)
    landmarks = shape_predictor(image, face)

    leftEye1 = landmarks.part(42)
    rightEye1 = landmarks.part(39)
    nose = landmarks.part(30)
    noseTip = landmarks.part(27)
    mouth = landmarks.part(62)
    noseLeft = landmarks.part(31)
    noseRight = landmarks.part(35)

    right1 = landmarks.part(1)
    left1 = landmarks.part(15)
    right2 = landmarks.part(4)
    left2 = landmarks.part(12)
    right3 = landmarks.part(6)
    left3 = landmarks.part(10)

    leftEye2 = landmarks.part(45)
    rightEye2 = landmarks.part(36)

    # for j in range(1, 68):
    #   pos = pose_landmarks.part(j)
    #   cv2.circle(image, (pos.x, pos.y), 1, (0, 0, 255), -1)

    # cv2.circle(image, (leftEye1.x, leftEye1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (rightEye1.x, rightEye1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (nose.x, nose.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseTip.x, noseTip.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (mouth.x, mouth.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseLeft.x, noseLeft.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseRight.x, noseRight.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right1.x, right1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right2.x, right2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right3.x, right3.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left1.x, left1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left2.x, left2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left3.x, left3.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (leftEye2.x, leftEye2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (rightEye2.x, rightEye2.y), 1, (255, 255, 0), -1)

    # cv2.line(image, (leftEye1.x, leftEye1.y), (rightEye1.x, rightEye1.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (mouth.x, mouth.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (mouth.x, mouth.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (mouth.x, mouth.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (noseTip.x, noseTip.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (noseLeft.x, noseLeft.y), (noseRight.x, noseRight.y), (255, 255, 0), 1)
    # cv2.line(image, (left1.x, left1.y), (right1.x, right1.y), (255, 255, 0), 1)
    # cv2.line(image, (left2.x, left2.y), (right2.x, right2.y), (255, 255, 0), 1)
    # cv2.line(image, (left3.x, left3.y), (right3.x, right3.y), (255, 255, 0), 1)
    # cv2.line(image, (left1.x, left1.y), (leftEye2.x, leftEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (right1.x, right1.y), (rightEye2.x, rightEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (leftEye2.x, leftEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (rightEye2.x, rightEye2.y), (255, 255, 0), 1)

    d1 = euclidean_distance(leftEye1, rightEye1)   # distance between the eyes
    d2 = euclidean_distance(leftEye1, mouth)       # middle of the left eye to the middle of the mouth
    d3 = euclidean_distance(rightEye1, mouth)      # middle of the right eye to the middle of the mouth
    d4 = euclidean_distance(leftEye1, nose)        # middle of the left eye to the middle of the nose
    d5 = euclidean_distance(rightEye1, nose)       # middle of the right eye to the middle of the nose
    d6 = euclidean_distance(mouth, nose)           # middle of the mouth to the middle of the nose
    d7 = euclidean_distance(noseTip, nose)         # top of the nose bridge to the middle of the nose
    d8 = euclidean_distance(noseLeft, noseRight)   # width of the nose
    d9 = euclidean_distance(left1, right1)         # width of the face (upper)
    d10 = euclidean_distance(left2, right2)        # width of the face (middle)
    d11 = euclidean_distance(left3, right3)        # width of the face (lower)
    d12 = euclidean_distance(leftEye1, leftEye2)   # width of the left eye
    d13 = euclidean_distance(rightEye1, rightEye2) # width of the right eye
    d14 = euclidean_distance(left1, leftEye2)
    d15 = euclidean_distance(right1, rightEye2)

    features = [d1, d2, d3, d4, d5, d6, d7, d8,
                d9, d10, d11, d12, d13, d14, d15]

    image_window.add_overlay(landmarks)

    #  cv2.imshow("Output", image)
    #  cv2.waitKey(0)

    dlib.hit_enter_to_continue()
    return features
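euclidean_distance isn't shown in this snippet; a minimal sketch, assuming its arguments are dlib point objects with .x and .y attributes:

import math

def euclidean_distance(p1, p2):
    # Plain 2-D Euclidean distance between two dlib points.
    return math.hypot(p1.x - p2.x, p1.y - p2.y)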
Example No. 17
import sys

import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()  # used below; not defined in this fragment
win = dlib.image_window()

for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time.  This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()


# Finally, if you really want to you can ask the detector to tell you the score
# for each detection.  The score is bigger for more confident detections.
# The third argument to run is an optional adjustment to the detection threshold,
# where a negative value will return more detections and a positive value fewer.
# Also, the idx tells you which of the face sub-detectors matched.  This can be
# used to broadly identify faces in different orientations.
if len(sys.argv) > 1:
    img = io.imread(sys.argv[1])
    dets, scores, idx = detector.run(img, 1, -1)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type:{}".format(
            d, scores[i], idx[i]))
Example No. 18
def get_triangulation(im,
                      gray_image,
                      a=50,
                      b=55,
                      c=0.15,
                      show=False,
                      randomize=False):
    '''Returns triangulations'''
    # Using canny edge detection.
    #
    # Reference: http://docs.opencv.org/3.1.0/da/d22/tutorial_py_canny.html
    # First argument: Input image
    # Second argument: minVal (argument 'a')
    # Third argument: maxVal (argument 'b')
    #
    # 'minVal' and 'maxVal' are used in the hysteresis thresholding step.
    # Any edge with an intensity gradient above maxVal is sure to be an edge,
    # and any below minVal is sure to be a non-edge, so it is discarded. Those
    # that lie between the two thresholds are classified as edges or non-edges
    # based on their connectivity.
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    edges = cv2.Canny(gray_image, a, b)
    if show:
        cv2.imshow('Canny', edges)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        win = dlib.image_window()
    # Set number of points for low-poly edge vertices
    num_points = int(np.where(edges)[0].size * c)
    # Return the indices of the elements that are non-zero.
    # 'nonzero' returns a tuple of arrays, one per dimension of its argument,
    # containing the indices of the non-zero elements in that dimension.
    # So r holds the row indices of the non-zero elements and cc the column
    # indices (cc, not c, so the 'c' parameter above isn't shadowed).
    r, cc = np.nonzero(edges)
    # r.shape, here, is the count of all points that belong to an edge, so
    # 'np.zeros(r.shape) == 1' is an array of that size with all values False.
    rnd = np.zeros(r.shape) == 1
    # Mark indices from beginning to 'num_points - 1' as True.
    rnd[:num_points] = True
    # Shuffle
    np.random.shuffle(rnd)
    # Randomly select 'num_points' points from the set of all edge vertices.
    r = r[rnd]
    cc = cc[rnd]
    # Number of rows and columns in image
    sz = im.shape
    r_max = sz[0]
    c_max = sz[1]
    # Co-ordinates of all randomly chosen points
    pts = np.vstack([r, cc]).T
    if randomize:
        rand_offset = 50
        rand_dirs = [(0, rand_offset), (-rand_offset, 0), (0, -rand_offset),
                     (rand_offset, 0)]
        rnd_count = 0
        for point in pts:
            if random.random() < 0.3:
                rnd_count += 1
                rand_dir = random.randint(0, 3)
                point[0] += rand_dirs[rand_dir][0]
                point[1] += rand_dirs[rand_dir][1]
    # Append (0,0) to the vertical stack
    pts = np.vstack([pts, [0, 0]])
    # Append (0,c_max) to the vertical stack
    pts = np.vstack([pts, [0, c_max]])
    # Append (r_max,0) to the vertical stack
    pts = np.vstack([pts, [r_max, 0]])
    # Append (r_max,c_max) to the vertical stack
    pts = np.vstack([pts, [r_max, c_max]])
    # Append some random points to fill empty spaces
    pts = np.vstack([pts, np.random.randint(0, 750, size=(100, 2))])
    # print(len(pts))
    # pts = my_reduce(pts, 5)
    # print(len(pts))
    dets = detector(im, 1)
    # print("Number of faces detected: {}".format(len(dets)))
    if show:
        win.clear_overlay()
        win.set_image(im)
    for k, d in enumerate(dets):
        shape = predictor(im, d)
        for i in range(shape.num_parts):
            pts = np.vstack([pts, [shape.part(i).x, shape.part(i).y]])
        if show:
            win.add_overlay(shape)
    if show:
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
    # Construct Delaunay Triangulation from these set of points.
    # Reference: https://en.wikipedia.org/wiki/Delaunay_triangulation
    tris = Delaunay(pts, incremental=True)
    # tris_vertices = pts[tris.simplices]
    # for tri in range(tris_vertices.shape[0]):
    #     x_coords = []
    #     y_coords = []
    #     print(tris_vertices[tri])
    #     for coord in range(tris_vertices.shape[1]):
    #         x_coords.append(tris_vertices[tri][coord][0])
    #         y_coords.append(tris_vertices[tri][coord][1])
    # divideHighVariance(tris, im)
    tris.close()
    # exit(0)
    # Return triangulation
    return tris
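A sketch of how this function might be driven and its output rendered, assuming predictor_path points at the 68-landmark model (the file names are assumptions):

import cv2
import numpy as np

predictor_path = "shape_predictor_68_face_landmarks.dat"  # assumed model file, read inside get_triangulation
im = cv2.imread("portrait.jpg")                           # assumed input image
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

tris = get_triangulation(im, gray)
# Draw each Delaunay triangle; tris.points are (row, col), cv2 wants (x, y).
for simplex in tris.simplices:
    tri = tris.points[simplex][:, ::-1].astype(np.int32)
    cv2.polylines(im, [tri], isClosed=True, color=(0, 255, 0))
cv2.imwrite("low_poly.jpg", im)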