Example #1
class GazeCnnHPE:
    def __init__(
            self,
            roll_file="../../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf",
            pitch_file="../../etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf",
            yaw_file="../../etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"):
        self.sess = tf.Session()  #Launch the graph in a session.
        self.HPE = CnnHeadPoseEstimator(
            self.sess)  #Head pose estimation object

        # Load the weights from the configuration folders
        self.HPE.load_roll_variables(os.path.realpath(roll_file))
        self.HPE.load_pitch_variables(os.path.realpath(pitch_file))
        self.HPE.load_yaw_variables(os.path.realpath(yaw_file))

    def detect(self, image):
        # Get the angles for roll, pitch and yaw
        roll = self.HPE.return_roll(
            image)  # Evaluate the roll angle using a CNN
        pitch = self.HPE.return_pitch(
            image)  # Evaluate the pitch angle using a CNN
        yaw = self.HPE.return_yaw(image)  # Evaluate the yaw angle using a CNN
        #print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0])  + "]")

        return [roll, pitch, yaw]
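A minimal usage sketch for the GazeCnnHPE class above, assuming the default weight paths exist and that "face.jpg" is a hypothetical square face crop (deepgaze expects square inputs of at least 64x64 pixels):

import cv2

hpe = GazeCnnHPE()
face_crop = cv2.imread("face.jpg")            # hypothetical input image
face_crop = cv2.resize(face_crop, (64, 64))   # deepgaze works on square crops
roll, pitch, yaw = hpe.detect(face_crop)
print("roll/pitch/yaw:", roll[0, 0, 0], pitch[0, 0, 0], yaw[0, 0, 0])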
Example #2
def pose():
	models = os.getcwd() + os.sep + 'models'
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor(models + os.sep + 'shape_predictor_68_face_landmarks.dat')

	with tf.Session() as sess:

		head_pose_estimator = CnnHeadPoseEstimator(sess)
		head_pose_estimator.load_pitch_variables(models + os.sep + 'pitch.tf')
		head_pose_estimator.load_yaw_variables(models + os.sep + 'yaw.tf')
		head_pose_estimator.load_roll_variables(models + os.sep + 'roll.tf')

		path = os.getcwd() + os.sep + 'outputs' + os.sep + 'img_stats.json'
		with open(path, "r") as p: 
			data_list = json.load(p)

		print("Head pose estimation started")

		for data in data_list:
			frame = json2im(json.dumps(data))
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			(fh, fw) = frame.shape[:2]

			faces = detector(gray, 0)

			for face in faces:
				(x, y, w, h) = face_utils.rect_to_bb(face)
				cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
				image = frame[y:y + h, x:x + w]

				try:
					image = cv2.resize(image, (480,480))
				except cv2.error:
					print('Could not resize face crop, skipping')
					continue

				pitch = head_pose_estimator.return_pitch(image,radians=True)[0][0][0]
				yaw = head_pose_estimator.return_yaw(image,radians=True)[0][0][0]
				roll = head_pose_estimator.return_roll(image,radians=True)[0][0][0]
				
				sys.stdout.write(f"\rProcessed frame {data['index']}")
				sys.stdout.flush()
				
				FONT = cv2.FONT_HERSHEY_DUPLEX

				data['pose'] = {
					'pitch' : float(pitch),
					'yaw' : float(yaw),
					'roll' : float(roll)
				}

			if not faces:
				data['pose'] = None
	
	print("\nHead pose estimation complete\n")
	
	path = os.getcwd() + os.sep + 'outputs' + os.sep + 'img_stats.json'
	with open(path, "w") as p: 
		json.dump(data_list, p, indent = 4)
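The pose() function above relies on a json2im() helper that is not shown. A plausible sketch, assuming each record in img_stats.json stores its frame as base64-encoded JPEG/PNG bytes under an 'image' key (both the key name and the encoding are assumptions):

import base64
import json

import cv2
import numpy as np

def json2im(json_str):
    record = json.loads(json_str)
    raw = base64.b64decode(record['image'])      # assumed key holding the encoded frame
    buf = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)   # decode back to a BGR frame for OpenCV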
Example #3
 def __init__(self, resFolder):
     self.sess = tf.Session()  #Launch the graph in a session.
     HPestimator = CnnHeadPoseEstimator(
         self.sess)  #Head pose estimation object
     # Load the weights from the configuration folders
     HPestimator.load_yaw_variables(resFolder +
                                    "/head_pose/yaw/cnn_cccdd_30k.tf")
     HPestimator.load_roll_variables(resFolder +
                                     "/head_pose/roll/cnn_cccdd_30k.tf")
     HPestimator.load_pitch_variables(resFolder +
                                      "/head_pose/pitch/cnn_cccdd_30k.tf")
     self._estimator = HPestimator
Example #4
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255))

            sub_face = img[y:y + h, x:x + w]
            fname, ext = os.path.splitext(image)
            cv2.imwrite(fname + "_cropped" + ext, sub_face)


sess = tf.Session()  #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(
    sess)  #Head pose estimation object
roll_mat = []
pitch_mat = []
yaw_mat = []
# Load the weights from the configuration folders
my_head_pose_estimator.load_roll_variables(
    os.path.realpath(
        "C:/Users/mihaela/Downloads/deepgaze-master/deepgaze-master/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"
    ))
my_head_pose_estimator.load_pitch_variables(
    os.path.realpath(
        "C:/Users/mihaela/Downloads\deepgaze-master/deepgaze-master/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"
    ))
my_head_pose_estimator.load_yaw_variables(
    os.path.realpath(
        "C:/Users/mihaela/Downloads/deepgaze-master/deepgaze-master/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"
    ))
facecrop("test_andreea/*.jpg")
imagePattern = 'test_andreea/*_cropped.jpg'

imgList = glob.glob(imagePattern)

for i in imgList:
Example #5
import os

import cv2
import face_recognition
import tensorflow as tf
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator
from keras.preprocessing import image as keras_image
from PIL import Image

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
CONFIG = tf.ConfigProto()
CONFIG.gpu_options.allow_growth = True
SESS = tf.Session(config=CONFIG)
MY_HEAD_POSE_ESTIMATOR = CnnHeadPoseEstimator(
    SESS)  #Head pose estimation object

# Load the weights from the configuration folders
MY_HEAD_POSE_ESTIMATOR.load_roll_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
MY_HEAD_POSE_ESTIMATOR.load_pitch_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
MY_HEAD_POSE_ESTIMATOR.load_yaw_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))
img_dict = {}


def get_head_pose_estimation(file_name):
    origin_image_numpy = face_recognition.load_image_file(file_name)
    # See also: find_faces_in_picture_cnn.py
    face_locations = face_recognition.face_locations(origin_image_numpy)
    if len(face_locations) != 1:
        # means there are multiple faces in a picture
        # discard this picture
        return None
Example #6
def find_similar_img(img, topk=20):
    sess = tf.Session()  # Launch the graph in a session.
    my_head_pose_estimator = CnnHeadPoseEstimator(
        sess)  # Head pose estimation object

    # Load the weights from the configuration folders
    my_head_pose_estimator.load_roll_variables(
        os.path.realpath(
            "deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
    my_head_pose_estimator.load_pitch_variables(
        os.path.realpath(
            "deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
    my_head_pose_estimator.load_yaw_variables(
        os.path.realpath(
            "deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))

    angle_dict = {}
    most_similar = {}

    # get angle of input image
    image = cv2.imread(img)  # Read the image with OpenCV
    # Get the angles for roll, pitch and yaw
    roll = my_head_pose_estimator.return_roll(
        image)  # Evaluate the roll angle using a CNN
    pitch = my_head_pose_estimator.return_pitch(
        image)  # Evaluate the pitch angle using a CNN
    yaw = my_head_pose_estimator.return_yaw(
        image)  # Evaluate the yaw angle using a CNN
    image_angle = [
        float(roll[0, 0, 0]),
        float(pitch[0, 0, 0]),
        float(yaw[0, 0, 0])
    ]

    # similarity can only be computed on the extracted images (64 * 64),
    # but we need the original image, which has a higher resolution
    origin_img_size_folder, extracted_img_size_folder = Path(
        ARGS.folder + '_preprocess'), Path(ARGS.folder + '_extract')
    for extracted_file_path in tqdm.tqdm(
            extracted_img_size_folder.glob('*_0.jpg')):
        suffix = extracted_file_path.suffix.lower()
        if suffix != '.png' and suffix != '.jpg' and suffix != '.jpeg':
            continue
        image = cv2.imread(
            str(extracted_file_path))  # Read the image with OpenCV
        # Get the angles for roll, pitch and yaw
        roll = my_head_pose_estimator.return_roll(
            image)  # Evaluate the roll angle using a CNN
        pitch = my_head_pose_estimator.return_pitch(
            image)  # Evaluate the pitch angle using a CNN
        yaw = my_head_pose_estimator.return_yaw(
            image)  # Evaluate the yaw angle using a CNN
        angle_dict[extracted_file_path] = [
            float(roll[0, 0, 0]),
            float(pitch[0, 0, 0]),
            float(yaw[0, 0, 0])
        ]
        most_similar[extracted_file_path] = cosineDS(
            angle_dict[extracted_file_path], image_angle)
    print('Finished calculating {}\'s most similar pictures'.format(img))
    result = sorted(most_similar.items(), key=lambda x: x[1])[:topk]
    return [
        str(origin_img_size_folder / img_path.name.replace('_0.jpg', '.jpg'))
        for img_path, cosine_distance in result
    ]
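cosineDS() is referenced above but not defined in this snippet. A plausible sketch, assuming it returns the cosine distance (1 minus cosine similarity) between two [roll, pitch, yaw] vectors, which matches the ascending sort used to pick the closest poses:

import numpy as np

def cosineDS(vec_a, vec_b):
    a = np.asarray(vec_a, dtype=np.float64)
    b = np.asarray(vec_b, dtype=np.float64)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        return 1.0  # treat degenerate vectors as maximally distant
    return 1.0 - float(np.dot(a, b) / denom)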
Example #7
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)

    classes_name = []
    img_name = []
    for path in os.listdir(path_exp):
        # if os.path.isdir(path):
        for tpath in os.listdir(os.path.join(path_exp, path)):
            classes_name.append(path)
            img_name.append(tpath)

    # nrof_images = len(image_list)
    # classes_img_name = np.zeros((nrof_images, 2))
    # iii = 0
    # for path in os.listdir(path_exp):
    #     for tpath in os.listdir(os.path.join(path_exp, path)):
    #         classes_img_name[iii,:] = [path, tpath]
    #         iii = iii + 1

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
    # sess_2 = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    sess_2 = tf.Session()
    my_head_pose_estimator = CnnHeadPoseEstimator(sess_2)  # Head pose estimation object

    # Load the weights from the configuration folders
    my_head_pose_estimator.load_roll_variables("roll/cnn_cccdd_30k.tf")
    my_head_pose_estimator.load_pitch_variables("pitch/cnn_cccdd_30k.tf")
    my_head_pose_estimator.load_yaw_variables("yaw/cnn_cccdd_30k.tf")


    with tf.Graph().as_default():

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


        # with tf.Session() as sess:
        with sess.as_default():

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            pose_array = np.zeros((nrof_images, 3))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches -1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size

                # Get images for the batch
                # images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                images, pose = facenet.ydwu_load_data(image_list[i * batch_size:n], False, False, args.image_size, my_head_pose_estimator)
                # print("pose = ", pose)
                pose_array[i * batch_size:n, :] = pose

                feed_dict = { images_placeholder: images, phase_train_placeholder:False }

                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                # print("embed = ", embed)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)


                ydwu_image_list = image_list[i * batch_size:n]

                aaa = pose.tolist()
                bbb = embed.tolist()
                ff = open('/home/ydwu/project3/tmp_pose_and_emb.txt', 'a')


                # for jj in range(i * batch_size, n):
                    # ff.write(classes_name[jj] + ' ')
                    # ff.write(img_name[jj] + ' ')

                for jj in range(len(ydwu_image_list)):
                    # # ydwu_class = str(ydwu_image_list[jj]).split("/")[-2]
                    # # ydwu_img = str(ydwu_image_list[jj]).split("/")[-1]
                    # ff.write(str(ydwu_image_list[jj]).split("/")[-2] + ' ')
                    # ff.write(str(ydwu_image_list[jj]).split("/")[-1] + ' ')
                    # ff.write(str(aaa[jj]).strip('[').strip(']').replace(',', '') + ' ')
                    # ff.write(str(bbb[jj]).strip('[').strip(']').replace(',', '') + '\n')
                    ff.write(str(ydwu_image_list[jj]).split("/")[-2] + ' ')
                    ff.write(str(ydwu_image_list[jj]).split("/")[-1] + ' ')
                    ff.write(str(aaa[jj]) + ' ')
                    ff.write(str(bbb[jj]) + '\n')

                ff.close()

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # export embeddings and labels
            label_list  = np.array(label_list)
Example #8
def main(args):
    # filename = '/home/ydwu/tmp/gen_bin/cfp/Protocol/Pair_list_P.txt'
    filename = '/home/ydwu/tmp/gen_bin/cfp/Protocol/Pair_list_F.txt'

    num_list = []
    image_list = []

    with open(filename, 'r') as file_to_read:
        while True:
            lines = file_to_read.readline()  # read the file one line at a time
            print(lines)
            if not lines:
                break

            l = lines.split()
            num_list.append(l[0])
            image_list.append(l[1])

    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)

    classes_name = []
    img_name = []
    for path in os.listdir(path_exp):
        # if os.path.isdir(path):
        for tpath in os.listdir(os.path.join(path_exp, path)):
            classes_name.append(path)
            img_name.append(tpath)

    sess_2 = tf.Session()
    my_head_pose_estimator = CnnHeadPoseEstimator(sess_2)  # Head pose estimation object

    # Load the weights from the configuration folders
    my_head_pose_estimator.load_roll_variables("roll/cnn_cccdd_30k.tf")
    my_head_pose_estimator.load_pitch_variables("pitch/cnn_cccdd_30k.tf")
    my_head_pose_estimator.load_yaw_variables("yaw/cnn_cccdd_30k.tf")


    with tf.Graph().as_default():

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


        # with tf.Session() as sess:
        with sess.as_default():

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            pose_array = np.zeros((nrof_images, 1))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches -1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size

                # Get images for the batch
                # images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                images, pose = facenet.ydwu_load_data_v2(image_list[i * batch_size:n], False, False, args.image_size, my_head_pose_estimator)
                # print("pose = ", pose)
                pose_array[i * batch_size:n, :] = pose

                feed_dict = { images_placeholder: images, phase_train_placeholder:False }

                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                # print("embed = ", embed)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)


    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        with sess.as_default():
            stiched_model_saver = tf.train.import_meta_graph('/home/ydwu/tmp/gen_bin/dream_module/model.ckpt.meta')
            stiched_model_saver.restore(sess, '/home/ydwu/tmp/gen_bin/dream_module/model.ckpt-896000')
            stitch_emb = tf.get_default_graph().get_tensor_by_name('dream/input:0')
            stitch_yaw = tf.get_default_graph().get_tensor_by_name('dream/yaw:0')
            stitch_output = tf.get_default_graph().get_tensor_by_name('dream/output:0')


            # yaw_degree = 50
            yaw = BaseDataset.norm_angle(pose_array)
            mapped_profile_emb = sess.run(stitch_output,
                                          feed_dict={stitch_emb: emb_array,
                                                     stitch_yaw: np.reshape(yaw, newshape=(-1, 1))})


            run_time = time.time() - start_time
            print('Run time: ', run_time)
            feat_dim = 128
            data_num = mapped_profile_emb.shape[0]

            feat_file = '/home/ydwu/tmp/gen_bin/' + args.bin_name
            with open(feat_file, 'wb') as bin_f:
                bin_f.write(st.pack('ii', data_num, feat_dim))
                for j in range(data_num):
                    bin_f.write(st.pack('f' * feat_dim, *tuple(mapped_profile_emb[j, :])))
Example #9
#!/usr/bin/env python

#The MIT License (MIT)
#Copyright (c) 2016 Massimiliano Patacchiola
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import tensorflow as tf
import cv2
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator

sess = tf.Session() #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(sess) #Head pose estimation object
my_head_pose_estimator.load_roll_variables("../../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf")

for i in range(1,9):
    file_name = str(i) + ".jpg"
    print("Processing image ..... " + file_name)
    image = cv2.imread(file_name) #Read the image with OpenCV
    roll = my_head_pose_estimator.return_roll(image) #Evaluate the roll angle using a CNN
    print("Estimated roll ..... " + str(roll[0,0,0]))
    print("")
Example #10
import glob
import os

import cv2
import numpy as np
import tensorflow as tf
import tqdm

image_file_names = []
for file in sorted(glob.glob(os.path.join(LRW_SAVE_DIR, 'head_pose_jpg_file_names*'))):
    with open(file) as f:
        for line in f:
            if 'val' in line:
                image_file_names.append(line.rstrip())

from deepgaze.head_pose_estimation import CnnHeadPoseEstimator

sess = tf.Session() #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(sess) #Head pose estimation object

# Load the weights from the configuration folders
DEEPGAZE_EXAMPLES_DIR = '/shared/fusor/home/voleti.vikram/deepgaze/examples'
my_head_pose_estimator.load_roll_variables(os.path.realpath(os.path.join(DEEPGAZE_EXAMPLES_DIR, "../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf")))
my_head_pose_estimator.load_pitch_variables(os.path.realpath(os.path.join(DEEPGAZE_EXAMPLES_DIR, "../etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf")))
my_head_pose_estimator.load_yaw_variables(os.path.realpath(os.path.join(DEEPGAZE_EXAMPLES_DIR, "../etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf")))

poses = np.zeros((1, 3))

try:
    prev_word = "dummy"
    for image_file in tqdm.tqdm(image_file_names):
        word = image_file.split('/')[-1].split('.')[0].split('_')[0]
        if word != prev_word:
            np.save(os.path.join(LRW_SAVE_DIR, "head_pose_"+prev_word), poses)
            prev_word = word
            poses = np.empty((0, 3))
        #Read the image with OpenCV
        image = cv2.imread(image_file)
Example #11
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('head_demo.avi', fourcc, 12.0, (640, 480))

sess = tf.Session()
my_head_pose_estimator = CnnHeadPoseEstimator(
    sess)  # Head pose estimation object
# dir_path = os.path.dirname(os.path.realpath(__file__))
pitchfile_path = "/home/salil/Documents/deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"
yawfile_path = "/home/salil/Documents/deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"
rollfile_path = (
    "/home/salil/Documents/deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"
)
my_head_pose_estimator.load_pitch_variables(pitchfile_path)
my_head_pose_estimator.load_yaw_variables(yawfile_path)
my_head_pose_estimator.load_roll_variables(rollfile_path)

count = 0
x_axis_rotate = 0
while True:
    if not cap.isOpened():
        print('Could not open the camera')
        break
    ret, frame = cap.read()
    img = imutils.rotate(frame, 90)

    rclasses, rscores, rbboxes = process_image(img,
                                               select_threshold=min_select,
                                               nms_threshold=min_conf)
    img, cropped_head_list = visualize_box(img, rclasses, rscores, rbboxes,
                                           x_axis_rotate)
Example #12
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import cv2
import dlib
import tensorflow as tf
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# os.system("clear")

# %%
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# %%
sess = tf.Session()
head_pose_estimator = CnnHeadPoseEstimator(sess)
head_pose_estimator.load_pitch_variables('pitch.tf')
head_pose_estimator.load_yaw_variables('yaw.tf')
head_pose_estimator.load_roll_variables('roll.tf')

# %%
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    (fh, fw) = frame.shape[:2]

    faces = detector(gray, 0)
Example #13
    rot[2][2] = -sh * sa * sb + ch * cb
    return rot


config = tf.ConfigProto(allow_soft_placement=True)
if has_gpu:
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.Session(config=config)  #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(
    sess)  #Head pose estimation object

model_dir = os.getenv('MODEL_DIR', './models')
# Load the weights from the configuration folders
my_head_pose_estimator.load_yaw_variables(
    os.path.join(model_dir, 'headpose', 'yaw', 'cnn_cccdd_30k.tf'))
my_head_pose_estimator.load_roll_variables(
    os.path.join(model_dir, 'headpose', 'roll', 'cnn_cccdd_30k.tf'))
my_head_pose_estimator.load_pitch_variables(
    os.path.join(model_dir, 'headpose', 'pitch', 'cnn_cccdd_30k.tf'))


def get_head_pose_vector(image, face):
    cam_w = image.shape[1]
    cam_h = image.shape[0]
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / np.tan(60 / 2 * np.pi / 180)
    f_y = f_x
    camera_matrix = np.float32([[f_x, 0.0, c_x], [0.0, f_y, c_y],
                                [0.0, 0.0, 1.0]])
    # print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")
    #Distortion coefficients
Example #14
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import tensorflow as tf
import cv2
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator

sess = tf.Session()  #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(
    sess)  #Head pose estimation object

# Load the weights from the configuration folders
my_head_pose_estimator.load_roll_variables("roll/cnn_cccdd_30k.tf")
my_head_pose_estimator.load_pitch_variables("pitch/cnn_cccdd_30k.tf")
my_head_pose_estimator.load_yaw_variables("yaw/cnn_cccdd_30k.tf")

for i in range(1, 9):
    file_name = "/home/ydwu/project3/zihui_DREAM/preprocess/single-img/" + str(
        i) + ".jpg"
    print("Processing image ..... " + file_name)
    image = cv2.imread(file_name)  #Read the image with OpenCV

    # yaw = my_head_pose_estimator.return_yaw(image) #Evaluate the yaw angle using a CNN
    # yaw = my_head_pose_estimator.return_yaw(image, radians=False)

    # Get the angles for roll, pitch and yaw
    roll = my_head_pose_estimator.return_roll(
        image)  # Evaluate the roll angle using a CNN
Example #15
     #time.sleep(0.01)
     cam = cv2.VideoCapture(1)
     retval, frame = cam.read()
     if not retval:
         raise ValueError("Can't read frame")
     im1 = cv2.flip(frame,1)
     #im1 = Image.open("/home/zafar/zafar-rtech/Images_Houman/img"+str(i+1)+".jpg")
     img = cv2.resize(im1, (293, 293), interpolation = cv2.INTER_AREA)
     #im1 = im1.resize((293, 293), Image.BILINEAR)
     # image = im1
     cv2.imwrite("/home/zafar/Deepgaze_images_new2/image"+str(i+counter)+".jpg", im1)
     sess = tf.Session() #Launch the graph in a session.
     my_head_pose_estimator = CnnHeadPoseEstimator(sess) #Head pose estimation object
     my_head_pose_estimator.load_pitch_variables("/home/zafar/deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf")
     my_head_pose_estimator.load_yaw_variables("/home/zafar/deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf")
     my_head_pose_estimator.load_roll_variables("/home/zafar/deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf")
     # image = cv2.imread("/home/zafar/Deepgaze_images/image"+str(i+counter+50)+".jpg") #Read the image with OpenCV
             
     pitch = my_head_pose_estimator.return_pitch(img) #Evaluate the pitch angle using a CNN
     yaw = my_head_pose_estimator.return_yaw(img) #Evaluate the yaw angle using a CNN
     roll = my_head_pose_estimator.return_roll(img)
     print("Estimated pitch ..... " + str(pitch[0,0,0]))
     print("Estimated yaw ..... " + str(yaw[0,0,0]))
     print("Estimated roll ..... " + str(roll[0,0,0]))
             
     with open('/home/zafar/catkin_ws/src/Thesis/reading_angle_values/New_Data/gaze_record2.txt', 'a+') as text_file:
         text_file.write(str(roll[0,0,0]) + '   '+str(pitch[0,0,0]) + '   '+str(yaw[0,0,0]))
         text_file.write('\n')
     retval = True
     cam.release()
 counter+=5
Example #16
  my_head_pose_estimator = CnnHeadPoseEstimator(sess) #Head pose estimation object
  # Load the weights from the configuration folders
  

  # my_head_pose_estimator.load_roll_variables(os.path.realpath("/home/steve/code/deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
  # my_head_pose_estimator.load_pitch_variables(os.path.realpath("/home/steve/code/deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
  # my_head_pose_estimator.load_yaw_variables(os.path.realpath("/home/steve/code/deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k"))



  # data_dir = '~/research/face/data/'
  # out_dir = '~/research/face/output/'

  # inputFile = '/home/steve/code/faceDetYOLO/practice-data/360vids/20180306_094911.mp4'
  # outputFile = '/home/steve/code/face-py-faster-rcnn/output/360vids/20180306_094911_output.avi'
  my_head_pose_estimator.load_roll_variables(os.path.realpath("/users/dghaghar/research/face/src/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
  my_head_pose_estimator.load_pitch_variables(os.path.realpath("/users/dghaghar/research/face/src/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
  my_head_pose_estimator.load_yaw_variables(os.path.realpath("/users/dghaghar/research/face/src/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))



  data_dir = '/users/dghaghar/research/face/data/'
  out_dir = '/users/dghaghar/research/face/output/'

  inputFile = '/users/dghaghar/research/face/data/test.mp4'
  outputFile = '/users/dghaghar/research/face/output/test.avi'

  if not os.path.exists(out_dir):
    os.makedirs(out_dir)

  CONF_THRESH = 0.97 #0.65
Example #17
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import os
import tensorflow as tf
import cv2
import numpy
import numpy as np
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator

sess = tf.Session()  #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(
    sess)  #Head pose estimation object

# Load the weights from the configuration folders
my_head_pose_estimator.load_roll_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
my_head_pose_estimator.load_pitch_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
my_head_pose_estimator.load_yaw_variables(
    os.path.realpath("../../etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))


# for i in range(1,9):
#     file_name = str(i) + ".jpg"
#     print("Processing image ..... " + file_name)
#     image = cv2.imread(file_name) #Read the image with OpenCV
#     print(image.shape)
#     # Get the angles for roll, pitch and yaw
#     roll = my_head_pose_estimator.return_roll(image)  # Evaluate the roll angle using a CNN
#     pitch = my_head_pose_estimator.return_pitch(image)  # Evaluate the pitch angle using a CNN
#     yaw = my_head_pose_estimator.return_yaw(image)  # Evaluate the yaw angle using a CNN
Example #18
def main():
    #Defining the video capture object
    video_capture = cv2.VideoCapture(1)
    thresh = 0.25
    frame_check = 15
    detect = dlib.get_frontal_face_detector()
    predict = dlib.shape_predictor("/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat")# change to path where landmark points are stored

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]   # get the left eye index
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]  # get the right eye index
    flag=0

    sess = tf.Session()
    my_head_pose_estimator = CnnHeadPoseEstimator(sess)
    my_head_pose_estimator.load_roll_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))# change to deepgaze directory path
    my_head_pose_estimator.load_pitch_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
    my_head_pose_estimator.load_yaw_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))


    #Start of Eye Gaze Tracking
    win = dlib.image_window()
    
    predictor_path = "/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat"
    roi = []
    ref_point = 0 
    index1 = 0
    pt_lefteye_corner_x= 0
    pt_lefteye_corner_y = 0
    pt_pos1 = 0
    predictor = dlib.shape_predictor(predictor_path)
    pt_x2 =0
    pt_y2 = 0
    pt_x1 = 0
    pt_y1 = 0
    pt_actualx = 0
    pt_actualy = 0
    detector = dlib.get_frontal_face_detector()
    flag = 0
    flag1 = 0
    pt_righteye_corner_x = 0
    pt_righteye_corner_y = 0


    if(video_capture.isOpened() == False):
        print("Error: the resource is busy or unvailable")
    else:
        print("The video source has been opened correctly...")

    #Create the main window and move it
    cv2.namedWindow('Video')
    cv2.moveWindow('Video', 20, 20)

    #Obtaining the CAM dimension
    cam_w = int(video_capture.get(3))
    cam_h = int(video_capture.get(4))

    #Defining the camera matrix.
    #To have better results it is necessary to find the focal
    # length of the camera. fx/fy are the focal lengths (in pixels)
    # and cx/cy are the optical centres. These values can be obtained
    # roughly by approximation, for example in a 640x480 camera:
    # cx = 640/2 = 320
    # cy = 480/2 = 240
    # fx = fy = cx/tan(60/2 * pi / 180) = 554.26
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / numpy.tan(60/2 * numpy.pi / 180)
    f_y = f_x

    #Estimated camera matrix values.
    camera_matrix = numpy.float32([[f_x, 0.0, c_x],
                                   [0.0, f_y, c_y],
                                   [0.0, 0.0, 1.0] ])

    print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")

    #These are the camera matrix values estimated on my webcam with
    # the calibration code (see: src/calibration):
    camera_matrix = numpy.float32([[602.10618226,          0.0, 320.27333589],
                                   [         0.0, 603.55869786,  229.7537026],
                                   [         0.0,          0.0,          1.0] ])

    #Distortion coefficients
    #camera_distortion = numpy.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    #Distortion coefficients estimated by calibration
    camera_distortion = numpy.float32([ 0.06232237, -0.41559805,  0.00125389, -0.00402566,  0.04879263])


    #This matrix contains the 3D points of the
    # 11 landmarks we want to find. It has been
    # obtained from antrophometric measurement
    # on the human head.
    landmarks_3D = numpy.float32([P3D_RIGHT_SIDE,
                                  P3D_GONION_RIGHT,
                                  P3D_MENTON,
                                  P3D_GONION_LEFT,
                                  P3D_LEFT_SIDE,
                                  P3D_FRONTAL_BREADTH_RIGHT,
                                  P3D_FRONTAL_BREADTH_LEFT,
                                  P3D_SELLION,
                                  P3D_NOSE,
                                  P3D_SUB_NOSE,
                                  P3D_RIGHT_EYE,
                                  P3D_RIGHT_TEAR,
                                  P3D_LEFT_TEAR,
                                  P3D_LEFT_EYE,
                                  P3D_STOMION])

    #Declaring the two classifiers
    my_cascade = haarCascade("/home/agopinath1996/git_ws/deepgaze/etc/xml/haarcascade_frontalface_alt.xml", "/home/agopinath1996/git_ws/deepgaze/etc/xml/haarcascade_profileface.xml")
    #TODO If missing, example file can be retrieved from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    my_detector = faceLandmarkDetection('/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat')





    #Error counter definition
    no_face_counter = 0

    #Variables that identify the face
    #position in the main frame.
    face_x1 = 0
    face_y1 = 0
    face_x2 = 0
    face_y2 = 0
    face_w = 0
    face_h = 0

    #Variables that identify the ROI
    #position in the main frame.
    roi_x1 = 0
    roi_y1 = 0
    roi_x2 = cam_w
    roi_y2 = cam_h
    roi_w = cam_w
    roi_h = cam_h
    roi_resize_w = int(cam_w/10)
    roi_resize_h = int(cam_h/10)

    while(True):

        # Capture frame-by-frame
        ret, frame = video_capture.read()
        ret, frame_eye = video_capture.read()

        #print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0])  + "]")
        gray = cv2.cvtColor(frame[roi_y1:roi_y2, roi_x1:roi_x2], cv2.COLOR_BGR2GRAY)

        img = cv2.cvtColor(frame_eye, cv2.COLOR_RGB2BGR) #for eye gaze detection
        drowsyframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detect(drowsyframe, 0)
        for subject in subjects:
            shape = predict(frame,subject)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0,255,0),1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0,255,0), 1)
            if ear<thresh:
                flag+= 1
                print(flag)
                if flag >= frame_check:
                    cv2.putText(frame, "WAKEUPPPP", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)
                    cv2.putText(frame, "WAKEUPPPP", (10, 325), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255),2)
            else:
                flag=0

#Eye Gaze Detection
        dets = detect(img, 0)
        check = 5
        shapes_eye = []
        for k,d in enumerate(dets):
            #print("dets{}".format(d))
            #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))

            shape_eye = predict(img, d)

            for index, pt in enumerate(shape_eye.parts()):
                #print('Part {}: {}'.format(index, pt))
                pt_pos = (pt.x, pt.y)
                cv2.circle(img, pt_pos, 1, (0,225, 0), 2)
                if index == 29:
                    pt_x2 = int(pt.x)
                    pt_y2 = int(pt.y)
                if index == 18:
                    pt_x1 = int(pt.x)
                    pt_y1 = int(pt.y)
                if index == 37:
                    pt_righteye_corner_x = pt.x
                    pt_righteye_corner_y = pt.y
                if index == 40:
                    pt_lefteye_corner_x = pt.x
                    pt_lefteye_corner_y = pt.y
                roi =  frame_eye[pt_y1:pt_y2,pt_x1:pt_x2]
                roi_gray = cv2.cvtColor(roi,cv2.COLOR_RGB2GRAY)
                _, threshold = cv2.threshold(roi_gray, 30, 255, cv2.THRESH_BINARY_INV)
                try:
                    M = cv2.moments(threshold)
                    #print(M)
                    cX = int(M["m10"]/M["m00"])
                    cY = int(M["m01"]/M["m00"])
                    #print(cX,cY)
                    pt_actualx = pt_x1+cX
                    pt_actualy = pt_y1+cY
                    #print(pt_actualx,pt_actualy)
                    diff_right = pt_actualx-pt_righteye_corner_x
                    diff_left = pt_lefteye_corner_x - pt_actualx
                    print(diff_right,diff_left)
                    #print(cX,cY)
                    if diff_right < 3:
                        cv2.putText(frame,'Look straight!',(10,60),cv2.FONT_HERSHEY_SIMPLEX,0.5,(10,10,255),2)
                    if diff_left <3:
                        cv2.putText(frame,'Look straight!',(10,60),cv2.FONT_HERSHEY_SIMPLEX,0.5,(10,10,255),2)


                except:
                    pass
                cv2.circle(frame,(pt_actualx,pt_actualy), 2,(255,0,255),-1)
                #print(pt_actualx,pt_actualy)


            #print(pt_x1,pt_x2,pt_y1,pt_y2)
            #print(roi.shape_eye)
            #print(img.shape_eye)
            try:
                cv2.imshow("threshold", threshold)
                cv2.waitKey(1)
            except:
                pass

        win.clear_overlay()
        win.set_image(img)
        if len(shapes_eye)!= 0 :
            for i in range(len(shapes_eye)):
                win.add_overlay(shapes_eye[i])





        #Looking for faces with cascade
        #The classifier moves over the ROI
        #starting from a minimum dimension and augmenting
        #slightly based on the scale factor parameter.
        #The scale factor for the frontal face is 1.10 (10%)
        #Scale factor: 1.15=15%, 1.25=25%, etc.
        #Higher scale factors mean faster classification
        #but lower accuracy.
        #
        #Return code: 1=Frontal, 2=FrontRotLeft,
        # 3=FrontRotRight, 4=ProfileLeft, 5=ProfileRight.
        my_cascade.findFace(gray, True, True, True, True, 1.10, 1.10, 1.15, 1.15, 40, 40, rotationAngleCCW=30, rotationAngleCW=-30, lastFaceType=my_cascade.face_type)
        #print(returnvalue)
        #Accumulate error values in a counter
        if(my_cascade.face_type == 0):
            no_face_counter += 1

        #If no face is found for a certain
        #number of cycles, then the ROI is reset
        if(no_face_counter == 50):
            no_face_counter = 0
            roi_x1 = 0
            roi_y1 = 0
            roi_x2 = cam_w
            roi_y2 = cam_h
            roi_w = cam_w
            roi_h = cam_h

        #Checking which kind of face is returned
        if(my_cascade.face_type > 0):

            #Face found, reset the error counter
            no_face_counter = 0

            #Because the dlib landmark detector wants a precise
            #bounding box of the face, it is necessary to resize
            #the box returned by the OpenCV haar detector.
            #Adjusting the frame for profile left
            if(my_cascade.face_type == 4):
                face_margin_x1 = 20 - 10 #resize_rate + shift_rate
                face_margin_y1 = 20 + 5 #resize_rate + shift_rate
                face_margin_x2 = -20 - 10 #resize_rate + shift_rate
                face_margin_y2 = -20 + 5 #resize_rate + shift_rate
                face_margin_h = -0.7 #resize_factor
                face_margin_w = -0.7 #resize_factor
            #Adjusting the frame for profile right
            elif(my_cascade.face_type == 5):
                face_margin_x1 = 20 + 10
                face_margin_y1 = 20 + 5
                face_margin_x2 = -20 + 10
                face_margin_y2 = -20 + 5
                face_margin_h = -0.7
                face_margin_w = -0.7
            #No adjustments
            else:
                face_margin_x1 = 0
                face_margin_y1 = 0
                face_margin_x2 = 0
                face_margin_y2 = 0
                face_margin_h = 0
                face_margin_w = 0

            #Updating the face position
            face_x1 = my_cascade.face_x + roi_x1 + face_margin_x1
            face_y1 = my_cascade.face_y + roi_y1 + face_margin_y1
            face_x2 = my_cascade.face_x + my_cascade.face_w + roi_x1 + face_margin_x2
            face_y2 = my_cascade.face_y + my_cascade.face_h + roi_y1 + face_margin_y2
            face_w = my_cascade.face_w + int(my_cascade.face_w * face_margin_w)
            face_h = my_cascade.face_h + int(my_cascade.face_h * face_margin_h)

            crop_img = frame[face_y1:face_y2, face_x1:face_x2]
            cv2.imshow("cropped", crop_img)

            roll = my_head_pose_estimator.return_roll(crop_img)
            pitch = my_head_pose_estimator.return_pitch(crop_img)
            yaw = my_head_pose_estimator.return_yaw(crop_img)
            #print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0])  + "]")

            if yaw[0,0,0] > 30:
                cv2.putText(frame, "You are facing right!", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)
            if yaw[0,0,0] < -30:
                cv2.putText(frame, "You are facing left!", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)








            #Updating the ROI position
            roi_x1 = face_x1 - roi_resize_w
            if (roi_x1 < 0): roi_x1 = 0
            roi_y1 = face_y1 - roi_resize_h
            if(roi_y1 < 0): roi_y1 = 0
            roi_w = face_w + roi_resize_w + roi_resize_w
            if(roi_w > cam_w): roi_w = cam_w
            roi_h = face_h + roi_resize_h + roi_resize_h
            if(roi_h > cam_h): roi_h = cam_h
            roi_x2 = face_x2 + roi_resize_w
            if (roi_x2 > cam_w): roi_x2 = cam_w
            roi_y2 = face_y2 + roi_resize_h
            if(roi_y2 > cam_h): roi_y2 = cam_h

            #Debugging printing utilities
            if(DEBUG == True):
                #print("FACE: ", face_x1, face_y1, face_x2, face_y2, face_w, face_h)
                #print("ROI: ", roi_x1, roi_y1, roi_x2, roi_y2, roi_w, roi_h)

                #Drawing a green rectangle
                # (and text) around the face.
                text_x1 = face_x1
                text_y1 = face_y1 - 3
                if(text_y1 < 0): text_y1 = 0
                cv2.putText(frame, "FACE", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
                cv2.rectangle(frame,
                             (face_x1, face_y1),
                             (face_x2, face_y2),
                             (0, 255, 0),
                              2)

            #In case of a frontal/rotated face the
            # landmark detector is called
            if(my_cascade.face_type > 0):
                landmarks_2D = my_detector.returnLandmarks(frame, face_x1, face_y1, face_x2, face_y2, points_to_return=TRACKED_POINTS)

                if(DEBUG == True):
                    #cv2.drawKeypoints(frame, landmarks_2D)

                    for point in landmarks_2D:
                        cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)


                #Applying the PnP solver to find the 3D pose
                # of the head from the 2D position of the
                # landmarks.
                #retval - bool
                #rvec - Output rotation vector that, together with tvec, brings
                # points from the model coordinate system to the camera coordinate system.
                #tvec - Output translation vector.
                retval, rvec, tvec = cv2.solvePnP(landmarks_3D,
                                                  landmarks_2D,
                                                  camera_matrix, camera_distortion)

                #Now we project the 3D points into the image plane
                #Creating a 3-axis to be used as reference in the image.
                axis = numpy.float32([[50,0,0],
                                      [0,50,0],
                                      [0,0,50]])
                imgpts, jac = cv2.projectPoints(axis, rvec, tvec, camera_matrix, camera_distortion)

                #Drawing the three axis on the image frame.
                #The opencv colors are defined as BGR colors such as:
                # (a, b, c) >> Blue = a, Green = b and Red = c
                #Our axis/color convention is X=R, Y=G, Z=B
                sellion_xy = (landmarks_2D[7][0], landmarks_2D[7][1])
                cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0,255,0), 3) #GREEN
                cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255,0,0), 3) #BLUE
                cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0,0,255), 3) #RED

        #Drawing a yellow rectangle
        # (and text) around the ROI.
        if(DEBUG == True):
            text_x1 = roi_x1
            text_y1 = roi_y1 - 3
            if(text_y1 < 0): text_y1 = 0
            cv2.putText(frame, "ROI", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1);
            cv2.rectangle(frame,
                         (roi_x1, roi_y1),
                         (roi_x2, roi_y2),
                         (0, 255, 255),
                         2)

        #Showing the frame and waiting
        # for the exit command
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break

    #Release the camera
    video_capture.release()
    print("Bye...")