def align_faces(faces, image, predictor, required_size=(160, 160)):
    # Get the image size
    (s_height, s_width) = image.shape[:2]

    aligned_faces = []

    # Loop over the detected faces
    for i, det in enumerate(faces):

        # Get the facial landmarks
        shape = predictor(image, det)

        # Get the eye coordinates from the facial landmarks
        left_eye = extract_left_eye_center(shape)
        right_eye = extract_right_eye_center(shape)

        # Get the rotation matrix that aligns the face, from the eye positions
        M = get_rotation_matrix(left_eye, right_eye)

        # Rotate the image so that the i-th face is aligned
        rotated = cv2.warpAffine(
            image, M, (s_width, s_height), flags=cv2.INTER_CUBIC)

        # Crop the image to keep only the i-th face
        cropped = crop_image(rotated, det)

        cropped = cv2.resize(cropped, required_size)

        # Add the cropped face to the list
        aligned_faces.append(cropped)

    return asarray(aligned_faces)
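
A minimal driver sketch for align_faces, assuming dlib's frontal face detector and the standard 68-point landmark model (set up as in Example #6 below); the image path is hypothetical:

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

image = cv2.imread("person.jpg")  # hypothetical input image
faces = detector(image, 1)
aligned = align_faces(faces, image, predictor, required_size=(160, 160))
print(aligned.shape)  # (number_of_faces, 160, 160, 3)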
A related fragment (from a class method; self.predictor, img, s_width, and s_height come from the enclosing scope):

def crop_from_detected(det):
    shape = self.predictor(img, det)
    left_eye = extract_left_eye_center(shape)
    right_eye = extract_right_eye_center(shape)

    M = get_rotation_matrix(left_eye, right_eye)
    rotated = cv2.warpAffine(img,
                             M, (s_width, s_height),
                             flags=cv2.INTER_CUBIC)

    cropped = crop_image(rotated, det)
    return cropped
Example #3
def detectar(img): 
    detecs = detector(img, 1) # Vector of face detections
    rostos = []
    s_height, s_width = img.shape[:2]

    for i, det in enumerate(detecs):
        shape = predictor(img, det)
        left_eye = extract_left_eye_center(shape)
        right_eye = extract_right_eye_center(shape)
        M = get_rotation_matrix(left_eye, right_eye)
        rotated = cv2.warpAffine(img, M, (s_width, s_height), flags=cv2.INTER_CUBIC)  # dsize is (width, height)
        cropped = crop_image(rotated, det)
        squared = resizeAndPad(cropped, (img_h, img_w), 127)
        rostos.append(squared)

    return detecs, rostos        
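
cv2.warpAffine expects its output size as (width, height), not (height, width); a quick self-contained check of that convention:

import numpy as np
import cv2

img = np.zeros((480, 640, 3), dtype=np.uint8)  # height=480, width=640
M = cv2.getRotationMatrix2D((320, 240), 15, 1.0)
out = cv2.warpAffine(img, M, (640, 480), flags=cv2.INTER_CUBIC)  # dsize=(width, height)
assert out.shape[:2] == (480, 640)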
Example #4
def prepare(raw_coll, dest_dir):
    for filename in raw_coll: 
        img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        s_height, s_width = img.shape[:2]
        dets = detector(img, 1)
        print('Processing ' + filename)
        for i, det in enumerate(dets):
            if i > 0: 
                print("Mais de um rosto detectado!!!!!")
                break
            shape = predictor(img, det)
            left_eye = extract_left_eye_center(shape)
            right_eye = extract_right_eye_center(shape)
            M = get_rotation_matrix(left_eye, right_eye)
            rotated = cv2.warpAffine(img, M, (s_width, s_height), flags=cv2.INTER_CUBIC)  # dsize is (width, height)
            cropped = crop_image(rotated, det)
            squared = resizeAndPad(cropped, (img_h, img_w), 127)
            output_image_path = os.path.join(dest_dir, os.path.basename(filename))
            cv2.imwrite(output_image_path, squared)        
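
A hedged usage sketch for prepare, with hypothetical paths:

import glob

prepare(glob.glob("raw_faces/*.jpg"), "prepared_faces")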
Example #5
    def process_image(self, input_image, output_image, scale=2):

        img = cv2.imread(input_image)

        height, width = img.shape[:2]
        s_height, s_width = height // scale, width // scale
        img = cv2.resize(img, (s_width, s_height))

        dets = self.detector(img, 1)

        for i, det in enumerate(dets):
            shape = self.predictor(img, det)
            left_eye = extract_left_eye_center(shape)
            right_eye = extract_right_eye_center(shape)

            M = get_rotation_matrix(left_eye, right_eye)
            rotated = cv2.warpAffine(img,
                                     M, (s_width, s_height),
                                     flags=cv2.INTER_CUBIC)

            cropped = crop_image(rotated, det)

            output_image_path = os.path.join(
                output_image,
                os.path.basename(input_image) + ('_%i.jpg' % i))
            cv2.imwrite(output_image_path, cropped)
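
Assuming the enclosing class wires up self.detector and self.predictor in its constructor, a call might look like this (class name and paths hypothetical):

aligner = FaceAligner()
aligner.process_image("input/photo.jpg", "output_faces", scale=2)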
Example #6
def alignFace(image):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # Keep the original dimensions (the resize here would be a no-op)
    height, width = image.shape[:2]
    s_height, s_width = height, width

    dets = detector(image, 1)

    if len(dets) == 1:
        shape = predictor(image, dets[0])
        left_eye = extract_left_eye_center(shape)
        right_eye = extract_right_eye_center(shape)

        M = get_rotation_matrix(left_eye, right_eye)
        rotated = cv2.warpAffine(image,
                                 M, (s_width, s_height),
                                 flags=cv2.INTER_CUBIC)

        return rotated
    else:
        print("Expected exactly one face, found %d" % len(dets))
        return None
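
The get_rotation_matrix helper used throughout these examples is not shown here; a plausible sketch (an assumption about its behavior, not the actual implementation) rotates about the midpoint between the eyes so that the eye line becomes horizontal:

import math
import cv2

def get_rotation_matrix_sketch(left_eye, right_eye):
    # left_eye and right_eye are (x, y) tuples; the angle of the eye line
    # in degrees becomes the rotation angle around the eyes' midpoint.
    angle = math.degrees(math.atan2(right_eye[1] - left_eye[1],
                                    right_eye[0] - left_eye[0]))
    center = ((left_eye[0] + right_eye[0]) / 2,
              (left_eye[1] + right_eye[1]) / 2)
    return cv2.getRotationMatrix2D(center, angle, 1.0)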
Example #7
	def run(self):
		old = ""
		self.count, self.count_u, confidence = 0, 0, 0
		print("thread is : ", int(self.thread().currentThreadId()))
		local_DB = sqlite3.connect('Data/users.db')
		cur = local_DB.cursor()
		start = time.time()
		while True:
			dur = time.time() - start
			dirty = False
			if dur > 10.0:
				queryb = {'id': 1}
				if float(self.temp()) > 60:
					self.activate_fan()
				else:
					self.deactivate_fan()
				try:
					res1 = requests.post(urlb, data=queryb)
					#print(res1.text)
					start = time.time()
					print("pulsed")
					# mergeDB()
				except requests.ConnectionError as e:
					print("no connection")
					dirty = True

			while self.working:
				dure = time.time()  # time.clock() was removed in Python 3.8
				for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
					# grab the raw NumPy array representing the image, then initialize the timestamp
					# and occupied/unoccupied text
					if (not self.working):
						rawCapture.seek(0)
						rawCapture.truncate(0)
						break
					frame = frame.array

					height, width, channels = frame.shape
					# Convert to grayscale
					gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

					# Resize to speed up detection (optional; change size above)
					mini = cv.resize(
						frame, (int(gray.shape[1] / size), int(gray.shape[0] / size)))

					# Detect faces and loop through each one
					# faces = self.lbp_cascade.detectMultiScale(mini, 1.1, 3)
					dets = detector(gray, 1)
					um_faces = len(dets)
					if um_faces == 0:
						self.status.emit("Denied")
						self.user.emit("aucun personne")
					for i, det in enumerate(dets):
						shape = predictor(frame, det)
						left_eye = extract_left_eye_center(shape)
						right_eye = extract_right_eye_center(shape)

						M = get_rotation_matrix(left_eye, right_eye)
						# The detections come from the full-size gray image, so
						# warp into the full frame size, not the downscaled one
						rotated = cv.warpAffine(
							frame, M, (width, height), flags=cv.INTER_CUBIC)

						cropped = crop_image(rotated, det)
						if cropped is None:
							continue
						cropped = cv.cvtColor(cropped, cv.COLOR_BGR2GRAY)
						# Try to recognize the face
						prediction, confidence = self.model.predict(cropped)
						print(confidence)
						cv.rectangle(frame, (det.left(), det.top()),
									 (det.right(), det.bottom()), (255, 255, 0), 2)
						if confidence < 160:
							# Grant access
							cur.execute("SELECT prenom, depatement FROM users WHERE nom = ?",(self.names[prediction],))
							row = cur.fetchone()
							print(row)
							print(prediction)
							if old == self.names[prediction]:
								self.count += 1
								if self.count > 3:
									self.count = 0
									self.status.emit("Authorized")
									self.user.emit(
										"Welcome, Mr. {}".format(old))
									cur.execute(
										"INSERT INTO log (nom,prenom,departement,date) VALUES (?,?,?,datetime('now'))", (old, row[0], row[1]))
									local_DB.commit()
									query = {'name': old,
											 'prenom': row[0],
											 'departement':row[1]}
									if not dirty:
										res = requests.post(url, data=query)
									self.stop()
							else :
								old = self.names[prediction]
						else:
							self.status.emit("Denied")
							self.user.emit("non reconue")
							self.count_u += 1
							if self.count_u > 5:
								self.count_u = 0
								self.stop()
					# Show the image and check for ESC being pressed
					qtimg = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
					# qtimg = cv.resize(qtimg, dim, interpolation = cv.INTER_AREA)
					image = QtGui.QImage(qtimg, width,
										 height, width * 3, QtGui.QImage.Format_RGB888)
					pixmap = QtGui.QPixmap.fromImage(image)
					pixmap_resized = pixmap.scaled(
						350, 260, QtCore.Qt.KeepAspectRatio)
					pixmapItem = QtWidgets.QGraphicsPixmapItem(pixmap_resized)
					self.changePixmap.emit(pixmapItem)
					rawCapture.seek(0)
					rawCapture.truncate(0)
					if (time.time() - dure) > 20:
						self.stop()
					QtWidgets.QApplication.processEvents()
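
The confidence test in the loop above follows the OpenCV face-recognizer convention: predict returns a label and a distance-like confidence, where lower values mean a closer match. A minimal sketch, assuming self.model is an LBPH recognizer (the snippet does not say which model it is):

import numpy as np
import cv2  # needs opencv-contrib-python for the cv2.face module

# Train on synthetic data just to show the call pattern; real use would
# train on cropped, aligned grayscale faces.
model = cv2.face.LBPHFaceRecognizer_create()
faces = [np.random.randint(0, 255, (100, 100), dtype=np.uint8) for _ in range(4)]
model.train(faces, np.array([0, 0, 1, 1], dtype=np.int32))
label, confidence = model.predict(faces[0])
print(label, confidence)  # lower confidence means a closer match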
Example #8
    img_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY)

    # Face detection
    dets = detector(img_gray)
    det = correct_detection(dets[0], img_gray)
    ldmrks = predictor(img_gray, det)

    roll, pitch, yaw = rpy.estimate(det, img_color)

    axis_image = img_color.copy()
    draw_axis(axis_image, yaw, pitch, roll)
    cv2.imwrite("img_axes.png", axis_image)

    # Face rotation
    left_eye = extract_left_eye_center(ldmrks)
    right_eye = extract_right_eye_center(ldmrks)
    dlib_angle = angle_between_2_points(left_eye, right_eye)
    if abs(roll.item()) > abs(dlib_angle):
        angle = roll.item()
    else:
        angle = dlib_angle
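    # Use whichever estimate is larger in magnitude: the roll angle from the
    # head-pose estimator or the eye-line angle from dlib; rotating about the
    # nose center below keeps the face in place after the warp.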
    M = cv2.getRotationMatrix2D(
        (ldmrks.part(NOSE_CENTER).x, ldmrks.part(NOSE_CENTER).y), angle, 1)
    rotated = cv2.warpAffine(img_color,
                             M, (img_color.shape[1], img_color.shape[0]),
                             flags=cv2.INTER_CUBIC)

    cv2.imwrite("img_rotated.png", rotated)

    # Rotated landmarks
    ldmrks = predictor(cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY), det)
Example #9
    def capture(self, id, name):
        try:
            user_name = name
        except Exception:
            print("You must enter a name")
            return
        path = os.path.join(users_dir, user_name)
        if not os.path.isdir(path):
            os.mkdir(path)
        # Generate name for image file
        pin = sorted(
            [int(n[:n.find('.')])
             for n in os.listdir(path) if n[0] != '.'] + [0])[-1] + 1
        print("Le programmes va capturer 20 images. \
		\nDeplacez votre tete pour augmenter la precision pendant le fonctionnement.\n"
              )

        # The program loops until it has 20 images of the face.
        count = 0
        pause = 0
        t = 0
        count_max = 20
        # Loop over camera frames until count_max face images are captured
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):

            if count >= count_max:
                rawCapture.seek(0)
                rawCapture.truncate(0)
                break

            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            frame = frame.array

            # Get image size
            height, width, channels = frame.shape
            r = 100.0 / width
            dim = (100, int(height * r))

            # Flip frame
            frame = cv.flip(frame, 1, 0)

            # Convert to grayscale
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

            # Detect faces
            dets = detector(frame, 1)

            for i, det in enumerate(dets):
                shape = predictor(frame, det)
                left_eye = extract_left_eye_center(shape)
                right_eye = extract_right_eye_center(shape)

                M = get_rotation_matrix(left_eye, right_eye)
                rotated = cv.warpAffine(frame,
                                        M, (width, height),
                                        flags=cv.INTER_CUBIC)

                cropped = crop_image(rotated, det)
                cv.rectangle(frame, (det.left(), det.top()),
                             (det.right(), det.bottom()), (255, 255, 0), 2)

                # Remove false positives (quality check disabled here;
                # every detection is kept)

                # To create diversity, only save every third detected frame
                if pause == 0:
                    print("saving capture " +
                          str(count + 1) + "/" + str(count_max))

                    # Save image file
                    cv.imwrite('%s/%s.png' % (path, pin), cropped)
                    pin += 1
                    count += 1
                    pause = 1

            if (pause > 0):
                pause = (pause + 1) % 3
            qtimg = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            #qtimg = cv.resize(qtimg, dim, interpolation = cv.INTER_AREA)
            image = QtGui.QImage(qtimg, width, height, width * 3,
                                 QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap.fromImage(image)
            pixmap_resized = pixmap.scaled(350, 260, QtCore.Qt.KeepAspectRatio)
            pixmapItem = QtWidgets.QGraphicsPixmapItem(pixmap_resized)
            if count == count_max:
                image = QtGui.QImage('camera.png')
                pixmap = QtGui.QPixmap.fromImage(image)
                pixmap_resized = pixmap.scaled(350, 260,
                                               QtCore.Qt.KeepAspectRatio)
                pixmapItem = QtWidgets.QGraphicsPixmapItem(pixmap_resized)
            self.scene.addItem(pixmapItem)
            rawCapture.seek(0)
            rawCapture.truncate(0)
            QtWidgets.QApplication.processEvents()
            key = cv.waitKey(10)

            if key == 27:
                break
        image = QtGui.QImage('camera.png')
        idle = QtGui.QPixmap.fromImage(image)
        idle_resized = idle.scaled(350, 260, QtCore.Qt.KeepAspectRatio)
        pixmapItem = QtWidgets.QGraphicsPixmapItem(idle_resized)
        self.scene.addItem(pixmapItem)
        cv.destroyAllWindows()
        QtWidgets.QApplication.processEvents()
Example #10
        filename = os.fsdecode(file)
        if filename.endswith(".jpg") or filename.endswith(
                ".png") or filename.endswith(".jpeg") or filename.endswith(
                    ".bmp"):
            print('transforming ' + str(counter) + '/' + str(maxFiles) + ': ' +
                  filename)
            img = cv2.imread(input_image_folder + '/' + filename,
                             cv2.IMREAD_GRAYSCALE)
            height, width = img.shape[:2]
            s_height, s_width = height // scale, width // scale
            img = cv2.resize(img, (s_width, s_height))
            dets = detector(img, 1)
            for i, det in enumerate(dets):
                shape = predictor(img, det)
                left_eye = extract_left_eye_center(shape)
                right_eye = extract_right_eye_center(shape)

                M = get_rotation_matrix(left_eye, right_eye)
                rotated = cv2.warpAffine(img,
                                         M, (s_width, s_height),
                                         flags=cv2.INTER_CUBIC)

                cropped = crop_image(rotated, det)

                if filename.endswith('.jpg'):
                    output_image = filename.replace('.jpg', '_%i.jpg' % i)
                elif filename.endswith('.png'):
                    output_image = filename.replace('.png', '_%i.jpg' % i)
                elif filename.endswith('.jpeg'):
                    output_image = filename.replace('.jpeg', '_%i.jpg' % i)
                elif filename.endswith('.bmp'):
                    output_image = filename.replace('.bmp', '_%i.jpg' % i)
Example #11
def align_face(name):
    global no_removes  # module-level failure counter (assumed to exist)
    input_image, output_image = source + name, destination + name

    # Read the image; if it is unreadable (e.g. 0 bytes), delete it
    output = input_image
    img = cv2.imread(input_image)
    if img is None:
        if (os.path.isfile(input_image)):
            os.remove(input_image)
        return 'fail'

    # Scale the image down
    height, width = img.shape[:2]
    if (width == 0 or height == 0):
        no_removes += 1
        os.remove(input_image)
        return 'fail'
    s_height, s_width = height // scale, width // scale
    img = cv2.resize(img, (s_width, s_height))
    output += ' | ' + str(s_width) + ' | ' + str(s_height)

    # Detect faces; if none are found, remove the file
    dets = detector(img, 1)
    output += ' | ' + str(len(dets)) + '\n'
    if (len(dets) == 0):
        os.remove(input_image)
        return 'fail'

    # For each face: rotate it, crop it, save it, then read it back and write out the landmark points
    for i, det in enumerate(dets):
        shape = predictor(img, det)
        # Rotate so that the eyes are horizontal
        left_eye = extract_left_eye_center(shape)
        right_eye = extract_right_eye_center(shape)
        output += 'eyes: ' + str(left_eye) + ' ' + str(right_eye) + ' | '
        M = get_rotation_matrix(left_eye, right_eye)
        rotated = cv2.warpAffine(img,
                                 M, (s_width, s_height),
                                 flags=cv2.INTER_CUBIC)

        cropped = crop_image(rotated, det)
        if (cropped.shape[1] == 0):
            continue
        # Save the cropped face
        if output_image.endswith('.jpg'):
            output_image_path = output_image.replace('.jpg', '_%i.jpg' % i)
        elif output_image.endswith('.png'):
            output_image_path = output_image.replace('.png', '_%i.jpg' % i)
        else:
            output_image_path = output_image + ('_%i.jpg' % i)
        output += ' | ' + output_image_path + ' | ' + str(cropped.shape)
        cv2.imwrite(output_image_path, cropped)

        # Landmark detection
        try:
            LM_img = io.imread(output_image_path)
        except Exception:
            os.remove(output_image_path)
            return 'fail'
        dets = detector(LM_img, 1)
        output += ("Number of faces detected: {}".format(len(dets)))
        if (len(dets) == 0):
            os.remove(output_image_path)

        for k, d in enumerate(dets):
            output += (
                " | Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                    k, d.left(), d.top(), d.right(), d.bottom()))
            shape = predictor(LM_img, d)
            with open(output_image_path + '.txt', 'w') as lm:
                for p in range(shape.num_parts):
                    lm.write(
                        str(shape.part(p).x) + ' ' + str(shape.part(p).y) +
                        '\n')

    #print(output + '\n===============')
    return 'success'
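
A hedged driver sketch for align_face, assuming the module-level names the snippet relies on (source, destination, scale, no_removes, detector, predictor; all values hypothetical):

import os

import cv2
import dlib
from skimage import io

source = "raw/"
destination = "aligned/"
scale = 2
no_removes = 0
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

for name in os.listdir(source):
    print(name, align_face(name))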