Example #1
def capture(self):
    toolsConfig = ToolsConfig(self.captureName)
    camera = toolsConfig.getCamera()
    for _ in range(self.captureAmount):
        try:
            image = camera.read()
            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # Get coordinates of the single face in the captured image.
            result = self.face.detect_single(image)
            if result is None:
                print('Could not detect single face!' +
                      ' Check the image in capture.pgm' +
                      ' to see what was captured and try' +
                      ' again with only one face visible.')
                continue
            x, y, w, h = result
            # Crop image as close as possible to the desired face aspect ratio.
            # Might be smaller if the face is near the edge of the image.
            crop = self.face.crop(image, x, y, w, h,
                                  int(ToolsConfig.getFaceFactor() * w))
            # Save cropped image to a new capture file.
            filename, count = toolsConfig.getNewCaptureFile()
            cv2.imwrite(filename, crop)
            sleep(0.3)  # Pause briefly between captures (needs `from time import sleep`).
            print('Found face and wrote training image', filename)
        except KeyboardInterrupt:
            camera.stop()
            break
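
A minimal usage sketch for the capture() method above. The `Capture` class name and the command-line handling are assumptions for illustration; only capture(), capName and captureAmount come from the snippets on this page (see Example #4).

# Hypothetical driver script; the `Capture` class name is assumed.
import sys

if __name__ == '__main__':
    # First CLI argument is the subject name, e.g. `python capture.py alice`.
    name = sys.argv[1] if len(sys.argv) > 1 else None
    tool = Capture(capName=name, captureAmount=20)
    tool.capture()
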
Example #2
    def train(self):
        print("Reading training images...")
        print('-' * 20)
        faces = []
        labels = []
        imageDirsWithLabel = [[0, "negative"]]
        imageDirs = os.listdir(ToolsConfig.TRAINING_DIR)
        imageDirs = [
            x for x in imageDirs
            if not x.startswith('.') and not x.startswith('negative')
        ]
        pos_count = 0

        for i in range(len(imageDirs)):
            print("Assign label " + str(i + 1) + " to " + imageDirs[i])
            imageDirsWithLabel.append([i + 1, imageDirs[i]])
        print('-' * 20)
        print('')

        # For every label/name pair:
        for j in range(len(imageDirsWithLabel)):
            # Add the label to the labels list and the prepared image to the faces list.
            for filename in ToolsConfig.walkFiles(
                    ToolsConfig.TRAINING_DIR + str(imageDirsWithLabel[j][1]),
                    '*.pgm'):
                faces.append(self.prepareImage(filename))
                labels.append(imageDirsWithLabel[j][0])
                if imageDirsWithLabel[j][0] != 0:
                    pos_count += 1

        # Print statistic on how many pictures per person we have collected
        print('Read ' + str(pos_count) + ' positive images and ' +
              str(labels.count(0)) + ' negative images.')
        print('')
        for j in range(1, max(labels) + 1):
            print(
                str(labels.count(j)) + " images from subject " +
                imageDirs[j - 1])

        # Train model
        print('-' * 20)
        print('')
        print('Training model with threshold {0}'.format(
            ToolsConfig.POSITIVE_THRESHOLD))
        model = ToolsConfig.model()

        model.train(np.asarray(faces), np.asarray(labels))

        # Save model results
        model.write(ToolsConfig.TRAINING_FILE)
        print('Training data saved to', ToolsConfig.TRAINING_FILE)
        print('')
        print(
            "Please add or update (if you added new people, not just new images) "
            + str(imageDirs) +
            " inside config.js (mirror module) or config.py (model tester). "
            "You can change the names to whatever you want, just keep the same "
            "order and you'll be fine.")
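
For context, a minimal prediction sketch showing how the model written by train() could be queried afterwards. It assumes ToolsConfig.model() returns one of OpenCV's cv2.face recognizers (whose predict() returns a (label, confidence) pair); the test image path is a placeholder.

# Hedged prediction sketch; assumes an OpenCV cv2.face recognizer behind ToolsConfig.model().
import cv2
from lib.tools.config import ToolsConfig

model = ToolsConfig.model()
model.load(ToolsConfig.TRAINING_FILE)  # same file written by train() above
# For Eigenfaces/Fisherfaces the test image must have the same size as the training images.
test_image = cv2.imread('some_face.pgm', cv2.IMREAD_GRAYSCALE)  # placeholder path
label, confidence = model.predict(test_image)
print('Predicted label', label, 'with confidence', confidence)
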
Example #3
import cv2  # OpenCV Library
from lib.common.face import FaceDetection
from lib.tools.config import ToolsConfig
import time
import os
import signal
import sys

model = ToolsConfig.model()
face = ToolsConfig.getFaceAndEyesDetection()
camera = ToolsConfig.getCamera()

print('Loading training data...')
model.load("training.xml")
print('Training data loaded!')


def clean_shutdown(signum, frame):
    """Release camera and close windows
    """
    camera.stop()
    cv2.destroyAllWindows()
    sys.exit(0)


signal.signal(signal.SIGINT, clean_shutdown)
signal.signal(signal.SIGTERM, clean_shutdown)
signal.signal(signal.SIGSEGV, clean_shutdown)

# Loading the training data should give enough time to
# warm up the picamera. If not, uncomment the following
Example #4
def __init__(self, capName=None, captureAmount=20):
    # Face detector plus the capture (subject) name and number of images to take.
    self.face = ToolsConfig.getFaceDetection()
    self.captureName = capName
    self.captureAmount = captureAmount
Example #5
def __init__(self):
    self.face = ToolsConfig.getFaceDetection()