Code Example #1
File: runners.py  Project: ymitiku/Emopy
import cv2
import dlib

# IMG_SIZE, TEST_TYPE, TEST_IMAGE and TEST_VIDEO, together with
# Preprocessor, PostProcessor, NeuralNet, SevenEmotionsClassifier and
# process_video, come from the Emopy project's own modules.
def run_test():
    input_shape = (IMG_SIZE[0], IMG_SIZE[1], 1)
    classifier = SevenEmotionsClassifier()
    preprocessor = Preprocessor(classifier, input_shape=input_shape)
    postProcessor = PostProcessor(classifier)
    neuralNet = NeuralNet(input_shape, preprocessor=preprocessor, train=False)
    face_detector = dlib.get_frontal_face_detector()

    if TEST_TYPE == "image":
        img = cv2.imread(TEST_IMAGE)
        faces, rectangles = preprocessor.get_faces(img, face_detector)
        # Classify each detected face; sanitize() prepares each crop for
        # the network's input shape.
        predictions = []
        for face in faces:
            face = preprocessor.sanitize(face)
            predictions.append(neuralNet.predict(face))

        # The PostProcessor instance is callable and annotates img in place.
        postProcessor(img, rectangles, predictions)
        cv2.imshow("Image", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    elif TEST_TYPE == "video":
        process_video(TEST_VIDEO, preprocessor, postProcessor, neuralNet)
    elif TEST_TYPE == "webcam":
        # A device index of -1 selects the default camera.
        process_video(-1, preprocessor, postProcessor, neuralNet)
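The snippet above calls postProcessor(img, rectangles, predictions) as a
callable. A minimal sketch of what such a __call__ might look like,
assuming rectangles are (x, y, w, h) tuples and a hypothetical
classifier.get_string() that maps a prediction index to an emotion name
(neither detail is confirmed by the project):

import cv2
import numpy as np

class PostProcessor(object):
    def __init__(self, classifier):
        self.classifier = classifier

    def __call__(self, img, rectangles, predictions):
        # Draw one bounding box and emotion label per face, in place.
        for (x, y, w, h), prediction in zip(rectangles, predictions):
            emotion = self.classifier.get_string(np.argmax(prediction))
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(img, emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)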
Code Example #2
File: dlib_input.py  Project: ymitiku/Emopy
    def __init__(self,
                 classifier,
                 input_shape=None,
                 batch_size=32,
                 augmentation=False,
                 verbose=True):
        # Extends the base Preprocessor with dlib facial-landmark features.
        Preprocessor.__init__(self, classifier, input_shape, batch_size,
                              augmentation, verbose)
        self.predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)
        self.feature_extractor = DlibFeatureExtractor(self.predictor)
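SHAPE_PREDICTOR_PATH is expected to point at a trained dlib landmark
model, typically shape_predictor_68_face_landmarks.dat. A minimal sketch
of how such a predictor produces landmark coordinates for a detected face
(the file paths here are placeholders):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = cv2.imread("face.jpg")          # placeholder image path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray):
    shape = predictor(gray, rect)     # fits the 68-point landmark model
    points = [(shape.part(i).x, shape.part(i).y)
              for i in range(shape.num_parts)]
    # points is the kind of input a feature extractor such as
    # DlibFeatureExtractor would consume.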
Code Example #3
File: sequencial.py  Project: ymitiku/Emopy
    def __init__(self, classifier, input_shape=None, batch_size=BATCH_SIZE,
                 augmentation=False, verbose=True, max_sequence_length=6):
        Preprocessor.__init__(self, classifier, input_shape, batch_size,
                              augmentation, verbose)
        self.max_sequence_length = max_sequence_length
        # Keras generator used for on-the-fly augmentation of face images.
        self.datagenerator = ImageDataGenerator(
            rotation_range=40,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.1,
            zoom_range=0.1,
            horizontal_flip=True,
            data_format="channels_last"
        )
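A quick sketch of how a generator configured this way yields augmented
batches (the array shapes are illustrative):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.1,
                             zoom_range=0.1,
                             horizontal_flip=True,
                             data_format="channels_last")

# A dummy batch of 48x48 grayscale faces: (samples, height, width, channels).
x = np.random.rand(8, 48, 48, 1)
batches = datagen.flow(x, batch_size=4, shuffle=True)
augmented = next(batches)   # one randomly transformed batch
print(augmented.shape)      # (4, 48, 48, 1)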
Code Example #4
File: runners.py  Project: ymitiku/Emopy
def run_train():
    # NETWORK_TYPE, INPUT_SHAPE and AUGMENTATION are project-level
    # configuration constants.
    input_shape = INPUT_SHAPE
    classifier = SevenEmotionsClassifier()
    if NETWORK_TYPE == "mi":
        preprocessor = MultiInputPreprocessor(classifier,
                                              input_shape=input_shape,
                                              augmentation=AUGMENTATION)
        neuralNet = MultiInputNeuralNet(input_shape,
                                        preprocessor=preprocessor,
                                        train=True)
    elif NETWORK_TYPE == "si":
        preprocessor = Preprocessor(classifier,
                                    input_shape=input_shape,
                                    augmentation=AUGMENTATION)
        neuralNet = NeuralNet(input_shape,
                              preprocessor=preprocessor,
                              train=True)
    elif NETWORK_TYPE == "rnn":
        preprocessor = SequencialPreprocessor(
            classifier, input_shape=input_shape,
            augmentation=AUGMENTATION)("dataset/ck-split")
        neuralNet = LSTMNet(input_shape, preprocessor=preprocessor, train=True)

    elif NETWORK_TYPE == "vgg":
        preprocessor = Preprocessor(classifier,
                                    input_shape=input_shape,
                                    augmentation=AUGMENTATION)
        neuralNet = VGGFaceEmopyNet(input_shape,
                                    preprocessor=preprocessor,
                                    train=True)
    elif NETWORK_TYPE == "drnn":
        preprocessor = DlibSequencialPreprocessor(
            classifier, input_shape=input_shape,
            augmentation=AUGMENTATION)("dataset/ck-split")
        neuralNet = DlibLSTMNet(input_shape,
                                preprocessor=preprocessor,
                                train=True)

    else:
        # No network is constructed in this branch, so return after the
        # video is processed instead of falling through to train().
        process = EmopySequencialProcess(input_shape, 6)
        process.process_video(
            "/home/mtk/iCog/projects/emopy/test-videos/75Emotions.mp4")
        return

    neuralNet.train()
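run_train() dispatches on module-level constants. A hypothetical config
module consistent with the constants used in these snippets (the names
match the code; the values are illustrative only):

# config.py (illustrative values)
IMG_SIZE = (48, 48)
INPUT_SHAPE = (48, 48, 1)      # grayscale face crops
NETWORK_TYPE = "si"            # one of: mi, si, rnn, vgg, drnn
AUGMENTATION = True
BATCH_SIZE = 32

TEST_TYPE = "image"            # one of: image, video, webcam
TEST_IMAGE = "test.jpg"
TEST_VIDEO = "test.mp4"
SHAPE_PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"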
Code Example #5
    def __init__(self,
                 classifier,
                 input_shape=None,
                 batch_size=32,
                 augmentation=False,
                 verbose=True):
        """
        Args:
            classifier: emotion classifier instance
            input_shape: shape of the input images
            batch_size: number of samples per batch
            augmentation: whether to augment the training data
            verbose: whether to print progress information
        """
        Preprocessor.__init__(self, classifier, input_shape, batch_size,
                              augmentation, verbose)
        # dlib.shape_predictor() without a model file constructs an empty
        # predictor; Code Example #2 passes SHAPE_PREDICTOR_PATH.
        self.predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)
        self.feature_extractor = DlibFeatureExtractor(self.predictor)
Code Example #6
    def __init__(self,
                 classifier,
                 input_shape=None,
                 batch_size=BATCH_SIZE,
                 augmentation=False,
                 verbose=True,
                 max_sequence_length=10):
        """
        Args:
            classifier: emotion classifier instance
            input_shape: shape of the input images
            batch_size: number of samples per batch
            augmentation: whether to augment the training data
            verbose: whether to print progress information
            max_sequence_length: maximum number of frames per sequence
        """
        Preprocessor.__init__(self, classifier, input_shape, batch_size,
                              augmentation, verbose)
        self.max_sequence_length = max_sequence_length
        self.datagenerator = ImageDataGenerator(rotation_range=20,
                                                zoom_range=0.2,
                                                height_shift_range=0.2,
                                                width_shift_range=0.2)
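For sequence input, augmentation is typically applied frame by frame. A
sketch using the generator's random_transform on individual frames (the
shapes are illustrative; whether Emopy augments per frame or per sequence
is not shown in these snippets):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=20,
                             zoom_range=0.2,
                             height_shift_range=0.2,
                             width_shift_range=0.2)

# A dummy sequence of 10 grayscale frames.
sequence = np.random.rand(10, 48, 48, 1)
# Each call to random_transform draws fresh random parameters.
augmented = np.stack([datagen.random_transform(frame) for frame in sequence])
print(augmented.shape)  # (10, 48, 48, 1)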
Code Example #7
    def __init__(self, input_shape):
        self.classifier = SevenEmotionsClassifier()
        self.input_shape = input_shape
        self.preprocessor = Preprocessor(self.classifier,
                                         input_shape=self.input_shape,
                                         batch_size=BATCH_SIZE,
                                         augmentation=False,
                                         verbose=True)
        self.neuralNet = NeuralNet(self.input_shape, self.preprocessor)
Code Example #8
def get_network(args):

    if args.emotions == "all":
        classifier = SevenEmotionsClassifier()
    elif args.emotions == "pos-neg":
        classifier = PositiveNegetiveClassifier()
    elif args.emotions == "pos-neu":
        classifier = PositiveNeutralClassifier()
    else:
        raise Exception(
            "emotions should be one of 'all', 'pos-neg' or 'pos-neu', "
            "but it is " + str(args.emotions))
    input_shape = (48, 48, 1)

    if args.net == "face":
        preprocessor = Preprocessor(classifier,
                                    input_shape=input_shape,
                                    batch_size=args.batch,
                                    augmentation=args.augmentation,
                                    verbose=args.verbose)
        lgr = EmopyLogger(["logs/log.txt"])
        net = NeuralNet(input_shape, preprocessor, logger=lgr, train=True)
        return net
    elif args.net == "dlib":
        preprocessor = DlibInputPreprocessor(classifier,
                                             input_shape=input_shape,
                                             batch_size=args.batch,
                                             augmentation=args.augmentation,
                                             verbose=args.verbose)
        lgr = EmopyLogger(["logs/log.txt"])
        net = DlibPointsInputNeuralNet(input_shape,
                                       preprocessor,
                                       logger=lgr,
                                       train=True)
        return net
    elif args.net == "face+dlib":
        preprocessor = MultiInputPreprocessor(classifier,
                                              input_shape=input_shape,
                                              batch_size=args.batch,
                                              augmentation=args.augmentation,
                                              verbose=args.verbose)
        lgr = EmopyLogger(["logs/log.txt"])
        net = MultiInputNeuralNet(input_shape,
                                  preprocessor,
                                  logger=lgr,
                                  train=True)
        return net
    elif args.net == "vgg-face":
        input_shape = (48, 48, 3)
        preprocessor = Preprocessor(classifier,
                                    input_shape=input_shape,
                                    batch_size=args.batch,
                                    augmentation=args.augmentation,
                                    verbose=args.verbose)
        lgr = EmopyLogger(["logs/log.txt"])
        net = VGGFaceEmopyNet((48, 48, 1),
                              preprocessor,
                              logger=lgr,
                              train=True)
        return net
    elif args.net == "rnn":
        preprocessor = SequencialPreprocessor(
            classifier,
            input_shape=input_shape,
            batch_size=args.batch,
            augmentation=args.augmentation,
            verbose=args.verbose,
            max_sequence_length=args.sequence_length)
        lgr = EmopyLogger(["logs/log.txt"])
        net = LSTMNet(input_shape,
                      preprocessor=preprocessor,
                      logger=lgr,
                      train=True,
                      max_sequence_length=args.sequence_length)
        return net
    elif args.net == "dlib-rnn":
        preprocessor = DlibSequencialPreprocessor(
            classifier,
            input_shape=input_shape,
            batch_size=args.batch,
            augmentation=args.augmentation,
            verbose=args.verbose,
            max_sequence_length=args.sequence_length)
        lgr = EmopyLogger(["logs/log.txt"])
        net = DlibLSTMNet(input_shape,
                          preprocessor=preprocessor,
                          logger=lgr,
                          train=True,
                          max_sequence_length=args.sequence_length)
        return net
    else:
        raise Exception(
            "net arg should be one of 'face', 'dlib', 'face+dlib', "
            "'vgg-face', 'rnn' or 'dlib-rnn', but it is " + str(args.net))
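get_network reads net, emotions, batch, augmentation, verbose and
sequence_length off the args object. A hypothetical argparse setup that
supplies them (the flag names are inferred from the attribute names, not
confirmed by the project):

import argparse

parser = argparse.ArgumentParser(description="Train an Emopy network")
parser.add_argument("--net", default="face",
                    choices=["face", "dlib", "face+dlib",
                             "vgg-face", "rnn", "dlib-rnn"])
parser.add_argument("--emotions", default="all",
                    choices=["all", "pos-neg", "pos-neu"])
parser.add_argument("--batch", type=int, default=32)
parser.add_argument("--augmentation", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--sequence_length", type=int, default=10)

args = parser.parse_args()
net = get_network(args)
net.train()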