Example #1
0
    def __init__(self,
                 input_shape,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Set up the dlib-feature ("dinn") network configuration and model.

        Args:
            input_shape: shape of a single input sample (stored only).
            preprocessor: object exposing ``classifier.get_num_class()``.
            logger: optional EmopyLogger; a default one is created if None.
            train: if True build a fresh model, otherwise load MODEL_PATH.
        """
        self.input_shape = input_shape
        self.models_local_folder = "dinn"
        self.logs_local_folder = self.models_local_folder
        self.preprocessor = preprocessor

        # Training hyper-parameters come from module-level constants.
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE
        self.steps_per_epoch = STEPS_PER_EPOCH

        log_dir = os.path.join(LOG_DIR, self.logs_local_folder)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        self.logger = logger if logger is not None else EmopyLogger(
            [os.path.join(log_dir, "nn.txt")])

        self.feature_extractors = ["dlib"]
        self.number_of_class = self.preprocessor.classifier.get_num_class()
        self.model = self.build() if train else self.load_model(MODEL_PATH)
Example #2
0
    def __init__(self,
                 input_shape,
                 learning_rate,
                 batch_size,
                 epochs,
                 steps_per_epoch,
                 data_set_dir,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Configure the image-input ("nn") network.

        Args:
            input_shape: 3-tuple (height, width, channels) of input images.
            learning_rate: optimizer learning rate.
            batch_size: samples per training batch.
            epochs: number of training epochs.
            steps_per_epoch: batches per epoch.
            data_set_dir: dataset root directory (stored for later training).
            preprocessor: object exposing ``classifier.get_num_class()``.
            logger: optional EmopyLogger; a default one is created if None.
            train: if True load the training checkpoint, else MODEL_PATH.
        """
        self.input_shape = input_shape
        assert len(
            input_shape
        ) == 3, "Input shape of neural network should be length of 3. e.g (48,48,1)"
        self.models_local_folder = "nn"
        self.logs_local_folder = self.models_local_folder
        self.preprocessor = preprocessor

        self.epochs = epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.steps_per_epoch = steps_per_epoch
        # BUG FIX: data_set_dir was accepted but never stored, forcing
        # training code to fall back to a module-level constant.
        self.data_set_dir = data_set_dir

        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger(
                [os.path.join(LOG_DIR, self.logs_local_folder, "nn.txt")])
        else:
            self.logger = logger
        self.feature_extractors = ["image"]
        self.number_of_class = self.preprocessor.classifier.get_num_class()
        if train:
            # TODO(review): build() is bypassed in favor of a hard-coded
            # checkpoint — looks like a debugging leftover; confirm intent.
            # self.model = self.build()
            self.model = self.load_model("models/nn/nn-16")
        else:
            self.model = self.load_model(MODEL_PATH)
Example #3
0
    def __init__(self,
                 input_shape,
                 convnet_model_path=None,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Configure the recurrent "drnn" network on top of LSTMNet.

        Args:
            input_shape: shape of one input frame, forwarded to LSTMNet.
            convnet_model_path: pretrained convnet path, forwarded to LSTMNet.
            preprocessor: preprocessor object, forwarded to LSTMNet.
            logger: optional EmopyLogger; a default one is created if None.
            train: forwarded to LSTMNet.__init__.
        """
        LSTMNet.__init__(self, input_shape, convnet_model_path, preprocessor,
                         logger, train)

        self.models_local_folder = "drnn"
        self.logs_local_folder = self.models_local_folder
        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            log_file = os.path.join(LOG_DIR, self.logs_local_folder,
                                    self.logs_local_folder + ".txt")
            self.logger = EmopyLogger([log_file])
            print("Logging to file", log_file)
        else:
            self.logger = logger
        # BUG FIX: a model was previously built here and then immediately
        # discarded by the load below; the wasted build() call was removed.
        # TODO(review): the checkpoint path is hard-coded — confirm intent.
        self.model = self.load_model("models/drnn/drnn-2")
Example #4
0
    def __init__(self,
                 input_shape,
                 convnet_model_path=None,
                 preprocessor=None,
                 logger=None,
                 train=True,
                 postProcessor=None):
        """Configure the sequence ("rnn") network.

        Args:
            input_shape: shape of one input frame.
            convnet_model_path: path prefix of a pretrained convnet.
            preprocessor: preprocessor object passed through to NeuralNet.
            logger: optional EmopyLogger; a default one is created if None.
            train: forwarded to NeuralNet.__init__.
            postProcessor: optional callable used to post-process predictions.
        """
        self.convnet_model_path = convnet_model_path
        # Fixed number of consecutive frames fed to the model per sample.
        self.max_sequence_length = 10
        self.postProcessor = postProcessor
        # NOTE(review): the NeuralNet.__init__ visible elsewhere in this file
        # expects (input_shape, learning_rate, batch_size, epochs,
        # steps_per_epoch, data_set_dir, ...); this four-argument call would
        # misbind them — confirm which NeuralNet version this code targets.
        NeuralNet.__init__(self, input_shape, preprocessor, logger, train)
        self.models_local_folder = "rnn"
        self.logs_local_folder = self.models_local_folder
        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger([
                os.path.join(LOG_DIR, self.logs_local_folder,
                             self.logs_local_folder + ".txt")
            ])
            print(
                "Logging to file",
                os.path.join(LOG_DIR, self.logs_local_folder,
                             self.logs_local_folder + ".txt"))
        else:
            self.logger = logger
        self.model = self.build()
Example #5
0
    def __init__(self, data_out_dir, model_out_dir, net_type, input_shape,
                 learning_rate, batch_size, steps_per_epoch, epochs,
                 preprocessor, logger, session):
        """Initialize the basic class variables.

        It is important to set the TAG of the net directly after calling
        super().__init__ and especially before initializing the logger.

        Args:
            data_out_dir: directory where the data_collectors outputted to.
            model_out_dir: directory where weights, logs and visualizations
                of the weights are saved.
            net_type: short name of the network; also the subfolder used
                under model_out_dir for logs and weights.
            input_shape: the shape (width & height) of the input images.
            learning_rate: the chosen learning rate.
            batch_size: the amount of items per batch.
            steps_per_epoch: the amount of batches per epoch.
            epochs: the amount of epochs.
            preprocessor: a dedicated preprocessor; must expose
                ``classifier.get_num_class()``.
            logger: the standard logger (util/BaseLogger.py); a default
                EmopyLogger is created when None.
            session: training/evaluation session identifier.
        """
        self.net_type = net_type
        self.session = session
        self.data_dir = data_out_dir
        self.model_out_dir = model_out_dir

        self.input_shape = input_shape
        self.preprocessor = preprocessor
        self.number_of_classes = self.preprocessor.classifier.get_num_class()
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs

        self.lr_decay = 0.99
        self.model = None

        if not os.path.exists(os.path.join(model_out_dir, self.net_type)):
            os.makedirs(os.path.join(model_out_dir, self.net_type))
        # BUG FIX: self.logger was previously assigned unconditionally before
        # this branch; that dead store was removed — this branch alone decides.
        if logger is None:
            self.logger = EmopyLogger([
                os.path.join(model_out_dir, self.net_type,
                             "%s.log" % self.net_type)
            ])
        else:
            self.logger = logger
Example #6
0
class NeuralNet(object):
    """
    Base class for all neural keras_models.

    Parameters
    ----------
    input_shape : tuple
        Shape (height, width, channels) of the input images, e.g. (48, 48, 1).
    """
    def __init__(self,
                 input_shape,
                 learning_rate,
                 batch_size,
                 epochs,
                 steps_per_epoch,
                 data_set_dir,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Store hyper-parameters, set up logging and build/load the model.

        Args:
            input_shape: 3-tuple shape of one input image.
            learning_rate: Adam learning rate used by train().
            batch_size: samples per training batch.
            epochs: number of training epochs.
            steps_per_epoch: batches per epoch.
            data_set_dir: root directory of the dataset used by train().
            preprocessor: class/callable exposing ``classifier.get_num_class()``
                and, once instantiated in train(), ``flow()`` and test arrays.
            logger: optional EmopyLogger; a default one is created if None.
            train: if True load the training checkpoint, else MODEL_PATH.
        """
        self.input_shape = input_shape
        assert len(
            input_shape
        ) == 3, "Input shape of neural network should be length of 3. e.g (48,48,1)"
        self.models_local_folder = "nn"
        self.logs_local_folder = self.models_local_folder
        self.preprocessor = preprocessor

        self.epochs = epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.steps_per_epoch = steps_per_epoch
        # BUG FIX: data_set_dir was accepted but never stored; train() was
        # forced to read the DATA_SET_DIR global instead.
        self.data_set_dir = data_set_dir

        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger(
                [os.path.join(LOG_DIR, self.logs_local_folder, "nn.txt")])
        else:
            self.logger = logger
        self.feature_extractors = ["image"]
        self.number_of_class = self.preprocessor.classifier.get_num_class()
        if train:
            # TODO(review): build() is bypassed in favor of a hard-coded
            # checkpoint — likely a debugging leftover; confirm intent.
            # self.model = self.build()
            self.model = self.load_model("models/nn/nn-16")
        else:
            self.model = self.load_model(MODEL_PATH)

    def build(self):
        """
        Build neural network model.

        Returns
        -------
        keras.models.Model :
            Sequential CNN mapping an image of ``self.input_shape`` to a
            softmax over ``self.number_of_class`` classes.
        """
        # TODO rework, use capsule impl., PReLU, BN
        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding="same",
                   input_shape=self.input_shape,
                   kernel_initializer="glorot_normal"))
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding="same",
                   kernel_initializer="glorot_normal"))
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   padding="same",
                   kernel_initializer="glorot_normal"))
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   padding="same",
                   kernel_initializer="glorot_normal"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        # TODO REWORK DENSE LAYERS
        model.add(Dense(252, activation='relu'))
        model.add(Dense(1024, activation='relu'))
        model.add(Dense(self.number_of_class, activation='softmax'))

        self.built = True
        return model

    def load_model(self, model_path):
        """Load a model's architecture and weights from disk.

        Args:
            model_path: path prefix; ``<model_path>.json`` holds the
                architecture and ``<model_path>.h5`` the weights.

        Returns:
            The reconstructed keras model with weights loaded.
        """
        with open(model_path + ".json") as model_file:
            model = model_from_json(model_file.read())
        model.load_weights(model_path + ".h5")
        return model

    def save_model(self):
        """
        Saves NeuralNet model. The naming convention for the json and h5 files is
        `/path-to-models/model-local-folder-model-number.json` and
        `/path-to-models/model-local-folder-model-number.h5` respectively.
        This method also increments model_number inside "model_number.txt" file.
        """
        if not os.path.exists(PATH2SAVE_MODELS):
            os.makedirs(PATH2SAVE_MODELS)
        model_dir = os.path.join(PATH2SAVE_MODELS, self.models_local_folder)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        number_file = os.path.join(model_dir, "model_number.txt")
        if not os.path.exists(number_file):
            model_number = np.array([0])
        else:
            model_number = np.fromfile(number_file, dtype=int)
        model_file_name = self.models_local_folder + "-" + str(model_number[0])
        # BUG FIX: the json file was opened in append mode ("a+"); if the
        # counter file was ever reset, a second save appended a second JSON
        # document and corrupted the file. Overwrite ("w") is the intent.
        with open(os.path.join(model_dir, model_file_name + ".json"),
                  "w") as jfile:
            jfile.write(self.model.to_json())
        self.model.save_weights(
            os.path.join(model_dir, model_file_name + ".h5"))
        model_number[0] += 1
        model_number.tofile(number_file)

    def train(self):
        """Train the model using the stored preprocessor and dataset dir.

        This method requires the following two directories to exist:
        /PATH-TO-DATASET-DIR/train
        /PATH-TO-DATASET-DIR/test
        """
        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adam(self.learning_rate),
                           metrics=['accuracy'])
        # BUG FIX: training previously read the DATA_SET_DIR global instead
        # of the data_set_dir the constructor received.
        # NOTE: self.preprocessor is replaced by an instance here, so train()
        # can only be called once per object.
        self.preprocessor = self.preprocessor(self.data_set_dir)
        self.model.fit_generator(
            self.preprocessor.flow(),
            steps_per_epoch=self.steps_per_epoch,
            epochs=self.epochs,
            validation_data=(self.preprocessor.test_images,
                             self.preprocessor.test_image_emotions))
        score = self.model.evaluate(self.preprocessor.test_images,
                                    self.preprocessor.test_image_emotions)
        self.save_model()
        self.logger.log_model(self.models_local_folder, score)

    def predict(self, face):
        """Predict emotion probabilities for a single grayscale face image.

        Args:
            face: array whose shape equals IMG_SIZE.

        Returns:
            Array of per-class probabilities (batch of one).
        """
        assert face.shape == IMG_SIZE, "Face image size should be " + str(
            IMG_SIZE)
        # NOTE(review): the reshape assumes 64x64 inputs while build() uses
        # self.input_shape — confirm IMG_SIZE is (64, 64).
        face = face.reshape(-1, 64, 64, 1)
        face = face.astype(np.float32) / 255
        emotions = self.model.predict(face)
        return emotions
Example #7
0
class MultiInputNeuralNet(NeuralNet):
    """
    Neutral network whose inputs are images, dlib points, dlib points distances from centroid point
    and dlib points vector angle with respect to centroid vector.

    Parameters
    ----------
    input_shape : tuple
        Shape (height, width, channels) of the image input branch.
    """
    def __init__(self,
                 input_shape,
                 learning_rate,
                 batch_size,
                 epochs,
                 steps_per_epoch,
                 dataset_dir,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Store hyper-parameters, set up logging and build/load the model.

        Args:
            input_shape: 3-tuple shape of one input image, e.g. (48, 48, 1).
            learning_rate: Adam learning rate used by train().
            batch_size: samples per training batch (stored).
            epochs: number of training epochs.
            steps_per_epoch: batches per epoch.
            dataset_dir: root directory of the dataset used by train().
            preprocessor: class/callable exposing ``classifier.get_num_class()``
                and, once instantiated in train(), ``flow()`` and test arrays.
            logger: optional EmopyLogger; a default one is created if None.
            train: if True build a fresh model, otherwise load MODEL_PATH.
        """
        self.input_shape = input_shape
        assert len(
            input_shape
        ) == 3, "Input shape of neural network should be length of 3. e.g (48,48,1)"
        self.models_local_folder = "minn"
        self.logs_local_folder = self.models_local_folder
        self.preprocessor = preprocessor
        self.epochs = epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.steps_per_epoch = steps_per_epoch
        self.dataset_dir = dataset_dir

        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger(
                [os.path.join(LOG_DIR, self.logs_local_folder, "nn.txt")])
        else:
            self.logger = logger
        self.feature_extractors = ["image"]
        self.number_of_class = self.preprocessor.classifier.get_num_class()
        if train:
            self.model = self.build()
        else:
            self.model = self.load_model(MODEL_PATH)

    def build(self):
        """Build the four-branch (image + three dlib-feature) network.

        Returns
        -------
        keras.models.Model :
            Functional model with four inputs (image, 68 landmark (x, y)
            points, 68 centroid distances, 68 centroid angles) and a softmax
            output over ``self.number_of_class`` classes.
        """
        # Branch 1: the raw image, three conv blocks with two poolings.
        image_input_layer = Input(shape=self.input_shape)
        image_layer = Conv2D(
            32, (3, 3),
            activation='relu',
            padding="valid",
            kernel_initializer="glorot_normal")(image_input_layer)
        image_layer = MaxPooling2D(pool_size=(2, 2))(image_layer)
        image_layer = Conv2D(64, (3, 3),
                             activation="relu",
                             padding="valid",
                             kernel_initializer="glorot_normal")(image_layer)
        image_layer = MaxPooling2D(pool_size=(2, 2))(image_layer)
        image_layer = Conv2D(128, (3, 3),
                             activation="relu",
                             padding="valid",
                             kernel_initializer="glorot_normal")(image_layer)
        image_layer = Flatten()(image_layer)

        # Branch 2: 68 dlib landmark coordinates, shape (1, 68, 2).
        dlib_points_input_layer = Input(shape=(1, 68, 2))
        dlib_points_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="valid",
            kernel_initializer="glorot_normal")(dlib_points_input_layer)
        dlib_points_layer = MaxPooling2D(pool_size=(1, 2))(dlib_points_layer)
        dlib_points_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer="glorot_normal")(dlib_points_layer)
        dlib_points_layer = MaxPooling2D(pool_size=(1, 2))(dlib_points_layer)
        dlib_points_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer="glorot_normal")(dlib_points_layer)

        dlib_points_layer = Flatten()(dlib_points_layer)

        # Branch 3: distance of each landmark from the centroid, (1, 68, 1).
        dlib_points_dist_input_layer = Input(shape=(1, 68, 1))
        dlib_points_dist_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="valid",
            kernel_initializer="glorot_normal")(dlib_points_dist_input_layer)
        dlib_points_dist_layer = MaxPooling2D(
            pool_size=(1, 2))(dlib_points_dist_layer)
        dlib_points_dist_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer='glorot_normal')(dlib_points_dist_layer)
        dlib_points_dist_layer = MaxPooling2D(
            pool_size=(1, 2))(dlib_points_dist_layer)
        dlib_points_dist_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer='glorot_normal')(dlib_points_dist_layer)

        dlib_points_dist_layer = Flatten()(dlib_points_dist_layer)

        # Branch 4: angle of each landmark w.r.t. the centroid, (1, 68, 1).
        dlib_points_angle_input_layer = Input(shape=(1, 68, 1))
        dlib_points_angle_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="valid",
            kernel_initializer="glorot_normal")(dlib_points_angle_input_layer)
        dlib_points_angle_layer = MaxPooling2D(
            pool_size=(1, 2))(dlib_points_angle_layer)
        dlib_points_angle_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer='glorot_normal')(dlib_points_angle_layer)
        dlib_points_angle_layer = MaxPooling2D(
            pool_size=(1, 2))(dlib_points_angle_layer)
        dlib_points_angle_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="valid",
            kernel_initializer='glorot_normal')(dlib_points_angle_layer)

        dlib_points_angle_layer = Flatten()(dlib_points_angle_layer)

        # Merge all four branches and classify with two dense layers.
        merged_layers = keras.layers.concatenate([
            image_layer, dlib_points_layer, dlib_points_dist_layer,
            dlib_points_angle_layer
        ])

        merged_layers = Dense(252, activation='relu')(merged_layers)
        merged_layers = Dense(1024, activation='relu')(merged_layers)
        merged_layers = Dropout(0.2)(merged_layers)
        merged_layers = Dense(self.number_of_class,
                              activation='softmax')(merged_layers)

        self.model = Model(inputs=[
            image_input_layer, dlib_points_input_layer,
            dlib_points_dist_input_layer, dlib_points_angle_input_layer
        ],
                           outputs=merged_layers)
        self.built = True
        return self.model

    def train(self):
        """Train the four-input model with data from ``self.dataset_dir``.

        This method requires the following two directories to exist:
        /PATH-TO-DATASET-DIR/train
        /PATH-TO-DATASET-DIR/test
        """
        assert self.built == True, "Model not built yet."

        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adam(self.learning_rate),
                           metrics=['accuracy'])
        # self.model.fit(x_train,y_train,epochs = EPOCHS,
        #                 batch_size = BATCH_SIZE,validation_data=(x_test,y_test))
        # NOTE: self.preprocessor is replaced by an instance here, so train()
        # can only be called once per object.
        self.preprocessor = self.preprocessor(self.dataset_dir)
        print("lr", self.learning_rate)
        print("batch_size", self.batch_size)
        self.model.fit_generator(self.preprocessor.flow(),
                                 steps_per_epoch=self.steps_per_epoch,
                                 epochs=self.epochs,
                                 validation_data=([
                                     self.preprocessor.test_images,
                                     self.preprocessor.test_dpoints,
                                     self.preprocessor.dpointsDists,
                                     self.preprocessor.dpointsAngles
                                 ], self.preprocessor.test_image_emotions))
        score = self.model.evaluate([
            self.preprocessor.test_images, self.preprocessor.test_dpoints,
            self.preprocessor.dpointsDists, self.preprocessor.dpointsAngles
        ], self.preprocessor.test_image_emotions)
        self.save_model()
        self.logger.log_model(self.models_local_folder, score)

    def predict(self, face):
        """Predict emotion probabilities for a single face image.

        NOTE(review): the model built above takes four inputs, but only a
        single reshaped image is passed to predict() here, and the method
        opens blocking cv2 debug windows — this looks like leftover
        debugging code; confirm before relying on it.

        Args:
            face: array whose shape equals IMG_SIZE.

        Returns:
            1-D array of per-class probabilities.
        """
        assert face.shape == IMG_SIZE, "Face image size should be " + str(
            IMG_SIZE)
        face = face.reshape(1, 64, 64)

        cv2.imshow("img", face)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        emotions = self.model.predict(face)[0]
        return emotions
Example #8
0
class DlibPointsInputNeuralNet(NeuralNet):
    """
    Neutral network whose inputs are dlib points, dlib points distances from centroid point
    and dlib points vector angle with respect to centroid vector.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input sample (stored but not used by build(), which
        hard-codes the three 68-landmark input branches).
    """
    def __init__(self,
                 input_shape,
                 preprocessor=None,
                 logger=None,
                 train=True):
        """Store configuration, set up logging and build/load the model.

        Args:
            input_shape: shape of one input sample (stored only).
            preprocessor: object exposing ``classifier.get_num_class()``.
            logger: optional EmopyLogger; a default one is created if None.
            train: if True build a fresh model, otherwise load MODEL_PATH.
        """
        self.input_shape = input_shape
        self.models_local_folder = "dinn"
        self.logs_local_folder = self.models_local_folder
        self.preprocessor = preprocessor
        # Training hyper-parameters come from module-level constants.
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE
        self.steps_per_epoch = STEPS_PER_EPOCH

        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger(
                [os.path.join(LOG_DIR, self.logs_local_folder, "nn.txt")])
        else:
            self.logger = logger
        self.feature_extractors = ["dlib"]
        self.number_of_class = self.preprocessor.classifier.get_num_class()
        if train:
            self.model = self.build()
        else:
            self.model = self.load_model(MODEL_PATH)

    def build(self):
        """Build the three-branch dlib-feature network.

        Returns
        -------
        keras.models.Model :
            Functional model with three inputs (68 landmark (x, y) points,
            68 centroid distances, 68 centroid angles) and a softmax output
            over ``self.number_of_class`` classes.
        """
        # Branch 1: raw 68 landmark coordinates, shape (1, 68, 2).
        dlib_points_input_layer = Input(shape=(1, 68, 2))
        dlib_points_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="same",
            kernel_initializer="glorot_normal")(dlib_points_input_layer)
        dlib_points_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="same",
            kernel_initializer="glorot_normal")(dlib_points_layer)
        # dlib_points_layer = Conv2D(128,(1, 3),activation = "relu",padding="same",kernel_initializer="glorot_normal")(dlib_points_layer)

        dlib_points_layer = Flatten()(dlib_points_layer)

        # Branch 2: distance of each landmark from the centroid, (1, 68, 1).
        dlib_points_dist_input_layer = Input(shape=(1, 68, 1))
        dlib_points_dist_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="same",
            kernel_initializer="glorot_normal")(dlib_points_dist_input_layer)
        dlib_points_dist_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="same",
            kernel_initializer='glorot_normal')(dlib_points_dist_layer)
        # dlib_points_dist_layer = Conv2D(128,(1, 3),activation = "relu",padding="same",kernel_initializer='glorot_normal')(dlib_points_dist_layer)

        dlib_points_dist_layer = Flatten()(dlib_points_dist_layer)

        # Branch 3: angle of each landmark w.r.t. the centroid, (1, 68, 1).
        dlib_points_angle_input_layer = Input(shape=(1, 68, 1))
        dlib_points_angle_layer = Conv2D(
            32, (1, 3),
            activation='relu',
            padding="same",
            kernel_initializer="glorot_normal")(dlib_points_angle_input_layer)
        dlib_points_angle_layer = Conv2D(
            64, (1, 3),
            activation="relu",
            padding="same",
            kernel_initializer='glorot_normal')(dlib_points_angle_layer)
        # dlib_points_angle_layer = Conv2D(18,(1, 3),activation = "relu",padding="same",kernel_initializer='glorot_normal')(dlib_points_angle_layer)

        dlib_points_angle_layer = Flatten()(dlib_points_angle_layer)

        # Merge the three branches and classify with two dense layers.
        merged_layers = keras.layers.concatenate([
            dlib_points_layer, dlib_points_dist_layer, dlib_points_angle_layer
        ])

        merged_layers = Dense(128, activation='relu')(merged_layers)
        # merged_layers = Dropout(0.2)(merged_layers)
        merged_layers = Dense(1024, activation='relu')(merged_layers)
        merged_layers = Dropout(0.2)(merged_layers)
        merged_layers = Dense(self.number_of_class,
                              activation='softmax')(merged_layers)

        self.model = Model(inputs=[
            dlib_points_input_layer, dlib_points_dist_input_layer,
            dlib_points_angle_input_layer
        ],
                           outputs=merged_layers)
        self.built = True
        return self.model

    def train(self):
        """Train the model using module-level constants and DATA_SET_DIR.

        This method requires the following two directories to exist:
        /PATH-TO-DATASET-DIR/train
        /PATH-TO-DATASET-DIR/test
        """
        assert self.built == True, "Model not built yet."

        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adam(LEARNING_RATE),
                           metrics=['accuracy'])
        # self.model.fit(x_train,y_train,epochs = EPOCHS,
        #                 batch_size = BATCH_SIZE,validation_data=(x_test,y_test))
        # NOTE: self.preprocessor is replaced by an instance here, so train()
        # can only be called once per object.
        self.preprocessor = self.preprocessor(DATA_SET_DIR)
        self.model.summary()
        self.model.fit_generator(self.preprocessor.flow(),
                                 steps_per_epoch=self.steps_per_epoch,
                                 epochs=self.epochs,
                                 validation_data=([
                                     self.preprocessor.test_dpoints,
                                     self.preprocessor.dpointsDists,
                                     self.preprocessor.dpointsAngles
                                 ], self.preprocessor.test_image_emotions))
        score = self.model.evaluate([
            self.preprocessor.test_dpoints, self.preprocessor.dpointsDists,
            self.preprocessor.dpointsAngles
        ], self.preprocessor.test_image_emotions)
        self.save_model()
        self.logger.log_model(self.models_local_folder, score)

    def predict(self, face):
        """Predict emotion probabilities for a single face image.

        NOTE(review): the model built above takes three dlib-feature inputs,
        but a single reshaped image is passed here — this call looks broken;
        confirm against the intended preprocessing pipeline.

        Args:
            face: array whose shape equals IMG_SIZE.

        Returns:
            1-D array of per-class probabilities.
        """
        assert face.shape == IMG_SIZE, "Face image size should be " + str(
            IMG_SIZE)
        face = face.reshape(-1, 48, 48, 1)
        emotions = self.model.predict(face)[0]
        return emotions
Example #9
0
class LSTMNet(NeuralNet):
    """Convolutional-LSTM network classifying emotion over frame sequences.
    """
    def __init__(self,
                 input_shape,
                 convnet_model_path=None,
                 preprocessor=None,
                 logger=None,
                 train=True,
                 postProcessor=None):
        """Store configuration, chain up to NeuralNet and build the model.

        Args:
            input_shape: shape of one input frame.
            convnet_model_path: path prefix of a pretrained convnet used by
                load_model().
            preprocessor: preprocessor object forwarded to NeuralNet.
            logger: optional EmopyLogger; a default one is created if None.
            train: forwarded to NeuralNet.__init__.
            postProcessor: optional callable used in process_web_cam().
        """
        self.convnet_model_path = convnet_model_path
        # Number of consecutive frames fed to the LSTM per sample.
        self.max_sequence_length = 10
        self.postProcessor = postProcessor
        # NOTE(review): the NeuralNet.__init__ visible elsewhere in this file
        # takes (input_shape, learning_rate, batch_size, epochs,
        # steps_per_epoch, data_set_dir, ...); this four-argument call would
        # misbind them — confirm which NeuralNet version this class targets.
        NeuralNet.__init__(self, input_shape, preprocessor, logger, train)
        self.models_local_folder = "rnn"
        self.logs_local_folder = self.models_local_folder
        if not os.path.exists(os.path.join(LOG_DIR, self.logs_local_folder)):
            os.makedirs(os.path.join(LOG_DIR, self.logs_local_folder))
        if logger is None:
            self.logger = EmopyLogger([
                os.path.join(LOG_DIR, self.logs_local_folder,
                             self.logs_local_folder + ".txt")
            ])
            print(
                "Logging to file",
                os.path.join(LOG_DIR, self.logs_local_folder,
                             self.logs_local_folder + ".txt"))
        else:
            self.logger = logger
        self.model = self.build()

    def build(self):
        """Build a TimeDistributed-CNN + LSTM classifier.

        Returns:
            keras.models.Sequential: model mapping a
            (max_sequence_length, 48, 48, 1) frame sequence to 6 softmax
            class probabilities.
        """
        model = Sequential()

        # Apply the same small CNN to every frame of the sequence.
        model.add(
            TimeDistributed(Conv2D(32, (3, 3),
                                   padding='valid',
                                   activation='relu'),
                            input_shape=(self.max_sequence_length, 48, 48, 1)))
        # model.add(TimeDistributed(Conv2D(64,(3,3),padding="valid",activation="relu")))
        # model.add(TimeDistributed(Dropout(0.2)))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
        model.add(TimeDistributed(Flatten()))
        # model.add(Bidirectional(LSTM(128,return_sequences=False,stateful=False,activation="relu",recurrent_dropout=0.2)))
        model.add(
            LSTM(64,
                 return_sequences=False,
                 stateful=False,
                 activation="relu",
                 recurrent_dropout=0.2))
        # model.add(Dropout(0.2))
        # model.add(Dense(128,activation="relu"))
        # model.add(Dropout(0.2))
        # NOTE(review): output size is hard-coded to 6 classes rather than
        # derived from the preprocessor — confirm this is intended.
        model.add(Dense(6, activation="softmax"))

        return model

    def load_model(self, path):
        """Load the pretrained convnet named by self.convnet_model_path.

        Args:
            path: checkpoint path prefix; only its None-ness is consulted.

        Returns:
            The keras model rebuilt from JSON with weights loaded.
        """
        # NOTE(review): a non-None `path` is ignored — the method always
        # loads self.convnet_model_path; confirm whether `path` should win.
        if path is None:
            self.convnet_model_path = "models/nn/nn-5"
        with open(self.convnet_model_path + ".json") as model_file:
            model = model_from_json(model_file.read())
            model.load_weights(self.convnet_model_path + ".h5")
            return model

    def predict(self, sequence_faces):
        """Predict emotion probabilities for a sequence of face images.

        Args:
            sequence_faces: sequence of face images, each of shape IMG_SIZE.

        Returns:
            1-D array of per-class probabilities.
        """
        assert sequence_faces[
            0].shape == IMG_SIZE, "Face image size should be " + str(IMG_SIZE)
        # NOTE(review): `face` is referenced before assignment here, so this
        # line raises NameError — presumably sequence_faces was intended.
        face = face.reshape(-1, self.max_sequence_length, 48, 48, 1)
        emotions = self.model.predict(face)[0]
        return emotions

    def process_web_cam(self):
        """Predict emotions from webcam frames and display the annotated feed.

        NOTE(review): this method references names that are not defined in
        this scope (face_detector, preprocessor, neuralNet, img) and never
        fills `sequences`, so as written it cannot run — it reads like
        unfinished prototype code; verify before use.
        """
        model = model_from_json(open("models/rnn/rnn-0.json").read())
        model.load_weights("models/rnn/rnn-0.h5")
        cap = cv2.VideoCapture(-1)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 300)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
        sequences = np.zeros((self.max_sequence_length, self.input_shape[0],
                              self.input_shape[1], self.input_shape[2]))
        while cap.isOpened():
            # NOTE(review): len(sequences) already equals
            # max_sequence_length, so this inner loop never executes.
            while len(sequences) < self.max_sequence_length:
                ret, frame = cap.read()
                frame = cv2.resize(frame, (300, 240))
                faces, rectangles = self.preprocessor.get_faces(
                    frame, face_detector)
                face = faces[0]
                # NOTE(review): bare expression below is a no-op.
                sequences
            predictions = []
            for i in range(len(faces)):
                face = preprocessor.sanitize(faces[i])
                predictions.append(neuralNet.predict(face))

            self.postProcessor = self.postProcessor(img, rectangles,
                                                    predictions)
            cv2.imshow("Image", img)
            if (cv2.waitKey(10) & 0xFF == ord('q')):
                break
        cv2.destroyAllWindows()

    def train(self):
        """Train the sequence model using module-level constants.

        This method requires the following two directories to exist:
        /PATH-TO-DATASET-DIR/train
        /PATH-TO-DATASET-DIR/test
        """
        print("model")
        self.model.summary()
        print("learning rate", LEARNING_RATE)

        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adam(LEARNING_RATE),
                           metrics=['accuracy'])

        # self.model.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
        print(self.model.output.shape)
        # x_train,x_test,y_train ,y_test = train_test_split(self.X,self.y,test_size=0.3)
        # self.model.fit(x_train,y_train,epochs = EPOCHS,
        #                 batch_size = BATCH_SIZE,validation_data=(x_test,y_test))
        self.model.fit_generator(
            self.preprocessor.flow(),
            steps_per_epoch=STEPS_PER_EPOCH,
            epochs=EPOCHS,
            validation_data=(self.preprocessor.test_sequences_dpoints,
                             self.preprocessor.test_sequence_labels))

        score = self.model.evaluate(self.preprocessor.test_sequences_dpoints,
                                    self.preprocessor.test_sequence_labels)
        self.save_model()
        self.logger.log_model(self.models_local_folder, score)