def train(self, dataPointsTrain, dataPointsValidation, dataAugmentation):
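        # Assumed module-level imports from the original file:
        #   from keras.optimizers import Adagrad
        #   from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
        # plus a project-local `metrics` module supplying the custom metric
        # functions referenced below. `dataAugmentation` is accepted for
        # interface compatibility but is not used in this method.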

        self.logManager.newLogSession("Creating Histogram plot: Training data")
        self.plotManager.createCategoricalHistogram(dataPointsTrain, "train")
        self.logManager.newLogSession(
            "Creating Histogram plot: Validation data")
        self.plotManager.createCategoricalHistogram(dataPointsValidation,
                                                    "validation")

        self.logManager.newLogSession("Training Model")

        optimizer = Adagrad()
        self.optimizerType = "AdaGrad"

        if self.logManager is not None:
            self.logManager.write("--- Training Optimizer: " +
                                  str(self.optimizerType))

            self.logManager.write("--- Training Strategy: " +
                                  str(optimizer.get_config()))

            self.logManager.write("--- Training Batch size: " +
                                  str(self.batchSize))

            self.logManager.write("--- Training Number of Epochs: " +
                                  str(self.numberOfEpochs))

        self.model.compile(loss="categorical_crossentropy",
                           optimizer=optimizer,
                           metrics=[
                               'accuracy', 'categorical_accuracy',
                               metrics.fbeta_score, metrics.recall,
                               metrics.precision
                           ])
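        # NOTE: fbeta_score, recall and precision were built-in metric
        # functions in Keras 1.x and were removed in Keras 2.0, so the call
        # above assumes Keras 1.x or a `metrics` module re-implementing them.
        # A minimal sketch of such a precision metric (hypothetical helper,
        # assuming `from keras import backend as K` at module level):
        #
        #   def precision(y_true, y_pred):
        #       true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        #       predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        #       return true_positives / (predicted_positives + K.epsilon())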

        filepath = self.experimentManager.modelDirectory + "/weights.best.hdf5"
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='val_categorical_accuracy',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode="min",
                                       patience=40)
        reduce_lr = ReduceLROnPlateau(factor=0.5,
                                      monitor='val_loss',
                                      min_lr=1e-5,
                                      patience=2)
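        # The learning rate is halved whenever val_loss stagnates for 2
        # epochs (floored at 1e-5), while training stops only after 40
        # stagnant epochs; the checkpoint above keeps the weights with the
        # best validation categorical accuracy.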

        callbacks_list = [checkpoint, early_stopping, reduce_lr]

        history_callback = self.model.fit(
            dataPointsTrain.dataX,
            dataPointsTrain.dataY,
            batch_size=self.batchSize,
            epochs=self.numberOfEpochs,
            validation_data=(dataPointsValidation.dataX,
                             dataPointsValidation.dataY),
            shuffle=True,
            callbacks=callbacks_list)

        if self.logManager is not None:
            self.logManager.write(str(history_callback.history))
            self.plotManager.createTrainingPlot(history_callback)
            self.logManager.endLogSession()
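
# NOTE: a second train() variant follows; in the original repository this
# presumably belongs to a separate trainer class, since a second definition
# in the same scope would shadow the one above. It trains a two-stream
# (video + audio) model that jointly regresses arousal and valence.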
    def train(self, dataPointsTrain, dataPointsValidation, dataAugmentation):

        self.logManager.newLogSession("Creating Histogram plot: Training data")
        self.plotManager.createCategoricalHistogram(dataPointsTrain, "train")
        self.logManager.newLogSession(
            "Creating Histogram plot: Validation data")
        self.plotManager.createCategoricalHistogram(dataPointsValidation,
                                                    "validation")

        self.logManager.newLogSession("Training Model")

        optimizer = Adagrad(lr=0.01, epsilon=1e-08, decay=0.0001)
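        # lr/epsilon/decay is the legacy keras.optimizers.Adagrad signature
        # (standalone Keras); in tf.keras the argument is `learning_rate`.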
        self.optimizerType = "AdaGrad"

        if self.logManager is not None:
            self.logManager.write("--- Training Optimizer: " +
                                  str(self.optimizerType))

            self.logManager.write("--- Training Strategy: " +
                                  str(optimizer.get_config()))

            self.logManager.write("--- Training Batch size: " +
                                  str(self.batchSize))

            self.logManager.write("--- Training Number of Epochs: " +
                                  str(self.numberOfEpochs))

        self.model.compile(loss="mean_absolute_error",
                           optimizer=optimizer,
                           metrics=['mse', metrics.ccc])
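        # NOTE: `metrics.ccc` is assumed to be a project-local concordance
        # correlation coefficient (CCC) metric, the usual agreement measure
        # for arousal/valence regression. A minimal sketch (hypothetical
        # helper, assuming `from keras import backend as K`):
        #
        #   def ccc(y_true, y_pred):
        #       mean_t, mean_p = K.mean(y_true), K.mean(y_pred)
        #       var_t, var_p = K.var(y_true), K.var(y_pred)
        #       cov = K.mean((y_true - mean_t) * (y_pred - mean_p))
        #       return (2.0 * cov) / (var_t + var_p +
        #                             K.square(mean_t - mean_p) + K.epsilon())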

        filepath = self.experimentManager.modelDirectory + "/weights.best.hdf5"

        checkPoint = ModelCheckpoint(
            filepath,
            monitor='val_arousal_output_mean_squared_error',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
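        # Monitoring 'val_arousal_output_mean_squared_error' requires output
        # layers named 'arousal_output' and 'valence_output'; `period=1` is
        # the legacy Keras argument (replaced by `save_freq`) and checks for
        # a new best model every epoch.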

        reduce_lr = ReduceLROnPlateau(
            monitor='val_valence_output_mean_squared_error',
            factor=0.2,
            patience=5,
            min_lr=0.0001,
            verbose=1)

        # Register the checkpoint alongside the LR scheduler so the best
        # weights are saved during training.
        callbacks_list = [checkPoint, reduce_lr]

        history_callback = self.model.fit(
            [dataPointsTrain.dataXVideo, dataPointsTrain.dataXAudio],
            [dataPointsTrain.dataY[:, 0], dataPointsTrain.dataY[:, 1]],
            batch_size=self.batchSize,
            epochs=self.numberOfEpochs,
            validation_data=([dataPointsValidation.dataXVideo,
                              dataPointsValidation.dataXAudio],
                             [dataPointsValidation.dataY[:, 0],
                              dataPointsValidation.dataY[:, 1]]),
            shuffle=True,
            callbacks=callbacks_list)

        if self.logManager is not None:
            self.logManager.write(str(history_callback.history))
            self.plotManager.createTrainingPlot(history_callback)
            self.logManager.endLogSession()