Example #1
def compile_train(model,
                  encoder_bioma=None,
                  encoder_domain=None,
                  reconstruction_error=losses.MeanSquaredError(),
                  encoded_comparison_error=losses.MeanAbsoluteError(),
                  metrics=[[
                      metrics.MeanSquaredError(),
                      metrics.MeanAbsoluteError(),
                      metrics.MeanAbsolutePercentageError(),
                  ], [
                      metrics.MeanSquaredError(),
                      metrics.MeanAbsoluteError(),
                      metrics.MeanAbsolutePercentageError(),
                  ], [
                      metrics.MeanAbsoluteError(),
                  ]],
                  optimizer=optimizers.SGD(learning_rate=0.01)):
    if encoder_domain is not None and encoder_bioma is not None:
        model.compile(optimizer=optimizer,
                      loss=[
                          reconstruction_error, reconstruction_error,
                          encoded_comparison_error
                      ],
                      metrics=metrics)
    elif encoder_bioma is not None:
        model.compile(optimizer=optimizer,
                      loss=reconstruction_error,
                      metrics=metrics[0])
    elif encoder_domain is not None:
        model.compile(optimizer=optimizer,
                      loss=reconstruction_error,
                      metrics=metrics[1])
    else:
        raise ValueError('Neither encoder_bioma nor encoder_domain was provided')
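
A minimal usage sketch for the two-encoder branch (the toy model, its shapes and layer names are assumptions, not from the source; assumes TensorFlow 2.x):

import tensorflow as tf
from tensorflow.keras import layers

# Hypothetical two-branch autoencoder with three outputs, matching the
# three losses compile_train wires up in the two-encoder case.
inp_bioma = layers.Input(shape=(16,), name='bioma_in')
inp_domain = layers.Input(shape=(8,), name='domain_in')
enc_bioma = layers.Dense(4, name='enc_bioma')(inp_bioma)
enc_domain = layers.Dense(4, name='enc_domain')(inp_domain)
rec_bioma = layers.Dense(16, name='rec_bioma')(enc_bioma)
rec_domain = layers.Dense(8, name='rec_domain')(enc_domain)
latent_diff = layers.Subtract(name='latent_diff')([enc_bioma, enc_domain])
toy_model = tf.keras.Model(inputs=[inp_bioma, inp_domain],
                           outputs=[rec_bioma, rec_domain, latent_diff])

# Both encoder arguments are non-None, so all three losses and all three
# per-output metric lists are applied.
compile_train(toy_model, encoder_bioma=enc_bioma, encoder_domain=enc_domain)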
Example #2
def SLFNN(vst_onlyTokens,
          dl_terms,
          dl_associations,
          vso,
          nbEpochs=100,
          batchSize=64):

    vstTerm, l_unknownToken = word2term.wordVST2TermVST(
        vst_onlyTokens, dl_terms)
    data, labels = getMatrix(dl_terms,
                             vstTerm,
                             dl_associations,
                             vso,
                             symbol="___")

    inputSize = data.shape[1]
    ontoSpaceSize = labels.shape[1]

    model = models.Sequential()
    model.add(
        layers.Dense(units=ontoSpaceSize,
                     use_bias=True,
                     kernel_initializer=initializers.GlorotUniform(),
                     input_shape=(inputSize, )))
    model.summary()

    model.compile(
        optimizer=optimizers.Nadam(),
        loss=losses.LogCosh(),
        metrics=[metrics.CosineSimilarity(),
                 metrics.MeanSquaredError()])
    model.fit(data, labels, epochs=nbEpochs, batch_size=batchSize)

    return model, vso, l_unknownToken
Example #3
    def compile(self, lr, reconstructions_output_weight, error_singrom_weight,
                gradient_weight):
        # Setting losses.
        self._custom_loss = RecSinoGradientLoss(reconstructions_output_weight,
                                                error_singrom_weight,
                                                gradient_weight)

        # Setting metrics.
        # Reconstruction candidates: MSE, RMSE, MAE (each optionally in HU),
        # SNR, SSIM, relative error. Chosen here: MSE, HU_MAE, SNR, SSIM and
        # relative error; the sinogram error and the gradient each use a
        # mean-square metric.
        self._metrics_reconstruction = [
            metrics.MeanSquaredError('rec_mse'),
            HU_MAE('rec_HU_mae'),
            ReconstructionReference2Noise('rec_snr'),
            SSIM('rec_ssim'),
            RelativeError('rec_rel_err')
        ]
        self._monitored_metric = self._metrics_reconstruction[1]
        self._metrics_error_sino = [MeanSquare('error_sino_ms')]
        self._metrics_gradient = [MeanSquare('gradient_ms')]

        self._all_metrics = self._metrics_reconstruction + self._metrics_error_sino + self._metrics_gradient

        self._model.compile(optimizer=optimizers.Adam(lr))
Example #4
    def get_init_model(self) -> Model:
        """Get model with initialized weights for the layers
        """
        # Feature inputs:
        input_layer = Input(shape=(5, ), name='input_features')

        # Hidden layers
        hidden_1 = Dense(64, activation='relu', name='hidden_1')(input_layer)
        hidden_2 = Dense(32, activation='relu', name='hidden_2')(hidden_1)
        # Output layer
        # output is reward for each action 0-stay, 1-left, 2-right
        output_layer = Dense(3, activation='linear', name='output')(hidden_2)

        # Model
        model = Model(inputs=[input_layer], outputs=output_layer)

        # Optimizer
        optimizer = optimizers.Adam(learning_rate=0.01)

        model.compile(loss=losses.MeanSquaredError(),
                      optimizer=optimizer,
                      metrics=[metrics.MeanSquaredError()])

        model.summary()
        return model
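
A hypothetical inference call, assuming `model` holds the network returned by get_init_model (the greedy-action step is an illustration, not from the source):

import numpy as np

states = np.random.rand(2, 5).astype('float32')  # two 5-feature states
q_values = model.predict(states)                 # shape (2, 3): stay/left/right
actions = q_values.argmax(axis=1)                # greedy action per state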
Example #5
def create_model(input_shape,
                 optimizer='Adam',
                 loss='mean_squared_error') -> Sequential:
    """
    Simple function to create sequential model.
    Model is compiled with Adam optimizer and mean squared loss function
    :param input_shape -> tuple int
    :param: optimizer (str)
    :param: loss (str)
    :return: compiled sequential model
    """
    new_model = Sequential()
    new_model.add(
        LSTM(units=50, return_sequences=True, input_shape=input_shape))
    new_model.add(Dropout(0.2))
    new_model.add(LSTM(units=50, return_sequences=True))
    new_model.add(Dropout(0.2))
    new_model.add(LSTM(units=50))
    new_model.add(Dropout(0.2))
    new_model.add(Dense(units=1))  # prediction of closing value
    new_model.compile(optimizer=optimizer,
                      loss=loss,
                      metrics=[
                          tf_metrics.MeanSquaredError(),
                          tf_metrics.MeanAbsolutePercentageError()
                      ])
    return new_model
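
A hypothetical smoke test for create_model (window length, batch size and the random data are assumptions):

import numpy as np

X = np.random.rand(32, 60, 1)  # 32 windows of 60 time steps, one feature
y = np.random.rand(32, 1)      # one closing value per window
model = create_model(input_shape=(60, 1))
model.fit(X, y, epochs=1, batch_size=8)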
Example #6
def train_model(model, scaler, data):
    data = np.asarray(data)
    X = np.delete(data, 1, axis=1)
    y = data[:, 1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=[
                      metrics.MeanSquaredError(),
                      metrics.RootMeanSquaredError(),
                      metrics.MeanAbsoluteError()
                  ])
    epochs_hist = model.fit(X_train,
                            y_train,
                            epochs=50,
                            batch_size=15,
                            verbose=1,
                            validation_split=0.2)

    X_testing = np.array(X_test)
    y_predict = model.predict(X_testing)
    # [-1] reads the last epoch, so this stays correct if epochs changes
    mse_training = epochs_hist.history['val_loss'][-1]
    rmse_training = epochs_hist.history['val_root_mean_squared_error'][-1]
    mae_training = epochs_hist.history['val_mean_absolute_error'][-1]
    evaluation_test = model.evaluate(X_test, y_test)
    save_model(model)
    return {
        "mse_test": evaluation_test[1],
        "rmse_test": evaluation_test[2],
        "mae_test": evaluation_test[3],
        "mse_train": mse_training,
        "rmse_train": rmse_training,
        "mae_train": mae_training
    }
Example #7
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metrics_tracking.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(),
                 metrics.MeanSquaredError()])
    assert tracker.names == ['categorical_accuracy', 'mean_squared_error']
    assert tracker.directions['categorical_accuracy'] == 'max'
    assert tracker.directions['mean_squared_error'] == 'min'
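
This test exercises a tuner's MetricsTracker (KerasTuner ships one of this shape); the direction inference it asserts can be mimicked standalone with a simplified, hypothetical rule, not the library's actual implementation:

def infer_direction(metric_name):
    # Accuracy-like metrics are maximized; error-like metrics are minimized.
    maximized = ('accuracy', 'auc', 'precision', 'recall')
    return 'max' if any(key in metric_name for key in maximized) else 'min'

assert infer_direction('categorical_accuracy') == 'max'
assert infer_direction('mean_squared_error') == 'min'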
Example #8
def SCNN(vst_onlyTokens,
         dl_terms,
         dl_associations,
         vso,
         nbEpochs=150,
         batchSize=64,
         l_numberOfFilters=[4000],
         l_filterSizes=[1],
         phraseMaxSize=15):

    data, labels, l_unkownTokens, l_uncompleteExpressions = prepare2D_data(
        vst_onlyTokens, dl_terms, dl_associations, vso, phraseMaxSize)

    embeddingSize = data.shape[2]
    ontoSpaceSize = labels.shape[2]

    inputLayer = Input(shape=(phraseMaxSize, embeddingSize))

    l_subLayers = list()
    for i, filterSize in enumerate(l_filterSizes):

        convLayer = (layers.Conv1D(
            l_numberOfFilters[i],
            filterSize,
            strides=1,
            kernel_initializer=initializers.GlorotUniform()))(inputLayer)

        outputSize = phraseMaxSize - filterSize + 1
        pool = (layers.MaxPool1D(pool_size=outputSize))(convLayer)

        activationLayer = (layers.LeakyReLU(alpha=0.3))(pool)

        l_subLayers.append(activationLayer)

    if len(l_filterSizes) > 1:
        concatenateLayer = (layers.Concatenate(axis=-1))(
            l_subLayers)  # axis=-1 // concatenating on the last dimension
    else:
        concatenateLayer = l_subLayers[0]

    convModel = Model(inputs=inputLayer, outputs=concatenateLayer)
    fullmodel = models.Sequential()
    fullmodel.add(convModel)

    fullmodel.add(
        layers.Dense(ontoSpaceSize,
                     kernel_initializer=initializers.GlorotUniform()))

    fullmodel.summary()
    fullmodel.compile(
        optimizer=optimizers.Nadam(),
        loss=losses.LogCosh(),
        metrics=[metrics.CosineSimilarity(),
                 metrics.MeanSquaredError()])
    fullmodel.fit(data, labels, epochs=nbEpochs, batch_size=batchSize)

    return fullmodel, vso, l_unkownTokens
Example #9
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metric.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(),
                 metrics.MeanSquaredError()])
    assert set(tracker.metrics.keys()) == {
        'categorical_accuracy', 'mean_squared_error'
    }
    assert tracker.metrics['categorical_accuracy'].direction == 'max'
    assert tracker.metrics['mean_squared_error'].direction == 'min'
Example #10
def test_register_from_metrics():
    # As well as direction inference.
    tracker = metrics_tracking.MetricsTracker(
        metrics=[metrics.CategoricalAccuracy(), metrics.MeanSquaredError()]
    )
    assert set(tracker.metrics.keys()) == {
        "categorical_accuracy",
        "mean_squared_error",
    }
    assert tracker.metrics["categorical_accuracy"].direction == "max"
    assert tracker.metrics["mean_squared_error"].direction == "min"
Example #11
def build_simple_model(dataset='Fashion Mnist',
                       opt='sgd',
                       hidden=None,
                       funcs=None,
                       loss=None,
                       metrics_list=None):
    model = models.Sequential()
    if dataset == 'CIFAR-10':
        model.add(layers.Flatten(input_shape=[32, 32, 3]))
    elif dataset == 'Fashion Mnist':  # the original `elif ('Fashion Mnist'):` was always true
        model.add(layers.Flatten(input_shape=[28, 28]))
    for i in hidden.keys():
        model.add(layers.Dense(hidden[i], activation=funcs[i].lower()))
    model.add(layers.Dense(10, activation="softmax"))

    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
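
A hypothetical call (layer sizes, activation names and metric choices are illustrative):

model = build_simple_model(dataset='Fashion Mnist',
                           opt='sgd',
                           hidden={'h1': 128, 'h2': 64},
                           funcs={'h1': 'ReLU', 'h2': 'ReLU'},
                           loss='Categorical Crossentropy',
                           metrics_list=['accuracy', 'auc'])

Note that hidden and funcs must share keys, since the loop reads funcs[i] for every key i of hidden.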
Example #12
 def __get_metric(self, metric):
     if metric == "auc":
         return m.AUC()
     elif metric == "accuracy":
         return m.Accuracy()
     elif metric == "binary_accuracy":
         return m.BinaryAccuracy()
     elif metric == "categorical_accuracy":
         return m.CategoricalAccuracy()
     elif metric == "binary_crossentropy":
         return m.BinaryCrossentropy()
     elif metric == "categorical_crossentropy":
         return m.CategoricalCrossentropy()
     elif metric == "sparse_categorical_crossentropy":
         return m.SparseCategoricalCrossentropy()
     elif metric == "kl_divergence":
         return m.KLDivergence()
     elif metric == "poisson":
         return m.Poisson()
     elif metric == "mse":
         return m.MeanSquaredError()
     elif metric == "rmse":
         return m.RootMeanSquaredError()
     elif metric == "mae":
         return m.MeanAbsoluteError()
     elif metric == "mean_absolute_percentage_error":
         return m.MeanAbsolutePercentageError()
     elif metric == "mean_squared_logarithm_error":
         return m.MeanSquaredLogarithmicError()
     elif metric == "cosine_similarity":
         return m.CosineSimilarity()
     elif metric == "log_cosh_error":
         return m.LogCoshError()
     elif metric == "precision":
         return m.Precision()
     elif metric == "recall":
         return m.Recall()
     elif metric == "true_positive":
         return m.TruePositives()
     elif metric == "true_negative":
         return m.TrueNegatives()
     elif metric == "false_positive":
         return m.FalsePositives()
     elif metric == "false_negative":
         return m.FalseNegatives()
     else:
         raise Exception("specified metric not defined")
Example #13
def load_simple_model(model_path='',
                      weights_path='',
                      opt='sgd',
                      loss=None,
                      metrics_list=None):
    model = models.load_model(model_path)
    model.load_weights(weights_path)
    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
Example #14
def CNorm(vst_onlyTokens,
          dl_terms,
          dl_associations,
          vso,
          nbEpochs=30,
          batchSize=64,
          l_numberOfFilters=[4000],
          l_filterSizes=[1],
          phraseMaxSize=15):

    # Preparing data for SLFNN and S-CNN components:
    dataSCNN, labels, l_unkownTokens, l_uncompleteExpressions = prepare2D_data(
        vst_onlyTokens, dl_terms, dl_associations, vso, phraseMaxSize)
    dataSLFNN = numpy.zeros((dataSCNN.shape[0], dataSCNN.shape[2]))
    for i in range(dataSCNN.shape[0]):
        numberOfToken = 0
        for embedding in dataSCNN[i]:
            if not numpy.any(embedding):
                pass
            else:
                numberOfToken += 1
                dataSLFNN[i] += embedding

        if numberOfToken > 0:
            dataSLFNN[i] = dataSLFNN[i] / numberOfToken

    # Input layers:
    inputLP = Input(shape=dataSLFNN.shape[1])
    inputCNN = Input(shape=[dataSCNN.shape[1], dataSCNN.shape[2]])

    # SLFNN component:
    ontoSpaceSize = labels.shape[2]
    denseLP = layers.Dense(
        units=ontoSpaceSize,
        use_bias=True,
        kernel_initializer=initializers.GlorotUniform())(inputLP)
    modelLP = Model(inputs=inputLP, outputs=denseLP)

    # Shallow-CNN component:
    l_subLayers = list()
    for i, filterSize in enumerate(l_filterSizes):

        convLayer = (layers.Conv1D(
            l_numberOfFilters[i],
            filterSize,
            strides=1,
            kernel_initializer=initializers.GlorotUniform()))(inputCNN)

        outputSize = phraseMaxSize - filterSize + 1
        pool = (layers.MaxPool1D(pool_size=outputSize))(convLayer)

        activationLayer = (layers.LeakyReLU(alpha=0.3))(pool)

        l_subLayers.append(activationLayer)

    if len(l_filterSizes) > 1:
        concatenateLayer = (layers.Concatenate(axis=-1))(
            l_subLayers)  # axis=-1 // concatenating on the last dimension
    else:
        concatenateLayer = l_subLayers[0]

    denseLayer = layers.Dense(
        ontoSpaceSize,
        kernel_initializer=initializers.GlorotUniform())(concatenateLayer)
    modelCNN = Model(inputs=inputCNN, outputs=denseLayer)

    # Combination of the two components:
    combinedLayer = layers.average([modelLP.output, modelCNN.output])
    fullModel = Model(inputs=[inputLP, inputCNN], outputs=combinedLayer)
    fullModel.summary()

    # Compile and train:
    fullModel.compile(
        optimizer=optimizers.Nadam(),
        loss=losses.LogCosh(),
        metrics=[metrics.CosineSimilarity(),
                 metrics.MeanSquaredError()])
    fullModel.fit([dataSLFNN, dataSCNN],
                  labels,
                  epochs=nbEpochs,
                  batch_size=batchSize)

    return fullModel, vso, l_unkownTokens
Example #15
def build_model(local_bm_hyperparameters, local_bm_settings):
    model_built = 0
    time_steps_days = int(local_bm_hyperparameters['time_steps_days'])
    epochs = int(local_bm_hyperparameters['epochs'])
    batch_size = int(local_bm_hyperparameters['batch_size'])
    workers = int(local_bm_hyperparameters['workers'])
    optimizer_function = local_bm_hyperparameters['optimizer']
    optimizer_learning_rate = local_bm_hyperparameters['learning_rate']
    if optimizer_function == 'adam':
        optimizer_function = optimizers.Adam(optimizer_learning_rate)
    elif optimizer_function == 'ftrl':
        optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
    losses_list = []
    loss_1 = local_bm_hyperparameters['loss_1']
    loss_2 = local_bm_hyperparameters['loss_2']
    loss_3 = local_bm_hyperparameters['loss_3']
    union_settings_losses = [loss_1, loss_2, loss_3]
    if 'mape' in union_settings_losses:
        losses_list.append(losses.MeanAbsolutePercentageError())
    if 'mse' in union_settings_losses:
        losses_list.append(losses.MeanSquaredError())
    if 'mae' in union_settings_losses:
        losses_list.append(losses.MeanAbsoluteError())
    if 'm_mape' in union_settings_losses:
        losses_list.append(modified_mape())
    if 'customized_loss_function' in union_settings_losses:
        losses_list.append(customized_loss())
    metrics_list = []
    metric1 = local_bm_hyperparameters['metrics1']
    metric2 = local_bm_hyperparameters['metrics2']
    union_settings_metrics = [metric1, metric2]
    if 'rmse' in union_settings_metrics:
        metrics_list.append(metrics.RootMeanSquaredError())
    if 'mse' in union_settings_metrics:
        metrics_list.append(metrics.MeanSquaredError())
    if 'mae' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsoluteError())
    if 'mape' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsolutePercentageError())
    l1 = local_bm_hyperparameters['l1']
    l2 = local_bm_hyperparameters['l2']
    if local_bm_hyperparameters['regularizers_l1_l2'] == 'True':
        activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
    else:
        activation_regularizer = None
    nof_features_for_training = local_bm_hyperparameters[
        'nof_features_for_training']
    # creating model
    forecaster_in_block = tf.keras.Sequential()
    print('creating the ANN model...')
    # first layer (DENSE)
    if local_bm_hyperparameters['units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Dense(
                units=local_bm_hyperparameters['units_layer_1'],
                activation=local_bm_hyperparameters['activation_1'],
                input_shape=(local_bm_hyperparameters['time_steps_days'],
                             nof_features_for_training),
                activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_1'])))
    # second LSTM layer
    if local_bm_hyperparameters[
            'units_layer_2'] > 0 and local_bm_hyperparameters[
                'units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_2'],
                    activation=local_bm_hyperparameters['activation_2'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_2']),
                    return_sequences=False)))
        forecaster_in_block.add(
            RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # third LSTM layer
    if local_bm_hyperparameters['units_layer_3'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_3'],
                    activation=local_bm_hyperparameters['activation_3'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_3']),
                    return_sequences=True)))
        if local_bm_hyperparameters['units_layer_4'] == 0:
            forecaster_in_block.add(
                RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # fourth layer (DENSE)
    if local_bm_hyperparameters['units_layer_4'] > 0:
        forecaster_in_block.add(
            layers.Dense(units=local_bm_hyperparameters['units_layer_4'],
                         activation=local_bm_hyperparameters['activation_4'],
                         activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_4'])))
    # final layer
    forecaster_in_block.add(
        TimeDistributed(layers.Dense(units=nof_features_for_training)))
    forecaster_in_block.save(''.join(
        [local_bm_settings['models_path'], 'in_block_NN_model_structure_']),
                             save_format='tf')
    forecast_horizon_days = local_bm_settings['forecast_horizon_days']
    forecaster_in_block.build(input_shape=(1, forecast_horizon_days + 1,
                                           nof_features_for_training))
    forecaster_in_block.compile(optimizer=optimizer_function,
                                loss=losses_list,
                                metrics=metrics_list)
    forecaster_in_block_json = forecaster_in_block.to_json()
    with open(
            ''.join([
                local_bm_settings['models_path'],
                'freq_acc_forecaster_in_block.json'
            ]), 'w') as json_file:
        json_file.write(forecaster_in_block_json)
        json_file.close()
    print(
        'build_model function finished (model structure saved in json and tf formats)'
    )
    return True, model_built
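
For reference, a hypothetical minimal argument pair covering the keys build_model reads (all values illustrative, not from the source; the layer-2 and layer-3 branches are skipped by setting their units to 0, and models_path must exist for the save call):

local_bm_hyperparameters = {
    'time_steps_days': 28, 'epochs': 10, 'batch_size': 32, 'workers': 1,
    'optimizer': 'adam', 'learning_rate': 1e-3,
    'loss_1': 'mse', 'loss_2': 'mae', 'loss_3': '',
    'metrics1': 'rmse', 'metrics2': 'mape',
    'regularizers_l1_l2': 'False', 'l1': 0.0, 'l2': 0.0,
    'nof_features_for_training': 10,
    'units_layer_1': 64, 'activation_1': 'relu', 'dropout_layer_1': 0.2,
    'units_layer_2': 0, 'units_layer_3': 0,
    'units_layer_4': 16, 'activation_4': 'relu', 'dropout_layer_4': 0.2,
    'repeat_vector': 28,
}
local_bm_settings = {'models_path': './models/', 'forecast_horizon_days': 28}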
Example #16
    def forecast(self, local_mse, local_normalized_scaled_unit_sales,
                 local_mean_unit_complete_time_serie, local_raw_unit_sales,
                 local_settings):
        try:
            print(
                'starting high loss (mse in previous LSTM) time_series in-block forecast submodule'
            )
            # set training parameters
            with open(''.join([local_settings['hyperparameters_path'],
                               'in_block_time_serie_based_model_hyperparameters.json'])) \
                    as local_r_json_file:
                model_hyperparameters = json.loads(local_r_json_file.read())
                local_r_json_file.close()
            local_time_series_group = np.load(''.join(
                [local_settings['train_data_path'], 'time_serie_group.npy']),
                                              allow_pickle=True)
            time_steps_days = int(local_settings['time_steps_days'])
            epochs = int(model_hyperparameters['epochs'])
            batch_size = int(model_hyperparameters['batch_size'])
            workers = int(model_hyperparameters['workers'])
            optimizer_function = model_hyperparameters['optimizer']
            optimizer_learning_rate = model_hyperparameters['learning_rate']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(optimizer_learning_rate)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            losses_list = []
            loss_1 = model_hyperparameters['loss_1']
            loss_2 = model_hyperparameters['loss_2']
            loss_3 = model_hyperparameters['loss_3']
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'mape' in union_settings_losses:
                losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in union_settings_losses:
                losses_list.append(losses.MeanSquaredError())
            if 'mae' in union_settings_losses:
                losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in union_settings_losses:
                losses_list.append(modified_mape())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            metrics_list = []
            metric1 = model_hyperparameters['metrics1']
            metric2 = model_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'rmse' in union_settings_metrics:
                metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in union_settings_metrics:
                metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsolutePercentageError())
            l1 = model_hyperparameters['l1']
            l2 = model_hyperparameters['l2']
            if model_hyperparameters['regularizers_l1_l2'] == 'True':
                activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            else:
                activation_regularizer = None

            # searching for time_series with high loss forecast
            time_series_treated = []
            poor_results_mse_threshold = local_settings[
                'poor_results_mse_threshold']
            poor_result_time_serie_list = []
            nof_features_for_training = 0
            for result in local_mse:
                if result[1] > poor_results_mse_threshold:
                    nof_features_for_training += 1
                    poor_result_time_serie_list.append(int(result[0]))
            # nof_features_for_training = local_normalized_scaled_unit_sales.shape[0]
            nof_features_for_training = len(poor_result_time_serie_list)
            # creating model
            forecaster_in_block = tf.keras.Sequential()
            print(
                'current model for specific high loss time_series: Mix_Bid_PeepHole_LSTM_Dense_ANN'
            )
            # first layer (DENSE)
            if model_hyperparameters['units_layer_1'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_1'],
                        activation=model_hyperparameters['activation_1'],
                        input_shape=(model_hyperparameters['time_steps_days'],
                                     nof_features_for_training),
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_1'])))
            # second LSTM layer
            if model_hyperparameters['units_layer_2'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_2'],
                            activation=model_hyperparameters['activation_2'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_2'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if model_hyperparameters['units_layer_3'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_3'],
                            activation=model_hyperparameters['activation_3'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_3'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if model_hyperparameters['units_layer_4'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_4'],
                        activation=model_hyperparameters['activation_4'],
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_4'])))
            # final layer
            forecaster_in_block.add(
                TimeDistributed(layers.Dense(units=nof_features_for_training)))
            # forecaster_in_block.saves(''.join([local_settings['models_path'], '_model_structure_']),
            #                 save_format='tf')
            forecast_horizon_days = local_settings['forecast_horizon_days']
            forecaster_in_block.build(input_shape=(1, forecast_horizon_days,
                                                   nof_features_for_training))
            forecaster_in_block.compile(optimizer=optimizer_function,
                                        loss=losses_list,
                                        metrics=metrics_list)
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(
                    ''.join([
                        local_settings['models_path'],
                        'forecaster_in_block.json'
                    ]), 'w') as json_file:
                json_file.write(forecaster_in_block_json)
                json_file.close()
            forecaster_in_block_untrained = forecaster_in_block
            print('specific time_serie model initialized and compiled')
            nof_selling_days = local_normalized_scaled_unit_sales.shape[1]
            last_learning_day_in_year = np.mod(nof_selling_days, 365)
            max_selling_time = local_settings['max_selling_time']
            days_in_focus_frame = model_hyperparameters['days_in_focus_frame']
            window_input_length = local_settings['moving_window_input_length']
            window_output_length = local_settings[
                'moving_window_output_length']
            moving_window_length = window_input_length + window_output_length
            nof_years = local_settings['number_of_years_ceil']

            # training
            # time_serie_data = local_normalized_scaled_unit_sales
            nof_poor_result_time_series = len(poor_result_time_serie_list)
            time_serie_data = np.zeros(shape=(nof_poor_result_time_series,
                                              max_selling_time))
            time_serie_iterator = 0
            for time_serie in poor_result_time_serie_list:
                time_serie_data[
                    time_serie_iterator, :] = local_normalized_scaled_unit_sales[
                        time_serie, :]
                time_serie_iterator += 1
            if local_settings['repeat_training_in_block'] == "True":
                print(
                    'starting in-block training of model for high_loss time_series in previous model'
                )
                nof_selling_days = time_serie_data.shape[1]
                # nof_moving_windows = np.int32(nof_selling_days / moving_window_length)
                remainder_days = np.mod(nof_selling_days, moving_window_length)
                window_first_days = [
                    first_day for first_day in range(0, nof_selling_days,
                                                     moving_window_length)
                ]
                length_window_walk = len(window_first_days)
                # last_window_start = window_first_days[length_window_walk - 1]
                if remainder_days != 0:
                    window_first_days[
                        length_window_walk -
                        1] = nof_selling_days - moving_window_length
                day_in_year = []
                [
                    day_in_year.append(last_learning_day_in_year + year * 365)
                    for year in range(nof_years)
                ]
                stride_window_walk = model_hyperparameters[
                    'stride_window_walk']
                print('defining x_train')
                x_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    [
                        x_train.append(
                            time_serie_data[:, day - time_steps_days:day -
                                            window_output_length])
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    [
                        x_train.append(time_serie_data[:, day:day +
                                                       time_steps_days])
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: the last year is handled with the most recent data available
                    [
                        x_train.append(
                            np.concatenate(
                                (time_serie_data[:, day -
                                                 window_output_length:day],
                                 np.zeros(shape=(nof_poor_result_time_series,
                                                 time_steps_days -
                                                 window_output_length))),
                                axis=1))
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                else:
                    logging.info(
                        "\ntrain_model_input_data_approach is not defined")
                    print('-a problem occurs with the data_approach settings')
                    return False, None
                print('defining y_train')
                y_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    [
                        y_train.append(time_serie_data[:, day -
                                                       time_steps_days:day])
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    [
                        y_train.append(time_serie_data[:, day:day +
                                                       time_steps_days])
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: the last year is handled with the most recent data available
                    [
                        y_train.append(
                            np.concatenate(
                                (time_serie_data[:, day -
                                                 window_output_length:day],
                                 np.zeros(shape=(nof_poor_result_time_series,
                                                 time_steps_days -
                                                 window_output_length))),
                                axis=1))
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]

                # if time_enhance is active, assigns more weight to the last time_steps according to enhance_last_stride
                if local_settings['time_enhance'] == 'True':
                    enhance_last_stride = local_settings['enhance_last_stride']
                    last_elements = []
                    length_x_y_train = len(x_train)
                    x_train_enhanced, y_train_enhanced = [], []
                    enhance_iterator = 1
                    for position in range(
                            length_x_y_train - enhance_last_stride,
                            length_x_y_train, -1):
                        [
                            x_train_enhanced.append(x_train[position])
                            for enhance in range(1, 3 * (enhance_iterator + 1))
                        ]
                        [
                            y_train_enhanced.append(y_train[position])
                            for enhance in range(1, 3 * (enhance_iterator + 1))
                        ]
                        enhance_iterator += 1
                    x_train = x_train[:-enhance_last_stride]
                    [
                        x_train.append(time_step)
                        for time_step in x_train_enhanced
                    ]
                    y_train = y_train[:-enhance_last_stride]
                    [
                        y_train.append(time_step)
                        for time_step in y_train_enhanced
                    ]

                # broadcasts lists to np arrays and applies the last pre-training preprocessing (amplification)
                x_train = np.array(x_train)
                y_train = np.array(y_train)
                print('x_train_shape:  ', x_train.shape)
                if local_settings['amplification'] == 'True':
                    factor = local_settings[
                        'amplification_factor']  # factor tuning was done previously
                    for time_serie_iterator in range(np.shape(x_train)[1]):
                        max_time_serie = np.amax(
                            x_train[:, time_serie_iterator, :])
                        x_train[:, time_serie_iterator, :][x_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                        max_time_serie = np.amax(
                            y_train[:, time_serie_iterator, :])
                        y_train[:, time_serie_iterator, :][y_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                print('x_train and y_train built')

                # define callbacks, checkpoints namepaths
                model_weights = ''.join([
                    local_settings['checkpoints_path'],
                    'check_point_model_for_high_loss_time_serie_',
                    model_hyperparameters['current_model_name'],
                    "_loss_-{loss:.4f}-.hdf5"
                ])
                callback1 = cb.EarlyStopping(
                    monitor='loss',
                    patience=model_hyperparameters['early_stopping_patience'])
                callback2 = cb.ModelCheckpoint(model_weights,
                                               monitor='loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
                callbacks = [callback1, callback2]
                x_train = x_train.reshape(
                    (np.shape(x_train)[0], np.shape(x_train)[2],
                     np.shape(x_train)[1]))
                y_train = y_train.reshape(
                    (np.shape(y_train)[0], np.shape(y_train)[2],
                     np.shape(y_train)[1]))
                print('input_shape: ', np.shape(x_train))

                # train for each time_serie
                # check settings for repeat or not the training
                forecaster_in_block.fit(x_train,
                                        y_train,
                                        batch_size=batch_size,
                                        epochs=epochs,
                                        workers=workers,
                                        callbacks=callbacks,
                                        shuffle=False)
                # print summary (informative; but if says "shape = multiple", probably useless)
                forecaster_in_block.summary()
                forecaster_in_block.save(''.join([
                    local_settings['models_path'],
                    '_high_loss_time_serie_model_forecaster_in_block_.h5'
                ]))
                forecaster_in_block.save_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                print(
                    'high loss time_series model trained and saved in hdf5 format .h5'
                )
            else:
                forecaster_in_block.load_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                # forecaster_in_block = models.load_model(''.join([local_settings['models_path'],
                #                                                  '_high_loss_time_serie_model_forecaster_.h5']))
                print('weights of previously trained model loaded')

            # compile model and make forecast (not necessary)
            # forecaster_in_block.compile(optimizer='adam', loss='mse')

            # evaluating model and comparing with aggregated (in-block) LSTM
            print('evaluating the trained model...')
            time_serie_data = time_serie_data.reshape(
                (1, time_serie_data.shape[1], time_serie_data.shape[0]))
            x_input = time_serie_data[:, -forecast_horizon_days:, :]
            y_pred_normalized = forecaster_in_block.predict(x_input)
            # print('output shape: ', y_pred_normalized.shape)
            time_serie_data = time_serie_data.reshape(
                (time_serie_data.shape[2], time_serie_data.shape[1]))
            # print('time_serie data shape: ', np.shape(time_serie_data))
            time_serie_iterator = 0
            improved_time_series_forecast = []
            time_series_not_improved = []
            improved_mse = []
            for time_serie in poor_result_time_serie_list:
                # for time_serie in range(local_normalized_scaled_unit_sales.shape[0]):
                y_truth = local_raw_unit_sales[time_serie:time_serie + 1,
                                               -forecast_horizon_days:]
                # print('y_truth shape:', y_truth.shape)

                # reversing preprocess: rescale, denormalize, reshape
                # inverse reshape
                y_pred_reshaped = y_pred_normalized.reshape(
                    (y_pred_normalized.shape[2], y_pred_normalized.shape[1]))
                y_pred_reshaped = y_pred_reshaped[
                    time_serie_iterator:time_serie_iterator + 1, :]
                # print('y_pred_reshaped shape:', y_pred_reshaped.shape)

                # inverse transform (first moving_windows denormalizing and then general rescaling)
                time_serie_normalized_window_mean = np.mean(
                    time_serie_data[time_serie_iterator,
                                    -moving_window_length:])
                # print('mean of this time serie (normalized values): ', time_serie_normalized_window_mean)
                local_denormalized_array = window_based_denormalizer(
                    y_pred_reshaped, time_serie_normalized_window_mean,
                    forecast_horizon_days)
                local_point_forecast = general_mean_rescaler(
                    local_denormalized_array,
                    local_mean_unit_complete_time_serie[time_serie],
                    forecast_horizon_days)
                # print('rescaled denormalized forecasts array shape: ', local_point_forecast.shape)

                # calculating MSE
                # print(y_truth.shape)
                # print(local_point_forecast.shape)
                local_error_metric_mse = mean_squared_error(
                    y_truth, local_point_forecast)
                # print('time_serie: ', time_serie, '\tMean_Squared_Error: ', local_error_metric_mse)
                previous_result = local_mse[:, 1][local_mse[:, 0] ==
                                                  time_serie].item()
                time_series_treated.append(
                    [int(time_serie), previous_result, local_error_metric_mse])
                if local_error_metric_mse < previous_result:
                    # print('better results with time_serie specific model training')
                    print(time_serie, 'MSE improved from ', previous_result,
                          'to ', local_error_metric_mse)
                    improved_time_series_forecast.append(int(time_serie))
                    improved_mse.append(local_error_metric_mse)
                else:
                    # print('no better results with time serie specific model training')
                    # print('MSE not improved from: ', previous_result, '\t current mse: ', local_error_metric_mse)
                    time_series_not_improved.append(int(time_serie))
                time_serie_iterator += 1
            time_series_treated = np.array(time_series_treated)
            improved_mse = np.array(improved_mse)
            average_mse_in_block_forecast = np.mean(time_series_treated[:, 2])
            average_mse_improved_ts = np.mean(improved_mse)
            print('poor result time serie list len:',
                  len(poor_result_time_serie_list))
            print('mean_mse for in-block forecast:',
                  average_mse_in_block_forecast)
            print(
                'number of time series with better results with this forecast: ',
                len(improved_time_series_forecast))
            print(
                'mean_mse of time series with better results with this forecast: ',
                average_mse_improved_ts)
            print('not improved time series =', len(time_series_not_improved))
            time_series_treated = np.array(time_series_treated)
            improved_time_series_forecast = np.array(
                improved_time_series_forecast)
            time_series_not_improved = np.array(time_series_not_improved)
            poor_result_time_serie_array = np.array(
                poor_result_time_serie_list)
            # store data of (individual-approach) time_series forecast successfully improved and those that not
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'poor_result_time_serie_array'
                ]), poor_result_time_serie_array)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_forecast_results'
                ]), time_series_treated)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast'
                ]), improved_time_series_forecast)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_not_improved'
                ]), time_series_not_improved)
            np.savetxt(''.join([
                local_settings['models_evaluation_path'],
                'time_series_forecast_results.csv'
            ]),
                       time_series_treated,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(''.join([local_settings['models_path'], 'high_loss_time_serie_model_forecaster_in_block.json']), 'w') \
                    as json_file:
                json_file.write(forecaster_in_block_json)
                json_file.close()
            print('trained model weights and architecture saved')
            print('metadata (results, time_serie with high loss) saved')
            print(
                'forecast improvement done. (high loss time_serie focused) submodule has finished'
            )
        except Exception as submodule_error:
            print('time_series in-block forecast submodule_error: ',
                  submodule_error)
            logger.info(
                'error in forecast of in-block time_series (high_loss_identified_ts_forecast submodule)'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
Example #17
x = Conv1D(filters=64, kernel_size=27, strides=1, padding='SAME', activation='relu')(encoded)
x = UpSampling1D(size=3)(x)
x = Conv1D(filters=32, kernel_size=25, strides=1, padding='SAME', activation='relu')(x)
x = UpSampling1D(size=3)(x)
x = Conv1D(filters=16, kernel_size=23, strides=1, padding='SAME', activation='relu')(x)
x = UpSampling1D(size=3)(x)
x = Conv1D(filters=4, kernel_size=21, strides=1, padding='SAME', activation='relu')(x)
x = UpSampling1D(size=3)(x)
decoded = Conv1D(filters=1, kernel_size=19, strides=1, padding='SAME', activation='sigmoid')(x)
autoencoder = Model(input_signal, decoded)
#autoencoder.compile(optimizer='adadelta',loss='binary_crossentropy',metrics=['accuracy'])
autoencoder.compile(optimizer='sgd',
                    loss='mse',
                    metrics=[metrics.MeanSquaredError(),
                             metrics.Accuracy()])
#autoencoder.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=[metrics.Accuracy()])

#print(noised_nsr,noised_af)

X_noisy = np.concatenate((noised_af, noised_nsr), axis=0)
X_original = np.concatenate((original_af, original_nsr), axis=0)
print('noised X:', X_noisy)
print('original X:', X_original)

#X_noisy = np.random.shuffle(X_noisy).reshape((1280,))
#X_original = np.random.shuffle(X_original)
#print(X_noisy)
#print(X_original)
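
The snippet stops before training; a hypothetical fit call for the denoiser would pair the noisy inputs with the clean targets (epochs, batch size and validation split are assumptions):

autoencoder.fit(X_noisy, X_original,
                epochs=10,
                batch_size=32,
                validation_split=0.1)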
Example #18
    def train_model(self, local_settings, local_raw_unit_sales,
                    local_model_hyperparameters):
        try:
            # loading hyperparameters
            local_days_in_focus = local_model_hyperparameters[
                'days_in_focus_frame']
            local_raw_unit_sales_data = local_raw_unit_sales[:,
                                                             -local_days_in_focus:]
            local_nof_ts = local_raw_unit_sales.shape[0]
            local_forecast_horizon_days = local_settings[
                'forecast_horizon_days']
            local_features_for_each_training = 1
            print(
                'starting neural network - individual time_serie training unit_sale_approach'
            )

            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters[
                'learning_rate']
            local_validation_split = local_model_hyperparameters[
                'validation_split']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(
                    local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(
                    local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [
                local_loss_1, local_loss_2, local_loss_3
            ]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(
                    metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1,
                                                                  l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(
                monitor='loss',
                patience=local_model_hyperparameters['early_stopping_patience']
            )
            local_callbacks = [local_callback1]
            print(
                'building current model: individual_time_serie_acc_freq_LSTM_Dense_ANN'
            )
            local_base_model = tf.keras.Sequential()
            # first layer (LSTM)
            if local_model_hyperparameters['units_layer_1'] > 0:
                local_base_model.add(
                    layers.LSTM(
                        units=local_model_hyperparameters['units_layer_1'],
                        activation=local_model_hyperparameters['activation_1'],
                        input_shape=(
                            local_model_hyperparameters['time_steps_days'],
                            local_features_for_each_training),
                        dropout=float(
                            local_model_hyperparameters['dropout_layer_1']),
                        activity_regularizer=local_activation_regularizer,
                        return_sequences=True))
            # second LSTM layer
            if local_model_hyperparameters['units_layer_2'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.LSTM(
                            units=local_model_hyperparameters['units_layer_2'],
                            activation=local_model_hyperparameters[
                                'activation_2'],
                            activity_regularizer=local_activation_regularizer,
                            dropout=float(
                                local_model_hyperparameters['dropout_layer_2']
                            ),
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.RNN(
                            PeepholeLSTMCell(
                                units=local_model_hyperparameters['units_layer_3'],
                                dropout=float(
                                    local_model_hyperparameters['dropout_layer_3'])),
                            activity_regularizer=local_activation_regularizer,
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(
                    layers.Dense(
                        units=local_model_hyperparameters['units_layer_4'],
                        activation=local_model_hyperparameters['activation_4'],
                        activity_regularizer=local_activation_regularizer))
                local_base_model.add(
                    layers.Dropout(rate=float(
                        local_model_hyperparameters['dropout_layer_4'])))
            # final layer
            local_base_model.add(
                layers.Dense(
                    units=local_model_hyperparameters['units_final_layer']))

            # build and compile model
            local_base_model.build(
                input_shape=(1, local_time_steps_days,
                             local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([
                local_settings['models_path'],
                '_unit_sales_forecaster_template_individual_ts.h5'
            ]))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([
                    local_settings['models_path'],
                    '_unit_sales_forecaster_forecaster_template_individual_ts.json'
            ]), 'w') as json_file:
                json_file.write(local_base_model_json)
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']

            # loading x_train and y_train, as previously done for the third and fourth model trainings
            local_builder = local_bxy_x_y_builder()
            local_x_train, local_y_train = local_builder.build_x_y_train_arrays(
                local_raw_unit_sales, local_settings,
                local_model_hyperparameters)
            local_x_train = local_x_train.reshape(local_x_train.shape[0],
                                                  local_x_train.shape[2],
                                                  local_x_train.shape[1])
            local_y_train = local_y_train.reshape(local_y_train.shape[0],
                                                  local_y_train.shape[2],
                                                  local_y_train.shape[1])

            # start training time_serie by time_serie
            local_y_pred_array = np.zeros(shape=(local_raw_unit_sales.shape[0],
                                                 local_forecast_horizon_days),
                                          dtype=np.dtype('float32'))
            for time_serie in range(local_nof_ts):
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, :, time_serie: time_serie + 1], \
                                   local_y_train[:, :, time_serie: time_serie + 1]
                # training, saving model and storing forecasts
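                # note: local_base_model is not re-initialized inside this loop, so each
                # time_serie continues training from the weights left by the previous one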
                local_base_model.fit(local_x,
                                     local_y,
                                     batch_size=local_batch_size,
                                     epochs=local_epochs,
                                     workers=local_workers,
                                     callbacks=local_callbacks,
                                     shuffle=False,
                                     validation_split=local_validation_split)
                local_base_model.save_weights(''.join([
                    local_settings['models_path'],
                    '/_weights_unit_sales_NN_35_days/_individual_ts_',
                    str(time_serie), '_model_weights_.h5'
                ]))
                local_x_input = local_raw_unit_sales[
                    time_serie:time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = local_x_input.reshape(1,
                                                      local_x_input.shape[1],
                                                      1)
                # print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                # print('x_input:\n', local_x_input)
                # print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                # print('ts:', time_serie)
                # print(local_y_pred)
                local_y_pred_array[time_serie:time_serie + 1, :] = local_y_pred
            local_point_forecast_normalized = local_y_pred_array.reshape(
                (local_y_pred_array.shape[0], local_y_pred_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized.clip(0)

            # save points forecast
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'point_forecast_NN_from_unit_sales_training'
                ]), local_point_forecast)
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'eleventh_model_NN_unit_sales_forecast_data'
                ]), local_point_forecast)
            np.savetxt(''.join([
                local_settings['others_outputs_path'],
                'point_forecast_NN_from_unit_sales_training.csv'
            ]),
                       local_point_forecast,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            print('point forecasts saved to file')
            print(
                'submodule for build, train and forecast time_serie unit_sales individually finished successfully'
            )
            return True, local_point_forecast
        except Exception as submodule_error:
            print(
                'train model and forecast individual time_series units_sales_ submodule_error: ',
                submodule_error)
            logger.info(
                'error in training and forecast-individual time_serie unit_sales_ schema'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False, []
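
A minimal, self-contained sketch of the string-to-object dispatch that the submodule above repeats for losses and metrics; the helper name build_keras_objects is hypothetical, and only the hyperparameter strings actually used above are mapped:

from tensorflow.keras import losses, metrics

def build_keras_objects(names, catalog):
    # instantiate one Keras object per recognized hyperparameter string
    return [catalog[name]() for name in names if name in catalog]

loss_catalog = {'mape': losses.MeanAbsolutePercentageError,
                'mse': losses.MeanSquaredError,
                'mae': losses.MeanAbsoluteError}
metric_catalog = {'rmse': metrics.RootMeanSquaredError,
                  'mse': metrics.MeanSquaredError,
                  'mae': metrics.MeanAbsoluteError,
                  'mape': metrics.MeanAbsolutePercentageError}

# usage: local_losses_list = build_keras_objects([local_loss_1, local_loss_2], loss_catalog)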
Beispiel #19
0
    def forecast(self, local_mse, local_normalized_scaled_unit_sales,
                 local_mean_unit_complete_time_serie, local_raw_unit_sales,
                 local_settings):
        try:
            print(
                'starting high loss (mse in aggregated LSTM) specific time_serie forecast submodule'
            )
            # set training parameters
            with open(''.join([
                    local_settings['hyperparameters_path'],
                    'individual_time_serie_based_model_hyperparameters.json'
            ])) as local_r_json_file:
                model_hyperparameters = json.loads(local_r_json_file.read())
            time_steps_days = int(local_settings['time_steps_days'])
            epochs = int(model_hyperparameters['epochs'])
            batch_size = int(model_hyperparameters['batch_size'])
            workers = int(model_hyperparameters['workers'])
            optimizer_function = model_hyperparameters['optimizer']
            optimizer_learning_rate = model_hyperparameters['learning_rate']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(optimizer_learning_rate)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            losses_list = []
            loss_1 = model_hyperparameters['loss_1']
            loss_2 = model_hyperparameters['loss_2']
            loss_3 = model_hyperparameters['loss_3']
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'mape' in union_settings_losses:
                losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in union_settings_losses:
                losses_list.append(losses.MeanSquaredError())
            if 'mae' in union_settings_losses:
                losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in union_settings_losses:
                losses_list.append(modified_mape())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            metrics_list = []
            metric1 = model_hyperparameters['metrics1']
            metric2 = model_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'rmse' in union_settings_metrics:
                metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in union_settings_metrics:
                metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsolutePercentageError())
            l1 = model_hyperparameters['l1']
            l2 = model_hyperparameters['l2']
            if model_hyperparameters['regularizers_l1_l2'] == 'True':
                activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            else:
                activation_regularizer = None
            nof_features_by_training = 1
            forecaster = tf.keras.Sequential()
            print(
                'current model for specific high loss time_series: Mix_Bid_PeepHole_LSTM_Dense_ANN'
            )
            # first layer (DENSE)
            if model_hyperparameters['units_layer_1'] > 0:
                forecaster.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_1'],
                        activation=model_hyperparameters['activation_1'],
                        activity_regularizer=activation_regularizer))
                forecaster.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_1'])))
            # second LSTM layer
            if model_hyperparameters['units_layer_2'] > 0:
                forecaster.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_2'],
                            activation=model_hyperparameters['activation_2'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_2'])),
                                   return_sequences=False)))
                forecaster.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if model_hyperparameters['units_layer_3'] > 0:
                forecaster.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_3'],
                            activation=model_hyperparameters['activation_3'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_3'])),
                                   return_sequences=False)))
                forecaster.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if model_hyperparameters['units_layer_4'] > 0:
                forecaster.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_4'],
                        activation=model_hyperparameters['activation_4'],
                        activity_regularizer=activation_regularizer))
                forecaster.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_4'])))
            # final layer
            forecaster.add(layers.Dense(units=nof_features_by_training))
            forecaster.compile(optimizer=optimizer_function,
                               loss=losses_list,
                               metrics=metrics_list)
            # forecaster.save(''.join([local_settings['models_path'], '_model_structure_']),
            #                save_format='tf')
            forecaster.build(
                input_shape=(1, local_settings['forecast_horizon_days'], 1))
            forecaster_yaml = forecaster.to_yaml()
            with open(
                    ''.join([local_settings['models_path'],
                             'forecaster.yaml']), 'w') as yaml_file:
                yaml_file.write(forecaster_yaml)
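            # note: Model.to_yaml() was removed in newer TensorFlow releases (TF >= 2.6
            # raises an error there), so to_json() is the supported alternative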
            forecaster_untrained = forecaster
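            # note: this assignment is a plain reference, not a copy; training 'forecaster'
            # below also mutates 'forecaster_untrained', so it does not preserve fresh weights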
            print('specific time_serie model initialized and compiled')
            poor_results_mse_threshold = local_settings[
                'poor_results_mse_threshold']
            nof_selling_days = local_normalized_scaled_unit_sales.shape[1]
            last_learning_day_in_year = np.mod(nof_selling_days, 365)
            max_selling_time = local_settings['max_selling_time']
            days_in_focus_frame = model_hyperparameters['days_in_focus_frame']
            window_input_length = local_settings['moving_window_input_length']
            window_output_length = local_settings[
                'moving_window_output_length']
            moving_window_length = window_input_length + window_output_length
            nof_years = local_settings['number_of_years_ceil']
            time_series_individually_treated = []
            time_series_not_improved = []
            dirname = os.path.dirname(__file__)
            for result in local_mse:
                time_serie = int(result[0])
                file_path = os.path.join(
                    dirname, ''.join([
                        '.', local_settings['models_path'],
                        'specific_time_serie_',
                        str(time_serie), 'model_forecast_.h5'
                    ]))
                if os.path.isfile(
                        file_path) or result[1] <= poor_results_mse_threshold:
                    continue
                # training
                print('\ntime_serie: ', time_serie)
                time_serie_data = local_normalized_scaled_unit_sales[
                    time_serie, :]
                time_serie_data = time_serie_data.reshape(
                    time_serie_data.shape[0])
                nof_selling_days = time_serie_data.shape[0]
                # nof_moving_windows = np.int32(nof_selling_days / moving_window_length)
                remainder_days = np.mod(nof_selling_days, moving_window_length)
                window_first_days = list(
                    range(0, nof_selling_days, moving_window_length))
                length_window_walk = len(window_first_days)
                # last_window_start = window_first_days[length_window_walk - 1]
                if remainder_days != 0:
                    window_first_days[length_window_walk - 1] = \
                        nof_selling_days - moving_window_length
                day_in_year = [
                    last_learning_day_in_year + year * 365
                    for year in range(nof_years)
                ]
                stride_window_walk = model_hyperparameters[
                    'stride_window_walk']
                print('defining x_train')
                x_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    x_train = [
                        time_serie_data[day - time_steps_days:day -
                                        window_output_length]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    x_train = [
                        time_serie_data[day:day + window_input_length]
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    x_train += [
                        time_serie_data[day - window_input_length:day]
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                else:
                    logging.info(
                        "\ntrain_model_input_data_approach is not defined")
                    print('-a problem occurred with the data_approach settings')
                    return False, None
                # convert in both branches, so the later reshape works
                x_train = np.array(x_train)
                print('x_train_shape:  ', x_train.shape)
                print('defining y_train')
                y_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    y_train = [
                        time_serie_data[day - window_output_length:day]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    y_train = [
                        time_serie_data[day:day + window_output_length]
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    y_train += [
                        time_serie_data[day - window_output_length:day]
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                y_train = np.array(y_train)
                factor = local_settings['amplification_factor']
                max_time_serie = np.amax(x_train)
                x_train[x_train > 0] = max_time_serie * factor
                max_time_serie = np.amax(y_train)
                y_train[y_train > 0] = max_time_serie * factor
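                # note: this amplification replaces every positive entry with one constant
                # (the array maximum times amplification_factor), not a proportional scaling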
                print('x_train and y_train built done')

                # define callbacks, checkpoints namepaths
                model_weights = ''.join([
                    local_settings['checkpoints_path'],
                    'model_for_specific_time_serie_',
                    str(time_serie),
                    model_hyperparameters['current_model_name'],
                    "_loss_-{loss:.4f}-.hdf5"
                ])
                callback1 = cb.EarlyStopping(
                    monitor='loss',
                    patience=model_hyperparameters['early_stopping_patience'])
                callback2 = cb.ModelCheckpoint(model_weights,
                                               monitor='loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
                callbacks = [callback1, callback2]
                x_train = x_train.reshape(
                    (np.shape(x_train)[0], np.shape(x_train)[1], 1))
                y_train = y_train.reshape(
                    (np.shape(y_train)[0], np.shape(y_train)[1], 1))
                print('input_shape: ', np.shape(x_train))

                # train for each time_serie
                # check settings on whether to repeat the training
                need_store_time_serie = True
                # load model
                time_series_individually_treated = np.load(''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast.npy'
                ]))
                time_series_individually_treated = time_series_individually_treated.tolist()
                model_name = ''.join([
                    'specific_time_serie_',
                    str(time_serie), 'model_forecast_.h5'
                ])
                model_path = ''.join(
                    [local_settings['models_path'], model_name])
                if os.path.isfile(model_path) and model_hyperparameters[
                        'repeat_one_by_one_training'] == "False":
                    forecaster = models.load_model(model_path,
                                                   custom_objects={
                                                       'modified_mape':
                                                       modified_mape,
                                                       'customized_loss':
                                                       customized_loss
                                                   })
                    need_store_time_serie = False
                elif model_hyperparameters['one_by_one_feature_training_done'] == "False"\
                        or model_hyperparameters['repeat_one_by_one_training'] == "True":
                    forecaster = forecaster_untrained
                    forecaster.fit(x_train,
                                   y_train,
                                   batch_size=batch_size,
                                   epochs=epochs,
                                   workers=workers,
                                   callbacks=callbacks,
                                   shuffle=False)
                    # print summary (informative, though layers reported with output shape 'multiple' reveal little)
                    forecaster.summary()

                # compile model and make forecast
                forecaster.compile(optimizer='adam', loss='mse')
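                # note: recompiling here resets the optimizer state and discards the
                # hyperparameter-driven loss/metric configuration; predict() by itself
                # does not require a compiled model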

                # evaluating model and comparing with aggregated (in-block) LSTM
                print('evaluating the model trained..')
                forecast_horizon_days = local_settings['forecast_horizon_days']
                time_serie_data = time_serie_data.reshape(
                    (1, time_serie_data.shape[0], 1))
                x_input = time_serie_data[:, -forecast_horizon_days:, :]
                y_pred_normalized = forecaster.predict(x_input)
                print('output shape: ', y_pred_normalized.shape)
                y_truth = local_raw_unit_sales[time_serie,
                                               -forecast_horizon_days:]
                y_truth = y_truth.reshape(1, np.shape(y_truth)[0])
                print('y_truth shape:', y_truth.shape)

                # reversing preprocess: rescale, denormalize, reshape
                # inverse reshape
                y_pred_reshaped = y_pred_normalized.reshape(
                    (y_pred_normalized.shape[2], y_pred_normalized.shape[1]))
                print('y_pred_reshaped shape:', y_pred_reshaped.shape)

                # inverse transform (first moving_windows denormalizing and then general rescaling)
                time_serie_data = time_serie_data.reshape(
                    np.shape(time_serie_data)[1], 1)
                print('time_serie data shape: ', np.shape(time_serie_data))
                time_serie_normalized_window_mean = np.mean(
                    time_serie_data[-moving_window_length:])
                print('mean of this time serie (normalized values): ',
                      time_serie_normalized_window_mean)
                local_denormalized_array = window_based_denormalizer(
                    y_pred_reshaped, time_serie_normalized_window_mean,
                    forecast_horizon_days)
                local_point_forecast = general_mean_rescaler(
                    local_denormalized_array,
                    local_mean_unit_complete_time_serie[time_serie],
                    forecast_horizon_days)
                print('rescaled denormalized forecasts array shape: ',
                      local_point_forecast.shape)

                # calculating MSE
                local_error_metric_mse = mean_squared_error(
                    y_truth, local_point_forecast)
                print('time_serie: ', time_serie, '\tMean_Squared_Error: ',
                      local_error_metric_mse)
                if local_error_metric_mse < result[1]:
                    print(
                        'better results with time_serie specific model training'
                    )
                    print('MSE improved from ', result[1], 'to ',
                          local_error_metric_mse)
                    # save models for this time serie
                    forecaster.save(''.join([
                        local_settings['models_path'], 'specific_time_serie_',
                        str(time_serie), 'model_forecast_.h5'
                    ]))
                    print('model for time_serie ', str(time_serie), " saved")
                    if need_store_time_serie:
                        time_series_individually_treated.append(
                            int(time_serie))
                else:
                    print(
                        'no better results with time serie specific model training'
                    )
                    time_series_not_improved.append(int(time_serie))
            time_series_individually_treated = np.array(
                time_series_individually_treated)
            time_series_not_improved = np.array(time_series_not_improved)
            # store data of (individual-approach) time_series forecast successfully improved and those that not
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast'
                ]), time_series_individually_treated)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_not_improved'
                ]), time_series_not_improved)
            print(
                'forecast improvement done. (specific time_serie focused) submodule has finished'
            )
        except Exception as submodule_error:
            print('time_series individual forecast submodule_error: ',
                  submodule_error)
            logger.info(
                'error in forecast of individual (high_loss_identified_ts_forecast submodule)'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
    def train(self, local_settings, local_raw_unit_sales, local_model_hyperparameters, local_time_series_not_improved,
              raw_unit_sales_ground_truth):
        try:
            # data normalization
            local_forecast_horizon_days = local_settings['forecast_horizon_days']
            local_x_train, local_y_train = build_x_y_train_arrays(local_raw_unit_sales, local_settings,
                                                                  local_model_hyperparameters,
                                                                  local_time_series_not_improved)
            local_features_for_each_training = 1
            print('starting neural network - individual time_serie training')
            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters['learning_rate']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [local_loss_1, local_loss_2, local_loss_3]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1, l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(monitor='loss',
                                               patience=local_model_hyperparameters['early_stopping_patience'])
            local_callbacks = [local_callback1]
            print('building current model: Mix_Bid_PeepHole_LSTM_Dense_ANN')
            local_base_model = tf.keras.Sequential()
            # first layer (DENSE)
            if local_model_hyperparameters['units_layer_1'] > 0:
                # strictly, dim 1 of input_shape is time_steps_days (dim 0 is the batch size: None)
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_1'],
                                                  activation=local_model_hyperparameters['activation_1'],
                                                  input_shape=(local_time_steps_days,
                                                               local_features_for_each_training),
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_1'])))
            # second layer
            if local_model_hyperparameters['units_layer_2'] > 0:
                if local_model_hyperparameters['units_layer_1'] == 0:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         input_shape=(local_time_steps_days,
                                                      local_features_for_each_training),
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                else:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                # local_base_model.add(RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_3'],
                                                  activation=local_model_hyperparameters['activation_3'],
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_3'])))
            # fourth layer
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(layers.RNN(
                    PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_4'],
                                     activation=local_model_hyperparameters['activation_4'],
                                     dropout=float(local_model_hyperparameters['dropout_layer_4']))))
            local_base_model.add(layers.Dense(units=local_forecast_horizon_days))

            # build and compile model
            local_base_model.build(input_shape=(1, local_time_steps_days, local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([local_settings['models_path'],
                                           'generic_forecaster_template_individual_ts.h5']))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([local_settings['models_path'],
                               'generic_forecaster_template_individual_ts.json']), 'w') as json_file:
                json_file.write(local_base_model_json)
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']
            # all input data in the correct type
            local_x_train = np.array(local_x_train, dtype=np.dtype('float32'))
            local_y_train = np.array(local_y_train, dtype=np.dtype('float32'))
            local_raw_unit_sales = np.array(local_raw_unit_sales, dtype=np.dtype('float32'))
            # specific time_serie models training loop
            local_y_pred_list = []
            local_nof_time_series = local_settings['number_of_time_series']
            remainder = np.array([time_serie for time_serie in range(local_nof_time_series)
                                  if time_serie not in local_time_series_not_improved])
            for time_serie in remainder:
                # ----------------------key_point---------------------------------------------------------------------
                # take note that on each loop iteration the weights and internal states of the previous training are conserved
                # that probably saves time, and for (aggregated or ordered) connected time series it may improve results
                # ----------------------key_point---------------------------------------------------------------------
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, time_serie: time_serie + 1, :], \
                                   local_y_train[:, time_serie: time_serie + 1, :]
                local_x = local_x.reshape(local_x.shape[0], local_x.shape[2], 1)
                local_y = local_y.reshape(local_y.shape[0], local_y.shape[2], 1)
                # training, saving model and storing forecasts
                local_base_model.fit(local_x, local_y, batch_size=local_batch_size, epochs=local_epochs,
                                     workers=local_workers, callbacks=local_callbacks, shuffle=False)
                local_base_model.save_weights(''.join([local_settings['models_path'],
                                                       '/weights_last_year/_individual_ts_',
                                                       str(time_serie), '_model_weights_.h5']))
                local_x_input = local_raw_unit_sales[time_serie: time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = cof_zeros(local_x_input, local_settings)
                local_x_input = local_x_input.reshape(1, local_x_input.shape[1], 1)
                print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                print('x_input:\n', local_x_input)
                print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                local_y_pred = cof_zeros(local_y_pred, local_settings)
                if local_settings['mini_ts_evaluator'] == "True" and \
                        local_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
                    mini_evaluator = mini_evaluator_submodule()
                    evaluation = mini_evaluator.evaluate_ts_forecast(
                            raw_unit_sales_ground_truth[time_serie, -local_forecast_horizon_days:], local_y_pred)
                    print('ts:', time_serie, 'with cof_zeros ts mse:', evaluation)
                else:
                    print('ts:', time_serie)
                print(local_y_pred)
                local_y_pred_list.append(local_y_pred)
            local_point_forecast_array = np.array(local_y_pred_list)
            local_point_forecast_normalized = local_point_forecast_array.reshape(
                (local_point_forecast_array.shape[0], local_point_forecast_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized
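            # note: unlike the unit_sales submodule above, negative forecast values are
            # not clipped to zero here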

            # save points forecast
            np.savetxt(''.join([local_settings['others_outputs_path'], 'point_forecast_NN_LSTM_simulation.csv']),
                       local_point_forecast, fmt='%10.15f', delimiter=',', newline='\n')
            print('point forecasts saved to file')
            print('submodule for build, train and forecast time_serie individually finished successfully')
            return True
        except Exception as submodule_error:
            print('train model and forecast individual time_series submodule_error: ', submodule_error)
            logger.info('error in training and forecast-individual time_serie schema')
            logger.error(str(submodule_error), exc_info=True)
            return False
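
Neither build_x_y_train_arrays nor local_bxy_x_y_builder is shown in these examples; the sketch below is one plausible moving-window builder under the same moving_window_input_length / moving_window_output_length settings. The function name, the non-overlapping stride, and the stacking order are assumptions, not the original implementation:

import numpy as np

def build_x_y_train_arrays_sketch(raw_unit_sales, local_settings):
    # slide a fixed-length window over every time_serie: the first part of each
    # window is the model input, the remainder is the training target
    input_len = local_settings['moving_window_input_length']
    output_len = local_settings['moving_window_output_length']
    window_len = input_len + output_len
    x_windows, y_windows = [], []
    nof_days = raw_unit_sales.shape[1]
    for first_day in range(0, nof_days - window_len + 1, window_len):
        window = raw_unit_sales[:, first_day:first_day + window_len]
        x_windows.append(window[:, :input_len])
        y_windows.append(window[:, input_len:])
    # resulting shape: (nof_windows, nof_time_series, part_length)
    return np.array(x_windows), np.array(y_windows)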