Example #1
def main():
    #file = r'./db/fucDatasetReg_1F_NoLinear.csv'
    #file = r'./db/fucDatasetReg_2F.csv'
    file = r'./db/fucDatasetReg_3F_1000.csv'
    x_train, x_test, y_train, y_test = getCsvDataset(file)

    lr = 1e-3
    EPOCHS = 200
    # optimizer = optimizerTf(lr=lr)
    # losses, _ = trainModel(x_train, y_train, optimizer, epochs=EPOCHS)
    # plotLoss(losses)

    opts = []
    # fast group
    opts.append((optimizers.SGD(learning_rate=lr), 'SGD'))
    opts.append((optimizers.RMSprop(learning_rate=lr), 'RMSprop'))
    opts.append((optimizers.Adam(learning_rate=lr), 'Adam'))
    opts.append((optimizers.Adamax(learning_rate=lr), 'Adamax'))
    opts.append((optimizers.Nadam(learning_rate=lr), 'Nadam'))
    # slow group
    opts.append((optimizers.Adadelta(learning_rate=lr), 'Adadelta'))
    opts.append((optimizers.Adagrad(learning_rate=lr), 'Adagrad'))
    opts.append((optimizers.Ftrl(learning_rate=lr), 'Ftrl'))

    lossesDict = {}
    for opti, name in opts:
        losses, _ = trainModel(x_train, y_train, opti, epochs=EPOCHS)
        lossesDict[name] = losses
        #print(name, losses)

    plotLossDict(lossesDict)
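
getCsvDataset, trainModel, and plotLossDict come from elsewhere in this project and are not shown (the snippet also assumes "from tensorflow.keras import optimizers"). A minimal sketch of what the helpers might look like, assuming a CSV whose last column is the regression target and a small dense Keras model; the names, shapes, and model below are assumptions, not the project's real code:

import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers

def getCsvDataset(file, test_size=0.2):
    # hypothetical reader: last CSV column is the target, the rest are features
    data = pd.read_csv(file).values.astype('float32')
    x, y = data[:, :-1], data[:, -1:]
    return train_test_split(x, y, test_size=test_size, random_state=0)

def trainModel(x_train, y_train, optimizer, epochs=200):
    # hypothetical trainer: a small dense regressor, returns the loss curve
    model = tf.keras.Sequential([
        layers.Dense(32, activation='relu'),
        layers.Dense(1),
    ])
    model.compile(optimizer=optimizer, loss='mse')
    history = model.fit(x_train, y_train, epochs=epochs, verbose=0)
    return history.history['loss'], model

def plotLossDict(lossesDict):
    # one loss curve per optimizer, log scale to make slow optimizers visible
    for name, losses in lossesDict.items():
        plt.plot(losses, label=name)
    plt.yscale('log')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()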
Example #2
    def setOptimizer(self, config):
        configOptimizer = config["model"]["optimizer"].lower()

        # note: there is no branch for Adam, and the original "Optimizer"
        # branch is dropped here because the abstract base class
        # optimizers.Optimizer cannot be instantiated directly in TF 2.x
        if configOptimizer == "adadelta":
            self.optimizer = optimizers.Adadelta()
        elif configOptimizer == "adagrad":
            self.optimizer = optimizers.Adagrad()
        elif configOptimizer == "adamax":
            self.optimizer = optimizers.Adamax()
        elif configOptimizer == "ftrl":
            self.optimizer = optimizers.Ftrl()
        elif configOptimizer == "sgd":
            self.optimizer = optimizers.SGD()
        elif configOptimizer == "nadam":
            self.optimizer = optimizers.Nadam()
        elif configOptimizer == "rmsprop":
            self.optimizer = optimizers.RMSprop()
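
Since the dispatch keys are just lowercase strings, the same logic can also be written table-driven. A sketch; adding Adam here is an assumption, and the abstract optimizers.Optimizer base class is deliberately left out because it cannot be instantiated directly:

from tensorflow.keras import optimizers

# lowercase config value -> optimizer class
OPTIMIZER_CLASSES = {
    'adadelta': optimizers.Adadelta,
    'adagrad': optimizers.Adagrad,
    'adam': optimizers.Adam,
    'adamax': optimizers.Adamax,
    'ftrl': optimizers.Ftrl,
    'nadam': optimizers.Nadam,
    'rmsprop': optimizers.RMSprop,
    'sgd': optimizers.SGD,
}

def set_optimizer(config):
    name = config["model"]["optimizer"].lower()
    try:
        return OPTIMIZER_CLASSES[name]()
    except KeyError:
        raise ValueError('unknown optimizer: ' + name)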
Example #3
    def forecast(self, local_mse, local_normalized_scaled_unit_sales,
                 local_mean_unit_complete_time_serie, local_raw_unit_sales,
                 local_settings):
        try:
            print(
                'starting high loss (mse in aggregated LSTM) specific time_serie forecast submodule'
            )
            # set training parameters
            with open(''.join([local_settings['hyperparameters_path'],
                               'individual_time_serie_based_model_hyperparameters.json'])) \
                    as local_r_json_file:
                model_hyperparameters = json.loads(local_r_json_file.read())
            time_steps_days = int(local_settings['time_steps_days'])
            epochs = int(model_hyperparameters['epochs'])
            batch_size = int(model_hyperparameters['batch_size'])
            workers = int(model_hyperparameters['workers'])
            optimizer_function = model_hyperparameters['optimizer']
            optimizer_learning_rate = model_hyperparameters['learning_rate']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(optimizer_learning_rate)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            losses_list = []
            loss_1 = model_hyperparameters['loss_1']
            loss_2 = model_hyperparameters['loss_2']
            loss_3 = model_hyperparameters['loss_3']
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'mape' in union_settings_losses:
                losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in union_settings_losses:
                losses_list.append(losses.MeanSquaredError())
            if 'mae' in union_settings_losses:
                losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in union_settings_losses:
                losses_list.append(modified_mape())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            metrics_list = []
            metric1 = model_hyperparameters['metrics1']
            metric2 = model_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'rmse' in union_settings_metrics:
                metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in union_settings_metrics:
                metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsolutePercentageError())
            l1 = model_hyperparameters['l1']
            l2 = model_hyperparameters['l2']
            if model_hyperparameters['regularizers_l1_l2'] == 'True':
                activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            else:
                activation_regularizer = None
            nof_features_by_training = 1
            forecaster = tf.keras.Sequential()
            print(
                'current model for specific high loss time_series: Mix_Bid_PeepHole_LSTM_Dense_ANN'
            )
            # first layer (DENSE)
            if model_hyperparameters['units_layer_1'] > 0:
                forecaster.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_1'],
                        activation=model_hyperparameters['activation_1'],
                        activity_regularizer=activation_regularizer))
                forecaster.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_1'])))
            # second LSTM layer
            if model_hyperparameters['units_layer_2'] > 0:
                forecaster.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_2'],
                            activation=model_hyperparameters['activation_2'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_2'])),
                                   return_sequences=False)))
                forecaster.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if model_hyperparameters['units_layer_3'] > 0:
                forecaster.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_3'],
                            activation=model_hyperparameters['activation_3'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_3'])),
                                   return_sequences=False)))
                forecaster.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if model_hyperparameters['units_layer_4'] > 0:
                forecaster.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_4'],
                        activation=model_hyperparameters['activation_4'],
                        activity_regularizer=activation_regularizer))
                forecaster.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_4'])))
            # final layer
            forecaster.add(layers.Dense(units=nof_features_by_training))
            forecaster.compile(optimizer=optimizer_function,
                               loss=losses_list,
                               metrics=metrics_list)
            # forecaster.save(''.join([local_settings['models_path'], '_model_structure_']),
            #                 save_format='tf')
            forecaster.build(
                input_shape=(1, local_settings['forecast_horizon_days'], 1))
            forecaster_yaml = forecaster.to_yaml()
            with open(
                    ''.join([local_settings['models_path'],
                             'forecaster.yaml']), 'w') as yaml_file:
                yaml_file.write(forecaster_yaml)
            forecaster_untrained = forecaster
            print('specific time_serie model initialized and compiled')
            poor_results_mse_threshold = local_settings[
                'poor_results_mse_threshold']
            nof_selling_days = local_normalized_scaled_unit_sales.shape[1]
            last_learning_day_in_year = np.mod(nof_selling_days, 365)
            max_selling_time = local_settings['max_selling_time']
            days_in_focus_frame = model_hyperparameters['days_in_focus_frame']
            window_input_length = local_settings['moving_window_input_length']
            window_output_length = local_settings[
                'moving_window_output_length']
            moving_window_length = window_input_length + window_output_length
            nof_years = local_settings['number_of_years_ceil']
            time_series_individually_treated = []
            time_series_not_improved = []
            dirname = os.path.dirname(__file__)
            for result in local_mse:
                time_serie = int(result[0])
                file_path = os.path.join(
                    dirname, ''.join([
                        '.', local_settings['models_path'],
                        'specific_time_serie_',
                        str(time_serie), 'model_forecast_.h5'
                    ]))
                if os.path.isfile(
                        file_path) or result[1] <= poor_results_mse_threshold:
                    continue
                # training
                print('\ntime_serie: ', time_serie)
                time_serie_data = local_normalized_scaled_unit_sales[
                    time_serie, :]
                time_serie_data = time_serie_data.reshape(
                    time_serie_data.shape[0])
                nof_selling_days = time_serie_data.shape[0]
                # nof_moving_windows = np.int32(nof_selling_days / moving_window_length)
                remainder_days = np.mod(nof_selling_days, moving_window_length)
                window_first_days = list(
                    range(0, nof_selling_days, moving_window_length))
                length_window_walk = len(window_first_days)
                # last_window_start = window_first_days[length_window_walk - 1]
                if remainder_days != 0:
                    window_first_days[
                        length_window_walk -
                        1] = nof_selling_days - moving_window_length
                day_in_year = [last_learning_day_in_year + year * 365
                               for year in range(nof_years)]
                stride_window_walk = model_hyperparameters[
                    'stride_window_walk']
                print('defining x_train')
                x_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    x_train = [
                        time_serie_data[day - time_steps_days:
                                        day - window_output_length]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    x_train = [
                        time_serie_data[day:day + window_input_length]
                        for last_day in day_in_year[:-1]
                        for day in range(last_day + window_output_length,
                                         last_day + window_output_length -
                                         days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    x_train += [
                        time_serie_data[day - window_input_length:day]
                        for last_day in day_in_year[-1:]
                        for day in range(last_day,
                                         last_day - days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                else:
                    logging.info(
                        "\ntrain_model_input_data_approach is not defined")
                    print('-a problem occurs with the data_approach settings')
                    return False, None
                # convert once here so both input-data approaches yield an array
                # (the original converted only in the "focused" branch, so the
                # "all" branch would have crashed at the later reshape)
                x_train = np.array(x_train)
                print('x_train_shape:  ', x_train.shape)
                print('defining y_train')
                y_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    y_train = [
                        time_serie_data[day - window_output_length:day]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    y_train = [
                        time_serie_data[day:day + window_output_length]
                        for last_day in day_in_year[:-1]
                        for day in range(last_day + window_output_length,
                                         last_day + window_output_length -
                                         days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    y_train += [
                        time_serie_data[day - window_output_length:day]
                        for last_day in day_in_year[-1:]
                        for day in range(last_day,
                                         last_day - days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                y_train = np.array(y_train)
                factor = local_settings['amplification_factor']
                max_time_serie = np.amax(x_train)
                x_train[x_train > 0] = max_time_serie * factor
                max_time_serie = np.amax(y_train)
                y_train[y_train > 0] = max_time_serie * factor
                print('x_train and y_train built done')

                # define callbacks, checkpoints namepaths
                model_weights = ''.join([
                    local_settings['checkpoints_path'],
                    'model_for_specific_time_serie_',
                    str(time_serie),
                    model_hyperparameters['current_model_name'],
                    "_loss_-{loss:.4f}-.hdf5"
                ])
                callback1 = cb.EarlyStopping(
                    monitor='loss',
                    patience=model_hyperparameters['early_stopping_patience'])
                callback2 = cb.ModelCheckpoint(model_weights,
                                               monitor='loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
                callbacks = [callback1, callback2]
                x_train = x_train.reshape(
                    (np.shape(x_train)[0], np.shape(x_train)[1], 1))
                y_train = y_train.reshape(
                    (np.shape(y_train)[0], np.shape(y_train)[1], 1))
                print('input_shape: ', np.shape(x_train))

                # train for each time_serie
                # check settings for repeat or not the training
                need_store_time_serie = True
                # load model
                # note: this reload happens on every loop iteration, so appends
                # to the list from earlier iterations are discarded; loading
                # once before the loop is probably what was intended
                time_series_individually_treated = np.load(''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast.npy'
                ]))
                time_series_individually_treated = \
                    time_series_individually_treated.tolist()
                model_name = ''.join([
                    'specific_time_serie_',
                    str(time_serie), 'model_forecast_.h5'
                ])
                model_path = ''.join(
                    [local_settings['models_path'], model_name])
                if os.path.isfile(model_path) and model_hyperparameters[
                        'repeat_one_by_one_training'] == "False":
                    forecaster = models.load_model(model_path,
                                                   custom_objects={
                                                       'modified_mape':
                                                       modified_mape,
                                                       'customized_loss':
                                                       customized_loss
                                                   })
                    need_store_time_serie = False
                elif model_hyperparameters['one_by_one_feature_training_done'] == "False"\
                        or model_hyperparameters['repeat_one_by_one_training'] == "True":
                    forecaster = forecaster_untrained
                    forecaster.fit(x_train,
                                   y_train,
                                   batch_size=batch_size,
                                   epochs=epochs,
                                   workers=workers,
                                   callbacks=callbacks,
                                   shuffle=False)
                    # print the summary (informative; if it reports "shape = multiple" it is not very useful)
                    forecaster.summary()

                # re-compile before forecasting (predict does not strictly require it)
                forecaster.compile(optimizer='adam', loss='mse')

                # evaluating model and comparing with aggregated (in-block) LSTM
                print('evaluating the model trained..')
                forecast_horizon_days = local_settings['forecast_horizon_days']
                time_serie_data = time_serie_data.reshape(
                    (1, time_serie_data.shape[0], 1))
                x_input = time_serie_data[:, -forecast_horizon_days:, :]
                y_pred_normalized = forecaster.predict(x_input)
                print('output shape: ', y_pred_normalized.shape)
                y_truth = local_raw_unit_sales[time_serie,
                                               -forecast_horizon_days:]
                y_truth = y_truth.reshape(1, np.shape(y_truth)[0])
                print('y_truth shape:', y_truth.shape)

                # reversing preprocess: rescale, denormalize, reshape
                # inverse reshape
                y_pred_reshaped = y_pred_normalized.reshape(
                    (y_pred_normalized.shape[2], y_pred_normalized.shape[1]))
                print('y_pred_reshaped shape:', y_pred_reshaped.shape)

                # inverse transform (first moving_windows denormalizing and then general rescaling)
                time_serie_data = time_serie_data.reshape(
                    np.shape(time_serie_data)[1], 1)
                print('time_serie data shape: ', np.shape(time_serie_data))
                time_serie_normalized_window_mean = np.mean(
                    time_serie_data[-moving_window_length:])
                print('mean of this time serie (normalized values): ',
                      time_serie_normalized_window_mean)
                local_denormalized_array = window_based_denormalizer(
                    y_pred_reshaped, time_serie_normalized_window_mean,
                    forecast_horizon_days)
                local_point_forecast = general_mean_rescaler(
                    local_denormalized_array,
                    local_mean_unit_complete_time_serie[time_serie],
                    forecast_horizon_days)
                print('rescaled denormalized forecasts array shape: ',
                      local_point_forecast.shape)

                # calculating MSE
                local_error_metric_mse = mean_squared_error(
                    y_truth, local_point_forecast)
                print('time_serie: ', time_serie, '\tMean_Squared_Error: ',
                      local_error_metric_mse)
                if local_error_metric_mse < result[1]:
                    print(
                        'better results with time_serie specific model training'
                    )
                    print('MSE improved from ', result[1], 'to ',
                          local_error_metric_mse)
                    # save models for this time serie
                    forecaster.save(''.join([
                        local_settings['models_path'], 'specific_time_serie_',
                        str(time_serie), 'model_forecast_.h5'
                    ]))
                    print('model for time_serie ', str(time_serie), " saved")
                    if need_store_time_serie:
                        time_series_individually_treated.append(
                            int(time_serie))
                else:
                    print(
                        'no better results with time serie specific model training'
                    )
                    time_series_not_improved.append(int(time_serie))
            time_series_individually_treated = np.array(
                time_series_individually_treated)
            time_series_not_improved = np.array(time_series_not_improved)
            # store data of (individual-approach) time_series forecast successfully improved and those that not
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast'
                ]), time_series_individually_treated)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_not_improved'
                ]), time_series_not_improved)
            print(
                'forecast improvement done. (specific time_serie focused) submodule has finished'
            )
        except Exception as submodule_error:
            print('time_series individual forecast submodule_error: ',
                  submodule_error)
            logger.info(
                'error in forecast of individual (high_loss_identified_ts_forecast submodule)'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
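
Examples #3 and #5 below lean on names imported elsewhere in their module (modified_mape, customized_loss, window_based_denormalizer, general_mean_rescaler, and logger are project-local and not reconstructed here). A plausible import block for the rest, offered as an assumption; in particular, PeepholeLSTMCell lived under tf.keras.experimental in the TF 2.x releases this code appears to target:

# plausible imports for Examples #3 and #5 (an assumption; the module's
# real import block is not shown)
import json
import logging
import os

import numpy as np
import tensorflow as tf
from sklearn.metrics import mean_squared_error
from tensorflow.keras import callbacks as cb
from tensorflow.keras import layers, losses, metrics, models, optimizers, regularizers
from tensorflow.keras.experimental import PeepholeLSTMCell
from tensorflow.keras.layers import RepeatVector, TimeDistributed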
Example #4
def parameter_update(theta_0, ln_q, ln_1_q, ln_s, mu, sigma, n_u, n_y, jitter,
                     sample_size_w=4096, batch_size=None, optimizer_choice='adam',
                     lr=1e-3, max_batch=1024, factr=1e-8, plot_loss=True):

    batch_L = []

    if optimizer_choice == 'adam':
        optimizer = optimizers.Adam(learning_rate=lr)
    elif optimizer_choice == 'adadelta':
        optimizer = optimizers.Adadelta(learning_rate=lr)
    elif optimizer_choice == 'adagrad':
        optimizer = optimizers.Adagrad(learning_rate=lr)
    elif optimizer_choice == 'adamax':
        optimizer = optimizers.Adamax(learning_rate=lr)
    elif optimizer_choice == 'ftrl':
        optimizer = optimizers.Ftrl(learning_rate=lr)
    elif optimizer_choice == 'nadam':
        optimizer = optimizers.Nadam(learning_rate=lr)
    elif optimizer_choice == 'rmsprop':
        optimizer = optimizers.RMSprop(learning_rate=lr)
    elif optimizer_choice == 'sgd':
        optimizer = optimizers.SGD(learning_rate=lr)
    else:
        # fail early instead of hitting a NameError when optimizer is used below
        raise ValueError('unknown optimizer_choice: ' + optimizer_choice)

    theta = tf.Variable(theta_0)

    fin_theta = theta_0.copy()

    if batch_size is None:
        batch_size = int(numpy.floor(n_y / 2))

    batch_idx = numpy.arange(0, n_y, batch_size)

    batch_num = len(batch_idx) - 1

    converge = False

    for i in range(0, int(1e8)):

        for j in range(0, batch_num):

            raw_sample_w = tf.random.normal((sample_size_w, 3 * (batch_idx[j + 1] - batch_idx[j])), dtype='float64')

            _, g_t = get_obj_g(theta,
                               ln_q[batch_idx[j]:batch_idx[j + 1]],
                               ln_1_q[batch_idx[j]:batch_idx[j + 1]],
                               ln_s[batch_idx[j]:batch_idx[j + 1]],
                               mu[batch_idx[j]:batch_idx[j + 1]],
                               sigma[batch_idx[j]:batch_idx[j + 1]],
                               n_u, (batch_idx[j + 1] - batch_idx[j]),
                               raw_sample_w, jitter)

            optimizer.apply_gradients(zip([g_t], [theta]))

            theta = theta.numpy()

            theta[:2] = numpy.abs(theta[:2])

            theta[:2][theta[:2] <= 1e-8] = 1e-8

            theta[5:8][theta[5:8] <= 1e-8] = 1e-8


            raw_sample_w = tf.random.normal((sample_size_w, 3 * numpy.shape(ln_q)[0]), dtype='float64')

            L_t = vi_obj(theta,
                         ln_q,
                         ln_1_q,
                         ln_s,
                         mu,
                         sigma,
                         n_u,
                         numpy.shape(ln_q)[0],
                         raw_sample_w, jitter)

            tmp_L = (L_t.numpy() / numpy.shape(ln_q)[0])

            if len(batch_L) >= 2:
                if tmp_L < numpy.min(batch_L[:-1]):
                    fin_theta = theta.copy()

            theta = tf.Variable(theta)

            if numpy.mod(len(batch_L), 16) == 0:

                print('=============================================================================')

                print(theta[:8])
                print(theta[-6:])

                print('Batch: ' + str(len(batch_L)) + ', optimiser: ' + optimizer_choice + ', Loss: ' + str(tmp_L))

                print('=============================================================================')

            batch_L.append(numpy.min(tmp_L))

            if plot_loss:
                fig = matplotlib.pyplot.figure(figsize=(16, 9))

                matplotlib.pyplot.plot(numpy.arange(0, len(batch_L)),
                                       numpy.array(batch_L))

                matplotlib.pyplot.xlabel('Batches')

                matplotlib.pyplot.ylabel('Loss')

                matplotlib.pyplot.title('Learning Rate: ' + str(lr))

                matplotlib.pyplot.grid(True)

                matplotlib.pyplot.ylim([numpy.min(batch_L), numpy.median(batch_L)])

                try:
                    fig.savefig('./' + str(n_u) + '_' + optimizer_choice + '_' + str(lr) + '.png', bbox_inches='tight')
                except OSError:
                    # PermissionError is a subclass of OSError, so one handler covers both
                    pass

                matplotlib.pyplot.close(fig)

            if len(batch_L) > batch_num * 16:
                previous_opt = numpy.min(batch_L[:-batch_num * 16])

                current_opt = numpy.min(batch_L[-batch_num * 16:])

                if numpy.mod(len(batch_L), 16) == 0:
                    print('Previous And Recent Top Averaged Loss Is:')
                    print(numpy.hstack([previous_opt, current_opt]))

                if previous_opt - current_opt <= numpy.abs(previous_opt * factr):
                    converge = True
                    break

                if len(batch_L) >= max_batch:
                    converge = True
                    break

        per_idx = numpy.random.permutation(n_y)

        ln_q = ln_q[per_idx]

        ln_1_q = ln_1_q[per_idx]

        ln_s = ln_s[per_idx]

        mu = mu[per_idx]

        sigma = sigma[per_idx]

        if converge:
            break

    return fin_theta
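
A hedged usage sketch for parameter_update above: every shape and value below is an assumption chosen for illustration (a 14-element float64 parameter vector and per-observation arrays of length n_y, as the slicing in the function suggests), and get_obj_g and vi_obj are objective helpers from the surrounding project that are not shown:

import numpy

# hypothetical inputs; all shapes are assumptions for illustration
n_u, n_y = 4, 512
rng = numpy.random.default_rng(0)
theta_0 = rng.standard_normal(14)
ln_q = rng.standard_normal(n_y)
ln_1_q = rng.standard_normal(n_y)
ln_s = rng.standard_normal(n_y)
mu = rng.standard_normal(n_y)
sigma = numpy.abs(rng.standard_normal(n_y))

fin_theta = parameter_update(theta_0, ln_q, ln_1_q, ln_s, mu, sigma,
                             n_u, n_y, jitter=1e-6, batch_size=128,
                             optimizer_choice='adam', lr=1e-3,
                             plot_loss=False)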
Example #5
    def forecast(self, local_mse, local_normalized_scaled_unit_sales,
                 local_mean_unit_complete_time_serie, local_raw_unit_sales,
                 local_settings):
        try:
            print(
                'starting high loss (mse in previous LSTM) time_series in-block forecast submodule'
            )
            # set training parameters
            with open(''.join([local_settings['hyperparameters_path'],
                               'in_block_time_serie_based_model_hyperparameters.json'])) \
                    as local_r_json_file:
                model_hyperparameters = json.loads(local_r_json_file.read())
            local_time_series_group = np.load(''.join(
                [local_settings['train_data_path'], 'time_serie_group.npy']),
                                              allow_pickle=True)
            time_steps_days = int(local_settings['time_steps_days'])
            epochs = int(model_hyperparameters['epochs'])
            batch_size = int(model_hyperparameters['batch_size'])
            workers = int(model_hyperparameters['workers'])
            optimizer_function = model_hyperparameters['optimizer']
            optimizer_learning_rate = model_hyperparameters['learning_rate']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(optimizer_learning_rate)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            losses_list = []
            loss_1 = model_hyperparameters['loss_1']
            loss_2 = model_hyperparameters['loss_2']
            loss_3 = model_hyperparameters['loss_3']
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'mape' in union_settings_losses:
                losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in union_settings_losses:
                losses_list.append(losses.MeanSquaredError())
            if 'mae' in union_settings_losses:
                losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in union_settings_losses:
                losses_list.append(modified_mape())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            metrics_list = []
            metric1 = model_hyperparameters['metrics1']
            metric2 = model_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'rmse' in union_settings_metrics:
                metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in union_settings_metrics:
                metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsolutePercentageError())
            l1 = model_hyperparameters['l1']
            l2 = model_hyperparameters['l2']
            if model_hyperparameters['regularizers_l1_l2'] == 'True':
                activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            else:
                activation_regularizer = None

            # searching for time_series with high loss forecast
            time_series_treated = []
            poor_results_mse_threshold = local_settings[
                'poor_results_mse_threshold']
            poor_result_time_serie_list = []
            for result in local_mse:
                if result[1] > poor_results_mse_threshold:
                    poor_result_time_serie_list.append(int(result[0]))
            # nof_features_for_training = local_normalized_scaled_unit_sales.shape[0]
            nof_features_for_training = len(poor_result_time_serie_list)
            # creating model
            forecaster_in_block = tf.keras.Sequential()
            print(
                'current model for specific high loss time_series: Mix_Bid_PeepHole_LSTM_Dense_ANN'
            )
            # first layer (DENSE)
            if model_hyperparameters['units_layer_1'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_1'],
                        activation=model_hyperparameters['activation_1'],
                        input_shape=(model_hyperparameters['time_steps_days'],
                                     nof_features_for_training),
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_1'])))
            # second LSTM layer
            if model_hyperparameters['units_layer_2'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_2'],
                            activation=model_hyperparameters['activation_2'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_2'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if model_hyperparameters['units_layer_3'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_3'],
                            activation=model_hyperparameters['activation_3'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_3'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if model_hyperparameters['units_layer_4'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_4'],
                        activation=model_hyperparameters['activation_4'],
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_4'])))
            # final layer
            forecaster_in_block.add(
                TimeDistributed(layers.Dense(units=nof_features_for_training)))
            # forecaster_in_block.save(''.join([local_settings['models_path'], '_model_structure_']),
            #                 save_format='tf')
            forecast_horizon_days = local_settings['forecast_horizon_days']
            forecaster_in_block.build(input_shape=(1, forecast_horizon_days,
                                                   nof_features_for_training))
            forecaster_in_block.compile(optimizer=optimizer_function,
                                        loss=losses_list,
                                        metrics=metrics_list)
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(
                    ''.join([
                        local_settings['models_path'],
                        'forecaster_in_block.json'
                    ]), 'w') as json_file:
                json_file.write(forecaster_in_block_json)
            forecaster_in_block_untrained = forecaster_in_block
            print('specific time_serie model initialized and compiled')
            nof_selling_days = local_normalized_scaled_unit_sales.shape[1]
            last_learning_day_in_year = np.mod(nof_selling_days, 365)
            max_selling_time = local_settings['max_selling_time']
            days_in_focus_frame = model_hyperparameters['days_in_focus_frame']
            window_input_length = local_settings['moving_window_input_length']
            window_output_length = local_settings[
                'moving_window_output_length']
            moving_window_length = window_input_length + window_output_length
            nof_years = local_settings['number_of_years_ceil']

            # training
            # time_serie_data = local_normalized_scaled_unit_sales
            nof_poor_result_time_series = len(poor_result_time_serie_list)
            time_serie_data = np.zeros(shape=(nof_poor_result_time_series,
                                              max_selling_time))
            for time_serie_iterator, time_serie in enumerate(
                    poor_result_time_serie_list):
                time_serie_data[time_serie_iterator, :] = \
                    local_normalized_scaled_unit_sales[time_serie, :]
            if local_settings['repeat_training_in_block'] == "True":
                print(
                    'starting in-block training of model for high_loss time_series in previous model'
                )
                nof_selling_days = time_serie_data.shape[1]
                # nof_moving_windows = np.int32(nof_selling_days / moving_window_length)
                remainder_days = np.mod(nof_selling_days, moving_window_length)
                window_first_days = list(
                    range(0, nof_selling_days, moving_window_length))
                length_window_walk = len(window_first_days)
                # last_window_start = window_first_days[length_window_walk - 1]
                if remainder_days != 0:
                    window_first_days[
                        length_window_walk -
                        1] = nof_selling_days - moving_window_length
                day_in_year = [last_learning_day_in_year + year * 365
                               for year in range(nof_years)]
                stride_window_walk = model_hyperparameters[
                    'stride_window_walk']
                print('defining x_train')
                x_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    x_train = [
                        time_serie_data[:, day - time_steps_days:
                                        day - window_output_length]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    x_train = [
                        time_serie_data[:, day:day + time_steps_days]
                        for last_day in day_in_year[:-1]
                        for day in range(last_day + window_output_length,
                                         last_day + window_output_length -
                                         days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    x_train += [
                        np.concatenate(
                            (time_serie_data[:, day - window_output_length:day],
                             np.zeros(shape=(nof_poor_result_time_series,
                                             time_steps_days -
                                             window_output_length))),
                            axis=1)
                        for last_day in day_in_year[-1:]
                        for day in range(last_day,
                                         last_day - days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                else:
                    logging.info(
                        "\ntrain_model_input_data_approach is not defined")
                    print('-a problem occurs with the data_approach settings')
                    return False, None
                print('defining y_train')
                y_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    y_train = [
                        time_serie_data[:, day - time_steps_days:day]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    y_train = [
                        time_serie_data[:, day:day + time_steps_days]
                        for last_day in day_in_year[:-1]
                        for day in range(last_day + window_output_length,
                                         last_day + window_output_length -
                                         days_in_focus_frame,
                                         -stride_window_walk)
                    ]
                    # border condition: the last year works with the last data available
                    y_train += [
                        np.concatenate(
                            (time_serie_data[:, day - window_output_length:day],
                             np.zeros(shape=(nof_poor_result_time_series,
                                             time_steps_days -
                                             window_output_length))),
                            axis=1)
                        for last_day in day_in_year[-1:]
                        for day in range(last_day,
                                         last_day - days_in_focus_frame,
                                         -stride_window_walk)
                    ]

                # if time_enhance is active, assigns more weight to the last time_steps according to enhance_last_stride
                if local_settings['time_enhance'] == 'True':
                    enhance_last_stride = local_settings['enhance_last_stride']
                    length_x_y_train = len(x_train)
                    x_train_enhanced, y_train_enhanced = [], []
                    enhance_iterator = 1
                    # walk forward over the last enhance_last_stride windows,
                    # repeating each one more often the closer it is to the end
                    # (the original range(start, stop, -1) with start < stop was
                    # empty, so the enhancement never actually ran)
                    for position in range(
                            length_x_y_train - enhance_last_stride,
                            length_x_y_train):
                        x_train_enhanced.extend(
                            x_train[position]
                            for _ in range(1, 3 * (enhance_iterator + 1)))
                        y_train_enhanced.extend(
                            y_train[position]
                            for _ in range(1, 3 * (enhance_iterator + 1)))
                        enhance_iterator += 1
                    x_train = x_train[:-enhance_last_stride] + x_train_enhanced
                    y_train = y_train[:-enhance_last_stride] + y_train_enhanced

                # convert lists to np arrays and apply the last pre-training preprocessing (amplification)
                x_train = np.array(x_train)
                y_train = np.array(y_train)
                print('x_train_shape:  ', x_train.shape)
                if local_settings['amplification'] == 'True':
                    factor = local_settings[
                        'amplification_factor']  # factor tuning was done previously
                    for time_serie_iterator in range(np.shape(x_train)[1]):
                        max_time_serie = np.amax(
                            x_train[:, time_serie_iterator, :])
                        x_train[:, time_serie_iterator, :][x_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                        max_time_serie = np.amax(
                            y_train[:, time_serie_iterator, :])
                        y_train[:, time_serie_iterator, :][y_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                print('x_train and y_train built done')

                # define callbacks, checkpoints namepaths
                model_weights = ''.join([
                    local_settings['checkpoints_path'],
                    'check_point_model_for_high_loss_time_serie_',
                    model_hyperparameters['current_model_name'],
                    "_loss_-{loss:.4f}-.hdf5"
                ])
                callback1 = cb.EarlyStopping(
                    monitor='loss',
                    patience=model_hyperparameters['early_stopping_patience'])
                callback2 = cb.ModelCheckpoint(model_weights,
                                               monitor='loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
                callbacks = [callback1, callback2]
                # note: reshape reorders elements in memory order rather than
                # transposing axes, so this scrambles the (series, time) layout;
                # np.swapaxes(x_train, 1, 2) may be what was intended
                x_train = x_train.reshape(
                    (np.shape(x_train)[0], np.shape(x_train)[2],
                     np.shape(x_train)[1]))
                y_train = y_train.reshape(
                    (np.shape(y_train)[0], np.shape(y_train)[2],
                     np.shape(y_train)[1]))
                print('input_shape: ', np.shape(x_train))

                # train for each time_serie
                # check settings for repeat or not the training
                forecaster_in_block.fit(x_train,
                                        y_train,
                                        batch_size=batch_size,
                                        epochs=epochs,
                                        workers=workers,
                                        callbacks=callbacks,
                                        shuffle=False)
                # print the summary (informative; if it reports "shape = multiple" it is not very useful)
                forecaster_in_block.summary()
                forecaster_in_block.save(''.join([
                    local_settings['models_path'],
                    '_high_loss_time_serie_model_forecaster_in_block_.h5'
                ]))
                forecaster_in_block.save_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                print(
                    'high loss time_series model trained and saved in hdf5 format .h5'
                )
            else:
                forecaster_in_block.load_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                # forecaster_in_block = models.load_model(''.join([local_settings['models_path'],
                #                                                  '_high_loss_time_serie_model_forecaster_.h5']))
                print('weights of previously trained model loaded')

            # compile model and make forecast (not necessary)
            # forecaster_in_block.compile(optimizer='adam', loss='mse')

            # evaluating model and comparing with aggregated (in-block) LSTM
            print('evaluating the model trained..')
            time_serie_data = time_serie_data.reshape(
                (1, time_serie_data.shape[1], time_serie_data.shape[0]))
            x_input = time_serie_data[:, -forecast_horizon_days:, :]
            y_pred_normalized = forecaster_in_block.predict(x_input)
            # print('output shape: ', y_pred_normalized.shape)
            time_serie_data = time_serie_data.reshape(
                (time_serie_data.shape[2], time_serie_data.shape[1]))
            # print('time_serie data shape: ', np.shape(time_serie_data))
            time_serie_iterator = 0
            improved_time_series_forecast = []
            time_series_not_improved = []
            improved_mse = []
            for time_serie in poor_result_time_serie_list:
                # for time_serie in range(local_normalized_scaled_unit_sales.shape[0]):
                y_truth = local_raw_unit_sales[time_serie:time_serie + 1,
                                               -forecast_horizon_days:]
                # print('y_truth shape:', y_truth.shape)

                # reversing preprocess: rescale, denormalize, reshape
                # inverse reshape
                y_pred_reshaped = y_pred_normalized.reshape(
                    (y_pred_normalized.shape[2], y_pred_normalized.shape[1]))
                y_pred_reshaped = y_pred_reshaped[
                    time_serie_iterator:time_serie_iterator + 1, :]
                # print('y_pred_reshaped shape:', y_pred_reshaped.shape)

                # inverse transform (first moving_windows denormalizing and then general rescaling)
                time_serie_normalized_window_mean = np.mean(
                    time_serie_data[time_serie_iterator,
                                    -moving_window_length:])
                # print('mean of this time serie (normalized values): ', time_serie_normalized_window_mean)
                local_denormalized_array = window_based_denormalizer(
                    y_pred_reshaped, time_serie_normalized_window_mean,
                    forecast_horizon_days)
                local_point_forecast = general_mean_rescaler(
                    local_denormalized_array,
                    local_mean_unit_complete_time_serie[time_serie],
                    forecast_horizon_days)
                # print('rescaled denormalized forecasts array shape: ', local_point_forecast.shape)

                # calculating MSE
                # print(y_truth.shape)
                # print(local_point_forecast.shape)
                local_error_metric_mse = mean_squared_error(
                    y_truth, local_point_forecast)
                # print('time_serie: ', time_serie, '\tMean_Squared_Error: ', local_error_metric_mse)
                previous_result = local_mse[:, 1][local_mse[:, 0] ==
                                                  time_serie].item()
                time_series_treated.append(
                    [int(time_serie), previous_result, local_error_metric_mse])
                if local_error_metric_mse < previous_result:
                    # print('better results with time_serie specific model training')
                    print(time_serie, 'MSE improved from ', previous_result,
                          'to ', local_error_metric_mse)
                    improved_time_series_forecast.append(int(time_serie))
                    improved_mse.append(local_error_metric_mse)
                else:
                    # print('no better results with time serie specific model training')
                    # print('MSE not improved from: ', previous_result, '\t current mse: ', local_error_metric_mse)
                    time_series_not_improved.append(int(time_serie))
                time_serie_iterator += 1
            time_series_treated = np.array(time_series_treated)
            improved_mse = np.array(improved_mse)
            average_mse_in_block_forecast = np.mean(time_series_treated[:, 2])
            average_mse_improved_ts = np.mean(improved_mse)
            print('poor result time serie list len:',
                  len(poor_result_time_serie_list))
            print('mean_mse for in-block forecast:',
                  average_mse_in_block_forecast)
            print(
                'number of time series with better results with this forecast: ',
                len(improved_time_series_forecast))
            print(
                'mean_mse of time series with better results with this forecast: ',
                average_mse_improved_ts)
            print('not improved time series =', len(time_series_not_improved))
            improved_time_series_forecast = np.array(
                improved_time_series_forecast)
            time_series_not_improved = np.array(time_series_not_improved)
            poor_result_time_serie_array = np.array(
                poor_result_time_serie_list)
            # store data of (individual-approach) time_series forecast successfully improved and those that not
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'poor_result_time_serie_array'
                ]), poor_result_time_serie_array)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_forecast_results'
                ]), time_series_treated)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast'
                ]), improved_time_series_forecast)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_not_improved'
                ]), time_series_not_improved)
            np.savetxt(''.join([
                local_settings['models_evaluation_path'],
                'time_series_forecast_results.csv'
            ]),
                       time_series_treated,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(''.join([local_settings['models_path'], 'high_loss_time_serie_model_forecaster_in_block.json']), 'w') \
                    as json_file:
                json_file.write(forecaster_in_block_json)
            print('trained model weights and architecture saved')
            print('metadata (results, time_serie with high loss) saved')
            print(
                'forecast improvement done. (high loss time_serie focused) submodule has finished'
            )
        except Exception as submodule_error:
            print('time_series in-block forecast submodule_error: ',
                  submodule_error)
            logger.info(
                'error in forecast of in-block time_series (high_loss_identified_ts_forecast submodule)'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
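
One reading note for the reshape calls in Examples #3 and #5: numpy's reshape reorders elements in memory order, it does not transpose, so swapping two axes of unequal length with reshape scrambles the (series, time) layout, while np.swapaxes gives the true axis swap. A quick illustration:

import numpy as np

a = np.arange(6).reshape(1, 2, 3)   # shape (1, 2, 3)
print(a.reshape(1, 3, 2)[0])        # [[0 1] [2 3] [4 5]]  (rows scrambled)
print(np.swapaxes(a, 1, 2)[0])      # [[0 3] [1 4] [2 5]]  (true transpose)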
Example #6
def parameter_update(theta_0,
                     ln_q,
                     ln_1_q,
                     ln_s,
                     mu,
                     sigma,
                     n_u,
                     n_y,
                     jitter,
                     sample_size_w=1024,
                     batch_size=None,
                     val_size=None,
                     optimizer_choice='adam',
                     lr=1e-3,
                     max_batch=4096,
                     tol=8,
                     factr=1e-3,
                     plot_loss=True,
                     print_info=True):

    batch_L = []

    gap = []

    if optimizer_choice == 'adam':
        optimizer = optimizers.Adam(learning_rate=lr)
    elif optimizer_choice == 'adadelta':
        optimizer = optimizers.Adadelta(learning_rate=lr)
    elif optimizer_choice == 'adagrad':
        optimizer = optimizers.Adagrad(learning_rate=lr)
    elif optimizer_choice == 'adamax':
        optimizer = optimizers.Adamax(learning_rate=lr)
    elif optimizer_choice == 'ftrl':
        optimizer = optimizers.Ftrl(learning_rate=lr)
    elif optimizer_choice == 'nadam':
        optimizer = optimizers.Nadam(learning_rate=lr)
    elif optimizer_choice == 'rmsprop':
        optimizer = optimizers.RMSprop(learning_rate=lr)
    elif optimizer_choice == 'sgd':
        optimizer = optimizers.SGD(learning_rate=lr)
    else:
        # fail early: optimizer = None would only surface later at apply_gradients
        raise ValueError('unknown optimizer_choice: ' + optimizer_choice)

    theta = tf.Variable(theta_0)

    fin_theta = theta_0.copy()

    if val_size is not None:
        if val_size > n_y:
            val_size = n_y
            val_idx = numpy.arange(0, n_y)
        else:
            val_idx = numpy.random.choice(numpy.arange(0, n_y),
                                          val_size,
                                          replace=False)
    else:
        val_idx = None

    for i in range(0, int(1e8)):

        if batch_size is None:
            tmp_idx = numpy.arange(0, n_y)
        else:
            tmp_idx = numpy.random.choice(numpy.arange(0, n_y),
                                          batch_size,
                                          replace=False)

        raw_sample_w = tf.random.normal((sample_size_w, 3 * len(tmp_idx)),
                                        dtype='float64')

        L_t, g_t = get_obj_g(theta, ln_q[tmp_idx], ln_1_q[tmp_idx],
                             ln_s[tmp_idx], mu[tmp_idx], sigma[tmp_idx], n_u,
                             len(tmp_idx), n_y, raw_sample_w, jitter)

        optimizer.apply_gradients(zip([g_t], [theta]))

        theta = theta.numpy()

        theta[:2] = numpy.abs(theta[:2])

        theta[:2][theta[:2] <= 1e-8] = 1e-8

        theta[5:8][theta[5:8] <= 1e-8] = 1e-8

        if val_size is not None:

            if numpy.mod(i, numpy.min([numpy.floor(tol / 2), 8])) == 0:

                raw_sample_w = tf.random.normal((sample_size_w, 3 * val_size),
                                                dtype='float64')

                tmp_L_t = vi_obj(theta, ln_q[val_idx], ln_1_q[val_idx],
                                 ln_s[val_idx], mu[val_idx], sigma[val_idx],
                                 n_u, val_size, n_y, raw_sample_w, jitter)

            tmp_L = (tmp_L_t.numpy() / n_y)

        else:

            tmp_L = (L_t.numpy() / n_y)

        batch_L.append(numpy.min(tmp_L))

        if len(batch_L) >= 2:
            if tmp_L < numpy.min(batch_L[:-1]):
                fin_theta = theta.copy()

        theta = tf.Variable(theta)

        if (numpy.mod(len(batch_L), tol) == 0) & print_info:

            print(
                '============================================================================='
            )

            print(theta[:8])
            print(theta[-6:])

            print('Batch: ' + str(len(batch_L)) + ', optimiser: ' +
                  optimizer_choice + ', Loss: ' + str(tmp_L))

            print(
                '============================================================================='
            )

        if len(batch_L) > tol:
            previous_opt = numpy.min(batch_L.copy()[:-tol])

            current_opt = numpy.min(batch_L.copy()[-tol:])

            gap.append(previous_opt - current_opt)

            if (numpy.mod(len(batch_L), tol) == 0) & print_info:
                print('Previous and recent best (minimum) loss:')
                print(numpy.hstack([previous_opt, current_opt]))

                print('Current Improvement, Initial Improvement * factr')
                print(numpy.hstack([gap[-1], gap[0] * factr]))

            if (len(gap) >= 2) & (gap[-1] <= (gap[0] * factr)):
                print('Total batch number: ' + str(len(batch_L)))
                print('Initial Loss: ' + str(batch_L[0]))
                print('Final Loss: ' + str(numpy.min(batch_L)))
                print('Current Improvement, Initial Improvement * factr')
                print(numpy.hstack([gap[-1], gap[0] * factr]))
                break

            if len(batch_L) >= max_batch:
                break

    if plot_loss:
        fig = matplotlib.pyplot.figure(figsize=(16, 9))

        matplotlib.pyplot.plot(numpy.arange(0, len(batch_L)),
                               numpy.array(batch_L))

        matplotlib.pyplot.xlabel('Batches')

        matplotlib.pyplot.ylabel('Loss')

        matplotlib.pyplot.title('Learning Rate: ' + str(lr))

        matplotlib.pyplot.grid(True)

        try:
            fig.savefig('./' + str(n_y) + '_' + str(n_u) + '_' +
                        optimizer_choice + '_' + str(lr) + '.png',
                        bbox_inches='tight')
        except PermissionError:
            pass
        except OSError:
            pass

        matplotlib.pyplot.close(fig)

    return fin_theta
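The if/elif ladder in parameter_update above can be collapsed into a lookup
table; a minimal sketch under the same optimizer choices (make_optimizer is an
illustrative helper, not from the source):

from tensorflow.keras import optimizers

OPTIMIZER_FACTORIES = {
    'adam': optimizers.Adam,
    'adadelta': optimizers.Adadelta,
    'adagrad': optimizers.Adagrad,
    'adamax': optimizers.Adamax,
    'ftrl': optimizers.Ftrl,
    'nadam': optimizers.Nadam,
    'rmsprop': optimizers.RMSprop,
    'sgd': optimizers.SGD,
}

def make_optimizer(choice, learning_rate):
    # raises KeyError for an unknown choice instead of silently returning None
    return OPTIMIZER_FACTORIES[choice](learning_rate=learning_rate)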
Example #7
    def train_model(self, local_settings, local_raw_unit_sales,
                    local_model_hyperparameters):
        try:
            # loading hyperparameters
            local_days_in_focus = local_model_hyperparameters[
                'days_in_focus_frame']
            local_raw_unit_sales_data = local_raw_unit_sales[:,
                                                             -local_days_in_focus:]
            local_nof_ts = local_raw_unit_sales.shape[0]
            local_forecast_horizon_days = local_settings[
                'forecast_horizon_days']
            local_features_for_each_training = 1
            print(
                'starting neural network - individual time_serie training unit_sale_approach'
            )

            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters[
                'learning_rate']
            local_validation_split = local_model_hyperparameters[
                'validation_split']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(
                    local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(
                    local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [
                local_loss_1, local_loss_2, local_loss_3
            ]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(
                    metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1,
                                                                  l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(
                monitor='loss',
                patience=local_model_hyperparameters['early_stopping_patience']
            )
            local_callbacks = [local_callback1]
            print(
                'building current model: individual_time_serie_acc_freq_LSTM_Dense_ANN'
            )
            local_base_model = tf.keras.Sequential()
            # first layer (LSTM)
            if local_model_hyperparameters['units_layer_1'] > 0:
                local_base_model.add(
                    layers.LSTM(
                        units=local_model_hyperparameters['units_layer_1'],
                        activation=local_model_hyperparameters['activation_1'],
                        input_shape=(
                            local_model_hyperparameters['time_steps_days'],
                            local_features_for_each_training),
                        dropout=float(
                            local_model_hyperparameters['dropout_layer_1']),
                        activity_regularizer=local_activation_regularizer,
                        return_sequences=True))
            # second LSTM layer
            if local_model_hyperparameters['units_layer_2'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.LSTM(
                            units=local_model_hyperparameters['units_layer_2'],
                            activation=local_model_hyperparameters[
                                'activation_2'],
                            activity_regularizer=local_activation_regularizer,
                            dropout=float(
                                local_model_hyperparameters['dropout_layer_2']
                            ),
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.
                        RNN(PeepholeLSTMCell(
                            units=local_model_hyperparameters['units_layer_3'],
                            dropout=float(
                                local_model_hyperparameters['dropout_layer_3'])
                        ),
                            activity_regularizer=local_activation_regularizer,
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(
                    layers.Dense(
                        units=local_model_hyperparameters['units_layer_4'],
                        activation=local_model_hyperparameters['activation_4'],
                        activity_regularizer=local_activation_regularizer))
                local_base_model.add(
                    layers.Dropout(rate=float(
                        local_model_hyperparameters['dropout_layer_4'])))
            # final layer
            local_base_model.add(
                layers.Dense(
                    units=local_model_hyperparameters['units_final_layer']))

            # build and compile model
            local_base_model.build(
                input_shape=(1, local_time_steps_days,
                             local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([
                local_settings['models_path'],
                '_unit_sales_forecaster_template_individual_ts.h5'
            ]))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([local_settings['models_path'],
                               '_unit_sales_forecaster_forecaster_template_individual_ts.json']), 'w') \
                    as json_file:
                json_file.write(local_base_model_json)
                json_file.close()
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']

            # loading x_train and y_train, as previously done for the third and fourth models' training
            local_builder = local_bxy_x_y_builder()
            local_x_train, local_y_train = local_builder.build_x_y_train_arrays(
                local_raw_unit_sales, local_settings,
                local_model_hyperparameters)
            local_x_train = local_x_train.reshape(local_x_train.shape[0],
                                                  local_x_train.shape[2],
                                                  local_x_train.shape[1])
            local_y_train = local_y_train.reshape(local_y_train.shape[0],
                                                  local_y_train.shape[2],
                                                  local_y_train.shape[1])

            # start training, time_serie by time_serie
            local_y_pred_array = np.zeros(shape=(local_raw_unit_sales.shape[0],
                                                 local_forecast_horizon_days),
                                          dtype=np.dtype('float32'))
            for time_serie in range(local_nof_ts):
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, :, time_serie: time_serie + 1], \
                                   local_y_train[:, :, time_serie: time_serie + 1]
                # training, saving model and storing forecasts
                local_base_model.fit(local_x,
                                     local_y,
                                     batch_size=local_batch_size,
                                     epochs=local_epochs,
                                     workers=local_workers,
                                     callbacks=local_callbacks,
                                     shuffle=False,
                                     validation_split=local_validation_split)
                local_base_model.save_weights(''.join([
                    local_settings['models_path'],
                    '/_weights_unit_sales_NN_35_days/_individual_ts_',
                    str(time_serie), '_model_weights_.h5'
                ]))
                local_x_input = local_raw_unit_sales[
                    time_serie:time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = local_x_input.reshape(1,
                                                      local_x_input.shape[1],
                                                      1)
                # print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                # print('x_input:\n', local_x_input)
                # print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                # print('ts:', time_serie)
                # print(local_y_pred)
                local_y_pred_array[time_serie:time_serie + 1, :] = local_y_pred
            local_point_forecast_normalized = local_y_pred_array.reshape(
                (local_y_pred_array.shape[0], local_y_pred_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized.clip(0)

            # save points forecast
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'point_forecast_NN_from_unit_sales_training'
                ]), local_point_forecast)
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'eleventh_model_NN_unit_sales_forecast_data'
                ]), local_point_forecast)
            np.savetxt(''.join([
                local_settings['others_outputs_path'],
                'point_forecast_NN_from_unit_sales_training.csv'
            ]),
                       local_point_forecast,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            print('point forecasts saved to file')
            print(
                'submodule for build, train and forecast time_serie unit_sales individually finished successfully'
            )
            return True, local_point_forecast
        except Exception as submodule_error:
            print(
                'train model and forecast individual time_series units_sales_ submodule_error: ',
                submodule_error)
            logger.info(
                'error in training and forecast-individual time_serie unit_sales_ schema'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False, []
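A hedged sketch of reloading the saved template and one series' weights for
later inference (paths mirror the strings used above; models_path, time_serie
and x_input are assumed to be in scope):

from tensorflow.keras.models import load_model

# custom layers such as PeepholeLSTMCell may need custom_objects here
model = load_model(''.join([models_path,
                            '_unit_sales_forecaster_template_individual_ts.h5']),
                   compile=False)
model.load_weights(''.join([models_path,
                            '/_weights_unit_sales_NN_35_days/_individual_ts_',
                            str(time_serie), '_model_weights_.h5']))
y_pred = model.predict(x_input)  # x_input shaped (1, time_steps, 1) as in the loop above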
Example #8
    def build_and_compile(self, local_model_name, local_settings,
                          local_hyperparameters):
        try:
            # keras,tf session/random seed reset/fix
            # kb.clear_session()
            # tf.compat.v1.reset_default_graph()
            np.random.seed(11)
            tf.random.set_seed(2)

            # load hyperparameters
            units_layer_1 = local_hyperparameters['units_layer_1']
            units_layer_2 = local_hyperparameters['units_layer_2']
            units_layer_3 = local_hyperparameters['units_layer_3']
            units_layer_4 = local_hyperparameters['units_layer_4']
            units_dense_layer_4 = local_hyperparameters['units_dense_layer_4']
            units_final_layer = local_hyperparameters['units_final_layer']
            activation_1 = local_hyperparameters['activation_1']
            activation_2 = local_hyperparameters['activation_2']
            activation_3 = local_hyperparameters['activation_3']
            activation_4 = local_hyperparameters['activation_4']
            activation_dense_layer_4 = local_hyperparameters[
                'activation_dense_layer_4']
            activation_final_layer = local_hyperparameters[
                'activation_final_layer']
            dropout_layer_1 = local_hyperparameters['dropout_layer_1']
            dropout_layer_2 = local_hyperparameters['dropout_layer_2']
            dropout_layer_3 = local_hyperparameters['dropout_layer_3']
            dropout_layer_4 = local_hyperparameters['dropout_layer_4']
            dropout_dense_layer_4 = local_hyperparameters[
                'dropout_dense_layer_4']
            input_shape_y = local_hyperparameters['input_shape_y']
            input_shape_x = local_hyperparameters['input_shape_x']
            nof_channels = local_hyperparameters['nof_channels']
            stride_y_1 = local_hyperparameters['stride_y_1']
            stride_x_1 = local_hyperparameters['stride_x_1']
            kernel_size_y_1 = local_hyperparameters['kernel_size_y_1']
            kernel_size_x_1 = local_hyperparameters['kernel_size_x_1']
            kernel_size_y_2 = local_hyperparameters['kernel_size_y_2']
            kernel_size_x_2 = local_hyperparameters['kernel_size_x_2']
            kernel_size_y_3 = local_hyperparameters['kernel_size_y_3']
            kernel_size_x_3 = local_hyperparameters['kernel_size_x_3']
            kernel_size_y_4 = local_hyperparameters['kernel_size_y_4']
            kernel_size_x_4 = local_hyperparameters['kernel_size_x_4']
            pool_size_y_1 = local_hyperparameters['pool_size_y_1']
            pool_size_x_1 = local_hyperparameters['pool_size_x_1']
            pool_size_y_2 = local_hyperparameters['pool_size_y_2']
            pool_size_x_2 = local_hyperparameters['pool_size_x_2']
            pool_size_y_3 = local_hyperparameters['pool_size_y_3']
            pool_size_x_3 = local_hyperparameters['pool_size_x_3']
            pool_size_y_4 = local_hyperparameters['pool_size_y_4']
            pool_size_x_4 = local_hyperparameters['pool_size_x_4']
            optimizer_function = local_hyperparameters['optimizer']
            optimizer_learning_rate = local_hyperparameters['learning_rate']
            epsilon_adam = local_hyperparameters['epsilon_adam']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(
                    learning_rate=optimizer_learning_rate,
                    epsilon=epsilon_adam)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            elif optimizer_function == 'sgd':
                optimizer_function = optimizers.SGD(optimizer_learning_rate)
            elif optimizer_function == 'rmsp':
                optimizer_function = optimizers.RMSprop(
                    optimizer_learning_rate, epsilon=epsilon_adam)
            optimizer_function = tf.train.experimental.enable_mixed_precision_graph_rewrite(
                optimizer_function)
            loss_1 = local_hyperparameters['loss_1']
            loss_2 = local_hyperparameters['loss_2']
            loss_3 = local_hyperparameters['loss_3']
            label_smoothing = local_hyperparameters['label_smoothing']
            losses_list = []
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'CategoricalCrossentropy' in union_settings_losses:
                losses_list.append(
                    losses.CategoricalCrossentropy(
                        label_smoothing=label_smoothing))
            if 'BinaryCrossentropy' in union_settings_losses:
                losses_list.append(losses.BinaryCrossentropy())
            if 'CategoricalHinge' in union_settings_losses:
                losses_list.append(losses.CategoricalHinge())
            if 'KLD' in union_settings_losses:
                losses_list.append(losses.KLDivergence())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            if 'customized_loss_t2' in union_settings_losses:
                losses_list.append(customized_loss_t2)
            if "Huber" in union_settings_losses:
                losses_list.append(losses.Huber())
            metrics_list = []
            metric1 = local_hyperparameters['metrics1']
            metric2 = local_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'auc_roc' in union_settings_metrics:
                metrics_list.append(metrics.AUC())
            if 'customized_metric_auc_roc' in union_settings_metrics:
                metrics_list.append(customized_metric_auc_roc())
            if 'CategoricalAccuracy' in union_settings_metrics:
                metrics_list.append(metrics.CategoricalAccuracy())
            if 'CategoricalHinge' in union_settings_metrics:
                metrics_list.append(metrics.CategoricalHinge())
            if 'BinaryAccuracy' in union_settings_metrics:
                metrics_list.append(metrics.BinaryAccuracy())
            if local_settings['use_efficientNetB2'] == 'False':
                type_of_model = '_custom'
                if local_hyperparameters['regularizers_l1_l2_1'] == 'True':
                    l1_1 = local_hyperparameters['l1_1']
                    l2_1 = local_hyperparameters['l2_1']
                    activation_regularizer_1 = regularizers.l1_l2(l1=l1_1,
                                                                  l2=l2_1)
                else:
                    activation_regularizer_1 = None
                if local_hyperparameters['regularizers_l1_l2_2'] == 'True':
                    l1_2 = local_hyperparameters['l1_2']
                    l2_2 = local_hyperparameters['l2_2']
                    activation_regularizer_2 = regularizers.l1_l2(l1=l1_2,
                                                                  l2=l2_2)
                else:
                    activation_regularizer_2 = None
                if local_hyperparameters['regularizers_l1_l2_3'] == 'True':
                    l1_3 = local_hyperparameters['l1_3']
                    l2_3 = local_hyperparameters['l2_3']
                    activation_regularizer_3 = regularizers.l1_l2(l1=l1_3,
                                                                  l2=l2_3)
                else:
                    activation_regularizer_3 = None
                if local_hyperparameters['regularizers_l1_l2_4'] == 'True':
                    l1_4 = local_hyperparameters['l1_4']
                    l2_4 = local_hyperparameters['l2_4']
                    activation_regularizer_4 = regularizers.l1_l2(l1=l1_4,
                                                                  l2=l2_4)
                else:
                    activation_regularizer_4 = None
                if local_hyperparameters[
                        'regularizers_l1_l2_dense_4'] == 'True':
                    l1_dense_4 = local_hyperparameters['l1_dense_4']
                    l2_dense_4 = local_hyperparameters['l2_dense_4']
                    activation_regularizer_dense_layer_4 = regularizers.l1_l2(
                        l1=l1_dense_4, l2=l2_dense_4)
                else:
                    activation_regularizer_dense_layer_4 = None

                # building model
                classifier_ = tf.keras.models.Sequential()
                # first layer
                classifier_.add(
                    layers.Input(shape=(input_shape_y, input_shape_x,
                                        nof_channels)))
                # classifier_.add(layers.ZeroPadding2D(padding=((0, 1), (0, 1))))
                classifier_.add(
                    layers.Conv2D(
                        units_layer_1,
                        kernel_size=(kernel_size_y_1, kernel_size_x_1),
                        strides=(stride_y_1, stride_x_1),
                        activity_regularizer=activation_regularizer_1,
                        activation=activation_1,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                # MaxPooling2D here: GlobalAveragePooling2D would collapse the
                # spatial dimensions and break the following Conv2D (editor's
                # fix, using the pool_size hyperparameters loaded above)
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_1,
                                                   pool_size_x_1)))
                classifier_.add(layers.Dropout(dropout_layer_1))
                # LAYER 1.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_1,
                        kernel_size=(kernel_size_y_1, kernel_size_x_1),
                        input_shape=(input_shape_y, input_shape_x,
                                     nof_channels),
                        strides=(stride_y_1, stride_x_1),
                        activity_regularizer=activation_regularizer_1,
                        activation=activation_1,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_1,
                                                   pool_size_x_1)))
                classifier_.add(layers.Dropout(dropout_layer_1))
                # second layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_2,
                        kernel_size=(kernel_size_y_2, kernel_size_x_2),
                        activity_regularizer=activation_regularizer_2,
                        activation=activation_2,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_2,
                                                   pool_size_x_2)))
                classifier_.add(layers.Dropout(dropout_layer_2))
                # LAYER 2.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_2,
                        kernel_size=(kernel_size_y_2, kernel_size_x_2),
                        activity_regularizer=activation_regularizer_2,
                        activation=activation_2,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_2,
                                                   pool_size_x_2)))
                classifier_.add(layers.Dropout(dropout_layer_2))
                # third layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_3,
                        kernel_size=(kernel_size_y_3, kernel_size_x_3),
                        activity_regularizer=activation_regularizer_3,
                        activation=activation_3,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_3,
                                                   pool_size_x_3)))
                classifier_.add(layers.Dropout(dropout_layer_3))
                # LAYER 3.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_3,
                        kernel_size=(kernel_size_y_3, kernel_size_x_3),
                        activity_regularizer=activation_regularizer_3,
                        activation=activation_3,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(
                    layers.MaxPooling2D(pool_size=(pool_size_y_3,
                                                   pool_size_x_3)))
                classifier_.add(layers.Dropout(dropout_layer_3))
                # fourth layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_4,
                        kernel_size=(kernel_size_y_4, kernel_size_x_4),
                        activity_regularizer=activation_regularizer_4,
                        activation=activation_4,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_4))
                # Full connection and final layer
                classifier_.add(
                    layers.Dense(units=units_final_layer,
                                 activation=activation_final_layer))
                # Compile model
                classifier_.compile(optimizer=optimizer_function,
                                    loss=losses_list,
                                    metrics=metrics_list)

            elif local_settings['use_efficientNetB2'] == 'True':
                type_of_model = '_EfficientNetB2'
                # pretrained_weights = ''.join([local_settings['models_path'],
                #                               local_hyperparameters['weights_for_training_efficientnetb2']])
                classifier_pretrained = tf.keras.applications.EfficientNetB2(
                    include_top=False,
                    weights='imagenet',
                    input_tensor=None,
                    input_shape=(input_shape_y, input_shape_x, 3),
                    pooling=None,
                    classifier_activation=None)
                # classifier_pretrained.save_weights(''.join([local_settings['models_path'],
                #                                             'pretrained_efficientnetb2_weights.h5']))
                #
                # classifier_receptor = tf.keras.applications.EfficientNetB2(include_top=False, weights=None,
                #                                                              input_tensor=None,
                #                                                              input_shape=(input_shape_y,
                #                                                                           input_shape_x, 1),
                #                                                              pooling=None,
                #                                                              classifier_activation=None)
                #
                # classifier_receptor.load_weights(''.join([local_settings['models_path'],
                #                                             'pretrained_efficientnetb2_weights.h5']), by_name=True)
                #
                # classifier_pretrained = classifier_receptor

                if local_settings['nof_classes'] == 2 or local_hyperparameters[
                        'use_bias_always'] == 'True':
                    # if two classes, log10(pos/neg) = log10(0.75/0.25) = 0.477121254719
                    bias_initializer = tf.keras.initializers.Constant(
                        local_hyperparameters['bias_initializer'])
                else:
                    # assuming balanced classes...
                    bias_initializer = tf.keras.initializers.Constant(0)

                effnb2_model = models.Sequential(classifier_pretrained)
                effnb2_model.add(layers.GlobalAveragePooling2D())
                effnb2_model.add(layers.Dropout(dropout_dense_layer_4))
                # effnb2_model.add(layers.Dense(units=units_dense_layer_4, activation=activation_dense_layer_4,
                #                  kernel_initializer=tf.keras.initializers.VarianceScaling(scale=0.333333333,
                #                                                                           mode='fan_out',
                #                                                                           distribution='uniform'),
                #                               bias_initializer=bias_initializer))
                # effnb2_model.add(layers.Dropout(dropout_dense_layer_4))
                effnb2_model.add(
                    layers.Dense(units_final_layer,
                                 activation=activation_final_layer,
                                 kernel_initializer=tf.keras.initializers.
                                 VarianceScaling(scale=0.333333333,
                                                 mode='fan_out',
                                                 distribution='uniform'),
                                 bias_initializer=bias_initializer))
                classifier_ = effnb2_model

                if local_settings[
                        'use_local_pretrained_weights_for_retraining'] != 'False':
                    classifier_.load_weights(''.join([
                        local_settings['models_path'], local_settings[
                            'use_local_pretrained_weights_for_retraining']
                    ]))
                    for layer in classifier_.layers[0].layers:
                        layer.trainable = True
                        # if 'excite' in layer.name:
                        #     layer.trainable = True
                        # if 'top_conv' in layer.name:
                        #     layer.trainable = True
                        # if 'project_conv' in layer.name:
                        #     layer.trainable = True

                classifier_.build(input_shape=(None, input_shape_y,
                                               input_shape_x, nof_channels))
                classifier_.compile(optimizer=optimizer_function,
                                    loss=losses_list,
                                    metrics=metrics_list)

            # Summary of model
            classifier_.summary()

            # save_model
            classifier_json = classifier_.to_json()
            with open(''.join([local_settings['models_path'], local_model_name, type_of_model,
                               '_classifier_.json']), 'w') \
                    as json_file:
                json_file.write(classifier_json)
                json_file.close()
            classifier_.save(''.join([
                local_settings['models_path'], local_model_name, type_of_model,
                '_classifier_.h5'
            ]))
            classifier_.save(''.join([
                local_settings['models_path'], local_model_name, type_of_model,
                '/'
            ]),
                             save_format='tf')
            print('model architecture saved')

            # output png and pdf with model, additionally saves a json file model_name_analyzed.json
            if local_settings['model_analyzer'] == 'True':
                model_architecture = model_structure()
                model_architecture_review = model_architecture.analize(
                    ''.join(
                        [local_model_name, type_of_model, '_classifier_.h5']),
                    local_settings, local_hyperparameters)
        except Exception as e:
            print('error in build or compile of customized model')
            print(e)
            classifier_ = None
            logger.error(str(e), exc_info=True)
        return classifier_
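Note that tf.train.experimental.enable_mixed_precision_graph_rewrite, used when
the optimizer is prepared above, is deprecated in recent TensorFlow releases; a
minimal sketch of the Keras mixed-precision policy that replaces it (assuming
TF >= 2.4):

from tensorflow.keras import mixed_precision

mixed_precision.set_global_policy('mixed_float16')
# layers then compute in float16 while variables stay float32; for numeric
# stability the final Dense layer can be pinned to float32, e.g.
# layers.Dense(units_final_layer, activation=activation_final_layer, dtype='float32')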
Example #9
File: main.py  Project: Ksenox/ANN-2021
import numpy as np
import tensorflow as tf  # used by load_data below (tf.keras.datasets.mnist)

from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.models import Sequential
from PIL import Image
from tensorflow.keras import optimizers

NUM_EPOCHS = 20

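# caution: the list below rebinds the imported name `optimizers`, shadowing
# the tensorflow.keras.optimizers module from here on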
optimizers = [
    optimizers.Adadelta(learning_rate=0.01),
    optimizers.Adagrad(learning_rate=0.01),
    optimizers.Adam(),
    optimizers.RMSprop(),
    optimizers.Ftrl(),
    optimizers.SGD(momentum=0.9)
]


def load_data():
    # MNIST dataset
    mnist = tf.keras.datasets.mnist
    (train_images, train_labels), (test_images,
                                   test_labels) = mnist.load_data()

    # convert the images to arrays of numbers in the interval [0, 1]
    train_images = train_images / 255.0
    test_images = test_images / 255.0

    # encode the category labels
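    # (the snippet is cut off here; a plausible continuation, given the
    # to_categorical import above, is sketched below)
    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)
    return (train_images, train_labels), (test_images, test_labels)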
Example #10
def build_model(local_bm_hyperparameters, local_bm_settings):
    model_built = 0
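    # note: model_built is never reassigned below; success is signalled by the
    # first element of the returned tuple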
    time_steps_days = int(local_bm_hyperparameters['time_steps_days'])
    epochs = int(local_bm_hyperparameters['epochs'])
    batch_size = int(local_bm_hyperparameters['batch_size'])
    workers = int(local_bm_hyperparameters['workers'])
    optimizer_function = local_bm_hyperparameters['optimizer']
    optimizer_learning_rate = local_bm_hyperparameters['learning_rate']
    if optimizer_function == 'adam':
        optimizer_function = optimizers.Adam(optimizer_learning_rate)
    elif optimizer_function == 'ftrl':
        optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
    losses_list = []
    loss_1 = local_bm_hyperparameters['loss_1']
    loss_2 = local_bm_hyperparameters['loss_2']
    loss_3 = local_bm_hyperparameters['loss_3']
    union_settings_losses = [loss_1, loss_2, loss_3]
    if 'mape' in union_settings_losses:
        losses_list.append(losses.MeanAbsolutePercentageError())
    if 'mse' in union_settings_losses:
        losses_list.append(losses.MeanSquaredError())
    if 'mae' in union_settings_losses:
        losses_list.append(losses.MeanAbsoluteError())
    if 'm_mape' in union_settings_losses:
        losses_list.append(modified_mape())
    if 'customized_loss_function' in union_settings_losses:
        losses_list.append(customized_loss())
    metrics_list = []
    metric1 = local_bm_hyperparameters['metrics1']
    metric2 = local_bm_hyperparameters['metrics2']
    union_settings_metrics = [metric1, metric2]
    if 'rmse' in union_settings_metrics:
        metrics_list.append(metrics.RootMeanSquaredError())
    if 'mse' in union_settings_metrics:
        metrics_list.append(metrics.MeanSquaredError())
    if 'mae' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsoluteError())
    if 'mape' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsolutePercentageError())
    l1 = local_bm_hyperparameters['l1']
    l2 = local_bm_hyperparameters['l2']
    if local_bm_hyperparameters['regularizers_l1_l2'] == 'True':
        activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
    else:
        activation_regularizer = None
    nof_features_for_training = local_bm_hyperparameters[
        'nof_features_for_training']
    # creating model
    forecaster_in_block = tf.keras.Sequential()
    print('creating the ANN model...')
    # first layer (DENSE)
    if local_bm_hyperparameters['units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Dense(
                units=local_bm_hyperparameters['units_layer_1'],
                activation=local_bm_hyperparameters['activation_1'],
                input_shape=(local_bm_hyperparameters['time_steps_days'],
                             nof_features_for_training),
                activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_1'])))
    # second LSTM layer
    if local_bm_hyperparameters[
            'units_layer_2'] > 0 and local_bm_hyperparameters[
                'units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_2'],
                    activation=local_bm_hyperparameters['activation_2'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_2']),
                    return_sequences=False)))
        forecaster_in_block.add(
            RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # third LSTM layer
    if local_bm_hyperparameters['units_layer_3'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_3'],
                    activation=local_bm_hyperparameters['activation_3'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_3']),
                    return_sequences=True)))
        if local_bm_hyperparameters['units_layer_4'] == 0:
            forecaster_in_block.add(
                RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # fourth layer (DENSE)
    if local_bm_hyperparameters['units_layer_4'] > 0:
        forecaster_in_block.add(
            layers.Dense(units=local_bm_hyperparameters['units_layer_4'],
                         activation=local_bm_hyperparameters['activation_4'],
                         activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_4'])))
    # final layer
    forecaster_in_block.add(
        TimeDistributed(layers.Dense(units=nof_features_for_training)))
    forecaster_in_block.save(''.join(
        [local_bm_settings['models_path'], 'in_block_NN_model_structure_']),
                             save_format='tf')
    forecast_horizon_days = local_bm_settings['forecast_horizon_days']
    forecaster_in_block.build(input_shape=(1, forecast_horizon_days + 1,
                                           nof_features_for_training))
    forecaster_in_block.compile(optimizer=optimizer_function,
                                loss=losses_list,
                                metrics=metrics_list)
    forecaster_in_block_json = forecaster_in_block.to_json()
    with open(
            ''.join([
                local_bm_settings['models_path'],
                'freq_acc_forecaster_in_block.json'
            ]), 'w') as json_file:
        json_file.write(forecaster_in_block_json)
        json_file.close()
    print(
        'build_model function finish (model structure saved in json and ts formats)'
    )
    return True, model_built
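For reference, TimeDistributed (used in the final layer above) applies the
wrapped layer to every timestep independently; a standalone sketch with
illustrative shapes:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.zeros((1, 29, 30))                        # (batch, timesteps, features)
head = layers.TimeDistributed(layers.Dense(30))  # one Dense shared across timesteps
print(head(x).shape)                             # (1, 29, 30)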
Example #11
 pool_size_y_1 = model_hyperparameters['pool_size_y_1']
 pool_size_x_1 = model_hyperparameters['pool_size_x_1']
 pool_size_y_2 = model_hyperparameters['pool_size_y_2']
 pool_size_x_2 = model_hyperparameters['pool_size_x_2']
 pool_size_y_3 = model_hyperparameters['pool_size_y_3']
 pool_size_x_3 = model_hyperparameters['pool_size_x_3']
 pool_size_y_4 = model_hyperparameters['pool_size_y_4']
 pool_size_x_4 = model_hyperparameters['pool_size_x_4']
 optimizer_function = model_hyperparameters['optimizer']
 optimizer_learning_rate = model_hyperparameters['learning_rate']
 if optimizer_function == 'adam':
     optimizer_function = optimizers.Adam(optimizer_learning_rate)
     optimizer_function = tf.train.experimental.enable_mixed_precision_graph_rewrite(
         optimizer_function)
 elif optimizer_function == 'ftrl':
     optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
 elif optimizer_function == 'sgd':
     optimizer_function = optimizers.SGD(optimizer_learning_rate)
 losses_list = []
 loss_1 = model_hyperparameters['loss_1']
 loss_2 = model_hyperparameters['loss_2']
 loss_3 = model_hyperparameters['loss_3']
 union_settings_losses = [loss_1, loss_2, loss_3]
 if 'CategoricalCrossentropy' in union_settings_losses:
     losses_list.append(losses.CategoricalCrossentropy())
 if 'CategoricalHinge' in union_settings_losses:
     losses_list.append(losses.CategoricalHinge())
 if 'LogCosh' in union_settings_losses:
     losses_list.append(losses.LogCosh())
 if 'customized_loss_function' in union_settings_losses:
     losses_list.append(customized_loss())
    def train(self, local_settings, local_raw_unit_sales, local_model_hyperparameters, local_time_series_not_improved,
              raw_unit_sales_ground_truth):
        try:
            # data normalization
            local_forecast_horizon_days = local_settings['forecast_horizon_days']
            local_x_train, local_y_train = build_x_y_train_arrays(local_raw_unit_sales, local_settings,
                                                                  local_model_hyperparameters,
                                                                  local_time_series_not_improved)
            local_forecast_horizon_days = local_settings['forecast_horizon_days']
            local_features_for_each_training = 1
            print('starting neural network - individual time_serie training')
            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters['learning_rate']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [local_loss_1, local_loss_2, local_loss_3]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1, l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(monitor='loss',
                                               patience=local_model_hyperparameters['early_stopping_patience'])
            local_callbacks = [local_callback1]
            print('building current model: Mix_Bid_PeepHole_LSTM_Dense_ANN')
            local_base_model = tf.keras.Sequential()
            # first layer (DENSE)
            if local_model_hyperparameters['units_layer_1'] > 0:
                # strictly dim 1 of input_shape is ['time_steps_days'] (dim 0 is number of batches: None)
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_1'],
                                                  activation=local_model_hyperparameters['activation_1'],
                                                  input_shape=(local_time_steps_days,
                                                               local_features_for_each_training),
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_1'])))
            # second layer
            if local_model_hyperparameters['units_layer_2']:
                if local_model_hyperparameters['units_layer_1'] == 0:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         input_shape=(local_time_steps_days,
                                                      local_features_for_each_training),
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                else:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                # local_base_model.add(RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_3'],
                                                  activation=local_model_hyperparameters['activation_3'],
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_3'])))
            # fourth layer
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(layers.RNN(
                    PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_4'],
                                     activation=local_model_hyperparameters['activation_4'],
                                     dropout=float(local_model_hyperparameters['dropout_layer_4']))))
            local_base_model.add(layers.Dense(units=local_forecast_horizon_days))

            # build and compile model
            local_base_model.build(input_shape=(1, local_time_steps_days, local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([local_settings['models_path'],
                                           'generic_forecaster_template_individual_ts.h5']))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([local_settings['models_path'],
                               'generic_forecaster_template_individual_ts.json']), 'w') as json_file:
                json_file.write(local_base_model_json)
                json_file.close()
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']
            # all input data in the correct type
            local_x_train = np.array(local_x_train, dtype=np.dtype('float32'))
            local_y_train = np.array(local_y_train, dtype=np.dtype('float32'))
            local_raw_unit_sales = np.array(local_raw_unit_sales, dtype=np.dtype('float32'))
            # specific time_serie models training loop
            local_y_pred_list = []
            local_nof_time_series = local_settings['number_of_time_series']
            remainder = np.array([time_serie for time_serie in range(local_nof_time_series)
                                  if time_serie not in local_time_series_not_improved])
            for time_serie in remainder:
                # ----------------------key_point---------------------------------------------------------------------
                # take note that each loop the weights and internal last states of previous training are conserved
                # that's probably save times and (in aggregated or ordered) connected time series will improve results
                # ----------------------key_point---------------------------------------------------------------------
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, time_serie: time_serie + 1, :], \
                                   local_y_train[:, time_serie: time_serie + 1, :]
                local_x = local_x.reshape(local_x.shape[0], local_x.shape[2], 1)
                local_y = local_y.reshape(local_y.shape[0], local_y.shape[2], 1)
                # training, saving model and storing forecasts
                local_base_model.fit(local_x, local_y, batch_size=local_batch_size, epochs=local_epochs,
                                     workers=local_workers, callbacks=local_callbacks, shuffle=False)
                local_base_model.save_weights(''.join([local_settings['models_path'],
                                                       '/weights_last_year/_individual_ts_',
                                                       str(time_serie), '_model_weights_.h5']))
                local_x_input = local_raw_unit_sales[time_serie: time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = cof_zeros(local_x_input, local_settings)
                local_x_input = local_x_input.reshape(1, local_x_input.shape[1], 1)
                print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                print('x_input:\n', local_x_input)
                print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                local_y_pred = cof_zeros(local_y_pred, local_settings)
                if local_settings['mini_ts_evaluator'] == "True" and \
                        local_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
                    mini_evaluator = mini_evaluator_submodule()
                    evaluation = mini_evaluator.evaluate_ts_forecast(
                            raw_unit_sales_ground_truth[time_serie, -local_forecast_horizon_days:], local_y_pred)
                    print('ts:', time_serie, 'with cof_zeros ts mse:', evaluation)
                else:
                    print('ts:', time_serie)
                print(local_y_pred)
                local_y_pred_list.append(local_y_pred)
            local_point_forecast_array = np.array(local_y_pred_list)
            local_point_forecast_normalized = local_point_forecast_array.reshape(
                (local_point_forecast_array.shape[0], local_point_forecast_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized

            # save points forecast
            np.savetxt(''.join([local_settings['others_outputs_path'], 'point_forecast_NN_LSTM_simulation.csv']),
                       local_point_forecast, fmt='%10.15f', delimiter=',', newline='\n')
            print('point forecasts saved to file')
            print('submodule for build, train and forecast time_serie individually finished successfully')
            return True
        except Exception as submodule_error:
            print('train model and forecast individual time_series submodule_error: ', submodule_error)
            logger.info('error in training and forecast-individual time_serie schema')
            logger.error(str(submodule_error), exc_info=True)
            return False
예제 #13
0
# tensorflow optimizers(优化器)
# from . import schedules
# from tensorflow.python.keras.optimizer_v2.adadelta import Adadelta
# from tensorflow.python.keras.optimizer_v2.adagrad import Adagrad
# from tensorflow.python.keras.optimizer_v2.adam import Adam
# from tensorflow.python.keras.optimizer_v2.adamax import Adamax
# from tensorflow.python.keras.optimizer_v2.ftrl import Ftrl
# from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD
# from tensorflow.python.keras.optimizer_v2.nadam import Nadam
# from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2 as Optimizer
# from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
# from tensorflow.python.keras.optimizers import deserialize
# from tensorflow.python.keras.optimizers import get
# from tensorflow.python.keras.optimizers import serialize


import tensorflow.keras.optimizers as optim

optimizer = optim.Ftrl(lr=0.02)