Example #1
def findPMD(filepath, outputpath1, outputpath2):
    """
        findPMD(filepath, outputpath1, outputpath2)
    The main function in PMDfinder.
    
    * filepath: input BED file path.
    * outputpath1: output BED file path with per-site PMD predictions.
    * outputpath2: output GRanges-style interval file path.
    """
    # load DSS methylation data
    methylation = pd.read_csv(filepath,
                              sep='\t',
                              comment='t',  # skips bedGraph 'track' header lines
                              header=0,
                              low_memory=False)

    # methylated read counts (X) and total coverage (N) give the per-site methylation ratio
    a = list(map(float, methylation['X']))
    b = list(map(float, methylation['N']))
    meth_ratio = [i / j for i, j in zip(a, b)]

    # geno_pos = list(map(float, methylation['pos']))

    ### Data Conversion
    # convert methylation ratio to PMD/non-PMD level (y=4x(1-x))
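    # y = 4x(1-x) peaks at 1 when meth_ratio = 0.5 (the intermediate methylation
    # characteristic of PMDs) and falls to 0 at fully methylated/unmethylated sites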
    def methRatio2PMDLevel(meth_ratio):
        n = len(meth_ratio)
        PMD_level = [0] * n
        for i in range(n):
            PMD_level[i] = 4 * meth_ratio[i] * (1 - meth_ratio[i])
        return PMD_level

    PMD_level = methRatio2PMDLevel(meth_ratio)

    ### Sequential Data Matrix
    # extract sequential features with a sliding window of 1024 sites
    N = len(PMD_level)
    X = np.zeros((N - 1023, 1024))
    for i in range(N - 1023):
        X[i, :] = PMD_level[i:i + 1024]

    X = X.astype(np.float32)

    ### Autoencoder
    # dimensionality of the autoencoder's latent space
    latent_dim = 8

    m = Autoencoder(latent_dim)
    m.compile(optimizer='adam', loss=losses.MeanSquaredError())

    # fit the model
    m.fit(X, X, epochs=5, shuffle=True)

    # get the encoded PMD
    encoded_slicing_PMD = m.encoder(X).numpy()

    ### k-means
    kmeans = KMeans(n_clusters=2, random_state=22).fit(encoded_slicing_PMD)
    final_result = kmeans.labels_

    ### Post-processing steps
    ## Remove PMD regions spanning fewer than 101 consecutive sites
    assign1 = []  # indices where the cluster label equals 1
    for i in range(len(final_result)):
        if final_result[i] == 1:
            assign1.append(i)

    break_pts1 = [0]  # positions in assign1 where a run of consecutive indices breaks
    for i in range(1, len(assign1)):
        if assign1[i] - assign1[i - 1] > 1:
            break_pts1.append(i)

    # small_PMD_intervals: label-1 runs shorter than the length threshold
    small_PMD_intervals = []
    for i in range(1, len(break_pts1)):
        if assign1[break_pts1[i] - 1] - assign1[break_pts1[i - 1]] + 1 < 101:
            small_PMD_intervals.append(i)

    # relabel PMD runs shorter than 101 sites as non-PMD
    for interval in small_PMD_intervals:
        final_result[assign1[break_pts1[interval -
                                        1]:break_pts1[interval]]] = 0

    ## Merge PMD regions separated by short non-PMD gaps
    # this requires checking the length of each non-PMD run
    assign2 = []
    for i in range(len(final_result)):
        if final_result[i] == 0:
            assign2.append(i)

    break_pts2 = [0]
    for i in range(1, len(assign2)):
        if assign2[i] - assign2[i - 1] > 1:
            break_pts2.append(i)

    # small non_PMD intervals
    small_non_PMD_intervals = []
    for i in range(1, len(break_pts2)):
        if assign2[break_pts2[i] - 1] - assign2[break_pts2[i - 1]] + 1 < 51:
            small_non_PMD_intervals.append(i)

    # relabel non-PMD runs shorter than 51 sites as PMD
    for interval in small_non_PMD_intervals:
        final_result[assign2[break_pts2[interval -
                                        1]:break_pts2[interval]]] = 1

    # file output
    output_methylation = methylation[:len(methylation) - 1023].copy()
    output_methylation.loc[:, 'PMD_predict'] = (
        pd.DataFrame(final_result)[0].map({1: 'Non-PMD', 0: 'PMD'}))
    output_methylation.to_csv(outputpath1, sep='\t', index=False, header=True)

    # output GRanges-style interval file
    # (DataFrame.append was removed in pandas 2.0, so collect the interval
    # records in a list and build the frame once at the end)
    records = []
    n_rows = len(output_methylation)
    i, j = 0, 0

    while i < n_rows:
        if j == n_rows:
            records.append({
                'chr': output_methylation.iloc[i, 0],
                'start': output_methylation.iloc[i, 1],
                'end': output_methylation.iloc[j - 1, 1],
                'status': ti
            })
            break

        ti = output_methylation.iloc[i, 4]
        tj = output_methylation.iloc[j, 4]
        if tj == ti:
            j += 1
        else:
            records.append({
                'chr': output_methylation.iloc[i, 0],
                'start': output_methylation.iloc[i, 1],
                'end': output_methylation.iloc[j - 1, 1],
                'status': ti
            })
            i = j

    df = pd.DataFrame(records, columns=['chr', 'start', 'end', 'status'])
    df.to_csv(outputpath2, sep='\t', index=False, header=True)
    # print(df)

    print("Finished PMDfinder!")
Example #2
def pipeline():
    featurearr, simarr, labelarr = load_data()
    xarr, yarr, aarr, edge_attrarr = graphdatageneration(featurearr, simarr, labelarr)

    dataset = MyDataset(xarr, yarr, aarr, edge_attrarr)

    np.random.seed(10)
    # Train/test split
    idxs = np.random.permutation(len(dataset))
    split = int(0.8 * len(dataset))
    idx_tr, idx_te = np.split(idxs, [split])
    dataset_tr, dataset_te = dataset[idx_tr], dataset[idx_te]
    loader_tr = DisjointLoader(dataset_tr, batch_size=32, epochs=30, shuffle=True)
    loader_te = DisjointLoader(dataset_te, batch_size=32, epochs=1, shuffle=True)

    model = buildmodel(dataset)

    opt = optimizers.Adam(learning_rate=learning_rate)  # the `lr` alias is deprecated
    loss_fn = losses.MeanSquaredError()


    @tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
    def train_step(inputs, target):
        with tf.GradientTape() as tape:
            predictions = model(inputs, training=True)
            loss = loss_fn(target, predictions)
            mae = losses.MeanAbsoluteError()(target, predictions)
            mape = losses.MeanAbsolutePercentageError()(target, predictions)

            loss += sum(model.losses)
        gradients = tape.gradient(loss, model.trainable_variables)
        opt.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, mae, mape

    print("training")
    current_batch = 0
    model_loss = 0
    total_mape = 0
    total_mae = 0
    for batch in loader_tr:
        outs, mae, mape = train_step(*batch)

        model_loss += outs
        total_mae += mae
        total_mape += mape
        current_batch += 1
        if current_batch == loader_tr.steps_per_epoch:
            print("MSE: {}".format(model_loss / loader_tr.steps_per_epoch),
                  "MAE: {}".format(total_mae/ loader_tr.steps_per_epoch),
                  "MAPE: {}".format(total_mape/ loader_tr.steps_per_epoch))
            model_loss = 0
            total_mae = 0
            total_mape = 0
            current_batch = 0


    print("testing")
    model_loss = 0
    model_mae = 0
    model_mape = 0
    for batch in loader_te:
        inputs, target = batch
        predictions = model(inputs, training=False)
        model_loss += loss_fn(target, predictions)
        model_mae += losses.MeanAbsoluteError()(target, predictions)
        model_mape += losses.MeanAbsolutePercentageError()(target, predictions)

    model_loss /= loader_te.steps_per_epoch
    model_mae /= loader_te.steps_per_epoch
    model_mape /= loader_te.steps_per_epoch
    print("Done. Test MSE: {}".format(model_loss),
          "Test MAE: {}".format(model_mae),
          "Test MAPE: {}".format(model_mape))
    model.save('/home/som/lab/seed-yzj/newpaper4/laboratory/model/fusion.hdf5')
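
This example leaves its imports out. DisjointLoader is Spektral's graph-batching loader, so a plausible header for the snippet (the exact module paths are an assumption) would be:

# assumed imports for the snippet above
import numpy as np
import tensorflow as tf
from tensorflow.keras import losses, optimizers
from spektral.data import DisjointLoader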
Example #3
    def __init__(self, output_len, model_parameters):
        '''
        output_len = length of target vector
        model_parameters = model hyperparameters pulled from parameters.py
        '''
        super(EARSHOT, self).__init__(name='earshot')
        self.model_parameters = model_parameters
        self.mask = Masking(mask_value=-9999, name="mask")

        if self.model_parameters.hidden['type'] == "LSTM":
            self.hidden = LSTM(self.model_parameters.hidden['size'],
                               return_sequences=True,
                               stateful=False,
                               name="LSTM")
        elif self.model_parameters.hidden['type'] == "GRU":
            self.hidden = GRU(self.model_parameters.hidden['size'],
                              return_sequences=True,
                              name="GRU")

        # loss function and output activation are coupled; this sets them both
        if self.model_parameters.train_loss == 'CE':
            self.loss = losses.BinaryCrossentropy(from_logits=True)
            self.activation = tf.nn.sigmoid
        elif self.model_parameters.train_loss == 'MSE':
            self.loss = losses.MeanSquaredError()
            self.activation = tf.nn.tanh

        # set learning rate schedule
        schedule_type = next(iter(self.model_parameters.learning_schedule))
        if schedule_type == 'noam':
            self.lr_sched = noam_decay_lr(
                self.model_parameters.learning_schedule['noam']['warmup'])
            lr = self.model_parameters.learning_schedule['noam']['initial']
        elif schedule_type == 'constant':
            self.lr_sched = constant_lr(
                self.model_parameters.learning_schedule['constant']['rate'])
            lr = self.model_parameters.learning_schedule['constant']['rate']
        elif schedule_type == 'polynomial':
            self.lr_sched = polynomial_decay_lr(
                self.model_parameters.learning_schedule['polynomial']['max_epochs'],
                self.model_parameters.learning_schedule['polynomial']['poly_pow'])
            lr = self.model_parameters.learning_schedule['polynomial']['initial']
        elif schedule_type == 'step':
            self.lr_sched = step_decay_lr(
                self.model_parameters.learning_schedule['step']['initial'],
                self.model_parameters.learning_schedule['step']['drop_factor'],
                self.model_parameters.learning_schedule['step']['drop_every'])
            lr = self.model_parameters.learning_schedule['step']['initial']

        # optimizer
        optimizer_type = next(iter(self.model_parameters.optimizer))
        if optimizer_type == 'ADAM':
            self.optimizer = tf.keras.optimizers.Adam(
                learning_rate=lr, **self.model_parameters.optimizer['ADAM'])
        elif optimizer_type == 'SGD':
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=lr, **self.model_parameters.optimizer['SGD'])

        self.dense_output = Dense(output_len, activation=self.activation)
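
The constructor dispatches on single-key dicts. A hypothetical parameters.py entry matching the branches above (all names and values here are illustrative only) might look like:

# hypothetical hyperparameter object; only the fields read by __init__ are shown
class ModelParameters:
    hidden = {'type': 'LSTM', 'size': 512}
    train_loss = 'CE'                                    # 'CE' or 'MSE'
    learning_schedule = {'noam': {'warmup': 4000, 'initial': 1e-3}}
    optimizer = {'ADAM': {'beta_1': 0.9, 'beta_2': 0.999}}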
Example #4
from machine_learning_solver.PINN import AllenCahnPINN
import tensorflow.keras.losses as losses
import numpy as np
from sklearn.metrics import mean_squared_error
from numerical_solvers.AllenCahn_FTCS import AllenCahnFTCS
from util.generate_plots import generate_contour_and_snapshots_plot

# Part 1: solve AC equation using PINN

pinn = AllenCahnPINN(loss_obj=losses.MeanSquaredError(),
                     n_nodes=20,
                     n_layers=8)
pinn.generate_training_data(n_initial=50, n_boundary=25, equidistant=False)
pinn.perform_training(max_n_epochs=1000,
                      min_mse=0.05,
                      track_losses=True,
                      batch_size='full')

# Print loss data frame and plot solution
print(pinn.loss_df)
generate_contour_and_snapshots_plot(pinn.u_pred,
                                    train_feat=pinn.train_feat,
                                    legend_loc='center left')
# generate_contour_and_snapshots_plot(pinn.u_pred, train_feat=pinn.train_feat, legend_loc='center left',
#                                     savefig_path='plots/Fig10_PINN_contour_and_snapshots_plot.jpg')

# Part 2: use the initial condition predicted by the PINN as input for FTCS

# Generate initial data for the FTCS solver
n_spatial = 512
n_temporal = 201
Example #5

import numpy as np
from tensorflow.keras import layers, losses, models


def coolLinear(x):
    return 3 * x + 1


# generate training data
bounds = (-10, 10)  # represents full system dynamics

inputList, outputList = randomPoints(10000, bounds, coolLinear)  # was boolfunc, which is undefined here

# neural network code
model = models.Sequential()
model.add(layers.Dense(1, activation='linear', input_shape=(1, )))
model.compile(optimizer='Adam',
              loss=losses.MeanSquaredError(),
              metrics=['mean_squared_error'])

history = model.fit(np.array(inputList), np.array(outputList), epochs=200)
print(model.get_weights())

# plots out learning curve
# plt.plot(history.history['mean_squared_error'], label='mean_squared_error')
# plt.xlabel('Epoch')
# plt.ylabel('MSE')
# plt.ylim([0.0, 0.2])
# plt.legend(loc='lower right')
# plt.show()

bounds = (-15, 15)  # represents full system dynamics
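
randomPoints is not shown in this snippet; a minimal stand-in consistent with how it is called (uniform sampling within bounds mapped through the target function, which is an assumption about its behavior) could be:

def randomPoints(n, bounds, func):
    # hypothetical helper: n uniform samples from [bounds[0], bounds[1]],
    # paired with the target function's values
    xs = np.random.uniform(bounds[0], bounds[1], size=n)
    return list(xs), [func(x) for x in xs]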
Example #6
    sub_dir = datetime.strftime(datetime.now(), r'%Y%m%d-%H%M%S')
    root_dir = 'tmp/'
    s1 = 'use_learnable' if USE_LEARNABLE else 'use_fixed'
    s2 = 'use_recon' if USE_RECONSTRUCT else 'no_recon'
    sub_dir = s1 + '_' + s2 + '/' + sub_dir

    writer = tf.summary.create_file_writer(root_dir + sub_dir)
    initer = k.initializers.RandomUniform(0.2, 1.0)
    weights: List[tf.Variable] = [
        tf.Variable(initer([], tf.float32), trainable=USE_LEARNABLE)
        for i in range(3)
    ]
    optimizer = k.optimizers.Adam(0.0001)
    ce_fn1 = kls.CategoricalCrossentropy()
    ce_fn2 = kls.CategoricalCrossentropy()
    mse_fn = kls.MeanSquaredError()
    ceacc_fn1 = k.metrics.CategoricalAccuracy()
    ceacc_fn2 = k.metrics.CategoricalAccuracy()
    batch_size = 256
    epochs = 10

    @tf.function
    def step(x, y1, y2, y3):
        with tf.GradientTape() as tape:
            p1, p2, p3 = model(x, training=True)
            l1 = ce_fn1(y1, p1)
            ceacc_fn1.update_state(y1, p1)
            l1_w = uncertaint_weight(l1, weights[0])
            l2 = ce_fn2(y2, p2)
            ceacc_fn2.update_state(y2, p2)
            l2_w = uncertaint_weight(l2, weights[1])
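
uncertaint_weight is not defined in this snippet; a plausible stand-in, assuming the learnable homoscedastic-uncertainty multi-task weighting of Kendall et al. (an assumption about intent, not the example's actual code):

def uncertaint_weight(loss, sigma):
    # hypothetical sketch: scale a task loss by a learnable uncertainty term,
    # L_w = L / (2 * sigma^2) + log(sigma)
    return loss / (2.0 * sigma ** 2) + tf.math.log(sigma)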
Example #7
    hidden_dense_layer3 = layers.Dense(
        16, activation=activations.tanh)(hidden_batch_layer2)
    hidden_batch_layer3 = layers.BatchNormalization()(hidden_dense_layer3)
    output_para = layers.Dense(
        env.action_dim, activation=activations.sigmoid)(hidden_batch_layer3)
    output_hidden = layers.Dense(4, activation=activations.tanh)(output_para)
    output_eval = layers.Dense(1,
                               activation=activations.sigmoid)(output_hidden)
    model = models.Model(inputs=[input_layer],
                         outputs=[output_para, output_eval])
    return model


model = create_model()
optimizer = optimizers.RMSprop(0.00001)
loss_object = losses.MeanSquaredError()


def discount_rewards(rewards, r=0.04):
    discounted_rewards = np.zeros_like(rewards)
    running_add = 0
    for t in reversed(range(0, len(rewards))):
        running_add = running_add / (1 + r / 250) + rewards[t]
        discounted_rewards[t] = running_add
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    return discounted_rewards
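
# e.g. discount_rewards(np.array([0.0, 0.0, 1.0])) discounts the final reward
# backwards at a daily rate of r/250 (an annual rate spread over ~250 trading
# days), then standardizes the result to zero mean and unit variance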


def interaction_process():
    ob = env.reset()
Example #8
def build_model(local_bm_hyperparameters, local_bm_settings):
    model_built = 0
    time_steps_days = int(local_bm_hyperparameters['time_steps_days'])
    epochs = int(local_bm_hyperparameters['epochs'])
    batch_size = int(local_bm_hyperparameters['batch_size'])
    workers = int(local_bm_hyperparameters['workers'])
    optimizer_function = local_bm_hyperparameters['optimizer']
    optimizer_learning_rate = local_bm_hyperparameters['learning_rate']
    if optimizer_function == 'adam':
        optimizer_function = optimizers.Adam(optimizer_learning_rate)
    elif optimizer_function == 'ftrl':
        optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
    losses_list = []
    loss_1 = local_bm_hyperparameters['loss_1']
    loss_2 = local_bm_hyperparameters['loss_2']
    loss_3 = local_bm_hyperparameters['loss_3']
    union_settings_losses = [loss_1, loss_2, loss_3]
    if 'mape' in union_settings_losses:
        losses_list.append(losses.MeanAbsolutePercentageError())
    if 'mse' in union_settings_losses:
        losses_list.append(losses.MeanSquaredError())
    if 'mae' in union_settings_losses:
        losses_list.append(losses.MeanAbsoluteError())
    if 'm_mape' in union_settings_losses:
        losses_list.append(modified_mape())
    if 'customized_loss_function' in union_settings_losses:
        losses_list.append(customized_loss())
    metrics_list = []
    metric1 = local_bm_hyperparameters['metrics1']
    metric2 = local_bm_hyperparameters['metrics2']
    union_settings_metrics = [metric1, metric2]
    if 'rmse' in union_settings_metrics:
        metrics_list.append(metrics.RootMeanSquaredError())
    if 'mse' in union_settings_metrics:
        metrics_list.append(metrics.MeanSquaredError())
    if 'mae' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsoluteError())
    if 'mape' in union_settings_metrics:
        metrics_list.append(metrics.MeanAbsolutePercentageError())
    l1 = local_bm_hyperparameters['l1']
    l2 = local_bm_hyperparameters['l2']
    if local_bm_hyperparameters['regularizers_l1_l2'] == 'True':
        activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
    else:
        activation_regularizer = None
    nof_features_for_training = local_bm_hyperparameters[
        'nof_features_for_training']
    # creating model
    forecaster_in_block = tf.keras.Sequential()
    print('creating the ANN model...')
    # first layer (DENSE)
    if local_bm_hyperparameters['units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Dense(
                units=local_bm_hyperparameters['units_layer_1'],
                activation=local_bm_hyperparameters['activation_1'],
                input_shape=(local_bm_hyperparameters['time_steps_days'],
                             nof_features_for_training),
                activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_1'])))
    # second LSTM layer
    if local_bm_hyperparameters[
            'units_layer_2'] > 0 and local_bm_hyperparameters[
                'units_layer_1'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_2'],
                    activation=local_bm_hyperparameters['activation_2'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_2']),
                    return_sequences=False)))
        forecaster_in_block.add(
            RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # third LSTM layer
    if local_bm_hyperparameters['units_layer_3'] > 0:
        forecaster_in_block.add(
            layers.Bidirectional(
                layers.LSTM(
                    units=local_bm_hyperparameters['units_layer_3'],
                    activation=local_bm_hyperparameters['activation_3'],
                    activity_regularizer=activation_regularizer,
                    dropout=float(local_bm_hyperparameters['dropout_layer_3']),
                    return_sequences=True)))
        if local_bm_hyperparameters['units_layer_4'] == 0:
            forecaster_in_block.add(
                RepeatVector(local_bm_hyperparameters['repeat_vector']))
    # fourth layer (DENSE)
    if local_bm_hyperparameters['units_layer_4'] > 0:
        forecaster_in_block.add(
            layers.Dense(units=local_bm_hyperparameters['units_layer_4'],
                         activation=local_bm_hyperparameters['activation_4'],
                         activity_regularizer=activation_regularizer))
        forecaster_in_block.add(
            layers.Dropout(
                rate=float(local_bm_hyperparameters['dropout_layer_4'])))
    # final layer
    forecaster_in_block.add(
        TimeDistributed(layers.Dense(units=nof_features_for_training)))
    forecaster_in_block.save(''.join(
        [local_bm_settings['models_path'], 'in_block_NN_model_structure_']),
                             save_format='tf')
    forecast_horizon_days = local_bm_settings['forecast_horizon_days']
    forecaster_in_block.build(input_shape=(1, forecast_horizon_days + 1,
                                           nof_features_for_training))
    forecaster_in_block.compile(optimizer=optimizer_function,
                                loss=losses_list,
                                metrics=metrics_list)
    forecaster_in_block_json = forecaster_in_block.to_json()
    with open(
            ''.join([
                local_bm_settings['models_path'],
                'freq_acc_forecaster_in_block.json'
            ]), 'w') as json_file:
        json_file.write(forecaster_in_block_json)
        json_file.close()
    print(
        'build_model function finished (model structure saved in json and tf formats)'
    )
    return True, model_built
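
build_model reads everything from a flat hyperparameter mapping. A hypothetical local_bm_hyperparameters consistent with the keys the function accesses (all values here are illustrative assumptions):

# hypothetical input; each key below is one the function actually reads
local_bm_hyperparameters = {
    'time_steps_days': 28, 'epochs': 100, 'batch_size': 32, 'workers': 4,
    'optimizer': 'adam', 'learning_rate': 1e-3,
    'loss_1': 'mse', 'loss_2': 'mae', 'loss_3': 'mape',
    'metrics1': 'rmse', 'metrics2': 'mae',
    'l1': 1e-5, 'l2': 1e-5, 'regularizers_l1_l2': 'False',
    'nof_features_for_training': 1,
    'units_layer_1': 64, 'activation_1': 'relu', 'dropout_layer_1': 0.2,
    'units_layer_2': 32, 'activation_2': 'tanh', 'dropout_layer_2': 0.2,
    'units_layer_3': 32, 'activation_3': 'tanh', 'dropout_layer_3': 0.2,
    'units_layer_4': 0, 'activation_4': 'relu', 'dropout_layer_4': 0.0,
    'repeat_vector': 28,
}
local_bm_settings = {'models_path': 'models/', 'forecast_horizon_days': 28}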
Example #9
    def __init__(self, root_data_dir, k_folds, selected_fold, augment_level,
                 batch_size, slices, seed, experiment_dir):
        if root_data_dir[-1] == '/':
            utils.log('Removing extra \'/\' from root_data_dir')
            root_data_dir = root_data_dir[:-1]

        utils.log('Creating CNN with parameters:\n' +
                  '\troot_data_dir={},\n'.format(root_data_dir) +
                  '\tk_folds={},\n'.format(k_folds) +
                  '\tselected_fold={},\n'.format(selected_fold) +
                  '\taugment_level={},\n'.format(augment_level) +
                  '\tbatch_size={},\n'.format(batch_size) +
                  '\tslices={},\n'.format(slices) +
                  '\tseed={},\n'.format(seed) +
                  '\texperiment_dir={}'.format(experiment_dir))

        self.experiment_dir = experiment_dir
        self.checkpoints_dir = join(self.experiment_dir, 'checkpoints')
        self.log_dir = join(
            self.experiment_dir,
            'logs-{}'.format(datetime.now().strftime('%d-%m-%Y_%Hh%Mm%Ss')))
        self.cross_sections_dir = join(self.experiment_dir, 'cross_sections')
        self.enfaces_dir = join(self.experiment_dir, 'enfaces')

        makedirs(self.cross_sections_dir, exist_ok=True)
        makedirs(self.enfaces_dir, exist_ok=True)

        self.writer = tf.summary.create_file_writer(self.log_dir)

        # build model
        self.model = get_unet(IMAGE_DIM, IMAGE_DIM // slices)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
        self.model.compile(optimizer=self.optimizer,
                           loss=losses.MeanSquaredError())

        # set up checkpoints
        self.epoch = tf.Variable(0)
        self.checkpoint = tf.train.Checkpoint(model=self.model,
                                              optimizer=self.optimizer,
                                              epoch=self.epoch)
        self.manager = tf.train.CheckpointManager(
            self.checkpoint,
            directory=self.checkpoints_dir,
            max_to_keep=None  # keep all checkpoints
        )

        if self.manager.latest_checkpoint:
            utils.log('Loading latest checkpoint {}'.format(
                self.manager.latest_checkpoint))
            self.restore_status = self.checkpoint.restore(
                self.manager.latest_checkpoint)
        else:
            self.restore_status = None

        if augment_level == AUGMENT_NORMALIZE:
            self.mean = STATS[basename(
                root_data_dir)][seed][k_folds][selected_fold]['mean']
            self.std = STATS[basename(
                root_data_dir)][seed][k_folds][selected_fold]['std']
            utils.log('mean={}, std={}'.format(self.mean, self.std))
        else:
            self.use_random_jitter = (augment_level == AUGMENT_FULL)
            utils.log('Using augmented data, not fetching mean/std')
            utils.log('use_random_jitter={}'.format(self.use_random_jitter))

        self.data_loaded = False

        self.root_data_dir = root_data_dir
        self.k_folds = k_folds
        self.selected_fold = selected_fold
        self.augment_level = augment_level
        self.batch_size = batch_size
        self.slices = slices
        self.seed = seed
Example #10
    def compile_fit(self,
                    model_input,
                    q_train_padded,
                    a_train_padded,
                    y_q_label_df,
                    y_a_label_df,
                    y_q_classify_list,
                    y_q_classify_dict,
                    y_a_classify_list,
                    y_a_classify_dict,
                    epoch_num=3):
        """
        This function is used to switch between numrical. The switch controled by hyperparameters self.TYPE
        When self.TYPE == 'num', input will be q_train_padded and y_q_label_df (others are same)
        Meanwhile, switch to ['MSE'] as loss and ['mse', 'mae'] as metrics

        When self.TYPE == 'classify', input will be q_train_padded and y_q_classify_list[0] etc.
        Meanwhile, swith to ['categorical_crossentropy'] as loss and ['accuracy'] as metrics

        """
        start_time = time()
        print("*" * 40, "Start {} Processing".format(model_input._name),
              "*" * 40)
        # loss_fun = 'categorical_crossentropy'
        # loss_fun = 'MSE'  # MeanSquaredError

        METRICS = [
            metrics.TruePositives(name='tp'),
            metrics.FalsePositives(name='fp'),
            metrics.TrueNegatives(name='tn'),
            metrics.FalseNegatives(name='fn'),
            metrics.CategoricalAccuracy(name='accuracy'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc'),
            # F1Score(num_classes = int(y_train.shape[1]), name='F1')
        ]

        loss_fun = None
        metrics_fun = None
        # because of the large data input, we want this to run automatically,
        # so these args choose question or answer processing accordingly
        if self.PART == 'q':
            print("Start processing question part")
            # start to decide complie parameters
            if self.TYPE == 'num':
                print("Start numerical output")
                # call split
                X_train, X_val, y_train, y_val = self.split_data(
                    q_train_padded, y_q_label_df, test_size=0.2)
                loss_fun = losses.MeanSquaredError()
                metrics_fun = ['mse', 'mae']
            elif self.TYPE == 'classify':
                print("Start classify output")
                X_train, X_val, y_train, y_val = self.split_data(
                    q_train_padded, y_q_classify_list[0], test_size=0.2)
                loss_fun = losses.CategoricalCrossentropy()
                metrics_fun = METRICS
            else:
                print("UNKNOW self.TYPE")

        elif self.PART == 'a':
            print("Start processing answer part")
            if self.TYPE == 'num':
                print("Start numerical output")
                # call split
                X_train, X_val, y_train, y_val = self.split_data(
                    a_train_padded, y_a_label_df, test_size=0.2)
                loss_fun = losses.MeanSquaredError()
                metrics_fun = ['mse', 'mae']
            elif self.TYPE == 'classify':
                print("Start classify output")
                X_train, X_val, y_train, y_val = self.split_data(
                    a_train_padded, y_a_classify_list[0], test_size=0.2)
                loss_fun = losses.CategoricalCrossentropy()
                metrics_fun = METRICS
            else:
                print("UNKNOW self.TYPE")

        learning_rate = 1e-3
        opt_adam = optimizers.Adam(learning_rate=learning_rate, decay=1e-5)  # the `lr` alias is deprecated
        model_input.compile(loss=loss_fun,
                            optimizer=opt_adam,
                            metrics=metrics_fun)
        # batch_size is limited by my GPU and its memory; after testing, 32 is a reasonable value.
        # If the vectors are bigger, this value should decrease.

        history = model_input.fit(
            X_train,
            y_train,
            validation_data=(X_val, y_val),
            epochs=epoch_num,
            batch_size=16,
            verbose=1,
            callbacks=[PredictCallback(X_val, y_val, model_input)])
        # spearmanr_list = PredictCallback(X_val, y_val, model_input).spearmanr_list
        # dic = ['loss', 'accuracy', 'val_loss','val_accuracy']
        history_dict = [x for x in history.history]
        # model_input.predict(train_features[:10])

        cost_time = round((time() - start_time), 4)
        print("*" * 40,
              "End {} with {} seconds".format(model_input._name, cost_time),
              "*" * 40,
              end='\n\n')

        return history, model_input
Example #11
Y = tf.reshape(Y, [-1, 2048])

# loop through the entire file

opt = tf.keras.optimizers.Adam(learning_rate=5e-9)
opt2 = tf.keras.optimizers.Adam(learning_rate=5e-4)
opt3 = tf.keras.optimizers.Adam(learning_rate=5e-4)

# skipping model.round_weights(128)
# the scale factor?

callback2 = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                             min_delta=.01,
                                             patience=30)

model.compile(optimizer=opt, loss=losses.MeanSquaredError())
history = model.fit(X, X, epochs=100000000, callbacks=[callback2])

model.freeze_decoder()

nc = 7

np.set_printoptions(threshold=np.inf)

print(model.layers[1].weights)

print("before clustering")
cluster_weights = tfmot.clustering.keras.cluster_weights
CentroidInitialization = tfmot.clustering.keras.CentroidInitialization

clustering_params = {
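
The snippet is cut off at the opening of clustering_params. The standard tensorflow_model_optimization weight-clustering recipe (the values below are assumptions, not the original code) would continue roughly as:

# hypothetical continuation of the truncated dict, following the usual tfmot recipe
clustering_params = {
    'number_of_clusters': nc,
    'cluster_centroids_init': CentroidInitialization.LINEAR,
}
clustered_model = cluster_weights(model, **clustering_params)
clustered_model.compile(optimizer=opt2, loss=losses.MeanSquaredError())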
Example #12
np.random.seed(77)
tf_rnd.set_seed(77)
random.seed(77)

## matplotlib figure size parameters
rcParams['figure.figsize'] = (10.0, 5.0)

x_arr = np.arange(-10, 10, 0.5)
y_arr = np.tile([1, 1, 1, 1, 0, 0, 0, 0], 5)

plt.plot(x_arr, y_arr, '-', color='red', label='Real values', markersize=5)
plt.show()
plt.close()

threshold = 0.0001
keras_lf = losses.MeanSquaredError()


class signal_nn:
    def __init__(self, neurons=50, activation='relu', threshold=0.000001):
        self.threshold = threshold
        self.model = models.Sequential()
        self.model.add(
            layers.Dense(neurons, activation=activation, input_dim=1))
        self.model.add(layers.Dense(neurons, activation=activation))
        self.model.add(layers.Dense(neurons, activation=activation))
        self.model.add(layers.Dense(neurons * 2, activation=activation))
        self.model.add(layers.Dense(neurons * 2, activation=activation))
        self.model.add(layers.Dense(neurons * 2, activation=activation))
        self.model.add(layers.Dense(neurons, activation=activation))
        self.model.add(layers.Dense(20, activation=activation))
Example #13
def create_model(learn_rate, epoch_num, batches, outf_layer, outf_sum,
                 filter_num, split_filters, which_sum, fc):
    input_shape = (98, 98, 3)
    inputs = Input(shape=input_shape, name='image_input')
    # filter number settings
    (f1, f2, f3) = filter_num
    # 3 filters for summing
    if split_filters:
        (f1, f2, f3) = (int(f1 - 3), int(f2 - 3), int(f3))

    # normal layer
    convolution_1 = Conv2D(f1,
                           kernel_size=(5, 5),
                           strides=(1, 1),
                           activation=outf_layer,
                           input_shape=input_shape,
                           name='c_layer_1')(inputs)
    s1 = tf.reduce_sum(convolution_1, axis=[1, 2, 3], name='c_layer_1_sum')
    pooling_1 = MaxPooling2D(pool_size=(2, 2),
                             strides=(2, 2),
                             name='p_layer_1')(convolution_1)
    if split_filters:
        # sum "layer"
        s1 = tf.reduce_sum(Conv2D(3,
                                  kernel_size=(5, 5),
                                  strides=(1, 1),
                                  activation=outf_sum,
                                  input_shape=input_shape)(inputs),
                           axis=[1, 2, 3],  # per-sample sum, matching c_layer_1_sum above
                           name='c_layer_1_sum')

    convolution_2 = Conv2D(f2,
                           kernel_size=(5, 5),
                           strides=(1, 1),
                           activation=outf_layer,
                           input_shape=input_shape,
                           name='c_layer_2')(pooling_1)
    s2 = tf.reduce_sum(convolution_2, axis=[1, 2, 3], name='c_layer_2_sum')
    pooling_2 = MaxPooling2D(pool_size=(2, 2),
                             strides=(2, 2),
                             name='p_layer_2')(convolution_2)
    if split_filters:
        s2 = tf.reduce_sum(Conv2D(3,
                                  kernel_size=(5, 5),
                                  strides=(1, 1),
                                  activation=outf_sum,
                                  input_shape=input_shape)(pooling_1),
                           axis=[1, 2, 3],  # per-sample sum, matching c_layer_2_sum above
                           name='c_layer_2_sum')

    convolution_3 = Conv2D(f3,
                           kernel_size=(5, 5),
                           strides=(1, 1),
                           activation=outf_sum,
                           input_shape=input_shape,
                           name='c_layer_3')(pooling_2)

    if fc:
        flat = Flatten()(convolution_3)
        s3 = Dense(1, activation=outf_sum)(flat)
    else:
        s3 = tf.reduce_sum(convolution_3, axis=[1, 2, 3], name='c_layer_3_sum')

    y_pred = s3
    for i, s in enumerate([s1, s2]):
        if which_sum[i] == 1:
            y_pred += s

    model = Model(inputs=inputs, outputs=y_pred)  # y_pred is s3 plus any layer sums selected by which_sum
    model.compile(
        loss=losses.MeanSquaredError(),
        optimizer=optimizers.Adam(learning_rate=learn_rate, name='Adam'),
        metrics=[metrics.RootMeanSquaredError(),
                 metrics.MeanAbsoluteError()])

    return model
Example #14
    def train(self, local_settings, local_raw_unit_sales, local_model_hyperparameters, local_time_series_not_improved,
              raw_unit_sales_ground_truth):
        try:
            # data normalization
            local_forecast_horizon_days = local_settings['forecast_horizon_days']
            local_x_train, local_y_train = build_x_y_train_arrays(local_raw_unit_sales, local_settings,
                                                                  local_model_hyperparameters,
                                                                  local_time_series_not_improved)
            local_features_for_each_training = 1
            print('starting neural network - individual time_serie training')
            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters['learning_rate']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [local_loss_1, local_loss_2, local_loss_3]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1, l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(monitor='loss',
                                               patience=local_model_hyperparameters['early_stopping_patience'])
            local_callbacks = [local_callback1]
            print('building current model: Mix_Bid_PeepHole_LSTM_Dense_ANN')
            local_base_model = tf.keras.Sequential()
            # first layer (DENSE)
            if local_model_hyperparameters['units_layer_1'] > 0:
                # strictly dim 1 of input_shape is ['time_steps_days'] (dim 0 is number of batches: None)
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_1'],
                                                  activation=local_model_hyperparameters['activation_1'],
                                                  input_shape=(local_time_steps_days,
                                                               local_features_for_each_training),
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_1'])))
            # second layer
            if local_model_hyperparameters['units_layer_2']:
                if local_model_hyperparameters['units_layer_1'] == 0:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         input_shape=(local_time_steps_days,
                                                      local_features_for_each_training),
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                else:
                    local_base_model.add(layers.RNN(
                        PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_2'],
                                         activation=local_model_hyperparameters['activation_2'],
                                         dropout=float(local_model_hyperparameters['dropout_layer_2']))))
                # local_base_model.add(RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(layers.Dense(units=local_model_hyperparameters['units_layer_3'],
                                                  activation=local_model_hyperparameters['activation_3'],
                                                  activity_regularizer=local_activation_regularizer))
                local_base_model.add(layers.Dropout(rate=float(local_model_hyperparameters['dropout_layer_3'])))
            # fourth layer
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(layers.RNN(
                    PeepholeLSTMCell(units=local_model_hyperparameters['units_layer_4'],
                                     activation=local_model_hyperparameters['activation_4'],
                                     dropout=float(local_model_hyperparameters['dropout_layer_4']))))
            local_base_model.add(layers.Dense(units=local_forecast_horizon_days))

            # build and compile model
            local_base_model.build(input_shape=(1, local_time_steps_days, local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([local_settings['models_path'],
                                           'generic_forecaster_template_individual_ts.h5']))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([local_settings['models_path'],
                               'generic_forecaster_template_individual_ts.json']), 'w') as json_file:
                json_file.write(local_base_model_json)
                json_file.close()
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']
            # all input data in the correct type
            local_x_train = np.array(local_x_train, dtype=np.dtype('float32'))
            local_y_train = np.array(local_y_train, dtype=np.dtype('float32'))
            local_raw_unit_sales = np.array(local_raw_unit_sales, dtype=np.dtype('float32'))
            # specific time_serie models training loop
            local_y_pred_list = []
            local_nof_time_series = local_settings['number_of_time_series']
            remainder = np.array([time_serie for time_serie in range(local_nof_time_series)
                                  if time_serie not in local_time_series_not_improved])
            for time_serie in remainder:
                # ----------------------key_point---------------------------------------------------------------------
                # take note that each loop the weights and internal last states of previous training are conserved
                # that probably saves time, and for aggregated or ordered (connected) time series it may improve results
                # ----------------------key_point---------------------------------------------------------------------
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, time_serie: time_serie + 1, :], \
                                   local_y_train[:, time_serie: time_serie + 1, :]
                local_x = local_x.reshape(local_x.shape[0], local_x.shape[2], 1)
                local_y = local_y.reshape(local_y.shape[0], local_y.shape[2], 1)
                # training, saving model and storing forecasts
                local_base_model.fit(local_x, local_y, batch_size=local_batch_size, epochs=local_epochs,
                                     workers=local_workers, callbacks=local_callbacks, shuffle=False)
                local_base_model.save_weights(''.join([local_settings['models_path'],
                                                       '/weights_last_year/_individual_ts_',
                                                       str(time_serie), '_model_weights_.h5']))
                local_x_input = local_raw_unit_sales[time_serie: time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = cof_zeros(local_x_input, local_settings)
                local_x_input = local_x_input.reshape(1, local_x_input.shape[1], 1)
                print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                print('x_input:\n', local_x_input)
                print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                local_y_pred = cof_zeros(local_y_pred, local_settings)
                if local_settings['mini_ts_evaluator'] == "True" and \
                        local_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
                    mini_evaluator = mini_evaluator_submodule()
                    evaluation = mini_evaluator.evaluate_ts_forecast(
                            raw_unit_sales_ground_truth[time_serie, -local_forecast_horizon_days:], local_y_pred)
                    print('ts:', time_serie, 'with cof_zeros ts mse:', evaluation)
                else:
                    print('ts:', time_serie)
                print(local_y_pred)
                local_y_pred_list.append(local_y_pred)
            local_point_forecast_array = np.array(local_y_pred_list)
            local_point_forecast_normalized = local_point_forecast_array.reshape(
                (local_point_forecast_array.shape[0], local_point_forecast_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized

            # save points forecast
            np.savetxt(''.join([local_settings['others_outputs_path'], 'point_forecast_NN_LSTM_simulation.csv']),
                       local_point_forecast, fmt='%10.15f', delimiter=',', newline='\n')
            print('point forecasts saved to file')
            print('submodule for build, train and forecast time_serie individually finished successfully')
            return True
        except Exception as submodule_error:
            print('train model and forecast individual time_series submodule_error: ', submodule_error)
            logger.info('error in training and forecast-individual time_serie schema')
            logger.error(str(submodule_error), exc_info=True)
            return False
Example #15
            plt.subplot(232)
            plt.imshow(pred[0, ..., 0], cmap='gray')
            plt.subplot(233)
            plt.imshow(self.train_lab[0, ..., 0], cmap='gray')
            pred = self.model(self.test_img)
            plt.subplot(234)
            plt.imshow(self.test_img[0, ..., 0], cmap='gray')
            plt.subplot(235)
            plt.imshow(pred[0, ..., 0], cmap='gray')
            plt.subplot(236)
            plt.imshow(self.test_lab[0, ..., 0], cmap='gray')
            plt.show()


# %%

# Build model
model = SubPixel(upscale_factor=scale)

model.compile(loss=losses.MeanSquaredError(),
              optimizer=optimizers.Adam(learning_rate=0.001),
              metrics=[Metric(1).psnr, Metric(1).ssim])
# %%
model.fit(train_ds,
          epochs=epochs,
          validation_data=val_ds,
          callbacks=[PlotCallback()],
          verbose=2)

# %%
Example #16
    def __init__(self, scope='MSE'):
        super(MSE, self).__init__(scope)
        self.cost = losses.MeanSquaredError(reduction=losses.Reduction.SUM)
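
With reduction=Reduction.SUM the loss is summed over the batch instead of averaged. A quick standalone illustration (separate from the class above):

import tensorflow as tf
from tensorflow.keras import losses

y_true = tf.constant([[0.0], [0.0]])
y_pred = tf.constant([[1.0], [3.0]])
# default reduction averages per-sample losses: (1 + 9) / 2 = 5.0; SUM keeps the total: 10.0
print(losses.MeanSquaredError()(y_true, y_pred).numpy())                                # 5.0
print(losses.MeanSquaredError(reduction=losses.Reduction.SUM)(y_true, y_pred).numpy())  # 10.0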
Example #17
x = layers.concatenate([x1, x2])

score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)

# Build the model

model = tf.keras.Model(inputs=[image_input, timeseries_input],
                       outputs=[score_output, class_output])

# Configure the model

model.compile(
    optimizer=optimizers.RMSprop(1e-3),
    loss={
        "score_output": losses.MeanSquaredError(),
        "class_output": losses.CategoricalCrossentropy(from_logits=True)
    },
    metrics={
        "score_output":
        [metrics.MeanAbsolutePercentageError(),
         metrics.MeanAbsoluteError()],
        "class_output": [metrics.CategoricalAccuracy()]
    })

model.summary()  # summary() prints directly and returns None
# Build the data

img_data = random_sample(size=(100, 32, 32, 3))
ts_data = random_sample(size=(100, 20, 10))
score_targets = random_sample(size=(100, 1))
Example #18
    def train_model(self, local_settings, local_raw_unit_sales,
                    local_model_hyperparameters):
        try:
            # loading hyperparameters
            local_days_in_focus = local_model_hyperparameters[
                'days_in_focus_frame']
            local_raw_unit_sales_data = local_raw_unit_sales[:,
                                                             -local_days_in_focus:]
            local_nof_ts = local_raw_unit_sales.shape[0]
            local_forecast_horizon_days = local_settings[
                'forecast_horizon_days']
            local_features_for_each_training = 1
            print(
                'starting neural network - individual time_serie training unit_sale_approach'
            )

            # building architecture and compiling model_template
            # set training parameters
            local_time_steps_days = int(local_settings['time_steps_days'])
            local_epochs = int(local_model_hyperparameters['epochs'])
            local_batch_size = int(local_model_hyperparameters['batch_size'])
            local_workers = int(local_model_hyperparameters['workers'])
            local_optimizer_function = local_model_hyperparameters['optimizer']
            local_optimizer_learning_rate = local_model_hyperparameters[
                'learning_rate']
            local_validation_split = local_model_hyperparameters[
                'validation_split']
            if local_optimizer_function == 'adam':
                local_optimizer_function = optimizers.Adam(
                    local_optimizer_learning_rate)
            elif local_optimizer_function == 'ftrl':
                local_optimizer_function = optimizers.Ftrl(
                    local_optimizer_learning_rate)
            local_losses_list = []
            local_loss_1 = local_model_hyperparameters['loss_1']
            local_loss_2 = local_model_hyperparameters['loss_2']
            local_loss_3 = local_model_hyperparameters['loss_3']
            local_union_settings_losses = [
                local_loss_1, local_loss_2, local_loss_3
            ]
            if 'mape' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in local_union_settings_losses:
                local_losses_list.append(losses.MeanSquaredError())
            if 'mae' in local_union_settings_losses:
                local_losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in local_union_settings_losses:
                local_losses_list.append(modified_mape())
            if 'customized_loss_function' in local_union_settings_losses:
                local_losses_list.append(customized_loss())
            if 'pinball_loss_function' in local_union_settings_losses:
                local_losses_list.append(pinball_function_loss())
            local_metrics_list = []
            local_metric1 = local_model_hyperparameters['metrics1']
            local_metric2 = local_model_hyperparameters['metrics2']
            local_union_settings_metrics = [local_metric1, local_metric2]
            if 'rmse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in local_union_settings_metrics:
                local_metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in local_union_settings_metrics:
                local_metrics_list.append(
                    metrics.MeanAbsolutePercentageError())
            local_l1 = local_model_hyperparameters['l1']
            local_l2 = local_model_hyperparameters['l2']
            if local_model_hyperparameters['regularizers_l1_l2'] == 'True':
                local_activation_regularizer = regularizers.l1_l2(l1=local_l1,
                                                                  l2=local_l2)
            else:
                local_activation_regularizer = None
            # define callbacks, checkpoints namepaths
            local_callback1 = cb.EarlyStopping(
                monitor='loss',
                patience=local_model_hyperparameters['early_stopping_patience']
            )
            local_callbacks = [local_callback1]
            print(
                'building current model: individual_time_serie_acc_freq_LSTM_Dense_ANN'
            )
            local_base_model = tf.keras.Sequential()
            # first layer (LSTM)
            if local_model_hyperparameters['units_layer_1'] > 0:
                local_base_model.add(
                    layers.LSTM(
                        units=local_model_hyperparameters['units_layer_1'],
                        activation=local_model_hyperparameters['activation_1'],
                        input_shape=(
                            local_model_hyperparameters['time_steps_days'],
                            local_features_for_each_training),
                        dropout=float(
                            local_model_hyperparameters['dropout_layer_1']),
                        activity_regularizer=local_activation_regularizer,
                        return_sequences=True))
            # second LSTM layer
            if local_model_hyperparameters['units_layer_2'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.LSTM(
                            units=local_model_hyperparameters['units_layer_2'],
                            activation=local_model_hyperparameters[
                                'activation_2'],
                            activity_regularizer=local_activation_regularizer,
                            dropout=float(
                                local_model_hyperparameters['dropout_layer_2']
                            ),
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if local_model_hyperparameters['units_layer_3'] > 0:
                local_base_model.add(
                    layers.Bidirectional(
                        layers.
                        RNN(PeepholeLSTMCell(
                            units=local_model_hyperparameters['units_layer_3'],
                            dropout=float(
                                local_model_hyperparameters['dropout_layer_3'])
                        ),
                            activity_regularizer=local_activation_regularizer,
                            return_sequences=False)))
                local_base_model.add(
                    RepeatVector(local_model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if local_model_hyperparameters['units_layer_4'] > 0:
                local_base_model.add(
                    layers.Dense(
                        units=local_model_hyperparameters['units_layer_4'],
                        activation=local_model_hyperparameters['activation_4'],
                        activity_regularizer=local_activation_regularizer))
                local_base_model.add(
                    layers.Dropout(rate=float(
                        local_model_hyperparameters['dropout_layer_4'])))
            # final layer
            local_base_model.add(
                layers.Dense(
                    units=local_model_hyperparameters['units_final_layer']))

            # build and compile model
            local_base_model.build(
                input_shape=(1, local_time_steps_days,
                             local_features_for_each_training))
            local_base_model.compile(optimizer=local_optimizer_function,
                                     loss=local_losses_list,
                                     metrics=local_metrics_list)

            # save model architecture (template for specific models)
            local_base_model.save(''.join([
                local_settings['models_path'],
                '_unit_sales_forecaster_template_individual_ts.h5'
            ]))
            local_base_model_json = local_base_model.to_json()
            with open(''.join([local_settings['models_path'],
                               '_unit_sales_forecaster_template_individual_ts.json']), 'w') \
                    as json_file:
                json_file.write(local_base_model_json)
            local_base_model.summary()

            # training model
            local_moving_window_length = local_settings['moving_window_input_length'] + \
                                         local_settings['moving_window_output_length']

            # build x_train and y_train with the same builder used for the third and fourth models
            local_builder = local_bxy_x_y_builder()
            local_x_train, local_y_train = local_builder.build_x_y_train_arrays(
                local_raw_unit_sales, local_settings,
                local_model_hyperparameters)
            local_x_train = local_x_train.reshape(local_x_train.shape[0],
                                                  local_x_train.shape[2],
                                                  local_x_train.shape[1])
            local_y_train = local_y_train.reshape(local_y_train.shape[0],
                                                  local_y_train.shape[2],
                                                  local_y_train.shape[1])
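            # both arrays are now (samples, time_steps, n_series); the
            # per-series slicing below selects one series along the last axis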

            # start training time_serie by time_serie
            local_y_pred_array = np.zeros(shape=(local_raw_unit_sales.shape[0],
                                                 local_forecast_horizon_days),
                                          dtype=np.dtype('float32'))
            for time_serie in range(local_nof_ts):
                print('training time_serie:', time_serie)
                local_x, local_y = local_x_train[:, :, time_serie: time_serie + 1], \
                                   local_y_train[:, :, time_serie: time_serie + 1]
                # training, saving model and storing forecasts
                local_base_model.fit(local_x,
                                     local_y,
                                     batch_size=local_batch_size,
                                     epochs=local_epochs,
                                     workers=local_workers,
                                     callbacks=local_callbacks,
                                     shuffle=False,
                                     validation_split=local_validation_split)
                local_base_model.save_weights(''.join([
                    local_settings['models_path'],
                    '/_weights_unit_sales_NN_35_days/_individual_ts_',
                    str(time_serie), '_model_weights_.h5'
                ]))
                local_x_input = local_raw_unit_sales[
                    time_serie:time_serie + 1, -local_forecast_horizon_days:]
                local_x_input = local_x_input.reshape(1,
                                                      local_x_input.shape[1],
                                                      1)
                # print('x_input shape:', local_x_input.shape)
                local_y_pred = local_base_model.predict(local_x_input)
                # print('x_input:\n', local_x_input)
                # print('y_pred shape:', local_y_pred.shape)
                local_y_pred = local_y_pred.reshape(local_y_pred.shape[1])
                # print('ts:', time_serie)
                # print(local_y_pred)
                local_y_pred_array[time_serie:time_serie + 1, :] = local_y_pred
            local_point_forecast_normalized = local_y_pred_array.reshape(
                (local_y_pred_array.shape[0], local_y_pred_array.shape[1]))
            local_point_forecast = local_point_forecast_normalized.clip(0)

            # save point forecasts
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'point_forecast_NN_from_unit_sales_training'
                ]), local_point_forecast)
            np.save(
                ''.join([
                    local_settings['train_data_path'],
                    'eleventh_model_NN_unit_sales_forecast_data'
                ]), local_point_forecast)
            np.savetxt(''.join([
                local_settings['others_outputs_path'],
                'point_forecast_NN_from_unit_sales_training.csv'
            ]),
                       local_point_forecast,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            print('point forecasts saved to file')
            print(
                'submodule for building, training and forecasting individual time_serie unit_sales finished successfully'
            )
            return True, local_point_forecast
        except Exception as submodule_error:
            print(
                'train model and forecast individual time_series unit_sales submodule_error: ',
                submodule_error)
            logger.info(
                'error in training and forecasting individual time_serie unit_sales submodule'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False, []
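Once the architecture template and the per-series weights are on disk, a specific model can be rebuilt without retraining. A minimal reload sketch, assuming the JSON template and weight files saved above; `models_path` stands in for `local_settings['models_path']` and the series index 0 is purely illustrative:

from tensorflow.keras.models import model_from_json

# rebuild the architecture from the saved template, then load the weights
# of one specific time_serie
with open(models_path +
          '_unit_sales_forecaster_template_individual_ts.json') as f:
    reloaded = model_from_json(
        f.read(), custom_objects={'PeepholeLSTMCell': PeepholeLSTMCell})
reloaded.load_weights(models_path +
                      '/_weights_unit_sales_NN_35_days/'
                      '_individual_ts_0_model_weights_.h5')
y_pred = reloaded.predict(x_input)  # x_input shaped (1, time_steps, 1)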
    N = 10000
    T = 200
    maxlen = T

    x, t = toy_problem(N, T)
    x_train, x_val, t_train, t_val = \
        train_test_split(x, t, test_size=0.2, shuffle=False)
    '''
    2. Build the model
    '''
    model = RNN(50)
    '''
    3. Train the model
    '''
    criterion = losses.MeanSquaredError()
    optimizer = optimizers.Adam(learning_rate=0.001,
                                beta_1=0.9,
                                beta_2=0.999,
                                amsgrad=True)
    train_loss = metrics.Mean()
    val_loss = metrics.Mean()

    def compute_loss(t, y):
        return criterion(t, y)

    def train_step(x, t):
        with tf.GradientTape() as tape:
            preds = model(x)
            loss = compute_loss(t, preds)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss(loss)
        return loss
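    # The fragment defines train_step but never invokes it. A minimal driver
    # loop, assuming the x_train/t_train/x_val/t_val split created above;
    # epoch count and batch size are illustrative choices, not from the source.
    def val_step(x, t):
        preds = model(x)
        val_loss(compute_loss(t, preds))

    epochs = 100  # illustrative
    batch_size = 100  # illustrative
    n_batches = x_train.shape[0] // batch_size
    for epoch in range(epochs):
        for batch in range(n_batches):
            start = batch * batch_size
            train_step(x_train[start:start + batch_size],
                       t_train[start:start + batch_size])
        val_step(x_val, t_val)
        print('epoch: {}, loss: {:.3f}, val_loss: {:.3f}'.format(
            epoch + 1, train_loss.result(), val_loss.result()))
        train_loss.reset_states()
        val_loss.reset_states()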
    def forecast(self, local_mse, local_normalized_scaled_unit_sales,
                 local_mean_unit_complete_time_serie, local_raw_unit_sales,
                 local_settings):
        try:
            print(
                'starting high loss (mse in previous LSTM) time_series in-block forecast submodule'
            )
            # set training parameters
            with open(''.join([local_settings['hyperparameters_path'],
                               'in_block_time_serie_based_model_hyperparameters.json'])) \
                    as local_r_json_file:
                model_hyperparameters = json.loads(local_r_json_file.read())
            local_time_series_group = np.load(''.join(
                [local_settings['train_data_path'], 'time_serie_group.npy']),
                                              allow_pickle=True)
            time_steps_days = int(local_settings['time_steps_days'])
            epochs = int(model_hyperparameters['epochs'])
            batch_size = int(model_hyperparameters['batch_size'])
            workers = int(model_hyperparameters['workers'])
            optimizer_function = model_hyperparameters['optimizer']
            optimizer_learning_rate = model_hyperparameters['learning_rate']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(optimizer_learning_rate)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            losses_list = []
            loss_1 = model_hyperparameters['loss_1']
            loss_2 = model_hyperparameters['loss_2']
            loss_3 = model_hyperparameters['loss_3']
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'mape' in union_settings_losses:
                losses_list.append(losses.MeanAbsolutePercentageError())
            if 'mse' in union_settings_losses:
                losses_list.append(losses.MeanSquaredError())
            if 'mae' in union_settings_losses:
                losses_list.append(losses.MeanAbsoluteError())
            if 'm_mape' in union_settings_losses:
                losses_list.append(modified_mape())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            metrics_list = []
            metric1 = model_hyperparameters['metrics1']
            metric2 = model_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'rmse' in union_settings_metrics:
                metrics_list.append(metrics.RootMeanSquaredError())
            if 'mse' in union_settings_metrics:
                metrics_list.append(metrics.MeanSquaredError())
            if 'mae' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsoluteError())
            if 'mape' in union_settings_metrics:
                metrics_list.append(metrics.MeanAbsolutePercentageError())
            l1 = model_hyperparameters['l1']
            l2 = model_hyperparameters['l2']
            if model_hyperparameters['regularizers_l1_l2'] == 'True':
                activation_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            else:
                activation_regularizer = None
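            # note: the if-chains above amount to a table-driven lookup; a
            # minimal equivalent sketch for the three stock losses
            # (hypothetical helper, same hyperparameter keys):
            #     loss_map = {'mape': losses.MeanAbsolutePercentageError,
            #                 'mse': losses.MeanSquaredError,
            #                 'mae': losses.MeanAbsoluteError}
            #     losses_list = [loss_map[k]() for k in union_settings_losses
            #                    if k in loss_map]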

            # searching for time_series with high loss forecast
            time_series_treated = []
            poor_results_mse_threshold = local_settings[
                'poor_results_mse_threshold']
            poor_result_time_serie_list = []
            for result in local_mse:
                if result[1] > poor_results_mse_threshold:
                    poor_result_time_serie_list.append(int(result[0]))
            nof_features_for_training = len(poor_result_time_serie_list)
            # creating model
            forecaster_in_block = tf.keras.Sequential()
            print(
                'current model for specific high loss time_series: Mix_Bid_PeepHole_LSTM_Dense_ANN'
            )
            # first layer (DENSE)
            if model_hyperparameters['units_layer_1'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_1'],
                        activation=model_hyperparameters['activation_1'],
                        input_shape=(model_hyperparameters['time_steps_days'],
                                     nof_features_for_training),
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_1'])))
            # second LSTM layer
            if model_hyperparameters['units_layer_2'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_2'],
                            activation=model_hyperparameters['activation_2'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_2'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # third LSTM layer
            if model_hyperparameters['units_layer_3'] > 0:
                forecaster_in_block.add(
                    layers.Bidirectional(
                        layers.RNN(PeepholeLSTMCell(
                            units=model_hyperparameters['units_layer_3'],
                            activation=model_hyperparameters['activation_3'],
                            activity_regularizer=activation_regularizer,
                            dropout=float(
                                model_hyperparameters['dropout_layer_3'])),
                                   return_sequences=False)))
                forecaster_in_block.add(
                    RepeatVector(model_hyperparameters['repeat_vector']))
            # fourth layer (DENSE)
            if model_hyperparameters['units_layer_4'] > 0:
                forecaster_in_block.add(
                    layers.Dense(
                        units=model_hyperparameters['units_layer_4'],
                        activation=model_hyperparameters['activation_4'],
                        activity_regularizer=activation_regularizer))
                forecaster_in_block.add(
                    layers.Dropout(
                        rate=float(model_hyperparameters['dropout_layer_4'])))
            # final layer
            forecaster_in_block.add(
                TimeDistributed(layers.Dense(units=nof_features_for_training)))
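            # TimeDistributed applies the same Dense projection independently
            # at every time step of the output sequence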
            # forecaster_in_block.save(''.join([local_settings['models_path'], '_model_structure_']),
            #                          save_format='tf')
            forecast_horizon_days = local_settings['forecast_horizon_days']
            forecaster_in_block.build(input_shape=(1, forecast_horizon_days,
                                                   nof_features_for_training))
            forecaster_in_block.compile(optimizer=optimizer_function,
                                        loss=losses_list,
                                        metrics=metrics_list)
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(
                    ''.join([
                        local_settings['models_path'],
                        'forecaster_in_block.json'
                    ]), 'w') as json_file:
                json_file.write(forecaster_in_block_json)
            forecaster_in_block_untrained = forecaster_in_block
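            # note: plain assignment aliases the model rather than copying it;
            # both names refer to the same (not yet trained) object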
            print('specific time_serie model initialized and compiled')
            nof_selling_days = local_normalized_scaled_unit_sales.shape[1]
            last_learning_day_in_year = np.mod(nof_selling_days, 365)
            max_selling_time = local_settings['max_selling_time']
            days_in_focus_frame = model_hyperparameters['days_in_focus_frame']
            window_input_length = local_settings['moving_window_input_length']
            window_output_length = local_settings[
                'moving_window_output_length']
            moving_window_length = window_input_length + window_output_length
            nof_years = local_settings['number_of_years_ceil']

            # training
            # time_serie_data = local_normalized_scaled_unit_sales
            nof_poor_result_time_series = len(poor_result_time_serie_list)
            time_serie_data = np.zeros(shape=(nof_poor_result_time_series,
                                              max_selling_time))
            time_serie_iterator = 0
            for time_serie in poor_result_time_serie_list:
                time_serie_data[
                    time_serie_iterator, :] = local_normalized_scaled_unit_sales[
                        time_serie, :]
                time_serie_iterator += 1
            if local_settings['repeat_training_in_block'] == "True":
                print(
                    'starting in-block training of model for high_loss time_series in previous model'
                )
                nof_selling_days = time_serie_data.shape[1]
                # nof_moving_windows = np.int32(nof_selling_days / moving_window_length)
                remainder_days = np.mod(nof_selling_days, moving_window_length)
                window_first_days = list(
                    range(0, nof_selling_days, moving_window_length))
                length_window_walk = len(window_first_days)
                # last_window_start = window_first_days[length_window_walk - 1]
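                # if the windows do not tile the data exactly, shift the last
                # window back so it ends on the final selling day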
                if remainder_days != 0:
                    window_first_days[
                        length_window_walk -
                        1] = nof_selling_days - moving_window_length
                day_in_year = [
                    last_learning_day_in_year + year * 365
                    for year in range(nof_years)
                ]
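                # day_in_year marks the same learning day in each year of the
                # available history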
                stride_window_walk = model_hyperparameters[
                    'stride_window_walk']
                print('defining x_train')
                x_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    x_train = [
                        time_serie_data[:, day - time_steps_days:day -
                                        window_output_length]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    x_train = [
                        time_serie_data[:, day:day + time_steps_days]
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: zero-pad the last year so the most
                    # recent data available can still be used
                    x_train += [
                        np.concatenate(
                            (time_serie_data[:, day -
                                             window_output_length:day],
                             np.zeros(shape=(nof_poor_result_time_series,
                                             time_steps_days -
                                             window_output_length))),
                            axis=1)
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                else:
                    logging.info(
                        "\ntrain_model_input_data_approach is not defined")
                    print('a problem occurred with the train_model_input_data_approach setting')
                    return False, None
                print('defining y_train')
                y_train = []
                if local_settings['train_model_input_data_approach'] == "all":
                    y_train = [
                        time_serie_data[:, day - time_steps_days:day]
                        for day in range(time_steps_days, max_selling_time,
                                         stride_window_walk)
                    ]
                elif local_settings[
                        'train_model_input_data_approach'] == "focused":
                    y_train = [
                        time_serie_data[:, day:day + time_steps_days]
                        for last_day in day_in_year[:-1] for day in range(
                            last_day + window_output_length, last_day +
                            window_output_length -
                            days_in_focus_frame, -stride_window_walk)
                    ]
                    # border condition: zero-pad the last year, as for x_train
                    y_train += [
                        np.concatenate(
                            (time_serie_data[:, day -
                                             window_output_length:day],
                             np.zeros(shape=(nof_poor_result_time_series,
                                             time_steps_days -
                                             window_output_length))),
                            axis=1)
                        for last_day in day_in_year[-1:] for day in range(
                            last_day, last_day -
                            days_in_focus_frame, -stride_window_walk)
                    ]

                # if time_enhance is active, assign more weight to the last
                # time_steps according to enhance_last_stride
                if local_settings['time_enhance'] == 'True':
                    enhance_last_stride = local_settings['enhance_last_stride']
                    length_x_y_train = len(x_train)
                    x_train_enhanced, y_train_enhanced = [], []
                    enhance_iterator = 1
                    for position in range(
                            length_x_y_train - enhance_last_stride,
                            length_x_y_train):
                        # replicate later windows more often so they weigh
                        # more during training
                        x_train_enhanced.extend(
                            x_train[position]
                            for _ in range(1, 3 * (enhance_iterator + 1)))
                        y_train_enhanced.extend(
                            y_train[position]
                            for _ in range(1, 3 * (enhance_iterator + 1)))
                        enhance_iterator += 1
                    x_train = x_train[:-enhance_last_stride] + x_train_enhanced
                    y_train = y_train[:-enhance_last_stride] + y_train_enhanced

                # convert the lists to np arrays and apply the final pre-training preprocessing step (amplification)
                x_train = np.array(x_train)
                y_train = np.array(y_train)
                print('x_train_shape:  ', x_train.shape)
                if local_settings['amplification'] == 'True':
                    factor = local_settings[
                        'amplification_factor']  # factor tuning was done previously
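                    # amplification: every positive value in a series is
                    # replaced by factor * that series' maximum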
                    for time_serie_iterator in range(np.shape(x_train)[1]):
                        max_time_serie = np.amax(
                            x_train[:, time_serie_iterator, :])
                        x_train[:, time_serie_iterator, :][x_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                        max_time_serie = np.amax(
                            y_train[:, time_serie_iterator, :])
                        y_train[:, time_serie_iterator, :][y_train[:, time_serie_iterator, :] > 0] = \
                            max_time_serie * factor
                print('x_train and y_train built')

                # define callbacks, checkpoints namepaths
                model_weights = ''.join([
                    local_settings['checkpoints_path'],
                    'check_point_model_for_high_loss_time_serie_',
                    model_hyperparameters['current_model_name'],
                    "_loss_-{loss:.4f}-.hdf5"
                ])
                callback1 = cb.EarlyStopping(
                    monitor='loss',
                    patience=model_hyperparameters['early_stopping_patience'])
                callback2 = cb.ModelCheckpoint(model_weights,
                                               monitor='loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
                callbacks = [callback1, callback2]
                x_train = x_train.reshape(
                    (np.shape(x_train)[0], np.shape(x_train)[2],
                     np.shape(x_train)[1]))
                y_train = y_train.reshape(
                    (np.shape(y_train)[0], np.shape(y_train)[2],
                     np.shape(y_train)[1]))
                print('input_shape: ', np.shape(x_train))

                # train for each time_serie
                # check settings for repeat or not the training
                forecaster_in_block.fit(x_train,
                                        y_train,
                                        batch_size=batch_size,
                                        epochs=epochs,
                                        workers=workers,
                                        callbacks=callbacks,
                                        shuffle=False)
                # print summary (informative; note that output shapes may be reported as "multiple")
                forecaster_in_block.summary()
                forecaster_in_block.save(''.join([
                    local_settings['models_path'],
                    '_high_loss_time_serie_model_forecaster_in_block_.h5'
                ]))
                forecaster_in_block.save_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                print(
                    'high loss time_series model trained and saved in hdf5 format .h5'
                )
            else:
                forecaster_in_block.load_weights(''.join([
                    local_settings['models_path'],
                    '_weights_high_loss_ts_model_forecaster_in_block_.h5'
                ]))
                # forecaster_in_block = models.load_model(''.join([local_settings['models_path'],
                #                                                  '_high_loss_time_serie_model_forecaster_.h5']))
                print('weights of previously trained model loaded')

            # compile model and make forecast (not necessary)
            # forecaster_in_block.compile(optimizer='adam', loss='mse')

            # evaluating model and comparing with aggregated (in-block) LSTM
            print('evaluating the trained model...')
            time_serie_data = time_serie_data.reshape(
                (1, time_serie_data.shape[1], time_serie_data.shape[0]))
            x_input = time_serie_data[:, -forecast_horizon_days:, :]
            y_pred_normalized = forecaster_in_block.predict(x_input)
            # print('output shape: ', y_pred_normalized.shape)
            time_serie_data = time_serie_data.reshape(
                (time_serie_data.shape[2], time_serie_data.shape[1]))
            # print('time_serie data shape: ', np.shape(time_serie_data))
            time_serie_iterator = 0
            improved_time_series_forecast = []
            time_series_not_improved = []
            improved_mse = []
            for time_serie in poor_result_time_serie_list:
                # for time_serie in range(local_normalized_scaled_unit_sales.shape[0]):
                y_truth = local_raw_unit_sales[time_serie:time_serie + 1,
                                               -forecast_horizon_days:]
                # print('y_truth shape:', y_truth.shape)

                # reversing preprocess: rescale, denormalize, reshape
                # inverse reshape
                y_pred_reshaped = y_pred_normalized.reshape(
                    (y_pred_normalized.shape[2], y_pred_normalized.shape[1]))
                y_pred_reshaped = y_pred_reshaped[
                    time_serie_iterator:time_serie_iterator + 1, :]
                # print('y_pred_reshaped shape:', y_pred_reshaped.shape)

                # inverse transform (first moving_windows denormalizing and then general rescaling)
                time_serie_normalized_window_mean = np.mean(
                    time_serie_data[time_serie_iterator,
                                    -moving_window_length:])
                # print('mean of this time serie (normalized values): ', time_serie_normalized_window_mean)
                local_denormalized_array = window_based_denormalizer(
                    y_pred_reshaped, time_serie_normalized_window_mean,
                    forecast_horizon_days)
                local_point_forecast = general_mean_rescaler(
                    local_denormalized_array,
                    local_mean_unit_complete_time_serie[time_serie],
                    forecast_horizon_days)
                # print('rescaled denormalized forecasts array shape: ', local_point_forecast.shape)

                # calculating MSE
                # print(y_truth.shape)
                # print(local_point_forecast.shape)
                local_error_metric_mse = mean_squared_error(
                    y_truth, local_point_forecast)
                # print('time_serie: ', time_serie, '\tMean_Squared_Error: ', local_error_metric_mse)
                previous_result = local_mse[:, 1][local_mse[:, 0] ==
                                                  time_serie].item()
                time_series_treated.append(
                    [int(time_serie), previous_result, local_error_metric_mse])
                if local_error_metric_mse < previous_result:
                    # print('better results with time_serie specific model training')
                    print(time_serie, 'MSE improved from ', previous_result,
                          'to ', local_error_metric_mse)
                    improved_time_series_forecast.append(int(time_serie))
                    improved_mse.append(local_error_metric_mse)
                else:
                    # print('no better results with time serie specific model training')
                    # print('MSE not improved from: ', previous_result, '\t current mse: ', local_error_metric_mse)
                    time_series_not_improved.append(int(time_serie))
                time_serie_iterator += 1
            time_series_treated = np.array(time_series_treated)
            improved_mse = np.array(improved_mse)
            average_mse_in_block_forecast = np.mean(time_series_treated[:, 2])
            average_mse_improved_ts = np.mean(improved_mse)
            print('poor result time serie list len:',
                  len(poor_result_time_serie_list))
            print('mean_mse for in-block forecast:',
                  average_mse_in_block_forecast)
            print(
                'number of time series with better results with this forecast: ',
                len(improved_time_series_forecast))
            print(
                'mean_mse of time series with better results with this forecast: ',
                average_mse_improved_ts)
            print('not improved time series =', len(time_series_not_improved))
            improved_time_series_forecast = np.array(
                improved_time_series_forecast)
            time_series_not_improved = np.array(time_series_not_improved)
            poor_result_time_serie_array = np.array(
                poor_result_time_serie_list)
            # store which (individually targeted) time_series forecasts improved and which did not
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'poor_result_time_serie_array'
                ]), poor_result_time_serie_array)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_forecast_results'
                ]), time_series_treated)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'improved_time_series_forecast'
                ]), improved_time_series_forecast)
            np.save(
                ''.join([
                    local_settings['models_evaluation_path'],
                    'time_series_not_improved'
                ]), time_series_not_improved)
            np.savetxt(''.join([
                local_settings['models_evaluation_path'],
                'time_series_forecast_results.csv'
            ]),
                       time_series_treated,
                       fmt='%10.15f',
                       delimiter=',',
                       newline='\n')
            forecaster_in_block_json = forecaster_in_block.to_json()
            with open(''.join([local_settings['models_path'],
                               'high_loss_time_serie_model_forecaster_in_block.json']), 'w') \
                    as json_file:
                json_file.write(forecaster_in_block_json)
            print('trained model weights and architecture saved')
            print('metadata (results, time_serie with high loss) saved')
            print(
                'forecast improvement done. (high loss time_serie focused) submodule has finished'
            )
        except Exception as submodule_error:
            print('time_series in-block forecast submodule_error: ',
                  submodule_error)
            logger.info(
                'error in forecast of in-block time_series (high_loss_identified_ts_forecast submodule)'
            )
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
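To sanity-check the evaluation artifacts written above, the saved arrays can be reloaded directly (np.save appends the .npy extension). A short sketch, assuming `models_evaluation_path` mirrors `local_settings['models_evaluation_path']`:

import numpy as np

results = np.load(models_evaluation_path +
                  'time_series_forecast_results.npy')
improved = np.load(models_evaluation_path +
                   'improved_time_series_forecast.npy')
# each results row is [time_serie_id, previous_mse, in_block_mse]
print('time_series treated:', results.shape[0])
print('forecasts improved:', improved.shape[0])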
Example #21
0
class Autoencoder(tf.keras.Model):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        # encoder assumed: the standard Flatten -> Dense(latent_dim) stack
        # (the class head is missing from the truncated source)
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(784, activation='sigmoid'),
            layers.Reshape((28, 28))
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded


latent_dim = 64  # illustrative value; adjust as needed
autoencoder = Autoencoder(latent_dim)

autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())

autoencoder.fit(x_train,
                x_train,
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))

encoded_imgs = autoencoder.encoder(x_test).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()

n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i])
    plt.gray()
    # display reconstruction
    ax = plt.subplot(2, n, i + n + 1)
    plt.imshow(decoded_imgs[i])
    plt.gray()
plt.show()

    # num_classes and fc1 are assumed to be defined earlier
    # (the start of this example was truncated)
    dense1 = layers.Dense(num_classes,
                          activation='softmax',
                          name="class_output")(fc1)
    dense2 = layers.Dense(1, activation='sigmoid', name="bounding_box")(
        fc1)  # later change this into bounding box regression

    values = model.predict(image)
    values1 = maxpoolmodel.predict(image)

    region_array = np.asarray([[[0.0, 0.0, 1.0, 1.0]]], dtype='float32')

    roimodel = tf.keras.Model(inputs=(feature_input, roi_input),
                              outputs=(dense1, dense2))
    roimodel.compile(
        optimizer=optimizers.RMSprop(1e-3),
        loss={
            "bounding_box": losses.MeanSquaredError(),
            "class_output": losses.CategoricalCrossentropy(),
        },
        metrics={
            "bounding_box": [
                metrics.MeanAbsolutePercentageError(),
                metrics.MeanAbsoluteError(),
            ],
            "class_output": [metrics.CategoricalAccuracy()],
        },
    )
    roimodel.summary()
    values = values.reshape(
        1, 1, 5, 5, 1280)  # take into account batch size which is first input
    region_array = region_array.reshape(1, 1, 1, 4)
    output2 = np.array([1])