Example No. 1
    def compile(self, generator_G_opt: optimizer_v2.OptimizerV2,
                generator_F_opt: optimizer_v2.OptimizerV2,
                discriminator_X_opt: optimizer_v2.OptimizerV2,
                discriminator_Y_opt: optimizer_v2.OptimizerV2,
                generator_loss_fn, discriminator_loss_fn) -> None:

        super(CycleGAN, self).compile()
        self.generator_G_opt = generator_G_opt
        self.generator_F_opt = generator_F_opt
        self.discriminator_X_opt = discriminator_X_opt
        self.discriminator_Y_opt = discriminator_Y_opt
        self.generator_loss_fn = generator_loss_fn
        self.discriminator_loss_fn = discriminator_loss_fn
        self.cycle_loss_fn: Loss = MeanAbsoluteError()
        self.identity_loss_fn: Loss = MeanAbsoluteError()
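A usage sketch for this compile signature; the Adam settings follow common CycleGAN practice, and cycle_gan, generator_loss, and discriminator_loss are assumed to be defined elsewhere, not part of the example above:

from tensorflow.keras.optimizers import Adam

# hedged sketch: optimizer hyper-parameters are illustrative defaults
cycle_gan.compile(generator_G_opt=Adam(2e-4, beta_1=0.5),
                  generator_F_opt=Adam(2e-4, beta_1=0.5),
                  discriminator_X_opt=Adam(2e-4, beta_1=0.5),
                  discriminator_Y_opt=Adam(2e-4, beta_1=0.5),
                  generator_loss_fn=generator_loss,
                  discriminator_loss_fn=discriminator_loss)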
Example No. 2
def main():
    batch = 8
    epoch = 20
    loss = MeanAbsoluteError()
    learning_rate = PiecewiseConstantDecay(boundaries=[100000],
                                           values=[1e-4, 5e-5])
    res_blocks = [
        15, 15, 15, 15, 15, 9, 9, 9, 9, 9, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3
    ]
    checkpoint_dir = './ckpt/edsr'

    # load the data
    ds_train = VCTK(subset='train').dataset()
    ds_valid = VCTK(subset='valid').dataset()

    # build the model
    edsr_model = edsr2(scale=4, res_blocks=res_blocks, res_block_scaling=0.7)

    # train
    edsr_trainer = EDSRTrainer(model=edsr_model,
                               loss=loss,
                               learning_rate=learning_rate,
                               checkpoint_dir=checkpoint_dir)
    edsr_trainer.train(train_dataset=ds_train,
                       valid_dataset=ds_valid,
                       batch=batch,
                       epoch=epoch)

    edsr_model.save_weights(
        f'./weights/EDSR_16000_{len(res_blocks)}res_{batch}batch_{epoch}epochs_tanh_entropy_glorot_uniform.h5'
    )
Example No. 3
 def __init__(self,
              model,
              loss,
              checkpoint_dir,
              learning_rate=PiecewiseConstantDecay(boundaries=[200000],
                                                   values=[1e-3, 5e-4])):
     if loss == 'MAE':
         loss = MeanAbsoluteError()
     elif loss == 'MSE':
         loss = MeanSquaredError()
     else:
         raise ValueError("loss specified incorrectly")
     super().__init__(model,
                      loss=loss,  # use the loss parsed above, not a hard-coded MAE
                      learning_rate=learning_rate,
                      checkpoint_dir=checkpoint_dir)
Example No. 4
 def __init__(self,
              model,
              loss=MeanAbsoluteError(),
              learning_rate=PiecewiseConstantDecay(boundaries=[200000],
                                                   values=[1e-4, 5e-5]),
              checkpoint_dir='./ckpt/edsr'):
     super().__init__(model, loss, learning_rate, checkpoint_dir)
Example No. 5
def get_mean_baseline(train: pd.DataFrame, val: pd.DataFrame) -> float:
    """Calculates the mean MAE and MAPE baselines by taking the mean values of the training data as prediction for the
    validation target feature.

    Parameters
    ----------
    train : pd.DataFrame
        Pandas DataFrame containing your training data.
    val : pd.DataFrame
        Pandas DataFrame containing your validation data.

    Returns
    -------
    float
        MAPE value.
    """
    y_hat = train["price"].mean()
    val["y_hat"] = y_hat
    mae = MeanAbsoluteError()
    mae = mae(val["price"], val["y_hat"]).numpy()  # type: ignore
    mape = MeanAbsolutePercentageError()
    mape = mape(val["price"], val["y_hat"]).numpy()  # type: ignore

    print("mean baseline MAE: ", mae)
    print("mean baseline MAPE: ", mape)

    return mape
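A minimal usage sketch, assuming a DataFrame with a "price" column; the file name and split parameters are illustrative:

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv("listings.csv")  # hypothetical data set with a "price" column
train, val = train_test_split(df, test_size=0.2, random_state=42)
baseline_mape = get_mean_baseline(train, val)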
Example No. 6
  def __init__(self, dim_input=(50,200,1), channel=1,
               num_inner_updates=1,
               inner_update_lr=0.4, k_shot=5, learn_inner_update_lr=False):
    super(MAML, self).__init__()
    self.dim_input = dim_input
    # self.dim_output = dim_output
    self.inner_update_lr = inner_update_lr
    self.loss_func = MeanAbsoluteError()
    self.channels = channel
    # self.img_size = int(np.sqrt(self.dim_input/self.channels))

    # outputs_ts[i] and losses_ts_post[i] are the output and loss after i+1 inner gradient updates
    losses_tr_pre, outputs_tr, losses_ts_post, outputs_ts = [], [], [], []
    accuracies_tr_pre, accuracies_ts = [], []

    # per-step outputs/losses for the inner training loop
    # (use comprehensions: [[]] * n would alias the same inner list n times)
    outputs_ts = [[] for _ in range(num_inner_updates)]
    losses_ts_post = [[] for _ in range(num_inner_updates)]
    accuracies_ts = [[] for _ in range(num_inner_updates)]

    # Define the weights - these should NOT be directly modified by the
    # inner training loop
    tf.random.set_seed(seed)  # `seed` is assumed to be a module-level constant
    self.Unet = MuskensNet(channel, NUM_DOWNCOV_BLOCKS)

    # TODO: update when learning the learning rate
    self.learn_inner_update_lr = learn_inner_update_lr
    if self.learn_inner_update_lr:
      self.inner_update_lr_dict = {}
      for key in self.Unet.layer_weights.keys():
        if isinstance(self.Unet.layer_weights[key], list):
          self.inner_update_lr_dict[key] = [
              [tf.Variable(self.inner_update_lr,
                           name='inner_update_lr_%s_%d_%d' % (key, number, j))
               for number in range(len(self.Unet.layer_weights[key]))]
              for j in range(num_inner_updates)
          ]
        else:
          self.inner_update_lr_dict[key] = [
              tf.Variable(self.inner_update_lr,
                          name='inner_update_lr_%s_%d' % (key, j))
              for j in range(num_inner_updates)
          ]
Example No. 7
def run_model(
    model_name: str,
    model_function: Model,
    lr: float,
    train_generator: Iterator,
    validation_generator: Iterator,
    test_generator: Iterator,
) -> History:
    """This function runs a keras model with the Ranger optimizer and multiple callbacks. The model is evaluated within
    training through the validation generator and afterwards one final time on the test generator.

    Parameters
    ----------
    model_name : str
        The name of the model as a string.
    model_function : Model
        Keras model function like small_cnn()  or adapt_efficient_net().
    lr : float
        Learning rate.
    train_generator : Iterator
        Keras ImageDataGenerator iterator for the training data.
    validation_generator : Iterator
        Keras ImageDataGenerator iterator for the validation data.
    test_generator : Iterator
        Keras ImageDataGenerator iterator for the test data.

    Returns
    -------
    History
        The history of the keras model as a History object. To access it as a Dict, use history.history. For an example
        see plot_results().
    """

    callbacks = get_callbacks(model_name)
    model = model_function
    model.summary()
    plot_model(model, to_file=model_name + ".jpg", show_shapes=True)

    radam = tfa.optimizers.RectifiedAdam(learning_rate=lr)
    ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
    optimizer = ranger

    model.compile(optimizer=optimizer,
                  loss="mean_absolute_error",
                  metrics=[MeanAbsoluteError(),
                           MeanAbsolutePercentageError()])
    history = model.fit(
        train_generator,
        epochs=100,
        validation_data=validation_generator,
        callbacks=callbacks,
        workers=6,  # adjust this according to the number of CPU cores of your machine
    )

    model.evaluate(
        test_generator,
        callbacks=callbacks,
    )
    return history  # type: ignore
Example No. 8
 def LSTM(self,
          input_shape=None,
          dropout_p=0.2,
          n_output=2,
          learning_rate=1e-3):
     log.log("LSTM Model Initilaizing...")
     if input_shape != None:
         self.input_shape = input_shape
         self.dropout_p = dropout_p
         self.n_output = n_output
         self.learning_rate = learning_rate
         self.model.add(
             LSTM(16,
                  dropout=self.dropout_p,
                  input_shape=self.input_shape,
                  return_sequences=True))
         self.model.add(
             LSTM(32, dropout=self.dropout_p, return_sequences=True))
         self.model.add(
             LSTM(64, dropout=self.dropout_p, return_sequences=True))
         self.model.add(Flatten())
         self.model.add(Dense(units=32, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=16, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=8, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=self.n_output))
         #print(self.model.summary())
         self.model.compile(
             optimizer=Adam(learning_rate=self.learning_rate),
             loss=MeanAbsoluteError(),
             metrics=[MeanSquaredError()])
     log.log("LSTM Model Initialized!")
Example No. 9
def nonzero_MAE(y_true, y_pred):
    """MAE computed only over positions where the prediction is non-zero."""
    f = MeanAbsoluteError()
    where = tf.not_equal(y_pred, 0)
    yt_mask = tf.boolean_mask(y_true, where)
    yp_mask = tf.boolean_mask(y_pred, where)
    return f(yt_mask, yp_mask)

# class nonzero_MAE(Callback):
#     """
#     metric callback inspired by 
#         https://stackoverflow.com/questions/51728648/how-do-masked-values-affect-the-metrics-in-keras
#     """
#     def on_train_begin(self, logs={}):
#         self.loss_f = MeanAbsoluteError()
#         self.val_res = []
    
#     def on_epoch_end(self, epoch, logs={}):
#         val_predict = (np.asarray(self.model.predict(self.model.validation_data[0]))).round()
#         val_targ = self.model.validation_data[1]
#         indx = np.where(~val_targ.any(axis=2))[0]  # find where all targets are zero; those are the masked ones, as we masked the target with 0 and the data with 666
#         y_true_nomask = np.delete(val_targ, indx, axis=0)
#         y_pred_nomask = np.delete(val_predict, indx, axis=0)

#         result = self.loss_f(y_true_nomask, y_pred_nomask)
#         self.val_res.append(result)

#         print (f'— non-zero MAE: {result}')
#         return
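Since nonzero_MAE (defined above) keeps the standard (y_true, y_pred) signature, it can be passed straight to compile as a metric; a minimal sketch with a placeholder model:

# placeholder model; any Keras model can take the custom metric the same way
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='adam',
              loss=MeanAbsoluteError(),
              metrics=[nonzero_MAE])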
Example No. 10
 def __init__(self, model, ckpt_path, args):
     super().__init__(model=model,
                      loss=MeanAbsoluteError(),
                      learning_rate=ExponentialDecay(args.lr_init,
                                                     args.lr_decay_step,
                                                     args.lr_decay_ratio,
                                                     staircase=True),
                      checkpoint_path=ckpt_path,
                      args=args)
Example No. 11
 def __init__(self,
              model,
              checkpoint_dir,
              learning_rate=PiecewiseConstantDecay(boundaries=[200000],
                                                   values=[1e-3, 5e-4])):
     super().__init__(model,
                      loss=MeanAbsoluteError(),
                      learning_rate=learning_rate,
                      checkpoint_dir=checkpoint_dir)
Example No. 12
def fitness_func(solution, sol_idx):
    global data_inputs, data_outputs, kerasGA, model

    predictions = predict(model=model, solution=solution, data=data_inputs)

    mae = MeanAbsoluteError()
    abs_error = mae(y_true=data_outputs, y_pred=predictions).numpy() + 1e-8  # epsilon avoids division by zero
    solution_fitness = 1.0 / abs_error

    return solution_fitness
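For context, a minimal sketch of wiring this fitness function into a GA run; the population and generation counts below are arbitrary:

import pygad
import pygad.kerasga

kerasGA = pygad.kerasga.KerasGA(model=model, num_solutions=10)
ga_instance = pygad.GA(num_generations=50,
                       num_parents_mating=4,
                       initial_population=kerasGA.population_weights,
                       fitness_func=fitness_func)
ga_instance.run()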
Example No. 13
def fitness_func(solution, sol_idx):
    global data_inputs, data_outputs, keras_ga, model

    predictions = pygad.kerasga.predict(model=model,
                                        solution=solution,
                                        data=data_inputs)

    mae = MeanAbsoluteError()
    # PyGAD maximizes fitness, so invert the error (epsilon avoids division by zero)
    solution_fitness = 1.0 / (mae(data_outputs, predictions).numpy() + 1e-8)

    return solution_fitness
Example No. 14
    def __init__(self, model, learning_rate, checkpoint_dir='./ckpt/'):

        self.now = None
        self.loss = MeanAbsoluteError()
        self.checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                              psnr=tf.Variable(-1.0),
                                              optimizer=Adam(learning_rate),
                                              model=model)
        self.checkpoint_manager = tf.train.CheckpointManager(
            checkpoint=self.checkpoint,
            directory=checkpoint_dir,
            max_to_keep=3)
        self.restore()
Example No. 15
    def run(
        change_idx: int,
        study_data: np.ndarray,
        control_data: np.ndarray,
        ts: np.ndarray = None
    ) -> Tuple[Union[float, np.ndarray], float, np.ndarray]:
        """
        Recommendation: 
        >>> len(study_data) == len(control_data)
        >>> True
        """
        assert len(study_data) == len(
            control_data
        ), f"expected study_data and control_data to have the same length, but got {len(study_data)} and {len(control_data)}"

        # split data
        study_before, study_after, control_before, control_after, litmus_window_size = Litmus.split_data(
            change_idx, study_data, control_data)

        # data
        train_data = tf.data.Dataset.from_tensor_slices(
            ([control_before], [study_before])).batch(litmus_window_size)
        control_before_data = tf.data.Dataset.from_tensor_slices(
            [control_before]).batch(litmus_window_size)
        control_after_data = tf.data.Dataset.from_tensor_slices(
            [control_after]).batch(litmus_window_size)

        # train and test
        litmus = Litmus(out_dim=litmus_window_size)
        sgd = SGD(1e-3, momentum=0.2)
        loss = MeanAbsoluteError()
        litmus.compile(sgd, loss)
        litmus.fit(train_data, epochs=2000)
        pred_control_before = litmus.predict(control_before_data)[0]  # first batch
        pred_control_after = litmus.predict(control_after_data)[0]  # first batch

        diff_before = Litmus.compute_diff(pred_control_before, study_before)
        diff_after = Litmus.compute_diff(pred_control_after, study_after)

        _, u_yx, vx = Litmus.compute_mean_placements(diff_after, diff_before)
        _, u_xy, vy = Litmus.compute_mean_placements(diff_before, diff_after)

        critical_score = Litmus.critical_value(u_yx, u_xy, vx, vy,
                                               litmus_window_size)

        return critical_score, Litmus.THRESHOLD, np.concatenate(
            [pred_control_before, pred_control_after], axis=0)
Example No. 16
    def __init__(self, generator, discriminator, cycle_loss_weight, identity_loss_weight, gradient_penalty_weight,
                 learning_rate=PiecewiseConstantDecay(boundaries=[100], values=[2e-4, 2e-5]), beta_1=0.5):
        # NOTE: both directions share the same generator instance here (likewise for
        # the discriminators); a full CycleGAN would use two separate networks each.
        self.A2B_G = generator
        self.B2A_G = generator
        self.A_D = discriminator
        self.B_D = discriminator

        self.generator_optimizer = Adam(learning_rate=learning_rate, beta_1=beta_1)
        self.discriminator_optimizer = Adam(learning_rate=learning_rate, beta_1=beta_1)

        self.cycle_loss_weight = cycle_loss_weight
        self.identity_loss_weight = identity_loss_weight
        self.gradient_penalty_weight = gradient_penalty_weight

        self.mean_squared_error = MeanSquaredError()
        self.mean_absolute_error = MeanAbsoluteError()
Example No. 17
 def __get_loss(loss, num_of_classes):
     if loss == 'cross_entropy':  # default value.
         return CategoricalCrossentropy()  # if num_of_classes != 2 else BinaryCrossentropy()
     elif loss == 'binary_cross_entropy':
         return BinaryCrossentropy()
     elif loss == 'cosine_similarity':
         return CosineSimilarity()
     elif loss == 'mean_absolute_error':
         return MeanAbsoluteError()
     elif loss == 'mean_squared_error':
         return MeanSquaredError()
     elif loss == 'huber':
         return Huber()
     else:
         raise ValueError('loss type does not exist.')
Example No. 18
def create_model(num_rows_df: int,
                 num_output_fields: int,
                 window_size: int = 10,
                 look_ahead_size: int = 5,
                 num_neurons: int = 40,
                 weight_decay_kernel: float = 1e-4,
                 weight_decay_recurrent: float = 1e-3,
                 kernel_dropout: float = 0.1,
                 recurrent_dropout: float = 0.3,
                 second_lstm_layer: bool = False,
                 learning_rate: float = 0.01,
                 loss: str = 'mse'):

    input_layer = Input(shape=(window_size, num_rows_df))
    norm = keras.layers.LayerNormalization()(input_layer)
    encoder = LSTM(
        num_neurons,
        recurrent_regularizer=regularizers.l2(weight_decay_recurrent),
        kernel_regularizer=regularizers.l2(weight_decay_kernel),
        recurrent_dropout=recurrent_dropout,
        dropout=kernel_dropout,
        return_sequences=second_lstm_layer)(norm)

    if second_lstm_layer:
        encoder = LSTM(
            num_neurons // 2,
            recurrent_regularizer=regularizers.l2(weight_decay_recurrent),
            kernel_regularizer=regularizers.l2(weight_decay_kernel),
            recurrent_dropout=recurrent_dropout,
            dropout=kernel_dropout)(encoder)

    repeat = RepeatVector(look_ahead_size)(encoder)
    decoder = LSTM(num_neurons, return_sequences=True)(repeat)
    pred = TimeDistributed(Dense(num_output_fields,
                                 activation='relu'))(decoder)

    model = Model(inputs=input_layer, outputs=pred)
    model.compile(
        optimizer=Adam(learning_rate=learning_rate),
        loss=loss,
        metrics=[
            MeanAbsoluteError(reduction=tf.keras.losses.Reduction.SUM),
            'accuracy'
        ])
    model.summary()
    return model
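A hedged usage sketch; the feature and output dimensions below are illustrative, with the remaining arguments left at their defaults:

model = create_model(num_rows_df=12,        # number of input features per time step
                     num_output_fields=3,   # number of predicted fields per step
                     second_lstm_layer=True)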
Example No. 19
    def calculate_G_loss(self, discriminator_b_false, real_target,
                         generated_b):
        """Calculate the G loss.

        Args:
            discriminator_b_false (tf.tensor): Discriminator prediction for generated B image.
            real_target (tf.tensor): Real B images.
            generated_b (tf.tensor): Generated B image.

        Returns:
            Tensors containing gan loss and l1 loss.
        """
        MAE = MeanAbsoluteError()
        gan_loss = (self.disc_loss_function(self.create_label(True),
                                            discriminator_b_false) *
                    self.gan_loss_coeff)
        l1_loss = MAE(real_target, generated_b) * self.L1_loss_coeff  # y_true first, y_pred second
        return gan_loss, l1_loss
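The two returned tensors are typically summed into the total generator objective. A sketch of one training step inside the same class; self.generator, self.discriminator, self.generator_optimizer, and the batches real_a / real_b are assumed attributes, not shown in the example above:

with tf.GradientTape() as tape:
    generated_b = self.generator(real_a, training=True)
    discriminator_b_false = self.discriminator(generated_b, training=True)
    gan_loss, l1_loss = self.calculate_G_loss(discriminator_b_false, real_b,
                                              generated_b)
    total_g_loss = gan_loss + l1_loss
grads = tape.gradient(total_g_loss, self.generator.trainable_variables)
self.generator_optimizer.apply_gradients(
    zip(grads, self.generator.trainable_variables))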
Example No. 20
    def build(self, hp):

        model = Sequential()
        model.add(
            Dense(units=hp.Int('units',
                               min_value=50,
                               max_value=200,
                               step=50,
                               default=50),
                  activation=hp.Choice('dense_activation',
                                       values=['relu', 'tanh', 'sigmoid'],
                                       default='relu'),
                  input_dim=self.input_shape))
        for i in range(hp.Int('num_layers', 1, 6)):
            model.add(
                Dense(
                    units=hp.Int('units_' + str(i),
                                 min_value=5,
                                 max_value=200,
                                 step=20),
                    activation=hp.Choice('dense_activation',
                                         values=['relu', 'tanh', 'sigmoid'],
                                         default='relu'),
                ))

            model.add(
                Dropout(rate=hp.Float('dropout' + str(i),
                                      min_value=0.0,
                                      max_value=0.5,
                                      default=0.25,
                                      step=0.05)))

        model.add(
            Dense(self.num_classes,
                  activation=hp.Choice('dense_activation_final',
                                       values=['relu', 'tanh', 'sigmoid'],
                                       default='sigmoid')))

        optimizer = hp.Choice('optimizer', ['adam', 'sgd'])
        model.compile(optimizer, loss=MeanAbsoluteError(), metrics=['mape'])
        return model
Example No. 21
    'adam': Adam(),
    'sgd': SGD(),
    'adagrad': Adagrad(),
    'adamax': Adamax(),
    'ftrl': Ftrl(),
    'nadam': Nadam(),
    'rmsprop': RMSprop()
}

# These are the only losses currently supported
loss_dict = {
    'binary_cross_entropy': BinaryCrossentropyL(),
    'cross_entropy': BinaryCrossentropyL(),
    'categorical_cross_entropy': CategoricalCrossentropyL(),
    'mean_squared_error': MeanSquaredError(),
    'mean_absolute_error': MeanAbsoluteError()
}

# this dict has to be built inline because of per-call arguments such as num_classes=num_classes and **model_parameters
models_dict = {
    'Unet': {
        'model_class':
        Unet,
        'compatible_backbones': [
            'vgg16', 'vgg19', 'resnet18', 'seresnet18', 'inceptionv3',
            'mobilenet', 'efficientnetb0'
        ]
    },
    'FPN': {
        'model_class':
        FPN,
Example No. 22
                       parent_selection_type='rank',
                       fitness_func=fitness_func, # initial_population=kerasGA.population_weights,
                       sol_per_pop=9, num_genes=9, init_range_low=0.01, init_range_high=10.00,
                       crossover_type='single_point', mutation_type='random',
                       mutation_num_genes=2, save_best_solutions=True, save_solutions=True,
                       allow_duplicate_genes=True, stop_criteria='saturate_10',
                       on_generation=callback_generation)

# Run the Genetic algorithm
ga_instance.run()

# Plot the fitness value
ga_instance.plot_fitness(title='Iteration vs. Fitness', xlabel='Generation', ylabel='Fitness',
                         linewidth=4)

# To get the details about the best solution found by PyGAD
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print(f'Fitness value of the best solution = {solution_fitness}')
print(f'Index of the best solution: {solution_idx}')

# Make prediction based on the trained model's best solution.
predictions = predict(model=model, solution=solution, data=data_inputs)

print('Predictions: \n', predictions)

# Measure the trained model error.
mae = MeanAbsoluteError()
abs_error = mae(y_true=data_outputs, y_pred=predictions).numpy()
print('Absolute Error:', abs_error)
Example No. 23
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer
import pygad.kerasga
import numpy as np
from tensorflow.keras.losses import mean_squared_error, MeanAbsoluteError

model = Sequential([InputLayer(input_shape=[2, ]),
                    Dense(units=2, activation='sigmoid', use_bias=True, bias_initializer='ones'),
                    Dense(units=1, activation='sigmoid', use_bias=True, bias_initializer='ones')])

keras_genetic_algorithm = pygad.kerasga.KerasGA(model=model, num_solutions=9)

training_data = np.array([[1, 1],
                          [1, 0],
                          [0, 1],
                          [0, 0]])

labels = np.array([[0],
                   [1],
                   [1],
                   [0]])

# assumed completion: evaluate one candidate solution from the GA population
predictions = pygad.kerasga.predict(model=model,
                                    solution=keras_genetic_algorithm.population_weights[0],
                                    data=training_data)

mean_absolute_error = MeanAbsoluteError()
loss = mean_absolute_error(y_true=labels, y_pred=predictions)
Example No. 24
 def CNN(self,
         input_shape=None,
         stride=(1, 1),
         dilation=(1, 1),
         kernel_n=3,
         pooling_size=(2, 2),
         dropout_p=0.2,
         n_output=2,
         learning_rate=1e-3):
     log.log("CNN Model Initilaizing...")
     if input_shape != None:
         self.kernel_n = kernel_n
         self.input_shape = input_shape
         self.stride = stride  # skips, kernel makes at every convolution
         self.dilation = dilation  # kernel coverage
         self.pooling_size = pooling_size
         self.dropout_p = dropout_p
         self.n_output = n_output
         self.learning_rate = learning_rate
         self.model.add(
             Conv2D(filters=16,
                    kernel_size=self.kernel_n,
                    activation='relu',
                    padding='same',
                    input_shape=self.input_shape,
                    strides=self.stride,
                    dilation_rate=self.dilation))
         #self.model.add(MaxPool2D(pool_size=self.pooling_size))
         self.model.add(
             Conv2D(filters=32,
                    kernel_size=self.kernel_n,
                    activation='relu',
                    padding='same',
                    strides=self.stride,
                    dilation_rate=self.dilation))
         #self.model.add(MaxPool2D(pool_size=self.pooling_size))
         self.model.add(
             Conv2D(filters=64,
                    kernel_size=self.kernel_n,
                    activation='relu',
                    padding='same',
                    strides=self.stride,
                    dilation_rate=self.dilation))
         #self.model.add(MaxPool2D(pool_size=self.pooling_size))
         self.model.add(
             Conv2D(filters=128,
                    kernel_size=self.kernel_n,
                    activation='relu',
                    padding='same',
                    strides=self.stride,
                    dilation_rate=self.dilation))
         #self.model.add(MaxPool2D(pool_size=self.pooling_size))
         self.model.add(Flatten())
         self.model.add(
             Dense(units=self.input_shape[0] * 128, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=128, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=64, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=32, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=16, activation='relu'))
         self.model.add(Dropout(self.dropout_p))
         self.model.add(Dense(units=self.n_output))
         self.model.compile(
             optimizer=Adam(learning_rate=self.learning_rate),
             loss=MeanAbsoluteError(),
             metrics=[MeanSquaredError()])
     log.log("CNN Model Initialized!")
Example No. 25
def decoding_L1_loss(y_true, y_pred):
    return MeanAbsoluteError()(y_true, y_pred)
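Because the wrapper keeps the (y_true, y_pred) signature, it can be passed to compile like any built-in loss; a minimal sketch with a hypothetical autoencoder model, reusing the Adam import from the surrounding examples:

autoencoder.compile(optimizer=Adam(learning_rate=1e-3),
                    loss=decoding_L1_loss)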
Example No. 26
    econv3, epool3 = encoder_block(epool2, 128)
    econv4, epool4 = encoder_block(epool3, 256)
    econv5, epool5 = encoder_block(epool4, 512)
    econv6 = Conv2D(512, 3, padding='same',
                    kernel_initializer='he_normal')(epool5)
    econv6 = LeakyReLU(0.1)(econv6)

    decoder_input = econv6
    net = decoder_block(decoder_input, econv5, 512)
    net = decoder_block(net, econv4, 256)
    net = decoder_block(net, econv3, 128)
    net = decoder_block(net, econv2, 64)
    net = decoder_block(net, econv1, 32)

    net = Conv2D(output_channel,
                 3,
                 padding='same',
                 kernel_initializer='he_normal')(net)

    model = Model(inputs=inputs, outputs=net)

    return model, econv6


if __name__ == "__main__":
    model, _ = unet()
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=MeanAbsoluteError(),
                  metrics=['accuracy'])

    print(model.summary())
Example No. 27
def my_loss(y_batch, pred_batch, eff_batch):
    mae = MeanAbsoluteError()
    loss = 0
    for i in range(y_batch.shape[0]):
        loss += 0.6 / (1.0 - eff_batch[i])**1 * mae(y_batch[i], pred_batch[i])
    return loss
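Because my_loss takes a third argument (eff_batch), it cannot be handed to compile directly; a sketch of an eager custom training step (the Python loop over the batch keeps this out of tf.function), with model and optimizer assumed to exist:

def train_step(x_batch, y_batch, eff_batch):
    with tf.GradientTape() as tape:
        pred_batch = model(x_batch, training=True)
        loss = my_loss(y_batch, pred_batch, eff_batch)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss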
Example No. 28
optimizer = Adam(learning_rate=LEARNING_RATE, epsilon=1e-7, amsgrad=True)
# use the sinusoidal annealing lr scheduler
scheduler = LearningRateScheduler(lr_max=LEARNING_RATE,
                                  div_factor=LR_DIV_FACTOR,
                                  pct_start=PCT_START)


def my_loss(y_batch, pred_batch, eff_batch):
    mae = MeanAbsoluteError()
    loss = 0
    for i in range(y_batch.shape[0]):
        loss += 0.6 / (1.0 - eff_batch[i])**1 * mae(y_batch[i], pred_batch[i])
    return loss


loss_fn = MeanAbsoluteError()
# compile the model with MAE loss
model.compile(optimizer, loss_fn)
print("model compiled")

# data loading
# load training data, ground truth Hy and efficiency
import gc
input_imgs_120k = np.load(data_folder + '\\' +
                          'grating_pattern_UNet_reshaped_sub.npy',
                          mmap_mode='r')
input_imgs_120k = np.swapaxes(input_imgs_120k, 1, 2)
Hy_forward_120k = np.load(data_folder + '\\' + 'Hy_out_forward_RI.npy',
                          mmap_mode='r')
Hy_forward_120k = np.swapaxes(Hy_forward_120k, 1, 2)
efficiency_120k = np.load(data_folder + '\\' + 'efficiency_reverse.npy',
Example No. 29
def compute_l1_loss(fake_outputs, ground_truth):
    return MeanAbsoluteError(reduction=keras.losses.Reduction.NONE)(
        ground_truth, fake_outputs)
def construct_and_compile_model(model_type,
                                model_name,
                                task,
                                checkpoint_file,
                                checkpoints_dir,
                                model_params={}):
    """Construct and compile a model of a specific type

    Args:
        model_type (str): The type of model to be constructed
        model_name (str): The name of model to be constructed
        task (str): Either 'regression' or 'classification'
        checkpoint_file (str): Name of a checkpoint file
        checkpoints_dir (str): Path to the checkpoints directory
        model_params (dict): Possible hyper-parameters for the model to be
                             constructed

    Returns:
        model (tf.keras.Model): Constructed and compiled model
    """
    n_cells = model_params['n_cells']
    input_dimension = model_params['input_dimension']
    output_dimension = model_params['output_dimension']
    dropout = model_params['dropout']
    global_dropout = model_params['global_dropout']
    hid_dimension = model_params['hidden_dimension']
    multiplier = model_params['multiplier']

    if task == 'classification':
        loss_fn = SparseCategoricalCrossentropy()
        metrics = ['accuracy']
    elif task == 'regression':
        loss_fn = MeanAbsoluteError()
        metrics = ['mse']
        output_dimension = 1
    else:
        raise ValueError('Argument "task" must be one of "classification" ' \
                'or "regression"')

    if model_type in ('lstm', 'gru'):
        model = construct_rnn(input_dimension, output_dimension, model_type,
                              n_cells, dropout, hid_dimension, model_name)
    elif model_type in ('lstm_cw', 'gru_cw'):
        model = construct_channel_wise_rnn(input_dimension, output_dimension,
                                           model_type, dropout, global_dropout,
                                           hid_dimension, multiplier,
                                           model_name)
    elif model_type == 'fcn':
        model = construct_fcn(input_dimension, output_dimension, dropout,
                              model_name)
    elif model_type == 'lstm_fcn':
        model = construct_lstm_fcn(input_dimension, output_dimension, dropout,
                                   hid_dimension, model_name)
    else:
        raise ValueError(f'Model type {model_type} is not supported.')

    if checkpoint_file:
        print(f"=> Loading weights from checkpoint: {checkpoint_file}")
        model.load_weights(os.path.join(checkpoints_dir, checkpoint_file))

    model.compile(optimizer=Adam(), loss=loss_fn, metrics=metrics)

    model.summary()

    return model
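A hedged usage sketch for the function above; all hyper-parameter values below are illustrative:

params = {'n_cells': 1, 'input_dimension': 76, 'output_dimension': 1,
          'dropout': 0.3, 'global_dropout': 0.0, 'hidden_dimension': 64,
          'multiplier': 4}
model = construct_and_compile_model(model_type='lstm',
                                    model_name='lstm_regressor',
                                    task='regression',
                                    checkpoint_file=None,
                                    checkpoints_dir='./checkpoints',
                                    model_params=params)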