Example #1
    def update_learning_rate(self, step, lr, optimizers):
        """Since we cannot use fit, we have to update the learning rate manually.

        Args:
            step (int): current step.
            lr: current learning rate
            optimizers: list of optimizers to change
        """
        cur_lr = self._calculate_lr(step, lr)

        for _optimizer in optimizers:
            K.set_value(_optimizer.learning_rate, cur_lr)
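For reference, `K.set_value` simply writes a Python value into an existing backend variable in place, which is what every snippet on this page relies on. A minimal illustration (not taken from the example above):

    import tensorflow.keras.backend as K

    v = K.variable(0.1)           # a backend variable, e.g. an optimizer's learning rate
    K.set_value(v, 0.05)          # overwrite its value in place
    print(float(K.get_value(v)))  # 0.05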
Example #2
    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')

        step = min(epoch, self.n_epoch)
        cosine_decay = 0.5 * (1 + cos(pi * step / self.n_epoch))
        decayed = (1 - self.alpha) * cosine_decay + self.alpha
        lr = self.initial_learning_rate * decayed
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: CosineAnnealingScheduler setting learning '
                  'rate to %s.' % (epoch + 1, lr))
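The method above is only the scheduling logic; in practice it sits inside a Keras callback. A minimal sketch of such a wrapper, whose constructor arguments are assumptions rather than part of the original snippet:

    import math
    import tensorflow as tf
    import tensorflow.keras.backend as K

    class CosineAnnealingScheduler(tf.keras.callbacks.Callback):
        def __init__(self, n_epoch, initial_learning_rate, alpha=0.0, verbose=0):
            super().__init__()
            self.n_epoch = n_epoch
            self.initial_learning_rate = initial_learning_rate
            self.alpha = alpha
            self.verbose = verbose

        def on_epoch_begin(self, epoch, logs=None):
            # cosine decay from initial_learning_rate down to alpha * initial_learning_rate
            step = min(epoch, self.n_epoch)
            cosine_decay = 0.5 * (1 + math.cos(math.pi * step / self.n_epoch))
            decayed = (1 - self.alpha) * cosine_decay + self.alpha
            K.set_value(self.model.optimizer.lr, self.initial_learning_rate * decayed)

    # usage: model.fit(x, y, epochs=50, callbacks=[CosineAnnealingScheduler(50, 1e-3)])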
Example #3
    def on_batch_begin(self, batch, logs=None):
        self.steps += 1
        lr = L_FACTOR * min(1.0, self.steps / self.warm) / max(
            self.steps, self.warm)
        K.set_value(self.model.optimizer.lr, lr)

        if os.path.isfile('stop'):
            print("Stop file found.")
            global stop
            stop = True
            self.model.stop_training = True
            mdl.save_weights("models/final.h5", save_format="h5")
Example #4
    def setAllParams(self, list_of_values):
        """ Set all parameters used by the learning algorithm

        Arguments
        ---------
        list_of_values : list of numpy arrays
             list of the parameters to be set (in the same order as returned by getAllParams()).
        """
        for i, p in enumerate(self.params):
            K.set_value(p, list_of_values[i])
        for j, p in enumerate(self.params_policy):
            K.set_value(p, list_of_values[j + i + 1])
Example #5
    def on_epoch_end(self, epoch, logs=None):
        if not hasattr(self.model, "encoder"):
            raise ValueError('Model must have an "encoder" attribute.')
        if not hasattr(self.model.encoder, "kld_weight"):
            raise ValueError(
                'Model encoder must have a "kld_weight" attribute.')

        kld_weight = float(K.get_value(self.model.encoder.kld_weight))
        assert kld_weight <= self.max_val
        delta = self.max_val - kld_weight
        kld_weight = min(kld_weight + delta * epoch, self.max_val)
        K.set_value(self.model.encoder.kld_weight, kld_weight)
Example #6
def auto_encoder_transform(Inputs, Outputs, n_dims=32, verbose=0, epochs=10):
    import tensorflow as tf
    import tensorflow.keras.backend as K
    from tensorflow import keras
    from sklearn.model_selection import train_test_split
    loss = "mean_squared_error"
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0, patience=5, verbose=0, )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=4, min_lr=0.0001)
    nan_prevent = tf.keras.callbacks.TerminateOnNaN()
    input_layer = tf.keras.Input(shape=(Inputs.shape[1],))
    encoded = tf.keras.layers.Dense(n_dims * 4, activation='relu', kernel_regularizer=keras.regularizers.l1_l2(l2=1e-5), use_bias=False)(input_layer)
    encoded = tf.keras.layers.Dense(n_dims, activation='linear', use_bias=False)(encoded)
    encoded = K.l2_normalize(encoded, axis=-1)

    decoded = tf.keras.layers.Dense(n_dims * 4, activation='relu', kernel_regularizer=keras.regularizers.l1_l2(l2=1e-5), use_bias=False)(encoded)
    decoded = tf.keras.layers.Dense(Outputs.shape[1], use_bias=False)(decoded)

    autoencoder = tf.keras.Model(input_layer, decoded)
    encoder = tf.keras.Model(input_layer, encoded)
    adam = tf.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    autoencoder.compile(optimizer=adam, loss=loss, metrics=["mean_squared_error"])
    Inputs = Inputs.astype(float)
    Outputs = Outputs.astype(float)
    X1, X2, Y1, Y2 = train_test_split(Inputs, Outputs, test_size=0.5)
    autoencoder.fit(X1, Y1,
                    epochs=epochs,
                    batch_size=2048,
                    shuffle=True,
                    verbose=verbose,
                    validation_data=(X2, Y2),
                    callbacks=[es, reduce_lr, nan_prevent])

    K.set_value(autoencoder.optimizer.lr, 0.001)
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0, patience=5, verbose=0, )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=4, min_lr=0.00001)
    autoencoder.fit(X2, Y2,
                    epochs=epochs,
                    batch_size=2048,
                    shuffle=True,
                    verbose=verbose,
                    validation_data=(X1, Y1),
                    callbacks=[es, reduce_lr, nan_prevent])

    K.set_value(autoencoder.optimizer.lr, 0.001)
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="loss", factor=0.2, patience=1, min_lr=0.00001)
    autoencoder.fit(Inputs, Outputs,
                    epochs=epochs,
                    batch_size=2048,
                    shuffle=True,
                    verbose=verbose,
                    callbacks=[reduce_lr, nan_prevent])

    Z = encoder.predict(Inputs)
    return Z, encoder
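A possible way to drive this helper, assuming plain NumPy arrays (illustrative only; the call below is not part of the original code):

    import numpy as np

    X = np.random.rand(10000, 64)
    Z, encoder = auto_encoder_transform(X, X, n_dims=32, epochs=10)
    print(Z.shape)  # (10000, 32): the 32-dimensional, L2-normalised encoding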
Example #7
    def on_batch_begin(self, batch, logs=None):
        lr = cosine_decay_with_warmup(
            global_step=self.global_step,
            learning_rate_base=self.learning_rate_base,
            total_steps=self.total_steps,
            warmup_learning_rate=self.warmup_learning_rate,
            warmup_steps=self.warmup_steps,
            hold_base_rate_steps=self.hold_base_rate_steps)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nBatch %05d: setting learning '
                  'rate to %s.' % (self.global_step + 1, lr))
Example #8
    def on_epoch_end(self, epoch, logs={}):

        if (epoch < self.epochs_max_point):
            self.lr = self.lr + self.lr_step_size
        elif (epoch >= self.epochs_max_point
              and epoch < self.epochs_max_point * 2):
            self.lr = self.lr - self.lr_step_size
        else:
            self.lr = self.min_lr

        K.set_value(self.model.optimizer.lr, self.lr)
        self.lrs.append(self.lr)
Example #9
    def on_batch_end(self, batch, logs=None):

        self.counter += 1
        
        if self.counter % 1000 == 500:
        
            if not hasattr(self.model.optimizer, 'lr'):
                raise ValueError('Optimizer must have a "lr" attribute.')

            self.x = min([math.pi, (self.counter / self.half_period) * math.pi])
            self.lr = self.min_lr + (self.initial_lr - self.min_lr) * (1.0 + math.cos(self.x)) / 2.0
            K.set_value(self.model.optimizer.lr, self.lr)
Example #10
    def train(self, x_label, y_label, learning_rate=None):

        board_input = np.array(x_label)

        probs, values = y_label
        probs_output = np.array(probs)
        values_output = np.array(values)

        K.set_value(self.model.optimizer.lr, learning_rate)

        self.model.fit(board_input, [probs_output, values_output],
                       batch_size=len(x_label), verbose=0)
Example #11
    def set_ig_values(self, delta_value, edge_value):
        """
        Set values of the integrated gradient parameters in all layers of the model.

        Args:
            delta_value: Value of the `delta` parameter
            edge_value: Value of the `non_exist_edges` parameter
        """
        for delta_var in self.deltas:
            K.set_value(delta_var, delta_value)
        for edge_var in self.non_exist_edges:
            K.set_value(edge_var, edge_value)
Example #12
    def optimizer_params_step(self):
        next_lr = self.lr_scheds[self.sched_idx].step()
        next_momentum = self.momentum_scheds[self.sched_idx].step()

        # add to logs
        self.logs.setdefault('lr', []).append(next_lr)
        self.logs.setdefault('momentum', []).append(next_momentum)

        # update optimizer params
        K.set_value(self.model.optimizer.lr, next_lr)
        if hasattr(self.model.optimizer, 'momentum'):
            K.set_value(self.model.optimizer.momentum, next_momentum)
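The `lr_scheds` and `momentum_scheds` objects are not shown; they are assumed to expose a `step()` method that returns the next value. A hypothetical stand-in, purely for illustration:

    class ConstantSchedule:
        """Minimal stand-in for the schedulers used above: step() returns a fixed value."""
        def __init__(self, value):
            self.value = value

        def step(self):
            return self.value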
Example #13
    def on_train_begin(self, logs={}):
        logs = logs or {}

        self._reset()
        K.set_value(self.model.optimizer.lr, self.compute_lr())

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")

            new_momentum = self.compute_momentum()
            K.set_value(self.model.optimizer.momentum, new_momentum)
Example #14
    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')

        learning_rate = float(K.get_value(self.model.optimizer.lr))
        learning_rate = self.schedule(epoch)
        if not isinstance(learning_rate, (float, np.float32, np.float64)):
            raise ValueError('Learning rate should be float.')
        K.set_value(self.model.optimizer.lr, learning_rate)
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, learning_rate))
Example #15
    def update_lr(self):
        if not hasattr(self.model.optimizer, "lr"):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = float(K.get_value(self.model.optimizer.lr))
        lr = lr * self.lr_multiplier
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             "should be float.")
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print("\nBatch %05d: LR_Finder reducing learning "
                  "rate to %s." % (self.batch_num + 1, lr))
Example #16
    def mc_dropout(self, batch_size=1000, dropout=0.5, iter=100):
        K.set_value(self.mc_dropout_rate, dropout)
        repetitions = []
        for i in range(iter):
            _, pred = self.model.predict(self.x_test, batch_size)
            repetitions.append(pred)
        K.set_value(self.mc_dropout_rate, 0)

        repetitions = np.array(repetitions)
        mc = np.var(repetitions, 0)
        mc = np.mean(mc, -1)
        return -mc
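`self.mc_dropout_rate` is presumably a backend variable that feeds a dropout layer's rate, so dropout can be switched on at prediction time. One way such a variable might be created (an assumption, not shown in the source):

    import tensorflow.keras.backend as K

    mc_dropout_rate = K.variable(0.0)  # 0.0 keeps dropout off outside of MC passes
    # wired into the model, e.g. through a Lambda layer:
    #   x = tf.keras.layers.Lambda(lambda t: K.dropout(t, level=mc_dropout_rate))(x)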
Example #17
    def on_batch_end(self, epoch, logs=None):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.iteration += 1

        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.iteration)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
            
        K.set_value(self.model.optimizer.lr, self.clr())
Example #18
    def on_epoch_begin(self, epoch, logs=None):

        if logs is None:
            logs = {}
        new_weight = self.schedule(epoch)
        new_value = new_weight * self.weight_orig
        print("Current {} annealer weight is {}".format(
            self.weight_name, new_value))
        assert type(
            new_weight
        ) == float, 'The output of the "schedule" function should be float.'
        K.set_value(self.weight_var, new_value)
Example #19
    def on_batch_begin(self, batch, logs=None):

        new_kl_weight = self.schedule(self.count)
        if not isinstance(new_kl_weight, (float, np.float32, np.float64)):
            raise ValueError(
                'The output of the "schedule" function should be float.')
        # Set new value
        K.set_value(self.kl_weight, new_kl_weight)
        if self.verbose > 0 and self.count % 20 == 0:
            print('\nBatch %05d: KLWeightScheduler setting KL weight to %s.' %
                  (self.count + 1, new_kl_weight))
        self.count += 1
Example #20
    def on_batch_begin(self, batch, logs=None):
        """Set the one-cycle learning rate and momentum for this batch."""
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        if not hasattr(self.model.optimizer, 'momentum'):
            raise ValueError('Optimizer must have a "momentum" attribute.')
        lr = _1cycle_lr(self.iteration, self.cyc_iterations,
                        self.ramp_iterations, self.min_lr, self.max_lr)
        mom = _1cycle_mom(self.iteration, self.cyc_iterations, self.min_mom,
                          self.max_mom)
        K.set_value(self.model.optimizer.lr, lr)
        K.set_value(self.model.optimizer.momentum, mom)
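`_1cycle_lr` and `_1cycle_mom` are not part of the snippet; a plausible piecewise-linear sketch of what they might compute (an assumption, not the original implementation):

    def _1cycle_lr(iteration, cyc_iterations, ramp_iterations, min_lr, max_lr):
        # Hypothetical one-cycle LR: linear ramp up for the first half of the
        # cycle, back down for the second half, then decay toward min_lr.
        half = cyc_iterations // 2
        if iteration < half:
            return min_lr + (max_lr - min_lr) * iteration / half
        if iteration < cyc_iterations:
            return max_lr - (max_lr - min_lr) * (iteration - half) / half
        extra = min(1.0, (iteration - cyc_iterations) / max(1, ramp_iterations))
        return min_lr * (1.0 - 0.9 * extra)

    def _1cycle_mom(iteration, cyc_iterations, min_mom, max_mom):
        # Momentum mirrors the LR: low momentum while the LR is high.
        half = cyc_iterations // 2
        if iteration < half:
            return max_mom - (max_mom - min_mom) * iteration / half
        if iteration < cyc_iterations:
            return min_mom + (max_mom - min_mom) * (iteration - half) / half
        return max_mom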
Example #21
    def on_batch_end(self, model, logs=None):
        
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1

        self.history.setdefault('lr', []).append(K.get_value(model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        
        K.set_value(model.optimizer.lr, self.clr())
Example #22
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        print("epoch : ", epoch)
        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        if self.clr_iterations > self.lr_cg_th:
            K.set_value(self.model.optimizer.lr, self.htdlr())
Example #23
    def on_epoch_begin(self, epoch, logs):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute\n')

        lr = self.eta_min + (self.eta_max - self.eta_min) * (
            1 + math.cos(math.pi * epoch / self.T_max)) / 2

        K.set_value(self.model.optimizer.lr, lr)

        if self.verbose > 0:
            print(
                f'\nEpoch {(epoch+1):5d} CosineAnnealingScheduler - setting learning rate to {lr}'
            )
Example #24
    def on_batch_begin(self, batch, logs=None):
        """The first epoch is used for warm-up; the second epoch lowers the
        learning rate to its minimum.
        """
        if self.passed < self.params['steps']:
            lr = (self.passed + 1.) / self.params['steps'] * self.learning_rate
            K.set_value(self.model.optimizer.lr, lr)
            self.passed += 1
        elif self.params['steps'] <= self.passed < self.params['steps'] * 2:
            lr = (2 - (self.passed + 1.) / self.params['steps']) * (
                self.learning_rate - self.min_learning_rate)
            lr += self.min_learning_rate
            K.set_value(self.model.optimizer.lr, lr)
            self.passed += 1
Example #25
def reset_optimizers(res_log2):
    """Reset the optimizer state for the next resolution of training. Also adjust LR."""
    with strategy.scope():
        lr = learning_rate * (learning_rate_decay**(res_log2 - 2))
        for opt in [G_optimizer, D_optimizer]:
            weights = opt.get_weights()
            opt.set_weights([
                0.0 if isinstance(w, float) else np.zeros(w.shape, dtype=w.dtype)
                for w in weights
            ])
            K.set_value(opt.iterations, 0)
            K.set_value(opt.lr, lr)
Example #26
    def on_batch_begin(self, batch, logs):
        # keep track of total batch count
        self._cur_iter += 1

        # calculate current learning rate
        lr = self._lr_init * (1 -
                              (self._cur_iter / self._max_iter))**self._power

        # limit learning rate
        lr = max(lr, self._lr_min)

        # set the new learning rate
        K.set_value(self.model.optimizer.lr, lr)
Example #27
    def on_batch_begin(self, batch, logs=None):
        # params holds a few values that the model passes to the Callback automatically
        if self.params['steps'] is None:
            self.steps_per_epoch = np.ceil(1. * self.params['samples'] /
                                           self.params['batch_size'])
        else:
            self.steps_per_epoch = self.params['steps']
        if self.num_passed_batchs < self.steps_per_epoch * self.warmup_epochs:
            # during the first 10 epochs, the learning rate rises linearly from zero to 0.001
            K.set_value(
                self.model.optimizer.lr, 0.001 * (self.num_passed_batchs + 1) /
                self.steps_per_epoch / self.warmup_epochs)
            self.num_passed_batchs += 1
Example #28
    def update_weights(self):
        new_weights = list()
        for name in self.loss_weights.keys():
            K.set_value(
                self.loss_weights[name],
                self.rolling_avg[self.ref_loss] / self.rolling_avg[name])
            new_weights.append(self.rolling_avg[self.ref_loss] /
                               self.rolling_avg[name])

        out_str = ''
        for name, val in zip(self.loss_weights.keys(), new_weights):
            out_str += '{}: {:7.2f}\t'.format(name, val)
        print('WEIGHTS UPDATE: ' + out_str)
Example #29
    def on_batch_begin(self, batch, logs=None):
        if self.should_update:
            if self.current_step < self.warmup_steps:
                lr = self.warmup(current_step=self.current_step)
            else:
                lr = self.final_lr
                self.should_update = False

            K.set_value(self.model.optimizer.lr, lr)
            self.current_step += 1

            if self.verbose > 0:
                print(f'\nSetting learning rate to {lr}')
Example #30
    def on_batch_end(self, batch, logs=None):
        logs = logs or {}

        if self.step < self.w:
            lr = self.lr / self.w * self.step
        else:
            lr = K.get_value(self.optimizer.lr)
            for step in self.p:
                if step == self.step:
                    lr = lr / self.factor

        K.set_value(self.optimizer.lr, lr)
        self.step += 1
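On recent tf.keras versions the same update can be written without the backend module, since the optimizer's learning rate is itself a variable; a one-line equivalent of the pattern used throughout these examples:

    # equivalent to K.set_value(model.optimizer.lr, new_lr) on tf.keras optimizers
    model.optimizer.learning_rate.assign(new_lr)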