Example #1
def callbacks(cp, tb, logs):

    # Callbacks: Save the model.
    directory1 = os.path.join('out', 'checkpoints', cp)
    if not os.path.exists(directory1):
        os.makedirs(directory1)
    checkpointer = ModelCheckpoint(filepath=os.path.join(
        directory1, '{epoch:03d}.hdf5'),
                                   monitor='accuracy',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='max')

    # Callbacks: TensorBoard
    directory2 = os.path.join('out', 'tensorboard', tb)
    if not os.path.exists(directory2):
        os.makedirs(directory2)
    tensorboard = TensorBoard(log_dir=directory2)

    # Callbacks: Early Stopper
    early_stopper = EarlyStopping(monitor='loss', patience=100)

    # Callbacks: Save Results.
    directory3 = os.path.join('out', 'results', logs)
    if not os.path.exists(directory3):
        os.makedirs(directory3)
    csv_logger = CSVLogger(
        os.path.join(directory3, 'training-logs.csv'))

    # Learning rate schedule.
    lr_schedule = LearningRateScheduler(fixed_schedule, verbose=0)

    cb = [
        tensorboard, early_stopper, csv_logger, checkpointer, lr_schedule,
        CustomCallback()
    ]

    return cb
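A minimal usage sketch (not part of the original snippet), assuming a compiled model, training arrays X_train/y_train, and the external fixed_schedule and CustomCallback referenced above: the list returned by callbacks() is passed straight to fit().

# Hypothetical run name 'run1' for the checkpoint/TensorBoard/log subdirectories.
cb_list = callbacks(cp='run1', tb='run1', logs='run1')
model.fit(X_train, y_train, epochs=300, batch_size=32, callbacks=cb_list)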
def train(lambd, sigma, n_centers, trial):
    K.clear_session()
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(200)

    model = create_models(sigma, n_centers)
    model.compile("adam", affinity_loss(lambd), [acc])
    tf.logging.set_verbosity(tf.logging.FATAL)  # keep the logs from being flooded

    tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test, trial)

    history = model.fit(X_train, y_train, callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_f1 = max(f1.f1_log)
    print("lambda:{lambd:.04}, sigma:{sigma:.04} n_centers:{n_centers} / f1 = {max_f1:.04}")
    return max_f1
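The training function above passes a step_decay function that is defined elsewhere; for reference, a common step-decay schedule (a sketch with illustrative values, not the author's function) looks like this:

def step_decay(epoch):
    # Illustrative: start at 1e-3 and halve the learning rate every 20 epochs.
    initial_lr, drop, epochs_drop = 1e-3, 0.5, 20
    return initial_lr * (drop ** (epoch // epochs_drop))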
Example #3
    def initialize_callbacks(self):
        checkpoint_path = os.path.join(
            self.output_dir,
            self.run_uid + "_" + self.model_string +
            "-{epoch:04d}-{val_loss:.4f}.h5",
        )
        checkpoint = ModelCheckpoint(
            checkpoint_path,
            monitor="val_loss",
            verbose=1,
            save_best_only=True,
            mode="min",
            period=self.period_save,
        )

        # Add on epoch_end callback

        epo_end = OnEpochEnd([self.local_generator.on_epoch_end])

        if self.apply_learning_decay == 1:
            step_decay_callback = create_decay_callback(
                self.initial_learning_rate, self.epochs_drop)

            lrate = LearningRateScheduler(step_decay_callback)
            callbacks_list = [checkpoint, lrate]
        else:
            callbacks_list = [checkpoint]

        if not (self.tensorboard_path == ""):
            board_callback = tensorflow.keras.callbacks.TensorBoard(
                log_dir=os.path.join(self.tensorboard_path, self.model_string),
                histogram_freq=1,
            )
            callbacks_list.append(board_callback)

        if tensorflow.__version__ != '2.3.0':
            callbacks_list.append(epo_end)

        self.callbacks_list = callbacks_list
Example #4
def train_dnn(epochs=10, num_rounds=5, num_layers=2):
    model = make_dnn(num_layers=num_layers)
    model.compile(optimizer='adam', loss='mse', metrics=['acc'])

    model.summary()

    x_train, y_train = samples.make_train_data(10**7, num_rounds)
    x_test, y_test = samples.make_train_data(10**6, num_rounds)

    lr = LearningRateScheduler(cyclic_lr(9, 0.0001, 0.002))
    check = make_checkpoint('best_dnn_r' + str(num_rounds) + "_l" +
                            str(num_layers) + ".h5")

    h = model.fit(x_train,
                  y_train,
                  epochs=epochs,
                  batch_size=5000,
                  validation_data=(x_test, y_test),
                  callbacks=[lr, check])
    print("Best validaation accuracy: ", np.max(h.history['val_acc']))

    return model
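Here cyclic_lr(9, 0.0001, 0.002) is assumed to return a function mapping the epoch index to a learning rate; a plausible sketch of such a cyclic schedule, sweeping linearly between the two rates and restarting every num_epochs epochs, is:

def cyclic_lr(num_epochs, lr_a, lr_b):
    # Illustrative: sweep linearly from lr_a to lr_b, then restart the cycle.
    return lambda epoch: lr_b + ((num_epochs - 1) - epoch % num_epochs) / (num_epochs - 1) * (lr_a - lr_b)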
Example #5
    def warmup(self, x_train, y_train, epochs=5, s_lr=1e-6, e_lr=0.001):
        """ Warmup for numerical stability
            x_train : training images
            y_train : training labels
            epochs  : number of epochs for warmup
            s_lr    : start warmup learning rate
            e_lr    : end warmup learning rate
        """
        print("*** Warmup (for numerical stability)")
        # Setup learning rate scheduler
        self.compile(optimizer=Adam(s_lr))
        lrate = LearningRateScheduler(self.warmup_scheduler, verbose=1)
        self.w_epochs = epochs
        self.w_lr = e_lr - s_lr

        # Train the model
        self.model.fit(x_train,
                       y_train,
                       epochs=epochs,
                       batch_size=32,
                       verbose=1,
                       callbacks=[lrate])
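The warmup method above calls self.warmup_scheduler, which is defined elsewhere in the class; given that w_lr is set to e_lr - s_lr and w_epochs to the warmup length, a plausible sketch is a linear ramp (an assumption, not the author's implementation):

def warmup_scheduler(self, epoch, lr):
    # Illustrative linear ramp: raise the learning rate by an equal share of (e_lr - s_lr) each warmup epoch.
    return lr + self.w_lr / self.w_epochs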
Example #6
    def warmup(self, x_train, y_train, epochs=5, batch_size=32, s_lr=1e-6, e_lr=0.001, 
               loss='categorical_crossentropy', metrics=['acc']):
        """ Warmup for numerical stability
            x_train   : training images
            y_train   : training labels
            epochs    : number of epochs for warmup
            batch_size: batch size
            s_lr      : start warmup learning rate
            e_lr      : end warmup learning rate
            loss      : loss function
            metrics   : training metrics to report
        """
        print("*** Warmup (for numerical stability)")
        # Setup learning rate scheduler
        self.compile(optimizer=Adam(s_lr), loss=loss, metrics=metrics)
        lrate = LearningRateScheduler(self.warmup_scheduler, verbose=1)
        self.w_epochs = epochs
        self.w_lr     = e_lr - s_lr

        # Train the model
        self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1,
                       callbacks=[lrate])
    def training(self, x_train, y_train, epochs=10, batch_size=32, lr=0.001, decay=1e-05):
        """ Full Training of the Model
            x_train    : training images
            y_train    : training labels
            epochs     : number of epochs
            batch_size : size of batch
            lr         : learning rate
            decay      : learning rate decay
        """

        # Check for hidden dropout layer in classifier
        for layer in self.model.layers:
            if isinstance(layer, Dropout):
                self.hidden_dropout = layer
                break    

        self.t_decay = decay
        self.compile(optimizer=Adam(lr=lr, decay=decay))

        lrate = LearningRateScheduler(self.training_scheduler, verbose=1)
        self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1,
                       callbacks=[lrate])
Example #8
    def callbacks_(self):
        '''callbacks'''
        lr_scheduler = LearningRateScheduler(self.lr_schedule)

        checkpoint = ModelCheckpoint(filepath = self.params.model_directory + "/weights.hdf5",
                                     monitor = 'val_loss',
                                     save_best_only = True,
                                     verbose = 1,
                                     save_weights_only = True)

        tb = TensorBoard(log_dir = self.params.model_directory + "/tensorboard",
                         histogram_freq = 0,
                         write_graph = True,
                         write_images = True,
                         embeddings_layer_names = None,
                         embeddings_metadata = None)

        csv_logger = CSVLogger(filename = self.params.model_directory + '/history.csv',
                                                          append = True,
                                                          separator = ",")

        return [lr_scheduler, checkpoint, tb, csv_logger]
Example #9
def build_model(n_kernels, kernel_size, stride, n_dense):
    model = tf.keras.models.Sequential()
    model.add(layers.Convolution2D(filters=n_kernels,
                                   kernel_size=(kernel_size, kernel_size),
                                   activation='relu',
                                   input_shape=(16, 16, 1)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(strides=(stride,stride)))
    model.add(layers.Dropout(rate=0.25))
    model.add(layers.Flatten())
    model.add(layers.Dense(n_dense,activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(10,activation='softmax'))
    
    adamOptimizer= tf.keras.optimizers.Adam(lr=0.0001)
    model.compile(optimizer=adamOptimizer,loss='categorical_crossentropy')
    
    annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
    history = model.fit(x_trn,y_trn,epochs=2,batch_size =16, verbose=2, 
                        validation_data=(x_val, y_val),callbacks=[annealer])
    
    tstError=model.evaluate(x_tst,y_tst)
    trnError=model.evaluate(x_trn,y_trn)    
    noOfParams=model.count_params()
    return (tstError,trnError,noOfParams)
Example #10
def _make_callback(args, save_models):
    """ 
    
    Generate the callback.

    Parameters
    ----------
    args: dict
        A dictionary containing all of the input parameters. 
        
    save_models: str
       Path to the output directory for the models. 
              
    Returns
    -------   
    callbacks: obj
        List of callback objects. 
        
        
    """

    m_name = str(args['output_name']) + '_{epoch:03d}.h5'
    filepath = os.path.join(save_models, m_name)
    early_stopping_monitor = EarlyStopping(monitor=args['monitor'],
                                           patience=args['patience'])
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor=args['monitor'],
                                 mode='auto',
                                 verbose=1,
                                 save_best_only=True)
    lr_scheduler = LearningRateScheduler(_lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=args['patience'] - 2,
                                   min_lr=0.5e-6)

    callbacks = [checkpoint, lr_reducer, lr_scheduler, early_stopping_monitor]
    return callbacks
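_lr_schedule is referenced but defined outside this snippet; a typical piecewise-constant schedule of the kind often paired with ReduceLROnPlateau (a sketch with illustrative break points) is:

def _lr_schedule(epoch):
    # Illustrative: keep 1e-3 for the first 40 epochs, then drop by 10x and 100x.
    lr = 1e-3
    if epoch > 80:
        lr *= 1e-2
    elif epoch > 40:
        lr *= 1e-1
    return lr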
Example #11
def train(weights_path, epochs, batch_size, initial_epoch):
    """Trains a model."""
    print ('loading data...')
    # Loads or creates training data.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()
    print ('getting model...')
    # Loads or creates model.
    model, checkpoint_path = get_model(input_shape,
                                       scale_factor=len(train)/batch_size,
                                       weights_path=weights_path)

    # Sets callbacks.
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1,
                                   save_weights_only=True, save_best_only=True)

    scheduler = LearningRateScheduler(schedule)
    print ('fitting model...')
    # Trains model.
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=[checkpointer, scheduler],
              validation_data=(valid, valid_targets))
Example #12
 def get_callbacks(self,
                   gen_bool,
                   val_gen,
                   val_y,
                   train_gen=None,
                   train_y=None):
     # callbacks when fitting
     early_stopping = OnAllValDataEarlyStopping(
         self.dataset.name,
         gen_bool,
         val_gen,
         val_y,
         train_gen,
         train_y,
         qsize=1000,
         workers=5,
         monitor=self.dataset.ea_metric,
         mode=self.dataset.ea_mode,
         # minimum change to qualify as an improvement
         min_delta=0,
         patience=self.patience_early_stopping,
         verbose=1,
         restore_best_weights=True)
     csv_logger = CSVLogger('temp.log')  # streams epoch results to a csv
     if self.lr_scheduler['name'] == 'ReduceLROnPlateau':
         lr_scheduler = ReduceLROnPlateau(monitor='val_loss',
                                          factor=0.2,
                                          patience=2,
                                          verbose=1,
                                          mode='auto',
                                          min_delta=0.0001,
                                          min_lr=0.001)
     elif self.lr_scheduler['name'] == 'LearningRateScheduler':
         rate = self.lr_scheduler['rate']
         # reducing the learning rate by "rate" every 2 epochs
         lr_scheduler = LearningRateScheduler(
             lambda epoch: self.init_lr * rate**(epoch // 2), verbose=0)
     callbacks = [early_stopping, lr_scheduler] + [csv_logger]
     return callbacks
Example #13
def get_lr_scheduler(lr: float, total_epochs: int, lr_params: dict):
  lr_schedule_name = lr_params['strategy'].lower()
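  # Note: every scheduler in the dict below is constructed eagerly, so lr_params must
  # provide the keys required by all strategies, not only the selected one.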

  get_lr = {
      "exponential_decay": LearningRateScheduler(ExponentialDecay(lr)),
      "step_decay": LearningRateScheduler(StepDecay(
          lr,
          lr_params['decay_rate'],
          lr_params['drop_after_num_epoch'])),
      "step_decay_schedule": LearningRateScheduler(StepDecaySchedule(
          lr,
          lr_params['drop_schedule'],
          lr_params['decay_rate'],
          total_epochs)),
      "explicit_schedule": LearningRateScheduler(ExplicitSchedule(
        lr,
        lr_params['drop_schedule'],
        lr_params['list_lr'],
      )),
      "polynomial_decay": LearningRateScheduler(PolynomialDecay(
          lr,
          lr_params['power'],
          total_epochs)),
      "inverse_time_decay": LearningRateScheduler(InverseTimeDecay(
          lr,
          lr_params['decay_rate'],
          lr_params['decay_step'],
          lr_params['staircase'])),
      "cosine_decay": LearningRateScheduler(CosineDecay(
          lr,
          lr_params['alpha'],
          total_epochs)),
      "lr_reduce_on_plateau": ReduceLROnPlateau(
          monitor='val_loss',
          factor=lr_params['decay_rate'],
          patience=lr_params['patience'],
          verbose=1,
          mode='auto',
          min_lr=lr_params['min_lr'])
  }
  return get_lr[lr_schedule_name]
Example #14
def learning_rate_scheduler(max_learn_rate, end_learn_rate, warmup_epoch_count,
                            total_epoch_count):
    """
    Wrapper function to return keras learning rate scheduler callback

    Args:
        max_learn_rate (float): maximum possible learning rate (achieved at peak
        of warmup epochs)
        end_learn_rate (float): minimum learning rate (achieved at end of
        maximum training epochs)
        warmup_epoch_count (int): number of epochs where learning rate rises
        total_epoch_count (int): maximum training epochs

    Returns:
        (tensorflow.keras.callbacks.LearningRateScheduler): LR-scheduler with
        internally passed learning rates
    """
    def lr_scheduler(epoch):
        """
        Output current learning rate based on epoch count, warmup and
        exponential decay

        Args:
            epoch (int): current epoch number

        Returns:
            (float): current learning rate
        """
        if epoch < warmup_epoch_count:
            res = (max_learn_rate / warmup_epoch_count) * (epoch + 1)
        else:
            res = max_learn_rate * math.exp(
                math.log(end_learn_rate / max_learn_rate) *
                (epoch - warmup_epoch_count + 1) /
                (total_epoch_count - warmup_epoch_count + 1))
        return float(res)

    return LearningRateScheduler(lr_scheduler, verbose=1)
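A quick way to inspect the schedule built above (illustrative values, not part of the original): the returned callback keeps the schedule function on its schedule attribute.

cb = learning_rate_scheduler(max_learn_rate=1e-3, end_learn_rate=1e-5,
                             warmup_epoch_count=3, total_epoch_count=10)
for epoch in range(10):
    print(epoch, cb.schedule(epoch))  # linear ramp over epochs 0-2, exponential decay afterwards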
    def train(self):
        lr = self.config.learning_rate
        self.histories = {}
        earlystopping = EarlyStopping(monitor='val_loss', patience=10, min_delta=0.01, mode='auto')
        csv_log = CSVLogger("logs/logfiles/results.csv")
        # checkpoint = ModelCheckpoint( filepath='./ckpt/', save_freq='epoch',save_best_only=False, save_weights_only=True,verbose=1)
        

        self.model.compile(
              optimizer=tf.keras.optimizers.Adam(lr=0.0001),
              loss=tf.keras.metrics.binary_crossentropy,
              metrics=[self.macro_f1]
              )
        
        for k in range(0, 1):
            best_val_loss = 0.5
            print("Processing Fold {}.... ".format(k))
            filepath='./ckpt/'+self.timestamp + '/'+str(k)+'/'
            
            # checkpoint = ModelCheckpoint(
            #     filepath=filepath, monitor='val_loss',
            #     save_best_only=True, save_weights_only=True,verbose=10
            #     )

            with tf.device('/gpu:0'):
                history = self.model.fit(
                    x=self.dataset[k]['train'],
                    epochs=self.config.epochs,
                    validation_data=self.dataset[k]['valid'],
                    callbacks=[self.tensorboard_callback, earlystopping, csv_log, LearningRateScheduler(self.lr_decay, verbose=1)]
                )
                print("Processing Done for {} Fold .... ".format(k))
                self.histories[k] = history
                val_loss = history.history['val_loss']
                if float(min(val_loss)) < float(best_val_loss):
                    self.model.save(filepath)

        return self.model
Example #16
    def fit_data(self, show_figures=True):
        checkpointer = ModelCheckpoint(filepath=self.best_weights(self.name),
                                       verbose=self.verbose,
                                       save_best_only=True)
        train_values = {c: self.X_train[c] for c in self.categorical_columns}
        train_values['continuouse'] = self.X_train[
            self.non_categorical_columns]
        train_values['rnn_input'] = self.ts_train
        val_values = {c: self.X_valid[c] for c in self.categorical_columns}
        val_values['continuouse'] = self.X_valid[self.non_categorical_columns]
        val_values['rnn_input'] = self.ts_valid
        lrate = LearningRateScheduler(self.step_decay, verbose=2)

        history = self.model.fit(
            train_values,
            self.y_train,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(val_values, self.y_valid),
            verbose=self.verbose,
            shuffle=True,
            #validation_split=0.2,
            callbacks=[checkpointer, lrate])
        if show_figures:
            fig, ax = plt.subplots(figsize=(10, 5))
            # plot history
            ax.plot(history.history['loss'], label='train')
            ax.plot(history.history['val_loss'], label='test')
            ax.legend()
            figure_name = os.path.join(self.output_directory,
                                       self.name + "_history.png")

            plt.savefig(figure_name)
            plt.show()
        self.model.load_weights(self.best_weights(self.name))

        p = self.model.evaluate(val_values, self.y_valid)
        self.info(f"Validation Score: {p}")
Example #17
def main():

    inputShape = (None, None, 3)
    batchSize = 2

    hr_image = load_img(os.path.join(os.getcwd(), 'hr_image', 'HR.png'))
    #target_size = inputShape[:-1]) ## loading the high-resolution image
    hr_image = np.array(hr_image, dtype=np.float32) * (2 / 255) - 1
    hr_image = np.array([hr_image] * batchSize)  ## creating fake batches

    lr_image = load_img(os.path.join(os.getcwd(), 'lr_image', 'LR.png'))
    #target_size = inputShape[:-1]) ## loading the low-resolution image
    lr_image = np.array(lr_image, dtype=np.float32) * (2 / 255) - 1
    lr_image = np.array([lr_image] * batchSize)

    nn = model(inputShape)
    print(nn.summary())
    #lr_schedule = ExponentialDecay(
    #   initial_learning_rate=1e-1,
    #  decay_steps=1000,
    # decay_rate=0.99)
    lrate = LearningRateScheduler(step_decay)
    #optimizer = SGD(learning_rate = 0.01)
    #optimizer = SGD(learning_rate=0.001,epsilon = 1e-9, beta_1 = .9, beta_2 = .999)

    optimizer = Adam(lr=0.001, epsilon=1e-8, beta_1=.9, beta_2=.999)
    nn.compile(optimizer=optimizer, loss='mse')

    #es = EarlyStopping(monitor = 'loss' , mode = 'min', verbose = 1,
    #       patience = 100) ## early stopping to prevent overfitting

    history = nn.fit(lr_image, hr_image, epochs=1000, batch_size=batchSize)
    #callbacks=[lrate])
    """ reconstrucing high-resolution image from the low-resolution image """
    pred = nn.predict(lr_image)
    pred = np.uint8((pred + 1) * 255 / 2)
    pred = Image.fromarray(pred[0])
    pred.save("re.png")
Example #18
    def build_model(self):
        x_input = Input(shape=self.input_shape, name='input_1')
        backbone = tf.keras.applications.DenseNet121(include_top=False,
                                                   weights='imagenet',
                                                   input_tensor=x_input,
                                                   input_shape=None,
                                                   pooling='avg',
                                                   classes=1)
        # backbone = UNIWARD(x_input).model
        for layer in backbone.layers:
            layer.trainable = True
        out = Dense(self.num_classes, kernel_initializer='normal')(backbone.output)
        if self.mode == "multiclass":
            out = Activation('softmax')(out)

        model = Model(inputs=backbone.input, outputs=out)
        model_checkpoint = ModelCheckpoint(
            filepath=self.log_dir + '/regression_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        tb = TensorBoard(log_dir=self.log_dir, update_freq='epoch', profile_batch=100000000)
        self.callbacks = [tb, model_checkpoint]


        adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                        verbose=1)
        model.compile(optimizer=adam,
                      loss=self.loss,
                      metrics=[self.loss, alaska_tf])

        self.model = model
        return self.model
Example #19
def basic_callbacks(checkpoint="keras_checkpoints.h5", evals=[], lr=0.001, lr_decay=0.05, lr_min=0, lr_decay_steps=0):
    checkpoint_base = "./checkpoints"
    if not os.path.exists(checkpoint_base):
        os.mkdir(checkpoint_base)
    checkpoint = os.path.join(checkpoint_base, checkpoint)
    model_checkpoint = ModelCheckpoint(checkpoint, verbose=1)
    # model_checkpoint = keras.callbacks.experimental.BackupAndRestore(checkpoint_base)

    if isinstance(lr_decay_steps, list):
        # Constant decay on epoch
        lr_scheduler = ConstantDecayScheduler(sch=lr_decay_steps, lr_base=lr, decay_rate=lr_decay)
    elif lr_decay < 1:
        # Exponential decay
        warmup = 10 if lr_decay_steps == 0 else lr_decay_steps
        lr_scheduler = LearningRateScheduler(lambda epoch: scheduler(epoch, lr, lr_decay, lr_min, warmup=warmup))
    else:
        # Cosine decay on epoch / batch
        lr_scheduler = CosineLrScheduler(
            lr_base=lr, decay_steps=lr_decay, lr_min=lr_min, warmup_iters=1, lr_on_batch=lr_decay_steps, restarts=4
        )
    my_history = My_history(os.path.splitext(checkpoint)[0] + "_hist.json", evals=evals)
    # tensor_board_log = keras.callbacks.TensorBoard(log_dir=os.path.splitext(checkpoint)[0] + '_logs')
    return [model_checkpoint, lr_scheduler, my_history, Gently_stop_callback()]
Example #20
    def get_callbacks(self, model_prefix='Model'):
        """
        Creates a list of callbacks that can be used during training to create a
        snapshot ensemble of the model.

        Args:
            model_prefix: prefix for the filename of the weights.

        Returns: list of 3 callbacks [ModelCheckpoint, LearningRateScheduler,
                 SnapshotModelCheckpoint] which can be provided to the 'fit' function
        """
        if not os.path.exists('weights/'):
            os.makedirs('weights/')

        callback_list = [ModelCheckpoint('weights/%s-Best.h5' % model_prefix,
                                         monitor='val_acc',
                                         save_best_only=True, save_weights_only=True),
                         LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                         SnapshotModelCheckpoint(self.T,
                                                 self.M,
                                                 fn_prefix='weights/%s' % model_prefix)]

        return callback_list
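self._cosine_anneal_schedule is part of the snapshot-ensemble class itself; the cosine-annealing-with-restarts schedule used by snapshot ensembles is commonly written roughly like this (a sketch; self.T as total epochs, self.M as number of snapshots, and self.alpha_zero as the initial learning rate are assumed attributes):

import math

def _cosine_anneal_schedule(self, epoch):
    # Cosine annealing restarted every T/M epochs, decaying from alpha_zero toward zero within each cycle.
    cycle_length = self.T // self.M
    cos_inner = math.pi * (epoch % cycle_length) / cycle_length
    return float(self.alpha_zero / 2 * (math.cos(cos_inner) + 1))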
Example #21
def create_callbacks(log_dir):
    # early_stopping = EarlyStopping(patience=4, monitor='val_loss', verbose=1)

    lr_schedule = LearningRateScheduler(clr.lrfn, verbose=1)

    model_name = f'./output/models/best-model.hdf5'

    model_checkpoint = ModelCheckpoint(monitor='val_loss',
                                       filepath=model_name,
                                       save_best_only=True,
                                       verbose=1)

    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    callbacks = [
        lr_schedule,
        model_checkpoint,
        tensorboard_callback,
    ]

    return callbacks
Example #22
def train(X_train):
    all_predictions = []
    from sklearn.model_selection import KFold
    from tensorflow.keras.callbacks import LearningRateScheduler
    import math
    kf = KFold(n_splits=5, random_state=42, shuffle=True)
    lr_schedule = LearningRateScheduler(lambda epoch: 1e-8 * math.pow(drop, math.floor((1+epoch)/0.001)))

    for ind, (tr, val) in enumerate(kf.split(X_train)):
        X_tr = X_train[tr]
        y_tr = y_train[tr]
        X_vl = X_train[val]
        y_vl = y_train[val]
        model = create_model()
        print( X_tr.shape,y_tr.shape,X_vl.shape,y_vl.shape)
        model.fit(
            X_tr, y_tr, epochs=1, batch_size=64, validation_data=(X_vl, y_vl), verbose=True, 
            callbacks=[lr_schedule]
        )

        print("Done training! Now predicting")
        all_predictions.append(model.predict(X_test))
    return all_predictions
Example #23
def cifar10_classfication_train_procedure_with_alexnet(save_path, epoch=5):
    import Data
    import Model

    def lr_schedule(epoch_index, cur_lr):
        if epoch_index < 100:
            return 0.001
        elif epoch_index < 120:
            return 0.02
        else:
            return 0.02

    def image_preprocessing(images):
        return images

    start_time = time.time()
    x_train, y_train, x_test, y_test = Data.load_data(
        dataset_name="cifar10",
        data_dir="./tmp/cifar-10-batches-py/",
        preprocessing=image_preprocessing)
    end_time = time.time()
    print("[Train] Load data time: ", end_time - start_time)

    model = Model.alexnet(dataset_name="cifar10", shape=(32, 32, 3))
    model.summary()

    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              epochs=epoch,
              batch_size=32,
              callbacks=[lr_scheduler])
    model.save(save_path)
    def train(self, X_train, y_train, X_val, y_val):
        # Compile the model
        self.model.compile(optimizer=SGD(lr=self.initial_lr, momentum=0.9),
                           loss="categorical_crossentropy",
                           metrics=["acc"])
        # Data Augmentation
        traingen = ImageDataGenerator(rescale=1. / 255,
                                      width_shift_range=4. / 32,
                                      height_shift_range=4. / 32,
                                      horizontal_flip=True)
        valgen = ImageDataGenerator(rescale=1. / 255)
        # TPU
        tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu_grpc_url)
        strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
        self.model = tf.contrib.tpu.keras_to_tpu_model(self.model,
                                                       strategy=strategy)

        # Callback
        time_cb = TimeHistory()
        lr_cb = LearningRateScheduler(self.lr_schduler)
        # Train
        history = self.model.fit_generator(traingen.flow(X_train,
                                                         y_train,
                                                         batch_size=1024),
                                           epochs=self.nb_epochs,
                                           steps_per_epoch=len(X_train) / 1024,
                                           validation_data=valgen.flow(
                                               X_val, y_val, batch_size=1024),
                                           validation_steps=len(X_val) / 1024,
                                           callbacks=[time_cb, lr_cb]).history
        history["time"] = time_cb.times
        # Save history
        file_name = f"{self.framework}_n{self.n}.dat"
        with open(file_name, "wb") as fp:
            pickle.dump(history, fp)
Example #25
    def set_callbacks(self):
        """
        registers the callbacks
        * EarlyStopping: stops training when the model no longer learns
        * ModelCheckpoint: stores the weights of the best epoch
        * ReduceLROnPlateau: reduces the LR when training finds itself on a plateau
        * LearningRateScheduler: halves the LR every 5th epoch
        :return:
        """
        print(self.weights_path)
        checkpoint = ModelCheckpoint(self.weights_path,
                                     monitor='dice_coef',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max',
                                     save_weights_only=True)
        reduceLROnPlat = ReduceLROnPlateau(monitor='dice_coef',
                                           factor=0.33,
                                           patience=3,
                                           verbose=1,
                                           mode='max',
                                           epsilon=0.0001,
                                           cooldown=0,
                                           min_lr=1e-8)
        early = EarlyStopping(monitor="dice_coef", mode="max", patience=50)

        def scheduler(epoch, lr):
            e = epoch + 1
            if e % 5 != 0:
                new_lr = lr
            else:
                new_lr = lr * 0.5
                print("Reducing learning rate to: " + str(new_lr))
            return new_lr

        lr_schedule = LearningRateScheduler(schedule=scheduler)
        self.callbacks_list = [checkpoint, reduceLROnPlat, early, lr_schedule]
Example #26
def train_network():
    history = load_data()
    xs, y_policies, y_values = zip(*history)

    # Reshape the input data for training
    a, b, c = DN_INPUT_SHAPE
    xs = np.array(xs)
    xs = xs.reshape(len(xs), c, a, b).transpose(0, 2, 3, 1)
    y_policies = np.array(y_policies)
    y_values = np.array(y_values)

    model = load_model('./model/best.h5')
    model.compile(loss=['categorical_crossentropy', 'mse'], optimizer='adam')

    # Learning rate schedule
    def step_decay(epoch):
        x = 0.001
        if epoch >= 50: x = 0.0005
        if epoch >= 80: x = 0.00025
        return x

    lr_decay = LearningRateScheduler(step_decay)

    print_callback = LambdaCallback(on_epoch_begin=lambda epoch, logs: print(
        '\rTrain {}/{}'.format(epoch + 1, RN_EPOCHS), end=''))

    model.fit(xs, [y_policies, y_values],
              batch_size=128,
              epochs=RN_EPOCHS,
              verbose=0,
              callbacks=[lr_decay, print_callback])
    print('')

    model.save('./model/latest.h5')

    K.clear_session()
    del model
Example #27
def train_model():
    generator = data_generator.SegmentationSequence(
        os.path.normpath("..\\data\\train\\images"),
        os.path.normpath("..\\data\\train\\masks"),
        augmenter=augmentation.augment,
        batch_size=4,
        output_img_size=(240, 240),
        steps_per_epoch=1000,
    )
    model = model_builder.build_segmentation_model((240, 240, 3), 4, 4, 20, 16)
    opt = tf.keras.optimizers.Adam(lr=lr_schedule(0), clipnorm=1.0)
    model.compile(
        optimizer=opt,
        loss=tversky_loss,
        metrics=[dice_coef(1.0)],
    )

    tb = TensorBoard(
        log_dir=os.path.normpath(".\\logs\\fit\\") +
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
        histogram_freq=1,
    )
    # lr_reducer = ReduceLROnPlateau(
    #     factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-8
    # )
    lr_scheduler = LearningRateScheduler(lr_schedule)
    checkpoints = ModelCheckpoint(
        os.path.normpath(".\\ckpts"),
        monitor="loss",
        verbose=0,
        save_weights_only=False,
        mode="auto",
        save_freq="epoch",
    )
    callbacks = [lr_scheduler, tb, checkpoints]
    history = model.fit(generator, epochs=EPOCHS, callbacks=callbacks)
    model.save(MODEL_SAVE_PATH)
Example #28
def run_lr_sweep(model,
                 x,
                 y=None,
                 lr_start=1e-6,
                 lr_stop=1e2,
                 steps=1000,
                 mode='exponential',
                 cutoff=None):
    """
    Takes a compiled Keras model and runs a learning rate sweep with some data.
    Plots loss and learning rate after the run completes.

    :param model: Compiled Keras model
    :param x: either data generator or training input
    :param y: None if x is a data generator, else training targets
    :param lr_start: Lower bound for learning rate
    :param lr_stop: Higher bound for learning rate
    :param steps: Number of steps to perform learning sweep over. In effect this is the number of epochs
    with steps_per_epoch=1
    :param mode: Rate of change for learning rate. Either 'linear' or 'exponential'.
    :param cutoff: When the loss explodes, the plotting will try to clip that part of the plot so smaller differences
    in loss are visible without extensive zooming. Optionally, set this argument to limit the loss axis in all plots
    """
    data = GatherLRDataCallback()
    sweep_func = _get_lr_sweep(lr_start, lr_stop, steps, mode)
    # plt.plot([sweep_func(i) for i in range(steps)])
    #
    # plt.xlabel('steps')
    # plt.ylabel('LR')
    # plt.show()

    model.fit(x if y is None else (x, y),
              epochs=steps,
              steps_per_epoch=1,
              shuffle=False,
              callbacks=[data, LearningRateScheduler(sweep_func)])
    data.plot(cutoff=cutoff)
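GatherLRDataCallback and _get_lr_sweep live elsewhere in the same module; a minimal sketch of the sweep function, assuming it maps an epoch index to a learning rate between the two bounds, could be:

import numpy as np

def _get_lr_sweep(lr_start, lr_stop, steps, mode):
    # Illustrative: interpolate geometrically ('exponential') or linearly between the bounds.
    if mode == 'exponential':
        rates = np.geomspace(lr_start, lr_stop, steps)
    else:
        rates = np.linspace(lr_start, lr_stop, steps)
    return lambda epoch: float(rates[min(epoch, steps - 1)])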
Example #29
def capsul_train_predict():
    (x_train, y_train), (x_test, y_test), (x_indt, y_indt) = load_data(col=9, row=11)
    model = CapsNet(input_shape=(11,10,2,),kerl=3, n_class=2, num_routing=5)
    
    model.compile(optimizer=Adam(learning_rate=lr_schedule(0)),
                 loss=[margin_loss, 'mse'],
                 loss_weights=[1., 0.325],
                 metrics={'out_caps': 'accuracy'})
    
    lr_scheduler = LearningRateScheduler(lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5,
                                   min_lr=0.5e-6)

    save_dir = os.path.join(os.getcwd(), 'save_models')
    model_name = 'trb_capsul_model.h5'
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filepath = os.path.join(save_dir, model_name)    
    checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_out_caps_accuracy',
                                verbose=1, save_best_only=True,
                                save_weights_only=True)
    cbs = [checkpoint, lr_reducer, lr_scheduler]
    model.fit([x_train, y_train], [y_train, x_train],
                  batch_size=32,
                  epochs=10,
                  validation_data=[[x_test, y_test], [y_test, x_test]],
                  shuffle=True,
                  callbacks=cbs)
    model.load_weights(filepath)    
    y_pred, _ = model.predict([x_test, y_test])
    
    info = 'By capsul net Training in TRB, Testing in testdata.....'
    writeMetrics('TRB_result.txt', y_test, y_pred, info)
    
    indt_pred, _ = model.predict([x_indt, y_indt])
    info = 'By capsul net Training in TRB, Testing in nCoV-19...'
    writeMetrics('TRB_result.txt', y_indt, indt_pred, info)
def train(model: tf.keras.Model) -> tf.keras.Model:
    logdir = tempfile.mkdtemp()
    print('Writing training logs to ' + logdir)
    keras_file = '{0}/{0}_{1}_temp.h5'.format(args.architecture, args.dataset)
    tb = tf.keras.callbacks.TensorBoard(log_dir=logdir, profile_batch=0)
    es = EarlyStopping(monitor='val_loss',
                       mode='min',
                       verbose=1,
                       patience=round(cfg.epochs / 4))
    mc = ModelCheckpoint(keras_file,
                         monitor='val_acc',
                         mode='max',
                         verbose=1,
                         save_best_only=True)
    learning_rate_scheduler = LearningRateScheduler(
        schedule=cfg.learning_rate_scheduler)
    callbacks = [tb, es, mc, learning_rate_scheduler]
    opt = tf.keras.optimizers.Adam()
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size=cfg.batch_size,
              epochs=cfg.epochs,
              verbose=1,
              callbacks=callbacks,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    print('Saving model to: ', keras_file.replace('temp', 'original'))
    tf.keras.models.save_model(model,
                               keras_file.replace('temp', 'original'),
                               include_optimizer=False)
    return model