Example #1
    def search(self,
               epochs=None,
               callbacks=None,
               fit_on_val_data=False,
               **fit_kwargs):
        """Search for the best HyperParameters.

        If no early-stopping callback is passed in, one is injected to
        accelerate the search process. At the end of the search, the best
        model is fully trained with the specified number of epochs.

        # Arguments
            epochs: Int. The number of epochs to train each model during the
                search. If unspecified, a cap of 1000 epochs is used together
                with early stopping. Defaults to None.
            callbacks: A list of callback functions. Defaults to None.
            fit_on_val_data: Boolean. Whether to use both the training set and
                the validation set for the final fit of the best model.
        """
        if self._finished:
            return

        if callbacks is None:
            callbacks = []

        # Insert early-stopping for adaptive number of epochs.
        if epochs is None:
            epochs = 1000
            if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
                callbacks.append(tf_callbacks.EarlyStopping(patience=10))

        # Insert early-stopping for acceleration.
        acceleration = False
        new_callbacks = self._deepcopy_callbacks(callbacks)
        if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
            acceleration = True
            new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))

        super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)

        # Fully train the best model with original callbacks.
        if acceleration or fit_on_val_data:
            copied_fit_kwargs = copy.copy(fit_kwargs)
            if fit_on_val_data:
                # Concatenate training and validation data.
                copied_fit_kwargs['x'] = copied_fit_kwargs['x'].concatenate(
                    fit_kwargs['validation_data'])
                copied_fit_kwargs.pop('validation_data')
                # Remove early-stopping since no validation data.
                if utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
                    copied_fit_kwargs['callbacks'] = [
                        copy.deepcopy(callback)
                        for callback in callbacks
                        if not isinstance(callback, tf_callbacks.EarlyStopping)]
                    # Use best trial number of epochs.
                    copied_fit_kwargs['epochs'] = self._get_best_trial_epochs()
            model = self.final_fit(**copied_fit_kwargs)
        else:
            model = self.get_best_models()[0]

        model.save_weights(self.best_model_path)
        self._finished = True
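
The `utils.contain_instance` helper used above is project code that is not shown; a minimal sketch of such a predicate, assuming it simply tests a list for an instance of a given class:

from tensorflow.keras import callbacks as tf_callbacks

def contain_instance(instance_list, instance_type):
    # True if any element of the list is an instance of the given type.
    return any(isinstance(instance, instance_type)
               for instance in instance_list)

# Usage: decide whether an early-stopping callback must be injected.
cbs = [tf_callbacks.TensorBoard(log_dir='./logs')]
if not contain_instance(cbs, tf_callbacks.EarlyStopping):
    cbs.append(tf_callbacks.EarlyStopping(patience=10))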
Example #2
def get_callbacks(model_name):
    filepath = ''
    with open('Pathfile.txt', 'r') as myfile:
        filepath = myfile.read()
        filepath = filepath.split("\n")[0]
    tb_log_dir = os.path.join(filepath, 'Logs', model_name)
    lg_log_dir = os.path.join(filepath, 'History', model_name + '.csv')

    lg = callbacks.CSVLogger(lg_log_dir, separator=',', append=False)
    es = callbacks.EarlyStopping(monitor='loss',
                                 min_delta=0.0001,
                                 patience=40,
                                 verbose=1,
                                 mode='auto',
                                 restore_best_weights=True)
    # lr = callbacks.LearningRateScheduler(scheduler, verbose=1)
    #callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq='epoch')
    rop = callbacks.ReduceLROnPlateau(monitor='loss',
                                      factor=0.3,
                                      patience=5,
                                      verbose=1,
                                      mode='auto',
                                      min_delta=0.001,
                                      cooldown=0,
                                      min_lr=0.00000001)
    tb = callbacks.TensorBoard(
        log_dir=tb_log_dir,
        histogram_freq=0,
        write_graph=False,
        write_images=False,
        update_freq='epoch',
        profile_batch=0)  # embeddings_freq=0,embeddings_metadata=None)
    return [es, rop, tb, lg]
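
A hypothetical usage sketch for `get_callbacks`: since `es` and `rop` monitor the plain training 'loss', no validation data is needed. It assumes Pathfile.txt exists, its first line is a writable base path, and the Logs/ and History/ folders under it already exist (CSVLogger does not create directories):

import numpy as np
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(8,)),
    layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
x, y = np.random.rand(64, 8), np.random.rand(64, 1)
model.fit(x, y, epochs=5, callbacks=get_callbacks('demo_model'), verbose=0)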
Example #3
def search(train, val):

    # TODO: how to discriminate between trials?
    log_dir = './logs/' + datetime.now().strftime('%Y%m%d-%H%M%S')
    cp_dir = './checkpoints/'
    cp_path = cp_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
    os.makedirs(cp_dir, exist_ok=True)

    tb_cb = LRTensorBoard(log_dir=log_dir)
    es_cb = callbacks.EarlyStopping(patience=2)
    mc_cb = callbacks.ModelCheckpoint(cp_path,
                                      save_weights_only=True,
                                      save_best_only=True)
    lrs_cb = callbacks.LearningRateScheduler(scheduler, verbose=0)
    cbs = [
        #tb_cb,
        #es_cb,
        #mc_cb,
        lrs_cb,
    ]

    time.sleep(1)  # ensure a unique timestamped log_dir per call

    x_train, y_train = train

    tuner = build_search()
    tuner.search(x_train,
                 y_train,
                 batch_size=64,
                 epochs=10,
                 validation_data=val,
                 callbacks=cbs,
                 verbose=0)

    return tuner
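
`LRTensorBoard` and `scheduler` are project-specific and not shown. A common sketch for the former, assuming it is a TensorBoard subclass that also logs the current learning rate (useful when a LearningRateScheduler is active):

from tensorflow.keras import backend as K
from tensorflow.keras import callbacks

class LRTensorBoard(callbacks.TensorBoard):
    # TensorBoard that additionally records the optimizer's learning rate.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)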
Example #4
def make_callbacks(params: dict) -> List[callbacks.Callback]:
    callbacks_list = []
    timestamp = datetime.now().strftime("%y-%m-%d_%H_%M_%S")

    if params['tensorboard']:
        callbacks_list.append(
            callbacks.TensorBoard(log_dir='log/fits/fit_' + timestamp + '_' +
                                  params['name']))

    if params['modelcheckpoint']:
        callbacks_list.append(
            callbacks.ModelCheckpoint('log/models/model_' + timestamp + '_' +
                                      params['name'],
                                      monitor='val_loss',
                                      save_best_only=True,
                                      verbose=1))

    if params['earlystopping']:
        callbacks_list.append(
            callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0.001,
                                    patience=5,
                                    verbose=1))

    return callbacks_list
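
A usage sketch with an assumed `params` layout; only the keys read by make_callbacks are included:

params = {
    'name': 'baseline',
    'tensorboard': True,
    'modelcheckpoint': False,
    'earlystopping': True,
}
cbs = make_callbacks(params)  # -> [TensorBoard(...), EarlyStopping(...)]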
Example #5
    def train(self):
        if "path_to_drive" not in self.configuration.keys():
            self.configuration["path_to_drive"] = ""

        model_checkpoint = callbacks.ModelCheckpoint(
            '{}{}.hdf5'.format(self.configuration["path_to_drive"], self.name),
            monitor=self.configuration["monitor_accuracy"],
            verbose=1,
            save_best_only=True,
            mode='max')
        logdir = os.path.join(
            "logs",
            datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir,
                                                              histogram_freq=1)

        early_stopping = callbacks.EarlyStopping(
            monitor=self.configuration["monitor_accuracy"],
            patience=20,
            verbose=1,
            mode='max')

        print("Start fit...")
        self.model.fit(
            x=self.train_generator,
            # steps_per_epoch=self.x_train.shape[0],
            epochs=self.configuration["epochs"],
            validation_data=self.validation_generator,
            # validation_steps=self.x_val.shape[0],
            callbacks=[early_stopping, model_checkpoint, tensorboard_callback],
            verbose=1)
Example #6
    def model_train(self, params):
        clear_session()

        model = self.build_model(params)
        adam = optimizers.Adam(learning_rate=params['LR'])
        model.compile(optimizer=adam, loss='mae', metrics=['accuracy'])

        reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                factor=0.5,
                                                patience=params['LR_patience'],
                                                verbose=0,
                                                mode='min')
        earlystop = callbacks.EarlyStopping(monitor='val_loss',
                                            patience=params['ES_patience'],
                                            verbose=0,
                                            restore_best_weights=False,
                                            mode='min')

        model.fit(self.x_train,
                  self.y_train,
                  batch_size=256,
                  epochs=250,
                  validation_data=(self.x_valid, self.y_valid),
                  verbose=0,
                  callbacks=[reduce_lr, earlystop])
        return model
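
The `params` dict is defined elsewhere; an assumed layout covering the keys read here (`build_model` may consume additional keys, and `trainer` stands in for the owning object):

params = {
    'LR': 1e-3,         # Adam learning rate
    'LR_patience': 5,   # epochs without val_loss improvement before halving the LR
    'ES_patience': 15,  # epochs without val_loss improvement before stopping
}
model = trainer.model_train(params)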
Example #7
def model_training():
    df = pd.read_csv("results.csv")
    X = df[["date", "home_team", "away_team", "country", "neutral"]]
    y = df[["home_score", "away_score"]]
    X_train, X_valid, y_train, y_valid = train_test_split(X,
                                                          y,
                                                          train_size=0.80)

    preprocessor = ColumnTransformer(
        transformers=[('date_scale', StandardScaler(),
                       ['date']), ('neutral', OrdinalEncoder(), ['neutral']),
                      ('home',
                       OneHotEncoder(handle_unknown='ignore', sparse=False),
                       ['home_team']),
                      ('away',
                       OneHotEncoder(handle_unknown='ignore', sparse=False),
                       ['away_team'])])

    pipeline = Pipeline(steps=[('date', DateTransformer()),
                               ('pre', preprocessor)],
                        verbose=True)

    X_train = pipeline.fit_transform(X_train)
    X_valid = pipeline.transform(X_valid)
    input_shape = [X_train.shape[1]]

    model = models.Sequential([
        layers.Dense(512, activation='relu', input_shape=input_shape),
        layers.Dense(512, activation='relu'),
        layers.Dense(2)
    ])
    model.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=0.01))

    early_stopping = callbacks.EarlyStopping(
        patience=5,
        min_delta=0.001,
        restore_best_weights=True,
    )

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_valid, y_valid),
                        epochs=500,
                        batch_size=64,
                        callbacks=[early_stopping],
                        verbose=1)

    curr_dir = Path(__file__).resolve().parent
    with open("model_count.txt", "r") as mc:
        cnt = mc.read()
        models.save_model(
            model, Path(curr_dir, 'Saved Neural Network Models',
                        f'model_{cnt}'))
        joblib.dump(
            pipeline,
            Path(curr_dir, 'Saved Fitted Preprocessing Pipelines',
                 f'pipeline_{cnt}.pkl'))
    with open("model_count.txt", "w") as mc:
        new_cnt = str(int(cnt) + 1)
        mc.write(new_cnt)
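
`DateTransformer` is a custom pipeline step that is not shown; a minimal sklearn-style sketch, assuming it maps the 'date' strings to ordinal numbers so StandardScaler can consume them:

import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin

class DateTransformer(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X):
        X = X.copy()
        # Map each date string to its proleptic Gregorian ordinal.
        X['date'] = pd.to_datetime(X['date']).map(pd.Timestamp.toordinal)
        return X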
Example #8
def initialTraining(optimazerLastLayer, noOfEpochs, batchSize, savedModelName,
                    train_generator, validation_generator, model,
                    modelArchitecture, lr_decay, learningRate):
    # compile the model and train the top layer only

    rms = RMSprop(decay=lr_decay, lr=learningRate)
    model.compile(optimizer=rms,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    earlystop = callbacks.EarlyStopping(monitor='val_loss',
                                        min_delta=0,
                                        patience=5,
                                        mode='auto')
    uniqueClasses = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    class_weights = class_weight.compute_class_weight('balanced',
                                                      classes=uniqueClasses,
                                                      y=train_generator.classes)
    # Keras expects class_weight as a dict mapping class index to weight.
    class_weight1 = dict(enumerate(class_weights))
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=nb_train_samples //
                                  batchSize,
                                  epochs=noOfEpochs,
                                  validation_data=validation_generator,
                                  validation_steps=nb_val_samples // batchSize,
                                  class_weight=class_weight1,
                                  callbacks=[earlystop])
    plt.plot(history.history['val_accuracy'], 'r')
    plt.plot(history.history['accuracy'], 'b')
    plt.title('Performance of model ' + modelArchitecture)
    plt.ylabel('Accuracy')
    plt.xlabel('Epochs No')
    plt.savefig(savedModelName + '_initialModel_plot.png')
    serializeModel(model, savedModelName + "_initialModel")
Example #9
 def _fit_model(self, spikes):
     #   ---------------- DENSE AUTOENCODER JAVI -------------------
     shape = np.shape(spikes)[1]
     spikes = np.squeeze(spikes)
     inputs = Input(shape=(shape, ))
     # a layer instance is callable on a tensor, and returns a tensor
     x = Dense(6, activation='sigmoid')(inputs)
     predictions = Dense(shape, activation='sigmoid')(x)
     # This creates a model that includes the Input layer and two Dense layers
     model = Model(inputs=inputs, outputs=predictions)
     model.compile(optimizer='nadam', loss='mse', metrics=['accuracy'])
     model_hidden = Model(inputs=inputs, outputs=x)
     call = callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0,
                                    patience=5,
                                    verbose=0,
                                    mode='auto',
                                    baseline=None,
                                    restore_best_weights=True)
     model.fit(spikes,
               spikes,
               batch_size=64,
               epochs=250,
               verbose=1,
               callbacks=[call],
               validation_split=0.2)
     # compute the final latent features
     latent_features = model_hidden.predict(spikes)
     return latent_features
Example #10
 def fit_model(self, X_train, Y_train):
     """
     Constructs a model. Populates the history object
     :param X_train: training inputs - list of n 32*32 input images
     :param Y_train: training outputs - list of n items
     :param X_val: validation inputs - list of m 32*32 validation images
     :param Y_val: validation outputs
     :return: None
     """
     Y_train = to_categorical(Y_train)
     self.model.compile(optimizer='sgd',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])
     X_train = np.array(X_train)
     X_train = X_train.reshape(
         (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
     self.history = self.model.fit(X_train,
                                   Y_train,
                                   shuffle=True,
                                   callbacks=[
                                       callbacks.EarlyStopping(
                                           monitor='val_loss',
                                           min_delta=0,
                                           patience=5,
                                           verbose=0,
                                           mode='min')
                                   ],
                                   epochs=100,
                                   validation_split=0.2)
Example #11
    def _get_callback(es_patience=25, es_min_delta=1e-5, verbose=0, **kwargs):
        """
        Setup the keras callbacks that will be used in training


        :param int es_patience: patience parameter for the early stopping
        :param int/float es_min_delta: minimum change in the monitored value
            for an epoch to count as an improvement
        :param int verbose: verbosity
        :param kwargs: additional keyword arguments for keras.callbacks.EarlyStopping

        :return: a list containing the early stopping object defined with input parameters
        """

        # Generate the early stopping objects based on specifications from the options file
        vec_callbacks = [
            callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=es_min_delta,
                                    patience=es_patience,
                                    verbose=verbose,
                                    mode='min',
                                    restore_best_weights=True,
                                    **kwargs)
        ]

        return vec_callbacks
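
A usage sketch, treating `_get_callback` as a plain function and using a hypothetical model and data; the trailing keyword argument is forwarded to EarlyStopping via **kwargs:

import numpy as np
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(8, activation='relu', input_shape=(4,)),
    layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
x, y = np.random.rand(128, 4), np.random.rand(128, 1)

cbs = _get_callback(es_patience=10, es_min_delta=1e-4, verbose=1,
                    baseline=None)  # baseline is passed through **kwargs
model.fit(x, y, validation_split=0.2, epochs=200, callbacks=cbs, verbose=0)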
Example #12
def train_by_fit(optimizer, loss, train_data, train_steps, validation_data,
                 validation_steps):
    """
    使用fit方式训练,可以知道训练完的时间,以及更规范的添加callbacks参数
    :param optimizer: 优化器
    :param loss: 自定义的loss function
    :param train_data: 以tf.data封装好的训练集数据
    :param validation_data: 验证集数据
    :param train_steps: 迭代一个epoch的轮次
    :param validation_steps: 同上
    :return: None
    """
    cbk = [
        callbacks.ReduceLROnPlateau(verbose=1),
        callbacks.EarlyStopping(patience=10, verbose=1),
        callbacks.ModelCheckpoint('./model/yolov3_{val_loss:.04f}.h5',
                                  save_best_only=True,
                                  save_weights_only=True)
    ]

    model = yolo_body()
    model.compile(optimizer=optimizer, loss=loss)

    # initial_epoch can be used to resume a previous training run
    model.fit(train_data,
              steps_per_epoch=max(1, train_steps),
              validation_data=validation_data,
              validation_steps=max(1, validation_steps),
              epochs=cfg.epochs,
              callbacks=cbk)
Example #13
def construct_LSTM(cases=True):
    num_features = 4 if cases else 3
    inputs = Input(shape=(None, num_features))
    x = LSTM(128, return_sequences=True)(inputs)
    x = Dropout(0.2)(x, training=True)
    x = LSTM(64)(x)
    x = Dropout(0.2)(x, training=True)
    x = Dense(128, 'relu')(x)
    x = Dense(64, 'relu')(x)
    outputs = Dense(21)(x)
    model = Model(inputs, outputs)

    if cases:
        x_train, y_train = to_sequence(DATA_UK_CASES_DIFF_LIST_SCALED, [FEATURE_STRINGENCY_FOR_CASES, FEATURE_VACCINATION_FOR_CASES, FEATURE_TESTS_FOR_CASES])
        latest_data, _ = to_sequence(DATA_UK_CASES_DIFF_LIST_SCALED[-7:], [FEATURE_STRINGENCY_FOR_CASES[-7:], FEATURE_VACCINATION_FOR_CASES[-7:], FEATURE_TESTS_FOR_CASES[-7:]], output_size=0)
    else:
        x_train, y_train = to_sequence(DATA_UK_DEATHS_DIFF_LIST_SCALED, [FEATURE_CASES_FOR_DEATHS, FEATURE_VACCINATION_FOR_DEATHS])
        latest_data, _ = to_sequence(DATA_UK_DEATHS_DIFF_LIST_SCALED[-7:], [FEATURE_CASES_FOR_DEATHS[-7:], FEATURE_VACCINATION_FOR_DEATHS[-7:]], output_size=0)

    earlyStopping = callbacks.EarlyStopping(monitor='loss', patience=10, restore_best_weights=True)

    model.compile('adam', 'mse')
    model.fit(x_train, y_train, callbacks=[earlyStopping], epochs=300, verbose=0)

    return result_LSTM(model, latest_data, cases)
Example #14
	def do_train(self):
		print("***************************start training***************************")
		save_callback = SaveCallback(save_path=conf.SAVE_DIR, backbone=conf.backbone, model=self.model,
									 timestamp=self.timestamp, save_name=self.save_name)  # , validation_data=[x_test, y_test])
		early_stop_callback = callbacks.EarlyStopping(monitor='val_loss', patience=conf.early_stop_patience, verbose=1, mode='auto', restore_best_weights=True)
		reduce_lr_callback = callbacks.ReduceLROnPlateau(monitor='val_acc', factor=conf.reduce_lr_factor, patience=conf.reduce_lr_patience, verbose=1,
														 mode='auto', epsilon=0.0001, cooldown=0, min_lr=0.00001)
		tensorboard_callback = TensorBoard(log_dir=conf.OUT_DIR)

		callbacks_list = []
		callbacks_list.append(save_callback)
		callbacks_list.append(early_stop_callback)
		#callbacks_list.append(reduce_lr_callback)
		callbacks_list.append(tensorboard_callback)

		if conf.FIT_GENERATE:
			self.model.fit(self.fit_gen.generate(),
						   epochs=conf.epochs,
						   steps_per_epoch=self.corpus_size // conf.batch_size,
					  	   callbacks=callbacks_list,
					  	   validation_data=([self.x_test, self.y_test], self.y_test), verbose=1)
		else:
			self.model.fit(x=[self.x_train,
							  self.y_train],
						   	  y=self.y_train,
						      batch_size=conf.batch_size,
						      epochs=conf.epochs,
					  		  callbacks=callbacks_list,
					          validation_data=([self.x_test, self.y_test], self.y_test),  # validation_split=0.02,
					          verbose=1)
		print("***************************train done***************************")
Example #15
 def train_model(
     self,
     train_X,
     train_Y,
     test_X,
     test_Y,
     model_name,
     file_save_path,
     epochs=100,
     EarlyStop_patience=30,
 ):
     my_callbacks = [
         callbacks.EarlyStopping(monitor='val_loss',
                                 patience=EarlyStop_patience,
                                 restore_best_weights=True),
         callbacks.CSVLogger(filename=file_save_path +
                             f'/Log_{model_name}.csv'),
     ]
     self.model.fit(x=train_X,
                    y=train_Y,
                    epochs=epochs,
                    validation_data=(test_X, test_Y),
                    steps_per_epoch=1,
                    validation_steps=1,
                    shuffle=True,
                    callbacks=my_callbacks)
     self.model.save(filepath=file_save_path + f'/model_{model_name}.h5')
Example #16
    def fit_generator(self, data):
        self.model = self.model_builder.build_model()

        train_generator =  data.data['train_generator']
        validation_generator =  data.data['validation_generator']
        
        early_stop = callbacks.EarlyStopping(
            monitor='val_loss', 
            patience=self.EARLY_STOP_PATIENCE, 
            verbose=0, 
            mode='min')
        file_name = self.FILE_NAME_FORMAT.format(data.identifier)
        mcp_save = callbacks.ModelCheckpoint(
            file_name,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        return self.model.fit_generator(
            train_generator,
            steps_per_epoch=300,
            epochs=self.EPOCHS,
            validation_data=validation_generator,
            validation_steps=50,
            callbacks=[early_stop, mcp_save])
Example #17
def get_callbacks(args, partition_idx):
    import tensorflow.keras.callbacks as bk
    # from CustomEarlyStopping import CustomEarlyStopping
    model_type = args.model_type
    timestamp = args.timestamp
    early_stop = args.early_stop
    t_name = args.weights_dir + '/tensorboard_logs/{}_{}_{}'.format(
        model_type, timestamp, partition_idx)
    t_name = t_name.replace('/', '\\')  # Correction for Windows paths

    callbacks = list()
    callbacks.append(None)  # Position for Checkpoint
    # CustomEarlyStopping(patience_loss=args.patience, patience_acc=10, threshold=.95)
    callbacks.append(bk.CSVLogger(args.weights_dir + '/log.csv'))
    # CustomEarlyStopping(patience_loss=10, threshold=0.95)
    callbacks.append(bk.TensorBoard(log_dir=t_name, histogram_freq=args.debug))

    if early_stop > 0:
        # TODO - Test multiple EarlyStopping
        callbacks.append(
            bk.EarlyStopping(monitor='val_loss',
                             patience=early_stop,
                             verbose=0))
        # callbacks.append(bk.EarlyStopping(monitor='val_accuracy', patience=early_stop, verbose=0))
    callbacks.append(
        bk.ReduceLROnPlateau(monitor='val_loss',
                             factor=.9,
                             patience=10,
                             min_lr=0.00001,
                             cooldown=0,
                             verbose=0))
    # calls.append(C.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch)))
    # calls.append( C.LearningRateScheduler(schedule=lambda epoch: args.lr * math.cos(1+( (epoch-1 % (args.epochs/cycles)))/(args.epochs/cycles) ) ))
    #    calls.append( C.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.)) )
    return callbacks
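
Index 0 of the returned list is reserved for a checkpoint; a sketch of how a caller might fill the slot (`args` is the same parsed-arguments object, and the file name is illustrative):

import tensorflow.keras.callbacks as bk

cbs = get_callbacks(args, partition_idx=0)
cbs[0] = bk.ModelCheckpoint(args.weights_dir + '/weights_best.h5',
                            monitor='val_loss', save_best_only=True)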
Example #18
    def fit_model(self, data, early_stop=False):
        self.model = self.model_builder.build_model()

        X_train = data.data["X_train"]
        X_val = data.data["X_val"]
        y_train = data.data["y_train"]
        y_val = data.data["y_val"]
        
        file_name = self.FILE_NAME_FORMAT.format(data.identifier)
    
        # usually doesn't help, so it is only attached when requested
        early_stop_cb = callbacks.EarlyStopping(
            monitor='val_loss',
            patience=self.EARLY_STOP_PATIENCE,
            verbose=0,
            mode='min')
        mcp_save = callbacks.ModelCheckpoint(
            file_name,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        cbs = [mcp_save]
        if early_stop:  # the flag argument, no longer shadowed by the callback
            cbs.append(early_stop_cb)

        return self.model.fit(
            X_train, 
            y_train, 
            epochs=self.EPOCHS, 
            batch_size=self.BATCH_SIZE, 
            validation_data=(X_val, y_val), 
            callbacks=cbs)
Example #19
def finetuningTraining(learningRate, noOfEpochs, batchSize, savedModelName,
                       train_generator, validation_generator, model, lr_decay):
    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    sgd = SGD(lr=learningRate, decay=lr_decay, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    earlystop = callbacks.EarlyStopping(monitor='val_loss',
                                        min_delta=0,
                                        patience=5,
                                        mode='auto')
    uniqueClasses = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    class_weights = class_weight.compute_class_weight('balanced',
                                                      classes=uniqueClasses,
                                                      y=train_generator.classes)
    # Keras expects class_weight as a dict mapping class index to weight.
    class_weight1 = dict(enumerate(class_weights))
    # we train our model again (this time fine-tuning the top 2 inception blocks
    # alongside the top Dense layers
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=nb_train_samples //
                                  batchSize,
                                  epochs=noOfEpochs,
                                  validation_data=validation_generator,
                                  validation_steps=nb_val_samples // batchSize,
                                  class_weight=class_weight1,
                                  callbacks=[earlystop])
    plt.clf()
    plt.plot(history.history['val_accuracy'], 'r')
    plt.plot(history.history['accuracy'], 'b')
    plt.savefig(savedModelName + '_finalModel_plot.png')
    serializeModel(model, savedModelName + "_finalModel")
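
The comment above presumes the top two inception blocks were unfrozen before recompiling; a typical sketch of that step, assuming an InceptionV3 base (layer index 249 is the conventional cut from the Keras fine-tuning docs):

def unfreeze_top_blocks(model, first_trainable=249):
    # Freeze everything below the cut, unfreeze the top two inception blocks.
    for layer in model.layers[:first_trainable]:
        layer.trainable = False
    for layer in model.layers[first_trainable:]:
        layer.trainable = True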
Example #20
def model_train(model, x_train, x_val, epochs, train_step, val_step,
                weights_path):
    """
    模型训练
    :param model: 定义好的模型
    :param x_train: 训练集数据
    :param x_val: 验证集数据
    :param epochs: 迭代次数
    :param train_step: 一个epoch的训练次数
    :param val_step: 一个epoch的验证次数
    :param weights_path: 权值保存路径
    :return: None
    """
    # 如果选成h5格式,则不会保存成ckpt的tensorflow常用格式
    # monitor是指验证参数,如何评估模型好坏的标准
    cbk = [
        callbacks.ModelCheckpoint(filepath=weights_path,
                                  save_best_only=True,
                                  save_weights_only=True,
                                  monitor='val_loss'),
        callbacks.EarlyStopping(patience=10, min_delta=1e-3)
    ]

    history = model.fit(x_train,
                        steps_per_epoch=train_step,
                        epochs=epochs,
                        validation_data=x_val,
                        validation_steps=val_step,
                        callbacks=cbk,
                        verbose=1)
Example #21
    def fit_generator(self,
                      train_generator,
                      val_generator=None,
                      epochs=10,
                      monitor='val_loss',
                      patience_count=10,
                      metrics=['accuracy'],
                      outdir=""):

        self.model.compile(optimizer=optimizers.Adagrad(),
                           loss='categorical_crossentropy',
                           metrics=metrics)

        training_callbacks = [
            callbacks.EarlyStopping(monitor=monitor, patience=patience_count),
            # callbacks.ModelCheckpoint(filepath=outdir + 'model.{epoch}.h5',
            #                           save_best_only=True,
            #                           monitor=monitor,
            #                           mode='auto')
        ]

        if val_generator is None:
            self.training_history = self.model.fit(train_generator,
                                                   epochs=epochs,
                                                   verbose=2)

        else:
            self.training_history = self.model.fit(
                train_generator,
                validation_data=val_generator,
                callbacks=training_callbacks,
                epochs=epochs,
                verbose=2)
Example #22
def train_model(model: keras.Sequential, data, epochs):
    logdir = f'logs/fit/{epochs}/' + \
        datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = callbacks.TensorBoard(log_dir=logdir,
                                                 histogram_freq=1)
    early_stop_callback = callbacks.EarlyStopping(monitor='val_accuracy',
                                                  patience=5,
                                                  min_delta=0.0001,
                                                  restore_best_weights=True,
                                                  verbose=0)
    learning_rate_callback = callbacks.LearningRateScheduler(
        lambda epoch, lr: lr if epoch < 2 else lr * 0.9, verbose=0)
    save_callback = callbacks.ModelCheckpoint(
        filepath='logs/model' + datetime.now().strftime("%Y%m%d-%H%M%S"),
        save_best_only=True)
    x_train = to_dict(data['training_data'])
    y_train = tf.convert_to_tensor(data['training_labels'])
    x_val = to_dict(data['test_data'])
    y_val = tf.convert_to_tensor(data['test_labels'])
    training_history = model.fit(x_train,
                                 y_train,
                                 epochs=epochs,
                                 validation_data=(x_val, y_val),
                                 callbacks=[
                                     tensorboard_callback, early_stop_callback,
                                     learning_rate_callback, save_callback
                                 ])

    return training_history
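
The lambda passed to LearningRateScheduler keeps the rate unchanged for the first two epochs and then applies a 0.9 decay per epoch; a quick standalone check of that schedule:

schedule = lambda epoch, lr: lr if epoch < 2 else lr * 0.9
lr = 0.001
for epoch in range(5):
    lr = schedule(epoch, lr)
    print(epoch, round(lr, 6))  # 0.001, 0.001, 0.0009, 0.00081, 0.000729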
Example #23
    def train(self, epoch, batch_size):
        print("Training the model...")

        # tensorboard = callbacks.TensorBoard(log_dir=self.LOG_DIR)
        reduceLR = callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                               factor=0.1,
                                               patience=3,
                                               verbose=1,
                                               mode='auto',
                                               min_delta=0.0001,
                                               cooldown=0,
                                               min_lr=0.000001)
        early = callbacks.EarlyStopping(monitor='val_accuracy',
                                        min_delta=0.0001,
                                        patience=5,
                                        mode='auto',
                                        restore_best_weights=True)

        self.model.fit(
            [self.train_x[:, 0], self.train_x[:, 1], self.train_x[:, 2]],
            self.train_y,
            epochs=epoch,
            batch_size=batch_size,
            validation_split=.2,
            shuffle=True,
            callbacks=[reduceLR, early])

        self.model.save(self.SAVE_DIR + 'final.hdf5')
Example #24
    def fit_model(self, X_train, y_train, X_val, y_val):

        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)

        early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                                 patience=5)
        model_checkpoint = callbacks.ModelCheckpoint('multi-digit_cnn_new.h5',
                                                     save_best_only=True)

        optimizer = Adam(learning_rate=1e-3, amsgrad=True)
        tb = callbacks.TensorBoard(log_dir="ccnlogs/{}".format(time()))
        self.model.compile(optimizer=optimizer,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.history = self.model.fit(
            X_train, [
                y_train[:, 0], y_train[:, 1], y_train[:, 2], y_train[:, 3],
                y_train[:, 4]
            ],
            batch_size=512,
            epochs=12,
            shuffle=True,
            validation_data=(X_val, [
                y_val[:, 0], y_val[:, 1], y_val[:, 2], y_val[:, 3], y_val[:, 4]
            ]),
            callbacks=[early_stopping, model_checkpoint])
Example #25
def train_model():
    model = build_model()
    print(model.summary())

    optimizer = optimizers.Adam(learning_rate=0.001)
    loss = losses.SparseCategoricalCrossentropy(from_logits=True)

    model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])

    (x_train, y_train), (x_test, y_test) = load_preprocess_data()

    epochs = 10
    n_train = 60000
    n_test = 10000
    batch_size = 32
    steps_per_epoch = n_train // batch_size
    validation_steps = n_test // batch_size

    train_data_set = convert_to_data_set(x_train,
                                         y_train,
                                         repeat_times=epochs,
                                         shuffle_buffer_size=n_train,
                                         batch_size=batch_size)

    val_data_set = convert_to_data_set(x_test,
                                       y_test,
                                       repeat_times=epochs,
                                       shuffle_buffer_size=n_test,
                                       batch_size=batch_size)

    my_callbacks = []
    early_stopping_cb = callbacks.EarlyStopping(monitor='val_loss',
                                                patience=5,
                                                restore_best_weights=True)
    my_callbacks.append(early_stopping_cb)

    tensorboard_cb = callbacks.TensorBoard(log_dir='logs')
    my_callbacks.append(tensorboard_cb)

    checkpoint_path = 'models/base_cnn/ckpt'
    checkpoint_cb = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                              save_weights_only=True,
                                              save_best_only=True)
    my_callbacks.append(checkpoint_cb)

    history = model.fit(train_data_set,
                        epochs=epochs,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=val_data_set,
                        validation_steps=validation_steps,
                        callbacks=my_callbacks)

    print('\n\n')
    train_result = model.evaluate(x_train, y_train)
    format_result(train_result, name='train')

    val_result = model.evaluate(x_test, y_test)
    format_result(val_result, name='val')

    return history
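
`convert_to_data_set` is not shown; a minimal tf.data sketch matching the call signature used above (the repeat/shuffle/batch order is an assumption):

import tensorflow as tf

def convert_to_data_set(x, y, repeat_times, shuffle_buffer_size, batch_size):
    ds = tf.data.Dataset.from_tensor_slices((x, y))
    return (ds.repeat(repeat_times)
              .shuffle(shuffle_buffer_size)
              .batch(batch_size))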
Example #26
def use_Sequential_model():
    from tensorflow.keras import Sequential, callbacks, regularizers
    BATCH_SIZE = 8
    simple_model = Sequential([
        Input(shape=(33, )),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(64, activation='sigmoid'),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    stop = callbacks.EarlyStopping(monitor='val_loss',
                                   patience=30,
                                   restore_best_weights=True)
    simple_model.compile(optimizer=optimizer,
                         loss=loss_fn,
                         metrics=['accuracy'])
    hist_simple = simple_model.fit(train_features,
                                   train_labels,
                                   epochs=500,
                                   callbacks=[stop],
                                   validation_split=0.15,
                                   batch_size=BATCH_SIZE)

    predictions = simple_model.predict(test_features)
    for i in range(10):
        print(f"Prediction: {label_names[int(np.round(predictions[i][0]))]}")
    test_loss, test_acc = simple_model.evaluate(test_features, test_labels)
    simple_model.save('simple_model')
Example #27
    def train_model(self, model):

        accuracies, f1_scores = [], []

        train_images, train_labels, test_images, \
            test_labels, validation_images, validation_labels = self.load_data()

        # Train three times
        for i in range(3):

            # To free memory on google colab.
            if K.backend() == 'tensorflow':
                K.clear_session()

            print('Training %s of 3' % (i + 1))

            # Early Stop when bad networks are identified        
            es = callbacks.EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience=10, baseline=0.5)

            model.fit(train_images, train_labels, 
                epochs=70, 
                batch_size=128, 
                verbose=0,
                validation_data=(validation_images, validation_labels), 
                callbacks=[es])
            
            loss, accuracy, f1_score = model.evaluate(test_images, test_labels, verbose=1)

            accuracies.append(accuracy)
            f1_scores.append(f1_score)

            if i == 0 and accuracy < 0.5:
                break

        return np.mean(accuracies), np.std(accuracies), np.mean(f1_scores), np.std(f1_scores)
Example #28
def f_train_model(model,
                  inpx,
                  inpy,
                  model_weights,
                  num_epochs=5,
                  batch_size=64):
    '''
    Train model. Returns just history.history
    '''
    cv_fraction = 0.33  # fraction of data held out as a validation split

    history = model.fit(x=inpx,
                        y=inpy,
                        batch_size=batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=[
                            callbacks.EarlyStopping(monitor='val_loss',
                                                    min_delta=0,
                                                    patience=20,
                                                    verbose=1),
                            callbacks.ModelCheckpoint(model_weights,
                                                      save_best_only=True,
                                                      monitor='val_loss',
                                                      mode='min')
                        ],
                        validation_split=cv_fraction,
                        shuffle=True)

    print("Number of parameters", model.count_params())

    return history.history
Example #29
 def __init__(self,
              modelFile,
              model,
              ssparam,
              earlystop=None,
              modelcheckpoint=None,
              metrics=['accuracy']):
     self.weight = K.variable(0.)
     self.modelFile = modelFile
     self.ssparam = ssparam
     self.metrics = metrics
     if earlystop is not None:
         self.earlystop = earlystop
     else:
         self.earlystop = callbacks.EarlyStopping(
             patience=self.ssparam['patience'])
     if modelcheckpoint is not None:
         self.modelcheckpoint = modelcheckpoint
     else:
         self.modelcheckpoint = callbacks.ModelCheckpoint(
             filepath=self.modelFile,
             save_weights_only=True,
             save_best_only=True)
     layer = model.get_layer('unsupLayer')
     loss2 = semisup_loss(layer.get_output_at(0), layer.get_output_at(1))
     model.compile(
         loss=[sup_loss, loss2],
         loss_weights=[1, self.weight],
         optimizer=optimizers.Adam(learning_rate=ssparam['learning_rate']),
         experimental_run_tf_function=False,
         metrics=metrics)
     self.model = model
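
`self.weight` is kept as a Keras variable used as the weight of the unsupervised loss, which suggests it is updated during training; a sketch of a ramp-up callback under that assumption (the linear schedule is hypothetical):

from tensorflow.keras import backend as K
from tensorflow.keras import callbacks

class WeightRamp(callbacks.Callback):
    # Linearly ramps a loss-weight variable from 0 to max_weight.
    def __init__(self, weight, max_weight=1.0, ramp_epochs=30):
        super().__init__()
        self.weight = weight
        self.max_weight = max_weight
        self.ramp_epochs = ramp_epochs

    def on_epoch_begin(self, epoch, logs=None):
        K.set_value(self.weight,
                    self.max_weight * min(1.0, epoch / self.ramp_epochs))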
Example #30
def fit_model(model, x_train, y_train, x_valid, y_valid, ckpt_path):
    monitor = "val_loss"
    K.clear_session()

    history = model.fit(x=x_train,
                        y=y_train,
                        batch_size=16,
                        epochs=50,
                        verbose=1,
                        callbacks=[
                            callbacks.ModelCheckpoint(filepath=ckpt_path,
                                                      monitor=monitor,
                                                      verbose=2,
                                                      save_best_only=True,
                                                      save_weights_only=True),
                            callbacks.EarlyStopping(
                                monitor=monitor,
                                min_delta=1e-4,
                                patience=25,
                                verbose=2,
                            ),
                            callbacks.ReduceLROnPlateau(monitor=monitor,
                                                        factor=0.8,
                                                        patience=3,
                                                        verbose=2,
                                                        min_lr=1e-4)
                        ],
                        validation_data=(x_valid, y_valid))

    return history