Code example #1
def build_keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(monitor='val_loss', patience=20
                                #verbose=1
                                ),
        callbacks.ModelCheckpoint(model_path,
                                  monitor='val_loss',
                                  save_best_only=True,
                                  save_weights_only=True,
                                  verbose=0),
        callbacks.ReduceLROnPlateau(monitor='val_loss',
                                    min_lr=1e-7,
                                    factor=0.2,
                                    verbose=0)
    ]
Code example #2
    def train(self, train_configs):
        """
         Train the model based on the training configurations
        :param train_configs: Configuration for the training
        """
        optimizer = adam(train_configs["learning_rate"])
        train_times = train_configs["train_times"]

        # Data sequence for training
        sequence = DataSequence(
            train_configs["data_directory"] + "data_road/training",
            train_configs["batch_size"], self.input_size)
        steps_per_epoch = len(sequence) * train_times

        # configure the model for training

        self.feature_extractor.compile(optimizer=optimizer,
                                       loss='categorical_crossentropy',
                                       metrics=['accuracy'])

        # define the callbacks for training
        tb = TensorBoard(log_dir=train_configs["logs_dir"], write_graph=True)
        mc = ModelCheckpoint(mode='max',
                             filepath=train_configs["save_model_name"],
                             monitor='acc',
                             save_best_only=True,
                             save_weights_only=True,
                             verbose=2)
        es = EarlyStopping(mode='max', monitor='acc', patience=6, verbose=1)
        model_reducelr = callbacks.ReduceLROnPlateau(
            monitor='loss',
            factor=0.2,
            patience=5,
            verbose=1,
            min_lr=0.05 * train_configs["learning_rate"])

        callback = [tb, mc, es, model_reducelr]

        # Train the model on data generated batch-by-batch by the DataSequence generator
        self.feature_extractor.fit_generator(sequence,
                                             steps_per_epoch=steps_per_epoch,
                                             epochs=train_configs["nb_epochs"],
                                             verbose=1,
                                             shuffle=True,
                                             callbacks=callback,
                                             workers=3,
                                             max_queue_size=8)
Code example #3
def train(X_train, y_train, X_val, y_val, subject):
    
    X_shape = X_train.shape
    #X_train = np.split(X_train, [1,2,3], axis=4)
    #X_val = np.split(X_val, [1,2,3], axis=4) 
    
    n_epoch = 500
    early_stopping = 30
    classes_len = len(np.unique(y_train))

    Y_train = to_categorical(y_train, classes_len)
    Y_val = to_categorical(y_val, classes_len)
    output_dim = classes_len
    loss = 'categorical_crossentropy'
    activation = 'softmax'
    
    inputs = Input(shape=(X_shape[1],X_shape[2],X_shape[3]))
    
    def layers(inputs):
        
        pipe = Convolution2D(40, (1,22), strides=(1,1), padding='valid')(inputs)
        #pipe = se_block()(pipe)
        pipe = LeakyReLU(alpha=0.05)(pipe)
        pipe = Dropout(0.5)(pipe)
        pipe = BatchNormalization()(pipe)
        
        pipe = Reshape((pipe.shape[1].value, 40))(pipe)

        pipe = AveragePooling1D(pool_size=(75), strides=(15))(pipe)
        pipe = Flatten()(pipe)
        return pipe
    
    pipeline = layers(inputs)

    output = Dense(output_dim, activation=activation)(pipeline)
    model = Model(inputs=inputs, outputs=output)

    opt = optimizers.adam(lr=0.001, beta_2=0.999)
    model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
    cb = [callbacks.ProgbarLogger(count_mode='samples'),
          callbacks.ReduceLROnPlateau(monitor='loss',factor=0.5,patience=7,min_lr=0.00001),
          callbacks.ModelCheckpoint('./model_results_global/A0{:d}_model.hdf5'.format(subject),monitor='val_loss',verbose=0,
                                    save_best_only=True, period=1),
          callbacks.EarlyStopping(patience=early_stopping, monitor='val_acc', min_delta=0.0001)]
    model.summary()
    model.fit(X_train, Y_train, validation_data=(X_val, Y_val), 
              batch_size=128, epochs=n_epoch, verbose=1, callbacks=cb)
Code example #4
def custom_model_network(input_data,
                         learning_rate,
                         embdim,
                         embreg,
                         batchsize,
                         sentlength,
                         hiddennodes,
                         hiddenL1,
                         hiddenlayer=False):
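    # note: `tokenizer` and `y_train` are not parameters here; they are assumed
    # to be defined in the enclosing scope before this function is called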
    sequences = tokenizer.texts_to_sequences(input_data)
    word_index = tokenizer.word_index
    max_words = len(word_index)
    data = pad_sequences(sequences,
                         maxlen=sentlength,
                         padding="post",
                         truncating="post")
    model = Sequential()
    model.add(
        Embedding(max_words + 1,
                  output_dim=embdim,
                  input_length=sentlength,
                  embeddings_regularizer=regularizers.l1(embreg)))
    model.add(keras.layers.Lambda(lambda x: keras.backend.mean(x, axis=1)))
    if hiddenlayer:
        model.add(
            Dense(hiddennodes,
                  activation='relu',
                  kernel_regularizer=regularizers.l1(hiddenL1)))
    model.add(Dense(1, activation='sigmoid'))

    callback_list = [
        callbacks.EarlyStopping(monitor='val_loss',
                                patience=10,
                                restore_best_weights=True),
        callbacks.ReduceLROnPlateau(monitor='val_loss', factor=.2, patience=5)
    ]
    model.compile(optimizer=optimizers.rmsprop(lr=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['acc'])

    history = model.fit(data,
                        y_train,
                        epochs=1500,
                        batch_size=batchsize,
                        callbacks=callback_list,
                        validation_split=0.2)
    return history
Code example #5
File: train.py Project: zhizhenzhong/learn_Q_factor
def model_training(input_data, output_data, n_steps, n_features, channel,
                   segment):

    rnn_size = 32  #32 is good for 96 entry prediction history
    reduce_patience = 8
    stop_patience = 15  # early stop
    epochs = 200

    #LSTM prediction model
    model = Sequential()
    model.add(
        layers.Bidirectional(layers.LSTM(rnn_size, activation='relu'),
                             input_shape=(n_steps, n_features)))
    #model.add(layers.GlobalMaxPool1D())
    #model.add(layers.LSTM(rnn_size, activation='relu'))
    #model.add(layers.Dense(dense_size))
    #model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(1))

    model.compile(optimizer='adam',
                  loss=root_mean_squared_error,
                  metrics=['accuracy'])
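    # note: 'accuracy' is not a meaningful metric for this regression loss;
    # ReduceLROnPlateau below monitors 'val_loss' (its default), which is the relevant quantity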
    model.summary()
    plot_model(model, to_file='model.png', show_shapes=True)
    # train the model each generation and show predictions against the validation dataset
    reducer = callbacks.ReduceLROnPlateau(patience=reduce_patience, verbose=1)
    stopper = callbacks.EarlyStopping(patience=stop_patience, verbose=1)
    updater = UpdateMonitor()
    plothistory = LossHistory()

    print('start training')
    model.fit(input_data,
              output_data,
              epochs=epochs,
              validation_split=1. / 5,
              shuffle=True,
              callbacks=[updater, reducer, stopper, plothistory])
    print('\ntrain finished\n')

    plothistory.loss_plot('epoch')
    loss_file = 'loss_ch_' + str(channel) + '_seg_' + str(segment) + '.pdf'
    loss_location = os.path.join(_MODELS_DIR, loss_file)
    plt.savefig(loss_location, dpi=175)
    _MODELNAME = 'LSTM_ch_' + str(channel) + '_seg_' + str(segment) + '.h5'
    model_location = os.path.join(_MODELS_DIR, _MODELNAME)
    model.save(model_location)
    print('\nmodel saved!\n')
Code example #6
    def get_callbacks(self, model_prefix='Model'):
        callback_list = [
            callbacks.ModelCheckpoint(
                "gdrive/My Drive/Kaggle/tgiSaltChallenge/ModelCheckPoints/TestModel_ResNet50_Jan11_firstTrain_checkpoint.h5",
                monitor='val_my_iou_metric',
                mode='max',
                save_best_only=True,
                verbose=1), self.swa,
            callbacks.LearningRateScheduler(
                schedule=self._cosine_anneal_schedule),
            callbacks.ReduceLROnPlateau(factor=0.1,
                                        patience=5,
                                        min_lr=0.0000001,
                                        verbose=1)
        ]

        return callback_list
Code example #7
def addCallBacks(model_params):

    callback_list = []
    if model_params["debug"] == False:
        if model_params["save_weight_only"]:
            #callback_list.append(callbacks.CSVLogger(filename=os.path.join('logging',model_params["model_name"]+'.csv')))
            callback_list.append(
                callbacks.ModelCheckpoint(os.path.join(
                    'weights', model_params["model_name"] + '.h5'),
                                          save_best_only=True,
                                          save_weights_only=True))
        else:
            callback_list.append(
                callbacks.ModelCheckpoint(os.path.join(
                    'models', model_params["model_name"] + '.hdf5'),
                                          save_best_only=True,
                                          save_weights_only=False))
    if model_params["TensorBoard_logging"] == True:
        testLogger = customCallbacks.LoggingTensorBoard(
            log_dir=os.path.join('tensorboard_logs',
                                 model_params["model_name"]),
            model_params=model_params,
            write_graph=False)
        callback_list.append(testLogger)
    if model_params["CustomPrint"] == True:
        my_callback = customCallbacks.CustomPrintCallback(model_params,
                                                          metric="loss")
        callback_list.append(my_callback)

    if (model_params["early_stopping"]):
        callback_list.append(
            callbacks.EarlyStopping(monitor='loss',
                                    min_delta=0.0001,
                                    patience=10,
                                    verbose=1,
                                    mode='auto'))

    if (model_params["rlr"]):
        callback_list.append(
            callbacks.ReduceLROnPlateau(monitor='loss',
                                        patience=10,
                                        verbose=1,
                                        factor=0.5,
                                        min_lr=0.000000001))

    return callback_list
Code example #8
    def train(self):
        train_examples = self.ge.get_TrainExamples()
        test_examples = self.ge.get_TestExamples()

        model = res_Net50(self.input, classes=self.ge.get_Classes())
        # enable multi-GPU parallelism
        #model = multi_gpu_model(model, gpus=2)
        logging = callbacks.TensorBoard(log_dir=self.log_dir)
        checkpoint = callbacks.ModelCheckpoint(
            self.log_dir + '/' +
            'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
            monitor='val_loss',
            save_best_only=True,
            mode='min',
            save_weights_only=True,
            period=1)
        reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                factor=0.1,
                                                patience=9,
                                                verbose=1)
        early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                                 min_delta=0,
                                                 patience=19,
                                                 verbose=1)

        # specify the training scheme; the commented loop below would freeze all but the last three layers
        # for i in range(len(model.layers)-3):
        #     model.layers[i].trainable = False
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        if os.path.exists(self.log_dir + '/' + 'train_weights.h5'):
            model.load_weights(self.log_dir + '/' + 'train_weights.h5',
                               by_name=True)
            #model.load_weights()

        model.fit_generator(
            self.ge.train_Generate(),
            steps_per_epoch=max(1, train_examples // self.batch_size),
            validation_data=self.ge.test_Generate(),
            validation_steps=max(1, test_examples // self.batch_size),
            epochs=100,
            initial_epoch=0,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])  #
        model.save_weights(self.log_dir + '/' + 'train_weights.h5')
        model.save(self.log_dir + '/' + 'train_models.h5')
Code example #9
def train(model, data, args):
    # unpacking the data
    (x_train, y_train), (x_test, y_test) = data

    def train_generator(x, y, batch_size):
        train_datagen = ImageDataGenerator(width_shift_range=0.1,
                                           height_shift_range=0.1,
                                           rotation_range=15,
                                           horizontal_flip=True)
        generator = train_datagen.flow(x, y, batch_size=batch_size)
        while True:
            x_batch, y_batch = generator.next()
            yield (x_batch, y_batch)

    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    tb = TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
                     batch_size=args.batch_size,
                     histogram_freq=int(args.debug))
    checkpoint = callbacks.ModelCheckpoint(args.save_dir +
                                           '/weights-{epoch:02d}.h5',
                                           monitor='val_acc',
                                           save_best_only=True,
                                           save_weights_only=True,
                                           verbose=1)

    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.5,
                                            verbose=1,
                                            patience=10,
                                            min_lr=0.00001)

    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=generalized_kullback_leibler_divergence,
                  metrics={'lvq_caps': 'accuracy'})

    model.fit_generator(
        generator=train_generator(x_train, y_train, args.batch_size),
        steps_per_epoch=int(y_train.shape[0] / args.batch_size),
        epochs=args.epochs,
        validation_data=[x_test, y_test],
        callbacks=[log, tb, checkpoint, reduce_lr])

    model.save_weights(args.save_dir + '/trained_model.h5')
    print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)

    return model
Code example #10
File: train.py Project: TooTouch/Kaggle
    def training(self, train, val, size, weights=None):
        print('=' * 100)
        print('Start training')
        print('-' * 100)
        if self.self_training:
            if 'selftraining' not in self.save_name:
                self.save_name += '_selftraining'

        ckp = cb.ModelCheckpoint(self.model_dir + '/' + self.save_name + '.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto',
                                 period=1)
        es = cb.EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=10,
                              verbose=1,
                              mode='auto')
        rlp = cb.ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.75,
                                   patience=5,
                                   verbose=1,
                                   mode='auto',
                                   min_delta=0.0001,
                                   cooldown=0,
                                   min_lr=0.00001)
        start = time.time()
        history = self.model.fit_generator(
            generator=train,
            steps_per_epoch=size[0] // self.batch_size * 3,
            epochs=self.epochs,
            validation_data=val,
            validation_steps=size[1] // self.batch_size,
            class_weight=weights,
            callbacks=[es, ckp, rlp])
        e = int(time.time() - start)
        print('-' * 100)
        print('Complete')
        print('-' * 100)
        train_time = '{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60),
                                                   e % 60)
        print('Train time: ', train_time)
        print('=' * 100)
        return history, train_time
Code example #11
def neural_network():
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras import initializers, regularizers
    from keras.layers.normalization import BatchNormalization
    from keras import optimizers
    np.random.seed(1337)  # for reproducibility

    batch_size = 100
    nb_epoch = 100

    x, y = train_data()
    test_data = test_data1('data/CH2014BST.txt')
    ss = preprocessing.StandardScaler()
    x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, random_state=random.randint(1, 100),
                                                                        test_size=0.2)

    x_train = ss.fit_transform(x_train)
    x_test = ss.transform(x_test)
    test_data_x = ss.transform(test_data[:, :15])
    test_data_y = test_data[:,15:17]
    model = Sequential()
    model.add(Dense(32, input_shape=(15,), kernel_initializer=initializers.random_normal(stddev=0.01), use_bias=True,
                    activation='relu'
                    ))
    model.add(
        Dense(64, kernel_initializer=initializers.random_normal(stddev=0.01), activation='relu', use_bias=True
              ))
    model.add(
        Dense(64, kernel_initializer=initializers.random_normal(stddev=0.01), use_bias=True
              ))
    model.add(
        Dense(32, kernel_initializer=initializers.random_normal(stddev=0.01), use_bias=True
              ))
    model.add(Dense(2, kernel_initializer=initializers.random_normal(stddev=0.01)))
    import keras.callbacks as cb

    model.compile(loss=loss_function, optimizer='nadam', metrics=[error_1, error_2])
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=nb_epoch, verbose=2,
                        validation_data=(x_test, y_test),
                        callbacks=[cb.ReduceLROnPlateau(monitor='val_loss')]  # defaults: factor=0.1, patience=10
                        )
    predict = model.predict(test_data_x)
    print(error_1(test_data[:, 15:17], predict))
    print(error_2(test_data[:, 15:17], predict))
    print(mean_error(test_data_y, predict))
Code example #12
File: dae__utils.py Project: Hashizu/iida_utils_py
def DAE(df, d_df):
    """DenoisingAutoEncorder by keras
    
    Parameters
    ----------
    df : DataFrame
        Original input-DataFrame
    d_df : DataFrame 
        Added noise input-DataFrame
    
    """

    array = np.array(df)
    d_array = np.array(d_df)
    X_train, X_test, X_train_d, X_test_d = train_test_split(array, d_array, test_size=0.1, random_state=42)
    
    inp = Input(shape=(X_train_d.shape[1],))
    x = Dense(1000, activation="relu")(inp)
    x = BatchNormalization()(x)
    x = Dropout(rate = 0.3)(x)
    x = Dense(1000, activation="relu")(x)
    x = BatchNormalization()(x)
    x = Dropout(rate = 0.3)(x)
    x = Dense(2500, activation="relu")(x)
    x = BatchNormalization()(x)
    x = Dropout(rate = 0.2)(x)
    out = Dense(X_train.shape[1], activation="relu")(x)
    clf = Model(inputs=inp, outputs=out)
    clf.compile(loss='mean_squared_error', optimizer='adam')
    es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=1,
                                 verbose=1, mode='auto', baseline=None, restore_best_weights=True)
    rlr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                      patience=3, min_lr=1e-6, mode='max', verbose=0)
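    # note: rlr is constructed but never passed to fit() below, so only early stopping runs.
    # Also, mode='max' treats a falling val_loss as no improvement; mode='min' or 'auto'
    # is likely intended if this callback is enabled.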
    clf.fit(X_train_d, X_train, validation_data=(X_test_d, X_test),
            callbacks=[es], epochs=500, batch_size=512,verbose=1)
    
    _input = clf.layers[0].input
    _output = clf.layers[1].output
    _output2 = clf.layers[2].output
    _output3 = clf.layers[3].output
    
    func = K.function([_input, K.learning_phase()], [_output,_output2, _output3])    
    d_array = [func(([d_array[x]],0)) for x in range(len(array))]
    
    return np.array(d_array)
Code example #13
def create_model(data):
    # X_train, y_train, X_val, y_val
    from keras import models
    from keras import layers
    import numpy as np
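    # note: the {{choice(...)}} and {{uniform(...)}} expressions below are hyperas
    # search-space templates; hyperas rewrites this function before it runs as plain Python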

    model = models.Sequential()
    model.add(
        layers.Dense(
            {{choice([np.power(2, 5),
                      np.power(2, 6),
                      np.power(2, 7)])}},
            input_shape=(len(data.columns), )))
    model.add(LeakyReLU(alpha={{uniform(0.5, 1)}}))
    model.add(Dropout({{uniform(0.5, 1)}}))
    model.add(
        layers.Dense(
            {{choice([np.power(2, 3),
                      np.power(2, 4),
                      np.power(2, 5)])}}))
    model.add(LeakyReLU(alpha={{uniform(0.5, 1)}}))
    model.add(Dropout({{uniform(0.5, 1)}}))
    model.add(layers.Dense(1, activation='sigmoid'))

    from keras import callbacks

    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.2,
                                            patience=5,
                                            min_lr=0.001)

    model.compile(optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(X_train,
              y_train,
              epochs={{choice([25, 50, 75, 100])}},
              batch_size={{choice([16, 32, 64])}},
              validation_data=(X_val, y_val),
              callbacks=[reduce_lr])

    score, acc = model.evaluate(X_val, y_val, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Code example #14
    def learning_callbacks(self):
        callbacks = []
        # early stopping
        if self.use_earlystopping:
            callbacks.append(kc.EarlyStopping(**self.earlystopping_config))
        # reduce learning rate
        if self.use_reduceLR:
            callbacks.append(kc.ReduceLROnPlateau(**self.reduceLR_config))
        # checkpoint
        if self.use_checkpoint:
            callbacks.append(kc.ModelCheckpoint(self.weights_path,
                                                **self.checkpoint_config))
        # tensorboard (the attribute names keep the original "tensorborad" spelling used elsewhere in this class)
        if self.use_tensorborad:
            callbacks.append(kc.TensorBoard(self.log_dir,
                                            **self.tensorborad_config))

        return callbacks
Code example #15
File: KerasCallbacks.py Project: ErfanThinker/PAN19
def get_callbacks_list_convnet():
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=35,
        ),
        callbacks.ModelCheckpoint(
            filepath='my_model_convnet.h5',
            monitor='val_loss',
            save_best_only=True,
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            verbose=1,
            patience=10,
        )
    ]
Code example #16
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

            (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES,
            )
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            model = test_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM,
            )
            model.compile(
                loss="binary_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )

            cbks = [
                callbacks.ReduceLROnPlateau(monitor="val_loss",
                                            factor=0.5,
                                            patience=4,
                                            verbose=1),
                callbacks_v1.TensorBoard(log_dir=temp_dir),
            ]

            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )

            assert os.path.exists(temp_dir)
Code example #17
    def _train(self):
        data, label = self._load_data()
        train_data, train_label, validate_data, validate_label, test_data, test_label = split_data(data, label,
                                                                                                   to_categorical=True)
        network_input = Input(shape=(100, 3))
        network = LSTM(32, return_sequences=True)(network_input)
        network = LSTM(32)(network)
        network = Dense(5, activation=softmax)(network)
        network = Model(inputs=[network_input], outputs=[network])
        network.compile(optimizer=RMSprop(lr=0.01), loss=categorical_crossentropy, metrics=[categorical_accuracy])
        network.summary()
        callback = [
            callbacks.ReduceLROnPlateau(monitor="categorical_accuracy", factor=0.1, patience=3)
        ]
        self.train_history = network.fit(train_data, train_label,
                                         validation_data=(validate_data, validate_label), batch_size=self.BATCH_SIZE,
                                         epochs=self.EPOCHS, callbacks=callback)
        self.evaluate_history = network.evaluate(test_data, test_label, batch_size=self.BATCH_SIZE)
        return network
Code example #18
    def __create_callbacks(self):
        filepath = 'RopePrediction-{epoch:02d}-{val_loss:.2f}.hdf5'
        checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True,
                                               save_weights_only=False, mode='auto', period=1)

        early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=100,
                                             verbose=0, mode='auto')

        reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                                patience=3, min_lr=0.00001)

        tensorboard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32,
                                            write_graph=True, write_grads=False, write_images=False,
                                            embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)

        csvLogger = callbacks.CSVLogger('C:/tmp/LearningLogs/log.csv', separator=',', append=False)

        callbacks_list = [checkpoint, early_stop, reduce_lr, tensorboard, csvLogger]
        return callbacks_list
Code example #19
    def fit(self,
            train_x,
            train_y,
            valid_x,
            valid_y,
            predicted=False,
            filename='trained_models/best.model'):
        lr_decay = callbacks.ReduceLROnPlateau(
            monitor='val_acc',
            factor=0.5,
            patience=self.config['lr_decay_epoch'],
            min_lr=0.01 * self.config['learning_rate'])
        csv_log = callbacks.CSVLogger(filename.replace('.model', '.csv'))
        es = callbacks.EarlyStopping(monitor='val_acc',
                                     patience=self.config['n_stop'])
        mc = callbacks.ModelCheckpoint(filename,
                                       monitor='val_acc',
                                       save_best_only=True,
                                       save_weights_only=True)

        train_steps = int((len(train_y) - 1) / self.config['batch_size']) + 1
        valid_steps = int((len(valid_y) - 1) / self.config['batch_size']) + 1
        train_batches = self.data_generator(train_x, train_y,
                                            self.config['batch_size'],
                                            train_steps)
        valid_batches = self.data_generator(valid_x, valid_y,
                                            self.config['batch_size'],
                                            valid_steps)
        hist = self.model.fit_generator(train_batches,
                                        train_steps,
                                        epochs=self.config['epochs'],
                                        callbacks=[lr_decay, csv_log, es, mc],
                                        validation_data=valid_batches,
                                        validation_steps=valid_steps)

        # hist = self.model.fit(train_x, train_label, batch_size=self.config['batch_size'], epochs=self.config['epochs'],
        #                       validation_data=(valid_x, valid_y), callbacks=[lr_decay, csv_log, es, mc])
        best_acc = max(hist.history['val_acc'])
        if predicted:
            self.model.load_weights(filename)
            return self.predict(valid_x), best_acc
        else:
            return best_acc
Code example #20
File: segmentation.py Project: danilons/IRRA
def main():
    if not os.path.exists(CHECKPOINT_PATH):
        os.makedirs(CHECKPOINT_PATH)

    mean = np.load(MEAN_FILE)
    model = build_model(N_CLASSES, trainable=True)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=LEARNING_RATE, momentum=0.9),
                  metrics=['accuracy'])

    model_checkpoint = callbacks.ModelCheckpoint(CHECKPOINT_PATH + '/snapshot_ep{epoch:02d}-vl{val_loss:.4f}.hdf5', monitor='loss')
    tensorboard_cback = callbacks.TensorBoard(log_dir='{}/tboard'.format(CHECKPOINT_PATH),
                                              histogram_freq=0,
                                              write_graph=False,
                                              write_images=False)

    csv_log_cback = callbacks.CSVLogger('{}/history.log'.format(CHECKPOINT_PATH))
    reduce_lr_cback = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.2,
                                                  patience=5,
                                                  verbose=1,
                                                  min_lr=0.05 * LEARNING_RATE)

    weights_data = np.load(WEIGHTS, encoding='latin1').item()
    for layer in model.layers:
        if layer.name in weights_data.keys():
            print("Loading weight for layer {}".format(layer.name))
            layer_weights = weights_data[layer.name]
            layer.set_weights((layer_weights['weights'],
                               layer_weights['biases']))

    model.fit_generator(generate_samples(mean, mode='train', batch_size=BATCH_SIZE),
                        steps_per_epoch=STEPS_PER_EPOCH,
                        epochs=EPOCHS,
                        validation_data=generate_samples(mean, mode='test', batch_size=BATCH_SIZE),
                        validation_steps=VAL_STEPS_PER_EPOCH,
                        callbacks=[model_checkpoint,
                                   tensorboard_cback,
                                   csv_log_cback,
                                   reduce_lr_cback])

    model.save(FILENAME)
Code example #21
    def get_callbacks(self):

        checkpointer = callbacks.ModelCheckpoint(filepath=self.save_path,
                                                 monitor='val_loss',
                                                 verbose=1,
                                                 save_best_only=True)
        early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                                 min_delta=1e-7,
                                                 patience=15,
                                                 verbose=1,
                                                 mode='auto')
        reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                factor=0.1,
                                                patience=5,
                                                verbose=1)
        csv_logger = callbacks.CSVLogger(self.train_log_path,
                                         separator=',',
                                         append=False)
        # tensorboard = TensorBoard(log_dir='logs/', histogram_freq=0, batch_size=self.batch_size, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
        return [early_stopping, checkpointer, reduce_lr, csv_logger]
Code example #22
    def train(model, src_dir, save_dir, img_size, batch_size, max_label_length,
              aug_nbr, epochs):
        print("[*] Setting up for checkpoints.")
        ckpt = callbacks.ModelCheckpoint(
            save_dir +
            "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",  # callback that saves the model
            save_weights_only=True,
            save_best_only=True)  # save weights only, and only when the monitored value improves
        reduce_lr_cbk = callbacks.ReduceLROnPlateau(
            monitor='val_loss', patience=3)  # reduce the LR when val_loss stops improving for 3 epochs
        print("[*] Setting up for compiler.")
        model.compile(optimizer='adam',
                      loss={'ctc_loss_output': fake_ctc_loss})  # optimizer and loss function
        print("[*] Preparing data generator.")
        train_list, val_list = train_val_split(src_dir)
        train_gen = DataGenerator(train_list,
                                  img_shape=img_size,
                                  down_sample_factor=4,
                                  batch_size=batch_size,
                                  max_label_length=max_label_length,
                                  max_aug_nbr=aug_nbr,
                                  width_shift_range=15,
                                  height_shift_range=10,
                                  zoom_range=12,
                                  shear_range=15,
                                  rotation_range=20,
                                  blur_factor=5,
                                  add_noise_factor=0.01)
        val_gen = DataGenerator(val_list, img_size, batch_size,
                                max_label_length)
        print("[*] Training start!")
        model.fit_generator(generator=train_gen.flow(),
                            steps_per_epoch=200,
                            validation_data=val_gen.flow(),
                            validation_steps=val_gen.data_nbr // batch_size,
                            callbacks=[ckpt, reduce_lr_cbk],
                            epochs=epochs)  # train with the fit_generator API
        print("[*] Training finished!")
        model.save(save_dir + "crnn_model.h5")
        print("[*] Model has been successfully saved in %s!" % save_dir)
        return 0
Code example #23
def callback_for_training(tf_log_dir_name='./log/',
                          patience_lr=10,
                          snapshot_name=None):
    cb = [None] * 3
    """
    Tensorboard log callback
    """
    tb = callbacks.TensorBoard(log_dir=tf_log_dir_name, histogram_freq=0)
    cb[0] = tb
    """
    Early Stopping callback
    """
    # Uncomment for usage
    # early_stop = callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=5, verbose=1, mode='auto')
    # cb.append(early_stop)
    """
    Model Checkpointer
    """
    if snapshot_name is not None:
        checkpointer = callbacks.ModelCheckpoint(
            filepath=snapshot_name + ".{epoch:02d}-{val_acc:.2f}.hdf5",
            verbose=0,
            monitor='val_acc')
    else:
        # fall back to a default name when no snapshot name is given
        checkpointer = callbacks.ModelCheckpoint(
            filepath="optic-net.{epoch:02d}-{val_acc:.2f}.hdf5",
            verbose=0,
            monitor='val_acc')
    cb[1] = checkpointer
    """
    Reduce Learning Rate
    """
    reduce_lr_loss = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.1,
                                                 patience=6,
                                                 verbose=1,
                                                 min_lr=1e-8,
                                                 mode='auto')
    cb[2] = reduce_lr_loss

    return cb
Code example #24
def callback(
    model_name,
    tf_log_dir_name='./tf-log/',
    patience_lr=2,
):
    cb = []
    """
    Tensorboard log callback
    """
    tb = callbacks.TensorBoard(log_dir=tf_log_dir_name, histogram_freq=0)
    cb.append(tb)
    """
    Model-Checkpoint
    """
    m = callbacks.ModelCheckpoint(filepath=model_name,
                                  monitor='val_loss',
                                  mode='auto',
                                  save_best_only=True)
    cb.append(m)
    """
    Reduce Learning Rate
    """
    reduce_lr_loss = callbacks.ReduceLROnPlateau(monitor='loss',
                                                 factor=0.1,
                                                 patience=patience_lr,
                                                 verbose=1,
                                                 min_delta=1e-4,
                                                 mode='min')
    cb.append(reduce_lr_loss)
    """
    Early Stopping callback
    """
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         min_delta=0,
                                         patience=5,
                                         verbose=1,
                                         mode='auto')
    cb.append(early_stop)

    return cb
Code example #25
def run():
    model = build_model(is_training=True)

    ckpt = callbacks.ModelCheckpoint(MODEL_OUT_DIR + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                     save_weights_only=True, save_best_only=True)
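    # the other ReduceLROnPlateau arguments are left at their defaults
    # (monitor='val_loss', factor=0.1, min_lr=0)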
    reduce_lr_cbk = callbacks.ReduceLROnPlateau(patience=3)
    early_stop = callbacks.EarlyStopping(monitor="loss", min_delta=0.001, patience=3, mode="min", verbose=1)
    model.compile(optimizer='adam', loss={'ctc_loss_output': lambda y_true, y_pred: y_pred})

    train_gen = DataGenerator(TRAIN_TXT_PATH)
    val_gen = DataGenerator(VAL_TXT_PATH)
    print("[*] Training start!")
    model.fit_generator(generator=train_gen.flow(),
                        steps_per_epoch=train_gen.data_nbr // BATCH_SIZE,
                        validation_data=val_gen.flow(),
                        validation_steps=val_gen.data_nbr // BATCH_SIZE,
                        callbacks=[ckpt, reduce_lr_cbk, early_stop],
                        epochs=EPOCH)
    print("[*] Training finished!")
    model.save(MODEL_OUT_DIR + "crnn_model.h5")
    print("[*] Model has been successfully saved in %s!" % MODEL_OUT_DIR)
Code example #26
File: dae__utils.py Project: Hashizu/iida_utils_py
def NN_model(X_train, y_train, X_val, y_val, X_test, y_test, test):
    inp = Input(shape=(X_train.shape[1],))
    x = Dense(500, activation="relu")(inp)
    x = BatchNormalization()(x)
    x = Dropout(rate = 0.5)(x)

    out = Dense(1, activation="relu")(x)
    clf = Model(inputs=inp, outputs=out)
    clf.compile(loss='mape', optimizer='adam')
    es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10,
                                 verbose=1, mode='auto', baseline=None, restore_best_weights=True)
    rlr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.945,
                                      patience=3, min_lr=1e-6, mode='max', verbose=0)
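    # note: rlr is never passed to fit() below, so only early stopping is active;
    # mode='max' with a val_loss monitor would also misread improvement if it were used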
    clf.fit(X_train, y_train, validation_data=(X_val, y_val),
            callbacks=[es], epochs=500, batch_size=16,verbose=1)
    
    train_pred = clf.predict(X_train)
    test_pred = clf.predict(X_test)
    preds = clf.predict(test)
    
    return train_pred, test_pred, preds
Code example #27
def train_lstm(model, x_train, y_train, x_test, y_test):
    opt = Adagrad(lr=0.02)
    ckpt_save = callbacks.ModelCheckpoint('best-cnn.hdf5',
                                          save_best_only=True,
                                          monitor='val_accuracy',
                                          mode='max')
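    # note: 'val_accuracy' is only logged if the model is compiled with an accuracy
    # metric; compile() below sets no metrics, so this checkpoint would skip saving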
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.9,
                                            patience=25,
                                            min_lr=0.000001,
                                            verbose=1)
    model.compile(loss='mean_squared_error',
                  optimizer=opt)  # Try SGD, adam, adagrad and compare!!!
    model.fit(x_train,
              y_train,
              epochs=5,
              batch_size=1,
              verbose=2,
              validation_data=(x_test, y_test),
              shuffle=False,
              callbacks=[reduce_lr, ckpt_save])
Code example #28
    def train_function(self, compiled_model, train_generator, valid_generator,
                       callbacks):

        early_stop = keras_cb.EarlyStopping(monitor="val_loss",
                                            patience=10,
                                            verbose=1,
                                            mode="min")
        reduce_lr = keras_cb.ReduceLROnPlateau(monitor="val_loss",
                                               factor=0.5,
                                               patience=3,
                                               verbose=1,
                                               mode="min",
                                               min_lr=1e-7)
        cb_ci = ConcordanceIndex((train_generator.x, train_generator.y),
                                 (valid_generator.x, valid_generator.y),
                                 freq=1)

        callbacks = [early_stop, reduce_lr, cb_ci] + callbacks

        return super().train_function(compiled_model, train_generator,
                                      valid_generator, callbacks)
Code example #29
def main():
    """训练
    """

    x_train, x_test, y_train, y_test, class_weight = pickle.load(
        open('train_data.pkl', 'rb'))
    ws = pickle.load(open('ws.pkl', 'rb'))

    input_shape = x_train[0].shape
    model, atten_model = build_model(input_shape, len(ws))

    nb_epoch = 20
    batch_size = 64 * 8
    steps_per_epoch = int(len(x_train) / batch_size) + 1
    validation_steps = int(len(x_test) / batch_size) + 1

    model.save_weights('./model_gru_last_weights.hdf5')
    atten_model.save_weights('./model_gru_last_weights.hdf5')
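    # note: both calls write to the same file, so the atten_model weights overwrite
    # the model weights (the same two calls are repeated after training below)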

    model.fit_generator(generator=batch_flow(x_train,
                                             y_train,
                                             batch_size=batch_size),
                        steps_per_epoch=steps_per_epoch,
                        epochs=nb_epoch,
                        validation_data=batch_flow(x_test,
                                                   y_test,
                                                   batch_size=batch_size),
                        validation_steps=validation_steps,
                        class_weight=class_weight,
                        callbacks=[
                            callbacks.ReduceLROnPlateau(min_lr=1e-6),
                            callbacks.ModelCheckpoint(
                                monitor='val_acc',
                                filepath='./model_gru_weights.hdf5',
                                verbose=1,
                                save_best_only=True)
                        ])

    model.save_weights('./model_gru_last_weights.hdf5')
    atten_model.save_weights('./model_gru_last_weights.hdf5')
Code example #30
def fit_model(model, cfg, X_train, Y_train, X_test, Y_test, save_model_dir,
              validation_split):
    hist = None

    def scheduler(epoch):
        lr = None
        if epoch > 20:
            lr = 0.0005
        elif epoch > 10:
            lr = 0.001
        else:
            lr = 0.005
        return lr

    change_lr = LearningRateScheduler(scheduler, verbose=1)

    if validation_split:
        reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                verbose=1,
                                                patience=5,
                                                mode='auto',
                                                epsilon=0.0001)
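        # note: 'epsilon' is the older Keras name for this threshold; newer
        # releases call it 'min_delta'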
        hist = model.fit(X_train,
                         Y_train,
                         validation_split=validation_split,
                         batch_size=cfg.batch_size,
                         epochs=cfg.epochs,
                         verbose=2,
                         callbacks=[reduce_lr])
    else:
        hist = model.fit(X_train,
                         Y_train,
                         callbacks=[change_lr],
                         batch_size=cfg.batch_size,
                         epochs=cfg.epochs,
                         verbose=2,
                         shuffle=True,
                         validation_data=(X_test, Y_test))
    model.save(save_model_dir)
    return hist