Example #1
0
    def train(self):
        """Train self.model on augmented MNIST and report test loss/accuracy.

        Reads MNIST from ../MNIST_data/, streams rotation/shift-augmented
        batches through fit_generator for 20 epochs, logs train/val curves
        to TensorBoard (./logs/cnn4), then evaluates on the test split.
        """
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        # Reshape the flat 784-vectors into NHWC images for the augmenter/CNN.
        x_train = mnist.train.images.reshape(-1, 28, 28, 1)
        x_test = mnist.test.images.reshape(-1, 28, 28, 1)
        y_test = mnist.test.labels

        # Light affine augmentation: small rotations plus 20% shifts.
        augmenter = ImageDataGenerator(rotation_range=20,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2)
        augmenter.fit(x_train)

        tensorboard = TrainValTensorBoard(log_dir='./logs/cnn4',
                                          histogram_freq=1,
                                          write_grads=True)
        self.model.fit_generator(
            augmenter.flow(x_train, mnist.train.labels),
            epochs=20,
            verbose=1,
            validation_data=(x_test, y_test),
            callbacks=[tensorboard])

        # Final held-out evaluation; score = [loss, accuracy].
        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
    def train(self, data, labels, epochs, batch_size, optimizer_config,
              loss_func_name, metrics_names, x_val, y_val, log_training,
              log_tensorboard):
        """
        Compile this model from declarative configuration and fit it.

        :param data: Input samples to train on.
        :param labels: Integer class labels for ``data``.
        :param epochs: Number of training epochs to perform.
        :param batch_size: Number of samples per training batch.
        :param optimizer_config: Dictionary describing the optimizer to build.
        :param loss_func_name: Name of the loss function to optimize.
        :param metrics_names: Metrics to measure on every epoch.
        :param x_val: Validation inputs, used for TensorBoard statistics only,
            never for weight updates.
        :param y_val: Integer class labels for the validation inputs.
        :param log_training: When True, print training progress to the terminal.
        :param log_tensorboard: When True, also log the run to TensorBoard.
        :return: None.
        """
        # Build optimizer/loss/metrics from their declarative descriptions.
        self.compile(optimizer=create_optimizer(optimizer_config),
                     loss=create_loss_func(loss_func_name),
                     metrics=create_metrics(metrics_names))

        verbose = 1 if log_training else 0

        if log_tensorboard:
            tb_callback = TrainValTensorBoard(log_dir=tensorboard_logs_path,
                                              write_graph=False)
            tb_callback.set_model(self)
            self._callbacks.append(tb_callback)

        # One-hot encode the integer labels for the categorical loss/metrics.
        self.fit(data,
                 to_categorical(labels, self._classes_num),
                 batch_size,
                 epochs,
                 verbose,
                 validation_data=(x_val,
                                  to_categorical(y_val, self._classes_num)),
                 callbacks=self._callbacks)
Example #3
0
    def train(self):
        """Train self.model on plain (non-augmented) MNIST for 20 epochs.

        Logs the run to TensorBoard under ./logs/cnn2 and prints the final
        test loss and accuracy.
        """
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        # Flat 784-vectors -> NHWC images expected by the conv layers.
        x_test = mnist.test.images.reshape(-1, 28, 28, 1)
        y_test = mnist.test.labels

        self.model.fit(mnist.train.images.reshape(-1, 28, 28, 1),
                       mnist.train.labels,
                       batch_size=128,
                       epochs=20,
                       verbose=1,
                       validation_data=(x_test, y_test),
                       callbacks=[TrainValTensorBoard(log_dir='./logs/cnn2')])

        # score = [loss, accuracy] on the held-out test split.
        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
Example #4
0
              nesterov=True)

    # Compile with the SGD optimizer configured just above. The loss takes
    # sparse (integer) labels; `miou` is presumably a custom mean-IoU metric
    # defined outside this fragment — verify against the rest of the file.
    model.compile(
        optimizer=sgd,
        loss=losses.
        sparse_categorical_crossentropy,  # losses.categorical_crossentropy,
        metrics=['acc', miou])

    # checkpoint callback function: keep only the best weights (by val_loss)
    # under <dataset_dir>/<model name>.hdf5
    checkpoint_callback = ModelCheckpoint(os.path.join(FLAGS.dataset_dir,
                                                       _model.name + ".hdf5"),
                                          monitor='val_loss',
                                          verbose=1,
                                          save_best_only=True)
    # tensorboard log callback function (train/val curves logged together)
    tb_callback = TrainValTensorBoard(
        log_dir=os.path.join(FLAGS.dataset_dir, "log_" + _model.name))

    # Dataset generator: get_generator returns (generator, steps_per_epoch).
    train_generator, train_steps_per_epoch = get_generator(subset="training")
    val_generator, val_steps_per_epoch = get_generator(subset="validation")

    # Start training
    # NOTE(review): this fit_generator call is truncated in the visible chunk;
    # its remaining arguments and closing parenthesis lie beyond this fragment.
    model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_steps_per_epoch,
        epochs=FLAGS.epoch,
        verbose=1,
        validation_data=val_generator,
        validation_steps=val_steps_per_epoch,
        callbacks=[checkpoint_callback, tb_callback],
        # Double the worker count when GPUs are in use.
        workers=FLAGS.workers * 2 if FLAGS.use_gpus else FLAGS.workers,
Example #5
0
import tensorflow.keras as keras

# helper libraries
import numpy as np

# internal imports
from config import BATCH_SIZE
from utils import TrainValTensorBoard
from model import get_model
from data import generate_datasets, wav2mfcc

# Build the train/validation datasets; generate_datasets also reports the
# steps per epoch for the training set.
train_dataset, steps_per_epoch, valid_dataset = generate_datasets()

model = get_model()

# Combined train/val TensorBoard logging; graph writing disabled.
tensorboardCallback = TrainValTensorBoard(write_graph=False)

# NOTE(review): relies on `os` being imported earlier in the file — confirm.
if not os.path.exists("checkpoint/"):
    os.makedirs("checkpoint/")

# Save a checkpoint after every epoch (save_best_only=False), full model
# (architecture + weights), named by the epoch number.
saverCallback = keras.callbacks.ModelCheckpoint("checkpoint/{epoch:02d}.hdf5",
                                                monitor='val_loss',
                                                verbose=0,
                                                save_best_only=False,
                                                save_weights_only=False,
                                                mode='auto',
                                                period=1)

# NOTE(review): this fit(...) call is truncated in the visible chunk — its
# remaining arguments and closing parenthesis lie beyond this fragment.
model.fit(train_dataset,
          validation_data=valid_dataset,
          epochs=10,
Example #6
0
                         momentum=momentum_SGD,
                         nesterov=nesterov_SGD)

# Compile the top-layer classifier. `f1` is presumably a custom F1-score
# metric defined outside this fragment; it is also the checkpoint monitor.
model_top_layer.compile(optimizer=opt,
                        loss='categorical_crossentropy',
                        metrics=['accuracy', f1])

# %%============================================================================
# TRAIN AND SAVE TOP LAYER
#===============================================================================
if verbose:
    print('Training top layer..')
# Callbacks: combined train/val TensorBoard logging for this stage.
tbCallBack = TrainValTensorBoard(log_dir='./Tensorboard/top_layer/',
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=True)
# Checkpoint: keep only the model with the best (highest, mode='max')
# validation F1; the score is embedded in the saved filename.
filepath = 'best_model_and_weights_top_layer_{val_f1:.2f}.h5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_f1',
                             verbose=1,
                             save_best_only=True,
                             mode='max')

# Fit on the precomputed features with per-epoch validation.
model_top_layer.fit(train_features,
                    train_labels,
                    epochs=epochs,
                    batch_size=batch_size,
                    callbacks=[checkpoint, tbCallBack],
                    validation_data=(validation_features, validation_labels))
Example #7
0
# FINE-TUNE FULL MODEL IN BATCHES OF LOADED DATA
#===============================================================================
if verbose:
    print('Fine-tuning full model..')

#$$$$$$$$$$$$$$$$$$$$$ Arguments passed to fit_generator $$$$$$$$$$$$$$$$$$$$$$$
# Structure data with datagenerator (with augmentation): mild rotations,
# shifts, zoom, and horizontal flips.
datagen = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.15,
                             height_shift_range=0.15,
                             zoom_range=0.15,
                             horizontal_flip=True)

# Callbacks: TensorBoard logging for the full-model fine-tuning stage.
tbCallBack = TrainValTensorBoard(log_dir='./Tensorboard/full_model/',
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=True)
# Checkpoint: keep only the weights with the best (highest) validation F1.
filepath = 'best_model_and_weights_full_model.h5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_f1',
                             verbose=1,
                             save_best_only=True,
                             mode='max')

#$$$$$$$$$$$$$$$$$$$$$$$$$$$ Prepare to load data $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# x_train (features): list the raw training images and shuffle them once.
train_dir = '../data/raw_images/train/'
image_files = os.listdir(train_dir)
image_files = np.random.permutation(image_files)
no_imgs_tot = len(image_files)
Example #8
0
    # Train the model with on-the-fly augmentation (rotations + shifts);
    # pixel values are scaled to [0, 1] and labels are one-hot encoded.
    datagen = ImageDataGenerator(rotation_range=20,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2)
    print(to_categorical(Y_train).shape)
    datagen.fit(X_train / 255.0)

    # First pass: augmented batches via fit_generator, logged to TensorBoard
    # (histogram_freq=1) plus a `wx_cb` callback defined outside this fragment.
    model.fit_generator(
        datagen.flow(X_train / 255.0, to_categorical(Y_train), batch_size=128),
        #batch_size=128,
        shuffle=True,
        epochs=100,
        validation_data=(X_test / 255.0, to_categorical(Y_test)),
        callbacks=[
            TrainValTensorBoard(log_dir='./logs/cnn_cifar5', histogram_freq=1),
            wx_cb()
        ])

    # Second pass: plain fit on the unaugmented data with the same settings.
    # NOTE(review): both runs log to the same ./logs/cnn_cifar5 directory, so
    # their TensorBoard curves will overlap — confirm this is intended.
    model.fit(X_train / 255.0,
              to_categorical(Y_train),
              batch_size=128,
              shuffle=True,
              epochs=100,
              validation_data=(X_test / 255.0, to_categorical(Y_test)),
              callbacks=[
                  TrainValTensorBoard(log_dir='./logs/cnn_cifar5',
                                      histogram_freq=1),
                  wx_cb()
              ])
def main():
    """Build and train a VGG16+LSTM sequence classifier from CLI arguments.

    Loads VGG16 convolutional weights, transplants head weights from a
    separately trained frame-level classifier, then fine-tunes an LSTM head
    with data generators and standard Keras callbacks.
    """
    args = get_args()
    # Fold selected CLI arguments into the module-level config dict.
    config['load_checkpoint'] = args.__dict__['load_checkpoint']
    config['initial_epoch'] = int(args.__dict__['initial_epoch'])
    config['classification_checkpoint'] = args.__dict__['classification_checkpoint']

    # (frames per sequence, height, width, channels)
    input_shape = (config['seq_len'], 224, 224, 3)

    train_gen = TrainGenerator(config)
    cv_gen = CVGenerator(config)

    len_train = len(train_gen.train_paths)
    len_CV = len(cv_gen.cv_paths)

    print('len_train : ', len_train)
    print('len_CV : ', len_CV)

    # Convolutional base without its dense top, applied per frame.
    model = model_lstm_without_top(input_shape)

    if os.path.isfile('weights/vgg16_weights_without_top.h5'):
        model.load_weights('weights/vgg16_weights_without_top.h5')
        print('Loaded VGG16 weights')
            
    model.add(TimeDistributed(Flatten()))

    # Freeze everything built so far; only the head added below will train.
    for layer in model.layers:
        layer.trainable = False

    # LSTM classification head: per-frame Dense(128), two stacked LSTMs,
    # per-frame 7-way logits averaged over time, then softmax.
    model.add( TimeDistributed( Dense(128) ) )
    model.add(Activation('relu') )
    model.add( Dropout(0.3) )
    model.add(LSTM(16, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(16, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(Dense(7))
    model.add(GlobalAveragePooling1D())
    model.add(Activation('softmax'))

    model.summary()


    # Rebuild the frame-level classifier only to harvest its trained weights.
    dummy_model = model_without_top((224, 224, 3))
    dummy_model.add(Flatten())
    dummy_model.add( Dense(128) )
    dummy_model.add(Activation('relu') )
    dummy_model.add( Dropout(0.3) )
    dummy_model.add(Dense(7))
    dummy_model.add(Activation('softmax'))

    dummy_model.load_weights(config['classification_checkpoint'])
    weights_list = dummy_model.get_weights()
    print('\n\nLoaded classification weights : ', len(weights_list), '\n\n')
    del dummy_model

    # Copy all transplanted weights except the last two arrays — the final
    # Dense(7) kernel and bias, which do not line up with the LSTM head.
    model.set_weights(weights_list[ 0 : len(weights_list)-2])
    print('Loaded Classification weights into LSTM model')

    optim = optimizers.Adam(lr = config['learning_rate'], decay = config['decay_rate'])
    model.compile(optimizer=optim,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Optionally resume from a full-model checkpoint supplied on the CLI.
    if config['load_checkpoint'] != '':
        model.load_weights(config['load_checkpoint'])
        print('Successfully loaded weights from %s' % config['load_checkpoint'])
    else:
        print('No checkpoint found')
        

    # Halve the LR after 2 stagnant epochs, down to a floor of 1e-4.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, min_lr=0.0001, verbose=1)

    filepath = 'model/model-{epoch:02d}-{val_loss:.2f}.hdf5'

    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=1, mode='auto')
    callbacks_list = [checkpoint, TrainValTensorBoard(write_graph=False), reduce_lr]

    print('\n', config, '\n')

    
    hist_obj = model.fit_generator(
            generator = train_gen,
            callbacks = callbacks_list, 
            epochs = config['epochs'],
            steps_per_epoch = int(len_train/config['batch_size']),
            verbose=1,
            validation_data = cv_gen,
            validation_steps = int(len_CV/config['batch_size']),
            workers = 4,
            use_multiprocessing=True, 
            initial_epoch = config['initial_epoch']
           )

    # Dump the learning curves to stdout.
    # NOTE(review): history keys 'acc'/'val_acc' are the older Keras names;
    # newer tf.keras uses 'accuracy'/'val_accuracy' — confirm the version.
    train_loss = hist_obj.history['loss']
    val_loss = hist_obj.history['val_loss']
    train_acc = hist_obj.history['acc']
    val_acc = hist_obj.history['val_acc']

    print('train_loss')
    print(train_loss)

    print('val_loss')
    print(val_loss)

    print('train_acc')
    print(train_acc)

    print('val_acc')
    print(val_acc)
    # NOTE(review): everything below references names (siamese_net,
    # X_first_input, y_batch, draw_first_layer_filters, ...) that are never
    # defined in this function — this looks like an unrelated snippet spliced
    # in by the scrape and would raise NameError if ever reached.
    checkpoint = ModelCheckpoint('./models/model.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto')
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=50,
                                   verbose=0,
                                   mode='auto')
    draw_first_layer_filters_callback = draw_first_layer_filters()

    siamese_net.fit(x={
        'input_2': X_first_input,
        'input_3': X_second_input
    },
                    y=y_batch,
                    batch_size=BATCH_SIZE,
                    epochs=500,
                    verbose=1,
                    validation_data=({
                        'input_2': X_first_input_test,
                        'input_3': X_second_input_test
                    }, y_batch_test),
                    shuffle=True,
                    callbacks=[
                        checkpoint, early_stopping,
                        draw_first_layer_filters_callback,
                        TrainValTensorBoard(write_graph=False)
                    ])
Example #11
0
    model = Av_CNN_GCN_trans_model(patch_sz=(pSx, pSy, pSz),
                                   droupout_rate=dp,
                                   number_class=Num_classes,
                                   number_neighbors=Num_neighbors)

# SGD with Nesterov momentum; `learning_rate` and `timeR` are presumably
# defined earlier in the file — not visible in this fragment.
optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
loss = keras.losses.categorical_crossentropy

# Resume from this run's best checkpoint (keyed by timestamp) if it exists.
cfilepath = "./logs/{}/models_best.h5".format(timeR)
if os.path.isfile(cfilepath):
    # NOTE(review): "modelś" in the message is mojibake for "model's".
    print("Resumed modelś weights from {}".format(cfilepath))
    model.load_weights(cfilepath)

model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
callbacks = [
    # Here TrainValTensorBoard is a module exposing a class of the same name
    # (other snippets in this file import the class directly).
    TrainValTensorBoard.TrainValTensorBoard(
        log_dir="./logs/{}/summary/".format(timeR)),
    # Keep only the weights with the highest validation accuracy.
    ModelCheckpoint(cfilepath,
                    monitor='val_acc',
                    verbose=1,
                    save_best_only=True,
                    mode='max'),
]

# Load the train/validation graph data.
# NOTE(review): the keyword `nornalization` (sic) matches the loader's own
# misspelled parameter name — do not "fix" the spelling on this side alone.
trainGraphes = ArteryVein_data.load_data(path=path_data,
                                         case_name=Case_train,
                                         Num_neighbor=Num_neighbors,
                                         nornalization=nornalization)
valGraphes = ArteryVein_data.load_data(path=path_data,
                                       case_name=Case_val,
                                       Num_neighbor=Num_neighbors,
                                       nornalization=nornalization)