Code Example #1
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'simpleCNN_' + now + '.h5'
    batch_size = 256
    num_epochs = 30
    lr = .001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    x = layers.Conv2D(32, (3, 3))(input_tensor)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(128, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(128, (3, 3))(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(.5)(x)
    x = layers.Dense(512, activation='relu')(x)
    output_tensor = layers.Dense(1, activation='sigmoid')(x)

    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: should adjust learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples / batch_size
    validation_steps = num_valid_samples / batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=3)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
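
Examples #1, #3, and #11 come from the same project's training scripts and omit their import block. A minimal sketch of the imports they appear to rely on (the module holding train_gen, valid_gen, and make_plots is project-specific, so the name used below is an assumption):

import os
from datetime import datetime

from keras import layers, callbacks
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
# Example #3 additionally uses: from keras.applications import NASNetMobile
# Example #11 additionally uses: from keras.applications import VGG16

# Project-local helpers -- the module name 'utils' is a placeholder
from utils import train_gen, valid_gen, make_plots

Note also that these scripts compute steps_per_epoch and validation_steps with true division (/); integer division (//) or math.ceil is the more usual choice when passing step counts to fit_generator.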
Code Example #2
File: unit_tests.py  Project: foamliu/SegNet
    def test_data_generator(self):
        iter = train_gen()
        batch_x, batch_y = next(iter)
        for i in range(len(batch_x)):
            x = batch_x[i]
            y = batch_y[i]
            x = (x * 255.).astype(np.uint8)
            y = (y * 255.).astype(np.uint8)
            cv.imwrite('temp/test_data_generator_x_{}.png'.format(i), x)
            cv.imwrite('temp/test_data_generator_y_{}.png'.format(i), y)
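
A practical note: cv.imwrite returns False without raising an error when the destination directory does not exist, so the temp/ folder should be created before this test runs, for example:

import os
os.makedirs('temp', exist_ok=True)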
Code Example #3
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'pretrain_NASNet_' + now + '.h5'
    batch_size = 32
    num_epochs = 30
    lr = .0001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    NASNet = NASNetMobile(include_top=False, input_shape=(96, 96, 3))
    x = NASNet(input_tensor)
    x1 = layers.GlobalMaxPooling2D()(x)
    x2 = layers.GlobalAveragePooling2D()(x)
    x3 = layers.Flatten()(x)
    z = layers.Concatenate(axis=-1)([x1, x2, x3])
    z = layers.Dropout(.5)(z)
    output_tensor = layers.Dense(1, activation='sigmoid')(z)

    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: tweak learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples / batch_size
    validation_steps = num_valid_samples / batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=4)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
Code Example #4
def main():
    # ------------------------------------------------------
    # Run training. Parameters specified in config
    # ------------------------------------------------------
    # Create tensorboard logs
    tensor_board = keras.callbacks.TensorBoard(log_dir='./logs',
                                               histogram_freq=0,
                                               write_graph=True,
                                               write_images=True)
    # Save models. Models are saved only if improved; check is made every 'save_period' epochs
    model_names = checkpoint_models_path + 'model.{epoch:02d}-{val_loss:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       period=save_period)
    # Stop training if validation loss does not improve after 'patience' epochs
    early_stop = EarlyStopping('val_loss', patience=patience)
    # Reduce learning rate if validation loss has stopped improving
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)

    # ------------------------------------------------------
    # ---------------------- Optimizer ---------------------
    # ------------------------------------------------------
    new_model = build_model()
    opt = keras.optimizers.Adam(lr=learning_rate)
    new_model.compile(optimizer=opt, loss=categorical_crossentropy_color)

    # ------------
    print(new_model.summary())
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]
    split_data(imgs_dir, fmt)
    # Read number of training and validation samples
    with open('image_names/train_num.txt', 'r') as f:
        num_train_samples = int(f.read())
    with open('image_names/valid_num.txt', 'r') as f:
        num_valid_samples = int(f.read())

    # Start training
    new_model.fit_generator(
        train_gen(imgs_dir),
        steps_per_epoch=num_train_samples // batch_size,
        validation_data=valid_gen(imgs_dir),
        validation_steps=num_valid_samples // batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
        use_multiprocessing=False,
        # workers=8
    )
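
This script takes its hyper-parameters from a project config module, as the header comment notes; build_model, split_data, train_gen, valid_gen, and categorical_crossentropy_color are project-local functions. A hedged sketch of the config names it expects, with placeholder values rather than the project's real ones:

# config.py (illustrative values only; the actual project defines its own)
checkpoint_models_path = 'models/'
save_period = 1            # epochs between checkpoint checks
patience = 50              # early-stopping patience
learning_rate = 1e-4
imgs_dir = 'data/images'
fmt = 'jpg'                # image extension consumed by split_data()
batch_size = 32
epochs = 100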
Code Example #5
File: train.py  Project: ahwhbc/LookIntoPerson
        new_model = build_model()
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    adam = keras.optimizers.Adam(lr=1e-4,
                                 beta_1=0.9,
                                 beta_2=0.99,
                                 epsilon=1e-08,
                                 decay=5E-6)
    # sgd = keras.optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
    new_model.compile(optimizer=adam,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    # new_model.compile(optimizer=adam, loss=[focal_loss(alpha=.25, gamma=2)], metrics=['accuracy'])

    print(new_model.summary())

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(train_gen(),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=int(get_available_cpus() * 0.80))
Code Example #6
        model_checkpoint = MyCbk(model)
    else:
        model = build_encoder_decoder()
        final = build_refinement(model)
        if pretrained_path is not None:
            final.load_weights(pretrained_path)
        else:
            migrate_model(final)

    decoder_target = tf.placeholder(dtype='float32',
                                    shape=(None, None, None, None))
    final.compile(optimizer='nadam',
                  loss=overall_loss,
                  target_tensors=[decoder_target])

    print(final.summary())

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    final.fit_generator(train_gen(),
                        steps_per_epoch=num_train_samples // batch_size,
                        validation_data=valid_gen(),
                        validation_steps=num_valid_samples // batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks,
                        use_multiprocessing=True,
                        workers=int(get_available_cpus() / 2))
Code Example #7
    sgd = keras.optimizers.SGD(lr=0.001,
                               momentum=0.9,
                               nesterov=True,
                               clipnorm=5.)
    new_model.compile(optimizer='adam', loss=categorical_mine)

    new_model.load_weights(fpath1)
    # new_model.compile(optimizer=sgd, loss='categorical_crossentropy')
    print(new_model.summary())

    # Final callbacks
    callbacks = [model_checkpoint, model_checkpoint2]

    # Start Fine-tuning
    new_model.fit(
        train_gen(),
        steps_per_epoch=num_train_samples // batch_size,
        validation_data=valid_gen(),
        validation_steps=num_valid_samples // batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
    )
    # new_model.fit_generator(train_gen(),
    #                         steps_per_epoch=num_train_samples // batch_size,
    #                         validation_data=valid_gen(),
    #                         validation_steps=num_valid_samples // batch_size,
    #                         epochs=epochs,
    #                         verbose=1,
    #                         callbacks=callbacks,
    #                         use_multiprocessing=True,
Code Example #8
    #     count = 0
    #     for i in encoder_decoder.layers:
    #         count += 1
    #     print(count)
    #     for i in refinement.layers:
    #         count += 1
    #     print(count)

    # sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    refinement.compile(optimizer='nadam', loss=alpha_prediction_loss)

    print(refinement.summary())

    # Summarize then go!
    num_cpu = get_available_cpus()
    workers = int(round(num_cpu / 2))

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    refinement.fit_generator(train_gen(),
                             steps_per_epoch=num_train_samples // batch_size,
                             validation_data=valid_gen(),
                             validation_steps=num_valid_samples // batch_size,
                             epochs=epochs,
                             verbose=1,
                             callbacks=callbacks,
                             use_multiprocessing=False,
                             workers=workers)
Code Example #9
File: train.py  Project: templeblock/imageMatting
    run_opts = tf.RunOptions(report_tensor_allocations_upon_oom=False)

    final.compile(optimizer=sgd,
                  loss=alpha_prediction_loss,
                  target_tensors=[decoder_target],
                  options=run_opts)
    # To use overall_loss/compositional_loss, ensure you are passing the expected number of channels in batch_y

    print(final.summary())

    # Training currently uses a single worker. To parallelize, uncomment the
    # lines below and pass workers to the fit call with use_multiprocessing=True.
    # num_cpu = get_available_cpus()
    # workers = int(round(num_cpu / 2))

    # Final callbacks
    callbacks = [model_checkpoint, early_stop, reduce_lr]  #, tensor_board]

    # Start Fine-tuning
    final.fit(train_gen(),
              steps_per_epoch=num_train_samples // batch_size,
              validation_data=valid_gen(),
              validation_steps=num_valid_samples // batch_size,
              epochs=epochs,
              verbose=2,
              callbacks=callbacks,
              use_multiprocessing=False,
              workers=1)
    tf.keras.backend.clear_session()
Code Example #10
File: train.py  Project: stducc/Super-Resolution-Net
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model

        def on_epoch_end(self, epoch, logs=None):
            fmt = checkpoint_models_path + 'model.%02d-%.4f.hdf5'
            self.model_to_save.save(fmt % (epoch, logs['val_loss']))

    new_model = build_model(scale=scale)
    if pretrained_path is not None:
        new_model.load_weights(pretrained_path, by_name=True)

    adam = keras.optimizers.Adam(lr=1e-4, epsilon=1e-8, decay=1e-6)
    new_model.compile(optimizer=adam, loss='mean_absolute_error')

    print(new_model.summary())

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    num_train_samples, num_valid_samples = get_example_numbers()
    # Start Fine-tuning
    new_model.fit_generator(train_gen(scale=scale),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(scale=scale),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=4)
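
The first lines of this snippet are the tail of a custom checkpoint callback whose class header was cut off in extraction. A plausible reconstruction, following the MyCbk pattern seen in Example #6 (the class name, the keras import, and checkpoint_models_path coming from the config are assumptions):

class MyCbk(keras.callbacks.Callback):
    # Saves the wrapped model after every epoch, tagging the file with val_loss
    def __init__(self, model):
        keras.callbacks.Callback.__init__(self)
        self.model_to_save = model

    def on_epoch_end(self, epoch, logs=None):
        fmt = checkpoint_models_path + 'model.%02d-%.4f.hdf5'
        self.model_to_save.save(fmt % (epoch, logs['val_loss']))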
Code Example #11
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'pretrain_vgg16_' + now + '.h5'
    batch_size = 64
    num_epochs = 30
    lr = .0001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    vgg = VGG16(include_top=False, input_shape=(96, 96, 3))
    x = vgg(input_tensor)
    z = layers.Flatten()(x)
    z = layers.Dropout(.5)(z)
    z = layers.Dense(256, activation='relu')(z)

    output_tensor = layers.Dense(1, activation='sigmoid')(z)

    vgg.trainable = True
    set_trainable = False
    for layer in vgg.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False
    vgg.summary()
    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: tweak learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples / batch_size
    validation_steps = num_valid_samples / batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=10)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
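
Because only the block5_* convolution layers of VGG16 are meant to stay trainable here, it can help to confirm the freeze boundary after the loop; a quick check might look like:

for layer in vgg.layers:
    print('{:20s} trainable={}'.format(layer.name, layer.trainable))
print('Trainable VGG16 layers:', sum(layer.trainable for layer in vgg.layers))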
Code Example #12
    Nadam = keras.optimizers.Nadam(lr=0.002,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=None,
                                   schedule_decay=0.004)
    # Note: the string 'nadam' below creates a default-configured optimizer,
    # so the Nadam instance built above is never actually used.
    final.compile(optimizer='nadam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])  # metric is None here
    final.summary()

    # Summarize then go!
    num_cpu = get_available_cpus()
    workers = int(round(num_cpu / 4))

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    final.fit_generator(
        generator=train_gen(),
        steps_per_epoch=num_train_samples // batch_size,
        validation_data=valid_gen(),
        validation_steps=num_valid_samples // batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
        use_multiprocessing=True,
        workers=workers,
        #initial_epoch = 56
    )
Code Example #13
File: train.py  Project: tuqc/Deep-Residual-Matting
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    # finetune the whole network together.
    for layer in new_model.layers:
        layer.trainable = True

    sgd = keras.optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    new_model.compile(optimizer=sgd, loss=alpha_prediction_loss)

    print(new_model.summary())

    # Summarize then go!
    num_cpu = get_available_cpus()
    workers = int(round(num_cpu / 2))

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(train_gen(),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=workers
                            )
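
Most of these snippets call Model.fit_generator, which is deprecated in TensorFlow 2.x; Model.fit accepts Python generators directly (Examples #7 and #9 already do this). Under tf.keras, the final call above could be written roughly as:

new_model.fit(train_gen(),
              steps_per_epoch=num_train_samples // batch_size,
              validation_data=valid_gen(),
              validation_steps=num_valid_samples // batch_size,
              epochs=epochs,
              verbose=1,
              callbacks=callbacks,
              use_multiprocessing=True,
              workers=workers)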