Example #1
def get_callbacks(args):
    callbacks = []

    if args.tensorboard:
        log_dir = "logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                              histogram_freq=1)
        callbacks.append(tensorboard_callback)

    if args.nvprof:
        callbacks.append(
            CudaProfileCallback(args.nvprof_epoch, args.nvprof_start,
                                args.nvprof_stop))

    # Enable TFLMS
    if args.lms:
        # Specifying this starting name, from previous runs of LMS,
        # speeds up graph analysis time.
        starting_names = ['conv1_bn/cond/pred_id']
        lms = LMSKerasCallback(n_tensors=args.n_tensors,
                               lb=args.lb,
                               starting_op_names=starting_names)
        callbacks.append(lms)

    return callbacks
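A usage sketch (not part of the original example): get_callbacks expects an argparse-style namespace, so the flag names below simply mirror the attributes read above. The default values are placeholders, and model, x_train, y_train stand in for an already-compiled Keras model and its training data.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--tensorboard", action="store_true")
parser.add_argument("--nvprof", action="store_true")
parser.add_argument("--nvprof_epoch", type=int, default=1)
parser.add_argument("--nvprof_start", type=int, default=10)
parser.add_argument("--nvprof_stop", type=int, default=20)
parser.add_argument("--lms", action="store_true")
parser.add_argument("--n_tensors", type=int, default=-1)
parser.add_argument("--lb", type=int, default=1)
args = parser.parse_args()

# Pass the assembled callback list straight to Keras training.
model.fit(x_train, y_train, epochs=10, callbacks=get_callbacks(args))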
Example #2
def get_callbacks(args):
    callbacks = []

    if args.nvprof:
        callbacks.append(CudaProfileCallback(args.nvprof_epoch,
                                             args.nvprof_start,
                                             args.nvprof_stop))

    # Enable TFLMS
    if args.lms:
        # Specifying this starting name, from previous runs of LMS,
        # speeds up graph analysis time.
        starting_names = ['bn_conv1/cond/pred_id']
        lms = LMSKerasCallback(n_tensors=args.n_tensors, lb=args.lb,
                               starting_op_names=starting_names)
        callbacks.append(lms)

    return callbacks
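CudaProfileCallback is referenced but not defined in these snippets. As an illustration only, a hypothetical implementation could toggle the CUDA profiler around a chosen epoch and batch range via ctypes, as sketched below; the real callback shipped with the TFLMS examples may differ.

import ctypes

import tensorflow as tf

# Assumes libcudart.so is on the loader path (e.g. via LD_LIBRARY_PATH).
_cudart = ctypes.CDLL("libcudart.so")


class CudaProfileCallback(tf.keras.callbacks.Callback):
    """Hypothetical sketch: capture an nvprof profile for selected batches."""

    def __init__(self, profile_epoch, start_batch, stop_batch):
        super().__init__()
        self._profile_epoch = profile_epoch
        self._start_batch = start_batch
        self._stop_batch = stop_batch
        self._current_epoch = 0

    def on_epoch_begin(self, epoch, logs=None):
        self._current_epoch = epoch + 1  # 1-based, to match a CLI flag

    def on_batch_begin(self, batch, logs=None):
        if self._current_epoch == self._profile_epoch and batch == self._start_batch:
            _cudart.cudaProfilerStart()

    def on_batch_end(self, batch, logs=None):
        if self._current_epoch == self._profile_epoch and batch == self._stop_batch:
            _cudart.cudaProfilerStop()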
Example #3
#!/usr/bin/env python

import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.keras import backend as K
# Session config used with TFLMS: keep the scheduling-heuristics memory
# optimizer and turn off Grappler's dependency optimization before the
# session is registered with Keras.
config = tf.ConfigProto()
config.graph_options.rewrite_options.memory_optimization = \
    rewriter_config_pb2.RewriterConfig.SCHEDULING_HEURISTICS
config.graph_options.rewrite_options.dependency_optimization = \
    rewriter_config_pb2.RewriterConfig.OFF
K.set_session(tf.Session(config=config))

from tensorflow_large_model_support import LMSKerasCallback
lms_callback = LMSKerasCallback()

import tifffile
from functions import unet_model_3d, data_gen

if __name__ == "__main__":
    image = tifffile.imread('traindata/training_input.tif')
    label = tifffile.imread('traindata/training_groundtruth.tif')
    res = 56  # 8*n
    window_size = (res, res, res)
    input_data = data_gen(image, window_size)
    label_data = data_gen(label, window_size)
    print('image size:', image.shape)
    print('data size:', input_data.shape)
    model = unet_model_3d((1, ) + window_size)
    model.summary()
    batch_size = 8
    no_epochs = 10
    model.fit(input_data,
              label_data,
              # Remaining arguments assumed from the variables defined above.
              batch_size=batch_size,
              epochs=no_epochs,
              callbacks=[lms_callback])
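data_gen and unet_model_3d come from the accompanying functions module and are not shown here. Purely to illustrate the call signature data_gen(image, window_size), a tiling helper along the following lines would produce the channels-first patch array that unet_model_3d((1,) + window_size) expects; the real implementation may differ.

import numpy as np

def data_gen(volume, window_size):
    """Hypothetical sketch: cut a 3D volume into non-overlapping patches.

    Returns an array of shape (num_patches, 1, dz, dy, dx), i.e. one
    channel per patch, matching a channels-first 3D U-Net input.
    """
    dz, dy, dx = window_size
    z, y, x = (s - s % d for s, d in zip(volume.shape, window_size))
    patches = []
    for zi in range(0, z, dz):
        for yi in range(0, y, dy):
            for xi in range(0, x, dx):
                patch = volume[zi:zi + dz, yi:yi + dy, xi:xi + dx]
                patches.append(patch[np.newaxis, ...])  # add channel axis
    return np.asarray(patches, dtype=np.float32)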
Example #4
import os

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import keras
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.layers import Dense, GlobalAveragePooling2D, Input
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

config = tf.ConfigProto()
# A fraction above 1.0 requests CUDA unified memory, letting the process
# oversubscribe the GPU's physical memory.
config.gpu_options.per_process_gpu_memory_fraction = 8.0
set_session(tf.Session(config=config))

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
os.environ["TF_CUDA_HOST_MEM_LIMIT_IN_MB"] = "120000"
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "TRUE"

# include large model support
from tensorflow_large_model_support import LMSKerasCallback
# LMSKerasCallback and LMS share a set of keyword arguments. Here we just
# use the default options.
#lms_callback = LMSKerasCallback(starting_scope = ['conv_1'],n_tensors=-1, debug=True, debug_level=2)
lms_callback = LMSKerasCallback(swap_branches=False)

tf.logging.set_verbosity(tf.logging.INFO)

LABELS = ['spine']

IMAGE_Z, IMAGE_H, IMAGE_W = 96, 416, 416  # when changing the 96 change as well in preproceessing3d and utils3d
GRID_Z, GRID_H, GRID_W = 3, 13, 13
BOX = 5
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
OBJ_THRESHOLD = 0.3  #0.5
NMS_THRESHOLD = 0.3  #0.45
ANCHORS = [
    0.5, 0.5, 0.5, 0.8, 0.8, 0.8, 0.8, 0.5, 0.5, 0.5, 0.8, 0.5, 0.5, 0.5, 0.8
]
def train_model(training_imagesize=224, batchsize=32, epochs=25):
    keras.backend.clear_session()

    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_tensor=Input(shape=(training_imagesize, training_imagesize, 3)))

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(100, activation='relu')(x)
    x = Dense(50, activation='relu')(x)
    x = Dense(10, activation='relu')(x)
    preds = Dense(2, activation='softmax')(x)  # final layer with softmax activation
    model = Model(inputs=base_model.input, outputs=preds)

    # Assigning which layers are to be trained/unfrozen
    # for layer in model.layers[:20]:
    #     layer.trainable = True
    # for layer in model.layers[20:]:
    #     layer.trainable = True


    train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                       rotation_range=20,
                                       zoom_range=0.15,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.15,
                                       horizontal_flip=True,
                                       vertical_flip=True)

    train_generator = train_datagen.flow_from_directory('./data/train/',
                                                        # this is where you specify the path to the main data folder
                                                        target_size=(training_imagesize, training_imagesize),
                                                        color_mode='rgb',
                                                        batch_size=batchsize,
                                                        class_mode='categorical',
                                                        shuffle=True)

    adam_optimizer = Adam(lr=0.0001, decay=0.9)
    model.compile(optimizer=adam_optimizer, loss='binary_crossentropy', metrics=['accuracy'])  #

    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # included in our dependencies
    test_generator = test_datagen.flow_from_directory('./data/test/',
                                                      target_size=(training_imagesize, training_imagesize),
                                                      color_mode='rgb',
                                                      batch_size=batchsize,
                                                      class_mode='categorical',
                                                      shuffle=True)

    step_size_train = train_generator.n // train_generator.batch_size

    # Large Model Support callback: swaps tensors between GPU and host memory
    # to avoid running out of GPU memory during training.
    lms_callback = LMSKerasCallback()
    fit_history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=step_size_train,
                                      epochs=epochs,
                                      validation_data=test_generator,
                                      validation_steps=test_generator.n // test_generator.batch_size,
                                      callbacks=[lms_callback])

    # Reporting of the model for analysis purposes
    plt.figure(1, figsize=(15, 8))
    plt.subplot(221)
    plt.plot(fit_history.history['acc'])
    plt.plot(fit_history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'valid'])

    plt.subplot(222)
    plt.plot(fit_history.history['loss'])
    plt.plot(fit_history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'valid'])
    plt.savefig("Graphical Outputs.pdf")
    plt.clf()  # clears entire figure

    trainloss = fit_history.history['loss']
    testloss = fit_history.history['val_loss']

    trainaccuracy = fit_history.history['acc']
    testaccuracy = fit_history.history['val_acc']

    np.savetxt("train_loss.csv", trainloss, delimiter=",", fmt='%s')
    np.savetxt("test_loss.csv", testloss, delimiter=",", fmt='%s')
    np.savetxt("train_accuracy.csv", trainaccuracy, delimiter=",", fmt='%s')
    np.savetxt("test_accuracy.csv", testaccuracy, delimiter=",", fmt='%s')

    # Save the trained model to disk; clear_session() at the start of the next
    # call removes it from memory so runs do not interfere with each other.
    model.save('animalmodel.h5')

    return None
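A minimal, hypothetical driver for the function above. The directory layout (./data/train/ and ./data/test/ with one sub-folder per class) is taken from the flow_from_directory calls; the argument values are just the function's defaults.

if __name__ == "__main__":
    # Train on 224x224 crops; lower batchsize if the GPU still runs out of
    # memory even with the LMS callback swapping tensors to host memory.
    train_model(training_imagesize=224, batchsize=32, epochs=25)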