Example #1
        def train_gen():
            if self.scale_invariant:
                return data.train_generator(imgs, mask,
                                            edge=edge,
                                            padding=200,
                                            input_size=380,
                                            output_size=196,
                                            scale_range=0.5)

            return data.train_generator(imgs, mask,
                                        edge=edge,
                                        padding=100,
                                        input_size=188,
                                        output_size=100)
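
Either branch returns a Python generator of (input, target) batches; a hypothetical consumer inside the same training routine (the model and step counts are assumptions, not part of this snippet) might look like:

        model.fit_generator(train_gen(),
                            steps_per_epoch=100,  # hypothetical
                            epochs=10)            # hypothetical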
Example #2

import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model

def model_test():

    batch_size = 512

    model = load_model('model.h5')
    train_samples, validation_samples = load_data()
    num_train = len(train_samples)
    num_val = len(validation_samples)
    # Un-augmented and augmented views of the same training samples, for comparison.
    data = validation_generator(train_samples, batch_size=batch_size)
    augmented_data = train_generator(train_samples, batch_size=batch_size)

    images, steering = next(data)
    aug_images, aug_steering = next(augmented_data)
    # Keras 2 renamed predict_generator's old `val_samples` argument to `steps`.
    predicted_steering = model.predict_generator(data, steps=1)

    bins = np.linspace(-0.5, 0.5, 100)

    plt.hist(predicted_steering, 100)
    plt.show()

    plt.hist(steering, bins, alpha=0.5, label='steering')
    plt.hist(aug_steering, bins, alpha=0.5, label='augmented steering')
    plt.hist(predicted_steering, bins, alpha=0.5, label='predicted steering')
    plt.legend(loc='upper right')
    plt.show()
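
Note that `predict_generator` itself is deprecated in tf.keras, where `Model.predict` accepts generators directly; the equivalent modern call would simply be:

    predicted_steering = model.predict(data, steps=1)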
Example #3
import imageio
import tensorflow as tf

def run(flags_obj):
    data_aug_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')

    train_gene = train_generator(flags_obj, data_aug_args)

    model = unet(flags_obj, n_filters=64)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=flags_obj.learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=['accuracy'])

    example = load_example(flags_obj)
    example_img = imageio.imread('data/membrane/test/image/0.png')
    # Save first prediction without training.
    save_prediction(model, example_img, example, 0)

    test_ds = load_test_dataset()

    # Model.fit accepts generators directly in TF 2.x (fit_generator is deprecated).
    history = model.fit(train_gene,
                        epochs=flags_obj.epoch,
                        steps_per_epoch=flags_obj.steps_per_epoch,
                        validation_data=test_ds,
                        callbacks=[DisplayCallback(model, example)])

    create_gif()
    plot_history(history, flags_obj.epoch)
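
DisplayCallback is project code that the listing does not show. Below is a minimal sketch of what it plausibly does, reusing the save_prediction helper and test image from run() above; the implementation is an assumption, only the constructor signature is taken from the call site:

import imageio
import tensorflow as tf

class DisplayCallback(tf.keras.callbacks.Callback):
    """Hypothetical sketch: re-save the example prediction after each epoch."""

    def __init__(self, model, example):
        super().__init__()
        self._model = model
        self._example = example
        # the same test image that run() passes to save_prediction
        self._img = imageio.imread('data/membrane/test/image/0.png')

    def on_epoch_end(self, epoch, logs=None):
        # step 0 was the untrained prediction saved in run(), so number from 1
        save_prediction(self._model, self._img, self._example, epoch + 1)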
Example #4

def augmented_data_distribution():
    batch_size = 1000  #len(train_samples)
    batch_count = 1
    epoch = 40
    non_zero_bias = 1 / (1 + epoch / 5.)
    data = train_generator(train_samples, batch_size, non_zero_bias)

    for i in range(batch_count):
        batch_images, batch_steering = next(data)
        plt.hist(batch_steering, bins=100)
        plt.show()
Example #5

def data_generator_test():
    batch_size = 1024
    batch_count = 2
    data = train_generator(train_samples, batch_size=batch_size)

    for i in range(batch_count):
        batch_images, batch_steering = next(data)
        for (image, angle) in zip(batch_images, batch_steering):
            print(angle)
            plt.imshow(image, interpolation='nearest')
            plt.show()
            os.system('cls' if os.name == 'nt' else 'clear')  # clear the console between frames
Example #6
# Define history callbacks
from visualisation import LossHistory
history = LossHistory()

# Fit
print("Training network..")
for epoch in range(nb_epoch):

    # Fit one epoch
    non_zero_bias = 1 / (1 + epoch / 5.)
    #non_zero_bias = 1.
    print("Non zero bias = " + str(non_zero_bias))

    # Define data generators
    train = train_generator(train_samples, batch_size, non_zero_bias)
    validation = train_generator(validation_samples, batch_size)

    model.fit_generator(train,
                        steps_per_epoch=steps_per_epoch,
                        initial_epoch=epoch,
                        epochs=epoch + 1,
                        verbose=1,
                        validation_data=validation,
                        validation_steps=validation_steps,
                        callbacks=[history])

print("Network trained!")

# Plot loss graph
from visualisation import loos_graph
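
The non_zero_bias schedule in this loop starts at 1.0 and decays toward zero, presumably so that train_generator keeps most near-zero steering samples in early epochs and drops progressively more of them later. A quick check of the values (plain Python, no project code needed):

for epoch in (0, 1, 5, 10, 20, 39):
    print(epoch, 1 / (1 + epoch / 5.))
# 0 -> 1.00, 1 -> 0.83, 5 -> 0.50, 10 -> 0.33, 20 -> 0.20, 39 -> 0.11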
Example #7
    # setup data augmentation
    data_gen_args = dict()
    if DATA_AUGMENTATION:
        data_gen_args = dict(rotation_range=0.2,
                             width_shift_range=0.05,
                             height_shift_range=0.05,
                             shear_range=0.05,
                             zoom_range=0.05,
                             horizontal_flip=True,
                             fill_mode='nearest')

    # initialize generator of training data
    my_generator = train_generator(
        batch_size=20,
        train_path=pth / 'membrane/train',
        image_folder='image',
        mask_folder='label',
        aug_dict=data_gen_args,
        save_to_dir=pth / 'membrane/train/aug',
    )

    # you will see 60 transformed images and their masks in data/membrane/train/aug
    num_batch = 3
    for i, batch in enumerate(my_generator):
        if i >= num_batch:
            break

    ## create .npy data

    image_arr, mask_arr = gen_train(
        pth / 'membrane/train/aug/',
        pth / 'membrane/train/aug/',  # leading '/' dropped: pathlib discards the left operand before an absolute segment
    )
Example #8
data_gen_args = dict(
    rotation_range=45,  # degrees
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=15,  # degrees
    zoom_range=0.1,
    brightness_range=[0.8, 1.2],
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='mirror',  # NB: the documented Keras fill modes are constant/nearest/reflect/wrap; 'reflect' is the closest documented equivalent
    #preprocessing_function=foo,  # something to do the warping
    validation_split=0.1,  # Fraction of data to use for validation
)
print(f"   Augmentations: \n{indent(pformat(data_gen_args), ' '*18)}")
train, validate = train_generator(batch_size,
                                  'data/filament/train',
                                  'image',
                                  'label',
                                  data_gen_args,
                                  save_to_dir=None)

model = unet(learning_rate=learning_rate)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    str(output_dir / 'unet_filament.hdf5'),
    monitor='val_loss',
    verbose=1,
    save_best_only=True)
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                      verbose=1,
                                      patience=10)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                             histogram_freq=1,
                                             write_images=True)
Example #9

# Specify the transformation ranges to be used when performing online augmentation.
data_gen_args = dict(rotation_range=2,
                     width_shift_range=0.02,
                     height_shift_range=0.02,
                     shear_range=2,
                     zoom_range=0.02,
                     brightness_range=[0.9, 1.1],
                     horizontal_flip=True,
                     vertical_flip=True,
                     fill_mode="nearest")

# Initialise the training and validation data generators.
# Note that an empty augmentation dict() is provided to the validation generators as no
# augmentation should be performed whilst evaluating the validation performance.
train_gen = data.train_generator(args.batch, f"{args.dir}/train", "image", "label",
                                 data_gen_args, target_size=(args.size, args.size))
val_gen = data.train_generator(1, f"{args.dir}/val", "image", "label", dict(),
                               target_size=(args.size, args.size))

# Format the current time as a string to be used in a TensorBoard log name. The variable
# is named `timestamp` to avoid shadowing the stdlib `time` module, and colons are avoided
# because they are not legal in Windows paths.
timestamp = datetime.now().strftime("%d-%m-%Y_%H-%M-%S")

# Initialise a Keras TensorBoard callback and set the log name to a combination of the
# name specified in the --name argument and the current time.
tensorboard = TensorBoard(log_dir=f"logs/{args.name}_{timestamp}")

# Initialise a Keras EarlyStopping callback and set the stopping criteria to be when the
# validation loss fails to decrease after 10 epochs.
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=10)

# Initialise a Keras ModelCheckpoint callback to save the model every epoch regardless of
# whether the loss decreases or not.
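
The listing is cut off before the checkpoint call itself; given the comment, a plausible completion (the callback name and file path are assumptions) would be:

checkpoint = ModelCheckpoint(f"checkpoints/{args.name}_{timestamp}.hdf5",
                             monitor="val_loss", verbose=1,
                             save_best_only=False)  # save at the end of every epoch, regardless of val_loss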