Example #1
def train_model_from_scratch(X,
                             Y,
                             output_file,
                             batch_size,
                             epochs,
                             fisher_samples=200):
    """
    Train a model from scratch, while saving the needed parts of the Fisher
    information matrix for later use in EWC

    X: inputs
        Should have 4 dimensions: num_samples * width * height * num_channels
    Y: outputs
        Should have 3 dimensions: num_samples * width * height
    output_file:
        The filename of an HDF5 file which will contain the weights and
        the approximate Fisher information matrix
    batch_size: integer
        The batch size for model training -- passed directly to the model
    epochs: integer
        The number of epochs for model training -- passed directly to the model
    fisher_samples: integer
        The number of samples to use for estimating the Fisher information matrix.
        This is the most time-consuming part of training, so reducing this value
        will do a lot to cut down on training time.
    """
    model = get_unet(X.shape[1], X.shape[2], X.shape[3])
    model.fit(X, Y, batch_size=batch_size, epochs=epochs)
    model.save(output_file)
    fisher = estimate_fisher_information(model,
                                         X,
                                         Y,
                                         num_samples=fisher_samples)
    write_fisher(output_file, fisher)
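`estimate_fisher_information` and `write_fisher` are helpers that are not shown here. A minimal sketch of what a diagonal Fisher estimate typically looks like, assuming a TensorFlow 2 backend and a binary cross-entropy likelihood (both are assumptions, not necessarily this repo's implementation):

import numpy as np
import tensorflow as tf

def estimate_fisher_information(model, X, Y, num_samples=200):
    # Diagonal Fisher approximation: average the squared gradients of the
    # per-sample loss with respect to every trainable weight.
    idx = np.random.choice(len(X), size=min(num_samples, len(X)), replace=False)
    loss_fn = tf.keras.losses.BinaryCrossentropy()
    fisher = [np.zeros(w.shape, dtype=np.float32) for w in model.trainable_weights]
    for i in idx:
        x = X[i:i + 1].astype('float32')
        y = Y[i:i + 1].astype('float32')
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=False))
        grads = tape.gradient(loss, model.trainable_weights)
        for f, g in zip(fisher, grads):
            if g is not None:
                f += np.square(g.numpy())
    return [f / len(idx) for f in fisher]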
Example #2
def main():
    args = parse_arguments()

    imgs, gts, additional_data = load_imgs(args.input, 128)

    x_train, x_test_case, y_train, y_test_case, add_train, add_test = \
        train_test_split(imgs, gts, additional_data, shuffle=False, test_size=0.25)

    x_train = np.concatenate(x_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)
    x_test = np.concatenate(x_test_case, axis=0)
    y_test = np.concatenate(y_test_case, axis=0)

    model = get_unet(x_train)

    earlystopper = EarlyStopping(patience=5, verbose=1)
    checkpointer = ModelCheckpoint("unet-gcloud.hdf5",
                                   verbose=1,
                                   save_best_only=True)

    results = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=32,
                        epochs=150,
                        callbacks=[earlystopper, checkpointer])

    # Workaround: copy the local checkpoint into the output directory
    with file_io.FileIO("unet-gcloud.hdf5", "rb") as src:
        file_data = src.read()
    with file_io.FileIO(os.path.join(args.output, "unet-gcloud.hdf5"),
                        "wb") as dst:
        dst.write(file_data)
Example #3
def test():
    print("Test mode")
    config = Config()
    loss_mode = args.lossmode
    model_name = 'unet_cl4_step3_e5_tr600_v100_jk0.9817'

    model = get_unet(config, loss_mode)
    model.load_weights(os.path.join(WEIGHTS_FLD, model_name))

    check_predict_gold(model, model_name, PRED_FLD, config, loss_mode)
Example #4
def return_model(params):
    '''get model'''
    input_img = Input(params["img_shape"], name='img')
    model = mt.get_unet(input_img,
                        n_filters=12,
                        dropout=params["drop_out"],
                        batchnorm=True,
                        training=True)

    return model
Example #5
def predict(folder_name, mode):
    print_heading('Loading and preprocessing data...')

    image_rows = 600
    image_cols = 800
    folder_pattern = os.path.join(folder_name, '*.jpg')
    image_list = glob.glob(folder_pattern)

    # `miss` is a module-level setting (not shown) that appears to subsample
    # the folder, keeping roughly every `miss`-th frame.
    total = int(len(image_list) / miss) + 1 if miss > 0 else len(image_list)
    imgs = np.ndarray((total, image_rows, image_cols, 3), dtype=np.uint8)
    imgs_id = np.ndarray((total, ), dtype=object)

    count = 0
    for image_path in image_list:
        image_name = os.path.basename(image_path)
        if miss == 0 or count % miss == 1:
            index = int(count / miss) if miss > 0 else count
            imgs[index] = np.array(
                [imread(os.path.join(folder_name, image_name))])
            imgs_id[index] = image_name.split('.')[0]
        count += 1

    imgs = preprocess(imgs).astype('float32')

    if mode == "carla":
        mean = np.mean(imgs)  # mean for data centering
        std = np.std(imgs)  # std for data normalization

        imgs -= mean
        imgs /= std

    print_heading('Loading saved weights...')

    model = get_unet(mode)
    model.load_weights(os.path.join(mode, 'tl_weights.h5'))

    print_heading('Predicting masks on test data...')

    imgs_mask = model.predict(imgs, verbose=1)

    print_heading('Saving predicted masks to files...')

    pred_dir = os.path.join(folder_name, 'preds')
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask, imgs_id):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, image_id + '.pred.png'), image)
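A hypothetical invocation, assuming a folder of .jpg frames and that the module-level `miss` is defined:

predict('frames', 'carla')  # writes masks to frames/preds/<image_id>.pred.png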
Example #6
def train_and_predict():
    print_text('Loading and pre-processing train data.')
    images_train, images_mask_train = load_train_data()

    images_train = pre_process(images_train)
    images_mask_train = pre_process(images_mask_train)

    images_train = images_train.astype('float32')
    mean = np.mean(images_train)  # mean for data centering
    std = np.std(images_train)  # std for data normalization

    images_train -= mean
    images_train /= std

    images_mask_train = images_mask_train.astype('float32')
    images_mask_train /= 255.  # scale masks to [0, 1]

    print_text('Creating and compiling model.')
    model = get_unet(IMG_ROWS, IMG_COLS)
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)

    print_text('Fitting model.')
    model.fit(images_train, images_mask_train, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1, shuffle=True,
              validation_split=VALID_SPLIT,
              callbacks=[model_checkpoint])

    print_text('Loading and pre-processing test data.')
    images_test, images_id_test = load_test_data()
    images_test = pre_process(images_test)

    images_test = images_test.astype('float32')
    images_test -= mean
    images_test /= std

    print_text('Loading saved weights.')
    model.load_weights('weights.h5')

    print_text('Predicting masks on test data.')
    images_mask_test = model.predict(images_test, verbose=1)
    np.save('images_mask_test.npy', images_mask_test)

    print_text('Saving predicted masks to files.')
    if not os.path.exists(PRED_DIR):
        os.mkdir(PRED_DIR)
    for image, image_id in zip(images_mask_test, images_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(PRED_DIR, str(image_id) + '_pred.png'), image)
Example #7
def main(config):
    dataset_train = get_dataset_train()
    model = get_unet()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)  # the batches are moved to this same device below
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    loss_fn = nn.MSELoss()

    make_dir(config.result_dir)
    make_dir(config.sample_dir)
    make_dir(config.model_dir)
    make_dir(config.log_dir)

    print("Start training...")

    for epoch in range(config.epochs):
        SAVE_IMAGE_DIR = "{}/{}".format(config.sample_dir, epoch)
        make_dir(SAVE_IMAGE_DIR)
        train_loss = []

        for i, (image, mask) in enumerate(
                DataLoader(dataset_train,
                           batch_size=config.batch_size,
                           shuffle=True)):

            image = image.to(device)
            mask = mask.to(device)

            y_pred = model(image)
            loss = loss_fn(y_pred, mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss.append(loss.item())

            if i % 50 == 0:
                save_image(y_pred, "{}/{}.png".format(SAVE_IMAGE_DIR, i))
                print(train_loss[-1])

        print("Epoch: %d, Train: %.3f" % (epoch, np.mean(train_loss)))
        if epoch % 5 == 0:
            print("Saved model... {}.pth".format(epoch))
            save_checkpoint("{}/{}.pth".format(config.model_dir, epoch), model,
                            optimizer)
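`save_checkpoint` is not shown. A minimal sketch that is consistent with how Example #11 later reloads the file via `torch.load(...)['state_dict']`:

def save_checkpoint(path, model, optimizer):
    # Persist model and optimizer state so training can be resumed.
    torch.save({'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()}, path)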
Example #8
def predict(test_data, model_path):

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)

    imgs_test, data_file = read_test(test_data)

    print('***' * 30)
    print('Loading saved weights...')
    print('***' * 30)

    model = get_unet((256, 128, 96, 1))
    # model = get_unet((128, 128, 128, 1))
    # model = get_unet((256, 256, 256, 1))
    model.load_weights(model_path)

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)

    imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
    # imgs_mask_test = imgs_mask_test.astype(np.uint8)
    imgs_mask_test = np.argmax(imgs_mask_test, axis=-1)
    imgs_mask_test = imgs_mask_test.astype(np.uint8)
    print("======")
    print(imgs_mask_test.shape)  # 256 128 96
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)

    for i in range(len(imgs_mask_test)):
        # save_path = os.path.join(, 'verse' + str(f"{i}") + '_seg.nii.gz')
        out = sitk.GetImageFromArray(imgs_mask_test[i])
        sitk.WriteImage(
            out, 'pred_dir/{}_seg.nii.gz'.format(
                data_file[i].split('/')[-1].split(".")[0]))

    print('-' * 30)
    print('Prediction finished')
    print('-' * 30)
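`sitk.GetImageFromArray` returns an image with unit spacing and zero origin. When the prediction grid matches the source volume, the scan geometry can be copied back before writing; a sketch of the loop body, assuming `data_file[i]` is the path to the original volume:

ref = sitk.ReadImage(data_file[i])               # original scan, for its geometry
out = sitk.GetImageFromArray(imgs_mask_test[i])
out.CopyInformation(ref)                         # restore spacing/origin/direction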
Example #9
def train(train_data, train_label):
    print('---' * 30)
    print('Loading and preprocessing train data...')
    print('---' * 30)

    print('---' * 30)
    print('Creating and compiling model...')
    print('---' * 30)
    model = get_unet((128, 96, 64, 1))

    model_checkpoint = ModelCheckpoint(filepath='model/ResUnet.hdf5',
                                       save_best_only=True,
                                       verbose=1,
                                       monitor='dice_score_metric',
                                       mode='max')

    log_dir = 'model/logs'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    print('---' * 30)
    print('Fitting model...')
    print('---' * 30)  # 25
    # model.fit(train_data, train_label, batch_size=1, epochs=30, verbose=1, shuffle=True, validation_split=0.10, callbacks=[model_checkpoint, TensorBoard(log_dir="model/logs")])
    # model.fit(train_data, train_label, batch_size=1, epochs=600, verbose=1, shuffle=True, callbacks=[model_checkpoint, TensorBoard(log_dir="model/logs")])

    model.fit(train_data,
              train_label,
              batch_size=1,
              epochs=1500,
              verbose=1,
              shuffle=True,
              validation_split=0.10,
              callbacks=[model_checkpoint,
                         TensorBoard(log_dir="model/logs")])

    print('---' * 30)
    print('Training finished')
    print('---' * 30)
Example #10
def train_unet(img, msk, x_val, y_val):
    print("start train net")
    s = 0
    model = get_unet()
    model.load_weights('weights/unet_jk0.6198', by_name=True)
    #model_checkpoint = ModelCheckpoint('weights/unet_tmp.hdf5', monitor='loss', save_best_only=True)  #save temp model
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    for i in range(10):
        model.fit(img,
                  msk,
                  batch_size=64,
                  epochs=1,
                  verbose=1,
                  shuffle=True,
                  validation_data=(x_val, y_val))
        score, trs = calc_jacc(model, x_val, y_val)
        print('val jk', score)
        if score > s:
            model.save_weights('weights/unet_jk%.4f' % score)
            s = score
    return model, trs
Example #11
def test(config):
    make_dir(config.test_dir)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    cudnn.benchmark = True

    dataset_test = get_dataset_test()
    model = get_unet()
    model.to(device)
    model.load_state_dict(torch.load(config.model, map_location=device)['state_dict'])
    model.eval()  # inference mode: freeze dropout and batch-norm statistics

    for i, (image, image_path) in enumerate(
            data.DataLoader(dataset_test,
                            batch_size=config.batch_size,
                            shuffle=False)):
        image = image.to(device)
        FILE_NAME = image_path[0].split("/")[-1]

        y_pred = model(image)
        save_image(y_pred, "{}/{}".format(config.test_dir, FILE_NAME))

        if i % 10 == 0:
            print("Saved... {}".format(i))

    print("Finished...!")
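Since the loop above is pure inference, the prediction line can also run without autograd bookkeeping; a drop-in variant:

        with torch.no_grad():  # no gradient buffers needed at test time
            y_pred = model(image)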
Example #12
nb_most_uncertain = 10
most_uncertain_rate = 5
pseudo_epoch = 5
nb_pseudo_initial = 20
pseudo_rate = 20
initial_train = True
apply_augmentation = False
nb_initial_epochs = 10
nb_active_epochs = 2
batch_size = 128

X_train, y_train = load_train_data()
labeled_index = np.arange(0, nb_labeled)
unlabeled_index = np.arange(nb_labeled, len(X_train))

model = get_unet(dropout=True)
if initial_train:
    model_checkpoint = ModelCheckpoint(initial_weights_path,
                                       monitor='loss',
                                       save_best_only=True)

    if apply_augmentation:
        for initial_epoch in range(0, nb_initial_epochs):
            history = model.fit_generator(data_generator().flow(
                X_train[labeled_index],
                y_train[labeled_index],
                batch_size=32,
                shuffle=True),
                                          steps_per_epoch=len(labeled_index),
                                          nb_epoch=1,
                                          verbose=1,
Example #13
import sys
sys.path.append('../functions')
from model import get_unet
from data import load_data
from keras import backend as K
import tensorflow as tf

x_train, y_train = load_data('train')
x_val, y_val = load_data('val')
model = get_unet(input_shape=x_train[0].shape)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, validation_data=(x_val, y_val))
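With no `epochs` or `batch_size` given, Keras trains for a single epoch with batches of 32. An explicit variant (the values here are illustrative, not from the source):

model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          batch_size=16,
          epochs=50)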
Example #14
def train_net(weights_folder, logs_folder, progress_predict_dir, config,
              loss_mode):

    print("start train net")
    train_dataset = build_dataset(config.TRAIN_DIR)
    val_dataset = build_dataset(config.VAL_DIR)
    x_trn, y_trn = get_patches_dataset(train_dataset,
                                       config,
                                       shuffleOn=True,
                                       amt=config.AMT_TRAIN)
    x_val, y_val = get_patches_dataset(val_dataset,
                                       config,
                                       shuffleOn=False,
                                       amt=config.AMT_VAL)
    model = get_unet(config, loss_mode)
    os.makedirs(weights_folder, exist_ok=True)
    #model.load_weights('weights/unet_cl2_step0_e5_tr600_v600_jk0.6271')
    model_checkpoint = ModelCheckpoint(os.path.join(weights_folder,
                                                    'unet_tmp.hdf5'),
                                       monitor='loss',
                                       save_best_only=True)
    tb_callback = TensorBoard(log_dir=logs_folder,
                              histogram_freq=0,
                              batch_size=config.BATCH_SIZE,
                              write_graph=True,
                              write_grads=False,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    start_time = time.time()
    for i in range(config.N_STEPS):
        print("Step i", i)
        model.fit(x_trn,
                  y_trn,
                  batch_size=config.BATCH_SIZE,
                  epochs=config.EPOCS,
                  verbose=1,
                  shuffle=True,
                  callbacks=[model_checkpoint, tb_callback],
                  validation_data=(x_val, y_val))

        print("---  Training for %s seconds ---" % (time.time() - start_time))
        score, trs = calc_jacc_img_msk(model, x_trn, y_trn, config.BATCH_SIZE,
                                       config.NUM_CLASSES)
        print('train jk', score)

        score, trs = calc_jacc_img_msk(model, x_val, y_val, config.BATCH_SIZE,
                                       config.NUM_CLASSES)
        print('val jk', score)
        score_str = '%.4f' % score
        model_name = 'unet_cl{0}_step{1}_e{2}_tr{3}_v{4}_jk{5}'.format(
            config.NUM_CLASSES, i, config.EPOCS, config.AMT_TRAIN,
            config.AMT_VAL, score_str)
        print("Weights: ", model_name)
        model.save_weights(os.path.join(weights_folder, model_name))

        #if (i % 10 == 0):
        check_predict_gold(model, model_name, progress_predict_dir, config,
                           loss_mode)
        check_predict_small_test(model, model_name, progress_predict_dir,
                                 config, loss_mode)

        #Get ready for next step
        del x_trn
        del y_trn
        x_trn, y_trn = get_patches_dataset(train_dataset,
                                           config,
                                           shuffleOn=True,
                                           amt=config.AMT_TRAIN)
    return model
Example #15
import model as m
from sklearn.model_selection import train_test_split

path_list_train=d.getImagesPath('train')
path_list_test=d.getImagesPath('test')
X_train,Y_train=d.PreprocessData(path_list_train)
X_test,sizes_test=d.PreprocessData(path_list_test,False)
d.savePreparedData(X_test,X_train,Y_train,sizes_test)


xtr, xval, ytr, yval = train_test_split(X_train, Y_train, test_size=0.1, random_state=7)
#X_gen,Y_gen=d.genImagesAndMasks(X_train,Y_train)
#X_train=np.concatenate(X_train,X_gen)
#Y_train=np.concatenate(Y_train,Y_gen)
train_generator, val_generator = d.generator(xtr, xval, ytr, yval, 16)
model = m.get_unet(256, 256, 3)
model.fit_generator(train_generator, steps_per_epoch=len(xtr) // 6, epochs=250,
                    validation_data=val_generator, validation_steps=len(xval) // 16)
preds_test = model.predict(X_test, verbose=1)


preds_test_t = (preds_test > 0.5).astype(np.uint8)
preds_test_upsampled =d.resizeTest(path_list_test,preds_test,sizes_test)

new_test_ids = []
rles = []
for n, path in enumerate(path_list_test):
    rle = list(d.prob_to_rles(preds_test_upsampled[n]))
    rles.extend(rle)
    new_test_ids.extend([os.path.splitext(os.path.basename(os.path.normpath(str(path))))[0]] * len(rle))
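The ids and run-length encodings built above match the shape of a Kaggle-style submission; a hedged final step (the file name and column names are assumptions):

import pandas as pd

sub = pd.DataFrame({'ImageId': new_test_ids,
                    'EncodedPixels': [' '.join(map(str, r)) for r in rles]})
sub.to_csv('submission.csv', index=False)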
Example #16
from data_util import read_train_data, read_test_data, prob_to_rles, mask_to_rle, resize, np
from model import get_unet
import pandas as pd

epochs = 50

# get train_data
train_img, train_mask = read_train_data()

# get test_data
test_img, test_img_sizes = read_test_data()

# get u_net model
u_net = get_unet()

# fit model on train_data
print("\nTraining...")
u_net.fit(train_img, train_mask, batch_size=16, epochs=epochs)

print("Predicting")
# Predict on test data
test_mask = u_net.predict(test_img, verbose=1)

# Create list of upsampled test masks
test_mask_upsampled = []
for i in range(len(test_mask)):
    test_mask_upsampled.append(
        resize(np.squeeze(test_mask[i]),
               (test_img_sizes[i][0], test_img_sizes[i][1]),
               mode='constant',
               preserve_range=True))
Example #17
def run_experiment():
    print("* experiment configurations")
    print("===========================")
    print("Epoch count: {}".format(HYPER_PARAMS.num_epochs))
    print("Image channel: {}".format(HYPER_PARAMS.num_channel))

    data_set = file_io.FileIO(
        'gs:///building_footprint/data/20180310/xtrain_test.npy', mode='rb')

    print("X train shape: {}".format(xtrain.shape))
    print("Y train shape: {}".format(ytrain.shape))
    print("===========================")

    FMT_VALMODEL_PATH = "{}_val_weights.h5"
    FMT_VALMODEL_LAST_PATH = "{}_val_weights_last.h5"
    FMT_VALMODEL_HIST = "{}_val_hist.csv"
    PREFIX = HYPER_PARAMS.prefix + "_test"
    INPUT_CHANNEL = HYPER_PARAMS.num_channel

    unet = model.get_unet(INPUT_CHANNEL)

    # train and evaluate
    model_checkpoint = ModelCheckpoint(
        FMT_VALMODEL_PATH.format(PREFIX + "_{epoch:02d}"),
        monitor='val_jaccard_coef_int',
        save_best_only=False)

    model_earlystop = EarlyStopping(monitor='val_jaccard_coef_int',
                                    patience=10,
                                    verbose=0,
                                    mode='max')

    model_history = History()

    model_board = TensorBoard(log_dir=os.path.join(HYPER_PARAMS.job_dir,
                                                   'logs'),
                              histogram_freq=0,
                              write_graph=True,
                              embeddings_freq=0)

    save_checkpoint_gcs = LambdaCallback(
        on_epoch_end=lambda epoch, logs: copy_file_to_gcs(
            HYPER_PARAMS.job_dir,
            FMT_VALMODEL_PATH.format(PREFIX + '_' + str(
                format(epoch + 1, '02d')))))

    unet.fit(xtrain,
             ytrain,
             nb_epoch=HYPER_PARAMS.num_epochs,
             batch_size=HYPER_PARAMS.batch_size,
             shuffle=True,
             verbose=1,
             validation_data=(xval, yval),
             callbacks=[
                 model_checkpoint, model_earlystop, model_history, model_board,
                 save_checkpoint_gcs
             ])

    pd.DataFrame(model_history.history).to_csv(
        FMT_VALMODEL_HIST.format(PREFIX), index=False)
    copy_file_to_gcs(HYPER_PARAMS.job_dir, FMT_VALMODEL_HIST.format(PREFIX))

    unet.save_weights(FMT_VALMODEL_LAST_PATH.format(PREFIX))
    copy_file_to_gcs(HYPER_PARAMS.job_dir,
                     FMT_VALMODEL_LAST_PATH.format(PREFIX))
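`copy_file_to_gcs` is not defined in the snippet. A minimal sketch in the style of the Cloud ML Engine Keras samples (an assumption about its behavior):

import os
from tensorflow.python.lib.io import file_io

def copy_file_to_gcs(job_dir, file_path):
    # Stream a local file into the job directory (which may be a gs:// path).
    with file_io.FileIO(file_path, mode='rb') as src:
        with file_io.FileIO(os.path.join(job_dir, file_path), mode='wb') as dst:
            dst.write(src.read())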
Example #18
from model import get_unet, img_rows, img_cols
from generator import DataGenerator
import json
import os
from skimage.transform import resize
from skimage.io import imread
import numpy as np

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# The GPU id to use, usually either "0" or "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "9"

weights_path = 'best.hdf5'
model = get_unet()
model.load_weights(weights_path)

with open('best-idx', 'r') as f:
    predict = json.loads(f.read())
for filename in predict:
    filename = filename.split('.')[0]
    image = imread('train/image/' + filename + '.jpg')
    image = resize(image, (img_rows, img_cols), order=1,
                   anti_aliasing=True)  # bi-linear
    mask = model.predict(np.array([image]))

    # Save the raw prediction as a NumPy array so np.load can read it back.
    np.save('predict/' + filename + '.npy', mask)
Example #19
            batch_imgs[i] = img

            # read the next label:
            trainId_label = cv2.imread(train_trainId_label_paths[path_idx[i]], -1)

            # convert the label to onehot:
            onehot_label = np.zeros((img_height, img_width, no_of_classes), dtype=np.float32)
            onehot_label[layer_idx, component_idx, trainId_label] = 1
            batch_onehot_labels[i] = onehot_label
        path_idx = np.random.choice(a = len(train_data), size = batch_size)

        yield (batch_imgs, batch_onehot_labels)

from keras.layers import Input
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
import model as m

input_img = Input((img_height, img_width, 3), name='img')
model = m.get_unet(input_img, n_filters=32, dropout=0.1, batchnorm=True)

# model.compile(optimizer=Adam(), loss="binary_crossentropy")
# model.compile(optimizer=Adam(), loss=[mean_iou])
model.compile(optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"])

model.summary()

his = model.fit_generator(generator=train_data_iterator(), epochs=epochs, steps_per_epoch=no_of_batches)

print("--- %s seconds ---" % (time.time() - start_time))
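The generator fragment above indexes with `layer_idx` and `component_idx`, which the excerpt never defines. The usual idiom builds them once with `np.ogrid` so the integer label map can scatter ones into the one-hot volume (an inference from the indexing pattern, not code from the source):

import numpy as np

# (img_height, 1) row indices and (1, img_width) column indices; broadcast
# against trainId_label they set
# onehot_label[r, c, trainId_label[r, c]] = 1 for every pixel.
layer_idx, component_idx = np.ogrid[:img_height, :img_width]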
Example #20
       batch_size = batch_size,
       seed = seed,
       subset = 'validation')




# Zipping images and masks together
training_generator = zip(train_image_generator, train_label_generator)
validation_generator = zip(val_image_generator, val_label_generator)



# Arguments model
input_img = Input((512, 512, 1), name='img')
model = get_unet(input_img, n_filters=16, dropout=0.25, batchnorm=False)

# Compiling model
model.compile(optimizer=Adam(), loss=generalised_dice_loss_2d(metric= ), metrics=[dice_coef, "accuracy"])
# model.summary()

# Callbacks for training
callbacks = [
    EarlyStopping(patience=30, monitor='val_dice_coef', verbose=1, mode="max"),
    ReduceLROnPlateau(monitor='val_dice_coef', factor=0.1, patience=5, min_lr=0.0001, verbose=1, mode="max"),
    ModelCheckpoint(MODEL_NAME, monitor='val_dice_coef', save_best_only=True, verbose=1, mode="max")]


# Train model on dataset
history = model.fit_generator(generator=training_generator,
                    steps_per_epoch = steps_epoch,
Example #21
def train_multiple_outputs(epoch_num, batch_size):
    for train_num in range(1, trains + 1):
        # load training data
        train_data_path = os.path.join('Segmentation_Spine', 'dataset_slices',
                                       'train')
        files = os.listdir(train_data_path)
        total = len(files) // (n_ch + 1)

        imgs_train = np.ndarray((total, img_rows, img_cols, n_ch),
                                dtype=np.uint8)
        imgs_train_mask = np.ndarray((total, img_rows, img_cols, 1),
                                     dtype=np.uint8)

        data = load_images(train_data_path, n_ch)
        imgs_train_mask, imgs_train = data['GTs'], data['imgs']

        imgs_train = imgs_train.astype('float32')
        imgs_train_mask = imgs_train_mask.astype('float32')
        # split data
        rand_train, rand_val = split_data(imgs_train, train_split)
        imgs_train_sp = imgs_train[rand_train, :, :, :]
        imgs_val_sp = imgs_train[rand_val, :, :, :]
        masks_train_sp = imgs_train_mask[rand_train, :, :, :]
        masks_val_sp = imgs_train_mask[rand_val, :, :, :]
        # augment data
        imgs_train_sp_aug = augment_data_ltrb(imgs_train_sp)
        imgs_val_sp_aug = augment_data_ltrb(imgs_val_sp)

        masks_train_sp_aug = (augment_data_ltrb(masks_train_sp))
        masks_val_sp_aug = (augment_data_ltrb(masks_val_sp))
        masks_train_sp_aug = np.round(masks_train_sp_aug / 255)
        masks_val_sp_aug = np.round(masks_val_sp_aug / 255)
        print(np.max(imgs_train_sp_aug))
        print(np.min(imgs_train_sp_aug))
        print(np.max(masks_train_sp_aug))
        print(np.min(masks_train_sp_aug))

        print('-' * 30)
        print('Creating and compiling model...')
        print('-' * 30)

        model = get_unet(img_rows, img_cols, n_ch)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=dice_coef_loss,
                      metrics=[dice_coef])
        model_checkpoint = ModelCheckpoint(os.path.join(
            data_path, 'learned_model_' + str(train_num) + '.hdf5'),
                                           monitor='val_loss',
                                           save_best_only=True)
        history = model.fit(imgs_train_sp_aug,
                            masks_train_sp_aug,
                            batch_size=batch_size,
                            epochs=epoch_num,
                            verbose=1,
                            shuffle=True,
                            validation_data=(imgs_val_sp_aug,
                                             masks_val_sp_aug),
                            callbacks=[model_checkpoint])

        print("*" * 30)
        print("train_dice_coeff " + str(history.history['dice_coef']))
        print("val_dice_coeff " + str(history.history['val_dice_coef']))
        print("val_loss " + str(history.history['val_loss']))
        # list all data in history
        print(history.history.keys())
        # summarize history for accuracy
        plt.plot(history.history['dice_coef'])
        plt.plot(history.history['val_dice_coef'])
        plt.plot(history.history['val_loss'])
        plt.title('training history')
        plt.ylabel('Dice / loss')
        plt.xlabel('epoch')
        plt.legend(['train Dice', 'val Dice', 'val loss'], loc='upper left')
        plt.show()
        print("*" * 30)
        print('training is done successfully')
Example #22
def check_predict(model_name, weights_folder, config, loss_mode, predic_cnt,
                  trs, x, y, min_pred_sum, output_folder):
    model = get_unet(config, loss_mode)
    model.load_weights(os.path.join(weights_folder, model_name))
    check_predict_model(model, model_name, config, predic_cnt, trs, x, y,
                        min_pred_sum, output_folder)
Example #23
def train_and_predict(parent_folder):
    print_heading('Loading and preprocessing train data...')

    imgs_train, imgs_mask_train = load_train_data(parent_folder)

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')

    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    if parent_folder == "carla":
        imgs_train -= mean
        imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print_heading('Creating and compiling model...')

    model = get_unet(parent_folder)

    model_checkpoint = ModelCheckpoint('tl_weights.h5',
                                       monitor='val_loss',
                                       save_best_only=True)

    print_heading('Fitting model...')

    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=16,
              epochs=30,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])

    print_heading('Fitting model finished')

    print_heading('Loading and preprocessing test data...')

    imgs_test, imgs_id_test = load_test_data(parent_folder)
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')

    if parent_folder == "carla":
        imgs_test -= mean
        imgs_test /= std

    print_heading('Loading saved weights...')

    model.load_weights('tl_weights.h5')

    print_heading('Predicting masks on test data...')

    imgs_mask_test = model.predict(imgs_test, verbose=1)

    print_heading('Saving predicted masks to files...')

    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, image_id + '.pred.png'), image)
Example #24
def run_experiment():
    print("* experiment configurations")
    print("===========================")
    print("Epoch count: {}".format(HYPER_PARAMS.num_epochs))
    print("Image channel: {}".format(HYPER_PARAMS.num_channel))

    with file_io.FileIO(HYPER_PARAMS.xtrain_files,'rb') as bi1:
        file_io.write_string_to_file('xtrain_files.hdf5',bi1.read())
    with h5py.File('xtrain_files.hdf5', 'r') as f1:
        xtrain = np.asarray(f1['x_train'][()])
    # os.remove('xtrain_files.hdf5')
    with file_io.FileIO(HYPER_PARAMS.ytrain_files,'rb') as bi2:
        file_io.write_string_to_file('ytrain_files.hdf5',bi2.read())
    with h5py.File('ytrain_files.hdf5', 'r') as f2:
        ytrain = np.asarray(f2['y_train'][()])
    # os.remove('ytrain_files.hdf5')
    with file_io.FileIO(HYPER_PARAMS.xval_files,'rb') as bi3:
        file_io.write_string_to_file('xval_files.hdf5',bi3.read())
    with h5py.File('xval_files.hdf5', 'r') as f3:
        xval = np.asarray(f3['x_val'][()])
    # os.remove('xval_files.hdf5')
    with file_io.FileIO(HYPER_PARAMS.yval_files,'rb') as bi4:
        file_io.write_string_to_file('yval_files.hdf5',bi4.read())
    with h5py.File('yval_files.hdf5', 'r') as f4:
        yval = np.asarray(f4['y_val'][()])
    # os.remove('yval_files.hdf5')
        #ytrain = np.asarray(f['ytrain'].value)
        #xval = np.asarray(f['xval'].value)
        #yval = np.asarray(f['yval'].value)
    # a = file_io.FileIO(HYPER_PARAMS.xtrain_files,'r')
    # b = file_io.FileIO(HYPER_PARAMS.ytrain_files,'r')
    # c = file_io.FileIO(HYPER_PARAMS.xval_files,'r')
    # d = file_io.FileIO(HYPER_PARAMS.yval_files,'r')
    # xtrain = np.load(a)
    # ytrain = np.load(b)
    # xval = np.load(c)
    # yval = np.load(d)

    # a.close()
    # b.close()
    # c.close()
    # d.close()

    print("X train shape: {}".format(xtrain.shape))
    print("Y train shape: {}".format(ytrain.shape))
    print("===========================")

    FMT_VALMODEL_PATH = "{}_val_weights.h5"
    FMT_VALMODEL_LAST_PATH = "{}_val_weights_last.h5"
    FMT_VALMODEL_HIST = "{}_val_hist.csv"
    PREFIX = HYPER_PARAMS.prefix
    INPUT_CHANNEL = HYPER_PARAMS.num_channel

    unet = model.get_unet(INPUT_CHANNEL)

    # train and evaluate
    model_checkpoint = ModelCheckpoint(
        FMT_VALMODEL_PATH.format(PREFIX + "_{epoch:02d}"),
        monitor='val_jaccard_coef_int',
        save_best_only=False,
        save_weights_only=True)

    model_earlystop = EarlyStopping(
        monitor='val_jaccard_coef_int',
        patience=20,
        verbose=0,
        mode='max')

    model_history = History()

    model_board = TensorBoard(
        log_dir=os.path.join(HYPER_PARAMS.job_dir, 'logs'),
        histogram_freq=0,
        write_graph=True,
        embeddings_freq=0)

    save_checkpoint_gcs = LambdaCallback(
        on_epoch_end=lambda epoch, logs: copy_file_to_gcs(HYPER_PARAMS.job_dir, FMT_VALMODEL_PATH.format(PREFIX + '_' + str(format(epoch + 1, '02d')))))

    unet.fit(
        xtrain, ytrain,
        nb_epoch=HYPER_PARAMS.num_epochs,
        batch_size=HYPER_PARAMS.batch_size,
        shuffle=True,
        verbose=1,
        validation_data=(xval, yval),
        callbacks=[model_checkpoint, model_earlystop, model_history,
                   model_board, save_checkpoint_gcs])

    pd.DataFrame(model_history.history).to_csv(FMT_VALMODEL_HIST.format(PREFIX), index=False)
    copy_file_to_gcs(HYPER_PARAMS.job_dir, FMT_VALMODEL_HIST.format(PREFIX))

    unet.save_weights(FMT_VALMODEL_LAST_PATH.format(PREFIX))
    copy_file_to_gcs(HYPER_PARAMS.job_dir, FMT_VALMODEL_LAST_PATH.format(PREFIX))
Example #25
    #trs = [0.4,0.4,0.4,0.4,0.5,0.3]
    
    for i in range(N_Cls):
        prd[:,:,i] = prd[:,:,i] > trs[i]
    return prd

def check_predict(id='6120_2_3'):
    model = Deeplabv3(backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_x_jk0.5970')
    msk = predict_id(id, model)
    plt.show()

if __name__ == '__main__':
    data=np.load('test.npy')
    data2=np.load('612022.npy')   
    model = Deeplabv3(backbone='xception')
    model.load_weights('weights/deeplab_x_jk0.6617', by_name=True)
    #model = Deeplabv3(backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_m_jk0.6350')
    msk1 = predict_id(data, model)
    msk2 = predict_id(data2, model)
    tiff.imshow(msk2[:,:,1])
    model2 = get_unet()
    model2.load_weights('weights/unet_jk0.6198')
    msk2 = predict_id(data2, model2)
    for i in range(N_Cls):
        plt.imshow(msk2[:, :, i])
        #plt.imsave('picturem/_{}.tif'.format(i),msk[:,:,i])
    #model.summary()
    #plot_model(model,to_file='deeplab_x.png',show_shapes=True)
Example #26
    def train(self):

        # img_size = self.flag.image_height
        batch_size = self.flag.batch_size
        epochs = self.flag.total_epoch

        datagen_args = dict(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=5,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.05,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.05,  # randomly shift images vertically (fraction of total height)
            # fill_mode='constant',
            # cval=0.,
            horizontal_flip=False,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        image_datagen = ImageDataGenerator(**datagen_args)
        mask_datagen = ImageDataGenerator(**datagen_args)

        ### generator
        seed = random.randrange(1, 1000)
        image_generator = image_datagen.flow_from_directory(
            os.path.join(self.flag.data_path, 'train/IMAGE'),
            class_mode=None,
            seed=seed,
            batch_size=batch_size,
            target_size=(self.flag.image_height, self.flag.image_width),
            color_mode='rgb')
        mask_generator = mask_datagen.flow_from_directory(
            os.path.join(self.flag.data_path, 'train/GT'),
            class_mode=None,
            seed=seed,
            batch_size=batch_size,
            target_size=(self.flag.image_height, self.flag.image_width),
            color_mode='grayscale')

        ### gpu config
        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.9
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        ### define model

        model = get_unet(self.flag)
        # model = get_unet_1class(self.flag)

        if self.flag.pretrained_weight_path is not None:
            model.load_weights(self.flag.pretrained_weight_path)

        ### model save
        if not os.path.exists(
                os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name)):
            mkdir_p(os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name))
        model_json = model.to_json()
        with open(
                os.path.join(self.flag.ckpt_dir, self.flag.ckpt_name,
                             'model.json'), 'w') as json_file:
            json_file.write(model_json)

        ### define callback function
        vis = callbacks.trainCheck(self.flag)
        model_checkpoint = ModelCheckpoint(os.path.join(
            self.flag.ckpt_dir, self.flag.ckpt_name, 'weights.{epoch:03d}.h5'),
                                           period=self.flag.total_epoch // 10)
        learning_rate = LearningRateScheduler(self.lr_step_decay)

        ### train model
        model.fit_generator(
            #self.train_generator(image_generator, mask_generator),
            self.train_generator_multiclass(image_generator, mask_generator),
            steps_per_epoch=image_generator.n // batch_size,
            epochs=epochs,
            callbacks=[model_checkpoint, learning_rate, vis])
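`train_generator_multiclass` is not shown. Because the image and mask iterators are constructed with the same seed, pairing them is normally just lockstep iteration; a sketch of the single-class variant named in the commented-out line (the multiclass version would additionally one-hot the masks):

    def train_generator(self, image_generator, mask_generator):
        # Identical seeds keep the augmentations aligned between both streams.
        while True:
            yield next(image_generator), next(mask_generator)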