Code Example #1
import os

import numpy as np
import tensorflow as tf

# Start from a clean Keras session/graph state.
tf.keras.backend.clear_session()

DATA_DIR_IH = "/data/uob"
DATA_DIR_DEEPTHOUGHT = "/storage/yw18581/data"

data_folder = DATA_DIR_IH
TRAIN_VAL_TEST_DIR = os.path.join(data_folder, "train_validation_test")

N_FILES = 1
BATCH_SIZE = 6
N_EPOCHS = 500

model = get_unet()
model.summary()

CHECKPOINT_FOLDER_PATH = os.path.join(data_folder, 'trained_models')
TASK_NAME = 'UNet_training_generator_{}epochs'.format(N_EPOCHS)
TASK_FOLDER_PATH = os.path.join(CHECKPOINT_FOLDER_PATH, TASK_NAME)

if not os.path.exists(TASK_FOLDER_PATH):
    os.makedirs(TASK_FOLDER_PATH)

#TRAINING_WEIGHTS_FILEPATH = os.path.join(TASK_FOLDER_PATH,
#                                         '{}_weights_training{}.hdf5'.format(model.name, TASK_NAME))

TRAINING_WEIGHTS_FILEPATH = os.path.join(CHECKPOINT_FOLDER_PATH,
                                         'retrained_UNet_500+250epochs.hdf5')
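
The setup above assumes a get_unet() helper defined elsewhere in the project. As a rough sketch only, a minimal tf.keras U-Net builder compatible with these call sites might look like the following; the input shape, filter counts, depth and loss are assumptions, not the project's actual architecture.

import tensorflow as tf
from tensorflow.keras import layers, models


def get_unet(input_shape=(256, 256, 1), compile_model=True):
    """Hypothetical minimal U-Net; shapes, depth and loss are assumptions."""
    inputs = layers.Input(shape=input_shape)

    # Contracting path: one conv block, then downsample.
    c1 = layers.Conv2D(16, 3, activation="relu", padding="same")(inputs)
    c1 = layers.Conv2D(16, 3, activation="relu", padding="same")(c1)
    p1 = layers.MaxPooling2D(2)(c1)

    # Bottleneck.
    c2 = layers.Conv2D(32, 3, activation="relu", padding="same")(p1)
    c2 = layers.Conv2D(32, 3, activation="relu", padding="same")(c2)

    # Expanding path: upsample and concatenate the skip connection.
    u1 = layers.Conv2DTranspose(16, 2, strides=2, padding="same")(c2)
    u1 = layers.concatenate([u1, c1])
    c3 = layers.Conv2D(16, 3, activation="relu", padding="same")(u1)
    c3 = layers.Conv2D(16, 3, activation="relu", padding="same")(c3)

    # One-channel sigmoid output for binary segmentation masks.
    outputs = layers.Conv2D(1, 1, activation="sigmoid")(c3)
    model = models.Model(inputs, outputs, name="unet")

    if compile_model:
        model.compile(optimizer="adam", loss="binary_crossentropy")
    return model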
Code Example #2
import os
import pickle

# distances, fnames_list and the helper functions called below are defined
# elsewhere in the project; BATCH_SIZE and TASK_FOLDER_PATH come from the
# setup in Code Example #1.
index_list_train = [load_indices(d, "train") for d in distances]
index_list_val = [load_indices(d, "val") for d in distances]
print("creating data generators")

train_generator = data_generator_index_list(fnames_list, index_list=index_list_train,
                                            batch_size=BATCH_SIZE, ftarget=lambda y: y)
validation_generator = data_generator_index_list(fnames_list, index_list=index_list_val,
                                                 batch_size=BATCH_SIZE, ftarget=lambda y: y)
print("calculating steps per epoch")
steps_per_epoch, n_events = get_n_iterations_index(fnames_list, index_list_train, batch_size=BATCH_SIZE)
validation_steps, n_events = get_n_iterations_index(fnames_list, index_list_val, batch_size=BATCH_SIZE)
# X_train = np.load(os.path.join(data_dir, "Xy_train.npz"))["x"]
# y_train = np.load(os.path.join(data_dir, "Xy_train.npz"))["y"]
print("retraining model")
TRAINING_WEIGHTS_FILEPATH = os.path.join(TASK_FOLDER_PATH, 'retrained_UNet_1500_epochs_clean_300.hdf5')
model_1500 = get_unet()
model_1500.load_weights(TRAINING_WEIGHTS_FILEPATH)
hist_2000 = train_neural_network(model_1500, train_generator, steps_per_epoch, validation_generator, validation_steps,
                                 epochs=500)
# model.load_weights(os.path.join(data_dir,"trained_models/retrained_UNet_500+250+250epochs.hdf5"))

# model.fit(x=X_train, y=y_train, epochs=250, batch_size=1, verbose=1, validation_split=.2)
print("saving trained model")
model_1500.save(os.path.join(TASK_FOLDER_PATH, "retrained_UNet_2000_epochs_clean_300.hdf5"))
with open(os.path.join(TASK_FOLDER_PATH, "hist_retrained_UNet_2000_epochs_clean_300.pkl"), 'wb') as f:
    pickle.dump(hist_2000, f)
print("keep training for 500 more epochs")
hist_2500 = train_neural_network(model_1500, train_generator, steps_per_epoch, validation_generator, validation_steps,
                                 epochs=500)
model_1500.save(os.path.join(TASK_FOLDER_PATH, "retrained_UNet_2500_epochs_clean_300.hdf5"))
with open(os.path.join(TASK_FOLDER_PATH, "hist_retrained_UNet_2500_epochs_clean_300.pkl"), 'wb') as f:
    pickle.dump(hist_2500, f)
print("keep training for 500 more epochs")
Code Example #3
import os

import tensorflow as tf


# The excerpt opens mid-function; a Dice-loss wrapper of this shape is implied
# by the surrounding code (dice_coef is defined elsewhere in the project).
def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)


tf.keras.backend.clear_session()

DATA_DIR_IH = "/data/uob"
DATA_DIR_DEEPTHOUGHT = "/storage/yw18581/data"

data_folder = DATA_DIR_IH
TRAIN_VAL_TEST_DIR = os.path.join(data_folder, "train_validation_test")

N_FILES = 1
BATCH_SIZE = 1
N_EPOCHS = 500

model = get_unet(compile_model=True)
model.summary()

CHECKPOINT_FOLDER_PATH = os.path.join(data_folder, 'trained_models')
TASK_NAME = 'UNet_training_generator_{}epochs'.format(N_EPOCHS)
TASK_FOLDER_PATH = os.path.join(CHECKPOINT_FOLDER_PATH, TASK_NAME)

if not os.path.exists(TASK_FOLDER_PATH):
    os.makedirs(TASK_FOLDER_PATH)

TRAINING_WEIGHTS_FILEPATH = os.path.join(TASK_FOLDER_PATH,
                                         '{}_weights_training{}.hdf5'.format(model.name, TASK_NAME))

HISTORY_FILEPATH = os.path.join(TASK_FOLDER_PATH,
                                '{}_history{}.pkl'.format(model.name, TASK_NAME))
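
Code Example #3 opens inside a loss function that negates dice_coef, whose definition falls outside the excerpt. For reference, a common smoothed soft-Dice coefficient in Keras backend terms looks like the sketch below; the smoothing constant and exact formulation are assumptions.

from tensorflow.keras import backend as K


def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice; smooth guards against division by zero on empty masks."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)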