def main():
    """Train the CNN with dice loss on the extended dataset, using the
    'small' on-the-fly augmentation setting.

    (Removed: a commented-out no-augmentation variant of the same run;
    it differed only in aug=None and the checkpoint name.)
    """
    from losses import dice

    loss = dice.dice_loss
    # Training/validation iterators with light augmentation applied on the fly.
    train, val = data_g.get_train_val_iterators(aug='small')

    # Build an uncompiled model (variable input height/width, 3 channels).
    model = cnn.get_model(None, None, 3, do_compile=False)
    model.compile(
        optimizer='adam',
        loss=loss,
        metrics=['accuracy', f1, tf.keras.metrics.MeanIoU(num_classes=2)])

    model_name = 'cnn_dice_EXTDATA_augmentation_small'
    train_sub(model, model_name, train, val, epochs=100, verbose=2)
def get_model(base_model,
              dataset_name=False,
              compile=True,
              weights=None,
              epsilon=1e-8,
              teacher_epsilon=1e-3,
              init_temp=2.5):
    """Take an uncompiled model and return model compiled for ENDD.

    Warning: This function works in place. Model is returned only for
    conveniance.

    Args:
        base_model: A Keras model object, or one of the strings 'cnn'/'vgg'
            to have the base network built here.
        dataset_name: Dataset name; required when base_model is given by name.
        compile: If True, compile with the Dirichlet ENDD loss. (The name
            shadows the builtin but is kept for backward compatibility.)
        weights: Optional saved-weights identifier, loaded via
            saveload.load_weights.
        epsilon: Numerical-stability epsilon for the ENDD loss.
        teacher_epsilon: Epsilon applied to the ensemble (teacher) targets.
        init_temp: Initial annealing temperature for the ENDD loss.

    Returns:
        The (possibly compiled) Keras model.

    Raises:
        ValueError: If a named base model lacks a dataset_name, or the name
            is not one of the recognized base models.
    """
    if isinstance(base_model, str):
        if not dataset_name:
            raise ValueError('dataset_name must be provided if base_model is given by name.')
        if base_model == 'cnn':
            base_model = cnn.get_model(dataset_name, compile=False, softmax=False)
        elif base_model == 'vgg':
            base_model = vgg.get_model(dataset_name, compile=False, softmax=False)
        else:
            # BUG FIX: the {} placeholder was never substituted, so the
            # offending name was missing from the error message.
            raise ValueError(
                """Base model {} not recognized, make sure it has been added to endd.py, or pass a Keras model object as base model instead.""".format(base_model))

    if weights:
        saveload.load_weights(base_model, weights)

    if compile:
        base_model.compile(optimizer='adam',
                           loss=losses.DirichletEnDDLoss(
                               init_temp=init_temp,
                               epsilon=epsilon,
                               ensemble_epsilon=teacher_epsilon))
    return base_model
def main():
    """Train the CNN with dice loss on the combined (original + extra) data.

    (Removed: commented-out crossentropy and focal-loss experiments and a
    disabled augmentation call; only the dice run was live.)
    """
    # Merge the two training sets along the sample axis.
    x1, y1 = data.get_training_data()
    x2, y2 = data.get_training_data2()
    x = np.concatenate((x1, x2), axis=0)
    y = np.concatenate((y1, y2), axis=0)

    from losses import dice

    loss = dice.dice_loss
    model_name = 'cnn_dice_EXTDATA_100e_nostop'
    model = cnn.get_model(None, None, 3, do_compile=False)
    model.compile(
        optimizer='adam',
        loss=loss,
        metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1])
    train_sub(model, model_name, x, y, epochs=100)
import settings
from models import cnn
from utils import saveload
from utils import datasets

# Enable memory growth so TF does not reserve all GPU memory up front.
# BUG FIX: guarded — the unconditional physical_devices[0] raised
# IndexError on machines without a GPU.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Load data
(train_images, train_labels), (test_images, test_labels) = datasets.get_dataset(DATASET_NAME)

# Preprocess: flatten the label arrays and one-hot encode them.
train_labels = tf.one_hot(train_labels.reshape((-1, )),
                          settings.DATASET_N_CLASSES[DATASET_NAME])
test_labels = tf.one_hot(test_labels.reshape((-1, )),
                         settings.DATASET_N_CLASSES[DATASET_NAME])

# Get model
model = cnn.get_model(dataset_name=DATASET_NAME, compile=True)

# Train
model.fit(train_images,
          train_labels,
          epochs=N_EPOCHS,
          validation_data=(test_images, test_labels))

# Save weights
saveload.save_tf_model(model, "cnn")
def main():
    """Cross-validate the CNN under several loss functions.

    Runs, in order: binary crossentropy, dice, focal, lovasz, and
    binary crossentropy with class weighting.
    """
    from models import cnn
    from losses import dice
    from losses import focal
    from losses import lovasz

    # (loss, checkpoint/model name, use class weighting?)
    configurations = [
        ('binary_crossentropy', 'cnn_cross_entropy', False),
        (dice.dice_loss, 'cnn_dice', False),
        (focal.focal_loss, 'cnn_focal', False),
        (lovasz.lovasz_loss, 'cnn_lovasz', False),
        ('binary_crossentropy', 'cnn_balanced_cross_entropy_class_weight', True),
    ]

    for loss_fn, model_name, weighted in configurations:
        # Fresh uncompiled model per configuration.
        model = cnn.get_model(None, None, 3, do_compile=False)
        model.compile(
            optimizer='adam',
            loss=loss_fn,
            metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2), f1])
        if weighted:
            cross_val(model, model_name, use_class_weight=True)
        else:
            cross_val(model, model_name)
def main():
    """Generate a submission from the saved dice-loss CNN checkpoint."""
    # Rebuild the uncompiled network and restore the trained weights.
    net = cnn.get_model(None, None, 3, do_compile=False)
    net.load_weights('checkpoints/ckp_cnn_dice_SPECIALDATA.h5')
    submission.create(net, 'ckp_cnn_dice_SPECIALDATA')