# Example #1

import os

from keras.models import model_from_json

# project-local modules assumed by this excerpt (import style may differ):
# import model
# import utils
# import iterator_dr_640_segmentation_as_input
# FLAGS, fundus_dirs, val_ratio, and batch_size are defined elsewhere in the
# original script.

grade_path = "../data/all_labels.csv"
model_out_dir = "/nfs/jaemin/isbi/2/models/dr/segmentation_as_input_{}".format(
    FLAGS.data_ratio)
segmentation_home = "../outputs/lesion_segmentation"
if not os.path.isdir(model_out_dir):
    os.makedirs(model_out_dir)

# set iterators for training and validation
training_set, validation_set = utils.split_dr(fundus_dirs, grade_path,
                                              val_ratio)
if FLAGS.data_ratio:
    n_train = int(len(training_set[0]) * FLAGS.data_ratio)
    n_val = int(len(validation_set[0]) * FLAGS.data_ratio)
    training_set = (training_set[0][:n_train], training_set[1][:n_train])
    validation_set = (validation_set[0][:n_val], validation_set[1][:n_val])
class_weight = utils.class_weight(training_set[-1])
train_batch_fetcher = iterator_dr_640_segmentation_as_input.TrainBatchFetcher(
    training_set, batch_size, segmentation_home, class_weight)
val_batch_fetcher = iterator_dr_640_segmentation_as_input.ValidationBatchFetcher(
    validation_set, batch_size, segmentation_home)

# create networks
if FLAGS.load_model_dir:
    network_file = utils.all_files_under(FLAGS.load_model_dir,
                                         extension=".json")
    weight_file = utils.all_files_under(FLAGS.load_model_dir, extension=".h5")
    assert len(network_file) == 1 and len(weight_file) == 1
    with open(network_file[0], 'r') as f:
        network = model_from_json(f.read())
    network.load_weights(weight_file[0])
    network = model.set_optimizer(network)
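
# For symmetry with the loading branch above: saving a Keras model in the
# same one-.json-plus-one-.h5 layout uses the standard to_json() and
# save_weights() calls. The helper below is illustrative only and its file
# names are placeholders.
def save_network(net, out_dir):
    with open(os.path.join(out_dir, "network.json"), "w") as f:
        f.write(net.to_json())
    net.save_weights(os.path.join(out_dir, "network_weights.h5"))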
# Example #2

import numpy as np

# project-local module assumed by this excerpt:
# import data
# utils (as above) and input_shape are defined elsewhere in the original script.

params = {
    'dim': (input_shape[0], input_shape[1]),
    'batch_size': 32,
    'n_classes': 28,
    'n_channels': input_shape[2],
    'augment': True,
    'shuffle': False
}
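
# These params mirror the common keras.utils.Sequence data-generator pattern.
# The skeletal generator below shows how such params would be consumed; the
# class is a hypothetical sketch, not this project's implementation, and it
# returns zero-filled placeholder batches instead of loading real images.
from keras.utils import Sequence

class DataGenerator(Sequence):
    def __init__(self, list_IDs, labels, dim, batch_size, n_classes,
                 n_channels, augment=False, shuffle=True):
        self.list_IDs = list_IDs
        self.labels = labels
        self.dim = dim
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.n_channels = n_channels
        self.augment = augment
        self.shuffle = shuffle

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # a real implementation would load and augment the images whose IDs
        # fall in this batch; placeholders keep the sketch self-contained
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, self.n_classes))
        return X, y

# usage sketch: generator = DataGenerator(ids, labels, **params)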

# Datasets
train_data_path = '../../train'
label_path = '../../train.csv'

labels = data.read_train_labels(label_path)

loss_weight = utils.class_weight(labels)
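
# utils.class_weight is project-local; a common formula for such a helper is
# the inverse-frequency ("balanced") weighting sketched below. This is an
# assumption about what it computes, not its actual definition.
def balanced_class_weight(label_array):
    classes, counts = np.unique(label_array, return_counts=True)
    return {c: len(label_array) / (len(classes) * n)
            for c, n in zip(classes, counts)}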

n_gpu = 2
n_epochs = 30
k_fold = False

if k_fold:
    train_data_set = data.kfold_dataset(labels, loss_weight, n_folds=5)
else:
    # split and shuffle data
    np.random.seed(2018)
    indexes = np.arange(len(labels))
    np.random.shuffle(indexes)
    # first 25500 shuffled samples for training, the rest for validation
    train_indexes = indexes[:25500]
    valid_indexes = indexes[25500:]
    # wrap the single split in a list so downstream code can iterate over it
    # the same way as over k-fold output
    train_data_set = [[train_indexes, valid_indexes]]
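
# For reference, the k-fold branch above could also be reproduced with
# scikit-learn, assuming downstream code only needs [train, valid] index
# pairs; kept as a comment so it does not overwrite the split above:
# from sklearn.model_selection import KFold
# kf = KFold(n_splits=5, shuffle=True, random_state=2018)
# train_data_set = [[tr, va] for tr, va in kf.split(np.arange(len(labels)))]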