print("Image ordering: {}".format(keras.backend.image_dim_ordering()))

    use_N4Correction = True
    print("Using N4 correction: {}".format(use_N4Correction))

    batch_size = 128
    patch_size = (64, 64)
    label_size = (8, 8)

    model = load_model(model_path)
    
    for dir_name in listdir(images_dir_path):
        if "brats" in dir_name:
            image_dir_path = os.path.join(images_dir_path, dir_name)
            print("Segmenting image {}".format(image_dir_path))
            image, image_dimension = loadTestImage(image_dir_path, use_N4Correction=use_N4Correction)
            image = normalize_scans([image], num_channels=4)[0]

            print("Image dimension", image_dimension)

            segmentation = segment(image, image_dimension, patch_size, label_size, model.predict, batch_size = batch_size, tf_ordering=tf_ordering)

            # save the segmentation as an 8-bit .mha volume
            segmentation = sitk.GetImageFromArray(segmentation)
            segmentation = sitk.Cast(segmentation, sitk.sitkUInt8)
            output_file = os.path.join(output_dir, dir_name) + ".mha"
            sitk.WriteImage(segmentation, output_file)
            print("Saved image to {}".format(output_file))

    print("--- {} seconds ---".format(time.time() - start_time))
Example 2
# channels-last for TensorFlow ordering, channels-first otherwise
if tf_ordering:
    shape = (patch_size[0], patch_size[1], num_channels)
else:
    shape = (num_channels, patch_size[0], patch_size[1])

if model_file is None:
    print("Creating new model")
    model = createModel(shape, tf_ordering)
else:
    print("Loading model from", model_file)
    model = load_model(model_file)

model.summary()
print("Trainable weights", model.trainable_weights)

(images, labels,
 image_dimensions) = loadImages(data_dir, use_N4Correction=use_N4Correction)
print("Normalizing scans")
images = normalize_scans(images, num_channels=4)
#(val_images, val_labels, val_dimensions) = loadImages(validation_dir, use_N4Correction = use_N4Correction)

assert image_dimensions == [image.shape for image in images]
#assert(val_dimensions == [image.shape for image in val_images])
print("Loaded %d training images" % len(images))
#print("Loaded %d validation images"%len(val_images))

# the two empty lists presumably stand in for validation images/labels
# (cf. the commented-out validation loading above)
dataExtractor = DataExtractor(images,
                              labels, [], [],
                              tf_ordering=tf_ordering,
                              patch_size=patch_size,
                              label_size=label_size)

print("Batch size", batch_size)
Example 3
# four input modalities; channels-last for TensorFlow ordering, channels-first otherwise
if tf_ordering:
    shape = (patch_size[0], patch_size[1], 4)
else:
    shape = (4, patch_size[0], patch_size[1])

if model_file is None:
    print("Creating new model")
    model = createModel(shape, tf_ordering)
else:
    print("Loading model from", model_file)
    model = load_model(model_file, custom_objects={'dice': dice})

model.summary()
print("Trainable weights", model.trainable_weights)

(images, labels,
 image_dimensions) = loadImages(data_dir, use_N4Correction=use_N4Correction)
print("Normalizing each scan")
images = normalize_scans(images, num_channels=4)

#(val_images, val_labels, val_dimensions) = loadImages(validation_dir, use_N4Correction = use_N4Correction)

assert image_dimensions == [image.shape for image in images]

print("Loaded %d training images" % len(images))

dataExtractor = DataExtractor(images,
                              labels,
                              tf_ordering=tf_ordering,
                              patch_size=patch_size)

# uniform per-class sampling weights, one entry per segmentation class
samples_weights = [1, 1, 1, 1, 1]
print("Using weights for data", samples_weights)