Example #1
0
 def test_SUBFUNCTIONS_prepare_MULTIPROCESSING(self):
     """Subfunctions prepared with multiprocessing produce one pickle per
     sample and batches of the expected (resized, one-hot) shapes."""
     # Build a small in-memory dataset of 5 random 3D image/segmentation pairs
     dataset = dict()
     for index in range(0, 5):
         image = (np.random.rand(16, 16, 16) * 255).astype(int)
         segmentation = (np.random.rand(16, 16, 16) * 3).astype(int)
         dataset["TEST.sample_" + str(index)] = (image, segmentation)
     # Wire the dictionary dataset into the Data I/O layer via a temp dir
     interface = Dictionary_interface(dataset, classes=3, three_dim=True)
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     batch_dir = os.path.join(self.tmp_dir.name, "batches")
     data_io = Data_IO(interface, input_path="", output_path="",
                       batch_path=batch_dir, delete_batchDir=False)
     # Preprocessor with three subfunctions, prepared across 4 processes
     subfunctions = [Resize((8,8,8)), Normalization(),
                     Clipping(min=-1.0, max=0.0)]
     preprocessor = Preprocessor(data_io, batch_size=1,
                                 prepare_subfunctions=True,
                                 analysis="fullimage",
                                 subfunctions=subfunctions,
                                 use_multiprocessing=True)
     preprocessor.mp_threads = 4
     indices = data_io.get_indiceslist()
     preprocessor.run_subfunctions(indices, training=True)
     batches = preprocessor.run(indices, training=True, validation=False)
     # One prepared-subfunction pickle must exist per sample
     self.assertEqual(len(os.listdir(batch_dir)), 5)
     for index in range(0, 5):
         pickle_path = os.path.join(
             batch_dir,
             str(preprocessor.data_io.seed) + ".TEST.sample_"
             + str(index) + ".pickle")
         self.assertTrue(os.path.exists(pickle_path))
         image = batches[index][0]
         segmentation = batches[index][1]
         self.assertIsNotNone(image)
         self.assertIsNotNone(segmentation)
         # Resize(8,8,8) + batch/channel dims: img 1 channel, seg 3 classes
         self.assertEqual(image.shape, (1,8,8,8,1))
         self.assertEqual(segmentation.shape, (1,8,8,8,3))
Example #2
0
# Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70
sf_resample = Resampling((1.58, 1.58, 2.70))
# Create a pixel value normalization Subfunction for z-score scaling
sf_zscore = Normalization(mode="z-score")

# Assemble Subfunction classes into a list
# NOTE(review): sf_clipping and sf_normalize are assumed to be defined
# earlier in the complete example — confirm before running this snippet.
sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore]

# Create and configure the Preprocessor class
# Subfunctions are pre-applied once per sample (prepare_subfunctions=True)
# using multiple processes (use_multiprocessing=True); training batches are
# random 160x160x80 crops ("patchwise-crop" analysis).
pp = Preprocessor(data_io, data_aug=None, batch_size=2, subfunctions=sf,
                  prepare_subfunctions=True, prepare_batches=False,
                  analysis="patchwise-crop", patch_shape=(160, 160, 80),
                  use_multiprocessing=True)
# Adjust the patch overlap for predictions (half-overlapping patches)
pp.patchwise_overlap = (80, 80, 30)
# Number of worker processes used for subfunction preparation
pp.mp_threads = 16

# Initialize the Architecture: a 4-level U-Net with softmax output
# and batch normalization
unet_standard = Architecture(depth=4, activation="softmax",
                             batch_normalization=True)

# Create the Neural Network model
# NOTE(review): "learninig_rate" looks misspelled, but it appears to match
# the parameter name in MiSCNN's Neural_Network API — verify against the
# installed library version before renaming it.
model = Neural_Network(preprocessor=pp, architecture=unet_standard,
                       loss=tversky_crossentropy,
                       metrics=[tversky_loss, dice_soft, dice_crossentropy],
                       batch_queue_size=3, workers=3, learninig_rate=0.001)

# Load the best model weights saved during fitting
model.load(path_model)

# Compute predictions