def tuning_loop(self):
    """Run a grid search over learning rate (ETA) and ROI size.

    Trains one model per (eta, ROI) combination, saving metrics and
    sample images for each run. Mutates ``self.CONFIG`` in place for
    every run (ETA, IMG_DIMS, CROP).
    """
    # Candidate learning rates: 1e-2, 1e-3, 1e-4
    etas = [float(np.power(10.0, -i)) for i in range(2, 5)]
    # Candidate ROI sizes: 512 / (2^x * DOWN_SAMP) for x = 0..3
    ROIs = [
        int(512 / (np.power(2, x) * self.CONFIG["EXPT"]["DOWN_SAMP"]))
        for x in range(4)
    ]

    # FIX: np.meshgrid(etas, ROIs) returns the eta grid FIRST, then the ROI
    # grid. The original assigned them to swapped names and compensated by
    # swapping the row indices below; here names and indices agree.
    eta_grid, ROI_grid = np.meshgrid(etas, ROIs)
    # Row 0: ROI for each run; row 1: eta for each run.
    hyper_params = np.vstack([ROI_grid.ravel(), eta_grid.ravel()])
    runs = hyper_params.shape[1]

    for i in range(runs):
        # Cast to plain Python float for consistency with how `etas` is
        # built (otherwise an np.float64 leaks into the config).
        self.CONFIG["HYPERPARAMS"]["ETA"] = float(hyper_params[1, i])
        ROI = int(hyper_params[0, i])
        self.CONFIG["EXPT"]["IMG_DIMS"] = [ROI, ROI, 12]

        # Select ROI or base model: full-size input uses the base UNet,
        # any smaller ROI uses the cropping variant.
        if ROI == 512 // self.CONFIG["EXPT"]["DOWN_SAMP"]:
            self.CONFIG["EXPT"]["CROP"] = 0
            Model = UNet(self.CONFIG)
        else:
            self.CONFIG["EXPT"]["CROP"] = 1
            Model = CropUNet(self.CONFIG)

        run_name = f"ETA_{self.CONFIG['HYPERPARAMS']['ETA']:.5f}"\
                   f"_ROI_{ROI}"
        self.Train = self.TrainingLoop(Model=Model,
                                       dataset=(self.train_ds, self.val_ds),
                                       config=self.CONFIG)

        print("=================================================")
        print(f"{run_name} ({i + 1} of {runs})")
        self.Train.training_loop(verbose=0)
        self.save_results(run_name)

        # Save sample images (ROI-aware saver for cropped models)
        if self.Train.config["EXPT"]["CROP"]:
            self.Train.save_images_ROI(
                epoch=None,
                tuning_path=f"{self.RESULTS_PATH}images_{run_name}")
        else:
            self.Train.save_images(
                epoch=None,
                tuning_path=f"{self.RESULTS_PATH}images_{run_name}")
# NOTE(review): this chunk starts mid-expression - the opening of the
# network spec (presumably a per-stage channel table) is outside this view.
    (4, 128),
    (4, 256),
], features_in=1)

# Classifier weights: one 1x1 filter per class over the final feature
# width. NG[-1, -1] presumably holds the last stage's channel count -
# confirm against the NG definition above this view.
W = torch.rand(nClasses, NG[-1, -1], 1, 1) * 1e-3

if args.net_type == 'imex':
    net = IMEXnet(h, NG, use_gpu)
    K, L = net.init_weights()
elif args.net_type == 'resnet':
    # NOTE(review): 'resnet' also instantiates IMEXnet, only with the
    # implicit part zeroed (L_mode='zero') - verify this is intentional.
    net = IMEXnet(h, NG, use_gpu)
    K, L = net.init_weights(L_mode='zero')
elif args.net_type == 'unet':
    # UNet holds its own parameters; no explicit/implicit weight split.
    W = None
    K = None
    L = None
    net = UNet(1, nClasses)
else:
    raise NotImplementedError()

if True:  # NOTE(review): always-true guard; looks like a leftover debug toggle
    if is_unet:  # `is_unet` is defined outside this view
        # Total trainable-parameter count for the UNet
        n_params = 0
        for p in net.parameters():
            n_params += p.numel()
        print('UNet Params : %d' % n_params)
    else:
        explicit_params, implicit_params = net.num_params()
        print('IMEX Params : %d' % (explicit_params + implicit_params))
        # NOTE(review): the explicit-only count is printed as "ResNet
        # Params" even when net_type is 'imex' - confirm intended.
        print('ResNet Params : %d' % (explicit_params))
from training_loops.OptimizerHelper import VggOneBlockFunctional

# Build each architecture in turn and print its name and Keras summary.
# Entries are (display name, zero-argument constructor); deferring the
# construction keeps only one model alive at a time, matching the
# sequential behavior of building them one after another.
_model_specs = [
    ('VGG19Net',
     lambda: Vgg19Net.build(IMAGE_DIMS[0], IMAGE_DIMS[1], IMAGE_DIMS[2],
                            classes=3)),
    ('SmallVggNet',
     lambda: SmallVggNet.build(IMAGE_DIMS[0], IMAGE_DIMS[1], IMAGE_DIMS[2],
                               classes=3)),
    ('InceptionV3',
     lambda: InceptionV3(input_shape=IMAGE_DIMS, classes=3, weights=None)),
    ('U-Net',
     lambda: UNet.build([IMAGE_DIMS[0], IMAGE_DIMS[1], 1], 3)),
    ('ResNet50',
     lambda: resnet50(IMAGE_DIMS, 3)),
    ('VggOneBlock',
     lambda: VggOneBlockFunctional.build(IMAGE_DIMS[0], IMAGE_DIMS[1],
                                         IMAGE_DIMS[2], classes=3)),
]

for _name, _build in _model_specs:
    model = _build()
    print(_name)
    model.summary()
# Validation-fold loader wrapped as a one-sample-per-batch tf Dataset;
# each element is a 4-tuple of float32 tensors (presumably NCE, ACE,
# segmentation, coords - confirm against the generator implementation).
TestGenerator = Loader(config=CONFIG["EXPT"], dataset_type="validation", fold=0)
test_ds = tf.data.Dataset.from_generator(
    generator=TestGenerator.data_generator,
    output_types=(tf.float32, tf.float32, tf.float32, tf.float32)).batch(1)

# Abdominal display-window bounds (presumably Hounsfield units - confirm
# against where they are applied downstream).
ABDO_WINDOW_MIN = -150
ABDO_WINDOW_MAX = 250

# Compile model
# NOTE(review): `Cropper` only exists on the non-CROP branches, and
# `Model` is undefined if MODEL is neither "UNet" nor "GAN" - verify the
# downstream code only touches them under matching configs.
if CONFIG["EXPT"]["MODEL"] == "UNet":
    if not CONFIG["EXPT"]["CROP"]:
        Model = UNet(config=CONFIG)
        Cropper = CropUNet(config=CROP_CONFIG)
    elif CONFIG["EXPT"]["CROP"]:
        Model = CropUNet(config=CONFIG)
elif CONFIG["EXPT"]["MODEL"] == "GAN":
    if not CONFIG["EXPT"]["CROP"]:
        Model = GAN(config=CONFIG)
        Cropper = CropGAN(config=CROP_CONFIG)
    elif CONFIG["EXPT"]["CROP"]:
        Model = CropGAN(config=CONFIG)

Model.load_weights(f"{MODEL_PATH}")

# Per-sample metric accumulators for the baseline (non-enhanced) images
base_MSE = []
base_pSNR = []
# NOTE(review): this chunk starts mid-call - the opening
# `tf.data.Dataset.from_generator(` is outside this view.
    generator=TestGenerator.data_generator,
    output_types=(tf.float32, tf.float32, tf.float32, tf.float32)).batch(1)

# Abdominal display-window bounds (presumably Hounsfield units - confirm
# against where they are applied downstream).
ABDO_WINDOW_MIN = -150
ABDO_WINDOW_MAX = 250

# Compile models
# One model per experiment name; outputs also keyed by the raw CE/NCE inputs.
models = dict.fromkeys(expts)
outputs = dict.fromkeys(["CE", "NCE"] + expts)

for expt in models.keys():
    model_path = f"{CONFIG['EXPT']['SAVE_PATH']}models/{CONFIG['EXPT']['MODEL']}/{expt}/{expt}"

    # "256" in the experiment name appears to mark a full-size
    # (uncropped) model; anything else uses the cropped variant with
    # CROP_CONFIG - confirm against the experiment naming scheme.
    if CONFIG["EXPT"]["MODEL"] == "UNet":
        if "256" in expt:
            models[expt] = UNet(config=CONFIG)
        else:
            models[expt] = CropUNet(config=CROP_CONFIG)
    elif CONFIG["EXPT"]["MODEL"] == "GAN":
        if "256" in expt:
            models[expt] = GAN(config=CONFIG)
        else:
            models[expt] = CropGAN(config=CROP_CONFIG)

    models[expt].load_weights(f"{model_path}")

count = 0

# NOTE(review): the loop body continues beyond this view.
for data in test_ds:
    NCE, ACE, seg, coords = data
def bind_net(self, **config):
    """Instantiate a ``UNet`` from the given keyword config and attach it.

    Parameters
    ----------
    **config
        Keyword arguments forwarded verbatim to the ``UNet`` constructor
        (signature defined elsewhere in the project).
    """
    self.net = UNet(**config)
def tuning_loop(self, runs):
    """Random-search hyper-parameter tuning.

    Draws ``runs`` random hyper-parameter combinations from
    ``self.tuning_config``, trains a fresh model for each combination,
    and saves metrics plus sample images per run.

    :param runs: number of random configurations to train
    :raises ValueError: on an unrecognised tuning key or model type, or
        when D_LAYERS_G / G_LAYERS are sampled before ROI
    """
    from copy import deepcopy  # local import: only this method needs it

    np.random.seed()

    for i in range(runs):
        run_name = ""
        # BUG FIX: the original took a SHALLOW copy of CONFIG, so the
        # writes to new_config["EXPT"][...] and new_config["HYPERPARAMS"][...]
        # below mutated the shared nested dicts - leaking each run's
        # hyper-parameters into self.CONFIG and all subsequent runs.
        new_config = deepcopy(self.CONFIG)
        ROI = None

        for key, val in self.tuning_config.items():
            if key in ["ETA", "D_ETA", "G_ETA", "LAMBDA", "GAMMA"]:
                # Log-uniform sample between 10**val[0] and 10**val[1]
                new_val = float(
                    np.power(10.0, np.random.uniform(val[0], val[1])))
                run_name += f"{key}_{new_val:.6f}_"
            elif key in ["MU"]:
                new_val = float(np.random.uniform(val[0], val[1]))
                run_name += f"{key}_{new_val:.2f}_"
            elif key in ["NF", "NDF_G", "NGF", "D_IN_CH"]:
                new_val = int(np.random.choice(val))
                run_name += f"{key}_{new_val}_"
            elif key in ["ROI"]:
                # ROI feeds IMG_DIMS, not HYPERPARAMS - hence `continue`
                # to skip the assignment at the bottom of the loop.
                ROI = int(np.random.choice(val))
                new_config["EXPT"]["IMG_DIMS"] = [ROI, ROI, 12]
                run_name += f"{key}_{ROI}_"
                continue
            elif key in ["D_LAYERS_G"]:
                # ROBUSTNESS: raise instead of assert (asserts are
                # stripped under -O), matching the ValueError style used
                # elsewhere in this method.
                if not ROI:
                    raise ValueError("ROI needs to come before D_LAYERS_G")
                new_val = int(np.random.randint(1, np.log2(ROI / 4)))
                run_name += f"{key}_{new_val}_"
            elif key in ["G_LAYERS"]:
                if not ROI:
                    raise ValueError("ROI needs to come before G_LAYERS")
                new_val = int(np.random.randint(2, np.log2(ROI)))

                # TODO: need a better solution to max_z_downsample == num_layers bug
                if new_val == 3:
                    new_val = 4 if np.random.rand() > 0.5 else 2

                run_name += f"{key}_{new_val}_"
            else:
                raise ValueError("Key not recognised")

            new_config["HYPERPARAMS"][key] = new_val

        run_name = run_name.strip('_')

        # Select ROI or base model: full-resolution runs use the base
        # model, smaller ROIs the cropping variant.
        if ROI == 512 // new_config["EXPT"]["DOWN_SAMP"]:
            new_config["EXPT"]["CROP"] = 0
            if self.CONFIG["EXPT"]["MODEL"] == "GAN":
                Model = GAN(new_config)
            elif self.CONFIG["EXPT"]["MODEL"] == "UNet":
                Model = UNet(new_config)
            else:
                raise ValueError("Model not recognised")
        else:
            new_config["EXPT"]["CROP"] = 1
            if self.CONFIG["EXPT"]["MODEL"] == "GAN":
                Model = CropGAN(new_config)
            elif self.CONFIG["EXPT"]["MODEL"] == "UNet":
                Model = CropUNet(new_config)
            else:
                raise ValueError("Model not recognised")

        self.Train = self.TrainingLoop(Model=Model,
                                       dataset=(self.train_ds, self.val_ds),
                                       config=new_config)

        print("=================================================")
        print(f"{run_name} ({i + 1} of {runs})")
        self.Train.training_loop(verbose=0)
        self.save_results(run_name)

        # Save sample images (ROI-aware saver for cropped models)
        if self.Train.config["EXPT"]["CROP"]:
            self.Train.save_images_ROI(
                epoch=None,
                tuning_path=f"{self.RESULTS_PATH}images_{run_name}")
        else:
            self.Train.save_images(
                epoch=None,
                tuning_path=f"{self.RESULTS_PATH}images_{run_name}")
# Create dataloader train_ds = tf.data.Dataset.from_generator( generator=TrainGenerator.data_generator, output_types=(tf.float32, tf.float32, tf.float32, tf.float32)).batch(MB_SIZE) val_ds = tf.data.Dataset.from_generator( generator=ValGenerator.data_generator, output_types=(tf.float32, tf.float32, tf.float32, tf.float32)).batch(MB_SIZE) # Compile model if CONFIG["EXPT"]["MODEL"] == "UNet": if not CONFIG["EXPT"]["CROP"]: Model = UNet(config=CONFIG) elif CONFIG["EXPT"]["CROP"]: Model = CropUNet(config=CONFIG) TrainingLoop = TrainingLoopUNet(Model=Model, dataset=(train_ds, val_ds), config=CONFIG) elif CONFIG["EXPT"]["MODEL"] == "GAN": if not CONFIG["EXPT"]["CROP"]: Model = GAN(config=CONFIG) elif CONFIG["EXPT"]["CROP"]: Model = CropGAN(config=CONFIG) TrainingLoop = TrainingLoopGAN(Model=Model, dataset=(train_ds, val_ds),