val_loader = Loader(val_path, arg.batch_size, transform=preprocess, sampler=arg.sampler,
                    torch_type=arg.dtype, cpus=arg.cpus, shuffle=False, drop_last=False)
test_loader = Loader(test_path, 1, torch_type=arg.dtype, cpus=arg.cpus,
                     shuffle=False, drop_last=False)

norm_layer = nn.BatchNorm2d
act = nn.ReLU

if arg.model == "unet":
    net = Unet2D(feature_scale=arg.feature_scale, act=act)
elif arg.model == "unetres":
    net = UnetRes2D(1, nn.InstanceNorm2d, is_pool=arg.pool)

net = nn.DataParallel(net).to(torch_device)

recon_loss = nn.BCEWithLogitsLoss()
model = CNNTrainer(arg, net, torch_device, recon_loss=recon_loss)

if arg.test is False:
    model.train(train_loader, val_loader)
model.test(test_loader, val_loader)
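# CNNTrainer is project-specific; the code above relies only on its constructor and its
# train/test methods. The skeleton below is a hedged sketch of that assumed interface;
# the optimizer, learning rate, epoch count, and the (image, mask) batch layout are
# guesses for illustration, not the repository's implementation.
import torch

class CNNTrainerSketch:
    def __init__(self, arg, net, torch_device, recon_loss):
        self.arg = arg
        self.net = net
        self.device = torch_device
        self.recon_loss = recon_loss
        self.optim = torch.optim.Adam(net.parameters(), lr=getattr(arg, "lr", 1e-4))

    def train(self, train_loader, val_loader=None):
        # Assumes each loader yields (image, mask) pairs.
        for epoch in range(getattr(self.arg, "epoch", 100)):
            self.net.train()
            for img, mask in train_loader:
                img, mask = img.to(self.device), mask.to(self.device)
                loss = self.recon_loss(self.net(img), mask)
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()

    def test(self, test_loader, val_loader=None):
        self.net.eval()
        with torch.no_grad():
            for img, mask in test_loader:
                pred = torch.sigmoid(self.net(img.to(self.device)))
                # Metric computation / prediction saving would go here.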
norm_layer = nn.InstanceNorm2d

if arg.act == "relu":
    act = nn.ReLU
elif arg.act == "elu":
    act = nn.ELU
elif arg.act == "leaky":
    act = nn.LeakyReLU
elif arg.act == "prelu":
    act = nn.PReLU

if arg.model == "fusion":
    net = Fusionnet(1, 1, arg.ngf, arg.clamp)
elif arg.model == "unet":
    net = Unet2D(feature_scale=arg.feature_scale, is_pool=arg.pool, act=act)
elif arg.model == "unetgn":
    net = UnetGN2D(feature_scale=arg.feature_scale, is_pool=arg.pool)
elif arg.model == "unetslim":
    net = UnetSlim(feature_scale=arg.feature_scale, norm=norm_layer)
elif arg.model == "unet_sh":
    net = UnetSH2D(arg.sh_size, feature_scale=arg.feature_scale, is_pool=arg.pool)
elif arg.model == "unetres":
    net = UnetRes2D(1, nn.InstanceNorm2d, is_pool=arg.pool)
elif arg.model == "unetgcn":
    net = UnetGCN(arg.feature_scale, norm=norm_layer, is_pool=arg.pool)
elif arg.model == "unetgcnseb":
    net = UnetGCNSEB(arg.feature_scale, norm=norm_layer, is_pool=arg.pool)
else:
    raise NotImplementedError("Model %s is not implemented" % arg.model)
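# The branches above read several attributes from `arg`, which implies a command-line
# interface roughly like the sketch below. Flag names mirror those attributes, but the
# defaults and help text are illustrative guesses, not the repository's actual CLI.
import argparse

def build_arg_parser():
    parser = argparse.ArgumentParser(description="2D segmentation training")
    parser.add_argument("--model", default="unet",
                        choices=["fusion", "unet", "unetgn", "unetslim", "unet_sh",
                                 "unetres", "unetgcn", "unetgcnseb"])
    parser.add_argument("--act", default="relu",
                        choices=["relu", "elu", "leaky", "prelu"])
    parser.add_argument("--feature_scale", type=int, default=4)
    parser.add_argument("--pool", action="store_true",
                        help="downsample with pooling instead of strided convolution")
    parser.add_argument("--ngf", type=int, default=32,
                        help="base number of Fusionnet filters")
    parser.add_argument("--clamp", type=float, default=None)
    parser.add_argument("--sh_size", type=int, default=3,
                        help="shared-block size for UnetSH2D")
    return parser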
                           shuffle=True, drop_last=True)
test_loader = nucleusloader(f_path_test, batch_size=1, transform=None,
                            cpus=arg.cpus, shuffle=False, drop_last=True)
# test_loader = RBCLoader(f_path + "/test", batch_size=arg.batch_size,
#                         transform=None, shuffle=True)

if arg.model == "fusion":
    net = Fusionnet(arg.in_channel, arg.out_channel, arg.ngf, arg.clamp)
elif arg.model == "unet":
    net = Unet2D(feature_scale=arg.feature_scale)
elif arg.model == "unet_sh":
    net = UnetSH2D(arg.sh_size, feature_scale=arg.feature_scale)
else:
    raise NotImplementedError("Model %s is not implemented" % arg.model)

net = nn.DataParallel(net).to(torch_device)

if arg.loss == "l2":
    recon_loss = nn.MSELoss()  # PyTorch has no nn.L2Loss; MSELoss is the L2 criterion
elif arg.loss == "l1":
    recon_loss = nn.L1Loss()
# Note: the edge-weighted loss below overrides whichever criterion was selected above.
recon_loss = EdgeWeightedLoss(1, 10)
# recon_loss = nn.BCELoss()

model = CNNTrainer(arg,
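# `EdgeWeightedLoss(1, 10)` is project-specific. A plausible reading of its two arguments
# is a base pixel weight of 1 and an extra weight of 10 on boundary pixels of the mask.
# The class below is a hedged sketch under that assumption, not the actual implementation.
import torch.nn as nn
import torch.nn.functional as F

class EdgeWeightedLossSketch(nn.Module):
    def __init__(self, base_weight=1.0, edge_weight=10.0, kernel=3):
        super().__init__()
        self.base_weight = base_weight
        self.edge_weight = edge_weight
        self.kernel = kernel
        self.pad = kernel // 2

    def forward(self, logits, target):
        # Morphological gradient of the binary mask marks boundary pixels.
        dilated = F.max_pool2d(target, self.kernel, stride=1, padding=self.pad)
        eroded = -F.max_pool2d(-target, self.kernel, stride=1, padding=self.pad)
        edges = (dilated - eroded).clamp(0, 1)
        weights = self.base_weight + self.edge_weight * edges
        return F.binary_cross_entropy_with_logits(logits, target, weight=weights)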
os.environ["CUDA_VISIBLE_DEVICES"] = arg.gpus torch_device = torch.device("cuda") train_path = "data/prostate/train/" val_path = "data/prostate/val" test_path = "data/prostate/test" preprocess = preprocess.get_preprocess(arg.augment) train_loader = Loader(train_path, arg.batch_size, transform = preprocess, sampler = '', torch_type = 'float', cpus = 4, shuffle = True, drop_last = True) val_loader = Loader(val_path, arg.batch_size, transform = preprocess, sampler = '', torch_type = 'float', cpus = 4, shuffle = True, drop_last = True) test_loader = Loader(test_path, arg.batch_size, transform = None, sampler = '', torch_type = 'float', cpus = 4, shuffle = True, drop_last = True) norm_layer = nn.BatchNorm2d act = nn.ReLU net = Unet2D(feature_scale = 4, act = act) net = nn.DataParallel(net).to(torch_device) recon_loss = nn.BCEWithLogitsLoss() model = CNNTrainer(arg, net, torch_device, recon_loss = recon_loss) if arg.test is False: model.train(train_loader, val_loader) model.test(test_loader, val_loader)