def __init__(self, opt):
    """Initialize the CAE model."""
    BaseModel.__init__(self, opt)
    self.opt = opt
    img_size = (self.opt.channels, self.opt.img_size, self.opt.img_size)
    latent = self.opt.latent
    # Wrap each sub-network with init_net, which applies data-parallel
    # placement and weight initialization.
    self.encoder = init_net(networks.Encoder_aae(latent).cuda(), gpu=opt.gpu, mode=opt.mode)
    self.decoder = init_net(networks.Decoder(img_size, latent).cuda(), gpu=opt.gpu, mode=opt.mode)
    self.discriminator = init_net(networks.Discriminator(latent).cuda(), gpu=opt.gpu, mode=opt.mode)
    self.networks = ['encoder', 'decoder', 'discriminator']
    self.criterion = torch.nn.MSELoss()      # reconstruction loss
    self.criterion_dm = torch.nn.BCELoss()   # adversarial (discriminator) loss
    self.visual_names = ['generated_imgs']
    self.model_name = self.opt.model
    self.loss_name = ['recon_loss', 'dm_loss', 'g_loss']
    self.real_label = torch.ones([self.opt.batch_size, 1])
    self.fake_label = torch.zeros([self.opt.batch_size, 1])
    if self.opt.mode == 'train':
        # In training mode, create the optimizers and enable gradients.
        # The discriminator trains with a 5x smaller learning rate.
        self.optimizer_e = torch.optim.Adam(self.encoder.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
        self.optimizer_d = torch.optim.Adam(self.decoder.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
        self.optimizer_dm = torch.optim.Adam(self.discriminator.parameters(), lr=self.opt.lr / 5, betas=(self.opt.beta1, self.opt.beta2))
        self.optimizers.append(self.optimizer_e)
        self.optimizers.append(self.optimizer_d)
        self.optimizers.append(self.optimizer_dm)
        self.set_requires_grad(self.decoder, self.encoder, self.discriminator, requires_grad=True)
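For context, the three loss terms named in loss_name map onto the standard adversarial-autoencoder update. The sketch below is a minimal, hypothetical optimize_parameters consistent with the fields initialized above; the self.input attribute and the standard-normal prior over the latent code are assumptions, not confirmed by the source.

def optimize_parameters(self):
    """One AAE update: reconstruction, then discriminator, then generator (sketch)."""
    # 1) Reconstruction: encoder and decoder jointly minimize MSE.
    self.generated_imgs = self.decoder(self.encoder(self.input))
    self.recon_loss = self.criterion(self.generated_imgs, self.input)
    self.optimizer_e.zero_grad()
    self.optimizer_d.zero_grad()
    self.recon_loss.backward()
    self.optimizer_e.step()
    self.optimizer_d.step()

    # 2) Discriminator: prior samples are "real", encoded latents are "fake".
    z_prior = torch.randn(self.opt.batch_size, self.opt.latent, device=self.input.device)
    z_enc = self.encoder(self.input).detach()  # detach: do not update the encoder here
    real = self.real_label.to(self.input.device)
    fake = self.fake_label.to(self.input.device)
    self.dm_loss = (self.criterion_dm(self.discriminator(z_prior), real)
                    + self.criterion_dm(self.discriminator(z_enc), fake))
    self.optimizer_dm.zero_grad()
    self.dm_loss.backward()
    self.optimizer_dm.step()

    # 3) Generator: the encoder tries to make its latents look like prior samples.
    self.g_loss = self.criterion_dm(self.discriminator(self.encoder(self.input)), real)
    self.optimizer_e.zero_grad()
    self.g_loss.backward()
    self.optimizer_e.step()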
def __init__(self, args):
    super(BayesNet, self).__init__()
    self.hparams = args
    self.train_dset, self.test_dset, args = utils.load_dataset(self.hparams)
    self.net, self.hparams = models.init_net(self.hparams)
    self.batch_size = self.hparams.batch_size
    self.lr = self.hparams.lr
    # Pick the log-likelihood term and metrics for the task.
    if self.hparams.task == 'clf':
        ll = losses.cr_ent_ll
        self.metrics = {'accuracy': metr.accuracy}
    elif self.hparams.task == 'seg':
        ll = losses.unet_binary_ll
        self.metrics = {'IOU': metr.iou, 'DICE': metr.dice}
    if self.hparams.dwp:
        # DWP prior: convolutional kernels get a learned VAE prior, one VAE per layer.
        self.loss_fun = losses.DWPLoss(log_lik=ll, anneal=None, N=len(self.train_dset))
        self.prior = 'dwp'
        self.prior_dataset = args.prior
        if self.hparams.kernel_dimention == 2:
            self.vae_class = models.kernel_vae.KernelVAE
        elif self.hparams.kernel_dimention == 3:
            self.vae_class = models.kernel_vae.KernelVAE3D
        self.vaes, self.layer_names = self.load_vae()
        print(self.layer_names)
    else:
        # Otherwise fall back to a standard-normal prior with variational dropout.
        self.loss_fun = losses.VarDropoutLoss(log_lik=ll, anneal=None, N=len(self.train_dset))
        self.prior = 'normal'
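Both branches produce a self.loss_fun built from a task log-likelihood plus a prior-dependent regularizer scaled by the dataset size N. A minimal, hypothetical Lightning-style training step consistent with this setup might look as follows; the (output, target, model) loss signature is an assumption, not confirmed by the source.

def training_step(self, batch, batch_idx):
    x, y = batch
    out = self.net(x)
    # Hypothetical signature: DWPLoss/VarDropoutLoss combine the task
    # log-likelihood with a prior term that needs the network's parameters.
    loss = self.loss_fun(out, y, self.net)
    return {'loss': loss}

def configure_optimizers(self):
    return torch.optim.Adam(self.net.parameters(), lr=self.lr)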
# Standard-library and third-party imports; project-local helpers (parse_opts,
# init_dataset, init_net, train_epoch, evaluate, FocalLoss) are assumed
# importable from the surrounding package.
import os
from collections import Counter

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim


def run():
    args = parse_opts()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = "9"

    # Global configuration.
    MODE = args.mode
    CLASS_WEIGHT = False
    N_EP = 20
    FLATTEN = args.flatten
    RNN = args.rnn
    BATCH_SIZE = args.batch_size

    datasets, dataloaders = init_dataset(BATCH_SIZE, single_channel=args.single_channel)
    print('[Train] class counts', np.unique(datasets['train'].target_vals, return_counts=True))
    print('[Test] class counts', np.unique(datasets['test'].target_vals, return_counts=True))

    # The input depth depends on whether volumes are sized to the dataset's
    # minimum or maximum slice depth.
    n_ch = 1 if args.single_channel else 3
    if MODE == 'min':
        in_channels = datasets['train'].min_depth * n_ch
    elif MODE == 'max':
        in_channels = datasets['train'].max_depth * n_ch
    else:
        raise ValueError(f"Unknown mode: {MODE!r}")

    torch.manual_seed(0)
    net = init_net(opt=args.model_idx, in_channels=in_channels)

    # Optional inverse-frequency class weights for the cross-entropy loss.
    class_weight = None
    if CLASS_WEIGHT:
        cnts = Counter(datasets['train'].target_vals)
        class_weight = [max(cnts.values()) / cnts['0'],
                        max(cnts.values()) / cnts['1']]
        class_weight = torch.FloatTensor(class_weight)

    cross_entrp_loss = nn.CrossEntropyLoss(weight=class_weight).cuda()
    focal_loss = FocalLoss().cuda()  # alternative criterion (unused here)
    optimizer = optim.Adam(net.parameters(), lr=0.000027)
    criterion = cross_entrp_loss
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    #     optimizer, 'min', verbose=True, patience=7)

    for ep in range(N_EP):
        train_epoch(net, dataloaders['train'], optimizer, criterion, ep,
                    scheduler=None, flatten=FLATTEN, MODE=MODE, rnn=RNN)
        valid_loss = evaluate(net, dataloaders['test'], criterion, ep,
                              flatten=FLATTEN, MODE=MODE, rnn=RNN)
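If the commented-out scheduler is re-enabled, ReduceLROnPlateau should be stepped on the validation loss at the end of each epoch, since it reduces the learning rate only when the monitored metric stops improving:

scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, 'min', verbose=True, patience=7)
for ep in range(N_EP):
    train_epoch(net, dataloaders['train'], optimizer, criterion, ep,
                scheduler=None, flatten=FLATTEN, MODE=MODE, rnn=RNN)
    valid_loss = evaluate(net, dataloaders['test'], criterion, ep,
                          flatten=FLATTEN, MODE=MODE, rnn=RNN)
    scheduler.step(valid_loss)  # cut LR after `patience` epochs without improvement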
def __init__(self, args):
    super(BaseModel, self).__init__()
    self.hparams = args
    self.train_dset, self.test_dset, args = utils.load_dataset(self.hparams)
    self.net, self.hparams = models.init_net(self.hparams)
    self.batch_size = self.hparams.batch_size
    self.lr = self.hparams.lr
    # Task-specific loss and metrics.
    if self.hparams.task == 'clf':
        self.loss_fun = losses.cr_ent_ll
        self.metrics = {'accuracy': metr.accuracy}
    elif self.hparams.task == 'seg':
        self.loss_fun = losses.unet_binary_ll
        self.metrics = {'IOU': metr.iou, 'DICE': metr.dice}
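The metrics dictionary keeps evaluation task-agnostic: callers iterate over it instead of hard-coding accuracy versus IOU/DICE. A minimal, hypothetical validation step under that assumption (the (output, target) signatures of the loss and metric functions are not confirmed by the source):

def validation_step(self, batch, batch_idx):
    x, y = batch
    out = self.net(x)
    results = {'val_loss': self.loss_fun(out, y)}  # hypothetical (output, target) signature
    for name, fn in self.metrics.items():
        results['val_' + name] = fn(out, y)
    return results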