def __init__(self, config):
    self.config = config
    # load data
    self.train_dataloader, self.valid_dataloader = get_train_valid_loader(
        self.config.data_dir, self.config.batch_size, self.config.seed)
    num_channels = self.train_dataloader.dataset[0][0].shape[0]
    self.num_classes = len(self.train_dataloader.dataset.classes)
    # define model
    self.model = VisualSelfAttn(self.num_classes, config.num_heads,
                                config.num_layers, config.d_k, config.d_v,
                                config.hid_dim, config.dropout, config.mode)
    if config.gpu:
        self.model = self.model.cuda()
    self.model_name = 'VSA_{}_{}_{}_{}_{}_{}'.format(
        config.mode, config.num_heads, config.num_layers,
        config.d_k, config.d_v, config.hid_dim)
    # define optimizer and loss function
    self.opt = optim.Adam(self.model.parameters(), lr=config.lr)
    self.loss_fn = nn.CrossEntropyLoss()
    self.summary
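# A minimal sketch of the training step this __init__ prepares, assuming the
# usual PyTorch loop; `train_one_epoch` and the (images, labels) batch format
# are assumptions, not part of the original file.
def train_one_epoch(model, dataloader, opt, loss_fn, use_gpu=False):
    """Run one optimization pass over the dataloader; return the mean loss."""
    model.train()
    total_loss = 0.0
    for images, labels in dataloader:
        if use_gpu:
            images, labels = images.cuda(), labels.cuda()
        opt.zero_grad()                 # clear gradients from the previous step
        logits = model(images)          # (batch, num_classes) class scores
        loss = loss_fn(logits, labels)  # CrossEntropyLoss expects raw logits
        loss.backward()
        opt.step()                      # apply the Adam update
        total_loss += loss.item()
    return total_loss / len(dataloader)

# Typical call from the trainer:
# train_one_epoch(self.model, self.train_dataloader, self.opt, self.loss_fn,
#                 self.config.gpu)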
def __init__(self, config):
    self.config = config
    # load data
    self.train_dataloader, self.valid_dataloader = get_train_valid_loader(
        self.config.data_dir, self.config.batch_size, self.config.seed)
    num_channels = self.train_dataloader.dataset[0][0].shape[0]
    # define model
    self.model = Model(config.num_classes, config.emb_dim, config.hid_dim,
                       config.dropout, config.finetune)
    if config.gpu:
        self.model.cuda()
    self.model_name = 'SAT_{}_{}_{}_{}'.format(
        config.emb_dim, config.hid_dim, config.repeat, config.finetune)
    # define optimizer and loss function
    params = filter(lambda p: p.requires_grad, self.model.parameters())
    self.opt = optim.Adam(params, lr=config.lr)
    self.loss_fn = nn.CrossEntropyLoss()
    self.summary
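# The `filter(lambda p: p.requires_grad, ...)` line only has an effect if some
# parameters were frozen beforehand. A minimal sketch of that pattern, assuming
# `finetune=False` freezes a pretrained encoder; `SketchModel` and its layers
# are illustrative stand-ins, not part of this repo.
import torch.nn as nn
import torch.optim as optim


class SketchModel(nn.Module):
    def __init__(self, num_classes, finetune=False):
        super().__init__()
        # stand-in encoder; in the real model this would be a pretrained CNN
        self.encoder = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1),
                                     nn.ReLU(),
                                     nn.AdaptiveAvgPool2d(1),
                                     nn.Flatten())
        if not finetune:
            # freeze the encoder so Adam never sees its parameters
            for p in self.encoder.parameters():
                p.requires_grad = False
        self.classifier = nn.Linear(64, num_classes)

    def forward(self, x):
        return self.classifier(self.encoder(x))


model = SketchModel(num_classes=10, finetune=False)
params = filter(lambda p: p.requires_grad, model.parameters())
opt = optim.Adam(params, lr=1e-3)  # optimizes only the classifier head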
        predictions.append(pred_mask)
        img_ids.append(id)
    return predictions, img_ids


if __name__ == '__main__':
    """Train Unet model"""
    opt = Option()
    model = UNet2(input_channels=3, nclasses=1)
    if opt.is_train:
        # split all data to train and validation, set split = True
        train_loader, val_loader = get_train_valid_loader(
            opt.root_dir, batch_size=opt.batch_size, split=True,
            shuffle=opt.shuffle, num_workers=opt.num_workers,
            val_ratio=0.1, pin_memory=opt.pin_memory)
        # load all data for training
        # train_loader = get_train_valid_loader(opt.root_dir, batch_size=opt.batch_size,
        #                                       split=False, shuffle=opt.shuffle,
        #                                       num_workers=opt.num_workers,
        #                                       val_ratio=0.1, pin_memory=opt.pin_memory)
        if opt.n_gpu > 1:
            model = nn.DataParallel(model)
        if opt.is_cuda:
            model = model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=opt.learning_rate,
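# `get_train_valid_loader` is defined elsewhere in the repo; a minimal sketch
# matching the call site above, assuming a map-style Dataset and a random index
# split. The TensorDataset here is a placeholder assumption for the real data.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler


def get_train_valid_loader(root_dir, batch_size, split=True, shuffle=True,
                           num_workers=0, val_ratio=0.1, pin_memory=False):
    # stand-in for loading images/masks from root_dir
    dataset = TensorDataset(torch.randn(100, 3, 128, 128),
                            torch.randn(100, 1, 128, 128))
    if not split:
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                          num_workers=num_workers, pin_memory=pin_memory)
    # hold out the first val_ratio fraction of a random permutation
    indices = np.random.permutation(len(dataset))
    n_val = int(len(dataset) * val_ratio)
    train_sampler = SubsetRandomSampler(indices[n_val:])
    val_sampler = SubsetRandomSampler(indices[:n_val])
    train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_sampler,
                              num_workers=num_workers, pin_memory=pin_memory)
    val_loader = DataLoader(dataset, batch_size=batch_size, sampler=val_sampler,
                            num_workers=num_workers, pin_memory=pin_memory)
    return train_loader, val_loader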
            np.squeeze(image.data.cpu().numpy().transpose(
                (0, 2, 3, 1))[i]))
        predictions.append(pred_mask)
        ground_truth.append(np.squeeze(label[i]))
    return images, predictions, ground_truth


if __name__ == '__main__':
    """Train Unet model"""
    model = UNet2(input_channels=1, nclasses=1)
    if Option.is_train:
        # split all data to train and validation, set split = True
        train_loader, val_loader = get_train_valid_loader(
            Option.root_dir, batch_size=Option.batch_size,
            shuffle=Option.shuffle, num_workers=Option.num_workers,
            pin_memory=Option.pin_memory)
        if Option.n_gpu > 1:
            model = nn.DataParallel(model)
        if Option.is_cuda:
            model = model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=Option.learning_rate,
                               weight_decay=Option.weight_decay)
        criterion = nn.MSELoss().cuda()
        # start to run a training
        run(model, train_loader, val_loader, criterion)
        # make prediction on validation set
        # predictions, img_ids = run_test(model, val_loader, Option)
        # SAVE model
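# A minimal sketch of what `run` presumably does: alternate training and
# validation epochs. The epoch count is an assumption, and the sketch takes the
# optimizer as an argument where the script above likely reads it from module
# scope.
import torch


def run(model, train_loader, val_loader, criterion, optimizer, n_epochs=10):
    for epoch in range(n_epochs):
        model.train()
        for image, label in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(image), label)
            loss.backward()
            optimizer.step()
        model.eval()
        val_loss = 0.0
        with torch.no_grad():  # no gradients needed for validation
            for image, label in val_loader:
                val_loss += criterion(model(image), label).item()
        print('epoch {}: val loss {:.4f}'.format(epoch, val_loss / len(val_loader)))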
        if not isinstance(h, int):
            h = h.cpu().numpy()
            w = w.cpu().numpy()
        pred_mask = resize(pred_mask, (h, w), mode='constant')
        pred_mask = (pred_mask > 0.5)
        predictions.append(pred_mask)
        img_ids.append(id)
    return predictions, img_ids


if __name__ == '__main__':
    train_loader, val_loader = get_train_valid_loader(
        Opt.dataset_path, batch_size=Opt.batch_size, split=True,
        shuffle=Opt.shuffle, num_workers=Opt.num_workers,
        val_ratio=0.1, pin_memory=Opt.pin_memory)
    netG = netG()
    netG.load_state_dict(
        torch.load(os.path.join(Opt.checkpoint_dir, 'model-01.pt')))
    # netG.apply(weights_init)
    netD = netD()
    criterion = torch.nn.BCELoss()
    # netD.apply(weights_init)
    if Opt.ngpu > 1:
        netG = nn.DataParallel(netG)
        netD = nn.DataParallel(netD)
    if Opt.is_cuda:
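# A minimal sketch of the alternating GAN update that BCELoss sets up here,
# assuming a standard DCGAN-style loop; the latent size, label conventions,
# and the noise sampling are assumptions, not taken from this script.
import torch


def gan_step(netG, netD, real, criterion, optD, optG, z_dim=100):
    batch = real.size(0)
    real_y = torch.ones(batch, 1)   # label 1 for real samples
    fake_y = torch.zeros(batch, 1)  # label 0 for generated samples
    # --- discriminator update: push real up, fake down ---
    optD.zero_grad()
    fake = netG(torch.randn(batch, z_dim))
    d_loss = (criterion(netD(real), real_y) +
              criterion(netD(fake.detach()), fake_y))  # detach: no G gradients here
    d_loss.backward()
    optD.step()
    # --- generator update: make D classify fakes as real ---
    optG.zero_grad()
    g_loss = criterion(netD(fake), real_y)
    g_loss.backward()
    optG.step()
    return d_loss.item(), g_loss.item()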