import os

import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from skimage import io


def visualize_segmenter(self, dataset, export_path, use_gpu=False, with_ground_truth=False, n_inst=None):
    chk_mkdir(export_path)
    self.s.train(False)

    # default to visualizing the whole dataset
    if n_inst is None:
        n_inst = len(dataset)
    else:
        assert n_inst >= 0, 'n_inst must be nonnegative'

    for batch_idx, (X_batch, y_batch, name) in enumerate(DataLoader(dataset, batch_size=1)):
        if use_gpu:
            X_batch = Variable(X_batch.cuda())
            y_out = self.s(X_batch).cpu().data.numpy()
            X_im = X_batch[0].data.cpu().numpy().transpose((1, 2, 0))
        else:
            X_batch = Variable(X_batch)
            y_out = self.s(X_batch).data.numpy()
            X_im = X_batch[0].data.numpy().transpose((1, 2, 0))

        if with_ground_truth:
            visualize_segmentation(
                X=X_im, y_mask=y_batch[0, 1, :, :].numpy(), y_pred=y_out[0, 1, :, :],
                export_path=os.path.join(export_path, name[0] + '.png')
            )
        else:
            io.imsave(os.path.join(export_path, name[0] + '.png'), y_out[0, 1, :, :])

        # stop once n_inst instances have been exported
        if batch_idx + 1 >= n_inst:
            break
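# `chk_mkdir` is used throughout but not defined in this section. A minimal sketch,
# assuming it only creates the requested directories (it is called with a single
# path above and with several paths in the script at the bottom):
def chk_mkdir(*paths):
    for path in paths:
        os.makedirs(path, exist_ok=True)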
def visualize(self, dataset, n_inst=20, folder_name='comparison'):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    if n_inst is None:
        n_inst = len(dataset)
    else:
        assert n_inst >= 0, 'n_inst must be nonnegative'

    for batch_idx, (X_batch, name) in enumerate(DataLoader(dataset, batch_size=1)):
        if self.use_gpu:
            X_batch = Variable(X_batch.cuda())
            X_out = self.model(X_batch).cpu().data.numpy()
            X_im = X_batch[0].data.cpu().numpy().transpose((1, 2, 0))
        else:
            X_batch = Variable(X_batch)
            X_out = self.model(X_batch).data.numpy()
            X_im = X_batch[0].data.numpy().transpose((1, 2, 0))

        # warn when the model output shape does not match the input shape
        if sum(abs(X_out.shape[k] - X_batch.shape[k]) for k in range(len(X_out.shape))) != 0:
            print('shape mismatch, model output shape:', X_out.shape)

        visualize_segmentation(
            X=X_im, y_mask=1 - X_im, y_pred=1 - X_out[0].transpose((1, 2, 0)),
            export_path=os.path.join(export_path, name[0] + '.png')
        )

        if batch_idx + 1 >= n_inst:
            break
def visualize(self, dataset, folder_name='visualize', n_inst=None):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    if n_inst is None:
        n_inst = len(dataset)
    else:
        assert n_inst >= 0, 'n_inst must be nonnegative'

    for batch_idx, (X_batch, y_mask_batch, y_rcnn_batch, name) in enumerate(DataLoader(dataset, batch_size=1)):
        if self.use_gpu:
            X_batch, y_rcnn_batch = Variable(X_batch.cuda()), Variable(y_rcnn_batch.cuda())
            y_out = self.model(torch.cat([X_batch, y_rcnn_batch], dim=1)).cpu().data.numpy()
            X_im = X_batch[0].data.cpu().numpy().transpose((1, 2, 0))
        else:
            X_batch, y_rcnn_batch = Variable(X_batch), Variable(y_rcnn_batch)
            y_out = self.model(torch.cat([X_batch, y_rcnn_batch], dim=1)).data.numpy()
            X_im = X_batch[0].data.numpy().transpose((1, 2, 0))

        # compare the ground-truth mask against the prediction (both inverted for display)
        visualize_segmentation(
            X=X_im,
            y_mask=1 - y_mask_batch[0, 1:].numpy().transpose((1, 2, 0)),
            y_pred=1 - y_out[0, 1:].transpose((1, 2, 0)),
            export_path=os.path.join(export_path, name[0] + '.png')
        )

        if batch_idx + 1 >= n_inst:
            break
def visualize(self, dataset, n_inst=20, folder_name='comparison'):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    if n_inst is None:
        n_inst = len(dataset)
    else:
        assert n_inst >= 0, 'n_inst must be nonnegative'

    for batch_idx, (X_batch, y_batch, *rest) in enumerate(DataLoader(dataset, batch_size=1)):
        # the dataset may yield extra tensors; the instance name is always last
        name = rest[-1]

        if self.use_gpu:
            X_batch = Variable(X_batch.cuda())
            y_out = self.model(X_batch).cpu().data.numpy()
            X_im = X_batch[0].data.cpu().numpy().transpose((1, 2, 0))
        else:
            X_batch = Variable(X_batch)
            y_out = self.model(X_batch).data.numpy()
            X_im = X_batch[0].data.numpy().transpose((1, 2, 0))

        visualize_segmentation(
            X=X_im, y_mask=y_batch[0, 0, :, :].numpy(), y_pred=y_out[0, 0],
            export_path=os.path.join(export_path, name[0] + '.png')
        )

        if batch_idx + 1 >= n_inst:
            break
def visualize(self, dataset, folder_name):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    for batch_idx, (X_image, y_prob, y_label, name) in enumerate(DataLoader(dataset, batch_size=1)):
        if self.use_gpu:
            X_image = Variable(X_image.cuda())
            X_pred = self.model(X_image).data.cpu().numpy()
        else:
            X_image = Variable(X_image)
            X_pred = self.model(X_image).data.numpy()

        # the original snippet ends here without exporting X_pred; assuming it follows
        # the pattern of the other visualizers, a minimal export would be:
        io.imsave(os.path.join(export_path, name[0] + '.png'), X_pred[0].transpose((1, 2, 0)))
def predict(self, dataset, folder_name='prediction'):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    for batch_idx, (X_batch, name) in enumerate(DataLoader(dataset, batch_size=1)):
        if self.use_gpu:
            X_batch = Variable(X_batch.cuda())
            X_out = self.model(X_batch).cpu().data.numpy()
        else:
            X_batch = Variable(X_batch)
            X_out = self.model(X_batch).data.numpy()

        # save the prediction as an (H, W, C) image
        io.imsave(os.path.join(export_path, name[0] + '.png'), X_out[0].transpose((1, 2, 0)))
def __init__(
        self, g, g_optim, g_loss, t, t_optim,
        model_name, results_root_path,
        g_scheduler=None, t_scheduler=None
):
    self.g = g
    self.g_optim = g_optim
    self.g_loss = g_loss
    self.t = t
    self.t_optim = t_optim
    self.g_scheduler = g_scheduler
    self.t_scheduler = t_scheduler
    self.model_name = model_name
    self.model_results_path = os.path.join(results_root_path, model_name)
    self.use_gpu = torch.cuda.is_available()

    chk_mkdir(self.model_results_path)

    if self.use_gpu:
        self.g.cuda()
        self.g_loss.cuda()
        self.t.cuda()
def __init__(
        self, model, optimizer, model_name, results_root_path, scheduler=None
):
    self.model = model
    self.optimizer = optimizer
    self.scheduler = scheduler
    self.model_name = model_name
    self.model_results_path = os.path.join(results_root_path, model_name)
    self.use_gpu = torch.cuda.is_available()

    if self.use_gpu:
        self.model.cuda()

    chk_mkdir(self.model_results_path)
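# Hypothetical usage of the wrapper above (class and variable names are illustrative,
# not taken from this code):
#
#   unet = UNet(in_channels=3, out_channels=2)
#   wrapper = ModelWrapper(
#       model=unet,
#       optimizer=torch.optim.Adam(unet.parameters(), lr=1e-3),
#       model_name='UNet_test',
#       results_root_path='/tmp/results',
#   )
#   wrapper.predict(test_dataset)  # writes one PNG per instance to /tmp/results/UNet_test/prediction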
def predict(self, dataset, folder_name='prediction'):
    self.model.train(False)
    export_path = os.path.join(self.model_results_path, folder_name)
    chk_mkdir(export_path)

    for batch_idx, (X_batch, y_mask_batch, y_rcnn_batch, name) in enumerate(DataLoader(dataset, batch_size=1)):
        # the RCNN output is concatenated to the image along the channel axis as extra input
        if self.use_gpu:
            X_batch, y_rcnn_batch = Variable(X_batch.cuda()), Variable(y_rcnn_batch.cuda())
            y_out = self.model(torch.cat([X_batch, y_rcnn_batch], dim=1)).cpu().data.numpy()
        else:
            X_batch, y_rcnn_batch = Variable(X_batch), Variable(y_rcnn_batch)
            y_out = self.model(torch.cat([X_batch, y_rcnn_batch], dim=1)).data.numpy()

        io.imsave(os.path.join(export_path, name[0] + '.png'), y_out[0].transpose((1, 2, 0)))
def __init__(
        self, g, g_optim, d, d_optim, d_loss,
        model_name, results_root_path,
        g_scheduler=None, d_scheduler=None,
):
    self.g = g
    self.d = d
    self.g_optim = g_optim
    self.d_optim = d_optim
    self.d_loss = d_loss
    self.g_scheduler = g_scheduler
    self.d_scheduler = d_scheduler
    self.model_name = model_name
    self.model_results_path = os.path.join(results_root_path, model_name)
    self.use_gpu = torch.cuda.is_available()

    chk_mkdir(self.model_results_path)

    if self.use_gpu:
        self.g.cuda()
        self.d.cuda()
        self.d_loss.cuda()
def __init__(
        self, model, loss, optimizer, model_name, results_root_path,
        scheduler=None, validation_loss=None
):
    self.model = model
    self.loss = loss
    self.optimizer = optimizer
    self.scheduler = scheduler
    self.model_name = model_name
    self.model_results_path = os.path.join(results_root_path, model_name)
    self.use_gpu = torch.cuda.is_available()

    # fall back to the training loss when no separate validation loss is given
    if validation_loss is None:
        self.validation_loss = loss
    else:
        self.validation_loss = validation_loss

    if self.use_gpu:
        self.model.cuda()
        self.loss.cuda()
        # self.validation_loss.cuda()

    chk_mkdir(self.model_results_path)
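# A hypothetical instantiation of the trainer above (names are illustrative);
# leaving validation_loss unset makes self.validation_loss default to the training loss:
#
#   trainer = Trainer(
#       model=unet,
#       loss=nn.CrossEntropyLoss(),
#       optimizer=torch.optim.Adam(unet.parameters()),
#       model_name='UNet_test',
#       results_root_path='/tmp/results',
#   )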
use_gpu = torch.cuda.is_available()

# paths
model_name = 'UNet_GANv2_test'
result_root_path = '/media/tdanka/B8703F33703EF828/tdanka'
all_datasets = '/home/tdanka/Data/new'
results_path = os.path.join(result_root_path, model_name)
s_comparison_path = os.path.join(results_path, 's_comparison')

# creating folders
chk_mkdir(result_root_path, results_path)

# datasets
train_dataset = TrainFromFolder(
    os.path.join(all_datasets, 'stage1_train_tissue_weighted_patch=256/loc.csv'),
    transform=T.ToTensor(), remove_alpha=True, one_hot_mask=2, weighted=True
)
x = next(iter(train_dataset))  # sanity check: fetch a single training instance
train_original_dataset = TrainFromFolder(
    os.path.join(all_datasets, 'stage1_train_merged/loc.csv'),
    transform=T.ToTensor(), remove_alpha=True, one_hot_mask=2
)
test_dataset = TestFromFolder(os.path.join(all_datasets,