def evaluate_ensemble(config: ConfigClass, models_list):
    """Evaluate an ensemble of models by averaging their sigmoid outputs.

    Each batch is pushed through every model in ``models_list``; the
    per-model sigmoid probabilities are averaged and fed (unprocessed) to
    the segmentation metrics.

    :param config: experiment configuration (gpu node, data settings,
        binarization threshold).
    :param models_list: iterable of trained torch models forming the ensemble.
    :return: computed segmentation metrics over the evaluation set.
    """
    device = torch.device(
        f'cuda:{config.gpu_node}' if torch.cuda.is_available() else 'cpu')

    # Create dataloader class
    loader_class = get_dataloaders(config.data)

    segment_metrics = SegmentationMetrics(num_classes=loader_class.num_classes,
                                          threshold=config.binarize_threshold)

    # Switch every member to eval mode once, up front. Previously the whole
    # evaluation loop ran inside this per-model loop (with the loop variable
    # shadowed by the inner averaging loop), re-running the identical
    # ensemble pass once per member and inflating metric counts.
    for model in models_list:
        model.eval()

    with torch.no_grad():
        for batch in loader_class.evaluation_loader:
            x, y = batch
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)

            # Accumulate sigmoid probabilities from each ensemble member.
            # NOTE(review): assumes y is a float tensor so the in-place add
            # of float probabilities is valid — confirm with the dataset.
            averaged_y = torch.zeros_like(y)
            for model in models_list:
                y_pred = model(x)
                averaged_y += torch.sigmoid(y_pred)
            averaged_y = averaged_y / len(models_list)

            # process=False: predictions are already probabilities.
            segment_metrics.update((averaged_y, y), process=False)

    metrics = segment_metrics.compute()
    return metrics
def evaluate_monte_carlo(config: ConfigClass, model):
    """Evaluate a single model with Monte-Carlo dropout sampling.

    Runs ``config.prediction.mc_passes`` stochastic forward passes per batch
    (dropout kept active via ``apply_dropout``) and averages the sigmoid
    outputs before updating the metrics.

    :param config: experiment configuration (gpu node, data settings,
        MC pass count, binarization threshold).
    :param model: trained torch model to evaluate.
    :return: computed segmentation metrics over the evaluation set.
    """
    device = torch.device(
        f'cuda:{config.gpu_node}' if torch.cuda.is_available() else 'cpu')
    mc_passes = config.prediction.mc_passes

    # Create dataloader class
    loader_class = get_dataloaders(config.data)

    segment_metrics = SegmentationMetrics(num_classes=loader_class.num_classes,
                                          threshold=config.binarize_threshold)

    # eval() freezes batch-norm etc.; apply_dropout then re-enables the
    # dropout layers so repeated passes are stochastic (MC dropout).
    model.eval()
    model.apply(apply_dropout)

    with torch.no_grad():
        for batch in loader_class.evaluation_loader:
            x, y = batch
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)

            # NOTE(review): assumes y is a float tensor so the in-place add
            # of float probabilities is valid — confirm with the dataset.
            summed_y = torch.zeros_like(y)
            for _ in range(mc_passes):  # loop index unused
                y_pred = model(x)
                summed_y += torch.sigmoid(y_pred)
            averaged_y = summed_y / mc_passes

            # process=False: predictions are already probabilities.
            segment_metrics.update((averaged_y, y), process=False)

    metrics = segment_metrics.compute()
    return metrics
def mds_evaluate_monte_carlo(config: ConfigClass, model, name):
    """MC-dropout evaluation over MDS directories, saving each segmentation.

    For every directory in the MDS loader wrapper, accumulates
    ``config.prediction.mc_passes`` stochastic sigmoid outputs into a
    volume-shaped buffer, averages them, updates the metrics, and writes the
    thresholded segmentation to disk.

    :param config: experiment configuration (gpu node, data settings,
        MC pass count, binarization threshold).
    :param model: trained torch model to evaluate.
    :param name: identifier used when saving the segmentation files.
    :return: computed segmentation metrics aggregated over all directories.
    """
    device = torch.device(
        f'cuda:{config.gpu_node}' if torch.cuda.is_available() else 'cpu')

    # Create dataloader wrapper
    loader_wrapper = MDSDataLoaders(config.data)

    segment_metrics = SegmentationMetrics(
        num_classes=loader_wrapper.num_classes,
        threshold=config.binarize_threshold)

    # Hoist loop-invariant config lookups.
    mc_passes = config.prediction.mc_passes
    batch_size = config.data.batch_size_val

    # eval() + apply_dropout are loop-invariant; previously they were
    # re-applied once per directory. eval() freezes batch-norm etc.;
    # apply_dropout re-enables dropout for stochastic MC passes.
    model.eval()
    model.apply(apply_dropout)

    # Evaluate
    for dir_id in loader_wrapper.dir_list:
        data_loader = loader_wrapper.get_evaluation_loader(dir_id)

        segmentation = torch.zeros(data_loader.dataset.shape)
        ground_truth = torch.zeros_like(segmentation)
        idx = 0

        with torch.no_grad():
            for batch in data_loader:
                x, y = batch
                x = x.to(device=device, non_blocking=True)

                # Sum sigmoid outputs over all MC passes; averaged below.
                for _ in range(mc_passes):
                    y_pred = model(x)
                    segmentation[idx:idx + batch_size] += torch.sigmoid(
                        y_pred).cpu()

                # y is kept on CPU (only x was moved to the device).
                # NOTE(review): assumes every batch, including the last, has
                # exactly batch_size_val samples — confirm with the loader.
                ground_truth[idx:idx + batch_size] = y
                idx += batch_size

        segmentation = segmentation / mc_passes

        # process=False: predictions are already probabilities.
        segment_metrics.update((segmentation, ground_truth), process=False)

        segmentation = segmentation.numpy()
        save_segmentation_to_file(segmentation, config.binarize_threshold,
                                  loader_wrapper.predict_path, dir_id, name)

    metrics = segment_metrics.compute()
    return metrics
def mds_evaluate_one_pass(config: ConfigClass, model, name):
    """Single-pass evaluation over MDS directories, saving each segmentation.

    For every directory in the MDS loader wrapper, runs one deterministic
    forward pass per batch, updates the metrics with the raw predictions,
    and writes the thresholded sigmoid segmentation to disk.

    :param config: experiment configuration (gpu node, data settings,
        binarization threshold).
    :param model: trained torch model to evaluate.
    :param name: identifier used when saving the segmentation files.
    :return: computed segmentation metrics aggregated over all directories.
    """
    device = torch.device(
        f'cuda:{config.gpu_node}' if torch.cuda.is_available() else 'cpu')

    # Create dataloader wrapper
    loader_wrapper = MDSDataLoaders(config.data)

    segment_metrics = SegmentationMetrics(
        num_classes=loader_wrapper.num_classes,
        threshold=config.binarize_threshold)

    # Hoist loop-invariant work: eval() was previously re-applied once per
    # directory, and the batch size re-read per batch.
    model.eval()
    batch_size = config.data.batch_size_val

    # Evaluate
    for dir_id in loader_wrapper.dir_list:
        data_loader = loader_wrapper.get_evaluation_loader(dir_id)

        segmentation = np.zeros(data_loader.dataset.shape)
        idx = 0

        with torch.no_grad():
            for batch in data_loader:
                x, y = batch
                x = x.to(device=device, non_blocking=True)
                y = y.to(device=device, non_blocking=True)

                y_pred = model(x)
                # Raw logits here: update() post-processes (no process=False).
                segment_metrics.update((y_pred, y))

                # NOTE(review): assumes every batch, including the last, has
                # exactly batch_size_val samples — confirm with the loader.
                segmentation[idx:idx + batch_size] = torch.sigmoid(
                    y_pred.cpu()).numpy()
                idx += batch_size

        save_segmentation_to_file(segmentation, config.binarize_threshold,
                                  loader_wrapper.predict_path, dir_id, name)

    metrics = segment_metrics.compute()
    return metrics
def evaluate_one_pass(config: ConfigClass, model):
    """Run one deterministic evaluation pass and return the metrics.

    Feeds every evaluation batch through ``model`` once (gradients disabled)
    and hands the raw predictions to the segmentation metrics tracker.

    :param config: experiment configuration (gpu node, data settings,
        binarization threshold).
    :param model: trained torch model to evaluate.
    :return: computed segmentation metrics over the evaluation set.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device(f'cuda:{config.gpu_node}' if use_cuda else 'cpu')

    # Create dataloader class
    loaders = get_dataloaders(config.data)

    metrics_tracker = SegmentationMetrics(num_classes=loaders.num_classes,
                                          threshold=config.binarize_threshold)

    model.eval()
    with torch.no_grad():
        for inputs, targets in loaders.evaluation_loader:
            inputs = inputs.to(device=device, non_blocking=True)
            targets = targets.to(device=device, non_blocking=True)
            metrics_tracker.update((model(inputs), targets))

    return metrics_tracker.compute()