示例#1
0
def evaluate_ensemble(config: ConfigClass, models_list):
    """Evaluate an ensemble by averaging the members' sigmoid outputs per batch."""
    use_cuda = torch.cuda.is_available()
    device = torch.device(f'cuda:{config.gpu_node}' if use_cuda else 'cpu')

    # Build the dataloader wrapper and the metrics accumulator
    loader_class = get_dataloaders(config.data)
    segment_metrics = SegmentationMetrics(
        num_classes=loader_class.num_classes,
        threshold=config.binarize_threshold)

    # Put every ensemble member into evaluation mode
    for model in models_list:
        model.eval()

    with torch.no_grad():
        for x, y in loader_class.evaluation_loader:
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)

            # Accumulate each member's sigmoid probabilities, then average.
            averaged_y = torch.zeros_like(y)
            for model in models_list:
                averaged_y += torch.sigmoid(model(x))
            averaged_y = averaged_y / len(models_list)

            segment_metrics.update((averaged_y, y), process=False)

    return segment_metrics.compute()
示例#2
0
def evaluate_monte_carlo(config: ConfigClass, model):
    """Evaluate a model with MC-dropout: average sigmoid outputs over several
    stochastic forward passes with dropout kept active.

    Args:
        config: experiment configuration (reads gpu_node, data,
            prediction.mc_passes, binarize_threshold).
        model: trained segmentation network.

    Returns:
        The metrics dict produced by SegmentationMetrics.compute().
    """
    device = torch.device(
        f'cuda:{config.gpu_node}' if torch.cuda.is_available() else 'cpu')
    mc_passes = config.prediction.mc_passes

    # Create dataloader class
    loader_class = get_dataloaders(config.data)
    segment_metrics = SegmentationMetrics(num_classes=loader_class.num_classes,
                                          threshold=config.binarize_threshold)

    # eval() fixes normalization layers; apply_dropout presumably re-enables
    # dropout modules so each forward pass stays stochastic — see its definition.
    model.eval()
    model.apply(apply_dropout)

    with torch.no_grad():
        for batch in loader_class.evaluation_loader:
            x, y = batch
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)

            summed_y = torch.zeros_like(y)

            # Loop counter is unused: each pass only accumulates probabilities.
            for _ in range(mc_passes):
                y_pred = model(x)
                summed_y += torch.sigmoid(y_pred)

            averaged_y = summed_y / mc_passes
            segment_metrics.update((averaged_y, y), process=False)

    metrics = segment_metrics.compute()
    return metrics
示例#3
0
    def _init_engines(self) -> Tuple[Engine, Engine]:
        """Build the train/validation metric dicts and return (trainer, evaluator)."""
        # Running averages of the individual loss terms reported during training.
        self.train_metrics = {
            'total_loss': metrics.RunningAverage(
                output_transform=lambda out: out['loss']),
            'segment_loss': metrics.RunningAverage(
                output_transform=lambda out: out['segment_loss']),
            'kl_div': metrics.RunningAverage(
                output_transform=lambda out: out['kl_div']),
        }

        # Validation metrics: VAE losses plus segmentation quality.
        self.val_metrics = {
            'vae_metrics': VAEMetrics(
                loss_fn=self.criterion,
                mse_factor=0,
                kld_factor=self.starting_kld_factor),
            'segment_metrics': SegmentationMetrics(
                num_classes=self.data_loaders.num_classes,
                threshold=self.config.binarize_threshold),
        }

        return self._init_trainer_engine(), self._init_evaluator_engine()
    def _init_train_components_ensemble(self, reinitialise=False):
        """Set up criterion, ensemble models, optimizers, schedulers and engines."""
        self.val_metrics = {
            'loss': metrics.Loss(
                BCEAndJaccardLoss(eval_ensemble=True, gpu_node=self.config.gpu_node)),
            'segment_metrics': SegmentationMetrics(
                num_classes=self.data_loaders.num_classes,
                threshold=self.config.binarize_threshold,
                eval_ensemble=True),
        }

        # Propagate dataset geometry into the network configuration.
        net_params = self.model_cfg.network_params
        net_params.input_channels = self.data_loaders.input_channels
        net_params.num_classes = self.data_loaders.num_classes
        net_params.image_size = self.data_loaders.image_size

        self.criterion = self._init_criterion(not reinitialise)
        self.ens_models = []
        self.ens_optimizers = []
        self.ens_lr_schedulers = []

        # Forward only the optimizer-config keys the optimizer constructor accepts.
        optimizer_cls = get_optimizer(self.optim_cfg)
        init_param_names = retrieve_class_init_parameters(optimizer_cls)
        optimizer_params = {
            name: value
            for name, value in self.optim_cfg.items()
            if name in init_param_names
        }

        for _ in range(self.len_models):
            member = get_model(self.model_cfg).to(device=self.device)
            optimizer = optimizer_cls(member.parameters(), **optimizer_params)
            self.ens_models.append(member)
            self.ens_optimizers.append(optimizer)
            self.ens_lr_schedulers.append(self._init_lr_scheduler(optimizer))

        if not reinitialise:
            self.main_logger.info(f'Using ensemble of {self.len_models} {self.ens_models[0]}')
            self.main_logger.info(f'Using optimizers {self.ens_optimizers[0].__class__.__name__}')

        self.trainer, self.evaluator = self._init_engines()

        self._init_handlers()
示例#5
0
def mds_evaluate_monte_carlo(config: ConfigClass, model, name):
    """MC-dropout evaluation over each MDS directory; saves each segmentation."""
    use_cuda = torch.cuda.is_available()
    device = torch.device(f'cuda:{config.gpu_node}' if use_cuda else 'cpu')

    # Create dataloader wrapper
    loader_wrapper = MDSDataLoaders(config.data)
    segment_metrics = SegmentationMetrics(
        num_classes=loader_wrapper.num_classes,
        threshold=config.binarize_threshold)

    # Hoist loop-invariant config lookups.
    batch_size = config.data.batch_size_val
    mc_passes = config.prediction.mc_passes

    for dir_id in loader_wrapper.dir_list:
        data_loader = loader_wrapper.get_evaluation_loader(dir_id)

        # eval() fixes normalization layers; apply_dropout keeps dropout active
        # so the forward passes stay stochastic.
        model.eval()
        model.apply(apply_dropout)

        segmentation = torch.zeros(data_loader.dataset.shape)
        ground_truth = torch.zeros_like(segmentation)

        offset = 0
        with torch.no_grad():
            for x, y in data_loader:
                x = x.to(device=device, non_blocking=True)

                # Sum sigmoid probabilities over all stochastic passes.
                for _ in range(mc_passes):
                    segmentation[offset:offset + batch_size] += torch.sigmoid(
                        model(x)).cpu()

                ground_truth[offset:offset + batch_size] = y
                offset += batch_size

        segmentation = segmentation / mc_passes
        segment_metrics.update((segmentation, ground_truth), process=False)

        segmentation = segmentation.numpy()

        save_segmentation_to_file(segmentation, config.binarize_threshold,
                                  loader_wrapper.predict_path, dir_id, name)

    metrics = segment_metrics.compute()
    return metrics
示例#6
0
def mds_evaluate_one_pass(config: ConfigClass, model, name):
    """Single-pass evaluation over each MDS directory; saves each segmentation."""
    use_cuda = torch.cuda.is_available()
    device = torch.device(f'cuda:{config.gpu_node}' if use_cuda else 'cpu')

    # Create dataloader wrapper
    loader_wrapper = MDSDataLoaders(config.data)
    segment_metrics = SegmentationMetrics(
        num_classes=loader_wrapper.num_classes,
        threshold=config.binarize_threshold)

    batch_size = config.data.batch_size_val

    for dir_id in loader_wrapper.dir_list:
        data_loader = loader_wrapper.get_evaluation_loader(dir_id)

        model.eval()
        segmentation = np.zeros(data_loader.dataset.shape)

        offset = 0
        with torch.no_grad():
            for x, y in data_loader:
                x = x.to(device=device, non_blocking=True)
                y = y.to(device=device, non_blocking=True)

                y_pred = model(x)
                segment_metrics.update((y_pred, y))

                # Store probabilities on CPU for writing to disk afterwards.
                segmentation[offset:offset + batch_size] = torch.sigmoid(
                    y_pred.cpu()).numpy()
                offset += batch_size

        save_segmentation_to_file(segmentation, config.binarize_threshold,
                                  loader_wrapper.predict_path, dir_id, name)

    metrics = segment_metrics.compute()
    return metrics
示例#7
0
def evaluate_one_pass(config: ConfigClass, model):
    """Evaluate a model with one deterministic forward pass per batch."""
    use_cuda = torch.cuda.is_available()
    device = torch.device(f'cuda:{config.gpu_node}' if use_cuda else 'cpu')

    # Build the dataloader wrapper and the metrics accumulator
    loader_class = get_dataloaders(config.data)
    segment_metrics = SegmentationMetrics(
        num_classes=loader_class.num_classes,
        threshold=config.binarize_threshold)

    model.eval()

    with torch.no_grad():
        for x, y in loader_class.evaluation_loader:
            x = x.to(device=device, non_blocking=True)
            y = y.to(device=device, non_blocking=True)
            segment_metrics.update((model(x), y))

    return segment_metrics.compute()
    def _init_train_components(self, reinitialise=False):
        """Build validation metrics, model, criterion, optimizer, scheduler and engines."""
        self.val_metrics = {
            'loss': metrics.Loss(get_loss_function(self.loss_cfg)),
            'segment_metrics': SegmentationMetrics(
                num_classes=self.data_loaders.num_classes,
                threshold=self.config.binarize_threshold),
        }

        # Propagate dataset geometry into the network configuration.
        net_params = self.model_cfg.network_params
        net_params.input_channels = self.data_loaders.input_channels
        net_params.num_classes = self.data_loaders.num_classes
        net_params.image_size = self.data_loaders.image_size

        # The helpers receive `not reinitialise` — presumably a first-time
        # setup flag (e.g. controls logging); confirm against their definitions.
        first_init = not reinitialise
        self.model = self._init_model(first_init)
        self.criterion = self._init_criterion(first_init)
        self.optimizer = self._init_optimizer(first_init)

        self.lr_scheduler = self._init_lr_scheduler(self.optimizer)

        self.trainer, self.evaluator = self._init_engines()

        self._init_handlers()
    def _init_train_components(self):
        """Build metrics, model, optimizer, VI criterion, scheduler and engines."""
        # Validation loss is the variational-inference objective built on the
        # configured loss function.
        vi_loss = GaussianVariationalInference(
            get_loss_function(self.train_cfg.loss_fn))
        self.metrics = {
            'loss': metrics.Loss(vi_loss),
            'segment_metrics': SegmentationMetrics(
                num_classes=self.data_loaders.num_classes,
                threshold=self.config.binarize_threshold),
        }

        # Propagate dataset geometry into the network configuration.
        self.model_cfg.network_params.input_channels = self.data_loaders.input_channels
        self.model_cfg.network_params.num_classes = self.data_loaders.num_classes

        self.model = self._init_model()
        self.optimizer = self._init_optimizer()
        # Training criterion is a separate VI wrapper around _init_criterion().
        self.vi = GaussianVariationalInference(self._init_criterion())

        self.lr_scheduler = self._init_lr_scheduler(self.optimizer)

        self.trainer, self.evaluator = self._init_engines()

        self._init_handlers()