Example #1
    def _print_val_results(self, precisions, recalls, dices, epoch, name,
                           classes, logger):
        # Log the results
        # We add them to a pd dataframe just for the pretty print output
        index = ["cls %i" % i for i in classes]
        val_results = pd.DataFrame(
            {
                "precision": precisions,
                "recall": recalls,
                "dice": dices,
            },
            index=index)
        # Transpose the results to have metrics in rows
        val_results = val_results.T
        # Add mean and set in first row
        means = [precisions.mean(), recalls.mean(), dices.mean()]
        val_results["mean"] = means
        cols = list(val_results.columns)
        cols.insert(0, cols.pop(cols.index('mean')))
        val_results = val_results.loc[:, cols]

        # Print the df to screen
        logger(
            highlighted(("[%s] Validation Results for "
                         "Epoch %i" % (name, epoch)).lstrip(" ")))
        logger(val_results.round(self.print_round).to_string() + "\n")
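The pretty-print pattern above (a per-class DataFrame, transposed so metrics sit in rows, with the across-class mean pulled into the first column) can be reproduced standalone. A minimal sketch with made-up scores for three classes, using .loc in place of the removed .ix indexer:

import numpy as np
import pandas as pd

# Hypothetical per-class scores for three classes (illustrative values only)
precisions = np.array([0.91, 0.84, 0.77])
recalls = np.array([0.88, 0.80, 0.75])
dices = np.array([0.89, 0.82, 0.76])
classes = np.arange(3)

# Metrics per class, transposed so each metric becomes a row
val_results = pd.DataFrame({"precision": precisions,
                            "recall": recalls,
                            "dice": dices},
                           index=["cls %i" % i for i in classes]).T

# Add the across-class mean and move it to the first column
val_results["mean"] = [precisions.mean(), recalls.mean(), dices.mean()]
cols = list(val_results.columns)
cols.insert(0, cols.pop(cols.index("mean")))
val_results = val_results.loc[:, cols]

print(val_results.round(4).to_string())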
Example #2
    def _print_val_results(self, class_wise_metrics, batch_wise_metrics, epoch,
                           task_name, classes):
        """
        TODO

        :param class_wise_metrics:
        :param batch_wise_metrics:
        :param epoch:
        :param task_name:
        :param classes:
        :return:
        """
        # We add them to a pd dataframe just for the pretty print output
        index = ["mean"] + ["cls %i" % i for i in classes]
        columns = list(batch_wise_metrics.keys()) + list(
            class_wise_metrics.keys())
        df = pd.DataFrame(data={c: [np.nan] * len(index)
                                for c in columns},
                          index=index)

        # Fill the df with metrics
        for m_name, value in batch_wise_metrics.items():
            df.loc['mean', m_name] = value
        for m_name, values in class_wise_metrics.items():
            values = [np.nanmean(values)] + list(values)
            df.loc[:, m_name] = values

        # Print the df to screen
        s = "Validation Results for epoch %i" % epoch
        self.logger(
            highlighted((("[%s]" % task_name) if task_name else "") + s))
        print_string = df.round(self.print_round).T.to_string()
        self.logger(print_string.replace("NaN", "  -") + "\n")
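The same method can be exercised in isolation. A minimal sketch with hypothetical metric dictionaries (names and values are illustrative only), showing how batch-wise metrics fill only the "mean" row, class-wise metrics fill every row, and any remaining NaN cells print as dashes:

import numpy as np
import pandas as pd

# Hypothetical inputs: one scalar per batch-wise metric,
# one value per class for each class-wise metric
classes = [0, 1, 2]
batch_wise_metrics = {"loss": 0.4321}
class_wise_metrics = {"dice": np.array([0.90, 0.82, 0.77])}

index = ["mean"] + ["cls %i" % i for i in classes]
columns = list(batch_wise_metrics.keys()) + list(class_wise_metrics.keys())
df = pd.DataFrame(data={c: [np.nan] * len(index) for c in columns}, index=index)

for m_name, value in batch_wise_metrics.items():
    df.loc["mean", m_name] = value                   # defined for the mean row only
for m_name, values in class_wise_metrics.items():
    df.loc[:, m_name] = [np.nanmean(values)] + list(values)

print(df.round(4).T.to_string().replace("NaN", "  -"))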
Example #3
    def _print_val_results(self, precisions, recalls, dices, metrics, epoch,
                           name, classes):
        # Log the results
        # We add them to a pd dataframe just for the pretty print output
        index = ["cls %i" % i for i in classes]
        metric_keys, metric_vals = map(list, list(zip(*metrics.items())))
        col_order = metric_keys + ["precision", "recall", "dice"]
        nan_arr = np.empty(shape=len(precisions))
        nan_arr[:] = np.nan
        value_dict = {"precision": precisions,
                      "recall": recalls,
                      "dice": dices}
        value_dict.update({key: nan_arr for key in metrics})
        val_results = pd.DataFrame(value_dict,
                                   index=index).loc[:, col_order]  # ensure order
        # Transpose the results to have metrics in rows
        val_results = val_results.T
        # Add mean and set in first row
        means = metric_vals + [precisions.mean(), recalls.mean(), dices.mean()]
        val_results["mean"] = means
        cols = list(val_results.columns)
        cols.insert(0, cols.pop(cols.index('mean')))
        val_results = val_results.loc[:, cols]

        # Print the df to screen
        self.logger(highlighted(("[%s] Validation Results for "
                            "Epoch %i" % (name, epoch)).lstrip(" ")))
        print_string = val_results.round(self.print_round).to_string()
        self.logger(print_string.replace("NaN", "---") + "\n")
Example #4
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        scores = self.eval()
        mean_dice = scores.mean()
        s = "Mean dice for epoch %d: %.4f\nPr. class: %s" % (epoch, mean_dice,
                                                             scores)
        self.logger(highlighted(s))
        self.scores.append(mean_dice)

        # Add to log
        logs["val_dice"] = mean_dice
Example #5
    def log(self):
        self.logger(highlighted("\nAudit for %i images" % len(self.nii_paths)))
        self.logger("Total memory GiB:  %.3f" % self.total_memory_gib)
        if self.n_classes is not None:
            self.logger("Number of classes: %i" % self.n_classes)
        self.logger("\n2D:\n"
                    "Real space span:   %.3f\n"
                    "Sample dim:        %.3f" %
                    (self.real_space_span_2D, self.sample_dim_2D))
        self.logger(
            "\n3D:\n"
            "Sample dim:        %i\n"
            "Real space span:   %.3f\n"
            "Box span:          %.3f" %
            (self.sample_dim_3D, self.real_space_span_3D, self.real_box_span))
Example #6
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        self.logger("\n")
        # Predict and get CM
        TPs, relevant, selected, metrics = self.predict()
        for name in self.IDs:
            tp, rel, sel = TPs[name], relevant[name], selected[name]
            precisions, recalls, dices = self._compute_dice(tp=tp,
                                                            sel=sel,
                                                            rel=rel)
            classes = np.arange(len(dices))

            # Add to log
            n = (name + "_") if len(self.IDs) > 1 else ""
            logs[f"{n}val_dice"] = dices.mean().round(self.log_round)
            logs[f"{n}val_precision"] = precisions.mean().round(self.log_round)
            logs[f"{n}val_recall"] = recalls.mean().round(self.log_round)
            for m_name, value in metrics[name].items():
                logs[f"{n}val_{m_name}"] = value.round(self.log_round)

            if self.verbose:
                self._print_val_results(precisions=precisions,
                                        recalls=recalls,
                                        dices=dices,
                                        epoch=epoch,
                                        name=name,
                                        classes=classes,
                                        logger=self.logger)

        if len(self.IDs) > 1:
            # Print cross-dataset mean values
            if self.verbose:
                self.logger(
                    highlighted(f"[ALL DATASETS] Means Across Classes"
                                f" for Epoch {epoch}"))
            fetch = ("val_dice", "val_precision", "val_recall")
            m_fetch = tuple(["val_" + s for s in self.model.metrics_names])
            to_print = {}
            for f in fetch + m_fetch:
                scores = [logs["%s_%s" % (name, f)] for name in self.IDs]
                res = np.mean(scores)
                logs[f] = res.round(self.log_round)  # Add to log file
                to_print[f.split("_")[-1]] = list(scores) + [res]
            if self.verbose:
                df = pd.DataFrame(to_print)
                df.index = self.IDs + ["mean"]
                print(df.round(self.print_round))
            self.logger("")
Example #7
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        # Predict and get CM
        class_wise_metrics, mean_batch_wise_metrics = self.evalaute()
        for n_classes, name in zip(self.n_classes, self.task_names):
            classes = np.arange(n_classes)
            n = (name + "_") if len(self.task_names) > 1 else ""

            # Add batch-wise metrics to log
            for m_name, value in mean_batch_wise_metrics[name].items():
                logs[f"{n}val_{m_name}"] = value.round(self.log_round)
            # Add mean of class-wise metrics to log
            for m_name, values in class_wise_metrics[name].items():
                logs[f"{n}val_{m_name}"] = np.nanmean(values)

            if self.verbose:
                self._print_val_results(
                    class_wise_metrics=class_wise_metrics[name],
                    batch_wise_metrics=mean_batch_wise_metrics[name],
                    epoch=epoch,
                    task_name=name,
                    classes=classes)

        if len(self.task_names) > 1:
            # Print cross-dataset mean values
            if self.verbose:
                self.logger(
                    highlighted(f"[ALL DATASETS] Means Across Classes"
                                f" for Epoch {epoch}"))
            fetch = ("val_dice", "val_precision", "val_recall")
            m_fetch = tuple(["val_" + s for s in self.model.metrics_names])
            to_print = {}
            for f in fetch + m_fetch:
                scores = [
                    logs["%s_%s" % (name, f)] for name in self.task_names
                ]
                res = np.mean(scores)
                logs[f] = res.round(self.log_round)  # Add to log file
                to_print[f.split("_")[-1]] = list(scores) + [res]
            if self.verbose:
                df = pd.DataFrame(to_print)
                df.index = self.task_names + ["mean"]
                self.logger(df.round(self.print_round))
            self.logger("")
Example #8
def _run_fusion_training(sets, logger, hparams, min_val_images, is_validation,
                         views, n_classes, unet, fusion_model, early_stopping,
                         fm_batch_size, epochs, eval_prob,
                         fusion_weights_path):
    """
    TODO
    """

    for _round, _set in enumerate(sets):
        s = "Set %i/%i:\n%s" % (_round + 1, len(sets), _set)
        logger("\n%s" % highlighted(s))

        # Reload data
        images = ImagePairLoader(**hparams["val_data"])
        if len(images) < min_val_images:
            images.add_images(ImagePairLoader(**hparams["train_data"]))

        # Get list of ImagePair objects to run on
        image_set_dict = {
            m.identifier: m
            for m in images if m.identifier in _set
        }

        # Set scaler and bg values
        images.set_scaler_and_bg_values(
            bg_value=hparams.get_from_anywhere('bg_value'),
            scaler=hparams.get_from_anywhere('scaler'),
            compute_now=False)

        # Init LazyQueue and get its sequencer
        from mpunet.sequences.utils import get_sequence
        seq = get_sequence(data_queue=images,
                           is_validation=True,
                           views=views,
                           **hparams["fit"],
                           **hparams["build"])

        # Fetch points from the set images
        points_collection = []
        targets_collection = []
        N_im = len(image_set_dict)
        for num_im, image_id in enumerate(list(image_set_dict.keys())):
            logger("")
            logger(
                highlighted("(%i/%i) Running on %s (%s)" %
                            (num_im + 1, N_im, image_id,
                             "val" if is_validation[image_id] else "train")))

            with seq.image_pair_queue.get_image_by_id(image_id) as image:
                # Get voxel grid in real space
                voxel_grid_real_space = get_voxel_grid_real_space(image)

                # Get array to store predictions across all views
                targets = image.labels.reshape(-1, 1)
                points = np.empty(shape=(len(targets), len(views), n_classes),
                                  dtype=np.float32)
                points.fill(np.nan)

                # Predict on all views
                for k, v in enumerate(views):
                    print("\n%s" % "View: %s" % v)
                    points[:, k, :] = predict_and_map(
                        model=unet,
                        seq=seq,
                        image=image,
                        view=v,
                        voxel_grid_real_space=voxel_grid_real_space,
                        n_planes='same+20',
                        targets=targets,
                        eval_prob=eval_prob).reshape(-1, n_classes)

                # add to collections
                points_collection.append(points)
                targets_collection.append(targets)
            # Debug check: is the image still loaded after leaving the queue context?
            print(image.is_loaded)

        # Stack points into one matrix
        logger("Stacking points...")
        X, y = stack_collections(points_collection, targets_collection)

        # Shuffle train
        print("Shuffling points...")
        X, y = shuffle(X, y)

        print("Getting validation set...")
        val_ind = int(0.20 * X.shape[0])
        X_val, y_val = X[:val_ind], y[:val_ind]
        X, y = X[val_ind:], y[val_ind:]

        # Prepare dice score callback for validation data
        val_cb = ValDiceScores((X_val, y_val), n_classes, 50000, logger)

        # Callbacks
        cbs = [
            val_cb,
            CSVLogger(filename="logs/fusion_training.csv",
                      separator=",",
                      append=True),
            PrintLayerWeights(fusion_model.layers[-1],
                              every=1,
                              first=1000,
                              per_epoch=True,
                              logger=logger)
        ]

        es = EarlyStopping(monitor='val_dice',
                           min_delta=0.0,
                           patience=early_stopping,
                           verbose=1,
                           mode='max')
        cbs.append(es)

        # Start training
        try:
            fusion_model.fit(X,
                             y,
                             batch_size=fm_batch_size,
                             epochs=epochs,
                             callbacks=cbs,
                             verbose=1)
        except KeyboardInterrupt:
            pass
        fusion_model.save_weights(fusion_weights_path)
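The shuffle-then-split step above carves the first 20% of the shuffled points off as a held-out validation set before training the fusion model. A minimal sketch of that pattern, assuming sklearn's shuffle and made-up array shapes:

import numpy as np
from sklearn.utils import shuffle

# Hypothetical stacked fusion inputs: N points, V views, C classes
X = np.random.rand(1000, 6, 3).astype(np.float32)
y = np.random.randint(0, 3, size=(1000, 1))

# Shuffle X and y jointly so the split below is a random sample
X, y = shuffle(X, y)

val_ind = int(0.20 * X.shape[0])
X_val, y_val = X[:val_ind], y[:val_ind]   # first 20% -> validation
X, y = X[val_ind:], y[val_ind:]           # remaining 80% -> fusion training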