Example #1
    def compute_registration_statistic(
        cls,
        idx_row,
        df_experiments,
        path_dataset=None,
        path_experiment=None,
        path_reference=None,
    ):
        """ after successful registration load initial nad estimated landmarks
        afterwords compute various statistic for init, and final alignment

        :param tuple(int,dict) df_row: row from iterated table
        :param DF df_experiments: DataFrame with experiments
        :param str|None path_dataset: path to the provided dataset folder
        :param str|None path_reference: path to the complete landmark collection folder
        :param str|None path_experiment: path to the experiment folder
        """
        idx, row = idx_row
        row = dict(row)  # convert a pandas Series to a plain dictionary
        # load common landmarks and image size
        points_ref, points_move, path_img_ref = cls._load_landmarks(
            row, path_dataset)
        img_diag = cls._image_diag(row, path_img_ref)
        df_experiments.loc[idx, cls.COL_IMAGE_DIAGONAL] = img_diag

        # compute landmark statistics for the initial alignment
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_ref,
                                          points_move,
                                          'init',
                                          img_diag,
                                          wo_affine=False)

        # define the target and initial state according to the experiment results
        use_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str)
        if use_move_warp:
            points_init, points_target = points_move, points_ref
            col_source, col_target = cls.COL_POINTS_MOVE, cls.COL_POINTS_REF
            col_lnds_warp = cls.COL_POINTS_MOVE_WARP
        else:
            points_init, points_target = points_ref, points_move
            col_lnds_warp = cls.COL_POINTS_REF_WARP
            col_source, col_target = cls.COL_POINTS_REF, cls.COL_POINTS_MOVE

        # optionally restrict to landmarks paired with the reference collection
        if path_reference:
            ratio, points_target, _ = \
                filter_paired_landmarks(row, path_dataset, path_reference, col_source, col_target)
            df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = np.round(ratio, 2)

        # check that the experiment produced any transformed landmarks
        if (cls.COL_POINTS_MOVE_WARP not in row) and (cls.COL_POINTS_REF_WARP
                                                      not in row):
            logging.error('Statistic: no output landmarks')
            return

        # check that the target landmarks were loaded
        if points_target is None:
            logging.warning(
                'Missing landmarks in "%s"',
                cls.COL_POINTS_REF if use_move_warp else cls.COL_POINTS_MOVE)
            return
        # load warped landmarks
        path_lnds_warp = update_path(row[col_lnds_warp],
                                     pre_path=path_experiment)
        if path_lnds_warp and os.path.isfile(path_lnds_warp):
            points_warp = load_landmarks(path_lnds_warp)
            points_warp = np.nan_to_num(points_warp)
        else:
            logging.warning('Invalid path to the landmarks: "%s" <- "%s"',
                            path_lnds_warp, row[col_lnds_warp])
            return
        df_experiments.loc[idx, cls.COL_NB_LANDMARKS_INPUT] = min(
            len(points_init), len(points_target))
        df_experiments.loc[idx, cls.COL_NB_LANDMARKS_WARP] = len(points_warp)

        # compute affine transformation difference statistics
        affine_diff = compute_affine_transf_diff(points_init, points_target,
                                                 points_warp)
        for name in affine_diff:
            df_experiments.loc[idx, name] = affine_diff[name]

        # compute landmark statistics for the elastic part (affine component removed)
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_target,
                                          points_warp,
                                          'elastic',
                                          img_diag,
                                          wo_affine=True)
        # compute landmark statistics for the final (target) alignment
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_target,
                                          points_warp,
                                          'target',
                                          img_diag,
                                          wo_affine=False)
        row_ = dict(df_experiments.loc[idx])
        # compute the robustness
        if 'TRE Mean' in row_:
            df_experiments.loc[idx, cls.COL_ROBUSTNESS] = \
                compute_tre_robustness(points_target, points_init, points_warp)
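
This method is a classmethod that fills in one row of the experiments table at a time, so a caller typically iterates the DataFrame and passes each (index, row) pair together with the relevant folders. Below is a minimal usage sketch under that assumption; the class name ImRegBenchmark, the import path and the folder layout are illustrative guesses, not taken from the snippet above.

    import pandas as pd
    from birl.benchmark import ImRegBenchmark  # assumed import location

    df_experiments = pd.read_csv('results/registration-results.csv')  # hypothetical results table
    for idx_row in df_experiments.iterrows():  # yields (index, Series) pairs
        ImRegBenchmark.compute_registration_statistic(
            idx_row,
            df_experiments,
            path_dataset='data/dataset',      # hypothetical dataset folder
            path_experiment='results',        # hypothetical experiment folder
        )
    df_experiments.to_csv('results/registration-results-stats.csv', index=False)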
Example #2
    def compute_registration_statistic(cls,
                                       idx_row,
                                       df_experiments,
                                       path_dataset=None,
                                       path_experiment=None):
        """ after successful registration load initial nad estimated landmarks
        afterwords compute various statistic for init, and final alignment

        :param tuple(int,dict) df_row: row from iterated table
        :param DF df_experiments: DataFrame with experiments
        :param str|None path_dataset: path to the dataset folder
        :param str|None path_experiment: path to the experiment folder
        """
        idx, row = idx_row
        row = dict(row)  # convert a pandas Series to a plain dictionary
        points_ref, points_move, path_img_ref = cls._load_landmarks(
            row, path_dataset)
        img_diag = cls._image_diag(row, path_img_ref)
        df_experiments.loc[idx, cls.COL_IMAGE_DIAGONAL] = img_diag

        # compute landmark statistics for the initial alignment
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_ref,
                                          points_move,
                                          'init',
                                          img_diag,
                                          wo_affine=False)

        # check that the experiment produced any transformed landmarks
        if (cls.COL_POINTS_MOVE_WARP not in row) and (cls.COL_POINTS_REF_WARP
                                                      not in row):
            logging.error('Statistic: no output landmarks')
            return

        # define the target and initial state according to the experiment results
        is_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP, None), str)
        points_init = points_move if is_move_warp else points_ref
        points_target = points_ref if is_move_warp else points_move
        col_lnds_warp = cls.COL_POINTS_MOVE_WARP if is_move_warp else cls.COL_POINTS_REF_WARP

        # check that the target landmarks were loaded
        if points_target is None:
            logging.warning(
                'Missing landmarks in "%s"',
                cls.COL_POINTS_REF if is_move_warp else cls.COL_POINTS_MOVE)
            return
        # load warped landmarks
        path_lnds_warp = update_path(row[col_lnds_warp],
                                     pre_path=path_experiment)
        if path_lnds_warp and os.path.isfile(path_lnds_warp):
            points_warp = load_landmarks(path_lnds_warp)
            points_warp = np.nan_to_num(points_warp)
        else:
            logging.warning('Invalid path to the landmarks: "%s" <- "%s"',
                            path_lnds_warp, row[col_lnds_warp])
            return

        # compute affine transformation difference statistics
        affine_diff = compute_affine_transf_diff(points_init, points_target,
                                                 points_warp)
        for name in affine_diff:
            df_experiments.loc[idx, name] = affine_diff[name]

        # compute landmark statistics for the elastic part (affine component removed)
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_target,
                                          points_warp,
                                          'elastic',
                                          img_diag,
                                          wo_affine=True)
        # compute landmark statistics for the final (target) alignment
        cls.compute_registration_accuracy(df_experiments,
                                          idx,
                                          points_target,
                                          points_warp,
                                          'target',
                                          img_diag,
                                          wo_affine=False)
        row_ = dict(df_experiments.loc[idx])
        # compute the robustness
        if 'TRE Mean' in row_:
            df_experiments.loc[idx, cls.COL_ROBUSTNESS] = \
                compute_tre_robustness(points_target, points_init, points_warp)
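
Both examples delegate the per-state error computation to compute_registration_accuracy, whose body is not shown here. Conceptually it reduces to the target registration error (TRE): Euclidean distances between corresponding target and warped landmarks, optionally normalised by the image diagonal to give a relative TRE. The sketch below illustrates that idea under the assumption that both point sets are (N, 2) arrays with matching order; it is an illustration, not the library's implementation.

    import numpy as np

    def tre_statistics(points_target, points_warp, img_diag=None):
        """Return basic (relative) TRE statistics for two matched landmark sets."""
        nb = min(len(points_target), len(points_warp))  # use only the paired prefix
        dists = np.linalg.norm(np.asarray(points_target)[:nb] - np.asarray(points_warp)[:nb], axis=1)
        if img_diag:  # normalise by the image diagonal -> relative TRE
            dists = dists / img_diag
        return {
            'TRE Mean': float(np.mean(dists)),
            'TRE Median': float(np.median(dists)),
            'TRE Max': float(np.max(dists)),
        }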