    def fraction_uncovered_examples(
            ruleset: IDSRuleSet,
            quant_dataframe: QuantitativeDataFrame) -> float:
        """

        This metric computes the fraction of data points that are not covered by any rule in the decision set.
        Note that cover is independent of the head (consequent) of a rule.

        Boundary values:
            0.0 if every data point is covered by some rule in the rule set R.
            1.0 when no data point is covered by any rule in R
                (this can be the case when |R| = 0).

        Note:
            * for decision lists, this metric is
                the fraction of the data points that are covered by the ELSE clause of the list
                (i.e. the default prediction).

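        Example (illustrative sketch; ``my_rules`` and ``my_quant_df`` are hypothetical objects)::

            frac = fraction_uncovered_examples(my_rules, my_quant_df)
            # e.g. with 100 rows of which 80 are covered by at least one rule,
            # this returns 1 - 80 / 100 = 0.2
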
        :param ruleset:
        :param quant_dataframe:
        :return:
        """

        if type(ruleset) != IDSRuleSet:
            raise Exception("Type of ruleset must be IDSRuleSet")

        if type(quant_dataframe) != QuantitativeDataFrame:
            raise Exception(
                "Type of quant_dataframe must be QuantitativeDataFrame")

        nb_of_test_examples = quant_dataframe.dataframe.index.size
        if nb_of_test_examples == 0:
            raise Exception(
                "There are no test instances to calculate the fraction uncovered on"
            )

        cover_cumulative_mask: np.ndarray = np.zeros(nb_of_test_examples,
                                                     dtype=bool)

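        # OR together the cover masks of all rules: a data point counts as covered
        # as soon as at least one rule's body matches it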
        for rule in ruleset.ruleset:
            cover_mask = rule._cover(quant_dataframe)
            cover_cumulative_mask = np.logical_or(cover_cumulative_mask,
                                                  cover_mask)

        nb_of_covered_test_examples: int = np.count_nonzero(
            cover_cumulative_mask)

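        # fraction uncovered = 1 - (covered examples / total examples)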
        frac_uncovered = 1 - nb_of_covered_test_examples / nb_of_test_examples

        if not is_valid_fraction(frac_uncovered):
            raise Exception(
                "Fraction uncovered examples is not within [0,1]: " +
                str(frac_uncovered))

        return frac_uncovered

    def fraction_predicted_classes(
            ruleset: IDSRuleSet,
            quant_dataframe: QuantitativeDataFrame) -> float:
        """
        This metric denotes the fraction of the classes in the data that are predicted by the ruleset R.

        Boundary values:
            0.0 if no class is predicted (e.g. the ruleset is empty)
            1.0 if every class is predicted by some rule in R.

        Note:
            * The same holds for decision lists, but we do not consider the ELSE clause (the default prediction).

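        Example (illustrative sketch; ``my_rules`` and ``my_quant_df`` are hypothetical objects)::

            frac = fraction_predicted_classes(my_rules, my_quant_df)
            # e.g. if 3 distinct class values occur in the data and the rule heads
            # only predict 2 of them, this returns 2 / 3
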
        :param ruleset:
        :param quant_dataframe:
        :return:
        """
        if type(ruleset) != IDSRuleSet:
            raise Exception("Type of ruleset must be IDSRuleSet")

        if type(quant_dataframe) != QuantitativeDataFrame:
            raise Exception(
                "Type of quant_dataframe must be QuantitativeDataFrame")

        values_occuring_in_data: Set[TargetVal] = set(
            quant_dataframe.dataframe.iloc[:, -1].values)
        values_predicted_by_rules: Set[TargetVal] = set()

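        # collect the class values predicted by the rule heads, keeping only
        # values that actually occur in the data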
        for rule in ruleset.ruleset:
            covered_class: TargetVal = rule.car.consequent.value
            if covered_class in values_occuring_in_data:
                values_predicted_by_rules.add(covered_class)

        nb_of_values_predicted_by_rules = len(values_predicted_by_rules)
        nb_of_values_occuring_in_test_data = len(values_occuring_in_data)

        frac_classes: float = nb_of_values_predicted_by_rules / nb_of_values_occuring_in_test_data

        if not is_valid_fraction(frac_classes):
            raise Exception(
                "Fraction predicted classes examples is not within [0,1]: " +
                str(frac_classes))

        return frac_classes

    def _fraction_overlap(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
                          target_attr: Optional[TargetAttr] = None,
                          cover_checker_on_test: Optional[CoverChecker] = None,
                          overlap_checker_on_test: Optional[OverlapChecker] = None,
                          debug=False) -> float:
        """
        This metric captures the extent of overlap between every pair of rules in a decision set R.
        Smaller values of this metric signify higher interpretability.

        Boundary values:
            0.0 if no rules in R overlap.
            1.0 if all data points are covered by all rules in R.

        NOTE:
            * this is 0.0 for any decision list,
              because their if-else structure ensures that a rule in the list applies only to those data points
                which have not been covered by any of the preceding rules
            * this is 0.0 for the empty rule set

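        Example (illustrative sketch; the numbers are hypothetical)::

            # for |R| = 3 rules there are 3 * 2 / 2 = 3 rule pairs;
            # with pairwise overlap counts 10, 0 and 5 on 100 test examples,
            # the metric is 2 / (3 * 2) * (10 + 0 + 5) / 100 = 0.05
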
        :param ruleset:
        :param test_dataframe:
        :param target_attr:
        :param cover_checker_on_test:
        :param overlap_checker_on_test:
        :param debug:
        :return:
        """

        if type(ruleset) != MIDSRuleSet:
            raise Exception(f"Type of ruleset must be MIDSRuleSet, but is {type(ruleset)}")
        # warnings.warn("FRACTION_OVERLAP IS CURRENTLY NOT RELATIVE TO A TARGET ATTRIBUTE. THIS MIGHT BE INCORRECT")

        ruleset_size: int = len(ruleset)
        if ruleset_size == 0:
            print("Warning: the MIDS rule set is empty.")
            return 0.0
        nb_of_test_examples: int = test_dataframe.index.size
        if nb_of_test_examples == 0:
            raise Exception("There are no test instances to calculate overlap on")

        if cover_checker_on_test is None:
            cover_checker_on_test = DefaultCoverChecker()
        if overlap_checker_on_test is None:
            overlap_checker_on_test = DefaultOverlapChecker(cover_checker_on_test, debug)

        overlap_sum: int = 0

        rule_i: MIDSRule
        rule_j: MIDSRule
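        # sum the overlap counts over all unordered rule pairs
        # (the condition i > j counts each pair once and skips self-overlap)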
        for i, rule_i in enumerate(ruleset.ruleset):
            for j, rule_j in enumerate(ruleset.ruleset):
                if i <= j:
                    continue
                if target_attr is None:
                    overlap_sum += overlap_checker_on_test.get_pure_overlap_count(
                        rule_i, rule_j, test_dataframe)
                else:
                    overlap_sum += overlap_checker_on_test.get_relative_overlap_count(
                        rule_i, rule_j, test_dataframe, target_attr)

        if overlap_sum == 0:
            warnings.warn("overlap is 0, which is unlikely")
            return 0
        else:
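            # normalise by the number of rule pairs |R| * (|R| - 1) / 2
            # and by the number of test examples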
            frac_overlap = 2 / (ruleset_size * (ruleset_size - 1)) * overlap_sum / nb_of_test_examples

            if not is_valid_fraction(frac_overlap):
                raise Exception("Fraction overlap is not within [0,1]: " + str(frac_overlap))

            return frac_overlap

    def fraction_predicted_classes(ruleset: MIDSRuleSet, test_dataframe,
                                   target_attributes: List[TargetAttr]
                                   ) -> Tuple[float, Dict[TargetAttr, float]]:
        """
        This metric denotes the fraction of the classes in the data that are predicted by the ruleset R.

        Returns:
            1. the fraction of predicted classes, averaged over the different target attributes
            2. a map from target attribute to fraction for that target attr


        Boundary values:
            0.0 if no class is predicted (e.g. the ruleset is empty)
            1.0 if every class is predicted by some rule in R.

        Note:
            * The same holds for decision lists, but we do not consider the ELSE clause (the default prediction).

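        Example (illustrative sketch; the numbers and attribute names are hypothetical)::

            # with two target attributes A and B, where the rules predict
            # 2 of the 4 values of A and 3 of the 3 values of B, this returns
            # ((2/4 + 3/3) / 2, {A: 0.5, B: 1.0}) = (0.75, {A: 0.5, B: 1.0})
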
        :param target_attributes:
        :param ruleset:
        :param test_dataframe:
        :return:
        """

        if type(ruleset) != MIDSRuleSet:
            raise Exception("Type of ruleset must be IDSRuleSet")

        warnings.warn(
            "Ugly conversion to string to deal with numerical attributes."
            " Clean this up (look at Survived in Titanic).")

        values_in_data_per_target_attribute: Dict[TargetAttr, Set[TargetVal]] = {}
        predicted_values_per_target_attribute: Dict[TargetAttr, Set[TargetVal]] = {}

        for target_attr in target_attributes:
            values_as_str: List[str] = [str(val) for val in test_dataframe[target_attr].values]
            values_in_data_per_target_attribute[target_attr] = set(values_as_str)
            predicted_values_per_target_attribute[target_attr] = set()

        target_attribute_set: Set[TargetAttr] = set(target_attributes)

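        # collect, per target attribute, the values predicted by the rule heads
        # (only values that also occur in the test data are kept)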
        for rule in ruleset.ruleset:
            consequent: Consequent = rule.car.consequent
            for literal in consequent.get_literals():
                predicted_attribute: TargetAttr = literal.get_attribute()
                predicted_value: TargetVal = literal.get_value()

                if predicted_attribute in target_attribute_set:
                    predicted_value_str = str(predicted_value)
                    predicted_values: Set[TargetVal] = predicted_values_per_target_attribute[predicted_attribute]
                    if predicted_value_str in values_in_data_per_target_attribute[predicted_attribute]:
                        predicted_values.add(predicted_value_str)

        # print("values_in_data_per_target_attribute", values_in_data_per_target_attribute)
        # print("predicted_values_per_target_attribute", predicted_values_per_target_attribute)

        frac_predicted_classes_per_target_attr: Dict[TargetAttr, float] = {}

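        # per-attribute fraction = |predicted values| / |values in the test data|;
        # the overall metric is the unweighted mean over the target attributes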
        avg_frac_predicted_classes: float = 0
        for target_attr in values_in_data_per_target_attribute.keys():
            values_occuring_in_data = values_in_data_per_target_attribute[target_attr]
            predicted_values = predicted_values_per_target_attribute[target_attr]

            domain_size_in_test_data = len(values_occuring_in_data)
            nb_of_predicted_values = len(predicted_values)

            frac_classes: float = nb_of_predicted_values / domain_size_in_test_data
            frac_predicted_classes_per_target_attr[target_attr] = frac_classes

            avg_frac_predicted_classes += frac_classes

        nb_of_target_attrs = len(target_attributes)
        avg_frac_predicted_classes = avg_frac_predicted_classes / nb_of_target_attrs

        if not is_valid_fraction(avg_frac_predicted_classes):
            raise Exception("Avg fraction predicted classes examples is not within [0,1]: "
                            + str(avg_frac_predicted_classes))

        return avg_frac_predicted_classes, frac_predicted_classes_per_target_attr

    def fraction_bodily_overlap(
            ruleset: IDSRuleSet,
            quant_dataframe: QuantitativeDataFrame) -> float:
        """
        This metric captures the extent of overlap between every pair of rules in a decision set R.
        Smaller values of this metric signify higher interpretability.

        Boundary values:
            0.0 if no rules in R overlap.
            1.0 if all data points are covered by all rules in R.

        NOTE:
            * this is 0.0 for any decision list,
              because their if-else structure ensures that a rule in the list applies only to those data points
                which have not been covered by any of the preceding rules
            * this is 0.0 for the empty rule set

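        Example (illustrative sketch; ``my_rules`` and ``my_quant_df`` are hypothetical objects)::

            frac = fraction_bodily_overlap(my_rules, my_quant_df)
            # for |R| = 4 rules there are 4 * 3 / 2 = 6 rule pairs; if their
            # overlap counts sum to 30 on 200 examples, this returns
            # 2 / (4 * 3) * 30 / 200 = 0.025
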
        :param ruleset:
        :param quant_dataframe:
        :return:
        """
        if type(ruleset) != IDSRuleSet:
            raise Exception("Type of ruleset must be IDSRuleSet")

        if type(quant_dataframe) != QuantitativeDataFrame:
            raise Exception(
                "Type of quant_dataframe must be QuantitativeDataFrame")

        ruleset_size = len(ruleset)
        if ruleset_size == 0:
            print("Warning: the IDS rule set is empty.")
            return 0.0
        nb_of_test_examples: int = quant_dataframe.dataframe.index.size
        if nb_of_test_examples == 0:
            raise Exception(
                "There are no test instances to calculate overlap on")

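        # pre-compute the cover of each rule on the given dataframe
        # (needed before computing the pairwise rule overlap below)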
        rule: IDSRule
        for rule in ruleset.ruleset:
            rule.calculate_cover(quant_dataframe)

        overlap_sum: int = 0
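        # sum the overlap of every unordered rule pair once (i > j)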
        for i, rule_i in enumerate(ruleset.ruleset):
            for j, rule_j in enumerate(ruleset.ruleset):
                if i <= j:
                    continue
                overlap_sum += np.sum(
                    rule_i.rule_overlap(rule_j, quant_dataframe))

        if overlap_sum == 0:
            warnings.warn("overlap is 0, which is unlikely")
            return 0
        else:
            frac_overlap: float = 2 / (
                ruleset_size *
                (ruleset_size - 1)) * overlap_sum / nb_of_test_examples

            if not is_valid_fraction(frac_overlap):
                raise Exception("Fraction overlap is not within [0,1]: " +
                                str(frac_overlap))

            return frac_overlap