Example #1
    def _get_metric_fct(self):
        """ Return a function mapping (TP, FP, TN, FN) to the chosen metric """
        if self.metric == 'Mutual_information':
            metric_fct = lambda TP, FP, TN, FN: BinaryClassificationDataset.mutual_information(
                TN, FN, TP, FP)
        elif self.metric == 'Normalized_mutual_information':
            metric_fct = lambda TP, FP, TN, FN: BinaryClassificationDataset.normalized_mutual_information(
                TN, FN, TP, FP)
        elif self.metric == "Balanced_accuracy":
            metric_fct = self.balanced_accuracy
        elif '{TP}' in self.metric or '{FP}' in self.metric or '{TN}' in self.metric or '{FN}' in self.metric:
            # user-defined formula: fill in the counts and evaluate the expression
            metric_fct = lambda TP, FP, TN, FN: eval(
                self.metric.format(
                    TP=float(TP), FP=float(FP), TN=float(TN), FN=float(FN)))
        elif self.inverse_metric:
            # negate the metric so that minimizing it maximizes the original metric
            metric_fct = lambda TP, FP, TN, FN: \
                (-1.0) * BinaryClassificationDataset.calculate_confusion_metrics(
                    {"True_negatives": TN,
                     "True_positives": TP,
                     "False_positives": FP,
                     "False_negatives": FN},
                    weight=self.weight)[self.metric]
        else:
            metric_fct = lambda TP, FP, TN, FN: \
                BinaryClassificationDataset.calculate_confusion_metrics(
                    {"True_negatives": TN,
                     "True_positives": TP,
                     "False_positives": FP,
                     "False_negatives": FN},
                    weight=self.weight)[self.metric]
        return metric_fct
Example #2
    def _get_metric_fct(self):
        if self.metric == 'Mutual_information':
            metric_fct = lambda TP, FP, TN, FN: ClassificationCollection.mutual_information(TN, FN, TP, FP)
        elif self.metric == 'Normalized_mutual_information':
            metric_fct = lambda TP, FP, TN, FN: ClassificationCollection.normalized_mutual_information(TN, FN, TP, FP)
        elif self.metric == "Balanced_accuracy":
            metric_fct = self.balanced_accuracy
        elif '{TP}' in self.metric or '{FP}' in self.metric or '{TN}' in self.metric or '{FN}' in self.metric:
            metric_fct = lambda TP, FP, TN, FN: eval(self.metric.format(TP=float(TP),
                                                                        FP=float(FP),
                                                                        TN=float(TN),
                                                                        FN=float(FN)))
        elif self.inverse_metric:
            metric_fct = lambda TP, FP, TN, FN: \
                (-1.0) * ClassificationCollection.calculate_confusion_metrics(
                    {"True_negatives": TN,
                     "True_positives": TP,
                     "False_positives": FP,
                     "False_negatives": FN},
                    weight=self.weight)[self.metric]
        else:
            metric_fct = lambda TP, FP, TN, FN: \
                ClassificationCollection.calculate_confusion_metrics(
                    {"True_negatives": TN,
                     "True_positives": TP,
                     "False_positives": FP,
                     "False_negatives": FN},
                    weight=self.weight)[self.metric]
        return metric_fct
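
Both variants dispatch on self.metric and, when the metric string contains {TP}, {FP}, {TN} or {FN}, treat it as a formula template that is filled in with the confusion-matrix counts and evaluated with eval. Below is a minimal, standalone sketch of that mechanism (illustration only, not pySPACE code; the formula string is just an example computing plain accuracy):

# Standalone sketch of the '{TP}'-style formula branch (illustration only).
metric_template = "({TP} + {TN}) / ({TP} + {TN} + {FP} + {FN})"  # plain accuracy

def custom_metric(TP, FP, TN, FN, template=metric_template):
    # str.format substitutes the float counts, eval computes the expression
    return eval(template.format(TP=float(TP), FP=float(FP), TN=float(TN), FN=float(FN)))

print(custom_metric(TP=40, FP=10, TN=45, FN=5))  # prints 0.85

The inverse_metric branch, by contrast, simply multiplies the looked-up metric by -1.0, presumably so that a routine that minimizes its objective effectively maximizes the metric.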
Example #3
    def __init__(self, erp_class_label, retained_channels=None):
        super(SSNRSinkNode, self).__init__()

        # We reuse the BinaryClassificationDataset (maybe this should be renamed
        # to MetricsDataset?)
        self.set_permanent_attributes(# Object for handling SSNR related calculations
                                      ssnr=SSNR(erp_class_label, retained_channels),
                                      # Result collection
                                      ssnr_collection=BinaryClassificationDataset(),
                                      erp_class_label=erp_class_label)
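
The constructor above only wires two helpers together: an SSNR object for the signal-to-signal-plus-noise-ratio computation and a BinaryClassificationDataset that will collect the resulting metrics. set_permanent_attributes comes from the pySPACE BaseNode base class; the sketch below illustrates its assumed semantics (keyword arguments become instance attributes that are restored on reset). The class and variable names are made up for illustration, this is not the actual pySPACE implementation:

# Rough sketch of the assumed semantics of set_permanent_attributes:
# keyword arguments become instance attributes that survive a reset.
class NodeSketch(object):
    def __init__(self):
        self._permanent = {}

    def set_permanent_attributes(self, **kwargs):
        # remember the values so reset() can restore them later
        self._permanent.update(kwargs)
        for name, value in kwargs.items():
            setattr(self, name, value)

    def reset(self):
        # restore every permanent attribute to its remembered value
        for name, value in self._permanent.items():
            setattr(self, name, value)

node = NodeSketch()
node.set_permanent_attributes(erp_class_label="Target", ssnr_collection=None)
node.erp_class_label = "Standard"  # transient change
node.reset()
print(node.erp_class_label)        # back to "Target"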
Example #4
File: SOR.py  Project: Hansa064/pyspace
    def looCV(self):
        """ Calculate leave one out metrics """
        # remember original solution
        optimal_w = copy.deepcopy(self.w)
        optimal_b = copy.deepcopy(self.b)
        optimal_dual_solution = copy.deepcopy(self.dual_solution)
        # preparation of sorting
        sort_dual = self.dual_solution
        # sort indices --> zero weights do not need any changing and
        # low weights are less relevant for changes
        sorted_indices = list(numpy.argsort(sort_dual))
        sorted_indices.reverse()

        prediction_vectors = []
        using_initial_solution = True
        for index in sorted_indices:
            d_i = self.dual_solution[index]
            # delete each index from the current observation
            if d_i == 0 and using_initial_solution:
                # no change in classifier necessary
                pass
            else:
                # set weight to zero and track the corresponding changes
                self.reduce_dual_weight(index)
                # reiterate till convergence but skip current index
                temp_iter = self.iterations
                self.iteration_loop(self.M, reduced_indices=[index])
                self.iterations += temp_iter
                using_initial_solution = False
            prediction_vectors.append(
                (self._execute(numpy.atleast_2d(self.samples[index])),
                 self.classes[self.labels[index]]))
        self.loo_metrics = BinaryClassificationDataset.calculate_metrics(
            prediction_vectors,
            ir_class=self.classes[1],
            sec_class=self.classes[0])
        # undo changes
        self.b = optimal_b
        self.w = optimal_w
        self.dual_solution = optimal_dual_solution
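
calculate_metrics receives a list of (prediction, true_label) pairs; in the method above the predictions are whatever self._execute returns, i.e. pySPACE prediction objects. As a rough, self-contained illustration of what such a metrics computation boils down to, the sketch below tallies confusion counts from plain label pairs and derives balanced accuracy. Only the parameter names ir_class and sec_class are taken from the call above; everything else is made up for illustration and is not the pySPACE implementation:

# Illustration only: reduce (predicted_label, true_label) pairs to confusion
# counts for the class of interest (ir_class) vs. the second class (sec_class).
def confusion_counts(pairs, ir_class, sec_class):
    TP = FP = TN = FN = 0
    for predicted, truth in pairs:
        if predicted == ir_class:
            if truth == ir_class:
                TP += 1
            else:
                FP += 1
        elif truth == sec_class:
            TN += 1
        else:
            FN += 1
    return TP, FP, TN, FN

pairs = [("Target", "Target"), ("Standard", "Target"), ("Standard", "Standard")]
TP, FP, TN, FN = confusion_counts(pairs, ir_class="Target", sec_class="Standard")
balanced_accuracy = 0.5 * (TP / float(TP + FN) + TN / float(TN + FP))  # 0.75 here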