Example #1
import numpy as np

def compute_average_precision(true_labels, predictions, verbose=0):
    # Compute the average precision per class/query and report the mean (mAP).
    # `average_precision(labels, scores)` is assumed to be defined elsewhere
    # in the project.
    average_precisions = []
    for ii in sorted(true_labels.keys()):

        ap = average_precision(true_labels[ii], predictions[ii])
        average_precisions.append(ap)

        if verbose:
            print("%3d %6.2f" % (ii, 100 * ap))

    if verbose:
        print('----------')

    print("mAP %6.2f" % (100 * np.mean(average_precisions)))
Example #3
    def fit_late_fusion(self, scores, tr_labels):
        # Equal weights.
        # D = scores.shape[1]
        # self.weights = np.array([1. / D] * D)

        # Dumb cross-validation: grid-search the fusion weights and keep the
        # combination with the best average precision on the training labels.
        # `weights_grid` is assumed to yield hashable (tuple) weight vectors
        # over the D score columns.
        best_ap = 0
        best_weights = None
        D = scores.shape[1]
        for weights in weights_grid(D):
            self.weights = weights
            ap = average_precision(
                tr_labels, self.predict_late_fusion(scores))

            if ap >= best_ap:
                best_ap = ap
                best_weights = weights

            self.weight_scores.setdefault(weights, []).append(ap)

        # Keep the best-scoring weights rather than the last grid point tried.
        self.weights = best_weights
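The `weights_grid` helper is not shown in the example. A minimal sketch, assuming it enumerates convex weight combinations over the D score columns on a fixed step (the step size and the implementation are assumptions), could look like this:

import itertools

def weights_grid(D, step=0.1):
    # Yield tuples of D non-negative weights that sum to 1, on a coarse grid.
    n = int(round(1.0 / step))
    for combo in itertools.product(range(n + 1), repeat=D):
        if sum(combo) == n:
            yield tuple(c * step for c in combo)

Yielding tuples keeps the weight vectors hashable, so they can be used directly as keys of `self.weight_scores`.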
Example #4
    def score(self, te_kernel, te_labels):
        # Variant 1: score with probability estimates; returns AP in [0, 1].
        return average_precision(te_labels, self.predict_proba(te_kernel))

    def score(self, te_kernel, te_labels):
        # Variant 2: score with hard predictions, reported as a percentage.
        predicted = self.predict(te_kernel)
        score = average_precision(te_labels, predicted) * 100
        return score
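Assuming a fitted classifier object (hypothetical name `clf`) exposing one of the `score` variants above, evaluation on held-out data reduces to a single call:

# `clf`, `te_kernel` and `te_labels` are assumed to come from the surrounding
# training code; `score` returns the test average precision.
test_ap = clf.score(te_kernel, te_labels)
print("test AP: %.2f" % test_ap)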