Example No. 1
    def value(self, weights, verbose=False):
        """Return the (optionally penalized) negative conditional log-likelihood of the training data under the given weights."""
        objective = 0.0

        if verbose:
            print "Calculating log probabilities and objective..."
        log_probs = list()
        for pos, (label, features) in enumerate(self.labeled_extracted_features):
            log_probs.append(get_log_probs(features, weights, self.labels))

        objective = -sum(log_probs[index][label] for (index, (label, _)) in enumerate(self.labeled_extracted_features))

        if verbose:
            print "Raw objective: %f" % objective

        if verbose:
            print "Applying penalty"

        if self.sigma:
            # Apply an L2 (Gaussian prior) penalty to discourage large weights
            penalty = sum(
                sum(weight ** 2 for weight in feature_weights.itervalues()) for feature_weights in weights.itervalues()
            )
            penalty /= 2 * self.sigma ** 2
            objective += penalty

            if verbose:
                print "Penalized objective: %f" % objective

        return objective
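
For reference, the quantity returned here is the L2-penalized negative conditional log-likelihood of the training data. A sketch of the formula, assuming get_log_probs returns log P_w(y | x) for a maximum-entropy (multinomial logistic) model and self.sigma is the standard deviation of a Gaussian prior on the weights:

    J(w) = -\sum_i \log P_w(y_i \mid x_i) + \frac{1}{2\sigma^2} \sum_y \sum_f w_{y,f}^2

The second term is exactly the nested sum over weights computed in the if self.sigma: branch above.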
Example No. 2
    def value_and_gradient(self, weights, verbose=False):
        """Return (objective, gradient) for the given weights, caching the most recent result."""
        if weights == self.last_vg_weights:
            return self.last_vg
        objective = 0.0
        gradient = CounterMap()

        if verbose:
            print "Calculating log probabilities and objective..."

        # Per-datum log-probability distributions over the labels
        log_probs = list()
        for pos, (label, features) in enumerate(self.labeled_extracted_features):
            log_probs.append(get_log_probs(features, weights, self.labels))
            assert (
                abs(sum(exp(log_probs[pos][label]) for label in self.labels) - 1.0) < 0.0001
            ), "Not a distribution: P[any | features] = %f" % (sum(exp(log_probs[pos][label]) for label in self.labels))

        objective = -sum(log_prob[label] for (log_prob, (label, _)) in zip(log_probs, self.labeled_extracted_features))

        if verbose:
            print "Raw objective: %f" % objective

        if verbose:
            print "Calculating expected counts..."

        expected_counts = get_expected_counts(self.labeled_extracted_features, self.labels, log_probs, CounterMap())

        if verbose:
            print "Calculating gradient..."

        gradient = expected_counts - self.empirical_counts

        if verbose:
            print "Applying penalty"

        # Apply an L2 (Gaussian prior) penalty to discourage large weights
        if self.sigma:
            penalty = 0.0

            for label, feature_weights in gradient.iteritems():
                for feature in feature_weights:
                    weight = weights[label][feature]
                    penalty += weight ** 2
                    gradient[label][feature] += weight / (self.sigma ** 2)

            penalty /= 2 * self.sigma ** 2
            objective += penalty
            if verbose:
                print "Penalized objective: %f" % objective

        self.last_vg_weights = weights
        self.last_vg = (objective, gradient)
        return (objective, gradient)
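
The gradient assembled above is the standard expected-minus-empirical feature counts of a maximum-entropy model, plus the per-weight penalty term added inside the if self.sigma: block. As a sketch, under the same assumptions as the formula following Example No. 1:

    \frac{\partial J}{\partial w_{y,f}} = \mathbb{E}_{P_w}[\mathrm{count}(y,f)] - \mathrm{count}_{\mathrm{emp}}(y,f) + \frac{w_{y,f}}{\sigma^2}

Note that the code only adds the penalty term for (label, feature) pairs already present in gradient; weights whose pairs never appear there receive no penalty contribution.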
Example No. 3
    def get_log_probabilities(self, datum_features):
        return get_log_probs(datum_features, self.weights, self.labels)
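
The helper get_log_probs itself is not shown in these examples. A minimal, hypothetical sketch of what it presumably computes, assuming weights maps label -> feature -> weight and datum_features maps feature -> value (both dict-like):

    from math import exp, log

    def get_log_probs_sketch(features, weights, labels):
        # Hypothetical stand-in for get_log_probs: log P(label | features) under a
        # maximum-entropy model. Score each label by the dot product of its weight
        # vector with the feature values.
        scores = {}
        for label in labels:
            label_weights = weights[label]
            scores[label] = sum(label_weights.get(feature, 0.0) * value
                                for feature, value in features.items())
        # Log-normalize with a max shift for numerical stability (log-sum-exp).
        max_score = max(scores.values())
        log_z = max_score + log(sum(exp(s - max_score) for s in scores.values()))
        return dict((label, score - log_z) for label, score in scores.items())

The real implementation likely returns a Counter-like object (the callers index the result by label and exponentiate it), but the log-sum-exp normalization above is the core of any such function.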