Example #1
    def in_concept_avg(self, domain):
        """
        p(y in C | `self.hypotheses`)

        for each hypothesis h, if y in C_h, accumulated w_h where w is the weight of a hypothesis,
        determined by the hypothesis's posterior score p(h | y)

        ==> This is the weighted bayesian model averaging described in (Murphy, 2007)

        """
        self.update()
        probs_in_c = {}

        # The normalizing constant over the hypotheses' posterior scores does not
        # depend on y, so compute it once (in log space) before looping over the domain.
        Z = logsumexp([h.posterior_score for h in self.hypotheses])

        for y in domain:
            prob_in_c = 0.0

            for h in self.hypotheses:
                C = h()  # the concept (set) this hypothesis picks out
                w = h.posterior_score - Z  # log of the hypothesis's normalized weight
                if y in C:
                    prob_in_c += exp(w)
            probs_in_c[y] = prob_in_c

        return probs_in_c
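
To make the weighting concrete: the method computes p(y in C | data) = sum_h p(h | data) * 1[y in C_h], with p(h | data) obtained by normalizing the log posterior scores via logsumexp. Below is a minimal standalone sketch of the same averaging; the Hypothesis namedtuple, the toy concepts, and the log scores are invented for illustration and are not LOTlib classes.

from collections import namedtuple
from math import exp, log

# Toy stand-ins for hypotheses: each carries an unnormalized log posterior score
# and the concept (set) it picks out. Values are made up for the example.
Hypothesis = namedtuple('Hypothesis', ['posterior_score', 'concept'])

hypotheses = [
    Hypothesis(posterior_score=-1.0, concept={1, 2, 3}),
    Hypothesis(posterior_score=-2.5, concept={2, 3}),
    Hypothesis(posterior_score=-4.0, concept={3}),
]

def logsumexp(xs):
    """Numerically stable log(sum(exp(x) for x in xs))."""
    m = max(xs)
    return m + log(sum(exp(x - m) for x in xs))

def in_concept_avg(hypotheses, domain):
    """p(y in C) for each y in domain, averaging over the weighted hypotheses."""
    Z = logsumexp([h.posterior_score for h in hypotheses])
    return {y: sum(exp(h.posterior_score - Z)
                   for h in hypotheses if y in h.concept)
            for y in domain}

print(in_concept_avg(hypotheses, domain=[1, 2, 3, 4]))
# 3 is in every concept, so its probability is 1.0; 4 is in none, so it gets 0.0.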
Example #2
from collections import defaultdict  # for the empirical counts below

from LOTlib.Miscellaneous import exp
from LOTlib.Examples.Quantifier.Model import *

#comparison = GriceanQuantifierLexicon(make_my_hypothesis, my_weight_function)
#comparison.set_word('every', LOTHypothesis(G, value='SET_IN_TARGET', f=lambda A, B, S:  presup_( nonempty_( A ), empty_( A ) )))
#comparison.set_word('some', LOTHypothesis(G, value='SET_IN_TARGET', f=lambda A, B, S: presup_( True, subset_( A, A ) )))

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#This will debug -- for a given context and all_utterances, see if our likelihood is the same as empirical sampling
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

N_SAMPLES = 10000

if __name__ == "__main__":

    context = sample_context()

    cnt = defaultdict(int)
    for _ in xrange(N_SAMPLES):
        u = target.sample_utterance(context=context,
                                    possible_utterances=target.all_words())
        cnt[u] += 1

    #for w,c in cnt.items():
    #print w, float(c)/float(N_SAMPLES), exp(target.compute_single_likelihood( UtteranceData(utterance=w, possible_utterances=target.all_words(), context=context)))

    print check_counts(
        cnt, lambda w: exp(
            target.compute_single_likelihood(
                UtteranceData(utterance=w,
                              possible_utterances=target.all_words(),
                              context=context))))
Example #3
from collections import defaultdict  # for the empirical counts below

from LOTlib.Miscellaneous import exp
from LOTlib.Examples.Quantifier.Model import *

# distribution of context sizes
# for i in xrange(1000):
#     context = sample_context()

#comparison = GriceanQuantifierLexicon(make_my_hypothesis, my_weight_function)
#comparison.set_word('every', LOTHypothesis(G, value='SET_IN_TARGET', f=lambda A, B, S:  presup_( nonempty_( A ), empty_( A ) )))
#comparison.set_word('some', LOTHypothesis(G, value='SET_IN_TARGET', f=lambda A, B, S: presup_( True, subset_( A, A ) )))

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#This will debug -- for a given context and all_utterances, see if our likelihood is the same as empirical sampling
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

N_SAMPLES = 10000

if __name__ == "__main__":

    context = sample_context()

    cnt = defaultdict(int)
    for _ in xrange(N_SAMPLES):
        u = target.sample_utterance( context=context, possible_utterances=target.all_words())
        cnt[u] += 1

    #for w,c in cnt.items():
        #print w, float(c)/float(N_SAMPLES), exp(target.compute_single_likelihood( UtteranceData(utterance=w, possible_utterances=target.all_words(), context=context)))

    print check_counts(
        cnt, lambda w: exp(
            target.compute_single_likelihood(
                UtteranceData(utterance=w,
                              possible_utterances=target.all_words(),
                              context=context))))
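
The snippet above relies on check_counts, which is not shown here; presumably it compares the empirical sampling frequencies in cnt against the analytic likelihoods. A generic stand-in for that comparison is sketched below under that assumption, with an invented sample() helper and toy probabilities in place of LOTlib's target lexicon.

import random
from collections import defaultdict

# Toy "true" distribution standing in for the model likelihood p(u | context);
# the words and probabilities are invented for illustration.
probs = {'every': 0.5, 'some': 0.3, 'no': 0.2}

def sample(probs):
    """Draw one word by inverse-CDF sampling."""
    r = random.random()
    acc = 0.0
    for w, p in probs.items():
        acc += p
        if r < acc:
            return w
    return w  # guard against floating-point round-off

N_SAMPLES = 10000
cnt = defaultdict(int)
for _ in range(N_SAMPLES):
    cnt[sample(probs)] += 1

# Empirical frequency vs. analytic probability: the columns should agree up to
# Monte Carlo noise on the order of 1/sqrt(N_SAMPLES).
for w in probs:
    print('%-6s empirical=%.3f analytic=%.3f' % (w, cnt[w] / float(N_SAMPLES), probs[w]))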