Example #1
    def compute_single_likelihood(self, di):
        """
            Compute the likelihood, where di.input is a [f arg1 arg2 arg3 ...], curried
        """

        freduced = self(*di.input)  # apply the lexicon to the datum's (curried) input

        # With probability 1 - alpha the output word is drawn uniformly from the vocabulary;
        # with probability alpha it is drawn uniformly from the words whose associated
        # lambda reduces to the same expression as the evaluated input.
        p = (1.0 - self.alpha) / len(self.all_words())

        freduced_str = lambdastring(freduced)

        # TODO: maybe we also reduce self.value[w]?
        matches = [
            w for w in self.all_words()
            if lambdastring(self.value[w].value) == freduced_str
        ]
        # print ">>>", di.input, freduced_str, matches

        if di.output in matches:
            p += self.alpha / len(matches)  # otherwise only the uniform outlier mass remains

        return log(p)
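
The likelihood above is a noise mixture: with probability 1 - alpha the observed word is treated as drawn uniformly from the whole vocabulary, and with probability alpha it is drawn uniformly from the words whose lambda reduces to the same expression as the evaluated input. The standalone sketch below only illustrates that arithmetic; the vocabulary, the matching words, and the alpha value are made up for illustration and are not taken from LOTlib.

from math import log

alpha = 0.9                                  # assumed noise parameter, illustration only
all_words = ["he", "she", "it", "they"]      # hypothetical vocabulary
matches = ["he", "it"]                       # words whose reduced lambda equals the evaluated input
observed = "he"

p = (1.0 - alpha) / len(all_words)           # uniform "outlier" component, always present
if observed in matches:
    p += alpha / len(matches)                # alpha mass shared equally among matching words

print(log(p))                                # log-likelihood of this single datum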
Example #2
def print_lexicon_and_data(L, data):
    """
    Friendlier printing of the lexicon and associated data inputs and outputs
    """

    print(L.posterior_score, L.prior, L.likelihood)
    print(L)

    for di in data:
        # Evaluate the lexicon on this datum and list every word whose lambda reduces to the same string
        outstr = lambdastring(L(*di.input))
        print("\t", di.input, "->", di.output, "\t==>", [
            w for w in L.value.keys()
            if lambdastring(L.value[w].value) == outstr
        ], "\t", outstr)

    print("\n")
Example #3
File: Model.py  Project: joshrule/LOTlib
    def compute_single_likelihood(self, di):
        """
            Compute the likelihood, where di.input is a [f arg1 arg2 arg3 ...], curried
        """

        freduced = self(*di.input) # this is where the magic happens

        # Okay with 1.0-ALPHA we'll sample uniformly from words
        # and with probability ALPHA we'll sample from all the words which have
        # identical associated lambdas
        p = (1.0-self.alpha) / len(self.all_words())

        freduced_str = lambdastring(freduced)

        # TODO: maybe we also reduce self.value[w]?
        matches = list(filter(lambda w: lambdastring(self.value[w].value) == freduced_str, self.all_words()))  # list() so len() and membership also work on Python 3
        # print ">>>", di.input, freduced_str, matches

        if di.output in matches:
            p += self.alpha / len(matches) # else outlier likelihood

        return log(p)
Example #4
File: Model.py  Project: joshrule/LOTlib
def print_lexicon_and_data(L, data):
    """
    Friendlier printing of the lexicon and associated data inputs and outputs
    """

    print L.posterior_score, L.prior, L.likelihood
    print L

    for di in data:
        outstr = lambdastring(L(*di.input))
        print "\t", di.input, "->", di.output, "\t==>", [ w for w in L.value.keys() if lambdastring(L.value[w].value)==outstr], "\t", outstr

    print "\n"
Example #5
File: Model.py  Project: joshrule/LOTlib
 def __str__(self):
     return ('\n'.join([u"%-15s: %s" % (qq(w), lambdastring(v.value)) for w, v in sorted(self.value.iteritems())]) + '\0').encode('utf-8')
Example #6
 def __str__(self):
     return '\n'.join([
         "%-15s: %s" % (qq(w), lambdastring(v.value))
         for w, v in sorted(self.value.items())
     ]) + '\0'
Example #7
 def __str__(self):
     return ('\n'.join([
         u"%-15s: %s" % (qq(w), lambdastring(v.value))
         for w, v in sorted(self.value.iteritems())
     ]) + '\0').encode('utf-8')
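
The three __str__ variants above produce the same layout: each word is quoted (qq is taken here to wrap its argument in quotes) and left-padded to 15 characters by "%-15s" so the lambda strings line up in a column; the Python 2 versions additionally iterate with iteritems() and encode the result as UTF-8. A minimal illustration with a stand-in qq and made-up entries, not taken from LOTlib:

def qq(s):                                   # stand-in: assumed to wrap its argument in double quotes
    return '"%s"' % s

entries = {"he": "lambda x: male(x)",        # hypothetical word -> lambda-string pairs
           "she": "lambda x: female(x)"}

# Each line prints the quoted word left-justified in a 15-character field, then the lambda string.
print("\n".join("%-15s: %s" % (qq(w), v) for w, v in sorted(entries.items())))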