Example #1
    def _compute_probs(self, w):
        probs = {}
        for pidx in range(len(self.partitions)):
            # accumulate the weighted feature counts for every value of this partition;
            # None marks a value ruled out by a violated hard constraint
            expsums = [0] * self.valuecounts[pidx]
            for fidx in self.partition2formulas[pidx]:
                for i, v in enumerate(self._stat[fidx][pidx]):
                    if w[fidx] == HARD:
                        if v == 0: expsums[i] = None
                    elif expsums[i] is not None:
                        expsums[i] += v * w[fidx]
            # exponentiate and normalize, leaving out the inadmissible values
            expsum = numpy.array([numpy.exp(s) if s is not None else 0 for s in expsums])
            z = fsum(expsum)
            if z == 0: raise SatisfiabilityException('MLN is unsatisfiable: all probability masses of partition %s are zero.' % str(self.partitions[pidx]))
            probs[pidx] = expsum / z
            self.probs[pidx] = expsum
        self.probs = probs
        return probs
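Both variants of _compute_probs (this example and Example #4 below) build, for each partition, a softmax over the weighted feature counts: values ruled out by a violated hard constraint are marked None and receive zero mass, and the remaining exponentials are normalized by their fsum. The snippet below is a minimal standalone sketch of that normalization step; the function name and the toy input are illustrative only and not taken from any of the quoted projects.

 # Minimal sketch (not project code): normalize one partition's weighted sums.
 import numpy
 from math import fsum

 def normalize_partition(weighted_sums):
     # None marks a value that violates a hard constraint; it gets probability 0
     expsum = numpy.array([numpy.exp(s) if s is not None else 0.0 for s in weighted_sums])
     z = fsum(expsum)
     if z == 0:
         raise ValueError('all probability masses of this partition are zero')
     return expsum / z

 print(normalize_partition([0.0, 1.2, None]))  # ~ [0.23, 0.77, 0.0]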
Example #2
 def _f(self, wt, **params):
     self._calculateAtomProbsMB(wt)
     probs = map(lambda x: x if x > 0 else 1e-10, self.atomProbsMB) # prevent 0 probs
     pll = fsum(map(log, probs))
     print "pseudo-log-likelihood:", pll
     return pll
Example #3
 def _f(self, wt, **params):
     self._calculateAtomProbsMB(wt)
     probs = map(lambda x: x if x > 0 else 1e-10, self.atomProbsMB) # prevent 0 probs
     pll = fsum(map(log, probs))
     print "pseudo-log-likelihood:", pll
     return pll
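Examples #2 and #3 compute the pseudo-log-likelihood of the evidence: each ground atom's probability given its Markov blanket is floored at 1e-10 so that log(0) can never occur, and the logs are summed with fsum for numerical accuracy. Below is a minimal sketch of that clamping step with made-up marginals, assuming nothing beyond the standard library.

 # Minimal sketch (made-up numbers): floor zero probabilities before taking logs.
 from math import fsum, log

 atom_probs = [0.9, 0.0, 0.35]                       # illustrative marginals only
 probs = [p if p > 0 else 1e-10 for p in atom_probs]
 pll = fsum(map(log, probs))
 print(pll)                                          # ~ -24.18, dominated by the floored term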
Example #4
    def _compute_probs(self, w):
        probs = {}
        for pidx in range(len(self.partitions)):
            # accumulate the weighted feature counts for every value of this partition;
            # None marks a value ruled out by a violated hard constraint
            expsums = [0] * self.valuecounts[pidx]
            for fidx in self.partition2formulas[pidx]:
                for i, v in enumerate(self._stat[fidx][pidx]):
                    if w[fidx] == HARD:
                        if v == 0: expsums[i] = None
                    elif expsums[i] is not None:
                        expsums[i] += v * w[fidx]
            expsum = numpy.array([
                numpy.exp(s) if s is not None else 0 for s in expsums
            ])  # leave out the inadmissible values
            z = fsum(expsum)
            if z == 0:
                raise SatisfiabilityException(
                    'MLN is unsatisfiable: all probability masses of partition %s are zero.'
                    % str(self.partitions[pidx]))
            probs[pidx] = expsum / z
            self.probs[pidx] = expsum
        self.probs = probs
        return probs
Example #5
 def _f(self, w):
     self._compute_pls(w)
     probs = []
     for var in self.mrf.variables:
         p = self._pls[var.idx][var.evidence_value_index()]
         if p == 0: p = 1e-10  # prevent 0 probabilities
         probs.append(p)
     return fsum(map(log, probs))
Example #6
 def _f(self, w):
     self._compute_pls(w)
     probs = []
     for var in self.mrf.variables:
         p = self._pls[var.idx][var.evidence_value_index()]
         if p == 0: p = 1e-10 # prevent 0 probabilities
         probs.append(p)
     return fsum(map(log, probs))
Example #7
 def _f(self, w, **params):
     self._compute_pls(w)
     probs = []
     for var in self.mrf.variables:
         if var.predicate.name in self.epreds: continue
         p = self._pls[var.idx][var.evidence_value_index()]
         if p == 0: p = 1e-10  # prevent 0 probabilities
         probs.append(p)
     return fsum(map(log, probs))
Example #8
 def _f(self, w, **params):
     self._compute_pls(w)
     probs = []
     for var in self.mrf.variables:
         if var.predicate.name in self.epreds: continue
         p = self._pls[var.idx][var.evidence_value_index()]
         if p == 0: p = 1e-10 # prevent 0 probabilities
         probs.append(p)
     return fsum(map(log, probs))
Example #9
 def printAtomProbsMB(self):
     gndAtoms = sorted(self.mrf.gndAtoms.keys())
     values = []
     for gndAtom in gndAtoms:
         v = self.getAtomProbMB(gndAtom)
         print("%s=%s  %f" % (gndAtom, str(self.mrf._getEvidence(self.mrf.gndAtoms[gndAtom].idx)), v))
         values.append(v)
     pll = fsum(map(log, values))
     print("PLL = %f" % pll)
Example #10
 def printAtomProbsMB(self):
     gndAtoms = self.mrf.gndAtoms.keys()
     gndAtoms.sort()
     values = []
     for gndAtom in gndAtoms:
         v = self.getAtomProbMB(gndAtom)
         print "%s=%s  %f" % (gndAtom, str(self.mrf._getEvidence(self.mrf.gndAtoms[gndAtom].idx)), v)
         values.append(v)
     pll = fsum(map(log, values))
     print "PLL = %f" % pll
Example #11
 def _f(self, w):
     if self.current_wts is None or list(w) != self.current_wts:
         self.current_wts = list(w)
         self.probs = self._compute_probs(w)
     likelihood = numpy.zeros(len(self.partitions))
     for pidx in range(len(self.partitions)):
         p = self.probs[pidx][self.evidx[pidx]]
         if p == 0: p = 1e-10
         likelihood[pidx] += p
     self.iter += 1
     return fsum(map(log, likelihood))
Example #12
 def _f(self, w):
     if self.current_wts is None or list(w) != self.current_wts:
         self.current_wts = list(w)
         self.probs = self._compute_probs(w)
     likelihood = numpy.zeros(len(self.partitions))
     for pidx in range(len(self.partitions)):
         p = self.probs[pidx][self.evidx[pidx]]
         if p == 0: p = 1e-10
         likelihood[pidx] += p
     self.iter += 1
     return fsum(map(log, likelihood))
Example #13
 def _grad(self, w):
     self._compute_pls(w)
     grad = numpy.zeros(len(self.mrf.formulas), numpy.float64)
     for fidx, varval in self._stat.iteritems():
         for varidx, counts in varval.iteritems():
             evidx = self.mrf.variable(varidx).evidence_value_index()
             g = counts[evidx]
             for i, val in enumerate(counts):
                 g -= val * self._pls[varidx][i]
             grad[fidx] += g
     self.grad_opt_norm = sqrt(float(fsum(map(lambda x: x * x, grad))))
     return numpy.array(grad)
Example #14
 def _grad(self, w):
     self._compute_pls(w)
     grad = numpy.zeros(len(self.mrf.formulas), numpy.float64)        
     for fidx, varval in self._stat.iteritems():
         for varidx, counts in varval.iteritems():
             evidx = self.mrf.variable(varidx).evidence_value_index()
             g = counts[evidx]
             for i, val in enumerate(counts):
                 g -= val * self._pls[varidx][i]
             grad[fidx] += g
     self.grad_opt_norm = float(sqrt(fsum(map(lambda x: x * x, grad))))
     return numpy.array(grad)
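Examples #13 and #14 compute, per formula, the gradient of the pseudo-log-likelihood: for every variable, the formula's count at the evidence value minus its expected count under the current per-variable distribution, summed over variables; the two versions differ only in where float() is applied when computing the gradient norm. The snippet below is a toy sketch of one variable's contribution, with made-up counts and probabilities.

 # Minimal sketch (made-up numbers): one variable's contribution to grad[fidx].
 counts = [2.0, 0.0, 1.0]   # formula counts for each value of the variable
 pls = [0.2, 0.5, 0.3]      # current pseudo-likelihoods of those values
 evidx = 0                  # index of the value observed in the evidence

 g = counts[evidx] - sum(c * p for c, p in zip(counts, pls))
 print(g)                   # ~ 1.3: observed count minus expected count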
Example #15
 def _grad(self, w, **params):    
     if self.current_wts is None or list(w) != self.current_wts:  # recompute only when the weights have changed
         self.current_wts = list(w)
         self.probs = self._compute_probs(w)
     grad = numpy.zeros(len(w))
     for fidx, partitions in self._stat.iteritems():
         for part, values in partitions.iteritems():
             v = values[self.evidx[part]]
             for i, val in enumerate(values):
                 v -= self.probs[part][i] * val
             grad[fidx] += v
     self.grad_opt_norm = float(sqrt(fsum(map(lambda x: x * x, grad))))
     return numpy.array(grad)
Example #16
 def _grad(self, w, **params):
     if self.current_wts is None or list(w) != self.current_wts:  # recompute only when the weights have changed
         self.current_wts = list(w)
         self.probs = self._compute_probs(w)
     grad = numpy.zeros(len(w))
     for fidx, partitions in self._stat.iteritems():
         for part, values in partitions.iteritems():
             v = values[self.evidx[part]]
             for i, val in enumerate(values):
                 v -= self.probs[part][i] * val
             grad[fidx] += v
     self.grad_opt_norm = sqrt(float(fsum(map(lambda x: x * x, grad))))
     return numpy.array(grad)
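All of the _f/_grad pairs above return an objective to be maximized and its gradient, and are meant to be handed to a generic gradient-based optimizer. The sketch below shows one way to do that with scipy, minimizing the negated objective; the names fit_weights, learner, and nformulas are placeholders and not the API of any of the quoted projects.

 # Minimal sketch (placeholder names): plug an _f/_grad pair into scipy.
 import numpy
 from scipy.optimize import minimize

 def fit_weights(learner, nformulas):
     # maximize learner._f by minimizing its negation; _grad supplies the Jacobian
     def neg_f(w):
         return -learner._f(w)
     def neg_grad(w):
         return -learner._grad(w)
     w0 = numpy.zeros(nformulas)
     result = minimize(neg_f, w0, jac=neg_grad, method='L-BFGS-B')
     return result.x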