def _computeCounts(self): ''' compute soft counts (assuming independence) ''' allSoft = self.params.get("allSoft", False) if allSoft == False: # compute regular counts for all "normal" possible worlds LL._computeCounts(self) # add another world for soft beliefs baseWorld = self.mrf.worlds[self.idxTrainingDB] self.mrf.worlds.append({"values": baseWorld["values"]}) self.idxTrainingDB = len(self.mrf.worlds) - 1 # and compute soft counts only for that world softCountWorldIndices = [self.idxTrainingDB] else: # compute soft counts for all possible worlds self.counts = {} softCountWorldIndices = xrange(len(self.mrf.worlds)) # compute soft counts for i in softCountWorldIndices: world = self.mrf.worlds[i] if i == self.idxTrainingDB: print "TrainingDB: prod, groundformula" for gf in self.mrf.gndFormulas: prod = truthDegreeGivenSoftEvidence(gf, world["values"], self.mrf) key = (i, gf.idxFormula) cnt = self.counts.get(key, 0) cnt += prod self.counts[key] = cnt if i == self.idxTrainingDB: print "%f gf: %s" % (prod, str(gf)) print "worlds len: ", len(self.mrf.worlds)
def _computeCounts(self): ''' compute soft counts (assuming independence) ''' allSoft = self.params.get("allSoft", False) if allSoft == False: # compute regular counts for all "normal" possible worlds LL._computeCounts(self) # add another world for soft beliefs baseWorld = self.mrf.worlds[self.idxTrainingDB] self.mrf.worlds.append({"values": baseWorld["values"]}) self.idxTrainingDB = len(self.mrf.worlds) - 1 # and compute soft counts only for that world softCountWorldIndices = [self.idxTrainingDB] else: # compute soft counts for all possible worlds self.counts = {} softCountWorldIndices = xrange(len(self.mrf.worlds)) # compute soft counts for i in softCountWorldIndices: world = self.mrf.worlds[i] if i == self.idxTrainingDB: print "TrainingDB: prod, groundformula" for gf in self.mrf.gndFormulas: prod = truthDegreeGivenSoftEvidence(gf, world["values"], self.mrf) key = (i, gf.idxFormula) cnt = self.counts.get(key, 0) cnt += prod self.counts[key] = cnt if i == self.idxTrainingDB: print "%f gf: %s" % (prod, str(gf)) print "worlds len: ", len(self.mrf.worlds)
def _getTruthDegreeGivenEvidence(self, gf, worldValues=None):
    ''' Return the soft truth degree of ground formula gf under the given
        world values; defaults to the MRF's evidence when none are given. '''
    values = self.mrf.evidence if worldValues is None else worldValues
    return truthDegreeGivenSoftEvidence(gf, values, self.mrf)
def _getTruthDegreeGivenEvidence(self, gf, worldValues=None):
    ''' Evaluate gf's truth degree given soft evidence.
        worldValues -- world assignment to evaluate against; when omitted
        (None), self.mrf.evidence is used. '''
    effectiveValues = worldValues
    if effectiveValues is None:
        effectiveValues = self.mrf.evidence
    return truthDegreeGivenSoftEvidence(gf, effectiveValues, self.mrf)
def _f(self, wt): self._calculateWorldValues(wt) #only to calculate partition function here: #self._calculateWorldProbabilities() #new idea: minimize squared error of world prob. given by weights and world prob given by soft evidence error = 0 #old method (does not work with mixed hard and soft evidence) if True: for idxWorld, world in enumerate(self.mrf.worlds): if idxWorld in self.worldProbabilities: #lambda_x worldProbability = self.worldProbabilities[idxWorld] else: worldProbability = 0 worldProbGivenWeights = self.expsums[idxWorld] / self.partition_function error += abs(worldProbGivenWeights - worldProbability) #print "worldProbGivenWeights - worldProbability ", worldProbGivenWeights, "-", worldProbability # for idxWorld, worldProbability in self.worldProbabilities.iteritems(): #lambda_x # worldProbGivenWeights = self.expsums[idxWorld] / self.partition_function # error += abs(worldProbGivenWeights - worldProbability) # #print "world:", self.mrf.worlds[idxWorld] # print "worldProbGivenWeights - worldProbability ", worldProbGivenWeights, "-", worldProbability if False:#new try, doesn't work... 
for idxWorld, world in enumerate(self.mrf.worlds): worldProbGivenWeights = self.expsums[idxWorld] / self.partition_function #compute countDiffSum: #for i, world in enumerate(self.mrf.worlds): if idxWorld not in self.countsByWorld: print "computing counts for:", idxWorld counts = {} #n for gf in self.mrf.gndFormulas: if self.mrf._isTrue(gf, self.mrf.worlds[idxWorld]["values"]): key = gf.idxFormula cnt = counts.get(key, 0) cnt += 1 counts[key] = cnt self.countsByWorld[idxWorld] = counts #� (soft counts for evidence) if len(self.softCountsEvidenceWorld) == 0: print "computing evidence soft counts" self.softCountsEvidenceWorld = {} for gf in self.mrf.gndFormulas: prod = truthDegreeGivenSoftEvidence(gf, self.mrf.evidence, self.mrf) key = gf.idxFormula cnt = self.softCountsEvidenceWorld.get(key, 0) cnt += prod self.softCountsEvidenceWorld[key] = cnt #if i == self.idxTrainingDB: # print "%f gf: %s" % (prod, str(gf)) countDiffSum = 0 for idxFormula, count in self.countsByWorld[idxWorld].iteritems(): countDiffSum += abs(count - self.softCountsEvidenceWorld[idxFormula]) #print "countDiffSum", countDiffSum, "worldProbability", worldProbGivenWeights error += worldProbGivenWeights * ((countDiffSum)**2) print "wt =", wt print "error:", error ll = -error print return ll
def _f(self, wt): self._calculateWorldValues( wt) #only to calculate partition function here: #self._calculateWorldProbabilities() #new idea: minimize squared error of world prob. given by weights and world prob given by soft evidence error = 0 #old method (does not work with mixed hard and soft evidence) if True: for idxWorld, world in enumerate(self.mrf.worlds): if idxWorld in self.worldProbabilities: #lambda_x worldProbability = self.worldProbabilities[idxWorld] else: worldProbability = 0 worldProbGivenWeights = self.expsums[ idxWorld] / self.partition_function error += abs(worldProbGivenWeights - worldProbability) #print "worldProbGivenWeights - worldProbability ", worldProbGivenWeights, "-", worldProbability # for idxWorld, worldProbability in self.worldProbabilities.iteritems(): #lambda_x # worldProbGivenWeights = self.expsums[idxWorld] / self.partition_function # error += abs(worldProbGivenWeights - worldProbability) # #print "world:", self.mrf.worlds[idxWorld] # print "worldProbGivenWeights - worldProbability ", worldProbGivenWeights, "-", worldProbability if False: #new try, doesn't work... 
for idxWorld, world in enumerate(self.mrf.worlds): worldProbGivenWeights = self.expsums[ idxWorld] / self.partition_function #compute countDiffSum: #for i, world in enumerate(self.mrf.worlds): if idxWorld not in self.countsByWorld: print "computing counts for:", idxWorld counts = {} #n for gf in self.mrf.gndFormulas: if self.mrf._isTrue( gf, self.mrf.worlds[idxWorld]["values"]): key = gf.idxFormula cnt = counts.get(key, 0) cnt += 1 counts[key] = cnt self.countsByWorld[idxWorld] = counts #� (soft counts for evidence) if len(self.softCountsEvidenceWorld) == 0: print "computing evidence soft counts" self.softCountsEvidenceWorld = {} for gf in self.mrf.gndFormulas: prod = truthDegreeGivenSoftEvidence( gf, self.mrf.evidence, self.mrf) key = gf.idxFormula cnt = self.softCountsEvidenceWorld.get(key, 0) cnt += prod self.softCountsEvidenceWorld[key] = cnt #if i == self.idxTrainingDB: # print "%f gf: %s" % (prod, str(gf)) countDiffSum = 0 for idxFormula, count in self.countsByWorld[ idxWorld].iteritems(): countDiffSum += abs( count - self.softCountsEvidenceWorld[idxFormula]) #print "countDiffSum", countDiffSum, "worldProbability", worldProbGivenWeights error += worldProbGivenWeights * ((countDiffSum)**2) print "wt =", wt print "error:", error ll = -error print return ll