Example #1
 def __init__(self, mrf, queries=ALL, state=None, **params):
     MCMCInference.__init__(self, mrf, queries, **params)
     if state is None:
         self.state = self.random_world(self.mrf.evidence)
     else:
         self.state = state
     self.sum = 0
     self.var2gf = defaultdict(set)
     self.weights = list(self.mrf.mln.weights)
     formulas = []
     for f in self.mrf.formulas:
         if f.weight < 0:
             f_ = self.mrf.mln.logic.negate(f)
             f_.weight = -f.weight
             formulas.append(f_.nnf())
     grounder = FastConjunctionGrounding(mrf,
                                         formulas=formulas,
                                         simplify=True,
                                         unsatfailure=True)
     for gf in grounder.itergroundings():
         if isinstance(gf, Logic.TrueFalse): continue
         vars_ = set(map(lambda a: self.mrf.variable(a).idx, gf.gndatoms()))
         for v in vars_:
             self.var2gf[v].add(gf)
         self.sum += (self.hardw if gf.weight == HARD else
                      gf.weight) * (1 - gf(self.state))
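The constructor above does two pieces of bookkeeping: `var2gf` maps every variable index to the ground formulas it occurs in, and `self.sum` accumulates the weight of all ground formulas that the current state violates. The index is what makes local search cheap: flipping one variable only requires re-evaluating the formulas registered under that variable. Below is a minimal, self-contained sketch of this idea; the `GroundFormula` class and the helper names are hypothetical stand-ins, not pracmln API.

    from collections import defaultdict

    class GroundFormula:
        # hypothetical stand-in: a weighted formula over a set of variable indices
        def __init__(self, weight, var_idxs, satisfied_by):
            self.weight = weight              # numeric weight (or infinity for hard constraints)
            self.var_idxs = var_idxs          # indices of the variables the formula mentions
            self.satisfied_by = satisfied_by  # callable: state -> bool

    def build_index(gndformulas):
        # map every variable index to the ground formulas it occurs in
        var2gf = defaultdict(set)
        for gf in gndformulas:
            for v in gf.var_idxs:
                var2gf[v].add(gf)
        return var2gf

    def unsat_cost(gndformulas, state):
        # total weight of the ground formulas violated by `state`
        return sum(gf.weight for gf in gndformulas if not gf.satisfied_by(state))

    def flip_delta(var2gf, state, var, newval):
        # only formulas containing `var` can change truth value, so the cost
        # difference is computed over var2gf[var] instead of the whole network
        affected = var2gf[var]
        before = unsat_cost(affected, state)
        new_state = dict(state)
        new_state[var] = newval
        return unsat_cost(affected, new_state) - before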
Example #2
 def __init__(self, mrf, queries=ALL, **params):
     MCMCInference.__init__(self, mrf, queries, **params)
     self.var2gf = defaultdict(set)
     grounder = FastConjunctionGrounding(mrf, simplify=True, unsatfailure=True, cache=None)
     for gf in grounder.itergroundings():
         if isinstance(gf, Logic.TrueFalse): continue
         vars_ = set(map(lambda a: self.mrf.variable(a).idx, gf.gndatoms()))
         for v in vars_: self.var2gf[v].add(gf)
Example #3
 def __init__(self, mrf, queries=ALL, state=None, **params):
     MCMCInference.__init__(self, mrf, queries, **params)
     if state is None:
         self.state = self.random_world(self.mrf.evidence)
     else:
         self.state = state
     self.sum = 0
     self.var2gf = defaultdict(set)
     self.weights = list(self.mrf.mln.weights)
     formulas = []
     for f in self.mrf.formulas:
         if f.weight < 0:
             f_ = self.mrf.mln.logic.negate(f)
             f_.weight = - f.weight
             formulas.append(f_.nnf())
     grounder = FastConjunctionGrounding(mrf, formulas=formulas, simplify=True, unsatfailure=True)
     for gf in grounder.itergroundings():
         if isinstance(gf, Logic.TrueFalse): continue
         vars_ = set(map(lambda a: self.mrf.variable(a).idx, gf.gndatoms()))
         for v in vars_: self.var2gf[v].add(gf)
         self.sum += (self.hardw if gf.weight == HARD else gf.weight) * (1 - gf(self.state))
Example #4
    def _run(self, **params):
        '''
        infer one or more probabilities P(F1 | F2)
        what: a ground formula (string) or a list of ground formulas (list of strings) (F1)
        given: a formula as a string (F2)
        set evidence according to given conjunction (if any)
        '''
        #         if softEvidence is None:
        #             self.softEvidence = self.mln.softEvidence
        #         else:
        #             self.softEvidence = softEvidence
        # initialize chains
        chains = MCMCInference.ChainGroup(self)
        for i in range(self.chains):
            chain = GibbsSampler.Chain(self, self.queries)
            chains.chain(chain)
#             if self.softEvidence is not None:
#                 chain.setSoftEvidence(self.softEvidence)
        # do Gibbs sampling
#         if verbose and details: print "sampling..."
        converged = 0
        steps = 0
        if self.verbose:
            bar = ProgressBar(color='green', steps=self.maxsteps)
        while converged != self.chains and steps < self.maxsteps:
            converged = 0
            steps += 1
            for chain in chains.chains:
                chain.step()
            if self.verbose:
                bar.inc()
                bar.label('%d / %d' % (steps, self.maxsteps))
#                 if self.useConvergenceTest:
#                     if chain.converged and numSteps >= minSteps:
#                         converged += 1
#             if verbose and details:
#                 if numSteps % infoInterval == 0:
#                     print "step %d (fraction converged: %.2f)" % (numSteps, float(converged) / numChains)
#                 if numSteps % resultsInterval == 0:
#                     chainGroup.getResults()
#                     chainGroup.printResults(shortOutput=True)
        # get the results
        return chains.results()[0]
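The `_run` method above steps several Markov chains in parallel until either every chain has converged or `maxsteps` is reached, then reads the query probabilities off the chain group's sample counts. A minimal, library-independent sketch of that outer loop over binary variables follows; `conditional_prob` is a hypothetical placeholder for the full conditional P(v = True | all other variables) that a real Gibbs sampler derives from the ground formulas.

    import random

    def gibbs_run(variables, conditional_prob, nchains=5, maxsteps=1000):
        # run several Gibbs chains in parallel and average per-variable marginals
        chains = [{v: random.random() < 0.5 for v in variables} for _ in range(nchains)]
        counts = {v: 0 for v in variables}
        total = 0
        for _ in range(maxsteps):
            for state in chains:
                # one Gibbs sweep: resample each variable from its full conditional
                for v in variables:
                    p = conditional_prob(v, state)  # hypothetical: P(v=True | rest)
                    state[v] = random.random() < p
                # update the running counts used for the marginal estimates
                for v in variables:
                    counts[v] += state[v]
                total += 1
        return {v: counts[v] / total for v in variables}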
Example #5
 def __init__(self, mrf, queries=ALL, **params):
     MCMCInference.__init__(self, mrf, queries, **params)
     self._weight_backup = list(self.mrf.mln.weights)
Example #6
 def _run(self):
     '''
     p: probability of a greedy (WalkSAT) move
     initAlgo: algorithm to use in order to find an initial state that satisfies all hard constraints ("SampleSAT" or "SAMaxWalkSat")
     verbose: whether to display results upon completion
     details: whether to display information while the algorithm is running            
     infoInterval: [if details==True] interval (no. of steps) in which to display the current step number and some additional info
     resultsInterval: [if details==True] interval (no. of steps) in which to display intermediate results; [if keepResultsHistory==True] interval in which to store intermediate results in the history
     debug: whether to display debug information (e.g. internal data structures) while the algorithm is running
         debugLevel: controls degree to which debug information is presented
     keepResultsHistory: whether to store the history of results (at each resultsInterval)
     referenceResults: reference results to compare obtained results to
     saveHistoryFile: if not None, save history to given filename
     sampleCallback: function that is called for every sample with the sample and step number as parameters
     softEvidence: if None, use soft evidence from MLN, otherwise use given dictionary of soft evidence
     handleSoftEvidence: if False, ignore all soft evidence in the MCMC sampling (but still compute soft evidence statistics if soft evidence is there)
     '''
     logger.debug("starting MC-SAT with maxsteps=%d, softevidence=%s" %
                  (self.maxsteps, self.softevidence))
     # initialize the KB and gather required info
     self._initkb()
     # print CNF KB
     logger.debug("CNF KB:")
     for gf in self.gndformulas:
         logger.debug("%7.3f  %s" % (gf.weight, str(gf)))
     print()
     # set the random seed if it was given
     if self.rndseed is not None:
         random.seed(self.rndseed)
     # create chains
     chaingroup = MCMCInference.ChainGroup(self)
     self.chaingroup = chaingroup
     for i in range(self.chains):
         chain = MCMCInference.Chain(self, self.queries)
         chaingroup.chain(chain)
         # satisfy hard constraints using initialization algorithm
         M = []
         NLC = []
         for i, gf in enumerate(self.gndformulas):
             if gf.weight == HARD:
                 if gf.islogical():
                     clause_range = self.gf2clauseidx[i]
                     M.extend(range(*clause_range))
                 else:
                     NLC.append(gf)
         if M or NLC:
             logger.debug('Running SampleSAT')
             # note: we can't use p=1.0 because there is a chance of getting into an oscillating state
             chain.state = SampleSAT(self.mrf, chain.state, M, NLC, self, p=self.p).run()
     if logger.level == logs.DEBUG:
         self.mrf.print_world_vars(chain.state)
     self.step = 1
     logger.debug('running MC-SAT with %d chains' % len(chaingroup.chains))
     self._watch.tag('running MC-SAT', self.verbose)
     if self.verbose:
         bar = ProgressBar(steps=self.maxsteps, color='green')
     while self.step <= self.maxsteps:
         # take one step in each chain
         for chain in chaingroup.chains:
             # choose a subset of the satisfied formulas and sample a state that satisfies them
             state = self._satisfy_subset(chain)
             # update chain counts
             chain.update(state)
         if self.verbose:
             bar.inc()
             bar.label('%d / %d' % (self.step, self.maxsteps))
         # intermediate results
         self.step += 1
     # get results
     self.step -= 1
     results = chaingroup.results()
     return results[0]
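The core of each iteration is `self._satisfy_subset(chain)`: MC-SAT selects a random subset M of the ground clauses satisfied by the current state, including a clause of weight w with probability 1 - exp(-w) (hard clauses always), and then uses SampleSAT to draw a near-uniform state satisfying M. The sketch below shows only that selection rule with hypothetical `Clause` objects; it illustrates the MC-SAT step, not the pracmln implementation.

    import math
    import random

    HARD = float('inf')  # assumed marker for hard constraints in this sketch

    def select_clause_subset(clauses, state):
        # choose the clause subset M that the next SampleSAT call must satisfy
        M = []
        for clause in clauses:
            if not clause.satisfied_by(state):
                continue                          # only currently satisfied clauses are candidates
            if clause.weight == HARD:
                M.append(clause)                  # hard clauses are always kept
            elif random.random() < 1 - math.exp(-clause.weight):
                M.append(clause)                  # soft clauses kept with probability 1 - exp(-w)
        return M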