def _itergroundings(self, simplify=False, unsatfailure=False):
    """Ground all formulas and accumulate the BPLL statistics.

    Generator: yields ``None`` once per processed formula result so the
    caller can interleave progress reporting.  The actual statistics are
    recorded as a side effect through ``self._varidx2fidx`` and
    ``self._addstat``.

    ``simplify`` and ``unsatfailure`` are accepted for interface
    compatibility but are not used in this code path -- TODO confirm
    against the base class contract.

    Fix: the worker pool is now joined on every exit path.  Previously a
    ``CtrlCException`` terminated the pool without ever calling
    ``join()``, and any other exception (including ``GeneratorExit`` when
    the consumer abandons the generator) left the pool running entirely,
    leaking worker processes.
    """
    global global_bpll_grounding
    # Worker processes access the grounding through this module-level global.
    global_bpll_grounding = self
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                for fidx, stat in gndresult:
                    for (varidx, validx, val) in stat:
                        self._varidx2fidx[varidx].add(fidx)
                        self._addstat(fidx, varidx, validx, val)
                    checkmem()
                yield None
            pool.close()  # normal completion: let workers wind down
        except CtrlCException as e:
            pool.terminate()
            raise e
        except BaseException:
            # Any other failure (incl. GeneratorExit from an abandoned
            # generator): kill the workers before propagating.
            pool.terminate()
            raise
        finally:
            # Every path above has called close() or terminate(), so join()
            # is legal here and no worker processes are leaked.
            pool.join()
    else:
        for gndresult in imap(create_formula_groundings, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
            yield None
def _itergroundings(self, simplify=False, unsatfailure=False):
    """Iterate over the grounding statistics of all formulas.

    Grounds every formula -- in a worker pool when ``self.multicore`` is
    set -- records the per-variable statistics via ``self._varidx2fidx``
    and ``self._addstat``, and yields ``None`` after each formula result
    as a progress tick.
    """
    global global_bpll_grounding
    global_bpll_grounding = self  # workers read the grounding via this global
    if not self.multicore:
        # Single-process path: a plain lazy map over the formulas suffices.
        for gndresult in imap(create_formula_groundings, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
            yield None
        return
    pool = Pool(maxtasksperchild=1)
    try:
        traced = with_tracing(create_formula_groundings)
        for gndresult in pool.imap(traced, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
                checkmem()
            yield None
    except Exception as e:
        logger.error('Error in child process. Terminating pool...')
        pool.close()
        raise e
    finally:
        pool.terminate()
        pool.join()
def _prepare(self):
    """Prepare every learner for optimization.

    Delegates to each learner's ``_prepare`` method, either through a
    worker pool (``self.multicore``), copying the resulting ``__dict__``
    back into the local learner objects, or sequentially otherwise.
    """
    self.watch.tag('preparing optimization', verbose=self.verbose)
    bar = ProgressBar(steps=len(self.dbs), color='green') if self.verbose else None
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        prepare_call = with_tracing(_methodcaller('_prepare', sideeffects=True))
        try:
            for idx, (_, state) in enumerate(pool.imap(prepare_call, self.learners)):
                checkmem()
                # The learner was prepared in a child process; adopt its state.
                self.learners[idx].__dict__ = state
                if bar is not None:
                    bar.inc()
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for learner in self.learners:
            checkmem()
            learner._prepare()
            if bar is not None:
                bar.inc()
def _prepare(self):
    """Prepare all learners for the optimization step.

    In multicore mode the learners' ``_prepare`` methods run in a worker
    pool and the per-learner ``__dict__`` computed in the child process
    is copied back; otherwise each learner is prepared in this process.
    """
    self.watch.tag("preparing optimization", verbose=self.verbose)
    verbose = self.verbose
    if verbose:
        bar = ProgressBar(width=100, steps=len(self.dbs), color="green")
    if self.multicore:
        workers = Pool(maxtasksperchild=1)
        try:
            caller = _methodcaller("_prepare", sideeffects=True)
            results = workers.imap(with_tracing(caller), self.learners)
            for pos, (_, state) in enumerate(results):
                checkmem()
                # Adopt the state that the child process computed.
                self.learners[pos].__dict__ = state
                if verbose:
                    bar.inc()
        except Exception as e:
            logger.error("Error in child process. Terminating pool...")
            workers.close()
            raise e
        finally:
            workers.terminate()
            workers.join()
    else:
        for learner in self.learners:
            checkmem()
            learner._prepare()
            if verbose:
                bar.inc()
def _prepare(self):
    """Prepare all learners for optimization.

    Runs each learner's ``_prepare`` either in a worker pool
    (``self.multicore``), copying the child-computed ``__dict__`` back
    into the local learner objects, or sequentially in this process.

    Fix: the multiprocessing pool was created inline and never closed,
    joined or terminated, leaking worker processes (and leaving them
    running if a child raised).  The pool is now managed with
    try/except/finally, consistent with the other multicore code paths
    in this module.
    """
    self.watch.tag('preparing optimization', verbose=self.verbose)
    if self.verbose:
        bar = ProgressBar(width=100, steps=len(self.dbs), color='green')
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for i, (_, d_) in enumerate(pool.imap(with_tracing(_methodcaller('_prepare', sideeffects=True)), self.learners)):
                checkmem()
                # Adopt the state computed in the child process.
                self.learners[i].__dict__ = d_
                if self.verbose:
                    bar.inc()
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for learner in self.learners:
            checkmem()
            learner._prepare()
            if self.verbose:
                bar.inc()
def _prepare(self):
    """Prepare all learners for optimization.

    Runs each learner's ``_prepare`` either in a worker pool
    (``self.multicore``), copying the child-computed ``__dict__`` back
    into the local learner objects, or sequentially in this process.

    Fix: the multiprocessing pool was created inline and never closed,
    joined or terminated, leaking worker processes (and leaving them
    running if a child raised).  The pool is now managed with
    try/except/finally, consistent with the other multicore code paths
    in this module.
    """
    self.watch.tag('preparing optimization', verbose=self.verbose)
    if self.verbose:
        bar = ProgressBar(width=100, steps=len(self.dbs), color='green')
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for i, (_, d_) in enumerate(
                    pool.imap(
                        with_tracing(
                            _methodcaller('_prepare', sideeffects=True)),
                        self.learners)):
                checkmem()
                # Adopt the state computed in the child process.
                self.learners[i].__dict__ = d_
                if self.verbose:
                    bar.inc()
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for learner in self.learners:
            checkmem()
            learner._prepare()
            if self.verbose:
                bar.inc()
def create_formula_groundings(formula, unsatfailure=True):
    """Compute the grounding statistics of a single formula.

    Runs in a worker process: reads the MRF/grounding through the
    module-level ``global_bpll_grounding`` set up by the parent before
    dispatching work.

    :param formula:       the (unground) formula to be processed.
    :param unsatfailure:  if True, raise a SatisfiabilityException when a
                          hard formula evaluates to false under the
                          evidence.
    :return: a list of ``(formula index, stat)`` pairs where ``stat`` is a
             list of ``(variable index, value index, truth)`` triples with
             nonzero truth values.
    """
    checkmem()
    results = []
    if global_bpll_grounding.mrf.mln.logic.islitconj(formula):
        # Fast grounding path for conjunctions of literals.
        for res in global_bpll_grounding.itergroundings_fast(formula):
            checkmem()
            results.append(res)
    else:
        # Generic path: enumerate all groundings and evaluate each one
        # against every possible value of each ground atom's variable.
        for gf in formula.itergroundings(global_bpll_grounding.mrf, simplify=False):
            checkmem()
            stat = []
            for gndatom in gf.gndatoms():
                # Work on a copy of the evidence so setval() does not
                # clobber the shared world.
                world = list(global_bpll_grounding.mrf.evidence)
                var = global_bpll_grounding.mrf.variable(gndatom)
                for validx, value in var.itervalues():
                    var.setval(value, world)
                    truth = gf(world)
                    if truth != 0:
                        stat.append((var.idx, validx, truth))
                    elif unsatfailure and gf.weight == HARD and gf(
                            global_bpll_grounding.mrf.evidence) != 1:
                        # A violated hard constraint makes the MLN
                        # unsatisfiable: dump the structure and abort.
                        print gf.print_structure(global_bpll_grounding.mrf.evidence)
                        raise SatisfiabilityException(
                            'MLN is unsatisfiable due to hard constraint violation {} (see above)'
                            .format(
                                global_bpll_grounding.mrf.formulas[gf.idx]))
            results.append((gf.idx, stat))
    return results
def create_formula_groundings(formula, unsatfailure=True): checkmem() results = [] if global_bpll_grounding.mrf.mln.logic.islitconj(formula): for res in global_bpll_grounding.itergroundings_fast(formula): checkmem() results.append(res) else: for gf in formula.itergroundings(global_bpll_grounding.mrf, simplify=False): checkmem() stat = [] for gndatom in gf.gndatoms(): world = list(global_bpll_grounding.mrf.evidence) var = global_bpll_grounding.mrf.variable(gndatom) for validx, value in var.itervalues(): var.setval(value, world) truth = gf(world) if truth != 0: stat.append((var.idx, validx, truth)) elif unsatfailure and gf.weight == HARD and gf(global_bpll_grounding.mrf.evidence) != 1: print gf.print_structure(global_bpll_grounding.mrf.evidence) raise SatisfiabilityException('MLN is unsatisfiable due to hard constraint violation %s (see above)' % global_bpll_grounding.mrf.formulas[gf.idx]) results.append((gf.idx, stat)) return results
def _setup_learner(args):
    """Instantiate a learning algorithm for one database.

    Takes a single ``(i, mln, db, method, params)`` tuple so it can be
    mapped over with ``Pool.imap``/``imap``; returns ``(i, algo)`` so the
    caller can match results back to their index.

    Fix: the original used Python-2-only tuple-parameter unpacking in the
    signature (removed by PEP 3113); unpacking now happens in the body,
    which is call-compatible (callers still pass one tuple) and portable.
    """
    (i, mln_, db, method, params) = args
    checkmem()
    mrf = mln_.ground(db)
    algo = method(mrf, **params)
    return i, algo