def _prepare(self): self.watch.tag("preparing optimization", verbose=self.verbose) if self.verbose: bar = ProgressBar(width=100, steps=len(self.dbs), color="green") if self.multicore: pool = Pool(maxtasksperchild=1) try: for i, (_, d_) in enumerate( pool.imap(with_tracing(_methodcaller("_prepare", sideeffects=True)), self.learners) ): checkmem() self.learners[i].__dict__ = d_ if self.verbose: bar.inc() except Exception as e: logger.error("Error in child process. Terminating pool...") pool.close() raise e finally: pool.terminate() pool.join() else: for learner in self.learners: checkmem() learner._prepare() if self.verbose: bar.inc()
def _grad(self, w):
    grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
    if False:  # self.multicore:
        # it turned out that it doesn't pay off to evaluate the gradient
        # in separate processes, so we turn it off
        pool = Pool()
        try:
            for i, (grad_, d_) in enumerate(
                    pool.imap(with_tracing(_methodcaller('_grad', sideeffects=True)),
                              map(lambda l: (l, w), self.learners))):
                self.learners[i].__dict__ = d_
                grad += grad_
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for learner in self.learners:
            grad += learner._grad(w)
    return grad
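# The serial branch above is a plain element-wise sum of per-learner
# gradient vectors, one entry per formula weight. The stub below is a
# hypothetical, self-contained illustration of that contract (the names
# _StubGradLearner and _demo_grad_accumulation are assumptions, not part
# of this module):
import numpy

class _StubGradLearner(object):

    def __init__(self, offset):
        self.offset = offset

    def _grad(self, w):
        # constant per-weight gradient, just to make the sum visible
        return numpy.full(len(w), self.offset, numpy.float64)

def _demo_grad_accumulation():
    w = numpy.zeros(3)
    learners = [_StubGradLearner(1.), _StubGradLearner(2.)]
    grad = numpy.zeros(len(w), numpy.float64)
    for learner in learners:
        grad += learner._grad(w)
    return grad  # array([ 3.,  3.,  3.])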
def _hessian(self, w):
    N = len(self.mln.formulas)
    hessian = numpy.matrix(numpy.zeros((N, N)))
    if self.multicore:
        pool = Pool()
        try:
            # sum the per-learner (N x N) Hessians computed in parallel
            for h in pool.imap(with_tracing(_methodcaller('_hessian')),
                               map(lambda l: (l, w), self.learners)):
                hessian += h
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for learner in self.learners:
            hessian += learner._hessian(w)
    return hessian
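# with_tracing wraps the callable handed to the pool so that an
# exception raised inside a worker reaches the parent with a usable
# traceback (exceptions pickled across process boundaries normally lose
# the child-side stack). A minimal sketch under that assumption -- the
# real decorator is defined elsewhere in this codebase, and the name
# _with_tracing_sketch is hypothetical:
import traceback

def _with_tracing_sketch(f):
    def traced(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # embed the formatted child-side traceback in the message so
            # it survives pickling back to the parent process
            raise Exception('%s\n%s' % (e, traceback.format_exc()))
    return traced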
def _f(self, w):
    # it turned out that it doesn't pay off to evaluate the function
    # in separate processes, so we turn it off
    if False:  # self.multicore:
        likelihood = 0
        pool = Pool()
        try:
            for i, (f_, d_) in enumerate(
                    pool.imap(with_tracing(_methodcaller('_f', sideeffects=True)),
                              map(lambda l: (l, w), self.learners))):
                self.learners[i].__dict__ = d_
                likelihood += f_
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
        return likelihood
    else:
        return sum(map(lambda l: l._f(w), self.learners))
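# Together, _f, _grad and _hessian expose the pooled objective -- the
# sum of the per-database likelihoods -- to any gradient-based
# optimizer. The driver below is a hedged sketch of how such a learner
# could be handed to SciPy; _maximize_likelihood_sketch is hypothetical
# and assumes _f returns a log-likelihood to be maximized:
import numpy
from scipy.optimize import minimize

def _maximize_likelihood_sketch(learner, n_formulas):
    w0 = numpy.zeros(n_formulas)
    result = minimize(lambda w: -learner._f(w),       # minimize the negation
                      w0,
                      jac=lambda w: -learner._grad(w),
                      method='BFGS')
    return result.x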