Example #1
 def _prepare(self):
     self.watch.tag('preparing optimization', verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(steps=len(self.dbs), color='green')
     if self.multicore:
         pool = Pool(maxtasksperchild=1)
         try:
             for i, (_, d_) in enumerate(
                     pool.imap(
                         with_tracing(
                             _methodcaller('_prepare', sideeffects=True)),
                         self.learners)):
                 checkmem()
                 self.learners[i].__dict__ = d_
                 if self.verbose: bar.inc()
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose: bar.inc()
Example #2
 def _prepare(self):
     self.watch.tag("preparing optimization", verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(width=100, steps=len(self.dbs), color="green")
     if self.multicore:
         pool = Pool(maxtasksperchild=1)
         try:
             for i, (_, d_) in enumerate(
                 pool.imap(with_tracing(_methodcaller("_prepare", sideeffects=True)), self.learners)
             ):
                 checkmem()
                 self.learners[i].__dict__ = d_
                 if self.verbose:
                     bar.inc()
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose:
                 bar.inc()
Example #3
 def _grad(self, w):
     grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
     if False:  # self.multicore:
         # it turned out that it doesn't pay off to evaluate the gradient
         # in separate processes, so we turn it off
         pool = Pool()
         try:
             for i, (grad_, d_) in enumerate(
                     pool.imap(
                         with_tracing(
                             _methodcaller('_grad', sideeffects=True)),
                         map(lambda l: (l, w), self.learners))):
                 self.learners[i].__dict__ = d_
                 grad += grad_
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             grad += learner._grad(w)
     return grad
Example #4
 def _grad(self, w):
     grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
     if False:  # self.multicore:
         # it turned out that it doesn't pay off to evaluate the gradient
         # in separate processes, so we turn it off
         pool = Pool()
         try:
             for i, (grad_, d_) in enumerate(
                 pool.imap(
                     with_tracing(_methodcaller("_grad", sideeffects=True)), map(lambda l: (l, w), self.learners)
                 )
             ):
                 self.learners[i].__dict__ = d_
                 grad += grad_
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             grad += learner._grad(w)
     return grad
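with_tracing is likewise only referenced in these examples, not defined. A common way to implement such a wrapper is sketched below (an assumption, not pracmln's code): it prints the full traceback when the wrapped callable raises inside a worker process, since tracebacks from worker processes are not always easy to recover in the parent; a top-level class is used instead of a closure so the wrapper stays picklable for pool.imap.

 import traceback

 class tracing_call_sketch(object):
     """Illustrative wrapper: run the wrapped callable and, on failure, print the
     child process's traceback before re-raising, so errors surfacing through
     pool.imap in the parent are not reduced to a bare exception message."""

     def __init__(self, func):
         self.func = func

     def __call__(self, *args, **kwargs):
         try:
             return self.func(*args, **kwargs)
         except Exception:
             traceback.print_exc()
             raise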
Example #5
File: multidb.py  Project: Bovril/pracmln
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N,N)))
     if self.multicore:
         pool = Pool()
         for h in pool.imap(with_tracing(_methodcaller('_hessian')), map(lambda l: (l, w), self.learners)): hessian += h
         pool.terminate()
         pool.join()
     else:
         for learner in self.learners: hessian += learner._hessian(w)
     return hessian
Example #6
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N, N)))
     if self.multicore:
         pool = Pool()
         for h in pool.imap(with_tracing(_methodcaller('_hessian')),
                            map(lambda l: (l, w), self.learners)):
             hessian += h
         pool.terminate()
         pool.join()
     else:
         for learner in self.learners:
             hessian += learner._hessian(w)
     return hessian
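_f, _grad and _hessian all take the same weight vector w, which is the interface a second-order optimizer expects: objective value, gradient and Hessian as functions of w. The toy below is a hedged usage sketch, not pracmln's optimizer wiring; the quadratic objective is made up for illustration and scipy is only one possible consumer of such callables.

 import numpy
 from scipy.optimize import minimize

 # Toy quadratic objective standing in for the summed per-database terms above.
 A = numpy.diag([1.0, 2.0, 3.0])
 b = numpy.array([1.0, -1.0, 0.5])

 def _f(w):
     return 0.5 * w.dot(A).dot(w) - b.dot(w)

 def _grad(w):
     return A.dot(w) - b

 def _hessian(w):
     return A

 res = minimize(_f, numpy.zeros(3), jac=_grad, hess=_hessian, method='Newton-CG')
 print(res.x)  # approximately the solution of A w = b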
Example #7
File: multidb.py  Project: Bovril/pracmln
 def _prepare(self):
     self.watch.tag('preparing optimization', verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(width=100, steps=len(self.dbs), color='green')
     if self.multicore:
         for i, (_, d_) in enumerate(Pool(maxtasksperchild=1).imap(with_tracing(_methodcaller('_prepare', sideeffects=True)), self.learners)):
             checkmem()
             self.learners[i].__dict__ = d_
             if self.verbose: bar.inc()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose: bar.inc()
Example #8
File: multidb.py  Project: Bovril/pracmln
 def _grad(self, w):
     grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
     if False:  # self.multicore:
         # it turned out that it doesn't pay off to evaluate the gradient
         # in separate processes, so we turn it off
         pool = Pool()
         for i, (grad_, d_) in enumerate(pool.imap(with_tracing(_methodcaller('_grad', sideeffects=True)), map(lambda l: (l, w), self.learners))):
             self.learners[i].__dict__ = d_
             grad += grad_
         pool.terminate()
         pool.join()
     else:
         for learner in self.learners: grad += learner._grad(w)
     return grad
Example #9
File: multidb.py  Project: Bovril/pracmln
 def _f(self, w):
     # it turned out that it doesn't pay off to evaluate the function
     # in separate processes, so we turn it off
     if False:  # self.multicore:
         likelihood = 0
         pool = Pool()
         for i, (f_, d_) in enumerate(pool.imap(with_tracing(_methodcaller('_f', sideeffects=True)), map(lambda l: (l, w), self.learners))):
             self.learners[i].__dict__ = d_ 
             likelihood += f_
         pool.terminate()
         pool.join()
         return likelihood
     else:
         return sum(map(lambda l: l._f(w), self.learners))
Example #10
 def _f(self, w):
     # it turned out that it doesn't pay off to evaluate the function
     # in separate processes, so we turn it off
     if False:  # self.multicore:
         likelihood = 0
         pool = Pool()
         for i, (f_, d_) in enumerate(
                 pool.imap(
                     with_tracing(_methodcaller('_f', sideeffects=True)),
                     map(lambda l: (l, w), self.learners))):
             self.learners[i].__dict__ = d_
             likelihood += f_
         pool.terminate()
         pool.join()
         return likelihood
     else:
         return sum(map(lambda l: l._f(w), self.learners))
Example #11
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N, N)))
     if self.multicore:
         pool = Pool()
         try:
             for h in pool.imap(with_tracing(_methodcaller('_hessian')), map(lambda l: (l, w), self.learners)):
                 hessian += h
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners: hessian += learner._hessian(w)
     return hessian
Example #12
 def _prepare(self):
     self.watch.tag('preparing optimization', verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(width=100, steps=len(self.dbs), color='green')
     if self.multicore:
         for i, (_, d_) in enumerate(
                 Pool(maxtasksperchild=1).imap(
                     with_tracing(
                         _methodcaller('_prepare', sideeffects=True)),
                     self.learners)):
             checkmem()
             self.learners[i].__dict__ = d_
             if self.verbose: bar.inc()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose: bar.inc()
Example #13
 def _f(self, w):
     # it turned out that it doesn't pay off to evaluate the function
     # in separate processes, so we turn it off
     if False:  # self.multicore:
         likelihood = 0
         pool = Pool()
         try:
             for i, (f_, d_) in enumerate(
                 pool.imap(with_tracing(_methodcaller("_f", sideeffects=True)), map(lambda l: (l, w), self.learners))
             ):
                 self.learners[i].__dict__ = d_
                 likelihood += f_
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
         return likelihood
     else:
         return sum(map(lambda l: l._f(w), self.learners))
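Putting the pattern together: the self-contained toy below (assumed names, not pracmln code) runs a per-learner method over a Pool with imap, copies each child's __dict__ back into the parent's learner, and sums the returned values, which is the shape shared by all the _prepare, _grad and _f examples on this page.

 from multiprocessing import Pool

 class ToyLearner(object):
     def __init__(self, scale):
         self.scale = scale
         self.prepared = False

     def _prepare(self):
         self.prepared = True              # side effect to propagate to the parent

     def _f(self, w):
         return self.scale * sum(w)        # toy per-database objective

 class call_f(object):
     """Picklable callable for pool.imap: prepare the learner and evaluate _f in
     the child, returning (value, __dict__) so both the result and the mutated
     state reach the parent process."""
     def __init__(self, w):
         self.w = w
     def __call__(self, learner):
         learner._prepare()
         return learner._f(self.w), learner.__dict__

 if __name__ == '__main__':
     learners = [ToyLearner(s) for s in (1.0, 2.0, 3.0)]
     w = [0.5, 0.5]
     total = 0.0
     with Pool(maxtasksperchild=1) as pool:
         for i, (f_, d_) in enumerate(pool.imap(call_f(w), learners)):
             learners[i].__dict__ = d_     # copy the child's state back
             total += f_
     print(total, [l.prepared for l in learners])   # -> 6.0 [True, True, True]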