Code Example #1
 def _prepare(self):
     self.watch.tag('preparing optimization', verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(steps=len(self.dbs), color='green')
     if self.multicore:
         pool = Pool(maxtasksperchild=1)
         try:
             for i, (_, d_) in enumerate(
                     pool.imap(
                         with_tracing(
                             _methodcaller('_prepare', sideeffects=True)),
                         self.learners)):
                 checkmem()
                 self.learners[i].__dict__ = d_
                 if self.verbose: bar.inc()
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose: bar.inc()
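
Since Python 3.3, multiprocessing.Pool is also a context manager whose __exit__ calls terminate(), so the try/except/finally cleanup in the example above can be expressed more compactly. The sketch below is hypothetical: it only mirrors the names used in these examples and is not pracmln's actual code.

    from multiprocessing import Pool

    def prepare_all(learners, worker):
        # worker stands in for with_tracing(_methodcaller('_prepare',
        # sideeffects=True)) from the example above.
        with Pool(maxtasksperchild=1) as pool:
            # Pool.__exit__ calls terminate(), so child processes are
            # reclaimed even if the iteration below raises.
            for i, (_, d_) in enumerate(pool.imap(worker, learners)):
                learners[i].__dict__ = d_
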
Code Example #2
File: multidb.py  Project: danielnyga/pracmln
 def _prepare(self):
     self.watch.tag("preparing optimization", verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(width=100, steps=len(self.dbs), color="green")
     if self.multicore:
         pool = Pool(maxtasksperchild=1)
         try:
             for i, (_, d_) in enumerate(
                 pool.imap(with_tracing(_methodcaller("_prepare", sideeffects=True)), self.learners)
             ):
                 checkmem()
                 self.learners[i].__dict__ = d_
                 if self.verbose:
                     bar.inc()
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose:
                 bar.inc()
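
All of these variants rely on the same round trip: Pool.imap pickles each learner into a child process, so any in-place mutation happens on a copy the parent never sees. A method caller built with sideeffects=True therefore returns the method's result together with the worker-side __dict__, which the parent copies back via self.learners[i].__dict__ = d_. pracmln's actual _methodcaller is not shown in these excerpts; a minimal sketch of how such a helper could behave (a class rather than a closure, so instances pickle cleanly into workers):

    class MethodCallerWithSideEffects(object):
        # Hypothetical stand-in for _methodcaller(name, sideeffects=True).
        def __init__(self, name):
            self.name = name

        def __call__(self, arg):
            # Accept either a bare learner (as in _prepare) or a
            # (learner, argument, ...) tuple (as in _grad and _f).
            if isinstance(arg, tuple):
                obj, args = arg[0], arg[1:]
            else:
                obj, args = arg, ()
            result = getattr(obj, self.name)(*args)
            # Ship the mutated state back explicitly, since the mutation
            # happened on a pickled copy inside the worker process.
            return result, obj.__dict__
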
Code Example #3
File: multidb.py  Project: danielnyga/pracmln
 def _grad(self, w):
     grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
     if False:  # self.multicore:
         # it turned out that it doesn't pay off to evaluate the gradient
         # in separate processes, so we turn it off
         pool = Pool()
         try:
             for i, (grad_, d_) in enumerate(
                 pool.imap(
                     with_tracing(_methodcaller("_grad", sideeffects=True)), map(lambda l: (l, w), self.learners)
                 )
             ):
                 self.learners[i].__dict__ = d_
                 grad += grad_
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners:
             grad += learner._grad(w)
     return grad
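
The serial fallback is correct because the multi-database objective is additive: each learner owns one database, and the total gradient is the sum of the per-learner gradients. A toy illustration with hypothetical learners:

    import numpy

    class ToyLearner(object):
        # Hypothetical learner whose objective is 0.5 * ||w - target||^2,
        # so its gradient is w - target.
        def __init__(self, target):
            self.target = numpy.asarray(target, numpy.float64)

        def _grad(self, w):
            return w - self.target

    learners = [ToyLearner([1., 2.]), ToyLearner([3., 4.])]
    w = numpy.zeros(2)
    grad = numpy.zeros(2)
    for learner in learners:  # same accumulation as the else-branch above
        grad += learner._grad(w)
    # grad == (w - [1, 2]) + (w - [3, 4]) == [-4., -6.]
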
Code Example #4
File: multidb.py  Project: Bovril/pracmln
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N, N)))
     if self.multicore:
         pool = Pool()
         for h in pool.imap(with_tracing(_methodcaller('_hessian')),
                            map(lambda l: (l, w), self.learners)):
             hessian += h
         pool.terminate()
         pool.join()
     else:
         for learner in self.learners: hessian += learner._hessian(w)
     return hessian
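
Two remarks on the Hessian variants. First, multicore stays enabled here while the _grad and _f variants in this section hard-wire it off, presumably because an N x N Hessian is expensive enough per call to amortize the process and pickling overhead that, per the source comments, makes parallel gradient and function evaluation unprofitable. Second, numpy.matrix is discouraged in current NumPy releases; since only elementwise += is used here, a plain ndarray behaves identically, as in this hypothetical standalone form of the serial branch:

    import numpy

    def hessian_serial(learners, w, n_formulas):
        # Serial fallback equivalent to the else-branch above, using an
        # ndarray in place of numpy.matrix.
        hessian = numpy.zeros((n_formulas, n_formulas), numpy.float64)
        for learner in learners:
            hessian += learner._hessian(w)
        return hessian
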
Code Example #5
File: multidb.py  Project: Bovril/pracmln
 def _prepare(self):
     self.watch.tag('preparing optimization', verbose=self.verbose)
     if self.verbose:
         bar = ProgressBar(width=100, steps=len(self.dbs), color='green')
     if self.multicore:
         for i, (_, d_) in enumerate(
                 Pool(maxtasksperchild=1).imap(
                     with_tracing(
                         _methodcaller('_prepare', sideeffects=True)),
                     self.learners)):
             checkmem()
             self.learners[i].__dict__ = d_
             if self.verbose: bar.inc()
     else:
         for learner in self.learners:
             checkmem()
             learner._prepare()
             if self.verbose: bar.inc()
Code Example #6
File: multidb.py  Project: Bovril/pracmln
 def _grad(self, w):
     grad = numpy.zeros(len(self.mln.formulas), numpy.float64)
     if False:  # self.multicore:
         # it turned out that it doesn't pay off to evaluate the gradient
         # in separate processes, so we turn it off
         pool = Pool()
         for i, (grad_, d_) in enumerate(
                 pool.imap(
                     with_tracing(_methodcaller('_grad', sideeffects=True)),
                     map(lambda l: (l, w), self.learners))):
             self.learners[i].__dict__ = d_
             grad += grad_
         pool.terminate()
         pool.join()
     else:
         for learner in self.learners: grad += learner._grad(w)
     return grad
Code Example #7
File: multidb.py  Project: Bovril/pracmln
 def _f(self, w):
     # it turned out that it doesn't pay off to evaluate the function
     # in separate processes, so we turn it off
     if False:  # self.multicore:
         likelihood = 0
         pool = Pool()
         for i, (f_, d_) in enumerate(
                 pool.imap(
                     with_tracing(_methodcaller('_f', sideeffects=True)),
                     map(lambda l: (l, w), self.learners))):
             self.learners[i].__dict__ = d_
             likelihood += f_
         pool.terminate()
         pool.join()
         return likelihood
     else:
         return sum(map(lambda l: l._f(w), self.learners))
Code Example #8
File: multidb.py  Project: pombredanne/pracmln
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N, N)))
     if self.multicore:
         pool = Pool()
         try:
             for h in pool.imap(with_tracing(_methodcaller('_hessian')), map(lambda l: (l, w), self.learners)):
                 hessian += h
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners: hessian += learner._hessian(w)
     return hessian
Code Example #9
File: multidb.py  Project: bbferka/pracmln
 def _hessian(self, w):
     N = len(self.mln.formulas)
     hessian = numpy.matrix(numpy.zeros((N, N)))
     if self.multicore:
         pool = Pool()
         try:
             for h in pool.imap(with_tracing(_methodcaller('_hessian')), map(lambda l: (l, w), self.learners)):
                 hessian += h
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:
         for learner in self.learners: hessian += learner._hessian(w)
     return hessian
Code Example #10
File: multidb.py  Project: danielnyga/pracmln
 def _f(self, w):
     # it turned out that it doesn't pay off to evaluate the function
     # in separate processes, so we turn it off
     if False:  # self.multicore:
         likelihood = 0
         pool = Pool()
         try:
             for i, (f_, d_) in enumerate(
                 pool.imap(with_tracing(_methodcaller("_f", sideeffects=True)), map(lambda l: (l, w), self.learners))
             ):
                 self.learners[i].__dict__ = d_
                 likelihood += f_
         except Exception as e:
             logger.error("Error in child process. Terminating pool...")
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
         return likelihood
     else:
         return sum(map(lambda l: l._f(w), self.learners))
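
As with the gradient, the serial branch of _f just sums per-learner contributions; in Python 3 a generator expression is the more idiomatic spelling of the map/lambda form, sketched here as a standalone helper:

    def f_total(learners, w):
        # Equivalent to: sum(map(lambda l: l._f(w), learners))
        return sum(learner._f(w) for learner in learners)
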