def infer_onestep(self, feed_dict):
    """Advance the GRU unit by a single inference step.

    Args:
        feed_dict: feed mapping passed straight through to the executor.

    Returns:
        The executor's fetch result (LoDTensors, since return_numpy=False).
    """
    run_kwargs = dict(
        program=self.infer_onestep_program,
        fetch_dict=self.infer_onestep_outputs['fetch_dict'],
        feed=feed_dict,
        return_numpy=False,
        scope=self.scope,
    )
    return executor_run_with_fetch_dict(self.base_exe, **run_kwargs)
def infer_init(self, feed_dict):
    """Run only the initialization part of inference.

    Args:
        feed_dict: feed mapping passed straight through to the executor.

    Returns:
        The executor's fetch result (LoDTensors, since return_numpy=False).
    """
    run_kwargs = dict(
        program=self.infer_init_program,
        fetch_dict=self.infer_init_outputs['fetch_dict'],
        feed=feed_dict,
        return_numpy=False,
        scope=self.scope,
    )
    return executor_run_with_fetch_dict(self.base_exe, **run_kwargs)
def softmax_sampling(self, feed_dict):
    """Run the softmax-sampling program.

    Args:
        feed_dict: feed mapping passed straight through to the executor.

    Returns:
        The executor's fetch result (LoDTensors, since return_numpy=False).
    """
    run_kwargs = dict(
        program=self.softmax_sampling_program,
        fetch_dict=self.softmax_sampling_outputs['fetch_dict'],
        feed=feed_dict,
        return_numpy=False,
        scope=self.scope,
    )
    return executor_run_with_fetch_dict(self.base_exe, **run_kwargs)
def test(self, list_feed_dict):
    """Run the test program under the current execution mode.

    Args:
        list_feed_dict (list): list of feed dicts. In 'single' mode it must
            contain exactly one element; in 'parallel'/'pserver' mode the
            whole list is handed to the parallel executor.

    Returns:
        The fetch result of the test program (return_numpy=False).

    Raises:
        ValueError: if ``list_feed_dict`` has the wrong length in 'single'
            mode, or if ``self.mode`` is not a recognized mode.
    """
    if self.mode == 'single':
        # Explicit check instead of `assert`: asserts are stripped under -O.
        if len(list_feed_dict) != 1:
            raise ValueError(
                "single mode expects exactly one feed dict, got %d"
                % len(list_feed_dict))
        return executor_run_with_fetch_dict(
            self.test_exe,
            program=self.test_program,
            fetch_dict=self.test_outputs['fetch_dict'],
            feed=list_feed_dict[0],
            return_numpy=False,
            scope=self.scope)
    elif self.mode in ('parallel', 'pserver'):
        return parallel_executor_run_with_fetch_dict(
            self.test_exe,
            fetch_dict=self.test_outputs['fetch_dict'],
            feed=list_feed_dict,
            return_numpy=False)
    else:
        # Original code silently returned None on an unknown mode.
        raise ValueError("unknown mode: %r" % self.mode)
def train(self, list_feed_dict):
    """Run one training batch under the current execution mode.

    Runs the algorithm's ``before_every_batch`` hook, executes the train
    program ('single' mode: plain executor with ``list_feed_dict`` passed
    as-is; 'parallel'/'pserver': parallel executor with the list), then runs
    ``after_every_batch``. Both hooks execute inside ``self.scope``.

    Args:
        list_feed_dict: feed dict(s) for the train program; shape depends
            on ``self.mode`` as described above.

    Returns:
        The fetch result of the train program (return_numpy=False).

    Raises:
        ValueError: if ``self.mode`` is not a recognized mode.
    """
    with fluid.scope_guard(self.scope):
        self.alg.before_every_batch()
    if self.mode == 'single':
        fetch_dict = executor_run_with_fetch_dict(
            self.train_exe,
            program=self.train_program,
            fetch_dict=self.train_outputs['fetch_dict'],
            feed=list_feed_dict,
            return_numpy=False,
            scope=self.scope)
    elif self.mode in ('parallel', 'pserver'):
        fetch_dict = parallel_executor_run_with_fetch_dict(
            self.train_exe,
            fetch_dict=self.train_outputs['fetch_dict'],
            feed=list_feed_dict,
            return_numpy=False)
    else:
        # Original code left `fetch_dict` unbound here, yielding a confusing
        # NameError at the return; fail fast with a clear message instead.
        raise ValueError("unknown mode: %r" % self.mode)
    with fluid.scope_guard(self.scope):
        self.alg.after_every_batch()
    return fetch_dict