def train(self, train_ds, train_hooks=None):
    """Train on a `Dataset`."""
    if not isinstance(train_ds, Dataset):
        raise ValueError('expect dataset to be instance of Dataset, got %s' %
                         repr(train_ds))
    # Avoid a mutable default argument; treat `None` as "no extra hooks".
    train_hooks = train_hooks if train_hooks is not None else []

    train_program, model_spec, summary_record = self._build_for_train(
        train_ds)
    # Default hooks: stop at `max_steps` and periodically log loss/summaries.
    train_run_hooks = [
        hooks.StopAtStepHook(self.run_config.max_steps,
                             self.run_config.run_steps),
        hooks.LoggingHook(
            model_spec.loss,
            summary_record=summary_record,
            summary_writer=_get_summary_writer(
                os.path.join(self.run_config.model_dir,
                             'train_history%s' % self.run_config.log_id)),
            per_step=self.run_config.log_steps,
            prefix=self.run_config.log_prefix or 'training',
            skip_step=self.run_config.skip_steps),
    ]
    # Hooks supplied by the model spec run before user-supplied hooks.
    if model_spec.train_hooks is not None:
        train_run_hooks.extend(model_spec.train_hooks)
    train_run_hooks.extend(train_hooks)

    train_executor = F.Executor(_get_one_place())

    mon_exe = MonitoredExecutor(
        train_executor,
        train_program,
        loss=model_spec.loss,
        run_config=self.run_config,
        run_hooks=train_run_hooks,
        warm_start_setting=self.warm_start_setting)

    # Only initialize the distributed-training environment here; variables
    # are initialized (or restored from a checkpoint) right after.
    distribution.init_distribuition_env(train_program)
    mon_exe.init_or_restore_variables()
    # Only the master process writes checkpoints.
    if distribution.status.is_master:
        mon_exe._hooks.append(
            hooks.CheckpointSaverHook(
                mon_exe._saver,
                per_step=mon_exe._save_steps,
                skip_step=mon_exe._skip_steps))

    try:
        with mon_exe:
            for data in train_ds.start():
                mon_exe.run(feed=data)
    except (StopException, F.core.EOFException):
        # `StopAtStepHook` raises StopException; the reader raises
        # EOFException when the dataset is exhausted. Both mean we are done.
        pass

    return mon_exe.result
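# Usage sketch (hypothetical, not part of this module): assuming this method
# lives on a propeller `Learner`-style class built from a model_fn and a
# RunConfig, a training run would look roughly like:
#
#     learner = Learner(model_fn, run_config, params)   # names assumed
#     result = learner.train(train_ds)                  # train_ds: propeller Dataset
#
# Extra `train_hooks` are appended after the model spec's own hooks, so they
# observe every `mon_exe.run` call made above.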
def evaluate(self, eval_dataset, eval_hooks=None):
    """Evaluate on a `Dataset`."""
    if not isinstance(eval_dataset, Dataset):
        raise ValueError('expect dataset to be instance of Dataset, got %s' %
                         repr(eval_dataset))
    # Avoid a mutable default argument; treat `None` as "no extra hooks".
    eval_hooks = eval_hooks if eval_hooks is not None else []

    program, model_spec = self._build_for_eval(eval_dataset)
    # Evaluation runs on a single card.
    single_card_place = _get_one_place()
    eval_executor = F.Executor(single_card_place)

    # Stop after `eval_max_steps` and accumulate metrics via `EvalHook`.
    eval_run_hooks = [
        hooks.StopAtStepHook(self.run_config.eval_max_steps,
                             self.run_config.eval_max_steps),
        hooks.EvalHook(model_spec.metrics),
    ]
    if model_spec.eval_hooks is not None:
        eval_run_hooks.extend(model_spec.eval_hooks)
    eval_run_hooks.extend(eval_hooks)

    mon_exe = MonitoredExecutor(
        eval_executor,
        program,
        loss=model_spec.loss,
        run_config=self.run_config,
        run_hooks=eval_run_hooks,
        warm_start_setting=self.warm_start_setting)
    # Only initialize the distributed environment here; variables are
    # initialized (or restored from a checkpoint) right after.
    distribution.init_distribuition_env(program)
    mon_exe.init_or_restore_variables()

    try:
        with mon_exe:
            for data in eval_dataset.start():
                mon_exe.run(feed=data)
    except (StopException, F.core.EOFException):
        # Raised by `StopAtStepHook` or by the reader at end of data.
        pass

    _, eval_result = mon_exe.result
    summary_writer = _get_summary_writer(
        os.path.join(self.run_config.model_dir,
                     'eval_history%s' % self.run_config.log_id))
    _log_eval_result('eval', eval_result, summary_writer, mon_exe.state)

    return eval_result
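# Usage sketch (hypothetical, not part of this module): evaluation restores
# variables from `run_config.model_dir` via `init_or_restore_variables`, so it
# is typically called after (or interleaved with) `train`:
#
#     metrics = learner.evaluate(eval_ds)   # `learner`/`eval_ds` names assumed
#
# The returned `eval_result` appears to be the metric values gathered by
# `EvalHook` from `model_spec.metrics`; the same values are also written to
# the 'eval_history<log_id>' summary writer.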