def _do_evaluation(self, run_context, global_step):
    """Evaluate the model on the dev set using the loss op.

    If the optimizer's decay_type is "loss_decay" (`self._half_lr` is
    set), track the best dev loss seen so far and halve the learning
    rate whenever the loss has not improved for `self._max_patience`
    consecutive evaluations.

    Args:
        run_context: A `SessionRunContext` object.
        global_step: A python integer, the current training step.
    """
    dev_loss = evaluate(
        sess=run_context.session,
        eval_op=self._loss_op,
        feeding_data=self._eval_feeding_data)
    tf.logging.info("Evaluating DEVSET: DevLoss=%f GlobalStep=%d"
                    % (dev_loss, global_step))
    if self._summary_writer is not None:
        self._summary_writer.add_summary("Metrics/DevLoss", dev_loss, global_step)

    if not self._half_lr:
        return

    if dev_loss <= self._min_loss:
        # New best (or equal) dev loss: remember it and reset patience.
        self._min_loss = dev_loss
        self._patience = 0
        return

    # No improvement this round.
    self._patience += 1
    if self._patience >= self._max_patience:
        # Patience exhausted: halve the learning rate and start over.
        self._patience = 0
        run_context.session.run(self._half_lr_op)
        current_lr = run_context.session.run(self._learning_rate)
        tf.logging.info(
            "Hit maximum patience=%d. HALF THE LEARNING RATE TO %f at %d"
            % (self._max_patience, current_lr, global_step))
def _do_evaluation(self, run_context, global_step):
    """Evaluate the model on the dev set using the loss op.

    If the optimizer's decay_type is "loss_decay" (`self._half_lr` is
    set) and training has passed `self._start_decay_at`, track the best
    dev loss seen so far and halve the learning rate whenever the loss
    has not improved for `self._max_patience` consecutive evaluations.

    Args:
        run_context: A `SessionRunContext` object.
        global_step: A python integer, the current training step.
    """
    dev_loss = evaluate(
        sess=run_context.session,
        loss_op=self._loss_op,
        eval_data=self._eval_feeding_data)
    tf.logging.info("Evaluating DEVSET: DevLoss=%f GlobalStep=%d"
                    % (dev_loss, global_step))
    if self._summary_writer is not None:
        self._summary_writer.add_summary("Metrics/DevLoss", dev_loss, global_step)

    # Annealing only kicks in once past the configured start step.
    if not (self._half_lr and global_step >= self._start_decay_at):
        return

    if dev_loss <= self._min_loss:
        # New best (or equal) dev loss: remember it and reset the bad count.
        self._min_loss = dev_loss
        self._bad_count = 0
        return

    # No improvement this round.
    self._bad_count += 1
    if self._bad_count >= self._max_patience:
        # Patience exhausted: halve the learning rate and start over.
        self._bad_count = 0
        run_context.session.run(self._half_lr_op)
        current_lr = run_context.session.run(self._learning_rate)
        tf.logging.info(
            "Hit maximum patience=%d. HALF THE LEARNING RATE TO %f at %d"
            % (self._max_patience, current_lr, global_step))