Example #1
0
 def _check(self, state):
     """Stop training when the monitored metric becomes NaN or infinite."""
     # Nothing to do unless the monitored metric is present this step.
     if self._monitor not in state[torchbearer.METRICS]:
         return
     value = get_metric('TerminateOnNaN', state, self._monitor)
     if value is None:
         return
     if math.isnan(value) or math.isinf(value):
         print('Invalid ' + self._monitor + ', terminating')
         state[torchbearer.STOP_TRAINING] = True
Example #2
0
 def _on_end_epoch(self, state):
     """Push the current epoch's metric values to the live plot, then redraw."""
     updates = {}
     for metric_name in state[torchbearer.METRICS]:
         updates[metric_name] = get_metric('LiveLossPlot', state, metric_name)
     self.plt.update(updates)
     # When draw_once is set, drawing is deferred until the end of training.
     if not self.draw_once:
         with no_print():
             self.plt.draw()
 def on_end_epoch(self, state):
     """Step the wrapped scheduler once per epoch (skipped when stepping per batch)."""
     if self._step_on_batch:
         return
     epoch = state[torchbearer.EPOCH]
     if self._monitor is None:
         self._scheduler.step(epoch=epoch)
         return
     current = get_metric('Scheduler', state, self._monitor)
     # get_metric returns None when the metric is unavailable; skip the step.
     if current is not None:
         self._scheduler.step(current, epoch=epoch)
 def on_step_training(self, state):
     """Step the wrapped scheduler after each training batch, if so configured."""
     if not self._step_on_batch:
         return
     if self._monitor is None:
         self._scheduler.step()
         return
     current = get_metric('Scheduler', state, self._monitor)
     # get_metric returns None when the metric is unavailable; skip the step.
     if current is not None:
         self._scheduler.step(current)
Example #5
0
 def _on_step_training(self, state):
     """Push batch-level metric values to the batch plot; redraw periodically."""
     updates = {}
     for metric_name in state[torchbearer.METRICS]:
         updates[metric_name] = get_metric('LiveLossPlot', state, metric_name)
     self.batch_plt.update(updates)
     # Redraw only every batch_step_size batches, and never when draw_once is set.
     batch = state[torchbearer.BATCH]
     if batch % self.batch_step_size == 0 and not self.draw_once:
         with no_print():
             self.batch_plt.draw()
 def step(self, state):
     """Early-stopping step: track the monitored metric and flag a stop.

     Resets the patience counter whenever the metric improves on ``self.best``
     by at least ``self.min_delta`` (per ``self.monitor_op``); otherwise
     increments it and, once ``self.patience`` is exhausted, sets
     ``torchbearer.STOP_TRAINING`` in ``state``.
     """
     current = get_metric('Early Stopping', state, self.monitor)
     # get_metric may return None when the metric is unavailable (the
     # scheduler callbacks in this file all guard for this); without the
     # guard, `None - self.min_delta` raises TypeError.
     if current is None:
         return
     if self.monitor_op(current - self.min_delta, self.best):
         self.best = current
         self.wait = 0
     else:
         self.wait += 1
         if self.wait >= self.patience:
             state[torchbearer.STOP_TRAINING] = True
Example #7
0
 def on_end_epoch(self, state):
     """Advance the scheduler at epoch end when stepping per epoch."""
     if self._step_on_batch:
         return
     if self._monitor is None:
         self._step(state)
         return
     current = get_metric('Scheduler', state, self._monitor)
     # get_metric returns None when the metric is unavailable; skip the step.
     if current is not None:
         self._step(state, current)
    def on_checkpoint(self, state):
        """Save a checkpoint when the monitored metric improves.

        Runs at most once every ``self.period`` epochs; on an improvement
        (per ``self.monitor_op``) the previous best checkpoint is overwritten.
        """
        super(Best, self).on_end_epoch(state)
        self.epochs_since_last_save += 1
        # Only consider saving once `period` epochs have elapsed.
        if self.epochs_since_last_save < self.period:
            return
        self.epochs_since_last_save = 0
        candidate = get_metric('Best Checkpoint', state, self.monitor)
        if self.monitor_op(candidate, self.best):
            self.best = candidate
            self.save_checkpoint(state, overwrite_most_recent=True)
 def on_end_epoch(self, state):
     """Step the scheduler with the monitored metric once per epoch."""
     # De Morgan guard: only act when stepping per epoch with a monitor set.
     if self._step_on_batch or self._monitor is None:
         return
     metric_value = get_metric('Scheduler', state, self._monitor)
     self._scheduler.step(metric_value, epoch=state[torchbearer.EPOCH])
 def on_step_training(self, state):
     """Step the scheduler with the monitored metric after each training batch."""
     if not self._step_on_batch or self._monitor is None:
         return
     metric_value = get_metric('Scheduler', state, self._monitor)
     self._scheduler.step(metric_value)