Example #1
def after_batch(self):
    if self.learn.training:
        self._dags_step_num += 1
        # Flatten the optimizer's per-parameter-group hyper-parameters into one
        # dict, e.g. {'lr_0': ..., 'mom_0': ..., 'lr_1': ...}.
        hypers = {
            f'{k}_{i}': v
            for i, h in enumerate(self.opt.hypers) for k, v in h.items()
        }
        # Detach the smoothed and raw losses from the autograd graph before
        # converting them to NumPy for logging.
        metrics = {
            'train_loss': to_detach(self.learn.smooth_loss.clone()).numpy(),
            'raw_loss': to_detach(self.learn.loss.clone()).numpy(),
            **hypers
        }
        self.logger.log_metrics(metrics, step_num=self._dags_step_num)
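For context, `to_detach` detaches a tensor (or a nested collection of tensors) from the autograd graph and by default moves it to the CPU, which is why the `.numpy()` calls above are safe. A minimal single-tensor sketch of the same idea in plain PyTorch (simplified; the real helper also handles collections and distributed gathering):

import torch

def to_detach_sketch(t, cpu=True):
    "Simplified stand-in for fastai's to_detach on a single tensor."
    t = t.detach()
    return t.cpu() if cpu else t

smooth_loss = torch.tensor(0.42, requires_grad=True) * 2  # toy tensor carrying grad history
print(to_detach_sketch(smooth_loss).numpy())              # plain NumPy value, safe to log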
Example #2
def loss_batch(
    model: nn.Module,
    xb: Tensor,
    yb: Tensor,
    loss_func: OptLossFunc = None,
    opt: OptOptimizer = None,
    cb_handler: Optional[CallbackHandler] = None
) -> Tuple[Union[Tensor, int, float, str]]:
    "Calculate loss and metrics for a batch, call out to callbacks as necessary."
    cb_handler = ifnone(cb_handler, CallbackHandler())
    if not is_listy(xb): xb = [xb]
    if not is_listy(yb): yb = [yb]
    out = model(*xb)
    out = cb_handler.on_loss_begin(out)

    if not loss_func: return to_detach(out), yb[0].detach()
    loss = loss_func(out, *yb)

    if opt is not None:
        loss, skip_bwd = cb_handler.on_backward_begin(loss)
        if not skip_bwd: loss.backward()
        if not cb_handler.on_backward_end(): opt.step()
        if not cb_handler.on_step_end(): opt.zero_grad()

    return loss.detach().cpu()
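A minimal, hypothetical way to drive this function from a training loop; `model`, `train_dl`, `loss_func` and `opt` below are assumptions, not part of the snippet above:

model.train()
for xb, yb in train_dl:                      # one optimization step per batch
    loss = loss_batch(model, xb, yb, loss_func=loss_func, opt=opt)
    print(f'batch loss: {loss.item():.4f}')  # loss comes back detached and on the CPU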
Example #3
def loss_batch(
    model: nn.Module,
    xb: Tensor,
    yb: Tensor,
    loss_func: OptLossFunc = None,
    opt: OptOptimizer = None,
    cb_handler: Optional[CallbackHandler] = None,
) -> Tuple[Union[Tensor, int, float, str]]:
    "Calculate loss and metrics for a batch, call out to callbacks as necessary."
    cb_handler = ifnone(cb_handler, CallbackHandler())
    if not is_listy(xb):
        xb = [xb]
    if not is_listy(yb):
        yb = [yb]
    out = [model(x) for x in xb]
    out = cb_handler.on_loss_begin(out)

    if not loss_func:
        return to_detach(out), yb[0].detach()

    loss = loss_func(out)

    if opt is not None:
        loss = cb_handler.on_backward_begin(loss)
        # fastai v1.0.52 introduced the possibility for the backwards step to
        # be optional by returning a tuple here
        # see https://github.com/fastai/fastai/commit/6fcaad870e0e833d325052b57e72e23a450ebc6f#diff-0730afdfa67f9712e46ad7866b0123f8L32
        if type(loss) == tuple:
            loss, skip_bwd = loss
            if not skip_bwd:
                loss.backward()
        else:
            loss.backward()

        cb_handler.on_backward_end()
        opt.step()
        cb_handler.on_step_end()
        opt.zero_grad()

    return loss.detach().cpu()
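Compared with Example #2, this adapted version runs the model once per element of `xb` and calls `loss_func` with only the list of outputs, so the targets have to be captured elsewhere, for instance in a closure. A hypothetical loss function matching that contract (all names below are illustrative):

import torch.nn.functional as F

def make_loss_func(yb):
    "Close over the targets so the adapted loss_batch can call loss_func(out)."
    def _loss(outs):
        return sum(F.cross_entropy(o, yb) for o in outs) / len(outs)
    return _loss

loss = loss_batch(model, xb, yb, loss_func=make_loss_func(yb), opt=opt)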
Example #4
def hook_fn(self, module, inp):
    "Applies `self.hook_func` to `module` and `inp`, storing the result."
    if self.detach:
        # Drop autograd history (and optionally move to CPU / gather across
        # processes) before handing the input to the user-supplied function.
        inp = to_detach(inp, cpu=self.cpu, gather=self.gather)
    self.stored = self.hook_func(module, inp)
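The hook keeps a detached copy of whatever the module receives. A rough stand-alone equivalent using PyTorch's forward pre-hook API, without fastai's Hook wrapper (names are illustrative), might look like:

import torch
import torch.nn as nn

stored = {}

def store_input(module, inp):
    "Forward pre-hook: keep a detached CPU copy of the module's input tuple."
    stored['inp'] = tuple(t.detach().cpu() for t in inp)

layer = nn.Linear(4, 2)
handle = layer.register_forward_pre_hook(store_input)
layer(torch.randn(3, 4))
handle.remove()
print(stored['inp'][0].shape)  # torch.Size([3, 4])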