Example #1
def loss_batch(
    model: nn.Module,
    xb: Tensor,
    yb: Tensor,
    loss_func: OptLossFunc = None,
    opt: OptOptimizer = None,
    cb_handler: Optional[CallbackHandler] = None
) -> Tuple[Union[Tensor, int, float, str]]:
    "Calculate loss and metrics for a batch, call out to callbacks as necessary."
    cb_handler = ifnone(cb_handler, CallbackHandler())
    if not is_listy(xb): xb = [xb]
    if not is_listy(yb): yb = [yb]
    out = model(*xb)
    out = cb_handler.on_loss_begin(out)

    if not loss_func: return to_detach(out), yb[0].detach()
    loss = loss_func(out, *yb)

    if opt is not None:
        loss, skip_bwd = cb_handler.on_backward_begin(loss)
        if not skip_bwd: loss.backward()
        if not cb_handler.on_backward_end(): opt.step()
        if not cb_handler.on_step_end(): opt.zero_grad()

    return loss.detach().cpu()
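A hedged usage sketch follows; `model`, `data`, and `opt` are hypothetical stand-ins for an existing fastai v1 model, DataBunch, and optimizer. When `opt` is supplied, `loss_batch` also runs the backward pass, optimizer step, and gradient reset.

import torch.nn.functional as F

# Hypothetical setup: `model`, `data`, and `opt` come from an existing fastai v1 pipeline.
xb, yb = next(iter(data.train_dl))              # grab one training batch
loss = loss_batch(model, xb, yb,
                  loss_func=F.cross_entropy,    # any (output, target) -> Tensor callable
                  opt=opt)                      # triggers backward + step + zero_grad
print(loss)                                     # detached scalar tensor on CPU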
Example #2
def on_batch_end(self, last_output, last_target, **kwargs):
    "Update metric computation with `last_output` and `last_target`."
    if not is_listy(last_target): last_target = [last_target]
    met_val = self.func(last_output, *last_target)
    if met_val is not None:
        self.count += last_target[0].size(0)
        self.val += last_target[0].size(0) * met_val.detach().cpu()
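This is the batch-level half of a running, batch-size-weighted average. A minimal sketch of the matching epoch-level read-out, mirroring fastai v1's `AverageMetric` and its real `add_metrics` helper:

def on_epoch_end(self, last_metrics, **kwargs):
    "Report the batch-size-weighted average accumulated in `self.val` / `self.count`."
    return add_metrics(last_metrics, self.val / self.count)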
Example #3
def validate_w_dropout(
        model: nn.Module,
        dl: DataLoader,
        loss_func: OptLossFunc = None,
        cb_handler: Optional[CallbackHandler] = None,
        pbar: Optional[PBar] = None,
        average=True,
        n_batch: Optional[int] = None
) -> Iterator[Tuple[Union[Tensor, int], ...]]:
    "Calculate `loss_func` of `model` on `dl` in evaluation mode."
    model.train()
    with torch.no_grad():
        val_losses, nums = [], []
        if cb_handler: cb_handler.set_dl(dl)
        for xb, yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)):
            if cb_handler:
                xb, yb = cb_handler.on_batch_begin(xb, yb, train=False)
            val_loss = loss_batch(model,
                                  xb,
                                  yb,
                                  loss_func,
                                  cb_handler=cb_handler)
            val_losses.append(val_loss)
            if not is_listy(yb): yb = [yb]
            nums.append(yb[0].shape[0])
            if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
            if n_batch and (len(nums) >= n_batch): break
        nums = np.array(nums, dtype=np.float32)
        if average:
            result = (to_np(torch.stack(val_losses)) * nums).sum() / nums.sum()
        else:
            result = val_losses
    model.eval()  # restore evaluation mode now that validation is done
    return result
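Because dropout stays active, repeated calls give different losses. A sketch of using that spread as a rough Monte Carlo-style uncertainty estimate (`model`, `valid_dl`, and `loss_fn` are assumptions, not part of the code above):

import numpy as np

# Hypothetical repetition: each pass samples a different dropout mask.
losses = [validate_w_dropout(model, valid_dl, loss_func=loss_fn) for _ in range(10)]
print(np.mean(losses), np.std(losses))   # the std reflects dropout-induced variance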
Example #4
def make_data_bunch_from_df(cls,
                            path: PathOrStr,
                            train_df: DataFrame,
                            valid_df: DataFrame,
                            tokenizer: Tokenizer = None,
                            vocab: Vocab = None,
                            classes: Collection[str] = None,
                            text_cols: IntsOrStrs = 1,
                            label_cols: IntsOrStrs = 0,
                            label_delim: str = None,
                            chunksize: int = 10000,
                            max_vocab: int = 60000,
                            label_cls: Callable = None,
                            min_freq: int = 2,
                            mark_fields: bool = False,
                            include_bos: bool = True,
                            include_eos: bool = False,
                            processor=None,
                            **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation."
    assert processor is None or tokenizer is None, "Processor and tokenizer are mutually exclusive."

    if processor is None:
        processor = fastai.text.data._get_processor(tokenizer=tokenizer,
                                                    vocab=vocab,
                                                    chunksize=chunksize,
                                                    max_vocab=max_vocab,
                                                    min_freq=min_freq,
                                                    mark_fields=mark_fields,
                                                    include_bos=include_bos,
                                                    include_eos=include_eos)

    if classes is None and is_listy(label_cols) and len(label_cols) > 1:
        classes = label_cols
    src = ItemLists(
        path,
        TextList.from_df(train_df,
                         path,
                         cols=text_cols,
                         processor=processor,
                         label_cls=label_cls),
        TextList.from_df(valid_df,
                         path,
                         cols=text_cols,
                         processor=processor,
                         label_cls=label_cls))
    if cls == TextLMDataBunch:
        src = src.label_for_lm()
    else:
        if label_delim is not None:
            src = src.label_from_df(cols=label_cols,
                                    classes=classes,
                                    label_delim=label_delim)
        else:
            src = src.label_from_df(cols=label_cols, classes=classes)
    return src.databunch(**kwargs)
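A hedged example call, assuming `train_df` and `valid_df` are DataFrames with the label in column 0 and the text in column 1 (the defaults above), and using fastai's real `TextClasDataBunch`:

data = make_data_bunch_from_df(TextClasDataBunch,
                               path='.',
                               train_df=train_df,
                               valid_df=valid_df,
                               text_cols=1,
                               label_cols=0,
                               bs=32)   # `bs` is forwarded to databunch() via **kwargs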
Example #5
def loss_batch(
    model: nn.Module,
    xb: Tensor,
    yb: Tensor,
    loss_func: OptLossFunc = None,
    opt: OptOptimizer = None,
    cb_handler: Optional[CallbackHandler] = None,
) -> Tuple[Union[Tensor, int, float, str]]:
    "Calculate loss and metrics for a batch, call out to callbacks as necessary."
    cb_handler = ifnone(cb_handler, CallbackHandler())
    if not is_listy(xb):
        xb = [xb]
    if not is_listy(yb):
        yb = [yb]
    out = [model(x) for x in xb]
    out = cb_handler.on_loss_begin(out)

    if not loss_func:
        return to_detach(out), yb[0].detach()

    loss = loss_func(out)

    if opt is not None:
        loss = cb_handler.on_backward_begin(loss)
        # fastai v1.0.52 introduced the possibility for the backwards step to
        # be optional by returning a tuple here
        # see https://github.com/fastai/fastai/commit/6fcaad870e0e833d325052b57e72e23a450ebc6f#diff-0730afdfa67f9712e46ad7866b0123f8L32
        if isinstance(loss, tuple):
            loss, skip_bwd = loss
            if not skip_bwd:
                loss.backward()
        else:
            loss.backward()

        cb_handler.on_backward_end()
        opt.step()
        cb_handler.on_step_end()
        opt.zero_grad()

    return loss.detach().cpu()
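The tuple check above is a version-compatibility shim for the fastai v1.0.52 change it cites. The same normalization can be written more compactly; this is a sketch of the pattern, not part of the fastai API:

# Normalize a return value that may be either `loss` or `(loss, skip_bwd)`.
result = cb_handler.on_backward_begin(loss)
loss, skip_bwd = result if isinstance(result, tuple) else (result, False)
if not skip_bwd:
    loss.backward()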
Example #6
def on_batch_end(self, last_output, last_target, **kwargs):
    "Update metric computation with `last_output` and `last_target`."
    if not is_listy(last_target): last_target = [last_target]
    val = self.func(last_output, *last_target)
    # If the metric is NaN, skip this batch: leave the count and running sum untouched.
    if torch.isnan(val).tolist():
        return
    self.count += first_el(last_target).size(0)
    if self.world:
        val = val.clone()
        dist.all_reduce(val, op=dist.ReduceOp.SUM)
        val /= self.world
    self.val += first_el(last_target).size(0) * val.detach().cpu()
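The distributed branch above follows the standard all-reduce averaging pattern. A minimal standalone sketch, assuming `torch.distributed` has already been initialized with `init_process_group`:

import torch.distributed as dist

def mean_across_processes(val):
    "Average a metric tensor over all workers via a summed all-reduce."
    val = val.clone()                            # keep the caller's tensor untouched
    dist.all_reduce(val, op=dist.ReduceOp.SUM)   # in-place sum across the process group
    return val / dist.get_world_size()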