Example #1
import numpy as np


def fit(net,
        optimizer,
        loss_function,
        dataloader_train,
        epochs=10,
        pbar=None,
        device='cpu'):
    val_loss_best = np.inf
    loss_list = []

    # Loop over epochs
    for idx_epoch in range(epochs):
        for idx_batch, (x, y) in enumerate(dataloader_train):
            optimizer.zero_grad()

            # Propagate input
            netout = net(x.to(device))  # [8,1460]

            # Compute loss
            loss = loss_function(y.to(device), netout)
            print('Epoch:', '%04d' % (idx_epoch + 1), 'loss =',
                  '{:.6f}'.format(loss.item()))
            loss_list.append(loss.item())  # store a float, not the graph-attached tensor

            # Backpropagate loss
            loss.backward()

            # Update weights
            optimizer.step()

        # Note: with no validation loader, the "validation" loss is computed
        # on the training set itself
        val_loss = compute_loss(net, dataloader_train, loss_function,
                                device).item()

        if val_loss < val_loss_best:
            val_loss_best = val_loss

        if pbar is not None:
            pbar.update()

    return val_loss_best
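
Both examples call a compute_loss helper that is not part of the listing. The code below is a minimal sketch of such a helper, assuming it returns the average loss over a dataloader as a tensor (so the .item() call in fit works); the function name and argument order are taken from the calls above, while the body is an assumption.

import torch

def compute_loss(net, dataloader, loss_function, device='cpu'):
    # Evaluate the average loss over a dataloader with gradients disabled
    net.eval()
    running_loss = 0.0
    with torch.no_grad():
        for x, y in dataloader:
            netout = net(x.to(device))
            # Same (targets, predictions) argument order used in fit()
            running_loss += loss_function(y.to(device), netout)
    net.train()
    return running_loss / len(dataloader)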
Example #2
import numpy as np


def fit(net,
        optimizer,
        loss_function,
        dataloader_train,
        dataloader_val,
        epochs=10,
        pbar=None,
        device='cpu'):
    val_loss_best = np.inf

    # Loop over epochs
    for idx_epoch in range(epochs):
        for idx_batch, (x, y) in enumerate(dataloader_train):
            optimizer.zero_grad()

            # Propagate input
            netout = net(x.to(device))

            # Compute loss
            loss = loss_function(y.to(device), netout)

            # Backpropagate loss
            loss.backward()

            # Update weights
            optimizer.step()

        val_loss = compute_loss(net, dataloader_val, loss_function,
                                device).item()

        if val_loss < val_loss_best:
            val_loss_best = val_loss

        if pbar is not None:
            pbar.update()

    return val_loss_best
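
A possible way to call the second variant, using the compute_loss sketch above, a small placeholder model, and random tensors; the data shapes, the MSE criterion, and the optimizer settings are illustrative assumptions rather than part of the original examples.

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# Toy regression data: 8 input features, 1 target (shapes are assumptions)
x_train, y_train = torch.randn(256, 8), torch.randn(256, 1)
x_val, y_val = torch.randn(64, 8), torch.randn(64, 1)
dataloader_train = DataLoader(TensorDataset(x_train, y_train),
                              batch_size=32, shuffle=True)
dataloader_val = DataLoader(TensorDataset(x_val, y_val), batch_size=32)

net = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
loss_function = nn.MSELoss()

val_loss_best = fit(net,
                    optimizer,
                    loss_function,
                    dataloader_train,
                    dataloader_val,
                    epochs=5)
print('Best validation loss: {:.6f}'.format(val_loss_best))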