import numpy as np
import torch


def train_network(net, optimizer, loss_func, epochs, train_data, test_data,
                  with_preprocessing):
    device = torch.device("cuda:0")
    train_loss_per_epoch, test_loss_per_epoch = [], []
    train_acc_per_epoch, test_acc_per_epoch = [], []
    for epoch in range(epochs):
        for sample_batched in train_data:
            images, labels = sample_batched
            # move the batch to the GPU the network lives on
            images, labels = images.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(images.float())
            loss = loss_func(outputs, labels)  # already on the GPU; no .cuda() needed
            loss.backward()
            optimizer.step()

        # calculate train & test loss
        epoch_train_loss = calc_dataset_loss(train_data, net, loss_func)
        epoch_test_loss = calc_dataset_loss(test_data, net, loss_func)
        # calculate train & test accuracy
        epoch_train_acc = calc_dataset_acc(train_data, net)
        epoch_test_acc = calc_dataset_acc(test_data, net)

        print(
            '[epoch %d]\ttrain_loss: %.3f\t test_loss: %.3f\ttrain_acc: %.2f%%\ttest_acc: %.2f%%'
            % (epoch + 1, epoch_train_loss, epoch_test_loss,
               100 * epoch_train_acc, 100 * epoch_test_acc))

        train_loss_per_epoch.append(epoch_train_loss)
        test_loss_per_epoch.append(epoch_test_loss)
        train_acc_per_epoch.append(epoch_train_acc)
        test_acc_per_epoch.append(epoch_test_acc)

    np.save(
        f"statistics/results/preprocessing/results/{with_preprocessing}_preprocessing_stats",
        np.array([
            train_loss_per_epoch, test_loss_per_epoch, train_acc_per_epoch,
            test_acc_per_epoch
        ]))
    print()
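
Both examples call calc_dataset_loss and calc_dataset_acc, which are not shown on this page. Below is a minimal sketch of what such helpers might look like, assuming the loss is averaged per batch and the accuracy is returned as a fraction in [0, 1] (the prints above multiply it by 100); the actual implementations may differ.

import torch


def calc_dataset_loss(dataloader, net, loss_func):
    """Average per-batch loss over a dataset (assumed behaviour)."""
    device = next(net.parameters()).device
    total_loss, n_batches = 0.0, 0
    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images.float())
            total_loss += loss_func(outputs, labels).item()
            n_batches += 1
    return total_loss / max(n_batches, 1)


def calc_dataset_acc(dataloader, net):
    """Fraction of correctly classified samples (assumed behaviour)."""
    device = next(net.parameters()).device
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            preds = net(images.float()).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / max(total, 1)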
Code example #2
        # Train Network (excerpt: this block is assumed to run inside an outer
        # loop over network_width, with net, optimizer, loss_fn, the dataloaders
        # and the per-epoch lists defined above it)
        for epoch in range(epochs):
            for sample_batched in train_dataloader:
                images, labels = sample_batched

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = net(images.float())
                loss = loss_fn(outputs, labels)
                loss.backward()
                optimizer.step()

            # calculate train & test loss
            epoch_train_loss = calc_dataset_loss(train_dataloader, net, loss_fn)
            epoch_test_loss = calc_dataset_loss(test_dataloader, net, loss_fn)
            # calculate train & test accuracy
            epoch_train_acc = calc_dataset_acc(train_dataloader, net)
            epoch_test_acc = calc_dataset_acc(test_dataloader, net)

            print('[epoch %d]\ttrain_loss: %.3f\t test_loss: %.3f\ttrain_acc: %.2f%%\ttest_acc: %.2f%%' %
                  (epoch + 1, epoch_train_loss, epoch_test_loss, 100*epoch_train_acc, 100*epoch_test_acc))

            train_loss_per_epoch.append(epoch_train_loss)
            test_loss_per_epoch.append(epoch_test_loss)
            train_acc_per_epoch.append(epoch_train_acc)
            test_acc_per_epoch.append(epoch_test_acc)

        np.save("statistics/results/widths/results/width_%d_stats" % network_width, np.array([train_loss_per_epoch, test_loss_per_epoch, train_acc_per_epoch, test_acc_per_epoch]))
        print()
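
For completeness, here is a hypothetical way to call train_network from the first example. The dataset, model, hyperparameters and the "with" tag are placeholders, not taken from the original project, and the call assumes a CUDA device plus an existing statistics/results/preprocessing/results/ directory for np.save.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# toy data standing in for a real image dataset
images = torch.randn(256, 3, 32, 32)
labels = torch.randint(0, 10, (256,))
train_loader = DataLoader(TensorDataset(images, labels), batch_size=32, shuffle=True)
test_loader = DataLoader(TensorDataset(images, labels), batch_size=32)

# placeholder model, optimizer and loss
net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).to("cuda:0")
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = nn.CrossEntropyLoss()

train_network(net, optimizer, loss_func, epochs=5,
              train_data=train_loader, test_data=test_loader,
              with_preprocessing="with")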