Example #1
def test_dense_saturation_runs_with_many_writers():
    save_path = TEMP_DIRNAME
    model = torch.nn.Sequential(torch.nn.Linear(10, 88)).to(device)

    # attach three writers at once: CSV-with-plots output, raw NumPy dumps,
    # and plain stdout logging
    writer = CSVandPlottingWriter(save_path,
                                  fontsize=16,
                                  primary_metric='test_accuracy')
    writer2 = NPYWriter(save_path)
    writer3 = PrintWriter()
    sat = SaturationTracker(save_path, [writer, writer2, writer3],
                            model,
                            stats=['lsat', 'idim'],
                            device=device)

    # a single forward pass gives the tracker activations to analyze
    test_input = torch.randn(5, 10).to(device)
    _ = model(test_input)
    sat.add_scalar("test_accuracy", 1.0)
    sat.add_saturations()

    return True
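
The test references TEMP_DIRNAME and device without defining them. A minimal harness sketch for running it; the temp-directory choice and the top-level delve import path are assumptions, not something the snippet specifies:

# hypothetical harness; TEMP_DIRNAME and device reconstructed from usage
import tempfile

import torch
from delve import (CSVandPlottingWriter, NPYWriter, PrintWriter,
                   SaturationTracker)

TEMP_DIRNAME = tempfile.mkdtemp()  # throwaway directory for writer output
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    assert test_dense_saturation_runs_with_many_writers()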
Example #2
                              stats=["lsat", "lsat_eval"])

    loss_fn = torch.nn.MSELoss(reduction='sum')
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
    steps_iter = trange(2000, desc='steps', leave=True, position=0)
    steps_iter.write("{:^80}".format(
        "Regression - TwoLayerNet - Hidden layer size {}".format(h)))
    for step in steps_iter:
        # training step
        model.train()
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # test step (no_grad avoids building a graph during evaluation)
        model.eval()
        with torch.no_grad():
            y_pred = model(x_test)
            loss_test = loss_fn(y_pred, y_test)

        # update statistics
        steps_iter.set_description('loss=%g' % loss.item())
        stats.add_scalar("train-loss", loss.item())
        stats.add_scalar("test-loss", loss_test.item())

        stats.add_saturations()
    steps_iter.write('\n')
    stats.close()
    steps_iter.close()
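
The progress-bar title mentions a TwoLayerNet with hidden size h that the snippet never defines. A plausible definition, modeled on the classic PyTorch-tutorial module of the same name; the exact architecture is an assumption:

import torch

class TwoLayerNet(torch.nn.Module):
    # two linear layers with a ReLU in between, as in the PyTorch tutorial
    def __init__(self, D_in, H, D_out):
        super().__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        h_relu = self.linear1(x).clamp(min=0)  # clamp(min=0) acts as ReLU
        return self.linear2(h_relu)

Tracking both "lsat" and "lsat_eval" is presumably meant to compare saturation between training-mode and eval-mode forward passes.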
Example #3
            step = epoch * len(loader) + i
            inputs = flatten(inputs)  # [bs,inp_dim]
            inputs = inputs.unsqueeze(1)  # [bs,1,inp_dim]
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs, mu, logvar = net(inputs)
            loss = loss_fn(outputs, inputs, mu, logvar, eps)
            loss.backward()

            optimizer.step()

            running_loss += loss.item()  # .item() detaches and yields a plain float
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

            # update the training progress display
            loader.set_description(desc='[%d/%d, %5d] loss: %.3f' %
                                   (epoch + 1, epochs, i + 1, loss.item()))
            # display layer saturation levels

        stats.add_scalar('epoch', epoch)  # optional
        stats.add_scalar('loss', running_loss)  # optional; running_loss is already a float
        stats.add_saturations()

    loader.write('\n')
    loader.close()
    stats.close()
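
The loop leans on a flatten helper, a five-argument VAE loss, and a tqdm-wrapped loader, none of which appear in the snippet. A sketch of those pieces under the usual VAE formulation; every name and the role of eps are assumptions:

import torch
import torch.nn.functional as F
from tqdm import tqdm

def flatten(x):
    # collapse all non-batch dimensions: [bs, ...] -> [bs, inp_dim]
    return x.view(x.size(0), -1)

def loss_fn(recon, target, mu, logvar, eps):
    # standard VAE objective: reconstruction error plus a KL-divergence
    # term, with eps assumed to weight the KL contribution
    recon_loss = F.mse_loss(recon, target, reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon_loss + eps * kld

# the shown body would then sit inside a loop shaped like:
#   for epoch in range(epochs):
#       loader = tqdm(train_loader)
#       for i, (inputs, labels) in enumerate(loader):
#           ...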
Example #4
                _, predicted = torch.max(outputs, 1)  # predicted is unused during training

                loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

        total = 0
        test_loss = 0
        correct = 0
        model.eval()
        with torch.no_grad():  # evaluation only; no gradients needed
            for (images, labels) in tqdm(test_loader):
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs, 1)

                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                test_loss += loss.item()

        # add some additional metrics we want to keep track of
        tracker.add_scalar("accuracy", correct / total)
        tracker.add_scalar("loss", test_loss / total)

        # add saturation to the mix
        tracker.add_saturations()
        tracker.save()

    # close the tracker to finish training
    tracker.close()
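
The tracker used above has to be constructed before the loop. A sketch mirroring the SaturationTracker signature from Example #1; the save path, the single-writer setup, and the stats list are assumptions:

import torch
from delve import CSVandPlottingWriter, SaturationTracker

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

writer = CSVandPlottingWriter("experiment", primary_metric="accuracy")
tracker = SaturationTracker("experiment",
                            [writer],
                            model,  # the classifier trained in the loop above
                            stats=["lsat"],
                            device=device)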