def test(model, epochs, loader):
    # Evaluate the model and report accuracy for this epoch.
    model.eval()
    correct = 0
    with torch.no_grad():
        for i, j in loader:
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100
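

# Assumed helper (its definition is not shown in these snippets): the number
# of correct top-1 predictions in a batch; test() divides the running total
# by the dataset size to get a percentage.
def accuracy(outputs, labels):
    preds = outputs.argmax(dim=1)
    return (preds == labels).sum().item()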


dtype = torch.cuda.FloatTensor
torch.manual_seed(52)
net = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)
named_layers = [name for name, _ in net.named_parameters()]
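# SwatsLocal is given the parameter names, presumably so it can track each
# layer's switch from Adam to SGD independently (see adjust_lr below).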
opt = SwatsLocal(net.parameters(), named_layers, lr=0.001)
loss = nn.CrossEntropyLoss().type(dtype)


def adjust_lr(opt, epochs):
    # Manual step decay at epochs 75, 100 and 150.
    base_lr = 0.001
    if epochs >= 75:
        for ui in opt.param_groups:
            ui['div_lr_decay'] = 10
    if epochs >= 100:  # for layers which are still in Adam
        for ui in opt.param_groups:
            ui['lr'] = base_lr / 10
    if epochs >= 150:
        for ui in opt.param_groups:
            ui['div_lr_decay'] = 100


def test(model, epochs, loader):
    # Evaluate the model and report accuracy for this epoch.
    model.eval()
    correct = 0
    with torch.no_grad():
        for i, j in loader:
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100


dtype = torch.cuda.FloatTensor
torch.manual_seed(52)
net = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)
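# AdaBound behaves like Adam early on and clips each step size within bounds
# that converge to final_lr, approaching plain SGD late in training.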
opt = AdaBound(net.parameters(), lr=0.001, final_lr=0.1)
loss = nn.CrossEntropyLoss().type(dtype)
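# StepLR multiplies the learning rate by 0.1 every 100 epochs.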
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=100, gamma=0.1)

total_epochs = 200
train_loss = []
train_acc = []
test_acc = []
for s in range(1, total_epochs + 1):
    a, b = train(net, s, trainloader)  # epoch training loss and accuracy
    c = test(net, s, testloader)  # epoch test accuracy
    train_loss.append(a)
    train_acc.append(b)
    test_acc.append(c)
    scheduler.step()
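
# A minimal sketch (assumes matplotlib is available) of inspecting the
# accuracy curves collected above; the output filename is illustrative.
import matplotlib.pyplot as plt

plt.plot(train_acc, label='train accuracy')
plt.plot(test_acc, label='test accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy (%)')
plt.legend()
plt.savefig('accuracy_curves.png')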


def test(model, epochs, loader):
    # Evaluate the model and report accuracy for this epoch.
    model.eval()
    correct = 0
    with torch.no_grad():
        for i, j in loader:
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100


dtype = torch.cuda.FloatTensor
torch.manual_seed(52)
net = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)
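# Global SWATS variant: note that no per-parameter names are passed here,
# unlike the per-layer SwatsLocal above.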
opt = SwatsVanillaGlobal(net.parameters(), lr=0.001)
loss = nn.CrossEntropyLoss().type(dtype)


def adjust_lr(opt, epochs):
    # Single manual decay step at epoch 100.
    if epochs >= 100:
        for ui in opt.param_groups:
            ui['lr_decay'] = 10


total_epochs = 200
train_loss = []
train_acc = []
test_acc = []
for s in range(1, total_epochs + 1):
    a, b = train(net, s, trainloader)  # epoch training loss and accuracy
    c = test(net, s, testloader)  # epoch test accuracy
    train_loss.append(a)
    train_acc.append(b)
    test_acc.append(c)
    adjust_lr(opt, s)  # apply the manual decay defined above


Example #4
    # shuffle the dataset
    parquet_df = parquet_df.sample(frac=1).reset_index(drop=True)

    # Get image dataset from train df
    train_images = BengaliDataset(parquet_df, img_height=HEIGHT,
                                  img_width=WIDTH)

    dataset.add_BengaliDataset(train_images)
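
# Assumed stand-in model (the example's own definition is not shown): a
# minimal MLP matching the (64, 1000) -> (64, 10) shapes used below.
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(1000, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 10),
)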

# Define loss function
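# (reduction='sum' sums the squared errors over the batch instead of averaging)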
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-4

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Synthetic batch: 64 random inputs of size 1000 and targets of size 10.
x = torch.randn(64, 1000)
y = torch.randn(64, 10)

for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and print loss.
    loss = loss_fn(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())

    # Before the backward pass, use the optimizer object to zero all of
    # the gradients for the variables it will update
    # (which are the learnable weights of the model).