Example #1
0
def test(epoch):
    """Evaluate the model on the test set and checkpoint on a new best accuracy.

    Relies on module-level globals: `model`, `testloader`, `criterion`,
    `device`, `ckpt_file`, `best_acc`, and `get_dropped_params_ratio`.

    Args:
        epoch: Current epoch number; stored inside the checkpoint so
            training can be resumed from it.
    """
    global best_acc
    model.eval()  # switch off train-time behavior (dropout etc.) for eval
    test_loss = []
    correct = 0
    total = 0
    with torch.no_grad():  # no autograd graph needed during evaluation
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            test_loss.append(loss.item())
            _, predicted = outputs.max(1)  # index of the top-scoring class
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    # Save a checkpoint whenever accuracy improves on the best seen so far.
    acc = 100. * correct / total
    print('Test loss: %.2f' % np.mean(test_loss))
    print('Test accuracy: %.2f%%' % acc)
    print('Compression: %.2f%%' % (100. * get_dropped_params_ratio(model)))
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': model.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        # makedirs(..., exist_ok=True) avoids the check-then-create race
        # (TOCTOU) of the original `if not os.path.isdir: os.mkdir` pair.
        os.makedirs('checkpoint', exist_ok=True)
        torch.save(state, ckpt_file)
        best_acc = acc
Example #2
0
def test(epoch):
    """Evaluate the model, report metrics, and checkpoint on best compression.

    Unlike the accuracy-driven variant, this saves a checkpoint whenever the
    parameter compression ratio improves.  Relies on module-level globals:
    `model`, `testloader`, `criterion`, `device`, `ckpt_file`,
    `best_compression`, and `get_dropped_params_ratio`.

    Args:
        epoch: Current epoch number; stored inside the checkpoint.
    """
    global best_compression
    model.eval()  # switch off train-time behavior for evaluation
    test_loss = []
    correct = 0
    total = 0
    inference_time_seconds = 0
    with torch.no_grad():  # no autograd graph needed during evaluation
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            # perf_counter() is monotonic and higher-resolution than
            # time.time(), so it is the right clock for interval timing.
            # NOTE(review): on a CUDA device this measures asynchronous
            # kernel-launch time unless torch.cuda.synchronize() is
            # called around the forward pass -- confirm intent.
            start_ts = time.perf_counter()
            outputs = model(inputs)
            inference_time_seconds += time.perf_counter() - start_ts
            loss = criterion(outputs, targets)

            test_loss.append(loss.item())
            _, predicted = outputs.max(1)  # index of the top-scoring class
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    # Save a checkpoint whenever compression improves on the best so far.
    acc = 100. * correct / total
    compression = 100. * get_dropped_params_ratio(model)
    print('Test loss: %.3f' % np.mean(test_loss))
    print('Test accuracy: %.3f%%' % acc)
    print('Compression: %.2f%%' % compression)
    print('Inference time: %.2f seconds' % inference_time_seconds)
    if compression > best_compression:
        print('Saving..')
        state = {
            'net': model.state_dict(),
            'acc': acc,
            'epoch': epoch,
            'compression': compression
        }
        # exist_ok=True avoids the check-then-create race of the original.
        os.makedirs('checkpoint', exist_ok=True)
        torch.save(state, ckpt_file)
        best_compression = compression
Example #3
0
    [torch.from_numpy(np.array(x)).float().to(device)
     for x in [train_X, test_X, train_y, test_y]]

# Bayesian ARD regression trained with the ELBO objective; the KL term's
# weight is annealed linearly from 0 to 1 over the first half of training.
model = DenseModelARD(input_shape=train_X.shape[1], output_shape=1,
                      activation=nn.functional.relu).to(device)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# NOTE(review): scheduler is constructed but .step() is never called in
# this loop -- confirm whether LR scheduling was intentionally disabled.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, 'min')
criterion = ELBOLoss(model, F.mse_loss).to(device)

n_epoches = 100000
debug_frequency = 100


def get_kl_weight(epoch):
    """Linearly anneal the KL weight, reaching 1 halfway through training."""
    return min(1, 2 * epoch / n_epoches)


pbar = trange(n_epoches, leave=True, position=0)
for epoch in pbar:
    kl_weight = get_kl_weight(epoch)
    opt.zero_grad()
    preds = model(train_X).squeeze()
    loss = criterion(preds, train_y, 1, kl_weight)
    loss.backward()
    opt.step()
    # Diagnostics use kl_weight=0, i.e. the data-fit term only.  The train
    # `preds` already carry a graph from the update step, so just detach;
    # the test forward pass needs no autograd graph at all.
    loss_train = float(
        criterion(preds, train_y, 1, 0).detach().cpu().numpy())
    with torch.no_grad():
        preds = model(test_X).squeeze()
        loss_test = float(
            criterion(preds, test_y, 1, 0).cpu().numpy())
    pbar.set_description('MSE (train): %.3f\tMSE (test): %.3f\tReg: %.3f\tDropout rate: %f%%' % (
        loss_train, loss_test, get_ard_reg(model).item(), 100 * get_dropped_params_ratio(model)))
    pbar.update()
Example #4
0
# Baseline: plain MSE regression with the ARD regularizer added manually
# to the loss (cf. the ELBOLoss variant, which folds it in internally).
train_X, test_X, train_y, test_y = \
    [torch.from_numpy(np.array(x)).float().to(device)
     for x in [train_X, test_X, train_y, test_y]]

model = DenseModelARD(input_shape=train_X.shape[1], output_shape=1,
                      activation=nn.functional.relu).to(device)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# NOTE(review): scheduler is constructed but never stepped (the original
# had `scheduler.step(loss)` commented out) -- LR scheduling appears
# intentionally disabled; confirm before removing.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, 'min')
criterion = nn.MSELoss()

n_epoches = 100000
debug_frequency = 100
reg_factor = 1  # weight of the ARD regularizer in the total loss

for epoch in range(n_epoches):
    opt.zero_grad()
    preds = model(train_X).squeeze()
    reg = get_ard_reg(model)
    loss = criterion(preds, train_y) + reg * reg_factor
    loss.backward()
    opt.step()
    # Train `preds` already carry a graph from the update step -- detach.
    loss_train = float(criterion(preds, train_y).detach().cpu().numpy())
    # The evaluation forward pass needs no autograd graph at all.
    with torch.no_grad():
        preds = model(test_X).squeeze()
        loss_test = float(criterion(preds, test_y).cpu().numpy())
    if epoch % debug_frequency == 0:
        print('%d epoch' % epoch)
        print('MSE (train): %.3f' % loss_train)
        print('MSE (test): %.3f' % loss_test)
        print('Reg: %.3f' % reg.item())
        print('Dropout rate: %f%%' % (100 * get_dropped_params_ratio(model)))