# Exemplo n.º 1  (code-sharing-site separator; commented out so the file parses)
# 0
def get_model(args):
    """Build the ConvNet and move it to the configured device.

    Args:
        args: namespace providing at least ``use_cuda`` (bool, used only
            for logging here) and ``device`` (the torch device the model
            is moved to).

    Returns:
        The ``ConvNet`` instance placed on ``args.device``.
    """
    # Batch norm enabled, residual connections disabled for this model.
    model = ConvNet(use_batch_norm=True, use_resnet=False)

    print('---Model Information---')
    print('Net:', model)
    print('Use GPU:', args.use_cuda)
    return model.to(args.device)
    # NOTE(review): the ~30 lines that previously followed this return were
    # unreachable dead code — a pasted-in quantization/evaluation script that
    # referenced names undefined in this scope (make_criterion, testloader,
    # test, torch). Removed; recover it from history if it was meant to be a
    # separate function.
# Exemplo n.º 3  (code-sharing-site separator; commented out so the file parses)
# 0
from models import ConvNet

# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the dataset.
# Normalize each of the 3 channels from [0, 1] to [-1, 1]
# (mean 0.5, std 0.5 per channel).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# NOTE(review): hard-coded Windows path — consider making this configurable.
dataset = torchvision.datasets.ImageFolder("D:/PokeRapper/Pokemon",
                                           transform=transform)
# Fix: torch.utils.data.DataLoad does not exist — the class is DataLoader.
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=1024,
                                         shuffle=True,
                                         num_workers=4)

# build the model
# TODO: Add support for loading different models
model = ConvNet()
model.to(device)  # nn.Module.to moves parameters in place

# Standard classification setup: cross-entropy loss, SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# train the network
# NOTE(review): the loop body below only prints an empty line — the
# forward/backward/optimizer-step logic appears to be truncated in this
# excerpt; criterion and optimizer defined above are never used here.
for epoch in range(100):
    for i, data in enumerate(dataloader, 0):
        print('')