Example 1
import time

import numpy as np
import torch
from torch.autograd import Variable

from lenet import LeNet


def forward_pytorch(weightfile, image):
    # args comes from the enclosing script's argument parser.
    # net = resnet.resnet18()
    net = LeNet(1, 2)
    checkpoint = torch.load(weightfile)
    net.load_state_dict(checkpoint['weight'])
    net.double()  # run the network in double precision
    if args.cuda:
        net.cuda()
    print(net)
    net.eval()
    # Convert the NumPy image to a double tensor to match the network.
    image = torch.from_numpy(image.astype(np.float64))

    if args.cuda:
        image = Variable(image.cuda())
    else:
        image = Variable(image)
    t0 = time.time()
    blobs = net(image)
    t1 = time.time()  # stop timing before printing so only the forward pass is measured
    print(blobs.data.cpu().numpy().flatten())
    return t1 - t0, blobs, net, torch.from_numpy(blobs.data.cpu().numpy())
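forward_pytorch expects a checkpoint saved as a dict with a 'weight' entry and a NumPy array as input, and it assumes the surrounding script's args (with a cuda flag) is in scope. A minimal call might look like the sketch below; the checkpoint name and the 1x1x28x28 input shape are illustrative assumptions, not taken from the original.

# Hypothetical usage: 'lenet.pth' and the input shape are placeholders.
# LeNet(1, 2) above suggests one input channel and two output classes.
dummy = np.random.rand(1, 1, 28, 28).astype(np.float32)
elapsed, blobs, net, out = forward_pytorch('lenet.pth', dummy)
print('forward pass took %.4f s, output %s' % (elapsed, out.numpy()))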
Example 2
import torch as th
import torchvision as tv
import torch.nn as nn
import torch.optim as optim

from torch.autograd import Variable as V
from torchvision import transforms

from lenet import LeNet
from sobolev import SobolevLoss

USE_SOBOLEV = False

student = LeNet()
teacher = LeNet()
teacher.load_state_dict(th.load('teacher.pth'))
student = student.cuda()
teacher = teacher.cuda()

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = tv.datasets.CIFAR10(root='./data',
                               train=True,
                               download=True,
                               transform=transform_train)
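The snippet ends before the training loop. Below is a minimal sketch of one student-teacher distillation step under the setup above; the optimizer settings and the DataLoader are assumptions, and SobolevLoss is assumed to behave like a standard criterion taking (prediction, target), though the real class may also involve input gradients.

# Hypothetical continuation; nothing below appears in the original snippet.
from torch.utils.data import DataLoader

optimizer = optim.SGD(student.parameters(), lr=0.1, momentum=0.9)
criterion = SobolevLoss() if USE_SOBOLEV else nn.MSELoss()
trainloader = DataLoader(trainset, batch_size=128, shuffle=True)

teacher.eval()
student.train()
for x, _ in trainloader:
    x = V(x.cuda())
    soft_target = teacher(x).detach()  # fixed teacher logits as targets
    loss = criterion(student(x), soft_target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()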
Example 3
import math

import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# dist_init, DistModule, DistributedGivenIterationSampler, DistributedSampler,
# LeNet and train() are project-specific and assumed to be importable.


def main():
    global args, rank, world_size, best_prec1, dataset_len

    if args.dist == 1:
        rank, world_size = dist_init()
    else:
        rank = 0
        world_size = 1

    model = LeNet()
    model.cuda()

    # Keep a detached, trainable fp32 copy of the parameters; the optimizer
    # below updates this copy rather than the model's own parameters.
    param_copy = [
        param.clone().type(torch.cuda.FloatTensor).detach()
        for param in model.parameters()
    ]

    for param in param_copy:
        param.requires_grad = True

    if args.dist == 1:
        model = DistModule(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(param_copy,
                                args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    last_iter = -1

    # Data loading code
    train_dataset = datasets.MNIST(root='./data',
                                   train=True,
                                   transform=transforms.ToTensor(),
                                   download=False)
    val_dataset = datasets.MNIST(root='./data',
                                 train=False,
                                 transform=transforms.ToTensor(),
                                 download=False)

    dataset_len = len(train_dataset)
    args.max_iter = math.ceil(
        (dataset_len * args.epoch) / (world_size * args.batch_size))

    if args.dist == 1:
        train_sampler = DistributedGivenIterationSampler(train_dataset,
                                                         args.max_iter,
                                                         args.batch_size,
                                                         last_iter=last_iter)
        val_sampler = DistributedSampler(val_dataset, round_up=False)
    else:
        train_sampler = DistributedGivenIterationSampler(train_dataset,
                                                         args.max_iter,
                                                         args.batch_size,
                                                         world_size=1,
                                                         rank=0,
                                                         last_iter=last_iter)
        val_sampler = None

    # pin_memory=True copies batches into page-locked (pinned) host memory,
    # which speeds up host-to-GPU transfers.
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=True,
                              sampler=train_sampler)

    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True,
                            sampler=val_sampler)

    train(train_loader, val_loader, model, criterion, optimizer, param_copy)
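train() itself is not shown. The fp32 param_copy above only helps if each step routes the gradients computed on the model into the copy and writes the updated values back. The sketch below is an assumption about what train() does per step; update_master_params is a hypothetical helper name.

# Hypothetical inner step of train(): move gradients to the fp32 master
# copy, step the optimizer there, then write the result back to the model.
def update_master_params(model, param_copy, optimizer):
    for master, param in zip(param_copy, model.parameters()):
        if param.grad is not None:
            master.grad = param.grad.clone().type_as(master).detach()
    optimizer.step()
    for master, param in zip(param_copy, model.parameters()):
        param.data.copy_(master.data)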
Example 4
#!/usr/bin/env python3

import torch as th
import torchvision as tv
import torch.nn as nn
import torch.optim as optim

from torch.autograd import Variable as V
from torchvision import transforms

from lenet import LeNet

teacher = LeNet()
teacher = teacher.cuda()

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = tv.datasets.CIFAR10(root='./data',
                               train=True,
                               download=True,
                               transform=transform_train)
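Example 4 breaks off after the trainset definition. Since only a teacher network is built, a plausible continuation is ordinary supervised pretraining of the teacher; everything below is an assumption, though the saved 'teacher.pth' matches the checkpoint Example 2 loads.

# Hypothetical continuation; optimizer settings, loader and loss are assumptions.
from torch.utils.data import DataLoader

optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9)
criterion = nn.CrossEntropyLoss()
trainloader = DataLoader(trainset, batch_size=128, shuffle=True)

teacher.train()
for x, y in trainloader:
    x, y = V(x.cuda()), V(y.cuda())
    optimizer.zero_grad()
    loss = criterion(teacher(x), y)
    loss.backward()
    optimizer.step()

th.save(teacher.state_dict(), 'teacher.pth')  # the checkpoint Example 2 loads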
Example 5
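This fragment begins partway through a script: trainset, testset, classes, args, and cuda are never defined in what survives. A minimal sketch of the assumed setup follows, with placeholder values; none of these definitions appear in the original.

# Hypothetical setup for the fragment below.
import torch
import torchvision as tv
from torchvision import transforms
from types import SimpleNamespace

from lenet import LeNet

args = SimpleNamespace(batch_size=128, epochs=10)  # stand-in for argparse options
cuda = torch.cuda.is_available()
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')  # the standard CIFAR-10 labels
trainset = tv.datasets.CIFAR10(root='./data', train=True, download=True,
                               transform=transforms.ToTensor())
testset = tv.datasets.CIFAR10(root='./data', train=False, download=True,
                              transform=transforms.ToTensor())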

trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          pin_memory=cuda)

testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         pin_memory=cuda)

model = LeNet(k=len(classes), args=args)

if cuda:
    model.cuda()

for epoch in range(1, args.epochs + 1):
    model.train()

    for i, (x_batch, y_batch) in enumerate(trainloader):
        if cuda:
            x_batch, y_batch = x_batch.cuda(), y_batch.cuda()

        model.optimizer.zero_grad()

        # forward + backward + optimize
        output = model(x_batch)
        loss = model.criterion(output, y_batch)
        loss.backward()
        model.optimizer.step()
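The fragment stops after the optimizer step. A matching evaluation pass over testloader might look like this sketch; the accuracy reporting is an assumption, not part of the original.

# Hypothetical evaluation pass over the held-out set.
model.eval()
correct, total = 0, 0
for x_batch, y_batch in testloader:
    if cuda:
        x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
    output = model(x_batch)
    _, predicted = output.max(1)          # most likely class per sample
    correct += (predicted == y_batch).sum().item()
    total += y_batch.size(0)
print('test accuracy: %.2f%%' % (100.0 * correct / total))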
Example 6
            filename = os.path.join(root, 'soft_targets.pickle')
            # Persist the soft targets; the context manager closes the file
            # even if pickling fails.
            with open(filename, 'wb') as f:
                pickle.dump(self.train_labels, f)

    def get_soft_label(self, img):
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        return self.init_target_transform(img)


leNetModel = LeNet(args)

if args.cuda:
    leNetModel.cuda()

soft_labels = []

if importMode:
    leNetModel.load_state_dict(
        torch.load(os.path.join('./result', 'lenet5_best.json')))
else:
    for epoch in range(1, args.epochs + 1):
        leNetModel.train_(train_loader, epoch)


def get_soft_label(img):
    # Add a batch dimension, run the temperature-scaled forward pass, and
    # flatten the output back to a 1-D vector of soft labels.
    return leNetModel.forward_with_temperature(img.view(1, *img.size())).view(-1)
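forward_with_temperature is defined elsewhere in the project. In distillation, such a method typically divides the logits by a temperature T before the softmax, which softens the target distribution the student learns from. The sketch below is an assumption about its shape; the real method lives on the project's LeNet class and its temperature value is unknown.

import torch.nn.functional as F

# Hypothetical implementation, for illustration only.
def forward_with_temperature(self, x, T=4.0):
    logits = self.forward(x)              # ordinary forward pass
    return F.softmax(logits / T, dim=1)   # T > 1 softens the soft labels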