Example #1
def parse_distillation_loss(args):
    try:
        fields = args.distillation.split(",")
    except AttributeError:  # fall back when args carries the spec in feat_dist
        fields = args.feat_dist.split(",")
    method = fields[0]
    d_args = dict([i.split("-") for i in fields[1:]])

    for k, v in d_args.items():
        if k in ["lambda"]:
            d_args[k] = float(v)
        elif k in ["p"]:
            d_args[k] = int(v)

    print("Perdida", method, "con parametros", d_args)
    losses_list = [hint, att_max, att_mean, PKT, nst_gauss, nst_linear, nst_poly]

    d = {func.__name__: func for func in losses_list}

    # folder: -> [dataset]/[teacher]/students/[student_model]/[distillation type]/[]
    auto_change_dir("/".join(fields))  # fields holds whichever spec string was found above

    try:
        loss = d[method]
    except KeyError:
        raise ModuleNotFoundError(f"Loss '{method}' not found")

    try:
        return loss(**d_args)
    except TypeError:  # bad keyword arguments for the chosen loss
        raise NameError("There is an argument error")
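For reference, a minimal sketch of the "method,key-value" spec string this parser expects; the spec value below is a hypothetical example, and real method names must match losses_list:

spec = "nst_poly,p-2"
fields = spec.split(",")                         # ["nst_poly", "p-2"]
method = fields[0]                               # "nst_poly"
kwargs = dict(f.split("-") for f in fields[1:])  # {"p": "2"} (values still strings)
print(method, kwargs)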
Example #2
def parse_distillation_loss(args, string_input=False):
    if string_input:
        fields = args.split(",")
    else:
        fields = args.distillation.split(",")
    method = fields[0]
    ar = dict([i.split("-") for i in fields[1:]])
    for k, v in ar.items():
        ar[k] = float(v)

    print("Perdida", method, "con parametros", ar)
    losses_list = [KD, KD_CE, CE]
    d = {func.__name__: func for func in losses_list}

    try:
        loss = d[method]
    except KeyError:
        raise ModuleNotFoundError("Loss not found")
    if not string_input:
        try:
            # folder: -> [dataset]/[teacher]/students/[student_model]/[distillation type]/[]
            auto_change_dir(args.distillation.replace(",", "/", 1))
            return loss(**ar)
        except TypeError:  # bad keyword arguments for the chosen loss
            raise NameError("There is an argument error")
    else:
        return loss(**ar)  # todo: tidy this up
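A hedged usage sketch: with string_input=True the spec string is passed directly and the directory change is skipped. The "KD,T-8" spec, and the assumption that the KD loss accepts a float T argument, are illustrative only:

kd_loss = parse_distillation_loss("KD,T-8", string_input=True)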
Example #3
def get_dataloaders(batch_size, folder=None):
    trainset, testset = get_imageNet()

    auto_change_dir("Imagenet")
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size, shuffle=True, num_workers=0)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size, shuffle=False, num_workers=0)
    return trainloader, testloader
Example #4
def experiment_run(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    trainloader, testloader = get_dataloaders(args)

    auto_change_dir(args.exp_name)

    print("Using device", device)  # todo: cambiar a logger

    # This will download a model with its weights. To use another model, just instantiate it here.
    teacher = load_model(args.teacher, trainable=False, device=device)
    student = load_model(args.student)

    teacher.eval()
    student.train()

    best_acc = 0
    start_epoch = 0

    feat_loss = parse_distillation_loss(args)
    kd_loss = last_layer_loss_parser(args.log_dist, string_input=True)

    eval_criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(),
                           lr=args.lr)  # todo: consider moving into the experiment
    flatten = args.student.split("_")[0] == "linear"
    layer = args.layer
    idxs = [layer]
    auto_change_dir(",".join([str(i) for i in idxs]))

    writer = SummaryWriter("tb_logs")  # todo: move inside the experiment

    exp = FeatureExperiment(device=device,
                            student=student,
                            teacher=teacher,
                            optimizer=optimizer,
                            kd_criterion=kd_loss,
                            ft_criterion=feat_loss,
                            eval_criterion=eval_criterion,
                            linear=flatten,
                            writer=writer,
                            testloader=testloader,
                            trainloader=trainloader,
                            best_acc=best_acc,
                            idxs=idxs,
                            use_regressor=args.feat_dist == "hint",
                            args=args)
    if exp.epoch + 1 < args.epochs:
        print("training", exp.epoch, "-", args.epochs)
        for epoch in range(exp.epoch, args.epochs):
            exp.train_epoch()
            exp.test_epoch()
        exp.save_model()
    else:
        print("epochs surpassed")
Example #5
def experiment_run(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print("Using device", device)  # todo: cambiar a logger
    trainloader, testloader, classes = cifar10_parser(args)
    teacher = load_teacher(args, device)

    if args.exp_name is not None:
        # os.chdir("/home/jp/Memoria/repo/Cifar10/ResNet101/") #Linux
        os.chdir("C:/Users/PC/PycharmProjects/Memoria/Cifar10/ResNet101/"
                 )  # Windows
        auto_change_dir(args.exp_name)

    student, best_acc, start_epoch = load_student(args, device)

    feat_loss = parse_distillation_loss(args)
    kd_loss = last_layer_loss_parser(args.last_layer, string_input=True)

    eval_criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(),
                           lr=args.lr)  # todo: consider moving into the experiment
    flatten = args.student.split("_")[0] == "linear"
    layer = args.layer
    idxs = [layer]
    auto_change_dir(",".join([str(i) for i in idxs]))

    writer = SummaryWriter("tb_logs")  # todo: move inside the experiment

    exp = FeatureExperiment(device=device,
                            student=student,
                            teacher=teacher,
                            optimizer=optimizer,
                            kd_criterion=kd_loss,
                            ft_criterion=feat_loss,
                            eval_criterion=eval_criterion,
                            linear=flatten,
                            writer=writer,
                            testloader=testloader,
                            trainloader=trainloader,
                            best_acc=best_acc,
                            idxs=idxs,
                            use_regressor=args.distillation == "hint",
                            args=args)
    if exp.epoch + 1 < args.epochs:
        print("training", exp.epoch, "-", args.epochs)
        for epoch in range(exp.epoch, args.epochs):
            exp.train_epoch()
            exp.test_epoch()
        exp.save_model()
    else:
        print("epochs surpassed")
Example #6
def load_samples(args,
                 samples_folder,
                 transform_train=None,
                 transform_test=None):
    auto_change_dir("Cifar10")

    if transform_train is None:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            #transforms.Lambda(random_return),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    if transform_test is None:
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    print(os.listdir())

    trainset = torchvision.datasets.ImageFolder(
        root="C:\\Users\\PC\\PycharmProjects\\Memoria\\Cifar10\\" +
        samples_folder,
        transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=0)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=0)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')
    return trainloader, testloader, classes
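Hypothetical usage: train on generated samples stored as an ImageFolder tree (one subfolder per class, which is the layout torchvision.datasets.ImageFolder requires) while testing on real CIFAR-10. The "fake_samples" folder name is an assumption:

trainloader, testloader, classes = load_samples(args, "fake_samples")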
Example #7
def load_mnist(args):
    # Load MNIST
    auto_change_dir("Mnist")

    train_data = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transforms.Compose([
        transforms.ToTensor(),  # ToTensor does min-max normalization.
    ]), )

    test_data = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transforms.Compose([
        transforms.ToTensor(),  # ToTensor does min-max normalization.
    ]), )

    # Create DataLoaders (only the training set should be shuffled)
    dataloader_args = dict(batch_size=args.train_batch_size, num_workers=2)
    train_loader = torch.utils.data.DataLoader(train_data, shuffle=True, **dataloader_args)
    test_loader = torch.utils.data.DataLoader(test_data, shuffle=False, **dataloader_args)

    return train_loader, test_loader, range(10)
Example #8
def load_teacher(args, device):
    print('==> Building teacher model..', args.teacher)
    net = get_model(args.teacher)
    net = net.to(device)

    # Freeze the teacher: its weights are never updated during distillation.
    for param in net.parameters():
        param.requires_grad = False

    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    auto_change_dir(args.teacher)
    print(os.getcwd())
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.pth', map_location=device)  # remap to CPU when CUDA is unavailable
    net.load_state_dict(checkpoint['net'])

    return net
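The checkpoint layout can be inferred from the load above: a dict whose 'net' key holds the model's state_dict. A matching save sketch (any extra keys would be assumptions, so only 'net' is written here):

os.makedirs('checkpoint', exist_ok=True)
torch.save({'net': net.state_dict()}, './checkpoint/ckpt.pth')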
Example #9
def load_model(model_name, trainable=True, device="cuda"):
    model_dict = {
        "ResNet18": models.resnet18,
        "ResNet152": models.resnet152,
        "MobileNetV2": models.mobilenet_v2,
        "squeezenet1": models.squeezenet1_0
    }

    model = model_dict[model_name]
    # print(model)
    auto_change_dir(model_name)

    if trainable:
        model = model(pretrained=False)
    else:
        model = model(pretrained=True, progress=True)
        for param in model.parameters():
            param.requires_grad = False

    model = model.to(device)
    if device == 'cuda':
        model = nn.DataParallel(model)

    return model
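Hedged usage, mirroring the calls in Example #4: a frozen pretrained teacher next to a trainable student (the names must be keys of model_dict above):

teacher = load_model("ResNet152", trainable=False, device="cuda")
student = load_model("ResNet18")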
Example #10
trainset = torchvision.datasets.CIFAR10(root='./data',
                                        train=True,
                                        download=True,
                                        transform=tf)

trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True)

#print(len(dataset))
#print(dataset[0][0].size())
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck', 'fake')

auto_change_dir(args.folder)
check_folders()

dataiter = iter(trainloader)
images, labels = next(dataiter)  # .next() was removed from newer PyTorch loader iterators
print(images.size())
showImage(make_grid(images[0:64]))

# custom weights initialization called on netG and netD

gen = Generator().to(device)
gen.apply(weights_init)

disc = Discriminator().to(device)
disc.apply(weights_init)
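weights_init is applied above but not shown. A typical DCGAN-style initializer matching the comment is sketched below; this follows the usual convention and may differ from the project's actual implementation.

import torch.nn as nn

def weights_init(m):
    # Normal-initialize conv weights; BatchNorm gets unit-mean weights and zero bias.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)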