# Example #1
# 0
def main(args):
    """Train an Augerino-style model on rotated MNIST, logging aug widths.

    Saves the initial and final model state dicts under ``args.dir`` and
    writes a per-batch log (aug widths, width gradients, loss) to
    ``auglog_<aug_reg>.pkl``.

    Expected ``args`` attributes: ncopies, batch_size, wd, lr, aug_reg,
    dir, epochs.
    """
    net = smallnet(in_channels=1, num_targets=10)
    augerino = models.UniformAug()
    model = models.AugAveragedModel(net, augerino, ncopies=args.ncopies)

    # Start all widths strongly negative (softplus(-5) ~ 0, i.e. almost no
    # augmentation) except index 2, presumably the rotation dimension —
    # TODO confirm against UniformAug's width ordering.
    start_widths = torch.ones(6) * -5.
    start_widths[2] = 1.
    model.aug.set_width(start_widths)

    # Used only to report the effective (positive) widths when printing.
    softplus = torch.nn.Softplus()

    dataset = datasets.RotMNIST("~/datasets/", train=True)
    trainloader = DataLoader(dataset, batch_size=args.batch_size)

    # Two parameter groups: weight decay applies to the network only,
    # never to the augmentation widths.
    optimizer = torch.optim.Adam([{
        'name': 'model',
        'params': model.model.parameters(),
        "weight_decay": args.wd
    }, {
        'name': 'aug',
        'params': model.aug.parameters(),
        "weight_decay": 0.
    }],
                                 lr=args.lr)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()
        print("Using Cuda")

    ## save init model ##
    fname = "/model" + str(args.aug_reg) + "_init.pt"
    torch.save(model.state_dict(), args.dir + fname)

    criterion = losses.safe_unif_aug_loss
    logger = []
    for epoch in range(args.epochs):  # loop over the dataset multiple times

        epoch_loss = 0
        batches = 0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            if use_cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels, model, reg=args.aug_reg)
            loss.backward()
            optimizer.step()
            # .item() already detaches; the previous loss.detach().item()
            # was redundant.
            epoch_loss += loss.item()
            batches += 1
            print(epoch, loss.item(), softplus(model.aug.width).detach().data)
            # Log raw widths, their gradients, and the batch loss.
            log = model.aug.width.tolist()
            log += model.aug.width.grad.data.tolist()
            log += [loss.item()]
            logger.append(log)

        # Previously epoch_loss/batches were accumulated but never reported;
        # print the per-epoch mean like the other training scripts do.
        if batches:
            print("Epoch = ", epoch)
            print("Epoch loss = ", epoch_loss / batches)

    fname = "/model" + str(args.aug_reg) + ".pt"
    torch.save(model.state_dict(), args.dir + fname)
    df = pd.DataFrame(logger)
    df.to_pickle(args.dir + "/auglog_" + str(args.aug_reg) + ".pkl")
def main(args):
    """Train an Augerino-style model on CIFAR-10 and save the final weights.

    The checkpoint is written into ``args.dir`` as ``aug_reg<aug_reg>.pt``.

    Expected ``args`` attributes: ncopies, batch_size, wd, lr, aug_reg,
    dir, epochs.
    """
    import os

    net = models.layer13s(in_channels=3, num_targets=10)
    augerino = models.UniformAug()
    model = models.AugAveragedModel(net, augerino, ncopies=args.ncopies)

    transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.ToTensor()
    ])
    fname = "aug_reg" + str(args.aug_reg) + ".pt"

    dataset = torchvision.datasets.CIFAR10("/datasets/cifar10",
                                           train=True,
                                           download=False,
                                           transform=transform)
    trainloader = DataLoader(dataset, batch_size=args.batch_size)

    # Two parameter groups: weight decay applies to the network only,
    # never to the augmentation widths.
    optimizer = torch.optim.Adam([{
        'name': 'model',
        'params': model.model.parameters(),
        "weight_decay": args.wd
    }, {
        'name': 'aug',
        'params': model.aug.parameters(),
        "weight_decay": 0.
    }],
                                 lr=args.lr)

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()
        print("Using Cuda")

    criterion = losses.safe_unif_aug_loss

    for epoch in range(args.epochs):  # loop over the dataset multiple times

        epoch_loss = 0
        batches = 0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            if use_cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels, model, reg=args.aug_reg)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            batches += 1

        print("Epoch = ", epoch)
        # Guard against an empty loader; otherwise report the mean loss.
        if batches:
            print("Epoch loss = ", epoch_loss / batches)
        print("\n")

    # os.path.join instead of ``args.dir + fname``: the old concatenation
    # produced a wrong path whenever args.dir had no trailing slash
    # (fname carries no leading separator, unlike the other scripts here).
    torch.save(model.state_dict(), os.path.join(args.dir, fname))
# Example #3
# 0
def main(args):
    """Train an Augerino-style model on CIFAR-10, with or without fixed
    data augmentation, and save initial and final checkpoints.

    ``args.transform == 'none'`` trains on raw tensors; any other value
    enables a fixed flip/crop/normalize pipeline. Checkpoints are written
    into ``args.dir`` as ``<root>init.pt`` and ``<root>trained.pt``.

    Expected ``args`` attributes: ncopies, transform, data_dir, batch_size,
    wd, lr, aug_reg, dir, epochs.
    """
    import os

    net = models.layer13s(in_channels=3, num_targets=10)
    augerino = models.UniformAug()
    model = models.AugAveragedModel(net, augerino, ncopies=args.ncopies)
    if args.transform == 'none':
        transform = transforms.Compose([
            # you can add other transformations in this list
            transforms.ToTensor()
        ])
        root_name = "aug_no_trans_"
    else:
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Resize(32),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        root_name = "aug_fixed_trans_"

    dataset = torchvision.datasets.CIFAR10(args.data_dir, train=True, download=False,
                                           transform=transform)
    trainloader = DataLoader(dataset, batch_size=args.batch_size)

    # Two parameter groups: weight decay applies to the network only,
    # never to the augmentation widths.
    optimizer = torch.optim.Adam([{'name': 'model',
                                   'params': model.model.parameters(),
                                   "weight_decay": args.wd},
                                  {'name': 'aug',
                                   'params': model.aug.parameters(),
                                   "weight_decay": 0.}],
                                 lr=args.lr)

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()
        print("Using Cuda")

    ## save init model ##
    # os.path.join instead of ``args.dir + fname``: concatenation broke the
    # path whenever args.dir had no trailing slash.
    fname = root_name + "init.pt"
    torch.save(model.state_dict(), os.path.join(args.dir, fname))

    criterion = losses.safe_unif_aug_loss

    for epoch in range(args.epochs):  # loop over the dataset multiple times

        epoch_loss = 0
        batches = 0
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            if use_cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels, model, reg=args.aug_reg)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            batches += 1

        print("Epoch = ", epoch)
        # Guard against an empty loader; otherwise report the mean loss.
        if batches:
            print("Epoch loss = ", epoch_loss / batches)
        print("\n")

    # root_name already ends with "_"; the old ``root_name + "_trained.pt"``
    # produced a double underscore (e.g. "aug_no_trans__trained.pt"),
    # inconsistent with the "...init.pt" name above.
    fname = root_name + "trained.pt"
    torch.save(model.state_dict(), os.path.join(args.dir, fname))