# Example 1
# Build the checkpoint path from architecture, dataset name, and class count,
# e.g. "<arch><dataset>_<num_class>.pt" inside the checkpoint directory.
filename = arch + args.dataset + '_' + str(args.num_class)
checkpoint_filename = os.path.join(args.checkpoint, filename + '.pt')

model = ResNet(num_classes=args.num_class)
# NOTE: `size_average=True` was deprecated (and later removed) in PyTorch;
# `reduction='mean'` is the exact modern equivalent (mean over the batch).
criterion = torch.nn.CrossEntropyLoss(reduction='mean')
# Per-sample weighted cross-entropy that sums (rather than averages) losses.
weight_criterion = CE(aggregate='sum')

use_gpu = torch.cuda.is_available()

if use_gpu:
    model = model.cuda()
    criterion = criterion.cuda()
    weight_criterion.cuda()
    # Seed CUDA RNG for reproducibility of GPU-side randomness.
    torch.cuda.manual_seed(args.seed)

optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

# Build per-epoch schedules for Adam: a constant learning rate for the first
# `epoch_decay_start` epochs, then a linear decay to 0 at epoch `n_epoch`,
# with beta1 dropped from `mom1` to `mom2` once decay begins.
n_epoch = 200
epoch_decay_start = 80
learning_rate = 1e-3
mom1 = 0.9
mom2 = 0.1

decay_span = n_epoch - epoch_decay_start
alpha_plan = []
beta1_plan = []
for epoch in range(n_epoch):
    if epoch < epoch_decay_start:
        # Warm phase: full learning rate, default momentum.
        alpha_plan.append(learning_rate)
        beta1_plan.append(mom1)
    else:
        # Decay phase: lr shrinks linearly toward 0; beta1 is reduced.
        alpha_plan.append(float(n_epoch - epoch) / decay_span * learning_rate)
        beta1_plan.append(mom2)

# Example 2
def main():
    """Train a ResNet on CIFAR-10 under several symmetric label-noise rates.

    For each noise rate in {0.2, 0.5, 0.8}: build a fresh model/optimizer,
    load CIFAR-10, corrupt the training labels symmetrically at that rate,
    wrap the arrays in DataLoaders, and hand everything to ``train``.

    Relies on module-level ``args``, ``use_gpu``, ``Cifar10``, ``GN``, ``DT``
    and ``train`` being defined elsewhere in this file.
    """
    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])

    transforms_test = transforms.Compose([transforms.ToTensor()])

    mode = {'train': True, 'test': True}

    # Symmetric noise rates to sweep over.
    rate = np.squeeze([0.2, 0.5, 0.8])

    # NOTE: loop variable renamed from `iter` — it shadowed the builtin iter().
    for idx in range(rate.size):

        # Fresh model (and optimizer) per noise rate so runs are independent.
        model = ResNet(num_classes=args.num_class)
        if use_gpu:
            model = model.cuda()
            model = torch.nn.DataParallel(model)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

        # Raw datasets; transforms are applied later via the DT wrappers,
        # so transform=None here is intentional.
        image_datasets = {
            'train':
            Cifar10(root='./datasets',
                    train=True,
                    transform=None,
                    download=True),
            'test':
            Cifar10(root='./datasets',
                    train=False,
                    transform=None,
                    download=True)
        }

        trainData = image_datasets['train'].train_data
        trainLabel = image_datasets['train'].train_labels

        testData = image_datasets['test'].test_data
        testLabel = image_datasets['test'].test_labels

        # Keep an uncorrupted copy for evaluating label-recovery later.
        true_label = np.squeeze(trainLabel).copy()

        # Inject symmetric label noise at the current rate.
        trainLabel, actual_noise_rate = GN.noisify(
            nb_classes=args.num_class,
            train_labels=np.squeeze(trainLabel),
            noise_type='symmetric',
            noise_rate=rate[idx])

        trainData = np.array(trainData)
        trainLabel = np.squeeze(trainLabel)

        testData = np.array(testData)
        testLabel = np.squeeze(testLabel)

        # Same training data with/without augmentation: augmented for
        # optimization, un-augmented for evaluating on the train set.
        train_data = DT(trainData=trainData,
                        trainLabel=trainLabel,
                        transform=transforms_train)
        train_data_test = DT(trainData=trainData,
                             trainLabel=trainLabel,
                             transform=transforms_test)
        test_data = DT(trainData=testData,
                       trainLabel=testLabel,
                       transform=transforms_test)

        train_loader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers)
        train_loader_test = torch.utils.data.DataLoader(
            train_data_test,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.workers)
        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=args.workers)

        train(model, optimizer, train_loader, test_loader, train_loader_test,
              true_label, rate[idx])