def main():
    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    transforms_test = transforms.Compose([
        transforms.ToTensor()
    ])
    mode = {'train': True, 'test': True}
    # Pair-flip variant: rate holds candidate noise rates, but the loop below
    # runs only once, so only rate[0] = 0.45 is actually used.
    rate = np.squeeze([0.45, 0.4])
    for iter in range(1):
        # Cifar10, GN, DT, args and train() are provided by the surrounding project.
        image_datasets = {'train': Cifar10(root='./datasets', train=True, transform=None, download=True),
                          'test': Cifar10(root='./datasets', train=False, transform=None, download=True)}
        trainData = image_datasets['train'].train_data
        trainLabel = image_datasets['train'].train_labels
        testData = image_datasets['test'].test_data
        testLabel = image_datasets['test'].test_labels
        # Keep a copy of the clean labels before injecting pair-flip noise.
        true_label = np.squeeze(trainLabel).copy()
        trainLabel, actual_noise_rate = GN.noisify(nb_classes=args.num_class,
                                                   train_labels=np.squeeze(trainLabel),
                                                   noise_type='pairflip',
                                                   noise_rate=rate[iter])
        trainData = np.array(trainData)
        trainLabel = np.squeeze(trainLabel)
        testData = np.array(testData)
        testLabel = np.squeeze(testLabel)
        train_data = DT(trainData=trainData, trainLabel=trainLabel, transform=transforms_train)
        train_data_test = DT(trainData=trainData, trainLabel=trainLabel, transform=transforms_test)
        test_data = DT(trainData=testData, trainLabel=testLabel, transform=transforms_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                                   shuffle=True, num_workers=args.workers)
        train_loader_test = torch.utils.data.DataLoader(train_data_test, batch_size=args.batch_size,
                                                        shuffle=False, num_workers=args.workers)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size,
                                                  shuffle=False, num_workers=args.workers)
        train(train_loader, test_loader, train_loader_test, true_label, rate[iter])
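# The GN.noisify call above relies on a noise-generation module that is not
# shown in this listing. As a rough, illustrative sketch only (the function
# name, signature, and seeding below are assumptions, not the project's actual
# implementation), pair-flip noise can be produced by flipping each label to
# the "next" class with probability equal to the requested noise rate:
import numpy as np

def pairflip_noisify_sketch(train_labels, noise_rate, nb_classes, seed=0):
    """Flip label c to (c + 1) % nb_classes with probability `noise_rate`."""
    rng = np.random.RandomState(seed)
    labels = np.asarray(train_labels)
    noisy = labels.copy()
    flip_mask = rng.rand(labels.shape[0]) < noise_rate
    noisy[flip_mask] = (noisy[flip_mask] + 1) % nb_classes
    actual_noise_rate = float(np.mean(noisy != labels))
    return noisy, actual_noise_rate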
def main():
    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    transforms_test = transforms.Compose([transforms.ToTensor()])
    mode = {'train': True, 'test': True}
    # Symmetric-noise variant: run the full experiment once per noise rate.
    rate = np.squeeze([0.2, 0.5, 0.8])
    for iter in range(rate.size):
        # Fresh model and optimizer for every noise rate.
        model = ResNet(num_classes=args.num_class)
        if use_gpu:
            model = model.cuda()
            model = torch.nn.DataParallel(model)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
        image_datasets = {
            'train': Cifar10(root='./datasets', train=True, transform=None, download=True),
            'test': Cifar10(root='./datasets', train=False, transform=None, download=True)
        }
        trainData = image_datasets['train'].train_data
        trainLabel = image_datasets['train'].train_labels
        testData = image_datasets['test'].test_data
        testLabel = image_datasets['test'].test_labels
        # Keep a copy of the clean labels before injecting symmetric noise.
        true_label = np.squeeze(trainLabel).copy()
        trainLabel, actual_noise_rate = GN.noisify(
            nb_classes=args.num_class,
            train_labels=np.squeeze(trainLabel),
            noise_type='symmetric',
            noise_rate=rate[iter])
        trainData = np.array(trainData)
        trainLabel = np.squeeze(trainLabel)
        testData = np.array(testData)
        testLabel = np.squeeze(testLabel)
        train_data = DT(trainData=trainData, trainLabel=trainLabel, transform=transforms_train)
        train_data_test = DT(trainData=trainData, trainLabel=trainLabel, transform=transforms_test)
        test_data = DT(trainData=testData, trainLabel=testLabel, transform=transforms_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                                   shuffle=True, num_workers=args.workers)
        train_loader_test = torch.utils.data.DataLoader(train_data_test, batch_size=args.batch_size,
                                                        shuffle=False, num_workers=args.workers)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size,
                                                  shuffle=False, num_workers=args.workers)
        train(model, optimizer, train_loader, test_loader, train_loader_test, true_label, rate[iter])
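# Both variants wrap the in-memory (data, label) arrays in a custom dataset
# class DT before handing them to DataLoader. DT is defined elsewhere in the
# project; a minimal sketch of such a wrapper, assuming CIFAR-10 images stored
# as a uint8 array of shape (N, 32, 32, 3) (the class name DTSketch and its
# internals are assumptions, not the project's actual code), could look like:
import numpy as np
from PIL import Image
from torch.utils.data import Dataset

class DTSketch(Dataset):
    """Dataset over in-memory image/label arrays with an optional transform."""
    def __init__(self, trainData, trainLabel, transform=None):
        self.data = trainData        # uint8 images, shape (N, 32, 32, 3)
        self.labels = np.asarray(trainLabel)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        if self.transform is not None:
            img = self.transform(img)
        return img, int(self.labels[index])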