from torchvision import datasets, transforms


# `FlattenedDataset` and `create_datasets` are project helpers defined elsewhere.
def dataset_mini_mnist(train_size=5000, val_size=1000, test_size=1000):
    # Data loading code
    normalize = transforms.Normalize(mean=(0.1307,), std=(0.3081,))
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    # Note: the train-val split will be performed in `create_datasets`
    dataset_train_ = datasets.MNIST(root="data_mnist", train=True, transform=transform, download=True)
    dataset_val_ = datasets.MNIST(root="data_mnist", train=True, transform=transform, download=True)
    dataset_test_ = datasets.MNIST(root="data_mnist", train=False, transform=transform, download=True)
    # We want flattened images
    dataset_train = FlattenedDataset(dataset_train_)
    dataset_val = FlattenedDataset(dataset_val_)
    dataset_test = FlattenedDataset(dataset_test_)
    return create_datasets(dataset_train, dataset_val, dataset_test,
                           train_size, val_size, test_size, True)
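# A minimal sketch of what a flattening wrapper such as `FlattenedDataset` could
# look like; this is an assumption for illustration, not the original helper.
import torch
from torch.utils.data import Dataset


class FlattenedDataset(Dataset):
    """Wraps an image dataset and returns each image as a flat 1-D tensor."""

    def __init__(self, base_dataset):
        self.base_dataset = base_dataset

    def __len__(self):
        return len(self.base_dataset)

    def __getitem__(self, idx):
        image, label = self.base_dataset[idx]
        return image.view(-1), label  # (1, 28, 28) -> (784,)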
# Constructor of a raw-MNIST data wrapper class
# (requires: from torchvision import datasets; from torch.utils.data import DataLoader).
def __init__(self, bs, device=None, n=None, flatten=False):
    self.img_shape = [1, 28, 28]
    # `.data` gives the raw uint8 pixel tensors, without labels or normalization.
    tr_data = datasets.MNIST(root='./data', download=True, train=True).data
    te_data = datasets.MNIST(root='./data', download=True, train=False).data
    # Flatten
    if flatten:
        tr_data = tr_data.reshape(-1, 784)
        te_data = te_data.reshape(-1, 784)
    else:
        tr_data = tr_data.view(-1, 1, 28, 28)
        te_data = te_data.view(-1, 1, 28, 28)
    # Move to target device
    tr_data = tr_data.to(device)
    te_data = te_data.to(device)
    # Optionally keep only the first n training examples
    if n:
        tr_data = tr_data[:n]
    self.train_data = tr_data
    self.test_data = te_data
    self.train_loader = DataLoader(self.train_data, batch_size=bs, shuffle=True)
    self.test_loader = DataLoader(self.test_data, batch_size=bs, shuffle=False)
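# Usage sketch (assumes the __init__ above belongs to a data wrapper class; the
# name `RawMNIST` is hypothetical). The loaders yield raw uint8 image tensors
# without labels, so a consumer typically rescales them:
#
#   data = RawMNIST(bs=64, device='cpu', flatten=True)
#   for batch in data.train_loader:
#       x = batch.float() / 255.0   # shape (64, 784), pixels scaled to [0, 1]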
def load_base_dataset(args):
    if args.dataset == 'synthetic':
        base_dataset, test_dataset = generate_synthetic_dataset(args)
    elif args.dataset == 'mnist':
        base_dataset = datasets.MNIST('./dataset/mnist', train=True, download=True)
        test_dataset = datasets.MNIST('./dataset/mnist', train=False, download=True)
    elif args.dataset == 'fmnist':
        base_dataset = datasets.FashionMNIST('./dataset/fmnist', train=True, download=True)
        test_dataset = datasets.FashionMNIST('./dataset/fmnist', train=False, download=True)
    elif args.dataset == 'svhn':
        base_dataset = datasets.SVHN('./dataset/svhn', split='train', download=True)
        test_dataset = datasets.SVHN('./dataset/svhn', split='test', download=True)
    elif args.dataset == 'cifar10':
        base_dataset = datasets.CIFAR10('./dataset/cifar10', train=True, download=True)
        test_dataset = datasets.CIFAR10('./dataset/cifar10', train=False, download=True)
    elif args.dataset == 'stl10':
        base_dataset = datasets.STL10('./dataset/stl10', split='train', download=True)
        test_dataset = datasets.STL10('./dataset/stl10', split='test', download=True)
    elif args.dataset == 'lsun':
        train_transform = get_transform(args.image_size, args.train_transform)
        test_transform = get_transform(args.image_size, args.test_transform)
        base_dataset = datasets.LSUN('./dataset/lsun', classes='val', transform=train_transform)
        test_dataset = datasets.LSUN('./dataset/lsun', classes='val', transform=test_transform)
    else:
        raise NotImplementedError
    return base_dataset, test_dataset
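# Usage sketch: `load_base_dataset` only needs an object exposing the attributes
# it reads, so an argparse.Namespace is enough for a quick check (the values
# below are illustrative, not from the original code).
from argparse import Namespace
from torchvision import datasets

args = Namespace(dataset='mnist', image_size=28,
                 train_transform=None, test_transform=None)
base_dataset, test_dataset = load_base_dataset(args)
print(len(base_dataset), len(test_dataset))  # 60000 10000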
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
from torchvision import datasets
import matplotlib.pyplot as plt

torch.manual_seed(1)

batch_size = 128
learning_rate = 0.01
num_epochs = 10

train_dataset = datasets.MNIST(root='F:/数据/data', train=True,
                               transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='F:/数据/data', train=False,
                              transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        # Encoder: 784 -> 1000 -> 500 -> 250 -> 2 (2-D latent code)
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 1000), nn.ReLU(True),
                                     nn.Linear(1000, 500), nn.ReLU(True),
                                     nn.Linear(500, 250), nn.ReLU(True),
                                     nn.Linear(250, 2))
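# The snippet stops after the encoder. A symmetric decoder plus forward pass is
# the usual way such an autoencoder is completed; the class below is an assumed
# completion for illustration (hypothetical name), not the original code.
class SketchAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 1000), nn.ReLU(True),
                                     nn.Linear(1000, 500), nn.ReLU(True),
                                     nn.Linear(500, 250), nn.ReLU(True),
                                     nn.Linear(250, 2))
        # Mirror of the encoder, mapping the 2-D code back to a 784-D image in [0, 1].
        self.decoder = nn.Sequential(nn.Linear(2, 250), nn.ReLU(True),
                                     nn.Linear(250, 500), nn.ReLU(True),
                                     nn.Linear(500, 1000), nn.ReLU(True),
                                     nn.Linear(1000, 28 * 28), nn.Sigmoid())

    def forward(self, x):
        code = self.encoder(x)
        return self.decoder(code)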
import torch
from torchvision import datasets, transforms

enable_CUDA = True  # assumed flag; not defined in the original snippet

torch.cuda.device(0)  # note: this is a context manager and has no effect as a bare call
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True  # comment this out for smaller convnets

# Batch size
B = 40

# Load MNIST dataset
kwargs = {'num_workers': 1, 'pin_memory': True} if (enable_CUDA & torch.cuda.is_available()) else {}

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data/MNIST', train=True, download=True,
                   transform=transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=32, shuffle=True, **kwargs)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data/MNIST', train=False,
                   transform=transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=32, shuffle=True, **kwargs)
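# Usage sketch: with CUDA configured above, batches are moved to the GPU inside
# the training loop; the loop body is a placeholder, not part of the original snippet.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for images, labels in train_loader:
    images = images.to(device, non_blocking=True)  # pin_memory=True makes this transfer asynchronous
    labels = labels.to(device, non_blocking=True)
    # ... forward / backward pass would go here ...
    break  # single batch shown for illustration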