def main(epochs):
    """Train a CapsuleNetwork on MNIST for `epochs` epochs, logging loss via tqdm.

    Args:
        epochs: number of full passes over the training set.
    """
    # BUG FIX: torch.cuda.is_available is a function; without the call the
    # bound method object is always truthy, so the CPU branch was unreachable.
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    trainset = mnist.MNIST(root="~/pytorch",
                           train=True,
                           download=True,
                           transform=transforms.ToTensor())
    # BUG FIX: the test split also needs ToTensor; without it the loader
    # would yield raw PIL images and fail to collate into batches.
    testset = mnist.MNIST(root="~/pytorch", train=False, download=True,
                          transform=transforms.ToTensor())
    trainloader = DataLoader(trainset, 32, shuffle=True)
    testloader = DataLoader(testset, 32)  # NOTE(review): unused in the loop below

    capsnet = CapsuleNetwork().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(capsnet.parameters(), lr=0.001)

    for epoch in range(epochs):
        pbar = tqdm(trainloader)
        for x, y in pbar:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            output = capsnet(x)
            loss = criterion(output, y)
            loss.backward()
            optimizer.step()

            pbar.set_description("Loss: {:0.4f}".format(loss.item()))
Example #2
0
def train_minst():
    """Run a training task on the local MNIST copies and persist the weights."""
    to_tensor = ToTensor()
    fit_set = mnist.MNIST(root='./minist_train', train=True,
                          download=False, transform=to_tensor)
    holdout_set = mnist.MNIST(root='./minst_test', train=False,
                              download=False, transform=to_tensor)
    # TrainTask returns an object exposing state_dict() (the trained model).
    model = train.TrainTask(fit_set, holdout_set, 50, 1)
    torch.save(model.state_dict(), "./minst.pt")
Example #3
0
 def __init__(self, transformer=None):
     """Load the MNIST train/test splits into ../data/.

     Args:
         transformer: optional torchvision transform; defaults to ToTensor
             followed by Normalize([0.5], [0.5]).
     """
     # BUG FIX: compare against None with `is`, not `==` (PEP 8; `==` can be
     # overridden by a custom __eq__ on the passed object).
     if transformer is None:
         transformer = Compose([ToTensor(), Normalize([0.5], [0.5])])
     self.training_data = mnist.MNIST("../data/",
                                      train=True,
                                      transform=transformer,
                                      download=True)
     self.test_data = mnist.MNIST("../data/",
                                  train=False,
                                  transform=transformer,
                                  download=True)
Example #4
0
def getDataLoader():
    """Return (train_loader, test_loader) over the local MNIST copy.

    Returns:
        tuple: shuffled train DataLoader (batch 64) and ordered test
        DataLoader (batch 128).
    """
    # BUG FIX: the training split was loaded with train=False, so both
    # loaders served the same 10k test images. The train set uses train=True.
    train_set = mnist.MNIST('../../data/mnist',
                            train=True,
                            transform=data_tf,
                            download=False)
    test_set = mnist.MNIST('../../data/mnist',
                           train=False,
                           transform=data_tf,
                           download=False)
    train_data = DataLoader(train_set, batch_size=64, shuffle=True)
    test_data = DataLoader(test_set, batch_size=128, shuffle=False)
    return train_data, test_data
Example #5
0
    def _get_data_iter(self):
        """Build MNIST datasets plus train / train-eval / valid / test loaders."""
        pipeline = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
        full_train = mnist.MNIST('./data', train=True, download=True, transform=pipeline)
        data_test = mnist.MNIST('./data', train=False, download=True, transform=pipeline)

        # Carve a validation subset out of the training data.
        valid_size = int(self.ct.validation_split * len(full_train))
        train_size = len(full_train) - valid_size
        data_train, data_valid = torch.utils.data.random_split(full_train, [train_size, valid_size])

        train_bs = self.args.train_batch_size
        eval_bs = self.args.eval_batch_size
        iter_train = DataLoader(data_train, batch_size=train_bs, shuffle=False, sampler=RandomSampler(data_train))
        iter_train_eval = DataLoader(data_train, batch_size=eval_bs, shuffle=False, sampler=SequentialSampler(data_train))
        iter_valid = DataLoader(data_valid, batch_size=eval_bs, shuffle=False, sampler=SequentialSampler(data_valid))
        iter_test = DataLoader(data_test, batch_size=eval_bs, shuffle=False, sampler=SequentialSampler(data_test))

        return data_train, data_valid, data_test, iter_train, iter_train_eval, iter_valid, iter_test
Example #6
0
def get_data_loaders():
    """Return (train_loader, val_loader) over flattened MNIST with one-hot targets."""
    flatten = transforms.Lambda(lambda x: x.view(-1))
    img_transform = transforms.Compose([transforms.ToTensor(), flatten])

    train_dset = mnist.MNIST(root='./mnist_data', train=True,
                             download=True,
                             transform=img_transform,
                             target_transform=OneHot(10))
    val_dset = mnist.MNIST(root='./mnist_data', train=False,
                           download=True,
                           transform=img_transform,
                           target_transform=OneHot(10))

    train_loader = DataLoader(train_dset, batch_size=BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dset, batch_size=BATCH_SIZE, shuffle=False)
    return train_loader, val_loader
Example #7
0
def load_train_mnist(args):
    """
    load mnist dataset for training
    :param args: hyper parameter
    :return: (train_queue, valid_queue) DataLoaders split by args.train_portion
    """
    # data preprocessing
    train_transform, _ = _data_transforms_mnist()

    # load data
    train_data = mnist.MNIST(root=args.dataset_path,
                             train=True,
                             download=True,
                             transform=train_transform)

    # index at which the training portion ends and validation begins
    indices = list(range(len(train_data)))
    split = int(np.floor(args.train_portion * len(indices)))

    sampler_cls = torch.utils.data.sampler.SubsetRandomSampler
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=sampler_cls(indices[:split]),
        pin_memory=True,
        num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=sampler_cls(indices[split:]),
        pin_memory=True,
        num_workers=2)

    return train_queue, valid_queue
Example #8
0
def MNIST(data_path, split, *args, **kwargs):
    """Return MNIST resized to 32x32, normalized, and replicated to 3 channels.

    `split == 'train'` selects the training set; any other value the test set.
    Extra *args/**kwargs are accepted for interface compatibility but unused.
    """
    steps = [
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
    ]
    return mnist.MNIST(data_path, split == 'train', transforms.Compose(steps), download=True)
Example #9
0
def testMinst():
    """Evaluate the weights saved in ./minst.pt on the MNIST test split."""
    holdout = mnist.MNIST("minst_test", train=False, transform=ToTensor())

    minst_model = model.Model()
    minst_model.load_state_dict(torch.load("./minst.pt"))  # restore trained weights
    test_loader = DataLoader(holdout, batch_size=256)
    val(test_loader, minst_model)
Example #10
0
def get_data_loader(dataset_name='MNIST',
                    batch_size=32,
                    train=True,
                    transform=transforms.ToTensor()):
    """Return a shuffled DataLoader over one of the supported datasets.

    Args:
        dataset_name: 'MNIST', 'CIFAR10', 'CIFAR2' or 'CIFAR5'.
        batch_size: samples per batch.
        train: select the training split (True) or the test split (False).
        transform: transform applied to MNIST/CIFAR10 samples (default kept
            as a shared instance for interface compatibility).

    Raises:
        ValueError: if dataset_name is not one of the supported names.
    """
    if dataset_name == 'MNIST':
        dataset = mnist.MNIST('./mnist_dataset',
                              train=train,
                              download=False,
                              transform=transform)
    elif dataset_name == 'CIFAR10':
        dataset = CIFAR10(root='./cifar10_dataset',
                          train=True if train else False,
                          download=False,
                          transform=transform)
    elif dataset_name in ('CIFAR2', 'CIFAR5'):
        # The pre-split tensors live next to the CIFAR10 download.
        suffix = '' if train else '_test'
        data, label = torch.load('./cifar10_dataset/data/{}{}.pt'.format(dataset_name, suffix))
        dataset = Data.TensorDataset(data, label)
    else:
        # BUG FIX: the original did `raise ('dataset is None')`, which raises
        # TypeError (a str is not an exception); raise a proper ValueError.
        raise ValueError('dataset is None')
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)
Example #11
0
    def __init__(self, img_shape=(1024, 1024, 3), img_nums=100, digit_nums=(2, 4), transform=None,
                 data_root='', data_set='train', download=False, data_transform=None):
        """Dataset that composes large images out of MNIST digits.

        Args:
            img_shape: (H, W, C) of the generated composite images.
            img_nums: number of composite images.
            digit_nums: (min, max) digits placed per image.
            transform: transform applied to the composite image.
            data_root: root directory for the MNIST data.
            data_set: 'train' or 'test' split selector.
            download: forwarded to MNIST's download flag.
            data_transform: per-digit transform forwarded to MNIST.
        """
        # BUG FIX: torchvision's MNIST takes a boolean `train` as its second
        # positional argument. Passing the raw string meant any non-empty
        # value -- including 'test' -- was truthy and selected the train split.
        self.mnist = mnist.MNIST(data_root, data_set == 'train', data_transform, None, download)
        self.img_shape = img_shape
        self.img_nums = img_nums
        self.digit_nums = digit_nums
        self.transform = transform
        self.index_list = []
Example #12
0
def main(args):
    """Train the selected model on MNIST and print test accuracy per epoch.

    Args:
        args: namespace with model, lr, momentum, weight_decay, batch_size,
            epochs attributes.
    """
    normalize = transforms.Normalize(
        mean=[0.131], std=[0.308])
    train_dataset = mnist.MNIST(root='../data', train=True, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ]))
    test_dataset = mnist.MNIST(root='../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ]))
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size)
    model = model_select(args.model)
    sgd = SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    cross_error = CrossEntropyLoss()

    for _epoch in range(args.epochs):
        print("Epoch {}/{}".format(_epoch, args.epochs))
        print("-" * 10)
        for idx, (train_x, train_label) in enumerate(train_loader):
            # BUG FIX: removed the unused `label_np = np.zeros(...)` scratch
            # array that was allocated every iteration and never read.
            sgd.zero_grad()
            predict_y = model(train_x.float())
            _error = cross_error(predict_y, train_label.long())
            if idx % 100 == 0:
                print('idx: {}, _error: {}'.format(idx, _error))
            _error.backward()
            sgd.step()

        correct = 0
        _sum = 0

        # Evaluate on the held-out test set (also dropped an unused label_np).
        for idx, (test_x, test_label) in enumerate(test_loader):
            predict_y = model(test_x.float()).detach()
            predict_ys = np.argmax(predict_y, axis=-1)
            hits = predict_ys == test_label
            correct += np.sum(hits.numpy(), axis=-1)
            _sum += hits.shape[0]

        print('test accuracy: {:.2f}'.format(correct / _sum))
Example #13
0
def get_iterators(batch_size):
    """Return (train_iter, test_iter) DataLoaders over MNIST at MNIST_ROOT."""
    to_tensor = transforms.ToTensor()
    trainset = mnist.MNIST(root=MNIST_ROOT, train=True, download=True,
                           transform=to_tensor)
    testset = mnist.MNIST(root=MNIST_ROOT, train=False, transform=to_tensor)

    # NOTE: the test iterator is shuffled too, matching the original behavior.
    loader_args = dict(batch_size=batch_size, shuffle=True, num_workers=1)
    train_iter = torch.utils.data.DataLoader(trainset, **loader_args)
    test_iter = torch.utils.data.DataLoader(testset, **loader_args)

    return train_iter, test_iter
Example #14
0
 def load(self):
     """Create MNIST train/test datasets plus the model, optimizer and loss."""
     pipeline = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize((0.13066062, ), (0.30810776, )),
     ])
     self.mnist_dataset = mnist.MNIST(root=self.datasets_path,
                                      download=True,
                                      train=True,
                                      transform=pipeline)
     self.test_dataset = mnist.MNIST(root=self.datasets_path,
                                     download=True,
                                     train=False,
                                     transform=pipeline)
     self.mnist_model = Net()
     self.mnist_optim = optim.SGD(self.mnist_model.parameters(), lr=0.01)
     self.criterion = nn.CrossEntropyLoss()
Example #15
0
 def load_data_mnist(self, batch_size=128):
     '''
     Returns a nested structure of tensors based on MNIST database.
     Will be divided into (60000/batch_size) batches of (batch_size) each.
     '''
     pipeline = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize([0.5], [0.5]),
     ])
     mnist_data = mnist.MNIST(root='./data/mnist',
                              train=True,
                              download=True,
                              transform=pipeline)
     return DataLoader(mnist_data, batch_size=batch_size, shuffle=True)
Example #16
0
def get_mnist_loader(batch_size=16, num_workers=0, train=True):
    """Build and return a shuffled MNIST loader (images resized to 32x32)."""
    pipeline = T.Compose([T.Resize(32), T.ToTensor()])

    dataset = mnist.MNIST("~/datasets/MNIST",
                          transform=pipeline,
                          train=train)

    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=num_workers,
        pin_memory=torch.cuda.is_available(),
    )
Example #17
0
def MNIST(val_split=0.2, path=None, **kwargs):
    r"""The MNIST Dataset.

    Args:
        val_split (float): The fraction of training data to hold out
            as validation if validation set is not given. Default: ``0.2``
        path (pathlib.Path or str): The path to save the dataset to.
            Default: Magnet Datapath

    Keyword Args:
        (): See ``Data`` for more details.
    """
    from torchvision.datasets import mnist

    if path is None:
        path = data.DIR_DATA

    dataset = {}
    for mode in ('train', 'test'):
        dataset[mode] = mnist.MNIST(path, train=(mode == 'train'), download=True)
    transforms = kwargs.pop('transforms', image_transforms())
    return data.Data(**dataset, val_split=val_split, transforms=transforms)
Example #18
0
def load_test_mnist(args):
    """
    load mnist dataset for testing
    :param args: hyper parameter
    :return: (test_queue DataLoader, number of test samples)
    """
    # data preprocessing (use the evaluation transform only)
    _, valid_transform = _data_transforms_mnist()

    test_data = mnist.MNIST(root=args.dataset_path,
                            train=False,
                            download=True,
                            transform=valid_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    return test_queue, len(test_data)
Example #19
0
# Dataset preprocessing pipeline
data_tf = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),  # convert the raw data to a Tensor
    torchvision.transforms.Normalize([0.5], [0.5])  # normalize with mean and standard deviation
])

# Hyperparameters
EPOCH = 10  # how many passes over the full training set
BATCH_SIZE = 128
LR = 0.001  # learning rate
DOWNLOAD_MNIST = False  # whether the data still needs to be downloaded

# Load the datasets
data_path = r'./mnist'
train_data = mnist.MNIST(data_path,
                         train=True,
                         transform=data_tf,
                         download=DOWNLOAD_MNIST)
test_data = mnist.MNIST(data_path,
                        train=False,
                        transform=data_tf,
                        download=DOWNLOAD_MNIST)

# Wrap the datasets in loaders
train_loader = data.DataLoader(train_data, batch_size=BATCH_SIZE,
                               shuffle=True)  # 60000/128 = 469 batches in total
test_loader = data.DataLoader(
    test_data, batch_size=BATCH_SIZE,
    shuffle=False)  # 10000/128 = 79 batches; no shuffling needed for the test set


# Define the CNN architecture
Example #20
0
learning_rate = 0.01  # optimizer step size
num_epoches = 20  # number of passes over the training set


#定义数据预处理函数,Compose将预处理函数进行组合
def data_tf(x):
    """Convert an image (array-like) to a normalized 1x28x28 float32 tensor."""
    arr = np.array(x, dtype='float32') / 255  # scale pixels to [0, 1]
    arr = (arr - 0.5) / 0.5  # standardize to [-1, 1]
    shaped = np.reshape(arr, [1, 28, 28])
    return torch.from_numpy(shaped)


# Download the MNIST dataset
train_set = mnist.MNIST('C:\\Users\\T\\Desktop\\python\\data',
                        train=True,
                        transform=data_tf,
                        download=True)
test_set = mnist.MNIST('C:\\Users\\T\\Desktop\\python\\data',
                       train=False,
                       transform=data_tf,
                       download=True)

# Load the data; shuffle=True reshuffles the training data every epoch
# NOTE(review): batch_size is not defined in this snippet -- confirm it is set elsewhere
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)


#定义卷积神经网络
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
Example #21
0
import numpy as np
import torch
from torchvision.datasets import mnist  # 导入 pytorch 内置的 mnist 数据
from PIL import Image
from torch import nn
from torch.autograd import Variable

# Download the MNIST dataset with the built-in helper
train_set = mnist.MNIST('./data', train=True, download=True)
test_set = mnist.MNIST('./data', train=False, download=True)
a_data, a_label = train_set[0]  # first sample: (PIL image, int label)

a_data = np.array(a_data, dtype='float32')  # convert the PIL image to an ndarray


def data_tf(x):
    """Standardize to [-1, 1] and flatten into a 1-D float32 tensor."""
    arr = np.array(x, dtype='float32') / 255
    arr = (arr - 0.5) / 0.5  # standardize
    flat = arr.reshape((-1, ))  # flatten
    return torch.from_numpy(flat)


train_set = mnist.MNIST('./data', train=True, transform=data_tf,
                        download=True)  # reload the dataset with the transform declared above
test_set = mnist.MNIST('./data', train=False, transform=data_tf, download=True)

from torch.utils.data import DataLoader
# Use PyTorch's built-in DataLoader to define data iterators
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)
Example #22
0
        out = self.pool2(out)
        out = out.view(out.shape[0], -1)
        out = self.fc1(out)
        out = self.relu3(out)
        out = self.fc2(out)
        out = self.relu4(out)
        out = self.fc3(out)
        out = self.relu5(out)
        return out


###########################################
if __name__ == "__main__":
    batch_size = 64
    mnist_train = mnist.MNIST(
        "./train", train=True, download=True, transform=ToTensor()
    )
    mnist_test = mnist.MNIST("./test", train=False, download=True, transform=ToTensor())
    train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
    net = Net()
    # The cost function we used for logistic regression
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01)
    n_epochs = 938
    for epoch in range(1, n_epochs + 1):
        train_loss = 0.0
        for imgs, labels in train_loader:
            optimizer.zero_grad()
            outputs = net(imgs)
            train_loss = loss_fn(outputs, labels)
Example #23
0
import torch
from torch import nn
from torchvision.datasets import mnist
from torch.utils.data import DataLoader
from torchvision import transforms
from datetime import datetime

# NOTE(review): loaded without a transform here; re-created with one further down
train_set =mnist.MNIST('./data',train=True)
test_set  =mnist.MNIST('./data',train=False)
def get_acc(output, label):
    """Fraction of rows in `output` whose argmax equals the matching `label`."""
    batch = output.shape[0]
    # the row-wise maximum is the most likely class
    _, predicted = output.max(1)
    hits = (predicted == label).sum().float()
    return hits / batch
#def data_tf(x):
#	x=np.array(x,dtype='float32')
#	x=(x - 0.5) /0.5
#	x= x.reshape((-1,))
#	x=torch.from_numpy(x)
#	return x
# Preprocessing: resize to 224, convert to tensor, normalize to [-1, 1]
data_tf=transforms.Compose(
[transforms.Resize(224),# stretch to 224
 transforms.ToTensor(),
 transforms.Normalize([0.5],[0.5])
 
]
)
train_set =mnist.MNIST('./data',train=True,transform=data_tf,download=True)
test_set  =mnist.MNIST('./data',train=False,transform=data_tf,download=True)
train_data=DataLoader(train_set,batch_size=64,shuffle=True)
test_data =DataLoader(test_set,batch_size=128,shuffle=True)
            nn.Linear(in_features=512, out_features=128),  #bz*128
            nn.ReLU())
        self.fc2 = nn.Linear(in_features=128, out_features=10)

    def forward(self, x):
        """Run the conv stack, flatten, and return (features, logits).

        Returns:
            x: feature vector from self.fc (exposed for inspection elsewhere).
            y: class scores from self.fc2.
        """
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)
        y = self.fc2(x)
        return x, y


# Load the MNIST test split and restore the trained network for inspection.
test_set = mnist.MNIST('../../../Dataset/',
                       train=False,
                       transform=transforms.ToTensor(),
                       download=False)
test_loader = DataLoader(test_set, batch_size=1000, shuffle=False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net().to(device)  # NOTE(review): rebinds the class name `net` to an instance
net.load_state_dict(torch.load('model.pt'))
net.eval()  # switch to inference mode

number = 2  ## decide how many pictures you want
tn = 8  ## choose top-tn pictures that maximize the neuron's value
## show the semantic information that a single neuron consists
for i in range(number):
    iden = torch.eye(128)[i, :].to(device)
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
Example #25
0
import torchvision.datasets.mnist as mnist
import numpy as np
import matplotlib.pyplot as plt

download_root = './MNIST_DATASET'
train_dataset = mnist.MNIST(download_root,
                            transform=None,
                            train=True,
                            download=True)
# Scale raw pixels to [-0.5, 0.5] and flatten each image to a (784, 1) column.
imgs = np.add(np.divide(np.array(train_dataset.data), 255.0),
              -0.5).reshape(60000, 28 * 28, 1)


class Unit:
    """Abstract layer interface for a hand-rolled network.

    Subclasses override the no-op hooks below to implement real layers.
    """

    def __init__(self, n_in, n_out):
        self.input_container = None  # cache of the last input, for backward
        self.input_size = n_in
        self.output_size = n_out

    def forward(self, x):
        """Compute the layer output for `x`; no-op in the base class."""
        pass

    def backward(self, out_grad):
        """Propagate `out_grad` to this layer's inputs; no-op here."""
        pass

    def zero_grad(self):
        """Reset accumulated gradients; no-op here."""
        pass

    def feed(self, lr):
        """Apply a parameter update with learning rate `lr`; no-op here."""
        pass
Example #26
0
                                    self.mnist_optim,
                                    self.criterion,
                                    epoch=50,
                                    device=self.device,
                                    console_out="console.out")
        trainer.mount_dataset(self.mnist_dataset,
                              self.test_dataset,
                              batch_size=32)
        return trainer.train()


def print_result(group: TaskGroup):
    """Print a per-task table of train/validation accuracy for 20 tasks."""
    print("%2s %9s %9s" % ("ID", "train acc", " val acc "))
    for idx in range(20):
        result = group.get_task(idx).result
        print("%02d  %6.2f%%   %6.2f%%" %
              (idx, result["train_acc"], result["val_acc"]))


if __name__ == "__main__":
    # Download mnist datasets once up front so every task reuses the local copy
    mnist.MNIST(root=datasets_path, download=True)
    group = TaskGroup("mnist")
    # Register 20 independent MNIST training tasks
    for i in range(20):
        group.add_task(MnistTask(i, datasets_path))
    with FedFlow() as flow:
        flow.execute(group)

    print_result(group)
Example #27
0
import torchvision
import numpy as np
from torchvision.datasets import mnist,svhn

data_tf = torchvision.transforms.Compose(
    [
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.0],[1.0])  # identity normalization (mean 0, std 1)
    ]
)

root_path = './'


mnist_train_data = mnist.MNIST(root_path,train=True,transform=data_tf,download=True)
mnist_test_data = mnist.MNIST(root_path,train=False,transform=data_tf,download=True)

# split train set
mnist_cls_map = dict() # maps class label -> list of sample indices
for i in range(10):
    mnist_cls_map[i] = []
for i,c in enumerate(mnist_train_data.targets):
    mnist_cls_map[int(c.numpy())].append(i)

mnist_a_idx = []
mnist_b_idx = []
for i in range(10):
    num = len(mnist_cls_map[i])
    if num % 2 != 0:
        num -= 1
    #
Example #28
0
from torchvision.datasets import mnist  # 导入 pytorch 内置的 mnist 数据
from torch.utils.data import DataLoader
from torch import nn
from torch.autograd import Variable


def data_tf(x):
    """Normalize to [-1, 1] and flatten into a 1-D float32 tensor."""
    arr = np.array(x, dtype='float32') / 255
    arr = arr / 0.5 - 1  # equivalent to (arr - 0.5) / 0.5
    flat = arr.reshape((-1, ))
    return torch.from_numpy(flat)


train_set = mnist.MNIST('../../data/mnist',
                        train=True,
                        transform=data_tf,
                        download=0)  # download=0 is falsy, i.e. do not download
test_set = mnist.MNIST('../../data/mnist',
                       train=False,
                       transform=data_tf,
                       download=0)

train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)

# Four-layer fully-connected classifier over flattened 28x28 inputs
net = nn.Sequential(nn.Linear(784, 400), nn.ReLU(), nn.Linear(400, 200),
                    nn.ReLU(), nn.Linear(200, 100), nn.ReLU(),
                    nn.Linear(100, 10))

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), 1e-1)  # stochastic gradient descent, learning rate 0.1
from torchvision.datasets import mnist
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable

# NOTE(review): no transform here, so samples are PIL images; the DataLoaders
# further down would fail to collate them unless a transform is applied elsewhere.
train_set = mnist.MNIST('./data', train=True)
test_set = mnist.MNIST('./data', train=False)


def data_tf(x):
    """Scale pixels to [0, 1] and flatten into a 1-D float32 tensor."""
    arr = np.array(x, dtype='float32') / 255
    flat = arr.reshape((-1, ))  # reshape to one dimension
    return torch.from_numpy(flat)


from torch.utils.data import DataLoader

train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)
a, a_label = next(iter(train_data))  # peek at the first batch

# Simple two-layer MLP over flattened 28x28 images
net = nn.Sequential(nn.Linear(28 * 28, 300), nn.ReLU(), nn.Linear(300, 10))

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters())

# Per-epoch metric histories
losses = []
acces = []
eval_losses = []
import numpy as np
import torch
from torchvision.datasets import mnist

from torch import nn
from torch.autograd import Variable
import torchvision

#下载mnist数据集,因为我已经下载了,所以直接download=True
train_set = mnist.MNIST('./data', train=True, download=False)

test_set = mnist.MNIST('./data', train=False, download=False)

a_data, a_label = train_set[0]

#这时候的a_data是PIL库中的格式,我们需要把它转成ndarray的格式
a_data = np.array(a_data)
#print(a_data)
#输出如下,是每个元素 0~256 的28*28的图

#[[  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   3.  18.  18.  18. 126. 136. 175.  26. 166. 255. 247. 127.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.  30.  36.  94. 154. 170. 253. 253. 253. 253. 253. 225. 172. 253. 242. 195.  64.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.  49. 238. 253. 253. 253. 253. 253. 253. 253. 253. 251.  93.  82.  82.  56.  39.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.  18. 219. 253. 253. 253. 253. 253. 198. 182. 247. 241.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.  80. 156. 107. 253. 253. 205.  11.   0.  43. 154.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]
# [  0.   0.   0.   0.   0.   0.   0.   0.   0.  14.   1. 154. 253.  90.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.   0.]