Code example #1
#
# For single channel images, don't forget the trailing commas as seen below.
#
# `transforms.Normalize((0.5,),(0.5,))`
#
# We normalize the MNIST images in this manner so that pixel values lie between -1 and 1 (the transform computes (x - 0.5) / 0.5). This matches the output range of a tanh when predicting pixel values.
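#
# As a quick sanity check (a sketch, assuming `torch` and
# `torchvision.transforms` are imported as in the cells below):
#
# `transforms.Normalize((0.5,), (0.5,))(torch.tensor([[[0.0, 1.0]]]))`
# returns `tensor([[[-1., 1.]]])`, i.e. the mapping (x - 0.5) / 0.5.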

# In[5]:

batch_size = 128

my_transforms = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])

dataset = MNIST('../datasets', transform=my_transforms, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Let's check out the data

# In[6]:

print('Size of dataset: ', len(dataset))
idx = np.random.choice(len(dataset))
x, y = dataset[idx]

plt.figure()
plt.imshow(x.squeeze(), cmap='gray')  # show the single-channel digit in grayscale
plt.title('idx = {}, Label = {}'.format(idx, y))

# The `to_img` method inverts what `Normalize` above does to an MNIST image.
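
# A minimal sketch of such an inverse (hypothetical stand-in; the real
# `to_img` appears in code example #3 below):
#
# def to_img(x):
#     out = 0.5 * (x + 1)   # undo (x - 0.5) / 0.5: map [-1, 1] back to [0, 1]
#     return out.clamp(0, 1).view(-1, 1, 28, 28)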
Code example #2
    model_args = vars(model_args)
    model_args['loss_type'] = LOSS_DICT[model_args['loss_type']]
    if MODEL_DICT[train_args.model] == ModelType.Triplet:
        model = TripletNet(input_shape=input_shape,
                           lr=train_args.lr,
                           device=device,
                           **model_args)
    else:
        raise ValueError('Unsupported model: {}'.format(train_args.model))
    return model

if __name__ == "__main__":

    model_args, train_args, data_args = parse_args()

    # data loaders
    train_dataset = MNIST(os.getcwd(), 
                          train=True, 
                          download=True, 
                          transform=Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]))
    train_loader = DataLoader(train_dataset, batch_size=train_args.batch_size, shuffle=True)
    test_dataset = MNIST(os.getcwd(),
                         train=False,
                         download=True,
                         transform=Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]))
    test_loader = DataLoader(test_dataset, batch_size=train_args.batch_size, shuffle=False)

    # Initialize a trainer
    trainer = pl.Trainer(gpus=1, max_epochs=train_args.epochs, progress_bar_refresh_rate=20)
    device = (torch.device(f"cuda:{trainer.root_gpu}")
              if trainer.on_gpu and trainer.root_gpu is not None
              else torch.device('cpu'))

    # Init Model
    # get input shape
    x_train, _ = next(iter(train_loader))
Code example #3
    os.mkdir("./img")


def to_img(x):
    out = 0.5 * (x + 1)
    out = out.clamp(0, 1)
    out = out.view(-1, 1, 28, 28)
    return out


img_transformer = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])])

dataset = MNIST(root='./data/MNIST',
                train=True,
                transform=img_transformer,
                download=False)

dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)


# =======================Model=======================
class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = nn.Sequential(nn.Linear(img_size, 128), nn.ReLU(True),
                                     nn.Linear(128, 64), nn.ReLU(True),
                                     nn.Linear(64, 12), nn.ReLU(True),
                                     nn.Linear(12, 3))

        self.decoder = nn.Sequential(nn.Linear(3, 12), nn.ReLU(True),
Code example #4
    plt.legend()
    plt.show()


def draw_accuracy_plot(train_accuracy, test_accuracy, epochs):
    plt.plot(epochs, train_accuracy, label="train")
    plt.plot(epochs, test_accuracy, label="test")
    plt.xlabel('epochs')
    plt.ylabel('accuracy')
    plt.title('training / test accuracy')
    plt.legend()
    plt.show()


if args.dataset == 'MNIST':
    train_data = MNIST('../data/MNIST', train=True, download=True)
elif args.dataset == 'EMNIST':
    train_data = EMNIST('../data/EMNIST',
                        split='balanced',
                        train=True,
                        download=True)  # 47 balanced classes
else:
    raise ValueError('Unsupported dataset: {}'.format(args.dataset))

all_labels = list(dict.fromkeys(train_data.targets.numpy()))  # .targets replaces the deprecated .train_labels
_train_data = {}
for label in all_labels:
    train_indexes = np.where(train_data.targets == label)[0]
    label_list = []
    for index in train_indexes:
        label_list.append(train_data[index][0])
    _train_data[label] = label_list
Code example #5
def prepare_data(self):
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (1.0,))])
    _ = MNIST(root=self.hparams.data_root, train=True,
              transform=transform, download=True)
Code example #6
        return self.decoder(z), mu, logvar


# Defining the loss function: reconstruction BCE plus a beta-weighted KL term.
def loss_function(preds, beta=1):
    x_hat, x, mu, logvar = preds
    # Reconstruction term: per-pixel binary cross-entropy, summed over the batch.
    BCE = nn.functional.binary_cross_entropy(x_hat, x.view(-1, 784), reduction="sum")
    # Closed-form KL(N(mu, sigma^2) || N(0, 1)), summed over batch and latent dims.
    KLD = 0.5 * torch.sum(logvar.exp() - logvar - 1 + mu.pow(2))

    return BCE + beta * KLD
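
# Usage sketch (hypothetical shapes): with x_hat and x of shape (N, 784) after
# a sigmoid decoder, and mu, logvar of shape (N, z_dim):
#
# loss = loss_function((x_hat, x, mu, logvar), beta=4)  # beta > 1 gives a beta-VAE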


if __name__ == "__main___":
    # Creating dataloaders.
    train_dataset = MNIST(
        root="./mnist_data/", train=True, transform=transforms.ToTensor(), download=True
    )
    test_dataset = MNIST(
        root="./mnist_data/", train=False, transform=transforms.ToTensor(), download=False
    )

    train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [50000, 10000])

    bs = 64
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=bs, shuffle=True)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=bs, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=bs, shuffle=False)

    # Defining config for experiment.
    config = ModelConfig(
        nn_module=VAE,
Code example #7
img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # single-channel mean/std for MNIST
])

transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(
        (0.5, ),
        (0.5, ))  # image = (image - mean) / std maps values into [-1, 1]
])

#trainset= MNIST(root='./data', download=True, transform=transform_train)

train_dataset = MNIST(root='./data', download=True, transform=transform_train)

valid_dataset = MNIST(root='./data',
                      train=False,
                      download=True,
                      transform=transform_train)

print(len(train_dataset))
print(len(valid_dataset))

#test_ds, valid_ds = torch.utils.data.random_split(train_dataset, (50000, 10000))

#print(len(test_ds))
#print(len(valid_ds))

#validation_set = torch.utils.data.random_split(trainset, 0.6)
Code example #8
def load_mnist(root=None, transform=None, target_transform=None, download=True):
    root = root or Path("~/.learner/dataset").expanduser()
    train_ds = MNIST(root, train=True, download=download, transform=transform, target_transform=target_transform)
    test_ds = MNIST(root, train=False, download=download, transform=transform, target_transform=target_transform)
    data = Data(train_ds, test_ds=test_ds, auto_split=True)
    return data
Code example #9
"""
from pathlib import Path

from torchvision import models
from torchvision.datasets import MNIST, CIFAR10

from notebooks import my_datasets
from notebooks.utils import ptitle

if __name__ == "__main__":
    ROOT = Path("data/raw")
    ROOT.mkdir(parents=True, exist_ok=True)

    ptitle("Downloading DogsCatsDataset")
    _ds = my_datasets.DogsCatsDataset(ROOT, "train", download=True)

    print()
    ptitle("Downloading MNIST")
    _ds = MNIST(ROOT, train=True, download=True)
    _ds = MNIST(ROOT, train=False, download=True)

    print()
    ptitle("Downloading CIFAR10")
    _ds = CIFAR10(ROOT, train=True, download=True)
    _ds = CIFAR10(ROOT, train=False, download=True)

    print()
    ptitle("Downloading models")
    _model = models.resnet18(pretrained=True)
    _model = models.squeezenet1_1(pretrained=True)
Code example #10
def main():
    MNIST(data_path, train=True, download=True)
Code example #11
def main():
    # Init DataLoader from MNIST Dataset
    dataset = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor())
    mnist_test = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
    mnist_train, mnist_val = random_split(dataset, [55000, 5000])
Code example #12
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torch import cuda
import numpy as np
import matplotlib.pyplot as plt

device = 'cuda' if cuda.is_available() else 'cpu'
batch_size = 64
mnist_train = MNIST('mnist', train=True, download=True, transform=ToTensor())
train_data_loader = T.utils.data.DataLoader(mnist_train,
                                            batch_size=batch_size,
                                            shuffle=True)
mnist_test = MNIST('mnist', train=False, download=True, transform=ToTensor())
test_data_loader = T.utils.data.DataLoader(mnist_test,
                                           batch_size=batch_size,
                                           shuffle=True)


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv3 = nn.Conv2d(20, 40, kernel_size=3)
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(160, 10)
Code example #13
def get_dl(hparams, ds_types='all'):
    """

    Args:
        hparams:
        ds_types: one of [train_val, all, test]

    Returns:

    """

    # Normalize ds_types to a tuple of split names.
    if ds_types == 'all':
        ds_types = ('train', 'val', 'test')
    elif ds_types == 'train_val':
        ds_types = ('train', 'val')
    elif ds_types == 'test':
        ds_types = ('test', )
    else:
        raise NotImplementedError

    train_dataset = None
    val_dataset = None
    test_dataset = None

    if hparams.dataset == 'mnist':

        dataset = MNIST('~/datasets/mnist',
                        train=True,
                        download=True,
                        transform=transforms.ToTensor())
        val_size = int(hparams.val_split * len(dataset))
        train_dataset, val_dataset = random_split(
            dataset, [len(dataset) - val_size, val_size])
        if 'test' in ds_types:
            test_dataset = MNIST('~/datasets/mnist',
                                 train=False,
                                 download=True,
                                 transform=transforms.ToTensor())
    elif hparams.dataset == 'cifar10':
        dataset = CIFAR10('~/datasets/cifar10',
                          train=True,
                          download=True,
                          transform=transforms.ToTensor())
        val_size = int(hparams.val_split * len(dataset))
        train_dataset, val_dataset = random_split(
            dataset, [len(dataset) - val_size, val_size])
        if 'test' in ds_types:
            test_dataset = CIFAR10('~/datasets/cifar10',
                                   train=False,
                                   download=True,
                                   transform=transforms.ToTensor())

    elif hparams.dataset == 'cifar100':
        dataset = CIFAR100('~/datasets/cifar100',
                           train=True,
                           download=True,
                           transform=transforms.ToTensor())
        val_size = int(hparams.val_split * len(dataset))
        train_dataset, val_dataset = random_split(
            dataset, [len(dataset) - val_size, val_size])
        if 'test' in ds_types:
            test_dataset = CIFAR100('~/datasets/cifar100',
                                    train=False,
                                    download=True,
                                    transform=transforms.ToTensor())
    else:
        raise NotImplementedError

    dataloaders = dict(
        train_dataloader=None if 'train' not in ds_types else DataLoader(
            train_dataset,
            batch_size=hparams.batch_size,
            shuffle=hparams.shuffle,
            num_workers=hparams.dl_num_workers),
        val_dataloaders=None if 'val' not in ds_types else DataLoader(
            val_dataset,
            batch_size=hparams.batch_size,
            shuffle=False,
            num_workers=hparams.dl_num_workers),
        test_dataloaders=None if 'test' not in ds_types else DataLoader(
            test_dataset,
            batch_size=hparams.batch_size,
            shuffle=False,
            num_workers=hparams.dl_num_workers),
    )

    return {k: v for k, v in dataloaders.items() if v is not None}
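
# Usage sketch (hypothetical hparams; any argparse.Namespace carrying these
# fields would do):
#
# hparams = argparse.Namespace(dataset='mnist', val_split=0.1, batch_size=64,
#                              shuffle=True, dl_num_workers=2)
# loaders = get_dl(hparams, ds_types='train_val')
# train_dl, val_dl = loaders['train_dataloader'], loaders['val_dataloaders']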
Code example #14
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor

from torchutils import BaseModel
from torchutils.utils import *
from torchutils.callbacks.callbacks import ModelCheckpoint
from torchutils.losses.losses import cross_entropy_focal_loss
from torchutils.metrics.metrics import accuraty

dataset = MNIST(root='data/', download=True, transform=ToTensor())
val_size = 10000
train_size = len(dataset) - val_size

train_ds, val_ds = random_split(dataset, [train_size, val_size])
print(len(train_ds), len(val_ds))

labels = [y for _, y in train_ds]  # a Subset exposes no .targets, so gather labels by iterating

batch_size = 128

train_loader = DataLoader(train_ds,
                          # batch_size and shuffle are supplied by the batch_sampler
                          batch_sampler=RandomBalancedSampler(list(range(len(labels))), labels, batch_size=batch_size),
                          num_workers=0, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size * 2, num_workers=4, pin_memory=True)
Code example #15
                        default=2)
    parser.add_argument(
        '--data_parallel',
        type=bool,
        help='whether to parallelise based on data (default: False)',
        default=False)

    args = parser.parse_args()

    # Define dataset
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: torch.bernoulli(x))
    ])
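    # torch.bernoulli samples each pixel to 0 or 1 with probability equal to
    # its intensity, i.e. dynamically binarized MNIST (a natural fit for the
    # Bernoulli likelihood used by DRAW below).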
    dataset = MNIST(root=args.data_dir,
                    train=True,
                    download=True,
                    transform=transform)

    # Create model and optimizer
    model = DRAW(x_dim=784, h_dim=256, z_dim=16, T=10).to(device)
    model = nn.DataParallel(model) if args.data_parallel else model
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=1e-3,
                                 betas=(0.5, 0.999))
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10, 0.5)

    # Load the dataset
    kwargs = {'num_workers': args.workers, 'pin_memory': True} if cuda else {}
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
Code example #16
File: LinearSVM.py  Project: SaulZhang/CS231n
    total = output.shape[0]
    _, pred_label = output.max(1)
    num_correct = (pred_label == label).sum().item()
    return num_correct / total


data_tf = tfs.Compose([
    tfs.ToTensor(),
    tfs.Normalize([0.5], [0.5])  # standardize
])

num_epoch = 10
batch_size = 64

train_set = MNIST('C:/Users/Jet Zhang/Desktop/pytorch/mnist',
                  train=True,
                  transform=data_tf)
test_set = MNIST('C:/Users/Jet Zhang/Desktop/pytorch/mnist',
                 train=False,
                 transform=data_tf)

train_data = DataLoader(train_set, batch_size, shuffle=True, num_workers=0)
test_data = DataLoader(test_set, batch_size * 2, shuffle=False, num_workers=0)

net = LinerSVM(10)

optimizer = torch.optim.Adadelta(net.parameters(), 1e-3)

criterion = multiClassHingeLoss()
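
# multiClassHingeLoss is defined earlier in this file (truncated here); a
# multi-class hinge loss typically averages sum_{j != y} max(0, 1 + s_j - s_y)
# over the batch.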

for epoch in range(num_epoch):
Code example #17
File: test_module.py  Project: ozen/ml_commons
def prepare_data(self):
    transform = self._get_transform()
    _ = MNIST(root=self.config['data_root'],
              train=True,
              transform=transform,
              download=True)
Code example #18
    def forward(self, x):
        return torch.relu(self.l1(x.view(x.size(0), -1)))

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log('train_loss', loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=PARAMS['learning_rate'])

# DataLoader
train_loader = DataLoader(MNIST(os.getcwd(), download=True, transform=transforms.ToTensor()),
                          batch_size=PARAMS['batch_size'])

# Step 4: Create NeptuneLogger

from pytorch_lightning.loggers.neptune import NeptuneLogger

neptune_logger = NeptuneLogger(
    api_key="ANONYMOUS",
    project_name="shared/pytorch-lightning-integration",
    params=PARAMS)

# Step 5: Pass NeptuneLogger to the Trainer

trainer = pl.Trainer(max_epochs=PARAMS['max_epochs'],
                     logger=neptune_logger)
Code example #19
def prepare_data(self):
    MNIST(os.getcwd(),
          train=True,
          download=True,
          transform=transforms.ToTensor())
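
# Note: Lightning calls prepare_data() once in a single process, so it is the
# right place for downloads; the dataset instance is deliberately discarded
# here and rebuilt later (typically in setup() or the *_dataloader hooks).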
Code example #20
def load_mnist(path, batch_size):
    return DataLoader(MNIST(path,
                            download=True,
                            transform=transforms.ToTensor()),
                      batch_size=batch_size,
                      shuffle=True)
Code example #21
def download_data(data_dir):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    return MNIST(data_dir, train=True, download=True, transform=transform)
Code example #22
def download_mnist(data_dir):
    # Original URL: http://yann.lecun.com/exdb/mnist/
    full_path = stage_path(data_dir, "MNIST")
    MNIST(full_path, download=True)
Code example #23
File: lr.py  Project: pipecat/LearnTorch
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython

# %%
import torch
import torchvision
from torchvision.datasets import MNIST


# %%
dataset = MNIST(root='data/', download=True)


# %%
len(dataset)


# %%
dataset[0]  # a (PIL.Image, label) pair, since no transform was given


# %%
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')


# %%
image, label = dataset[0]
plt.imshow(image, cmap='gray')
Code example #24
File: mnist.py  Project: xxffliu/TorchFusion
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.cuda as cuda

#Transformations and data augmentation
train_transformations = transforms.Compose([
    transforms.Resize(28),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # single-channel mean/std for MNIST
])

batch_size = 64

# Load the training set
train_set = MNIST(root="./data",
                  train=True,
                  transform=train_transformations,
                  download=True)

train_data = DataLoader(train_set,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=4)

#Create an instance of the NormalDistribution
source = tfgan.NormalDistribution(length=len(train_set), size=(100,))  # (100,) is a 1-tuple; bare (100) would be the int 100
source_data = DataLoader(source,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=4)

#Create an instance of the Generator and Discriminator
Code example #25
def get_dataset(train):
    return MNIST('.', train=train, transform=lambda x: ToTensor()(x).view(-1))
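
# A picklable equivalent (a sketch; a bare lambda cannot be pickled, which
# matters when a DataLoader uses worker processes). Assumes
# `from torchvision.transforms import Compose, Lambda` and `import torch`:
#
# transform = Compose([ToTensor(), Lambda(torch.flatten)])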
Code example #26
import torch
import torch.nn as nn
from torch.utils.data import random_split
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from poutyne import Model, EpochMetric

# Instantiate the MNIST dataset
train_valid_dataset = MNIST('./datasets',
                            train=True,
                            download=True,
                            transform=ToTensor())
test_dataset = MNIST('./datasets',
                     train=False,
                     download=True,
                     transform=ToTensor())
train_dataset, valid_dataset = random_split(
    train_valid_dataset, [50_000, 10_000],
    generator=torch.Generator().manual_seed(42))

# Select CUDA device if available
cuda_device = 0
device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')

# Define the network
network = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 100), nn.ReLU(),
                        nn.Linear(100, 10))
epochs = 5

Code example #27
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST, CIFAR10, ImageFolder
a = torch.cuda.is_available()
print(a)

ngpu = 1
# Decide which device we want to run on
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device)
print(torch.cuda.get_device_name(0))
print(torch.rand(3, 3))

transform = transforms.Compose([
    transforms.Resize(32),
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])

train_ds = MNIST(root='./data', train=True, download=True, transform=transform)
train_dl = DataLoader(dataset=train_ds,
                      batch_size=32,
                      shuffle=True,
                      drop_last=True)
for data in train_dl:
    print(data[0])
    break  # one batch is enough for this smoke test
Code example #28
def __init__(self, data_root, noise_level):
    MNIST_db = MNIST(root=data_root, train=True, download=True,
                     transform=torchvision.transforms.ToTensor())
    self.getitem = MNIST_db.__getitem__
    self.len = MNIST_db.__len__()
    self.noise_level = noise_level
Code example #29
    x = 0.5 * (x + 1)
    x = x.clamp(0, 1)
    x = x.view(x.size(0), 1, 28, 28)
    return x


num_epochs = 100
batch_size = 128
learning_rate = 1e-3

img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # single-channel mean/std for MNIST
])

dataset = MNIST('./data', transform=img_transform)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)


class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 128),
            nn.ReLU(True),
            nn.Linear(128, 64),
            nn.ReLU(True), nn.Linear(64, 12), nn.ReLU(True), nn.Linear(12, 3))
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.ReLU(True),
            nn.Linear(12, 64),
Code example #30
def main():
    # Compose the image transforms
    transform = transforms.Compose(
        # convert the image to a Tensor
        [
            transforms.ToTensor(),
            # normalize with the mean and standard deviation of the monochrome images
            transforms.Normalize((0.5, ), (0.5, ))
        ])

    # training dataset
    trainset = MNIST(
        root='./data',  # data path
        train=True,  # training split or not
        download=True,  # download if missing
        transform=transform)  # apply the transform composed above
    # test dataset
    testset = MNIST(
        root='./data',  # data path
        train=False,  # training split or not
        download=True,  # download if missing
        transform=transform)  # apply the transform composed above

    trainloader = DataLoader(
        trainset,  # dataset
        batch_size=128,  # batch size
        shuffle=True,  # reshuffle each training epoch
        num_workers=2)  # number of data-loading subprocesses (raise it on a strong CPU)

    testloader = DataLoader(
        testset,  # dataset
        batch_size=128,  # batch size
        shuffle=False,  # no reshuffling for evaluation
        num_workers=2)  # number of data-loading subprocesses (raise it on a strong CPU)

    # build a tuple of the digits (0,1,2,...,9) with np.linspace
    classes = tuple(np.linspace(0, 9, 10, dtype=np.uint8))

    # load the network model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = Net().to(device)

    # loss function used as the training criterion
    criterion = nn.CrossEntropyLoss()
    # optimizer over the network parameters: learning rate, momentum,
    # and Nesterov accelerated gradient enabled
    optimizer = optim.SGD(net.parameters(),
                          lr=0.01,
                          momentum=0.99,
                          nesterov=True)

    # train for 10 epochs
    for epoch in range(10):
        running_loss = 0.0
        # loop over the training batches
        for i, (inputs, labels) in enumerate(trainloader, 0):
            # reset the gradients
            optimizer.zero_grad()
            # feed the training batch through the network
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = net(inputs)
            # loss
            loss = criterion(outputs, labels)
            # backpropagation
            loss.backward()
            # parameter update
            optimizer.step()

            # report the average loss every 100 batches
            running_loss += loss.item()
            if i % 100 == 99:
                print('[{:d}, {:5d}] loss: {:.3f}'.format(
                    epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
    print('Finished Training')

    # evaluation
    correct = 0
    total = 0
    with torch.no_grad():
        # read the test data
        for (images, labels) in testloader:
            # feed the images through the network
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)
            # take the highest-scoring class as the prediction
            _, predicted = torch.max(outputs.data, 1)
            # count the examples in this batch
            total += labels.size(0)
            # count the correct predictions in this batch
            correct += (predicted == labels).sum().item()
    print('Accuracy: {:.2f} %'.format(100 * float(correct / total)))

    # save the model
    torch.save(net.state_dict(), "./mnist_model.pth")