        x = F.relu(self.fc2(x))
        x = F.dropout(x, training=self.training)  # functional dropout; a bare inline nn.Dropout() constructor has no effect
        x = F.relu(self.fc3(x))
        x = self.softmax(x)
        return x
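# Note: if this network is trained with nn.CrossEntropyLoss, the trailing
# softmax should be removed (CrossEntropyLoss applies log-softmax itself);
# with nn.NLLLoss the model should emit log-probabilities instead.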


# Define a transform to normalize the data
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])

# Download and load the training data
trainset = datasets.MNIST('./MNIST_data/',
                          download=True,
                          train=True,
                          transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True)

# Download and load the test data
testset = datasets.MNIST('./MNIST_data/',
                         download=True,
                         train=False,
                         transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=True)
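# (shuffle=False is the more conventional setting for a test loader;
# shuffling only changes evaluation order, not the metrics)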

model = myModel()

Example #2
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='ONNX Runtime MNIST Example')
    parser.add_argument(
        '--train-steps',
        type=int,
        default=-1,
        metavar='N',
        help=
        'number of steps to train. Set -1 to run through whole dataset (default: -1)'
    )
    parser.add_argument('--batch-size',
                        type=int,
                        default=20,
                        metavar='N',
                        help='input batch size for training (default: 20)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=1,
                        metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-path',
                        type=str,
                        default='',
                        help='Path for Saving the current Model state')

    # Basic setup
    args = parser.parse_args()
    if not args.no_cuda and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    torch.manual_seed(args.seed)
    onnxruntime.set_seed(args.seed)

    # Data loader
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True)

    if args.test_batch_size > 0:
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data',
                           train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307, ), (0.3081, ))
                           ])),
            batch_size=args.test_batch_size,
            shuffle=True)

    # Modeling
    model = NeuralNet(784, 500, 10)
    model_desc = mnist_model_description()
    optim_config = optim.SGDConfig(lr=args.lr)
    opts = {'device': {'id': device}}
    opts = ORTTrainerOptions(opts)

    trainer = ORTTrainer(model,
                         model_desc,
                         optim_config,
                         loss_fn=my_loss,
                         options=opts)

    # Train loop
    for epoch in range(1, args.epochs + 1):
        train(args.log_interval, trainer, device, train_loader, epoch,
              args.train_steps)
        if args.test_batch_size > 0:
            test(trainer, device, test_loader)

    # Save model
    if args.save_path:
        torch.save(model.state_dict(),
                   os.path.join(args.save_path, "mnist_cnn.pt"))
Example #3
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from torchsummary import summary

# hyper parameters
batch_size = 100
learning_rate = 1e-3
num_epochs = 1

# download MNIST digit data set
train_set = datasets.MNIST(root='../data',
                           train=True,
                           transform=transforms.ToTensor(),
                           download=True)
test_set = datasets.MNIST(root='../data',
                          train=False,
                          transform=transforms.ToTensor())

train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)


# define a rnn model
class rnn(nn.Module):
    def __init__(self, in_dim, hidden_dim, n_layer, n_class):
        super(rnn, self).__init__()
        self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=True)
        self.classifier = nn.Linear(hidden_dim, n_class)
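
    # The snippet is truncated here; a conventional forward for this
    # LSTM-plus-linear pair (a sketched assumption, not the original code):
    def forward(self, x):
        # x: (batch, seq_len, in_dim) because batch_first=True
        out, _ = self.lstm(x)
        return self.classifier(out[:, -1, :])  # classify from the last time step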
Example #4
import net

from visdom import Visdom

# (Hyper parameters)
batch_size = 64
learning_rate = 1e-2
num_epochs = 20

if __name__ == '__main__':
    data_tf = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize([0.5], [0.5])])
    train_dataset = datasets.MNIST(
        root=r'C:\Users\beach\Desktop\Study\深度学习技术与应用\作业1\data',
        train=True,
        transform=data_tf,
        download=True)
    test_dataset = datasets.MNIST(
        root=r'C:\Users\beach\Desktop\Study\深度学习技术与应用\作业1\data',
        train=False,
        transform=data_tf)

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)  # mini-batch gradient descent
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

    model = net.SimpleNet(28 * 28, 300, 100, 10)
Example #5
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)
        return x


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

learning_rate = 0.001
in_channels = 1
num_classes = 10
batch_size = 64
epochs = 5

# load dataset
train_dataset = datasets.MNIST(root='dataset/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
train_loader = DataLoader(dataset=train_dataset,
                          shuffle=True,
                          batch_size=batch_size)

model = CNN(in_channels=in_channels, num_classes=num_classes)
model.to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0)
writer = SummaryWriter('runs/MNIST/traingout_tensorboard')

step = 0
for epoch in range(epochs):
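    # (truncated in the original; a typical epoch body, sketched under the
    # assumption that the loader, model, criterion, optimizer, and writer
    # defined above are used as-is:)
    for data, targets in train_loader:
        data, targets = data.to(device), targets.to(device)
        scores = model(data)
        loss = criterion(scores, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar('Training loss', loss.item(), global_step=step)
        step += 1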
Example #6
def main():
    args = parse_args()

    if args.name is None:
        args.name = 'mnist_%s_%s_%dd' %(args.arch, args.metric, args.num_features)

    if not os.path.exists('models/%s' %args.name):
        os.makedirs('models/%s' %args.name)

    print('Config -----')
    for arg in vars(args):
        print('%s: %s' %(arg, getattr(args, arg)))
    print('------------')

    with open('models/%s/args.txt' %args.name, 'w') as f:
        for arg in vars(args):
            print('%s: %s' %(arg, getattr(args, arg)), file=f)

    joblib.dump(args, 'models/%s/args.pkl' %args.name)

    criterion = nn.CrossEntropyLoss().cuda()

    cudnn.benchmark = True

    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_set = datasets.MNIST(
        root='~/data',
        train=True,
        download=True,
        transform=transform_train)
    test_set = datasets.MNIST(
        root='~/data',
        train=False,
        download=True,
        transform=transform_train)

    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=8)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=8)

    # create model
    model = archs.__dict__[args.arch](args)
    model = model.cuda()

    if args.metric == 'adacos':
        metric_fc = metrics.AdaCos(num_features=args.num_features, num_classes=10)
    elif args.metric == 'arcface':
        metric_fc = metrics.ArcFace(num_features=args.num_features, num_classes=10)
    elif args.metric == 'sphereface':
        metric_fc = metrics.SphereFace(num_features=args.num_features, num_classes=10)
    elif args.metric == 'cosface':
        metric_fc = metrics.CosFace(num_features=args.num_features, num_classes=10)
    else:
        metric_fc = nn.Linear(args.num_features, 10)
    metric_fc = metric_fc.cuda()

    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr,
            momentum=args.momentum, weight_decay=args.weight_decay)

    scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
            T_max=args.epochs, eta_min=args.min_lr)

    log = pd.DataFrame(index=[], columns=[
        'epoch', 'lr', 'loss', 'acc1', 'val_loss', 'val_acc1'
    ])

    best_loss = float('inf')
    for epoch in range(args.epochs):
        print('Epoch [%d/%d]' %(epoch+1, args.epochs))

        # note: since PyTorch 1.1, scheduler.step() is intended to run after
        # the epoch's optimizer updates rather than before them
        scheduler.step()

        # train for one epoch
        train_log = train(args, train_loader, model, metric_fc, criterion, optimizer)
        # evaluate on validation set
        val_log = validate(args, test_loader, model, metric_fc, criterion)

        print('loss %.4f - acc1 %.4f - val_loss %.4f - val_acc %.4f'
            %(train_log['loss'], train_log['acc1'], val_log['loss'], val_log['acc1']))

        tmp = pd.Series([
            epoch,
            scheduler.get_last_lr()[0],  # get_lr() is deprecated for reading the current LR
            train_log['loss'],
            train_log['acc1'],
            val_log['loss'],
            val_log['acc1'],
        ], index=['epoch', 'lr', 'loss', 'acc1', 'val_loss', 'val_acc1'])

        log = pd.concat([log, tmp.to_frame().T], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
        log.to_csv('models/%s/log.csv' %args.name, index=False)

        if val_log['loss'] < best_loss:
            torch.save(model.state_dict(), 'models/%s/model.pth' %args.name)
            best_loss = val_log['loss']
            print("=> saved best model")
Example #7
import torch
import torchvision
from torchvision import datasets, transforms
from torch import nn
import torch.nn.functional as F
import time
import model

if __name__ == "__main__":
    # Load the training and test sets and convert them to the format PyTorch expects
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, ), (0.5, ))])
    data_train = datasets.MNIST(root='./data/',
                                transform=transform,
                                train=True,
                                download=True)
    data_test = datasets.MNIST(root='./data/',
                               transform=transform,
                               train=False)
    data_loader_train = torch.utils.data.DataLoader(dataset=data_train,
                                                    batch_size=128,
                                                    shuffle=True)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=128,
                                                   shuffle=True)

    # Define the device, model, loss function, and optimizer (gradient descent) used for training
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #model = LeNet().to(device)
    model = model.Model().to(device)
Example #8
img_side = 28
my_net = Net(img_side).double().cuda()
print('Loading model')
my_net.load_state_dict(torch.load(CKPT_PATH))
batch_size = 64
max_iter = 1000
save_every = 10
mu = 0.1307
sigma = 0.3081  # MNIST pixel std (pairs with mu = 0.1307)
origin = -1 * mu / sigma
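# origin is the normalized value of a black (0) pixel: (0 - mu) / sigma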
lr = 1e-2

dl = torch.utils.data.DataLoader(datasets.MNIST('../data',
                                                train=False,
                                                download=False,
                                                transform=transforms.Compose([
                                                    transforms.ToTensor(),
                                                    transforms.Normalize(
                                                        (mu, ), (sigma, ))
                                                ])),
                                 batch_size=batch_size,
                                 shuffle=True)


def Energy1(out, target=1):
    out_norm = torch.sqrt(out[:10]**2 + out[10:]**2)
    return -out_norm[target] + 1


def Energy2(out, target=1):
    out_norm = torch.sqrt(out[:10]**2 + out[10:]**2)
    target_vector = np.zeros(10)
Example #9
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.nn import functional as F
import matplotlib.pyplot as plt
from statistics import mean

# Hyper Parameters 
input_size = 784
inter = 56
num_classes = 10
num_epochs = 25
batch_size = 100
learning_rate = 0.01
momentum = 0.5

# MNIST Dataset (Images and Labels)
train_dataset = dsets.MNIST(root='./files', 
                            train=True, 
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./files', 
                           train=False, 
                           transform=transforms.ToTensor(),
                           download=True)

# Dataset Loader (Input Pipline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, 
                                           batch_size=batch_size, 
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset, 
                                          batch_size=batch_size, 
                                          shuffle=False)
Example #10
def train_supernet_mnist(nnet, training_settings, subnet=None, test_all=False):
    # Training settings
    seed = training_settings['seed']
    batch_size = training_settings['batch_size']
    test_batch_size = training_settings['test_batch_size']
    epochs = training_settings['epochs']
    lr = training_settings['learning_rate']
    gamma = training_settings['gamma']
    no_cuda = training_settings['no_cuda']
    log_interval = training_settings['log_interval']
    save_model = training_settings['save_model']

    use_cuda = not no_cuda and torch.cuda.is_available()

    torch.manual_seed(seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'batch_size': batch_size}
    if use_cuda:
        kwargs.update({
            'num_workers': 1,
            'pin_memory': True,
            'shuffle': True
        }, )

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    dataset1 = datasets.MNIST('../data',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2,
                                              batch_size=test_batch_size)

    model = nnet.to(device)
    if subnet is not None:
        model.set_subnet(subnet)

    optimizer = optim.Adadelta(model.parameters(), lr=lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)

    test_acc = []

    if subnet is None:
        print("\nTraining SuperNet\n")
    else:
        print("\nTraining subnet {}\n".format(subnet))

    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, epoch, log_interval,
              F.nll_loss, subnet)

        test_acc_ = []
        if subnet is None or test_all:
            for choice in [[0, 0], [1, 0], [0, 1], [1, 1]]:
                model.set_subnet(choice)
                test_acc_.append(test(model, device, test_loader, F.nll_loss))
            test_acc.append(test_acc_)
        else:
            test_acc.append(test(model, device, test_loader, F.nll_loss))

        scheduler.step()

    if save_model:
        torch.save(model.state_dict(), "mnist_supernet.pt")

    return test_acc
Example #11
def activation(x):
    return 1 / (1 + torch.exp(-x))


#Defining Softmax
def softmax(x):
    return torch.exp(x) / torch.sum(torch.exp(x), dim=1).reshape(-1, 1)
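# Sanity check (illustrative): each row of the softmax output sums to 1, e.g.
#   probs = softmax(torch.randn(4, 10)); probs.sum(dim=1)  # -> all ones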


#Downloading MNIST data
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, ))  # MNIST is single-channel; 3-tuple stats would raise an error
])
trainset = datasets.MNIST('MNIST_data/',
                          download=True,
                          train=True,
                          transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=64,
                                          shuffle=True)
dataiter = iter(trainloader)
images, labels = next(dataiter)  # DataLoader iterators no longer expose .next()
print(type(images))
print(images.shape)
print(labels.shape)

#Showing one of the image
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r')

#Defining input, hidden and output units for network
number_input = images.view(images.shape[0], -1)
Example #12
sample_batch_size = 25
obs = (1, 28, 28) if 'mnist' in args.dataset else (3, 32, 32)
input_channels = obs[0]
rescaling = lambda x: (x - .5) * 2.
rescaling_inv = lambda x: .5 * x + .5
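# rescaling maps ToTensor()'s [0, 1] output onto [-1, 1]; rescaling_inv undoes it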
kwargs = {'num_workers': 1, 'pin_memory': True, 'drop_last': True}
ds_transforms = transforms.Compose([transforms.ToTensor(), rescaling])

m_transforms = transforms.Compose(
    [transforms.RandomCrop(size=(14, 30)),
     transforms.ToTensor()])
#padding

if 'mnist' in args.dataset:
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        args.data_dir, download=True, train=True, transform=ds_transforms),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        args.data_dir, train=False, transform=ds_transforms),
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              **kwargs)

    loss_op = lambda real, fake: discretized_mix_logistic_loss_1d(real, fake)
    sample_op = lambda x: sample_from_discretized_mix_logistic_1d(
        x, args.nr_logistic_mix)
elif 'mario' in args.dataset:
    train_loader = torch.utils.data.DataLoader(MyDataset(
Example #13
    def forward(self, x):
        return self.gen(x)


lr = 3e-4
z_dim = 64
img_dim = 28 * 28
epochs = 25
disc = Discriminator(img_dim).cuda()
gen = Generator(z_dim, img_dim).cuda()
noise = torch.randn((32, z_dim)).cuda()
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])  # distinct name so the transforms module isn't shadowed

dataset = datasets.MNIST("data/", transform=transform, download=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

opt_disc = optim.Adam(disc.parameters(), lr=lr)
opt_gen = optim.Adam(gen.parameters(), lr=lr)

crit = nn.BCELoss()
writer_fake = SummaryWriter("runs/Gan/fake")
writer_real = SummaryWriter("runs/Gan/real")
step = 0

for epoch in range(epochs):
    for batch_idx, (images, _) in enumerate(loader):
        real = images.view(-1, 784).cuda()
        batch_size = real.shape[0]
        # train the discriminator
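        # (the snippet stops here; a standard BCE discriminator update,
        # assuming disc, gen, crit, and opt_disc as defined above, looks like:)
        fake = gen(torch.randn(batch_size, z_dim).cuda())
        disc_real = disc(real).view(-1)
        loss_real = crit(disc_real, torch.ones_like(disc_real))
        disc_fake = disc(fake.detach()).view(-1)
        loss_fake = crit(disc_fake, torch.zeros_like(disc_fake))
        loss_disc = (loss_real + loss_fake) / 2
        opt_disc.zero_grad()
        loss_disc.backward()
        opt_disc.step()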
Example #14
from matplotlib import pyplot as plt

batch_size = 32
n_epoch = 25
latent_dim = 100
n_classes = 10
img_shape = 28 * 28
lr = .0001

dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.Resize(28),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ]),
    ),
    batch_size=batch_size,
    shuffle=True,
)

feat, label = next(iter(dataloader))


class Discriminator(nn.Module):
    def __init__(self, n_classes, img_shape):
        super(Discriminator, self).__init__()
Example #15
                    type=int,
                    default=100,
                    help="latent dimension")
parser.add_argument("--out_layer",
                    type=int,
                    default=256,
                    help="number of out layer")

args = parser.parse_args()

print("device", device)
print(args)
os.makedirs('generated-images', exist_ok=True)

# setting data
train_loader = torch.utils.data.DataLoader(datasets.MNIST(
    '../../data', train=True, download=True, transform=transforms.ToTensor()),
                                           batch_size=args.batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(datasets.MNIST(
    '../../data', train=False, transform=transforms.ToTensor()),
                                          shuffle=True)


class Generator(nn.Module):
    def __init__(self, out_layer):
        super(Generator, self).__init__()
        self.layer1 = nn.Linear(args.latent_vector + 10, out_layer)
        self.layer2 = nn.Linear(out_layer, out_layer * 2)
        self.layer3 = nn.Linear(out_layer * 2, out_layer * 4)
        self.layer4 = nn.Linear(out_layer * 4, 784)
Example #16
                        help='how many batches to wait before logging training status')
    
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    dt = [ex for ex in test_loader][:1]
    dt = [[ex[0][:n_test], ex[1][:n_test]] for ex in dt]
    for s in range(n_samples):
        print(s, n_samples)
        model = Net()
        model.eval()
Example #17
# Not normalized, so the accuracy is not high
# Based on the MNIST dataset

import torch
from torch.utils.data import DataLoader
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import KNN_classify_algorithm as KNN
batch_size = 100

#MNIST dataset
train_dataset = dsets.MNIST(root='/dataset/MNIST',
                            train=True,
                            transform=None,
                            download=True)

test_dataset = dsets.MNIST(root='/dataset/MNIST',
                           train=False,
                           transform=None,
                           download=True)

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=True)
Example #18
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)

device = torch.device("cuda" if args.cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True, **kwargs)


class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()

        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)
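
    # (truncated here; the canonical completion from the PyTorch VAE example,
    # sketched as a guide and assuming `from torch.nn import functional as F`:)
    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)  # mean and log-variance heads

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar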
Example #19
                        type=str,
                        help='distributed backend')

    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # initialize distributed group
    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url,
                            world_size=args.world_size)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.batch_size,
                                              shuffle=True,
Example #20
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='input batch size for testing (default: 1)')
    parser.add_argument('--epochs',
                        type=int,
                        default=25,
                        metavar='N',
                        help='number of epochs to train (default: 25)')
    parser.add_argument('--lr',
                        type=float,
                        default=1.0,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        scheduler.step()
    test(model, device, test_loader)

    if args.save_model:
        torch.save(model, "mnist_cnn.pth")
Example #21
        c.apply(init_weights)
    
    load_model(g, None, load_g_path)
    g.eval()
    for param in g.parameters():
        param.requires_grad = False


    # Configure data loader
    
    transform = transforms.Compose([transforms.Resize(img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
    os.makedirs("../data/mnist", exist_ok=True)
    dataloader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data/mnist",
            train=True,
            download=True,
            transform=transform,
        ),
        batch_size=batch_size,
        sampler=torch.utils.data.SubsetRandomSampler(list(range(training_set_size)))
    )
    test_dataloader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data/mnist",
            train=False,
            download=True,
            transform=transform,
        ),
        batch_size=batch_size,
        shuffle=True
    )
Example #22
def run():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='input batch size for testing (default: 1)')
    parser.add_argument('--epochs',
                        type=int,
                        default=25,
                        metavar='N',
                        help='number of epochs to train (default: 25)')
    parser.add_argument('--lr',
                        type=float,
                        default=1.0,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        download=True,
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    model = torch.load("mnist_cnn.pth",
                       map_location=lambda storage, loc: storage)

    model.to(device)

    test(model, device, test_loader)
Example #23
                transforms.CenterCrop(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]))

        # Create the dataloader
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=args.num_workers)
    elif args.data == 'mnist':
        dataloader = torch.utils.data.DataLoader(dset.MNIST(
            './data/mnist',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.Grayscale(num_output_channels=1),
                transforms.Resize(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, ), (0.5, ))  # single channel after Grayscale
            ])),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=args.num_workers)

    elif args.data == 'cifar10':
        dataloader = torch.utils.data.DataLoader(dset.CIFAR10(
            './data/cifar10',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.Grayscale(num_output_channels=1),
Example #24
if __name__ == "__main__":
    # get arguments
    args = parse_args()
    # init and load model
    classifier = Classifier(args)
    classifier.load_state_dict(
        torch.load('../pretrained_model/classifier_mnist.pt'))
    classifier.eval()
    classifier = classifier.cuda()

    # init dataset
    transform = transforms.Compose(
        [transforms.CenterCrop(args.image_size),
         transforms.ToTensor()])
    dataset = datasets.MNIST('../data',
                             train=True,
                             download=True,
                             transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=512,
                                             shuffle=True,
                                             num_workers=1)
    # adversarial methods
    adv_list = ['fgsm', 'r-fgsm', 'cw']
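    # fgsm = Fast Gradient Sign Method, r-fgsm = FGSM with a random start,
    # cw = the Carlini-Wagner attack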
    # test for accuracy
    xs = list()
    ys = list()
    advs = list()
    batch = 0
    for image, label in dataloader:
        image = image.cuda()
        label = label.cuda()
        batch += 1
Example #25
File: dcgan.py  Project: lysh/PyTorch-GAN
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()

# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST('../../data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Resize(opt.img_size),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, ), (0.5, ))  # MNIST is single-channel
                   ])),
    batch_size=opt.batch_size, shuffle=True)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
Example #26
elif opt.dataset == 'cifar10':
    dataset = dset.CIFAR10(root=opt.dataroot,
                           download=True,
                           transform=transforms.Compose([
                               transforms.Resize(opt.imageSize),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5)),
                           ]))
    nc = 3

elif opt.dataset == 'mnist':
    dataset = dset.MNIST(root=opt.dataroot,
                         download=True,
                         transform=transforms.Compose([
                             transforms.Resize(opt.imageSize),
                             transforms.ToTensor(),
                             transforms.Normalize((0.5, ), (0.5, )),
                         ]))
    nc = 1

elif opt.dataset == 'fake':
    dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
                            transform=transforms.ToTensor())
    nc = 3

assert dataset
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=int(opt.workers))
Example #27
def main():
    global args
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=14,
                        metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='Momentum (default: 0.9)')
    parser.add_argument('--wd',
                        type=float,
                        default=0.0001,
                        metavar='M',
                        help='Weight decay (default: 0.0001)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument('--data',
                        type=str,
                        default='../data',
                        help='Location to store data')
    parser.add_argument('--sparsity',
                        type=float,
                        default=0.1,
                        help='how sparse is each layer')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        os.path.join(args.data, 'mnist'),
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        os.path.join(args.data, 'mnist'),
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    model = Net().to(device)
    # NOTE: only pass the parameters where p.requires_grad == True to the optimizer! Important!
    optimizer = optim.SGD(
        [p for p in model.parameters() if p.requires_grad],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.wd,
    )
    criterion = nn.CrossEntropyLoss().to(device)
    scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
    for epoch in range(1, args.epochs + 1):
        train(model, device, train_loader, optimizer, criterion, epoch)
        test(model, device, criterion, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
Example #28
# At this point the data come as Python tuples: a 28x28 image and a label.
# While the label is a tensor, the image is not; it needs to be converted.
# So we transform the PIL image to a tensor and then normalize it.
# Normalization is good practice for avoiding numerical and convergence
# problems. For that we need the dataset's mean and std, which fortunately
# can be computed!
# ******************
mean = 0.1307
std = 0.3081
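# A sketch of how those statistics can be computed (illustrative, not part of
# the original script; loads the raw training images with ToTensor() only):
#   raw = tdata.MNIST(root='../data', train=True, download=True,
#                     transform=tTrans.ToTensor())
#   pixels = torch.cat([img.view(-1) for img, _ in raw])
#   mean, std = pixels.mean().item(), pixels.std().item()  # ~0.1307, ~0.3081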
# Bundle our transforms sequentially, one after another. This is important.
# Convert images to tensors + normalize
transform = tTrans.Compose(
    [tTrans.ToTensor(), tTrans.Normalize((mean, ), (std, ))])
# Load data set
mnistTrainset = tdata.MNIST(root='../data',
                            train=True,
                            download=True,
                            transform=transform)
mnistTestset = tdata.MNIST(root='../data',
                           train=False,
                           download=True,
                           transform=transform)

# Once we have a dataset, torch.utils.data has a very nice library for iterating
# over it, with shuffle AND batch logic. Very useful on larger datasets.
trainLoader = torch.utils.data.DataLoader(mnistTrainset,
                                          batch_size=batch,
                                          **comArgs)
testLoader = torch.utils.data.DataLoader(mnistTestset,
                                         batch_size=10 * batch,
                                         **comArgs)
Example #29
print(opt)

# the size of image
img_shape = (opt.channels, opt.img_size, opt.img_size)
print(*img_shape)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Configure data loader
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = DataLoader(
    dataset=datasets.MNIST(
        root="../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
        )
    ),
    batch_size=opt.batch_size,
    shuffle=True
)

class Generator(nn.Module):

    def __init__(self):
        super(Generator, self).__init__()

        self.label_embedding = nn.Embedding(opt.n_classes, opt.n_classes)

        def generator_block(in_channels, out_channels, bn=True):
Example #30
    def download_test_data(self):
        return datasets.MNIST('./data',
                              train=False,
                              download=True,
                              transform=test_transforms)