# Example 1
def main():
    """Build the model and optimizer, then run a MissingLink-tracked training session.

    Relies on module-level ``Net``, ``args``, ``train``, ``test``,
    ``OWNER_ID`` and ``PROJECT_TOKEN`` defined elsewhere in this file.
    """
    model = Net()
    if args.cuda:
        model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    loss_function = F.nll_loss

    missinglink_project = missinglink.PyTorchProject(owner_id=OWNER_ID, project_token=PROJECT_TOKEN)
    with missinglink_project.create_experiment(
            model,
            metrics={'loss': loss_function},
            display_name='PyTorch convolutional neural network',
            description='Two dimensional convolutional neural network') as experiment:
        # Use the experiment-wrapped loss so MissingLink records it each step.
        loss_function = experiment.metrics['loss']
        for epoch in experiment.epoch_loop(epochs=args.epochs):
            train(experiment, model, loss_function, optimizer, epoch)
            test(experiment, model, loss_function)
    # Removed two stray lines (`x = self.fc2(x)` / `return F.log_softmax(x)`)
    # that referenced undefined `self`/`x` here — an apparent copy-paste from a
    # network's forward() that would raise NameError after the epoch loop.


# Instantiate the network; move parameters to the GPU when CUDA is enabled.
model = Net()
if args.cuda:
    model.cuda()

# Plain SGD with momentum; NLL loss pairs with a log_softmax model output.
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
loss_function = F.nll_loss

# Override credential values if provided as arguments
OWNER_ID = args.owner_id or OWNER_ID
PROJECT_TOKEN = args.project_token or PROJECT_TOKEN

# MissingLink experiment-tracking handle for this training run.
missinglink_project = missinglink.PyTorchProject(owner_id=OWNER_ID,
                                                 project_token=PROJECT_TOKEN)


def train(epoch):
    model.train()
    for batch_idx, (data,
                    target) in experiment.batch_loop(iterable=train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = loss_function(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
# Example 3
################################
from __future__ import print_function
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable

import missinglink


# Change the `project_token` to your own when trying to run this.
# NOTE(review): hard-coded demo token — replace before publishing/reusing.
missinglink_project = missinglink.PyTorchProject(project_token='KzcbCxZWjewiqxCi')

################################
## Constants
################################

class dotdict(dict):
    """A dict whose entries can also be read and written as attributes.

    Missing attributes resolve to ``None`` (mirroring ``dict.get``);
    attribute assignment and deletion operate on the underlying mapping,
    so ``del d.key`` raises ``KeyError`` for an absent key.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to the mapping.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

# Hyperparameters collected in a dict with attribute access (dotdict above).
args = dotdict()

args.seed = 321        # random seed — presumably fed to torch.manual_seed; confirm
args.batch_size = 200  # samples per training batch
# Example 4
# Remaining CLI flags (`parser` is created earlier in the file, outside this view).
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
                    help='use fp16 compression during allreduce')
args = parser.parse_args()
# Use CUDA only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)

missinglink_project = missinglink.PyTorchProject(project='5740395606573056')
# NOTE(review): presumably only rank 0 reports metrics/artifacts — confirm usage.
is_hvd_leader = (hvd.rank() == 0)

class NullContextManager(object):
    """A do-nothing context manager.

    ``with NullContextManager(res) as r:`` binds ``r`` to *res* and performs
    no setup or teardown; exceptions are never suppressed.
    (Equivalent in spirit to ``contextlib.nullcontext``.)
    """

    def __init__(self, dummy_resource=None):
        # Keep the resource so __enter__ can hand it back unchanged.
        self.dummy_resource = dummy_resource

    def __enter__(self):
        return self.dummy_resource

    def __exit__(self, *exc_info):
        # Implicitly returns None, so any in-flight exception propagates.
        pass

# Shared no-op context — presumably used where a real context manager is
# optional (e.g. only on the Horovod leader); confirm at the use sites.
null_with = NullContextManager()

if args.cuda:
    # Horovod: pin GPU to local rank.
    torch.cuda.set_device(hvd.local_rank())
# Example 5
## Imports
################################
from __future__ import print_function
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable

import missinglink

# Change the `project_token` to your own when trying to run this.
# NOTE(review): this variant identifies the project by numeric id, not a token.
missinglink_project = missinglink.PyTorchProject(project=6385640660795392)


################################
## Constants
################################

class dotdict(dict):
    """dot.notation access to dictionary attributes.

    ``d.key`` behaves like ``d.get('key')`` (``None`` when missing),
    ``d.key = v`` stores ``d['key'] = v``, and ``del d.key`` removes the
    entry (raising ``KeyError`` for an absent key, as ``__delitem__`` does).
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


# Hyperparameter container with attribute-style access (dotdict above).
args = dotdict()
# Example 6
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable

import missinglink

# NOTE(review): constructed without arguments — presumably resolves project
# credentials from the environment/config; confirm against the SDK docs.
missinglink_project = missinglink.PyTorchProject()

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument(
    '--batch-size',
    type=int,
    default=64,
    metavar='N',
    help='input batch size for training (default: 64)')
parser.add_argument(
    '--test-batch-size',
    type=int,
    default=1000,
    metavar='N',
    help='input batch size for testing (default: 1000)')
parser.add_argument(
    '--epochs',
    type=int,
    default=2,