Code example #1
def log_train_metrics(self, loss, acc, completed_batch, worker=0):
    # Normalize accuracy from a percentage to the [0, 1] range.
    acc = acc / 100.0
    self.gs += 1  # advance the global step counter
    with EMetrics.open() as em:
        em.record(EMetrics.TEST_GROUP, completed_batch, {'loss': loss, 'accuracy': acc})
    with ELog.open() as log:
        log.recordTrain("Train", completed_batch, self.gs, loss, acc, worker)
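The method expects accuracy as a percentage (hence the division by 100). A hypothetical call site with illustrative values only (the logger instance and batch_idx are assumptions, not part of the original source):

# acc arrives as a percentage (e.g. 97.3) and is normalized to 0.973 inside
logger.log_train_metrics(loss=0.08, acc=97.3, completed_batch=batch_idx, worker=0)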
Code example #2
def mq_record(batch, logs):
    # Pull the running loss and accuracy out of the framework's logs dict
    # and forward them to EMetrics after each batch.
    loss = logs['loss']
    accuracy = logs['accuracy']
    with EMetrics.open() as em:
        em.record(EMetrics.TEST_GROUP, batch, {
            'loss': loss,
            'accuracy': accuracy
        })
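mq_record has the (batch, logs) signature of a Keras batch-level callback, so one plausible way to wire it up is keras.callbacks.LambdaCallback. A minimal sketch, assuming a TensorFlow 2 environment; the model and data below are dummies, and only the callback wiring is the point:

import numpy as np
from tensorflow import keras

x_train = np.random.rand(256, 4).astype('float32')
y_train = np.random.randint(0, 10, size=256)
model = keras.Sequential([keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='sgd',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# on_batch_end is called with (batch, logs), matching mq_record's signature.
model.fit(x_train, y_train,
          callbacks=[keras.callbacks.LambdaCallback(on_batch_end=mq_record)])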
Code example #3
def __init__(self):
    self.emetrics = EMetrics.open(getCurrentSubID())
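Because the handle is opened outside a with block here, the owning object has to release it itself. A minimal sketch of such a wrapper (the SubJobRecorder name and the close() call are assumptions, the latter inferred from the context-manager usage in the other examples):

class SubJobRecorder:
    def __init__(self):
        # One EMetrics handle per sub-job, kept open for the object's lifetime.
        self.emetrics = EMetrics.open(getCurrentSubID())

    def record_test(self, step, loss, accuracy):
        self.emetrics.record(EMetrics.TEST_GROUP, step,
                             {'loss': loss, 'accuracy': accuracy})

    def close(self):
        # Release the handle explicitly, mirroring what the with form does.
        self.emetrics.close()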
Code example #4
def test(epoch, em):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))

    # Report the per-epoch test metrics to EMetrics.
    em.record(EMetrics.TEST_GROUP, epoch, {'loss': test_loss, 'accuracy': accuracy})


with EMetrics.open() as em:
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test(epoch, em)

torch.save(model.state_dict(), output_model_path)
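The train(epoch) function called above is defined elsewhere in the same script. A minimal sketch of a matching training step, reusing this example's globals (model, args, train_loader, optimizer, F), could look like:

def train(epoch):
    model.train()
    for data, target in train_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood loss
        loss.backward()
        optimizer.step()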
Code example #5
File: pytorch_mnist.py Project: IBM/wmla-assets
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    print(args)

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    #kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    kwargs = {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=True,
                       download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    if use_cuda:
        print("Let's use {} gpus".format(str(torch.cuda.device_count())))

    # multi-GPUs data
    if use_cuda and torch.cuda.device_count() > 1:
        model = nn.DataParallel(Net()).to(device)
    else:
        model = Net().to(device)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    # optimizer = nn.DataParallel(optimizer, device_ids=[0, 1])

    start_time = time.time()
    with EMetrics.open() as em:
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch, em)
            test(args, model, device, test_loader)
    duration = (time.time() - start_time) / 60
    print("Train finished. Time cost: %.2f minutes" % (duration))

    torch.save(model.state_dict(), output_model_path)
    print("Model saved in path: %s" % output_model_path)
Code example #6
File: main.py Project: IBM/wmla-assets
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    args, unknown = parser.parse_known_args()
    print(sys.path)
    print("known arguments: ", args)
    print("unknown arguments", unknown)
    print("torch version: %s" % torch.__version__)

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    if use_cuda:
        print("Let's use {} gpus".format(str(torch.cuda.device_count())))
        # for onnx
        torch.cuda.manual_seed(args.seed)
        #torch.set_default_tensor_type(torch.cuda.FloatTensor)
    else:
        print("Let's use cpu")
        torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    kwargs = {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    # multi-GPUs data
    if use_cuda and torch.cuda.device_count() > 1:
        model = nn.DataParallel(Net()).to(device)
    else:
        model = Net().to(device)

    print("Model parameters are on cuda") if all(p.is_cuda for p in model.parameters()) else print("Model parameters are on cpu")

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    start_time = time.time()
    with EMetrics.open() as em:
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch, em)
            test(args, model, device, test_loader)
    duration = (time.time() - start_time) / 60
    print("Train finished. Time cost: %.2f minutes" % duration)

    torch.save(model.state_dict(), output_model_pt)
    print("Model saved in path: %s" % output_model_pt)

    # export onnx model
    dummy_input = torch.randn(1, 1, 28, 28, device=device)
    if type(model) is nn.DataParallel:
        # torch.nn.DataParallel is not supported by ONNX exporter, please use 'attribute' module to unwrap model from torch.nn.DataParallel
        model = model.module
    torch.onnx.export(model, dummy_input, output_model_onnx, export_params=True)

    print("Onnx Model saved in path: %s" % output_model_onnx)
Code example #7
from emetrics import EMetrics

with EMetrics.open() as metrics:
    metrics.record(
        EMetrics.TEST_GROUP, 1,
        {"accuracy": 0.6})  # record TEST metric accuracy=0.6 after step 1
    metrics.record(
        EMetrics.TRAIN_GROUP, 1,
        {"accuracy": 0.67})  # record TRAIN metric accuracy=0.67 after step 1

    metrics.record(
        EMetrics.TEST_GROUP, 2,
        {"accuracy": 0.5})  # record TEST metric accuracy=0.5 after step 2
    metrics.record(
        EMetrics.TRAIN_GROUP, 2,
        {"accuracy": 0.54})  # record TRAIN metric accuracy=0.54 after step 2

    metrics.record(
        EMetrics.TEST_GROUP, 3,
        {"accuracy": 0.9})  # record TEST metric accuracy=0.9 after step 3
    metrics.record(
        EMetrics.TRAIN_GROUP, 3,
        {"accuracy": 0.91})  # record TRAIN metric accuracy=0.91 after step 3
Code example #8
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    if use_cuda:
        torch.cuda.manual_seed(args.seed)
        #torch.cuda.set_device(device)
        torch.set_default_tensor_type(torch.cuda.FloatTensor)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    root_folder = os.getenv("DATA_DIR")
    output_model_folder = os.environ["RESULT_DIR"]
    output_model_path = os.path.join(output_model_folder, "model")
    output_model_path_file = os.path.join(output_model_path,
                                          "trained_model.pt")
    output_model_path_onnx = os.path.join(output_model_path,
                                          "trained_model.onnx")
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(root_folder,
                       train=True,
                       download=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(root_folder,
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    model = Net().to(device)  # .to(device) already places the model on the GPU when use_cuda

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    with EMetrics.open() as em:
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch)
            test(args, model, device, test_loader, epoch, em)

    torch.save(model.state_dict(), output_model_path_file)
    # Trace the model with a dummy MNIST-shaped input and export to ONNX.
    x = torch.randn(1, 1, 28, 28, requires_grad=True)
    torch.onnx.export(model, x, output_model_path_onnx, export_params=True)