def test_external_graph_transformer_triggering(self):
    input_size = 784
    hidden_size = 500
    num_classes = 10
    batch_size = 128
    model = NeuralNet(input_size, hidden_size, num_classes)
    model_desc = {
        'inputs': [('x', [batch_size, input_size]),
                   ('target', [batch_size, ])],
        'outputs': [('loss', [], True)]
    }
    optim_config = optim.SGDConfig()
    opts = orttrainer.ORTTrainerOptions({'device': {'id': 'cpu'}})
    model = orttrainer.ORTTrainer(model, model_desc, optim_config, options=opts)
    # Because ORTTrainer is lazily initialized, feed in random data to trigger the graph transformer.
    data = torch.rand(batch_size, input_size)
    target = torch.randint(0, 10, (batch_size,))
    with OutputGrabber() as out:
        loss = model.train_step(data, target)
    assert '******************Trigger Customized Graph Transformer: MyGraphTransformer!' in out.capturedtext
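# The test above relies on an OutputGrabber helper defined elsewhere in the
# suite. The sketch below is a minimal stand-in, assuming it only needs to
# capture Python-level stdout; the real helper likely redirects at the OS
# file-descriptor level so that output printed from native (C++) code, such as
# the graph-transformer log line asserted above, is captured too. Only the
# class name and the 'capturedtext' attribute follow the usage in the test;
# everything else here is an assumption.
import io
import sys

class OutputGrabber:
    """Context manager that collects everything written to sys.stdout."""

    def __enter__(self):
        self._buffer = io.StringIO()
        self._old_stdout = sys.stdout
        sys.stdout = self._buffer          # swap stdout for an in-memory buffer
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self._old_stdout      # restore the real stdout
        self.capturedtext = self._buffer.getvalue()
        return False                       # do not suppress exceptions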
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='ONNX Runtime MNIST Example')
    parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
                        help='number of steps to train; set -1 to run through the whole dataset (default: -1)')
    parser.add_argument('--batch-size', type=int, default=20, metavar='N',
                        help='input batch size for training (default: 20)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-path', type=str, default='',
                        help='path for saving the current model state')

    # Basic setup
    args = parser.parse_args()
    if not args.no_cuda and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    torch.manual_seed(args.seed)
    onnxruntime.set_seed(args.seed)

    # Data loaders
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True)
    if args.test_batch_size > 0:
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=args.test_batch_size, shuffle=True)

    # Modeling
    model = NeuralNet(784, 500, 10)
    model_desc = mnist_model_description()
    optim_config = optim.SGDConfig(lr=args.lr)
    opts = ORTTrainerOptions({'device': {'id': device}})
    trainer = ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)

    # Train loop
    for epoch in range(1, args.epochs + 1):
        train(args.log_interval, trainer, device, train_loader, epoch, args.train_steps)
        if args.test_batch_size > 0:
            test(trainer, device, test_loader)

    # Save model
    if args.save_path:
        torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt"))
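# The helpers referenced in main() live elsewhere in the sample. The sketches
# below show plausible minimal definitions, assuming the two-layer MLP setup
# from the test above; the symbolic 'batch_size' dimension, the nll_loss
# formulation, and the bodies of train() and test() are assumptions, not
# taken from this file.
import torch
import torch.nn.functional as F

def mnist_model_description():
    # Shapes mirror the model_desc built inline in the test above, with a
    # symbolic batch dimension so any batch size is accepted.
    return {
        'inputs': [('x', ['batch_size', 784]),
                   ('target', ['batch_size', ])],
        'outputs': [('loss', [], True)]
    }

def my_loss(x, target):
    # Cross-entropy expressed as log-softmax followed by negative log-likelihood.
    return F.nll_loss(F.log_softmax(x, dim=1), target)

def train(log_interval, trainer, device, loader, epoch, train_steps):
    for batch_idx, (data, target) in enumerate(loader):
        if 0 <= train_steps <= batch_idx:
            break  # honor --train-steps; -1 runs through the whole dataset
        # Flatten [N, 1, 28, 28] images to [N, 784] to match the model description.
        data = data.reshape(data.shape[0], -1)
        loss = trainer.train_step(data, target)
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset), loss.item()))

def test(trainer, device, loader):
    # Report the average evaluation loss; eval_step runs the forward graph
    # without updating the weights.
    total_loss = 0.0
    with torch.no_grad():
        for data, target in loader:
            data = data.reshape(data.shape[0], -1)
            loss = trainer.eval_step(data, target)
            total_loss += loss.item() * len(data)
    print('Test set: Average loss: {:.4f}'.format(total_loss / len(loader.dataset)))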