Example #1
    def test_import_custom_eps(self):
        torch_ort.set_device(0, 'CPUExecutionProvider', {})

        # register the custom test execution provider's shared library with ORT eager mode
        torch_ort._register_provider_lib(
            'TestExecutionProvider', self.get_test_execution_provider_path(),
            'ProviderEntryPoint')
        torch_ort.set_device(1, 'TestExecutionProvider', {
            'device_id': '0',
            'some_config': 'val'
        })
        ort_device = torch_ort.device(1)
Example #2
    def test_import_custom_eps(self):
        torch_ort.set_device(0, "CPUExecutionProvider", {})

        torch_ort._register_provider_lib(
            "TestExecutionProvider", self.get_test_execution_provider_path(),
            {})
        # capture stdout
        with OutputGrabber() as out:
            torch_ort.set_device(1, "TestExecutionProvider", {
                "device_id": "0",
                "some_config": "val"
            })
            ort_device = torch_ort.device(1)
        assert "My EP provider created, with device id: 0, some_option: val" in out.capturedtext
        with OutputGrabber() as out:
            torch_ort.set_device(2, "TestExecutionProvider", {
                "device_id": "1",
                "some_config": "val"
            })
            ort_device = torch_ort.device(1)
        assert "My EP provider created, with device id: 1, some_option: val" in out.capturedtext
        # test reusing the EP instance
        with OutputGrabber() as out:
            torch_ort.set_device(3, "TestExecutionProvider", {
                "device_id": "0",
                "some_config": "val"
            })
            ort_device = torch_ort.device(1)
        assert "My EP provider created, with device id: 0, some_option: val" not in out.capturedtext
        # test clearing the training EP instance pool
        torch_ort.clear_training_ep_instances()
        with OutputGrabber() as out:
            torch_ort.set_device(3, "TestExecutionProvider", {
                "device_id": "0",
                "some_config": "val"
            })
            ort_device = torch_ort.device(1)
        assert "My EP provider created, with device id: 0, some_option: val" in out.capturedtext
Example #3
    def get_device(self):
        return torch_ort.device()
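
For context, torch_ort.device() with no arguments returns a handle for the default ORT eager-mode device, which the other examples pass to Tensor.to() and Module.to(). A small usage sketch (the tensor name and shape are illustrative assumptions, not from the source):

import torch
import torch_ort

ort_device = torch_ort.device()        # default ORT eager-mode device
x = torch.rand(2, 3).to(ort_device)    # move a tensor onto the ORT device
y = (x + x).cpu()                      # the add runs through ONNX Runtime; copy back
print(y)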
Example #4
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out


input_size = 784
hidden_size = 500
num_classes = 10
batch_size = 128

batch = torch.rand((batch_size, input_size))
device = torch_ort.device()

with torch.no_grad():
    model = NeuralNet(input_size, hidden_size, num_classes)
    pred = model(batch)
    print("inference result is: ")
    print(pred)

    model.to(device)

    ort_batch = batch.to(device)
    ort_pred = model(ort_batch)
    print("ORT inference result is:")
    print(ort_pred.cpu())
    print("Compare result:")
Example #5
def main():
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
    parser.add_argument("--batch-size",
                        type=int,
                        default=64,
                        metavar="N",
                        help="input batch size for training (default: 64)")
    parser.add_argument("--test-batch-size",
                        type=int,
                        default=1000,
                        metavar="N",
                        help="input batch size for testing (default: 1000)")
    parser.add_argument("--epochs",
                        type=int,
                        default=1,
                        metavar="N",
                        help="number of epochs to train (default: 1)")
    parser.add_argument("--lr",
                        type=float,
                        default=0.01,
                        metavar="LR",
                        help="learning rate (default: 0.01)")
    parser.add_argument("--seed",
                        type=int,
                        default=1,
                        metavar="S",
                        help="random seed (default: 1)")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        metavar="N",
        help="how many batches to wait before logging training status",
    )

    args = parser.parse_args()

    torch.manual_seed(args.seed)

    kwargs = {"num_workers": 0, "pin_memory": True}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            dataset_root_dir,
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            dataset_root_dir,
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307, ), (0.3081, ))
            ]),
        ),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs,
    )

    # Build the model, move it onto the ORT eager-mode device, and set up SGD.
    device_ort = torch_ort.device()
    input_size = 784
    hidden_size = 500
    num_classes = 10
    model_nn = NeuralNet(input_size, hidden_size, num_classes)
    model_nn.to(device_ort)
    optimizer = optim.SGD(model_nn.parameters(), lr=args.lr)

    print("\nStart Training.")

    for epoch in range(1, args.epochs + 1):
        train_with_eager(args, model_nn, optimizer, device_ort, train_loader,
                         epoch)
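
train_with_eager is not defined in this excerpt. As a hedged sketch only (the real function may differ), a conventional PyTorch training step that keeps the data on the ORT eager device could look like the following; the loss choice assumes NeuralNet returns raw logits, as in Example #4:

import torch.nn.functional as F


def train_with_eager(args, model, optimizer, device, train_loader, epoch):
    # Hypothetical sketch: standard training loop run on the ORT eager device.
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Flatten the 28x28 MNIST images to match NeuralNet's input_size of 784.
        data = data.reshape(data.shape[0], -1).to(device)
        target = target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)    # raw logits -> cross entropy
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print("Train Epoch: {} [{}/{}]\tLoss: {:.6f}".format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                loss.item()))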