def main():

    test_status = "PASS!"

    # CHECK-LABEL: test_op_report_vgg_style_lenet
    # CHECK:       PASS!
    print("test_op_report_vgg_style_lenet")

    device = torch_mlir.mlir_device()

    model = Net().to(device)
    ref_tensor = torch.randn((8, 1, 30, 30))
    tensor = ref_tensor.clone().to(device)

    result = model(tensor)
    target = torch.ones((8), dtype=torch.long).to(device)
    loss = F.nll_loss(result, target)
    loss.backward()

    mlir0 = torch_mlir.get_mlir(model.conv1.weight.grad)
    print(mlir0)
    report = torch_mlir.op_report(mlir0)
    print(report)

    report_dict = report
    expected = 32
    if len(report_dict) != expected:
        print(f"### ERROR: Expecting {expected} items in the report, "
              f"but got {len(report_dict)}")
        test_status = "FAIL!"

    # Every item should have a read and a write
    for key, value in report_dict.items():
        if 'reads' not in value:
            print(f"### ERROR: {key} does not contain the required reads field")
            test_status = "FAIL!"
        if 'writes' not in value:
            print(f"### ERROR: {key} does not contain the required writes field")
            test_status = "FAIL!"
        if "convolution" in key and 'ops:MAC' not in value:
            print(f"### ERROR: convolution {key} does not contain the required MAC field")
            test_status = "FAIL!"
        if "mm" in key and 'ops:MAC' not in value:
            print(f"### ERROR: mm {key} does not contain the required MAC field")
            test_status = "FAIL!"

    print(test_status)
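
# The test above assumes the imports and a VGG-style LeNet `Net` class defined
# earlier in the same file. A minimal sketch of what that preamble could look
# like -- the layer sizes are illustrative assumptions, not the original
# definition, so the expected op-report count of 32 applies to the real model
# rather than to this sketch:

import torch
import torch.nn as nn
import torch.nn.functional as F
import npcomp.frontends.pytorch as torch_mlir


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Two small convolution blocks followed by a classifier head.
        self.conv1 = nn.Conv2d(1, 8, 3, padding=1)
        self.conv2 = nn.Conv2d(8, 16, 3, padding=1)
        self.fc1 = nn.Linear(16 * 7 * 7, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 30x30 -> 15x15
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 15x15 -> 7x7
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)


if __name__ == "__main__":
    main()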
Example #2
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.

import torch
import npcomp.frontends.pytorch as torch_mlir

# RUN: %PYTHON %s | FileCheck %s

dev = torch_mlir.mlir_device()
t0 = torch.randn((1, 2, 3, 4), device=dev)
t1 = torch.randn((1, 2, 3, 4), device=dev)
t2 = torch.randn((1, 2, 3, 4), device=dev)

t3 = t0 + t1 + t2

#
# Generate and check the MLIR for the result tensor
#
t3_mlir = torch_mlir.get_mlir(t3)

# CHECK-LABEL: test_export_add3
#   CHECK: %1 = "aten.add"(%arg0, %arg1, %0) {layer_name = "L0-add-0"} : (tensor<1x2x3x4xf32>, tensor<1x2x3x4xf32>, i32) -> tensor<1x2x3x4xf32>
#   CHECK: %2 = "aten.add"(%1, %arg2, %0) {layer_name = "L1-add-1"} : (tensor<1x2x3x4xf32>, tensor<1x2x3x4xf32>, i32) -> tensor<1x2x3x4xf32>
print("test_export_add3")
print(t3_mlir)
Example #3
    def _test_model(self, model, model_args):
        result = model(model_args)

        mlir = torch_mlir.get_mlir(result)
        printWithCurrentFunctionName(mlir)
        return True
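
# `printWithCurrentFunctionName` is a small test helper defined elsewhere in
# the test suite; a minimal sketch of one possible implementation (an
# assumption, not the original definition):

import inspect


def printWithCurrentFunctionName(s):
    # Print the caller's function name first so FileCheck CHECK-LABEL lines
    # can anchor on the test that produced the output, then print the payload.
    print(inspect.stack()[1][3])
    print(s)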
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.

import torch
import npcomp.frontends.pytorch as torch_mlir
import torchvision.models as models

# RUN: python %s | FileCheck %s

dev = torch_mlir.mlir_device()

model = models.vgg11_bn().to(dev)
model.training = False

result = model(torch.ones(32, 3, 32, 32).to(dev))

mlir = torch_mlir.get_mlir(result)

# for now we just check the output shape
# CHECK-LABEL: test_export_vgg11
#   CHECK: return %{{.*}} : tensor<32x1000xf32>
print("test_export_vgg11")
print(mlir)
Example #5
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.

import torch
import npcomp.frontends.pytorch as torch_mlir

# RUN: python %s | FileCheck %s

dev = torch_mlir.mlir_device()

t0 = torch.randn(4, device=dev)
t1 = torch.randn(4, device=dev)
t2 = torch.randn(4, device=dev)

t4 = t0 + t1 + t2
t5 = t4 + t1
t6 = t5 + t4

# CHECK-LABEL: test_multi_out
#   CHECK: return %2, %3, %4 : tensor<4xf32>, tensor<4xf32>, tensor<4xf32>
mlir = torch_mlir.get_mlir([t4, t5, t6])
print("test_multi_out")
print(mlir)
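
# The fragment below belongs to a separate conv2d export test and assumes a
# Conv2d model, a host-side reference copy, and the tensor dimensions are set
# up earlier in that file. A minimal sketch of such a preamble -- the names
# match the fragment, but the sizes are assumptions, not the original:

import torch
import npcomp.frontends.pytorch as torch_mlir

dev = torch_mlir.mlir_device()

# Batch size, channels, and spatial dims; padding=1 with a 3x3 kernel keeps
# the 8x8 spatial shape expected by the (N, 8, 8) target below.
N, Cin, Cout, h, w = 8, 3, 16, 8, 8

model = torch.nn.Conv2d(Cin, Cout, 3, padding=1)
ref_model = torch.nn.Conv2d(Cin, Cout, 3, padding=1)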
ref_model.weight.data = model.weight.clone()
ref_model.bias.data = model.bias.clone()

model = model.to(dev)

softmax = torch.nn.LogSoftmax(dim=1)
loss = torch.nn.NLLLoss()

tensor = torch.randn(N, Cin, h, w, device=dev)
result = model(tensor)

# CHECK-LABEL: test_export_conv2d
#   CHECK: aten.convolution_overrideable
print("test_export_conv2d")
print(torch_mlir.get_mlir(result))

target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, Cout)
ref_target = target.clone()
target = target.to(dev)

test_loss = loss(softmax(result), target)
test_loss.backward()

# CHECK-LABEL: test_export_conv2d_back
# CHECK: aten.convolution_overrideable
# CHECK: aten._log_softmax
# CHECK: aten.nll_loss2d_forward
print("test_export_conv2d_back")
print(torch_mlir.get_mlir(test_loss))