def check_fwd(model, tensor):
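    # Run the reference model on the host, run a deep copy of it on the MLIR
    # device, and compare the two forward results.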
    device = torch_mlir.mlir_device()
    result = model(tensor)
    device_model = copy.deepcopy(model).to(device)
    device_tensor = tensor.clone().to(device)
    device_result = device_model(device_tensor)

    compare(result, device_result, "fwd")
    return (device_model, device_result, result)


def main():
    test_status = "PASS!"

    # CHECK-LABEL: test_op_report_vgg_style_lenet
    # CHECK:       PASS!
    print("test_op_report_vgg_style_lenet")

    device = torch_mlir.mlir_device()

    model = Net().to(device)
    ref_tensor = torch.randn((8, 1, 30, 30))
    tensor = ref_tensor.clone().to(device)

    result = model(tensor)
    target = torch.ones((8), dtype=torch.long).to(device)
    loss = F.nll_loss(result, target)
    loss.backward()

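    # Retrieve the MLIR that computes conv1's weight gradient and build a
    # per-op report from it.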
    mlir0 = torch_mlir.get_mlir(model.conv1.weight.grad)
    print(mlir0)
    report = torch_mlir.op_report(mlir0)
    print(report)

    report_dict = report
    expected = 32
    if len(report_dict) != expected:
        print("### ERROR: Expecting", expected,
              "items in the report, but got", len(report_dict))
        test_status = "FAIL!"

    # Every item should have a read and a write
    for key, value in report_dict.items():
        if 'reads' not in value:
            print(
                f"### ERROR: {key} does not contain the required reads field")
            test_status = "FAIL!"
        if 'writes' not in value:
            print(
                f"### ERROR: {key} does not contain the required writes field")
            test_status = "FAIL!"
        if "convolution" in key:
            if 'ops:MAC' not in value:
                print(
                    f"### ERROR: convolution {key} does not contain the required MAC field"
                )
                test_status = "FAIL!"
        if "mm" in key:
            if 'ops:MAC' not in value:
                print(
                    f"### ERROR: mm {key} does not contain the required MAC field"
                )
                test_status = "FAIL!"

    print(test_status)
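
# A minimal sketch of the entry shape the checks above expect from
# torch_mlir.op_report. The layer-name key and the numeric values are
# hypothetical placeholders; only the 'reads', 'writes', and 'ops:MAC'
# field names are taken from the test above.
example_report_entry = {
    "L0-convolution_overrideable-0": {
        "reads": 0,
        "writes": 0,
        "ops:MAC": 0,
    },
}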
def check_back(fwd_path, target, lossmodel):
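    # Compute the loss for the reference result and for the device result,
    # backpropagate both, and compare the loss values.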
    device = torch_mlir.mlir_device()
    (device_model, device_result, result) = fwd_path
    device_target = target.clone().to(device)
    ref_loss = lossmodel(result, target)
    ref_loss.backward()
    device_loss = lossmodel(device_result, device_target)
    device_loss.backward()

    compare(ref_loss, device_loss, "back")
    return (device_model, device_result)


def main():
    device = torch_mlir.mlir_device()
    model = Net()
    tensor = torch.randn((64, 1, 28, 28), requires_grad=True)
    # CHECK: PASS! fwd check
    fwd_path = test.check_ref(model, tensor)

    target = torch.ones((64), dtype=torch.long)
    loss = F.nll_loss

    # CHECK: PASS! back check
    test.check_back(fwd_path, target, loss)

    # CHECK: PASS! fc1_weight_grad check
    test.compare(model.fc1.weight.grad, fwd_path[0].fc1.weight.grad,
                 "fc1_weight_grad")


def test_ResA_16(self):
    dev = torch_mlir.mlir_device()
    model = ResA(16).to(dev)
    passed = self._test_model(model,
                              torch.ones((1, 16, 128, 128), device=dev))
    # CHECK-LABEL: test_ResA_16
    #   CHECK: [[V0:%[a-zA-Z0-9]+]], %{{.*}}, %{{.*}} = "aten.native_batch_norm"({{.*}}) {layer_name = "L0-native_batch_norm-0"}
    #   CHECK: [[V1:%[a-zA-Z0-9]+]] = "aten.relu"([[V0]]) {layer_name = "L1-relu-0"}
    #   CHECK: [[V2:%[a-zA-Z0-9]+]] = "aten.convolution_overrideable"([[V1]], {{.*}}) {layer_name = "L2-convolution_overrideable-0"}
    #   CHECK: [[V3:%[a-zA-Z0-9_]+]], %{{.*}}, %{{.*}} = "aten.native_batch_norm"([[V2]]{{.*}}) {layer_name = "L3-native_batch_norm-1"}
    #   CHECK: [[V4:%[a-zA-Z0-9]+]] = "aten.relu"([[V3]]) {layer_name = "L4-relu-1"}
    #   CHECK: [[V5:%[a-zA-Z0-9]+]] = "aten.convolution_overrideable"([[V4]],{{.*}}) {layer_name = "L5-convolution_overrideable-1"}
    #   CHECK: [[V6:%[a-zA-Z0-9_]+]], %{{.*}}, %{{.*}} = "aten.native_batch_norm"([[V5]],{{.*}}) {layer_name = "L6-native_batch_norm-2"}
    #   CHECK: [[V7:%[a-zA-Z0-9]+]] = "aten.relu"([[V6]]) {layer_name = "L7-relu-2"}
    #   CHECK: [[V8:%[a-zA-Z0-9]+]] = "aten.convolution_overrideable"([[V7]],{{.*}}) {layer_name = "L8-convolution_overrideable-2"}
    #   CHECK: {{.*}} = "aten.add"(%arg0, [[V8]], {{.*}}) {layer_name = "L9-add-0"}
    self.assertTrue(passed)
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.

import torch
import npcomp.frontends.pytorch as torch_mlir
import npcomp.frontends.pytorch.test as test

# RUN: python %s | FileCheck %s

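# Build element-wise adds and reshapes on the MLIR device, then redo the
# first add on CPU copies of the inputs and compare the results.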
dev = torch_mlir.mlir_device()
t0 = torch.randn((4, 16, 4), device=dev)
t1 = torch.randn((4, 16, 4), device=dev)

t3 = torch.randn((4, 64), device=dev)
t4 = torch.randn((4, 64), device=dev)

t2 = t0 + t1
t5 = t3 + t4

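# Reshape both sums to a common 4-d shape so the two views can be added.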
t6 = t5.view((4, 4, 4, 4))
t7 = t2.view((4, 4, 4, 4))

t8 = t6 + t7

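# Copy the original inputs back to the CPU to compute the reference sum.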
t0_cpu = t0.to('cpu')
t1_cpu = t1.to('cpu')

# CHECK: PASS! add_views_0 check
test.compare(t2, t0_cpu + t1_cpu, "add_views_0")