Example #1
    @classmethod
    def setUpClass(cls):

        def xgraph_build_func(xgraph):
            raise NotImplementedError("")

        def xgraph_optimizer(xgraph):
            raise NotImplementedError("")

        def xgraph_quantizer(xgraph):
            raise NotImplementedError("")

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        target_registry = TargetRegistry()
        target_registry.register_target('test',
                                        xgraph_optimizer,
                                        xgraph_quantizer,
                                        xgraph_compiler,
                                        xgraph_build_func)

        @register_op_support_check('test', 'Convolution')
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test', 'Pooling')
        def pooling_op_support(X, bXs, tXs):
            return True
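
A minimal sketch of how this fixture might be exercised (the import path is an assumption based on similar pyxir test files, not shown in the original): after setUpClass runs, the registry can report the op checks registered for 'test', and the target should be unregistered afterwards so it does not leak into other tests.

    # Sketch, assuming TargetRegistry lives in pyxir.target_registry as in
    # comparable pyxir tests.
    from pyxir.target_registry import TargetRegistry

    target_registry = TargetRegistry()
    ops = target_registry.get_supported_op_check_names('test')
    assert 'Convolution' in ops
    assert 'Pooling' in ops

    # Clean up, mirroring the tearDownClass shown in Example #7
    target_registry.unregister_target('test')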
Example #2
class TestDPUContrib(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()
    rt_manager = RtManager()

    @classmethod
    def setUpClass(cls):
        # Import DPU module
        from pyxir.contrib.dpuv1 import dpuv1
        # from pyxir.contrib.dpuv1.dpuv1_target import\
        #     xgraph_dpu_v1_optimizer,\
        #     xgraph_dpu_v1_quantizer,\
        #     xgraph_dpu_v1_compiler,\
        #     xgraph_dpu_v1_build_func

        # pyxir.register_target(
        #     'dpuv1',
        #     xgraph_dpu_v1_optimizer,
        #     xgraph_dpu_v1_quantizer,
        #     xgraph_dpu_v1_compiler,
        #     xgraph_dpu_v1_build_func
        # )

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDPUContrib.target_registry.unregister_target('dpuv1')
        TestDPUContrib.target_registry.unregister_target('DPUCADX8G')

    def test_supported_ops(self):
        dpuv1_ops = TestDPUContrib.target_registry\
            .get_supported_op_check_names('dpuv1')

        assert 'BatchNorm' in dpuv1_ops
        assert 'BiasAdd' in dpuv1_ops
        assert 'Concat' in dpuv1_ops
        assert 'Convolution' in dpuv1_ops
        assert 'Conv2DTranspose' in dpuv1_ops
        assert 'DPU' in dpuv1_ops
        assert 'Eltwise' in dpuv1_ops
        assert 'Pad' in dpuv1_ops
        assert 'Pooling' in dpuv1_ops
        assert 'Mean' in dpuv1_ops
        assert 'pReLU' in dpuv1_ops
        assert 'ReLU' in dpuv1_ops
        assert 'Scale' in dpuv1_ops

    @unittest.skipIf(skip_tf,
                     "Skipping Tensorflow related test because tensorflow is "
                     "not available")
    def test_import_ext_quantizer(self):
        if TestDPUContrib.target_registry.is_target('DPUCADX8G'):
            TestDPUContrib.target_registry.unregister_target('DPUCADX8G')
        if TestDPUContrib.rt_manager.exists_op('cpu-np', 'DPU'):
            TestDPUContrib.rt_manager.unregister_op('cpu-np', 'DPU')
        from pyxir.contrib.target import DPUCADX8G_external_quantizer
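
The skipIf decorator above references a skip_tf flag that this snippet never defines. A plausible module-level definition (an assumption, not shown in the original file) probes the tensorflow import:

    # Assumed definition of skip_tf; the original module presumably sets this
    # flag near its imports.
    try:
        import tensorflow as tf  # noqa: F401
        skip_tf = False
    except ImportError:
        skip_tf = True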
Example #3
    @classmethod
    def setUpClass(cls):
        def xgraph_build_func(xgraph):
            raise NotImplementedError("")

        def xgraph_optimizer(xgraph):
            raise NotImplementedError("")

        def xgraph_quantizer(xgraph):
            raise NotImplementedError("")

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        target_registry = TargetRegistry()
        target_registry.register_target(
            "test",
            xgraph_optimizer,
            xgraph_quantizer,
            xgraph_compiler,
            xgraph_build_func,
        )

        @register_op_support_check("test", "Convolution")
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check("test", "Pooling")
        def pooling_op_support(X, bXs, tXs):
            return True

        @register_op_support_check("test", "Concat")
        def concat_op_support(X, bXs, tXs):
            return False

        @register_op_support_check("test", "Eltwise")
        def eltwise_op_support(X, bXs, tXs):
            return True

        @register_op_support_check("test", "ReLU")
        def relu_op_support(X, bXs, tXs):
            return True
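
Unlike Example #1, this fixture marks Concat as unsupported, which forces the partitioner to break subgraphs at Concat layers. A rough sketch of the expected effect (hypothetical layer names; partition and get_layers are used the same way in Example #8):

    # Sketch: xgraph is an XGraph built via build_from_xlayer as in
    # Example #8, containing a conv1 -> concat1 -> relu1 chain.
    p_xgraph = partition(xgraph, ['test'])
    targets = {X.name: X.target for X in p_xgraph.get_layers()}
    # concat1 is expected to stay on the CPU, splitting the partition.
    assert targets['concat1'] == 'cpu'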
Example #4
class TestDPUContrib(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        # Import DPU module
        from pyxir.contrib.dpuv1 import dpuv1
        # from pyxir.contrib.dpuv1.dpuv1_target import\
        #     xgraph_dpu_v1_optimizer,\
        #     xgraph_dpu_v1_quantizer,\
        #     xgraph_dpu_v1_compiler,\
        #     xgraph_dpu_v1_build_func

        # pyxir.register_target(
        #     'dpuv1',
        #     xgraph_dpu_v1_optimizer,
        #     xgraph_dpu_v1_quantizer,
        #     xgraph_dpu_v1_compiler,
        #     xgraph_dpu_v1_build_func
        # )

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDPUContrib.target_registry.unregister_target('dpuv1')
        TestDPUContrib.target_registry.unregister_target('DPUCADX8G')

    def test_supported_ops(self):
        dpuv1_ops = TestDPUContrib.target_registry\
            .get_supported_op_check_names('dpuv1')

        assert 'BatchNorm' in dpuv1_ops
        assert 'BiasAdd' in dpuv1_ops
        assert 'Concat' in dpuv1_ops
        assert 'Convolution' in dpuv1_ops
        assert 'Conv2DTranspose' in dpuv1_ops
        assert 'DPU' in dpuv1_ops
        assert 'Eltwise' in dpuv1_ops
        assert 'Pad' in dpuv1_ops
        assert 'Pooling' in dpuv1_ops
        assert 'Mean' in dpuv1_ops
        assert 'pReLU' in dpuv1_ops
        assert 'ReLU' in dpuv1_ops
        assert 'Scale' in dpuv1_ops
Example #5
class TestDpuv1OpSupport(unittest.TestCase):

    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def test():
            raise NotImplementedError("")

        TestDpuv1OpSupport.target_registry.register_target(
            'dpuv1', {}, test, test, test, test)

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDpuv1OpSupport.target_registry.unregister_target('dpuv1')
        TestDpuv1OpSupport.target_registry.unregister_target('DPUCADX8G')
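
A quick sanity check that could sit between these two hooks (a sketch built only from calls that appear elsewhere in these examples):

    # After setUpClass, the registry should report the target as present.
    assert TestDpuv1OpSupport.target_registry.is_target('dpuv1')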
Example #6
class TestUltra96OpSupport(unittest.TestCase):

    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def test():
            raise NotImplementedError("")

        TestUltra96OpSupport.target_registry.register_target(
            "dpuv2-ultra96", {}, test, test, test, test)

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestUltra96OpSupport.target_registry.unregister_target("dpuv2-ultra96")
        # TestUltra96OpSupport.target_registry.unregister_target('DPUCZDX8G-ultra96')

    def test_batchnorm_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import batchnorm_op_support

        X = XLayer(
            type=["BatchNorm"],
            name="bn1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert batchnorm_op_support(X, [], [])

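        # The 2570-channel variant below presumably exceeds the DPU's
        # supported channel range, so the same check is expected to fail.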
        X = XLayer(
            type=["BatchNorm"],
            name="bn1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert not batchnorm_op_support(X, [], [])

    def test_biasadd_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import biasadd_op_support

        X = XLayer(
            type=["BiasAdd"],
            name="bn1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert biasadd_op_support(X, [], [])

        X = XLayer(
            type=["BiasAdd"],
            name="bn1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert not biasadd_op_support(X, [], [])

    def test_concat_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import concat_op_support

        X = XLayer(
            type=["Concat"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert concat_op_support(X, [], [])

        X = XLayer(
            type=["Concat"],
            name="layer1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert not concat_op_support(X, [], [])

    def test_conv2d_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import conv2d_op_support

        X = XLayer(
            type=["Convolution"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                "channels": [4, 2],
                "groups": 1,
            },
        )

        assert conv2d_op_support(X, [], [])

        X = XLayer(
            type=["Convolution"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [3, 3], [1, 1]],
                "channels": [4, 2],
                "groups": 1,
            },
        )

        assert not conv2d_op_support(X, [], [])

    def test_conv2d_transpose_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import conv2d_transpose_op_support

        X = XLayer(
            type=["Conv2DTranspose"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                "channels": [4, 2],
                "groups": 1,
            },
        )

        assert conv2d_transpose_op_support(X, [], [])

        X = XLayer(
            type=["Conv2DTranspose"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                "channels": [2570, 2],
                "groups": 1,
            },
        )

        assert not conv2d_transpose_op_support(X, [], [])

    def test_dpuv2_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import dpu_op_support

        X = XLayer(
            type=["DPU"],
            name="layer1",
            shapes=[[-1, 2, 4, 4], [-1, 1, 4, 4]],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={},
        )

        assert dpu_op_support(X, [], [])

    def test_eltwise_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import eltwise_op_support

        X = XLayer(
            type=["Eltwise"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={},
        )

        assert eltwise_op_support(X, [], [])

    def test_pad_pooling_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import pad_op_support

        X = XLayer(
            type=["Pad"],
            name="pad1",
            shapes=[-1, 2, 6, 6],
            sizes=[72],
            bottoms=[],
            tops=["layer1"],
            targets=[],
            attrs={"padding": [[0, 0], [0, 0], [2, 2], [2, 2]]},
        )

        tX = XLayer(
            type=["Pooling"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=["pad1"],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [3, 3],
                "padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
            },
        )

        assert pad_op_support(X, [], [tX])

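        # Spatial padding of [5, 2] presumably cannot be folded into the
        # following pooling layer, so support should be rejected here.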
        X = XLayer(
            type=["Pad"],
            name="pad1",
            shapes=[-1, 2, 6, 6],
            sizes=[72],
            bottoms=[],
            tops=["layer1"],
            targets=[],
            attrs={"padding": [[0, 0], [0, 0], [5, 2], [5, 2]]},
        )

        tX = XLayer(
            type=["Pooling"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=["pad1"],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [3, 3],
                "padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
            },
        )

        assert not pad_op_support(X, [], [tX])

    def test_pad_convolution_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import pad_op_support

        X = XLayer(
            type=["Pad"],
            name="pad1",
            shapes=[-1, 2, 6, 6],
            sizes=[72],
            bottoms=[],
            tops=["layer1"],
            targets=[],
            attrs={"padding": [[0, 0], [0, 0], [1, 1], [1, 1]]},
        )

        tX = XLayer(
            type=["Convolution"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=["pad1"],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
                "channels": [4, 2],
                "groups": 1,
            },
        )

        assert pad_op_support(X, [], [tX])

        X = XLayer(
            type=["Pad"],
            name="pad1",
            shapes=[-1, 2, 6, 6],
            sizes=[72],
            bottoms=[],
            tops=["layer1"],
            targets=[],
            attrs={"padding": [[0, 0], [0, 0], [2, 2], [2, 2]]},
        )

        tX = XLayer(
            type=["Convolution"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=["pad1"],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "dilation": [1, 1],
                "padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
                "channels": [4, 2],
                "groups": 1,
            },
        )

        assert not pad_op_support(X, [], [tX])

    def test_pooling_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import pooling_op_support

        X = XLayer(
            type=["Pooling"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [3, 3],
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
            },
        )

        assert pooling_op_support(X, [], [])

        X = XLayer(
            type=["Pooling"],
            name="layer1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "data_layout": "NCHW",
                "kernel_size": [2, 2],
                "strides": [1, 1],
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
            },
        )

        assert not pooling_op_support(X, [], [])

    def test_mean_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import mean_op_support

        X = XLayer(
            type=["Mean"],
            name="layer1",
            shapes=[-1, 2, 1, 1],
            sizes=[2],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "axes": [2, 3],
                "keepdims": True,
                "exclude": False
            },
        )

        assert mean_op_support(X, [], [])

        X = XLayer(
            type=["Mean"],
            name="layer1",
            shapes=[-1, 1, 4, 4],
            sizes=[16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={
                "axes": [1],
                "keepdims": True,
                "exclude": False
            },
        )

        assert not mean_op_support(X, [], [])

    def test_prelu_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import prelu_op_support

        X = XLayer(
            type=["pReLU"],
            name="layer1",
            shapes=[-1, 2, 1, 1],
            sizes=[2],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"alpha": 0.1},
        )

        assert prelu_op_support(X, [], [])

        X = XLayer(
            type=["pReLU"],
            name="layer1",
            shapes=[-1, 1, 4, 4],
            sizes=[16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"alpha": 0.2},
        )

        assert not prelu_op_support(X, [], [])

    def test_relu_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import relu_op_support

        X = XLayer(
            type=["ReLU"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={},
        )

        assert relu_op_support(X, [], [])

    def test_relu6_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import relu6_op_support

        X = XLayer(
            type=["ReLU6"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={},
        )

        assert relu6_op_support(X, [], [])

    def test_scale_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import scale_op_support

        X = XLayer(
            type=["Scale"],
            name="layer1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert scale_op_support(X, [], [])

        X = XLayer(
            type=["Scale"],
            name="layer1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )

        assert not scale_op_support(X, [], [])
Example #7
    @classmethod
    def tearDownClass(cls):

        target_registry = TargetRegistry()
        target_registry.unregister_target("test")
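
Several of these tearDownClass hooks unregister targets that other tests may already have removed. A defensive variant (a hypothetical helper, composed only from calls shown in these examples) guards the call with is_target:

    # Hypothetical helper: unregister a target only if it is still registered.
    def safe_unregister(registry, name):
        if registry.is_target(name):
            registry.unregister_target(name)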
Example #8
class TestSubgraphBuildFunc(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def xgraph_build_func_simple(xgraph):
            return subgraph.xgraph_build_func(
                xgraph=xgraph,
                target='test_simple',
                xtype='TEST_SIMPLE',
                layout='NCHW',
            )

        def xgraph_build_func(xgraph):
            return subgraph.xgraph_build_func(xgraph=xgraph,
                                              target='test',
                                              xtype='TEST',
                                              layout='NHWC')

        def xgraph_optimizer(xgraph):
            raise NotImplementedError("")

        def xgraph_quantizer(xgraph):
            raise NotImplementedError("")

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        TestSubgraphBuildFunc.target_registry.register_target(
            'test', xgraph_optimizer, xgraph_quantizer, xgraph_compiler,
            xgraph_build_func)
        TestSubgraphBuildFunc.target_registry.register_target(
            'test_simple', xgraph_optimizer, xgraph_quantizer, xgraph_compiler,
            xgraph_build_func_simple)

        @register_op_support_check('test', 'Convolution')
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test', 'Pooling')
        def pooling_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test', 'Concat')
        def concat_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test_simple', 'Convolution')
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test_simple', 'Pooling')
        def pooling_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test_simple', 'Concat')
        def concat_op_support(X, bXs, tXs):
            return True

    @classmethod
    def tearDownClass(cls):

        TestSubgraphBuildFunc.target_registry.unregister_target('test')
        TestSubgraphBuildFunc.target_registry.unregister_target('test_simple')

    def test_basic(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=[],
                   tops=['add1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['add1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='add1',
                   type=['Eltwise'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1', 'in2'],
                   tops=[],
                   layer=['add1'],
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test_simple'])
        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test_simple')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        # print(layers)
        assert len(dpu_xgraph) == 5

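        # The build func is expected to collapse conv1 and pool1 into a single
        # 'TEST_SIMPLE' subgraph layer whose output surfaces via TupleGetItem.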
        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'TEST_SIMPLE'
        assert layers[2].type[0] == 'TupleGetItem'
        assert layers[3].type[0] == 'Input'
        assert layers[4].type[0] == 'Eltwise'

        assert layers[0].bottoms == []
        assert layers[0].tops == ['xp0']

        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['pool1']
        assert layers[1].attrs['target'] == 'test_simple'
        assert layers[1].attrs['input_names'] == ['xinput0']
        assert layers[1].attrs['output_names'] == ['pool1']
        assert layers[1].attrs['input_layers']['xinput0'] == ['conv1']
        assert layers[1].attrs['output_layers']['pool1'] == ['pool1']
        assert layers[1].attrs['__bottom_tensors'] == {'xinput0': ['in1']}
        assert layers[1].attrs['__top_tensors'] == {'pool1': ['add1']}

        assert layers[2].bottoms == ['xp0']
        assert layers[2].tops == ['add1']

        assert layers[3].bottoms == []
        assert layers[3].tops == ['add1']

        assert layers[4].bottoms == ['pool1', 'in2']
        assert layers[4].tops == []

    def test_two_partitions_interrupt(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1', 'bn1'],
                   layer=['conv1'],
                   targets=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   }),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 4, 3, 3],
                   sizes=[36],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   }),
            XLayer(name='bn1',
                   type=['BatchNorm'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['bn1'],
                   data=BatchData(np.array([1, 1]), np.array([0, 0]),
                                  np.array([1, 1]), np.array([0, 0])),
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 6, 3, 3],
                   sizes=[54],
                   bottoms=['pool1', 'bn1'],
                   tops=['conv2'],
                   layer=['concat1'],
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 10, 2, 2],
                   sizes=[40],
                   bottoms=['concat1'],
                   tops=[],
                   layer=['conv2'],
                   targets=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   })
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory\
            .build_from_xlayer(net)

        p_xgraph = partition(xgraph, ['test_simple'])

        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test_simple')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        assert len(dpu_xgraph) == 7

        assert layers[0].type[0] == 'Input'
        assert layers[0].bottoms == []
        assert layers[0].tops == ['xp0']

        assert layers[1].type[0] == 'TEST_SIMPLE'
        assert layers[1].shapes == [[1, 2, 3, 3], [1, 4, 3, 3]]
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['conv1', 'pool1']
        assert layers[1].attrs['input_names'] == ['xinput0']
        assert set(layers[1].attrs['output_names']) == set(['pool1', 'conv1'])
        assert layers[1].attrs['target'] == 'test_simple'
        assert layers[1].attrs['__bottom_tensors'] == {'xinput0': ['in1']}
        assert layers[1].attrs['orig_bottom_tensors'] == {'xinput0': ['in1']}
        assert layers[1].attrs['__top_tensors'] == \
            {'conv1': ['bn1'], 'pool1': ['concat1']}
        assert layers[1].attrs['orig_top_tensors'] == \
            {'conv1': ['bn1'], 'pool1': ['concat1']}

        assert layers[2].type[0] == 'TupleGetItem'
        assert layers[2].name == 'pool1'
        assert layers[2].bottoms == ['xp0']
        assert layers[2].shapes == [1, 4, 3, 3]
        assert layers[2].tops == ['concat1']
        assert layers[2].attrs['index'] == 1

        assert layers[3].type[0] == 'TupleGetItem'
        assert layers[3].name == 'conv1'
        assert layers[3].bottoms == ['xp0']
        assert layers[3].shapes == [1, 2, 3, 3]
        assert layers[3].tops == ['bn1']
        assert layers[3].attrs['index'] == 0

        assert layers[4].type[0] == 'BatchNorm'
        assert layers[4].name == 'bn1'
        assert layers[4].bottoms == ['conv1']
        assert layers[4].shapes == [1, 2, 3, 3]
        assert layers[4].tops == ['concat1']

        assert layers[5].type[0] == 'Concat'
        assert layers[5].name == 'concat1'
        assert layers[5].bottoms == ['pool1', 'bn1']
        assert layers[5].shapes == [1, 6, 3, 3]
        assert layers[5].tops == ['conv2']

        assert layers[6].type[0] == 'Convolution'
        assert layers[6].name == 'conv2'
        assert layers[6].bottoms == ['concat1']
        assert layers[6].shapes == [1, 10, 2, 2]
        assert layers[6].tops == []

    def test_basic_diff_layout(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=[],
                   tops=['add1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['add1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='add1',
                   type=['Eltwise'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1', 'in2'],
                   tops=[],
                   layer=['add1'],
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test'])
        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        # print(layers)
        assert (len(dpu_xgraph) == 6)

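        # The 'test' target builds with layout='NHWC' (see setUpClass), so a
        # Transpose is expected between the NCHW input and the subgraph.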
        assert (layers[0].type[0] == 'Input')
        assert (layers[0].name == 'in1')
        assert (layers[0].bottoms == [])
        assert (layers[0].tops == ['conv1_bottom_NCHW-NHWC'])

        assert (layers[1].type[0] == 'Transpose')
        assert (layers[1].name == 'conv1_bottom_NCHW-NHWC')
        assert (layers[1].bottoms == ['in1'])
        assert (layers[1].tops == ['xp0'])

        assert (layers[2].type[0] == 'TEST')
        assert (layers[2].bottoms == ['conv1_bottom_NCHW-NHWC'])
        assert (layers[2].tops == ['pool1'])
        assert (layers[2].attrs['target'] == 'test')
        assert (layers[2].attrs['input_names'] == ['xinput0'])
        assert (layers[2].attrs['output_names'] == ['pool1'])
        assert (layers[2].attrs['input_layers']['xinput0'] == ['conv1'])
        assert (layers[2].attrs['output_layers']['pool1'] == ['pool1'])
        assert (layers[2].attrs['__bottom_tensors'] == {
            'xinput0': ['conv1_bottom_NCHW-NHWC']
        })
        assert (layers[2].attrs['orig_bottom_tensors'] == {'xinput0': ['in1']})
        assert (layers[2].attrs['__top_tensors'] == {
            'pool1': ['pool1_top_NHWC-NCHW']
        })
        assert (layers[2].attrs['orig_top_tensors'] == {'pool1': ['add1']})

        assert (layers[3].type[0] == 'TupleGetItem')
        assert (layers[3].bottoms == ['xp0'])
        assert (layers[3].tops == ['add1'])
        assert layers[3].attrs['transpose'] is True
        assert layers[3].attrs['axes'] == [0, 3, 1, 2]

        # assert(layers[4].type[0] == 'Transpose')
        # assert(layers[4].name == 'pool1_top_NHWC-NCHW')
        # assert(layers[4].bottoms == ['pool1'])
        # assert(layers[4].tops == ['add1'])
        # assert layers[4].attrs['axes'] == [0, 3, 1, 2]

        assert layers[4].type[0] == 'Input'
        assert layers[4].name == 'in2'
        assert layers[4].bottoms == []
        assert layers[4].tops == ['add1']

        assert layers[5].type[0] == 'Eltwise'
        assert layers[5].name == 'add1'
        assert layers[5].bottoms == ['pool1', 'in2']
        assert layers[5].tops == []

    def test_two_partition_inputs(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv2'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['in2'],
                   tops=['concat1'],
                   layer=['conv2'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 4, 2, 2],
                   sizes=[16],
                   bottoms=['pool1', 'conv2'],
                   tops=['dense1'],
                   layer=['concat1'],
                   attrs={'axis': 1},
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['dense1'],
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test'])
        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        assert len(dpu_xgraph) == 7

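        # Both convolution branches are expected to merge into one subgraph
        # ('xp2') fed by two transposed inputs.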
        assert layers[0].type[0] == 'Input'
        assert layers[0].name == 'in1'
        assert layers[0].bottoms == []
        assert layers[0].tops == ['conv1_bottom_NCHW-NHWC']
        assert layers[0].target == 'cpu'
        assert layers[0].subgraph is None

        assert layers[1].type[0] == 'Transpose'
        assert layers[1].name == 'conv1_bottom_NCHW-NHWC'
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['xp2']
        assert layers[1].target == 'cpu'
        assert layers[1].subgraph is None

        assert layers[2].type[0] == 'Input'
        assert layers[2].name == 'in2'
        assert layers[2].bottoms == []
        assert layers[2].tops == ['conv2_bottom_NCHW-NHWC']
        assert layers[2].target == 'cpu'
        assert layers[2].subgraph is None

        assert layers[3].type[0] == 'Transpose'
        assert layers[3].name == 'conv2_bottom_NCHW-NHWC'
        assert layers[3].bottoms == ['in2']
        assert layers[3].tops == ['xp2']
        assert layers[3].target == 'cpu'
        assert layers[3].subgraph is None

        assert layers[4].type[0] == 'TEST'
        assert layers[4].name == 'xp2'
        assert layers[4].bottoms == [
            'conv1_bottom_NCHW-NHWC', 'conv2_bottom_NCHW-NHWC'
        ]
        assert layers[4].tops == ['concat1']
        assert layers[4].attrs['target'] == 'test'
        assert layers[4].attrs['input_names'] == ['xinput0', 'xinput1']
        assert layers[4].attrs['output_names'] == ['concat1']
        assert layers[4].attrs['input_layers']['xinput0'] == ['conv1']
        assert layers[4].attrs['input_layers']['xinput1'] == ['conv2']
        assert layers[4].attrs['output_layers']['concat1'] == ['concat1']
        assert (layers[4].attrs['__bottom_tensors'] == {
            'xinput0': ['conv1_bottom_NCHW-NHWC'],
            'xinput1': ['conv2_bottom_NCHW-NHWC']
        })
        assert (layers[4].attrs['orig_bottom_tensors'] == {
            'xinput0': ['in1'],
            'xinput1': ['in2']
        })
        assert layers[4].attrs['__top_tensors'] == {
            'concat1': ['merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW']
        }
        assert layers[4].attrs['orig_top_tensors'] == {'concat1': ['dense1']}
        assert layers[4].target == 'cpu'
        assert layers[4].subgraph is None

        assert layers[5].type[0] == 'TupleGetItem'
        assert layers[5].name == 'concat1'
        assert layers[5].bottoms == ['xp2']
        assert layers[5].tops == ['dense1']
        assert layers[5].target == 'cpu'
        assert layers[5].subgraph is None
        assert layers[5].attrs['transpose'] is True

        # assert layers[6].type[0] == 'Transpose'
        # assert layers[6].name ==\
        #     'merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW'
        # assert layers[6].bottoms == ['concat1']
        # assert layers[6].tops == ['dense1']
        # assert layers[6].target == 'cpu'
        # assert layers[6].subgraph is None

        assert layers[6].type[0] == 'Dense'
        assert layers[6].name == 'dense1'
        assert layers[6].bottoms == ['concat1']
        assert layers[6].tops == []
        assert layers[6].target == 'cpu'
        assert layers[6].subgraph is None

    def test_two_partition_diff_layout(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 4, 4, 1],
                   sizes=[16],
                   bottoms=[],
                   tops=['in2_transpose'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='in2_transpose',
                   type=['Transpose'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=['in2'],
                   tops=['conv2'],
                   layer=['in2'],
                   attrs={'axes': [0, 3, 1, 2]},
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['in2_transpose'],
                   tops=['concat1'],
                   layer=['conv2'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 4, 2, 2],
                   sizes=[16],
                   bottoms=['pool1', 'conv2'],
                   tops=['concat1_transpose'],
                   layer=['concat1'],
                   attrs={'axis': 1},
                   targets=[]),
            XLayer(name='concat1_transpose',
                   type=['Transpose'],
                   shapes=[1, 2, 2, 4],
                   sizes=[16],
                   bottoms=['concat1'],
                   tops=['dense1'],
                   layer=['concat1'],
                   attrs={'axes': [0, 2, 3, 1]},
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[],
                   bottoms=['concat1_transpose'],
                   tops=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['dense1'],
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test'])
        p_xlayers = p_xgraph.get_layers()

        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        assert len(dpu_xgraph) == 6

        assert layers[0].type[0] == 'Input'
        assert layers[0].name == 'in1'
        assert layers[0].shapes == [1, 1, 4, 4]
        assert layers[0].bottoms == []
        assert layers[0].tops == ['conv1_bottom_NCHW-NHWC']
        assert layers[0].target == 'cpu'
        assert layers[0].subgraph is None

        assert layers[1].type[0] == 'Transpose'
        assert layers[1].name == 'conv1_bottom_NCHW-NHWC'
        assert layers[1].shapes == [1, 4, 4, 1]
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['xp2']
        assert layers[1].target == 'cpu'
        assert layers[1].subgraph is None

        assert layers[2].type[0] == 'Input'
        assert layers[2].name == 'in2'
        assert layers[2].shapes == [1, 4, 4, 1]
        assert layers[2].bottoms == []
        assert layers[2].tops == ['xp2']
        assert layers[2].target == 'cpu'
        assert layers[2].subgraph is None

        assert layers[3].type[0] == 'TEST'
        assert layers[3].name == 'xp2'
        assert layers[3].shapes == [[1, 2, 2, 4]]
        assert layers[3].bottoms == ['conv1_bottom_NCHW-NHWC', 'in2']
        assert layers[3].tops == ['concat1']
        assert layers[3].target == 'cpu'
        assert layers[3].subgraph is None
        assert layers[3].attrs['target'] == 'test'
        assert layers[3].attrs['input_names'] == ['xinput0', 'xinput1']
        assert layers[3].attrs['output_names'] == ['concat1']
        assert layers[3].attrs['input_layers']['xinput0'] == ['conv1']
        assert layers[3].attrs['input_layers']['xinput1'] == ['conv2']
        assert layers[3].attrs['output_layers']['concat1'] == ['concat1']
        assert (layers[3].attrs['__bottom_tensors'] == {
            'xinput0': ['conv1_bottom_NCHW-NHWC'],
            'xinput1': ['in2']
        })
        assert (layers[3].attrs['orig_bottom_tensors'] == {
            'xinput0': ['in1'],
            'xinput1': ['in2']
        })
        assert layers[3].attrs['__top_tensors'] == {'concat1': ['dense1']}
        assert layers[3].attrs['orig_top_tensors'] == {'concat1': ['dense1']}

        assert layers[4].type[0] == 'TupleGetItem'
        assert layers[4].name == 'concat1'
        assert layers[4].shapes == [1, 2, 2, 4]
        assert layers[4].bottoms == ['xp2']
        assert layers[4].tops == ['dense1']
        assert layers[4].target == 'cpu'
        assert layers[4].subgraph is None

        assert layers[5].type[0] == 'Dense'
        assert layers[5].name == 'dense1'
        assert layers[5].shapes == [1, 20]
        assert layers[5].bottoms == ['concat1']
        assert layers[5].tops == []
        assert layers[5].target == 'cpu'
        assert layers[5].subgraph is None

    def test_two_partition_inputs_complex(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv2'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['in2'],
                   tops=['concat1'],
                   layer=['conv2'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 4, 2, 2],
                   sizes=[16],
                   bottoms=['pool1', 'conv2'],
                   tops=['dense1'],
                   layer=['concat1'],
                   attrs={'axis': 1},
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['dense1'],
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test'])
        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        assert len(dpu_xgraph) == 7

        assert layers[0].type[0] == 'Input'
        assert layers[0].name == 'in1'
        assert layers[0].shapes == [1, 1, 4, 4]
        assert layers[0].bottoms == []
        assert layers[0].tops == ['conv1_bottom_NCHW-NHWC']
        assert layers[0].target == 'cpu'
        assert layers[0].subgraph is None

        assert layers[1].type[0] == 'Transpose'
        assert layers[1].name == 'conv1_bottom_NCHW-NHWC'
        assert layers[1].shapes == [1, 4, 4, 1]
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['xp2']
        assert layers[1].target == 'cpu'
        assert layers[1].subgraph is None

        assert layers[2].type[0] == 'Input'
        assert layers[2].name == 'in2'
        assert layers[2].shapes == [1, 1, 4, 4]
        assert layers[2].bottoms == []
        assert layers[2].tops == ['conv2_bottom_NCHW-NHWC']
        assert layers[2].target == 'cpu'
        assert layers[2].subgraph is None

        assert layers[3].type[0] == 'Transpose'
        assert layers[3].name == 'conv2_bottom_NCHW-NHWC'
        assert layers[3].shapes == [1, 4, 4, 1]
        assert layers[3].bottoms == ['in2']
        assert layers[3].tops == ['xp2']
        assert layers[3].target == 'cpu'
        assert layers[3].subgraph is None

        assert layers[4].type[0] == 'TEST'
        assert layers[4].name == 'xp2'
        assert layers[4].shapes == [[1, 2, 2, 4]]
        assert layers[4].bottoms == [
            'conv1_bottom_NCHW-NHWC', 'conv2_bottom_NCHW-NHWC'
        ]
        assert layers[4].tops == ['concat1']
        assert layers[4].target == 'cpu'
        assert layers[4].subgraph is None
        assert layers[4].attrs['target'] == 'test'
        assert layers[4].attrs['input_names'] == ['xinput0', 'xinput1']
        assert layers[4].attrs['output_names'] == ['concat1']
        assert layers[4].attrs['input_layers']['xinput0'] == ['conv1']
        assert layers[4].attrs['input_layers']['xinput1'] == ['conv2']
        assert layers[4].attrs['output_layers']['concat1'] == ['concat1']
        assert (layers[4].attrs['__bottom_tensors'] == {
            'xinput0': ['conv1_bottom_NCHW-NHWC'],
            'xinput1': ['conv2_bottom_NCHW-NHWC']
        })
        assert (layers[4].attrs['orig_bottom_tensors'] == {
            'xinput0': ['in1'],
            'xinput1': ['in2']
        })
        assert layers[4].attrs['__top_tensors'] ==\
            {'concat1':
                ['merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW']}
        assert layers[4].attrs['orig_top_tensors'] ==\
            {'concat1': ['dense1']}

        assert layers[5].type[0] == 'TupleGetItem'
        assert layers[5].name == 'concat1'
        assert layers[5].shapes == [1, 4, 2, 2]
        assert layers[5].bottoms == ['xp2']
        assert layers[5].tops == ['dense1']
        assert layers[5].attrs['transpose'] is True
        assert layers[5].attrs['axes'] == [0, 3, 1, 2]

        # assert layers[6].type[0] == 'Transpose'
        # assert layers[6].name ==\
        #     'merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW'
        # assert layers[6].shapes == [1, 4, 2, 2]
        # assert layers[6].bottoms == ['concat1']
        # assert layers[6].tops == ['dense1']

        assert layers[6].type[0] == 'Dense'
        assert layers[6].name == 'dense1'
        assert layers[6].shapes == [1, 20]
        assert layers[6].bottoms == ['concat1']
        assert layers[6].tops == []

    def test_inception_like_block(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['concat1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['concat1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 2, 4, 4],
                   sizes=[32],
                   bottoms=['in1', 'in2'],
                   tops=['conv1', 'conv2'],
                   layer=['concat1'],
                   attrs={'axis': 1},
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 4, 3, 3],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 4, 2, 2],
                   sizes=[],
                   bottoms=['conv1'],
                   tops=['concat2'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 4, 2, 2],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=['concat2'],
                   layer=['conv2'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   },
                   targets=[]),
            XLayer(name='concat2',
                   type=['Concat'],
                   shapes=[1, 8, 2, 2],
                   sizes=[32],
                   bottoms=['pool1', 'conv2'],
                   tops=['dense1'],
                   layer=['concat2'],
                   attrs={'axis': 1},
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[20],
                   bottoms=['concat2'],
                   tops=[],
                   layer=['dense1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   targets=[])
        ]
        xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['test'])
        dpu_xgraph = TestSubgraphBuildFunc.target_registry\
            .get_target_build_func('test')(p_xgraph)

        layers = dpu_xgraph.get_layers()
        assert len(dpu_xgraph) == 7

        assert layers[0].type[0] == 'Input'
        assert layers[0].name == 'in1'
        assert layers[0].shapes == [1, 1, 4, 4]
        assert layers[0].bottoms == []
        assert layers[0].tops ==\
            ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
        assert layers[0].target == 'cpu'
        assert layers[0].subgraph is None

        assert layers[1].type[0] == 'Transpose'
        assert layers[1].name ==\
            '0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC'
        assert layers[1].shapes == [1, 4, 4, 1]
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['xp0']
        assert layers[1].target == 'cpu'
        assert layers[1].subgraph is None

        assert layers[2].type[0] == 'Input'
        assert layers[2].name == 'in2'
        assert layers[2].shapes == [1, 1, 4, 4]
        assert layers[2].bottoms == []
        assert layers[2].tops ==\
            ['1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
        assert layers[2].target == 'cpu'
        assert layers[2].subgraph is None

        assert layers[3].type[0] == 'Transpose'
        assert layers[3].name ==\
            '1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC'
        assert layers[3].shapes == [1, 4, 4, 1]
        assert layers[3].bottoms == ['in2']
        assert layers[3].tops == ['xp0']
        assert layers[3].target == 'cpu'
        assert layers[3].subgraph is None

        assert layers[4].type[0] == 'TEST'
        assert layers[4].name == 'xp0'
        assert layers[4].shapes == [[1, 2, 2, 8]]
        assert layers[4].bottoms ==\
            ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC',
             '1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
        assert layers[4].tops == ['concat2']
        assert layers[4].target == 'cpu'
        assert layers[4].subgraph is None
        assert layers[4].attrs['target'] == 'test'
        assert layers[4].attrs['input_names'] == ['xinput0', 'xinput1']
        assert layers[4].attrs['output_names'] == ['concat2']
        assert layers[4].attrs['input_layers']['xinput0'] == ['concat1']
        assert layers[4].attrs['input_layers']['xinput1'] == ['concat1']
        assert layers[4].attrs['output_layers']['concat2'] == ['concat2']
        assert (layers[4].attrs['__bottom_tensors'] == {
            'xinput0':
            ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom'
             '_NCHW-NHWC'],
            'xinput1':
            ['1_split_conv1_bottom_NCHW-NHWC_conv2_bottom'
             '_NCHW-NHWC']
        })
        assert (layers[4].attrs['orig_bottom_tensors'] == {
            'xinput0': ['in1'],
            'xinput1': ['in2']
        })
        assert layers[4].attrs['__top_tensors'] ==\
            {'concat2':
                ['merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW']}
        assert layers[4].attrs['orig_top_tensors'] ==\
            {'concat2': ['dense1']}

        assert layers[5].type[0] == 'TupleGetItem'
        assert layers[5].name == 'concat2'
        assert layers[5].shapes == [1, 8, 2, 2]
        assert layers[5].bottoms == ['xp0']
        assert layers[5].tops == ['dense1']

        # assert layers[6].type[0] == 'Transpose'
        # assert layers[6].name ==\
        #     'merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW'
        # assert layers[6].shapes == [1, 8, 2, 2]
        # assert layers[6].bottoms == ['concat2']
        # assert layers[6].tops == ['dense1']

        assert layers[6].type[0] == 'Dense'
        assert layers[6].name == 'dense1'
        assert layers[6].shapes == [1, 20]
        assert layers[6].bottoms == ['concat2']
        assert layers[6].tops == []
Example No. 9
    def test_two_partitions_through_interruption(self):
        # A layer inside a residual-type branch is not supported
        # Here: BatchNorm (see the op-support sketch after this test)
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1', 'bn1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv1'],
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 4, 3, 3],
                   sizes=[36],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='bn1',
                   type=['BatchNorm'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   data=BatchData(np.array([1, 1]), np.array([0, 0]),
                                  np.array([1, 1]), np.array([0, 0])),
                   layer=['bn1'],
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 6, 3, 3],
                   sizes=[54],
                   bottoms=['pool1', 'bn1'],
                   tops=['conv2'],
                   layer=['concat1'],
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 10, 2, 2],
                   sizes=[40],
                   bottoms=['concat1'],
                   tops=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv2'],
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert len(p_xgraph.get_layer_names()) == 6
        assert p_xgraph.get_subgraph_names() == ['xp0']

        p_xlayers = p_xgraph.get_layers()
        assert p_xlayers[0].type[0] in ['Input']
        assert p_xlayers[1].type[0] in ['Convolution']
        assert p_xlayers[2].type[0] in ['Pooling']
        assert p_xlayers[3].type[0] in ['BatchNorm']
        assert p_xlayers[4].type[0] in ['Concat']
        assert p_xlayers[5].type[0] in ['Convolution']

        assert p_xlayers[0].target == 'cpu'
        assert p_xlayers[1].target == 'test'
        assert p_xlayers[2].target == 'test'
        assert p_xlayers[3].target == 'cpu'
        assert p_xlayers[4].target == 'cpu'
        assert p_xlayers[5].target == 'cpu'

        assert p_xlayers[0].subgraph is None
        assert p_xlayers[1].subgraph == 'xp0'
        assert p_xlayers[2].subgraph == 'xp0'
        assert p_xlayers[3].subgraph is None
        assert p_xlayers[4].subgraph is None
        assert p_xlayers[5].subgraph is None

        assert p_xlayers[3].name == 'bn1'
        assert p_xlayers[3].bottoms == ['conv1']
        assert p_xlayers[3].tops == ['concat1']

        assert p_xlayers[4].name == 'concat1'
        assert p_xlayers[4].bottoms == ['pool1', 'bn1']
        assert p_xlayers[4].tops == ['conv2']

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert len(subgraphs) == 1
        xp0 = subgraphs[0]
        assert xp0.name == 'xp0'
        xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp0.subgraph_data)

        assert xp0.bottoms == ['in1']
        assert xp0.tops == ['bn1', 'concat1']
        assert xp0.shapes == [[1, 2, 3, 3], [1, 4, 3, 3]]
        assert xp0.sizes == [18, 36]
        assert xp0.attrs['target'] == 'test'
        assert xp0.attrs['__bottom_tensors'] == {'xinput0': ['in1']}
        assert xp0.attrs['orig_bottom_tensors'] == {'xinput0': ['in1']}
        assert xp0.attrs['__top_tensors'] == \
            {'conv1': ['bn1'], 'pool1': ['concat1']}
        assert xp0.attrs['orig_top_tensors'] == \
            {'conv1': ['bn1'], 'pool1': ['concat1']}

        assert (len(xp0_xgraph) == 3)
        xp0_layers = xp0_xgraph.get_layers()

        assert [X.name for X in xp0_xgraph.get_input_layers()] == ['xinput0']
        # TODO: XGraph only recognizes output layers when they have no top
        #   layers
        assert [X.name for X in xp0_xgraph.get_output_layers()] ==\
            ['pool1']

        assert xp0_layers[0].type[0] == 'Input'
        assert xp0_layers[0].layer[0] == 'conv1'
        assert xp0_layers[1].type[0] == 'Convolution'
        assert xp0_layers[2].type[0] == 'Pooling'

        assert xp0_layers[0].bottoms == []
        assert xp0_layers[0].tops == ['conv1']
        assert xp0_layers[1].bottoms == ['xinput0']
        assert xp0_layers[1].tops == ['pool1']
        assert xp0_layers[2].bottoms == ['conv1']
        assert xp0_layers[2].tops == []
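
A minimal sketch of the op-support mechanism this test exercises, assuming the
same `register_op_support_check` API used in the other examples. In the test
itself the interruption comes from the 'test' target simply having no
'BatchNorm' check registered; an explicit rejection would look like this:

@register_op_support_check('test', 'BatchNorm')
def batchnorm_op_support(X, bXs, tXs):
    # X is the candidate XLayer, bXs/tXs its bottom and top XLayers;
    # returning False keeps the layer on 'cpu', splitting the partition
    return False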
Example No. 10
class TestDPUContrib(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()
    rt_manager = RtManager()

    @classmethod
    def setUpClass(cls):
        # Import DPU module
        from pyxir.contrib.dpuv1 import dpuv1

        # from pyxir.contrib.dpuv1.dpuv1_target import\
        #     xgraph_dpu_v1_optimizer,\
        #     xgraph_dpu_v1_quantizer,\
        #     xgraph_dpu_v1_compiler,\
        #     xgraph_dpu_v1_build_func

        # pyxir.register_target(
        #     'dpuv1',
        #     xgraph_dpu_v1_optimizer,
        #     xgraph_dpu_v1_quantizer,
        #     xgraph_dpu_v1_compiler,
        #     xgraph_dpu_v1_build_func
        # )

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDPUContrib.target_registry.unregister_target("dpuv1")
        TestDPUContrib.target_registry.unregister_target("DPUCADX8G")

    def test_supported_ops(self):
        dpuv1_ops = TestDPUContrib.target_registry.get_supported_op_check_names(
            "dpuv1")

        assert "BatchNorm" in dpuv1_ops
        assert "BiasAdd" in dpuv1_ops
        assert "Concat" in dpuv1_ops
        assert "Convolution" in dpuv1_ops
        assert "Conv2DTranspose" in dpuv1_ops
        assert "DPU" in dpuv1_ops
        assert "Eltwise" in dpuv1_ops
        assert "Pad" in dpuv1_ops
        assert "Pooling" in dpuv1_ops
        assert "Mean" in dpuv1_ops
        assert "pReLU" in dpuv1_ops
        assert "ReLU" in dpuv1_ops
        assert "Scale" in dpuv1_ops

    @unittest.skipIf(
        skip_tf,
        "Skipping Tensorflow related test because tensorflow is"
        " not available",
    )
    def test_import_ext_quantizer(self):
        if TestDPUContrib.target_registry.is_target("DPUCADX8G"):
            TestDPUContrib.target_registry.unregister_target("DPUCADX8G")
        if TestDPUContrib.rt_manager.exists_op("cpu-np", "DPU"):
            TestDPUContrib.rt_manager.unregister_op("cpu-np", "DPU")
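        # The import below is expected to (re-)register the DPUCADX8G target
        # together with an external quantizer as a side effect, hence the
        # cleanup above (a reading of this test, not verified against pyxir)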
        from pyxir.contrib.target import DPUCADX8G_external_quantizer
Example No. 11
    @classmethod
    def tearDownClass(cls):
        target_registry = TargetRegistry()
        target_registry.unregister_target('test')
        target_registry.unregister_target('qsim')
Example No. 12
class TestONNXFrontend(unittest.TestCase):

    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def xgraph_build_func(xgraph):
            raise NotImplementedError("")

        def xgraph_optimizer(xgraph, target):
            return xgraph

        def xgraph_quantizer(xgraph, inputs_func, **kwargs):
            # test_quant_file = os.path.join(FILE_DIR, 'test_quant_info.txt')
            # open(test_quant_file, 'w').close()
            # q_output = QuantizerOutput('xgraph')
            # q_output.add('xp0', None, test_quant_file, None)
            # xgraph.set_quantizer_output(q_output)
            for X in xgraph.get_layers():
                if 'Convolution' in X.type:
                    X.attrs['vai_quant'] = [
                        'vai_quant_in', 'vai_quant_out', 'vai_quant_weights',
                        'vai_quant_biases'
                    ]
                    X.attrs['vai_quant_in'] = [8, 8]
                    X.attrs['vai_quant_out'] = [8, 5]
                    X.attrs['vai_quant_weights'] = [5, 8]
                    X.attrs['vai_quant_biases'] = [5, 5]
                if 'Pooling' in X.type:
                    X.attrs['vai_quant'] = ['vai_quant_in', 'vai_quant_out']
                    X.attrs['vai_quant_in'] = [8, 8]
                    X.attrs['vai_quant_out'] = [8, 5]
            return xgraph

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        cls.target_registry.register_target('test_dpu', xgraph_optimizer,
                                            xgraph_quantizer, xgraph_compiler,
                                            xgraph_build_func)

        @register_op_support_check('test_dpu', 'Convolution')
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test_dpu', 'BiasAdd')
        def biasadd_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test_dpu', 'Pooling')
        def pooling_op_support(X, bXs, tXs):
            return True

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestONNXFrontend.target_registry.unregister_target('test_dpu')

    def test_simple_model(self):
        x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                          [None, 1, 4, 4])
        x_val = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                            [13, 14, 15, 16]]]]).astype(np.float32)
        # x_init = helper.make_tensor('x', TensorProto.FLOAT, (1, 1, 4, 4),
        #                             list(x_val.reshape(-1)))

        # Create one output (ValueInfoProto)
        z = helper.make_tensor_value_info('z', TensorProto.FLOAT,
                                          [None, 2, 2, 2])

        W_val = np.array([[[[1, 1], [1, 1]]], [[[1, -1],
                                                [1, 1]]]]).astype(np.float32)
        W = helper.make_tensor('W', TensorProto.FLOAT, (2, 1, 2, 2),
                               list(W_val.reshape(-1)))

        B_val = np.array([1, -1]).astype(np.float32)
        B = helper.make_tensor('B', TensorProto.FLOAT, (2, ),
                               list(B_val.reshape((-1))))

        conv_node = onnx.helper.make_node('Conv',
                                          inputs=['x', 'W', 'B'],
                                          outputs=['y'],
                                          kernel_shape=[2, 2],
                                          pads=[1, 1, 0, 0])

        pool_node = onnx.helper.make_node('AveragePool',
                                          inputs=['y'],
                                          outputs=['z'],
                                          kernel_shape=[2, 2],
                                          pads=[0, 0, 0, 0],
                                          strides=[2, 2])

        # Create the graph (GraphProto)
        graph_def = onnx.helper.make_graph(
            [conv_node, pool_node],
            'test-model',
            [x],
            [z],
            [W, B]  # x_init
        )

        # Create the model (ModelProto)
        model_def = onnx.helper.make_model(graph_def,
                                           producer_name='onnx-example')

        xgraph = from_onnx(model_def)

        xlayers = xgraph.get_layers()
        assert len(xlayers) == 4

        assert xlayers[0].name == 'x'
        assert xlayers[0].type[0] == 'Input'
        assert xlayers[0].shapes == [-1, 1, 4, 4]
        assert xlayers[0].attrs['onnx_id'] == 'x'

        assert xlayers[1].name == 'y_Conv'
        assert xlayers[1].type[0] == 'Convolution'
        assert xlayers[1].shapes == [-1, 2, 4, 4]
        assert xlayers[1].attrs['padding'] == [(0, 0), (0, 0), (1, 0), (1, 0)]
        assert xlayers[1].attrs['strides'] == [1, 1]
        assert xlayers[1].attrs['dilation'] == [1, 1]
        assert xlayers[1].attrs['kernel_size'] == [2, 2]
        assert xlayers[1].attrs['channels'] == [1, 2]
        assert xlayers[1].attrs['data_layout'] == 'NCHW'
        assert xlayers[1].attrs['kernel_layout'] == 'OIHW'
        assert xlayers[1].attrs['groups'] == 1
        assert xlayers[1].attrs['onnx_id'] == 'y'

        assert xlayers[2].name == 'y'
        assert xlayers[2].shapes == [-1, 2, 4, 4]
        assert xlayers[2].attrs['axis'] == 1
        assert xlayers[2].attrs['onnx_id'] == 'y'
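        # ('y' is the bias-add half of the split: an ONNX Conv with a B input
        # is imported as a Convolution 'y_Conv' followed by a BiasAdd 'y',
        # as the opaque-func test below confirms via xlayers[2].type)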

        assert xlayers[3].name == 'z'
        assert xlayers[3].shapes == [-1, 2, 2, 2]
        assert xlayers[3].type[0] == 'Pooling'
        assert xlayers[3].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert xlayers[3].attrs['strides'] == [2, 2]
        assert xlayers[3].attrs['kernel_size'] == [2, 2]
        assert xlayers[3].attrs['data_layout'] == 'NCHW'
        assert xlayers[3].attrs['type'] == 'Avg'
        assert xlayers[3].attrs['onnx_id'] == 'z'

    def test_simple_model_opaque_func(self):
        x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                          [None, 1, 4, 4])
        x_val = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                            [13, 14, 15, 16]]]]).astype(np.float32)
        # x_init = helper.make_tensor('x', TensorProto.FLOAT, (1, 1, 4, 4),
        #                             list(x_val.reshape(-1)))

        # Create one output (ValueInfoProto)
        z = helper.make_tensor_value_info('z', TensorProto.FLOAT,
                                          [None, 2, 2, 2])

        W_val = np.array([[[[1, 1], [1, 1]]], [[[1, -1],
                                                [1, 1]]]]).astype(np.float32)
        W = helper.make_tensor('W', TensorProto.FLOAT, (2, 1, 2, 2),
                               list(W_val.reshape(-1)))

        B_val = np.array([1, -1]).astype(np.float32)
        B = helper.make_tensor('B', TensorProto.FLOAT, (2, ),
                               list(B_val.reshape((-1))))

        conv_node = onnx.helper.make_node('Conv',
                                          inputs=['x', 'W', 'B'],
                                          outputs=['y'],
                                          kernel_shape=[2, 2],
                                          pads=[1, 1, 0, 0])

        pool_node = onnx.helper.make_node('AveragePool',
                                          inputs=['y'],
                                          outputs=['z'],
                                          kernel_shape=[2, 2],
                                          pads=[0, 0, 0, 0],
                                          strides=[2, 2])

        # Create the graph (GraphProto)
        graph_def = onnx.helper.make_graph(
            [conv_node, pool_node],
            'test-model',
            [x],
            [z],
            [W, B]  # x_init]
        )

        # Create the model (ModelProto)
        model_def = onnx.helper.make_model(graph_def,
                                           producer_name='onnx-example')
        test_file = os.path.join(FILE_DIR, 'test.onnx')
        onnx.save(model_def, test_file)

        xgraph = XGraph(name='test')
        of = OpaqueFuncRegistry.Get('pyxir.onnx.from_onnx')
        of(xgraph, test_file)

        assert xgraph.get_name() == 'test-model'

        xlayers = xgraph.get_layers()
        assert len(xlayers) == 4

        assert xlayers[0].name == 'x'
        assert xlayers[0].type[0] == 'Input'
        assert xlayers[0].shapes == [-1, 1, 4, 4]
        assert xlayers[0].attrs['onnx_id'] == 'x'

        assert xlayers[1].name == 'y_Conv'
        assert xlayers[1].type[0] == 'Convolution'
        assert xlayers[1].shapes == [-1, 2, 4, 4]
        assert xlayers[1].attrs['padding'] == [(0, 0), (0, 0), (1, 0), (1, 0)]
        assert xlayers[1].attrs['strides'] == [1, 1]
        assert xlayers[1].attrs['dilation'] == [1, 1]
        assert xlayers[1].attrs['kernel_size'] == [2, 2]
        assert xlayers[1].attrs['channels'] == [1, 2]
        assert xlayers[1].attrs['data_layout'] == 'NCHW'
        assert xlayers[1].attrs['kernel_layout'] == 'OIHW'
        assert xlayers[1].attrs['groups'] == 1
        assert xlayers[1].attrs['onnx_id'] == 'y'

        assert xlayers[2].name == 'y'
        assert xlayers[2].shapes == [-1, 2, 4, 4]
        assert xlayers[2].attrs['axis'] == 1
        assert xlayers[2].attrs['onnx_id'] == 'y'

        assert xlayers[3].name == 'z'
        assert xlayers[3].shapes == [-1, 2, 2, 2]
        assert xlayers[3].type[0] == 'Pooling'
        assert xlayers[3].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert xlayers[3].attrs['strides'] == [2, 2]
        assert xlayers[3].attrs['kernel_size'] == [2, 2]
        assert xlayers[3].attrs['data_layout'] == 'NCHW'
        assert xlayers[3].attrs['type'] == 'Avg'
        assert xlayers[3].attrs['onnx_id'] == 'z'

        of = OpaqueFuncRegistry.Get('pyxir.partition')
        of(xgraph, ['test_dpu'], "")

        assert xgraph.get_name() == 'test-model'
        assert len(xgraph) == 4

        xlayers = xgraph.get_layers()
        assert xlayers[0].name == 'x'
        assert xlayers[0].target == 'cpu'
        assert xlayers[0].subgraph is None

        assert xlayers[1].name == 'y_Conv'
        assert xlayers[1].target == 'test_dpu'
        assert xlayers[1].subgraph == 'xp0'

        assert xlayers[2].name == 'y'
        assert xlayers[2].type == ['BiasAdd']
        assert xlayers[2].target == 'test_dpu'
        assert xlayers[2].subgraph == 'xp0'

        assert xlayers[3].name == 'z'
        assert xlayers[3].type == ['Pooling']
        assert xlayers[3].target == 'test_dpu'
        assert xlayers[3].subgraph == 'xp0'

        os.remove(test_file)

    def test_prequantize_model(self):
        x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                          [None, 1, 4, 4])
        x_val = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                            [13, 14, 15, 16]]]]).astype(np.float32)
        # x_init = helper.make_tensor('x', TensorProto.FLOAT, (1, 1, 4, 4),
        #                             list(x_val.reshape(-1)))

        # Create one output (ValueInfoProto)
        z = helper.make_tensor_value_info('z', TensorProto.FLOAT,
                                          [None, 2, 2, 2])

        W_val = np.array([[[[1, 1], [1, 1]]], [[[1, -1],
                                                [1, 1]]]]).astype(np.float32)
        W = helper.make_tensor('W', TensorProto.FLOAT, (2, 1, 2, 2),
                               list(W_val.reshape(-1)))

        B_val = np.array([1, -1]).astype(np.float32)
        B = helper.make_tensor('B', TensorProto.FLOAT, (2, ),
                               list(B_val.reshape((-1))))

        conv_node = onnx.helper.make_node('Conv',
                                          inputs=['x', 'W', 'B'],
                                          outputs=['y'],
                                          kernel_shape=[2, 2],
                                          pads=[1, 0, 1, 0])

        pool_node = onnx.helper.make_node('AveragePool',
                                          inputs=['y'],
                                          outputs=['z'],
                                          kernel_shape=[2, 2],
                                          pads=[0, 0, 0, 0],
                                          strides=[2, 2])

        # Create the graph (GraphProto)
        graph_def = onnx.helper.make_graph(
            [conv_node, pool_node],
            'test-model',
            [x],
            [z],
            [W, B]  # x_init]
        )

        # Create the model (ModelProto)
        model_def = onnx.helper.make_model(graph_def,
                                           producer_name='onnx-example')

        test_file = os.path.join(FILE_DIR, 'test_pre.onnx')

        def inputs_func():
            pass

        prequantize_onnx_model(model_def, 'test_dpu', inputs_func, test_file)

        new_onnx_model = onnx.load(test_file)

        new_xgraph = from_onnx(new_onnx_model)
        assert new_xgraph.get('y_Conv').attrs['vai_quant'] == \
            ['vai_quant_in', 'vai_quant_out', 'vai_quant_weights',
             'vai_quant_biases']
        assert new_xgraph.get('y_Conv').attrs['vai_quant_in'] == [8, 8]

        assert new_xgraph.get('z').attrs['vai_quant'] == \
            ['vai_quant_in', 'vai_quant_out']
        assert new_xgraph.get('z').attrs['vai_quant_in'] == [8, 8]

        quant_file = os.path.join(FILE_DIR, "quant_info.txt")
        new_xgraph.save_quant_info_txt(quant_file)

        os.remove(test_file)
        os.remove(quant_file)
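
The no-op inputs_func above is enough for the mocked 'test_dpu' quantizer; a
real quantizer would call it to obtain calibration data keyed by graph input
name. A hedged sketch (the exact signature expected by pyxir is assumed here,
not taken from this file):

import numpy as np

def inputs_func(iter):
    # one batch of calibration data per call, keyed by graph input name
    return {'x': np.random.rand(1, 1, 4, 4).astype(np.float32)}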
Example No. 13
    def test_multi_top_tensors(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['t1', 't2'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='t1',
                   type=['Transpose'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1'],
                   tops=['s1'],
                   layer=['t1'],
                   internal=1,
                   targets=[],
                   attrs={'axes': [0, 2, 3, 1]}),
            XLayer(name='t2',
                   type=['Transpose'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1'],
                   tops=['s2', 's3'],
                   layer=['t2'],
                   internal=1,
                   targets=[],
                   attrs={'axes': [0, 2, 3, 1]}),
            XLayer(name='s1',
                   type=['Sqrt'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['t1'],
                   tops=[],
                   layer=['s1'],
                   internal=0,
                   targets=[]),
            XLayer(name='s2',
                   type=['Sqrt'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['t2'],
                   tops=[],
                   layer=['s2'],
                   internal=0,
                   targets=[]),
            XLayer(name='s3',
                   type=['Sqrt'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['t2'],
                   tops=[],
                   layer=['s3'],
                   internal=0,
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert len(p_xgraph.get_layer_names()) == 8
        assert p_xgraph.get_subgraph_names() == ['xp0']

        p_xlayers = p_xgraph.get_layers()
        assert p_xlayers[0].type[0] in ['Input']
        assert p_xlayers[1].type[0] in ['Convolution']
        assert p_xlayers[2].type[0] in ['Pooling']
        assert p_xlayers[3].type[0] in ['Transpose']
        assert p_xlayers[4].type[0] in ['Sqrt']
        assert p_xlayers[5].type[0] in ['Transpose']
        assert p_xlayers[6].type[0] in ['Sqrt']
        assert p_xlayers[7].type[0] in ['Sqrt']

        assert p_xlayers[0].target == 'cpu'
        assert p_xlayers[1].target == 'test'
        assert p_xlayers[2].target == 'test'
        assert p_xlayers[3].target == 'cpu'
        assert p_xlayers[4].target == 'cpu'
        assert p_xlayers[5].target == 'cpu'
        assert p_xlayers[6].target == 'cpu'
        assert p_xlayers[7].target == 'cpu'

        assert p_xlayers[0].subgraph is None
        assert p_xlayers[1].subgraph == 'xp0'
        assert p_xlayers[2].subgraph == 'xp0'
        assert p_xlayers[3].subgraph is None
        assert p_xlayers[4].subgraph is None
        assert p_xlayers[5].subgraph is None
        assert p_xlayers[6].subgraph is None
        assert p_xlayers[7].subgraph is None

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert len(subgraphs) == 1
        xp0 = subgraphs[0]
        assert xp0.name == 'xp0'
        xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp0.subgraph_data)

        assert xp0.bottoms == ['in1']
        assert xp0.tops == ['t1', 't2']
        assert xp0.shapes == [[1, 2, 2, 2], [1, 2, 2, 2]]
        assert xp0.sizes == [8, 8]
        assert len(xp0_xgraph) == 3

        __bottom_tensors = xp0.attrs['__bottom_tensors']
        orig_bottom_tensors = xp0.attrs['orig_bottom_tensors']

        assert len(__bottom_tensors) == 1
        assert 'xinput0' in __bottom_tensors
        assert __bottom_tensors['xinput0'] == ['in1']

        assert len(orig_bottom_tensors) == 1
        assert 'xinput0' in orig_bottom_tensors
        assert orig_bottom_tensors['xinput0'] == ['in1']

        __top_tensors = xp0.attrs['__top_tensors']
        orig_top_tensors = xp0.attrs['orig_top_tensors']

        assert len(__top_tensors) == 1
        assert 'pool1' in __top_tensors
        assert __top_tensors['pool1'] == ['t1', 't2']

        assert len(orig_top_tensors) == 1
        assert 'pool1' in orig_top_tensors
        assert orig_top_tensors['pool1'] == ['s1', 's2', 's3']
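
        # Taken together: '__top_tensors' records the immediate (internal)
        # consumers of the subgraph output (the two Transpose layers), while
        # 'orig_top_tensors' records the original external consumers (the
        # three Sqrt layers) sitting behind them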
Example No. 14
class RuntimeDecentQSim(BaseRuntime):
    """Runtime for Decent quantizer simulation"""

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()

    def __init__(self,
                 name,
                 xgraph: XGraph,
                 device: str = 'cpu',
                 batch_size: int = -1,
                 placeholder: bool = False,
                 last_layers: List[str] = None,
                 **kwargs):
        super(RuntimeDecentQSim,
              self).__init__(name, xgraph, device, batch_size, placeholder,
                             last_layers)

        meta_attrs = self.xgraph.meta_attrs

        if 'quant_keys' not in meta_attrs:
            raise ValueError("Trying to simulate an unquantized model. Make"
                             " sure to quantize the model first.")

        qkey = meta_attrs['quant_keys'][0]
        self.q_eval = meta_attrs[qkey]['q_eval']
        self.gpu = 0

        Xps = RuntimeDecentQSim.xgraph_partitioner.get_subgraphs(xgraph)
        assert len(Xps) == 1, "Decent quantizer simulation only supports one partition"\
            " currently"
        self.Xp = Xps[0]
        target = self.Xp.attrs['target']
        opt_xgraph = RuntimeDecentQSim.target_registry.get_target_optimizer(
            target)(self.xgraph, target=target)
        self.rt_xgraph = RuntimeDecentQSim.target_registry.get_target_build_func(
            target
        )(
            copy.deepcopy(opt_xgraph),
            # NOTE: XGraphs should be built in NHWC data layout here; this is
            # important for DPUCADX8G, where DPU execution happens in NCHW
            # but quantization simulation runs in NHWC
            data_layout='NHWC'
        )

    def _init_net(self, network: List[XLayer], params: Dict[str, np.ndarray]):
        # Do nothing
        pass

    def run_input(self, X: XLayer, inputs: Dict[str, Union[np.ndarray, List[np.ndarray]]])\
            -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
        return None

    def run_transpose(self, X: XLayer, inputs: Dict[str, Union[np.ndarray, List[np.ndarray]]])\
            -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
        assert len(X.bottoms) == 1
        return np.transpose(inputs[X.bottoms[0]],
                            axes=tuple(X.attrs['axes'][:]))

    def run_dpu(self, X: XLayer, inputs: Dict[str, Union[np.ndarray, List[np.ndarray]]])\
            -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu)
        input_graph_def = tf.Graph().as_graph_def()
        input_graph_def.ParseFromString(
            tf.io.gfile.GFile(self.q_eval, "rb").read())
        tf.import_graph_def(input_graph_def, name='')

        input_names = X.attrs["input_names"]
        input_map = {
            X.attrs["__bottom_tensors"][in_name][0]: in_name
            for in_name in input_names
        }
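        # look up the quantized graph's input tensors for each bottom tensor
        # name (':0' selects the first output of the named op)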
        in_tensors = {
            k:
            tf.compat.v1.get_default_graph().get_tensor_by_name(input_map[k] +
                                                                ":0")
            for k in X.bottoms
        }

        feed_dict = {in_tensors[k]: inputs[k] for k in X.bottoms}

        out_names = X.attrs["output_names"]
        out_tensor_names = [X.attrs["output_layers"][o][-1] for o in out_names]
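        # NOTE: out_tensor_names is currently unused below; outputs are looked
        # up directly through the '/aquant' quantized tensor names instead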

        out_tensors = [
            tf.compat.v1.get_default_graph().get_tensor_by_name(o + "/aquant" +
                                                                ":0")
            for o in out_names
        ]

        with tf.compat.v1.Session() as sess:
            out = sess.run(out_tensors, feed_dict=feed_dict)
            return out if isinstance(out, list) else [out]

    def run_tuple_get_item(self, X: XLayer, inputs: Dict[str, Union[np.ndarray, List[np.ndarray]]])\
            -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
        assert len(X.bottoms) == 1
        index = X.attrs['index']
        data = inputs[X.bottoms[0]][index]
        if 'transpose' in X.attrs and X.attrs['transpose'] is True:
            return np.transpose(data, axes=tuple(X.attrs['axes'][:]))
        return data

    def run_tuple(self, X: XLayer, inputs: Dict[str, Union[np.ndarray, List[np.ndarray]]])\
            -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
        return [inputs[b] for b in X.bottoms]

    def run(self,
            inputs: Dict[str, np.ndarray],
            outputs: List[str] = [],
            stop: str = None,
            force_stepwise: bool = False,
            debug: bool = False) -> List[np.ndarray]:
        """Override run method"""
        for X in self.rt_xgraph.get_layers():
            if 'Input' in X.type:
                outs = self.run_input(X, inputs)
            elif 'Transpose' in X.type:
                outs = self.run_transpose(X, inputs)
            elif 'DPU' in X.type:
                outs = self.run_dpu(X, inputs)
            elif 'TupleGetItem' in X.type:
                outs = self.run_tuple_get_item(X, inputs)
            elif 'Tuple' in X.type:
                outs = self.run_tuple(X, inputs)
            else:
                raise NotImplementedError(
                    "Unsupported operation in decentq simulation: {}".format(
                        X.type[0]))
            if outs is not None:
                inputs[X.name] = outs
        return [inputs[o] for o in outputs]
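
A hedged usage sketch for the runtime above (names and shapes are assumptions;
the XGraph must already be quantized so 'quant_keys' is present in its meta
attributes, and it must contain exactly one DPU partition):

import numpy as np

rt = RuntimeDecentQSim('qsim', quantized_xgraph)
res = rt.run({'xinput0': np.zeros((1, 4, 4, 1), dtype=np.float32)},
             outputs=['some_output'])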
Example No. 15
    def test_basic(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=[],
                   tops=['add1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv1'],
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['add1'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='add1',
                   type=['Eltwise'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1', 'in2'],
                   tops=[],
                   layer=['add1'],
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert len(p_xgraph.get_layer_names()) == 5
        assert p_xgraph.get_subgraph_names() == ['xp0']

        p_xlayers = p_xgraph.get_layers()
        assert p_xlayers[0].type[0] in ['Input']
        assert p_xlayers[1].type[0] in ['Convolution']
        assert p_xlayers[2].type[0] in ['Pooling']
        assert p_xlayers[3].type[0] in ['Input']
        assert p_xlayers[4].type[0] in ['Eltwise']

        assert p_xlayers[0].target == 'cpu'
        assert p_xlayers[1].target == 'test'
        assert p_xlayers[2].target == 'test'
        assert p_xlayers[3].target == 'cpu'
        assert p_xlayers[4].target == 'cpu'

        assert p_xlayers[0].subgraph is None
        assert p_xlayers[1].subgraph == 'xp0'
        assert p_xlayers[2].subgraph == 'xp0'
        assert p_xlayers[3].subgraph is None
        assert p_xlayers[4].subgraph is None

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert len(subgraphs) == 1
        xp0 = subgraphs[0]
        assert xp0.name == 'xp0'
        xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp0.subgraph_data)

        assert xp0.bottoms == ['in1']
        assert xp0.tops == ['add1']
        assert xp0.shapes == [[1, 2, 2, 2]]
        assert xp0.sizes == [8]

        assert len(xp0_xgraph) == 3
        xp0_layers = xp0_xgraph.get_layers()

        assert xp0_layers[0].type[0] == 'Input'
        assert xp0_layers[0].layer[0] == 'conv1'
        assert xp0_layers[1].type[0] == 'Convolution'
        assert xp0_layers[2].type[0] == 'Pooling'

        assert xp0_layers[0].bottoms == []
        assert xp0_layers[0].tops == ['conv1']
        assert xp0_layers[1].bottoms == ['xinput0']
        assert xp0_layers[1].tops == ['pool1']
        assert xp0_layers[2].bottoms == ['conv1']
        assert xp0_layers[2].tops == []
Example No. 16
    def test_inception_like_block(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['concat1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['concat1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 2, 4, 4],
                   sizes=[32],
                   bottoms=['in1', 'in2'],
                   tops=['conv1', 'conv2'],
                   layer=['concat1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 4, 3, 3],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=['pool1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv1'],
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 4, 2, 2],
                   sizes=[],
                   bottoms=['conv1'],
                   tops=['concat2'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 4, 2, 2],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=['concat2'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv2'],
                   targets=[]),
            XLayer(name='concat2',
                   type=['Concat'],
                   shapes=[1, 8, 2, 2],
                   sizes=[32],
                   bottoms=['pool1', 'conv2'],
                   tops=['dense1'],
                   layer=['concat2'],
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[20],
                   bottoms=['concat2'],
                   tops=[],
                   layer=['dense1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)

        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert (len(p_xgraph.get_layer_names()) == 8)
        p_xlayers = p_xgraph.get_layers()

        assert (p_xlayers[0].target == 'cpu')
        assert (p_xlayers[1].target == 'cpu')
        assert (p_xlayers[2].target == 'test')
        assert (p_xlayers[3].target == 'test')
        assert (p_xlayers[4].target == 'test')
        assert (p_xlayers[5].target == 'test')
        assert (p_xlayers[6].target == 'test')
        assert (p_xlayers[7].target == 'cpu')

        assert (p_xlayers[0].subgraph is None)
        assert (p_xlayers[1].subgraph is None)
        assert (p_xlayers[2].subgraph == 'xp0')
        assert (p_xlayers[3].subgraph == 'xp0')
        assert (p_xlayers[4].subgraph == 'xp0')
        assert (p_xlayers[5].subgraph == 'xp0')
        assert (p_xlayers[6].subgraph == 'xp0')
        assert (p_xlayers[7].subgraph is None)

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert (len(subgraphs) == 1)
        xp0 = subgraphs[0]
        assert (xp0.name == 'xp0')
        xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp0.subgraph_data)

        assert (xp0.bottoms == ['in1', 'in2'])
        assert (xp0.tops == ['dense1'])
        assert (xp0.shapes == [[1, 8, 2, 2]])
        assert (xp0.sizes == [32])

        assert (len(xp0_xgraph) == 7)
        xp0_layers = xp0_xgraph.get_layers()

        assert (xp0_layers[0].type[0] == 'Input')
        assert (xp0_layers[0].layer[0] == 'concat1')
        assert (xp0_layers[1].type[0] == 'Input')
        assert (xp0_layers[1].layer[0] == 'concat1')
        assert (xp0_layers[2].type[0] == 'Concat')
        assert (xp0_layers[3].type[0] == 'Convolution')
        assert (xp0_layers[4].type[0] == 'Pooling')
        assert (xp0_layers[5].type[0] == 'Convolution')
        assert (xp0_layers[6].type[0] == 'Concat')

        assert (xp0_layers[0].bottoms == [])
        assert (xp0_layers[0].tops == ['concat1'])
        assert (xp0_layers[1].bottoms == [])
        assert (xp0_layers[1].tops == ['concat1'])
        assert (xp0_layers[2].bottoms == ['xinput0', 'xinput1'])
        assert (xp0_layers[2].tops == ['conv1', 'conv2'])
        assert (xp0_layers[3].bottoms == ['concat1'])
        assert (xp0_layers[3].tops == ['pool1'])
        assert (xp0_layers[4].bottoms == ['conv1'])
        assert (xp0_layers[4].tops == ['concat2'])
        assert (xp0_layers[5].bottoms == ['concat1'])
        assert (xp0_layers[5].tops == ['concat2'])
        assert (xp0_layers[6].bottoms == ['pool1', 'conv2'])
        assert (xp0_layers[6].tops == [])
Example No. 17
    def test_two_partition_inputs(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv2'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv1'],
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['concat1'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['in2'],
                   tops=['concat1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv2'],
                   targets=[]),
            XLayer(name='concat1',
                   type=['Concat'],
                   shapes=[1, 4, 2, 2],
                   sizes=[16],
                   bottoms=['pool1', 'conv2'],
                   tops=['dense1'],
                   layer=['concat1'],
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[],
                   bottoms=['concat1'],
                   tops=[],
                   layer=['dense1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert len(p_xgraph.get_layer_names()) == 7
        assert p_xgraph.get_subgraph_names() == ['xp2']

        p_xlayers = p_xgraph.get_layers()

        assert p_xlayers[0].target == 'cpu'
        assert p_xlayers[1].target == 'test'
        assert p_xlayers[2].target == 'test'
        assert p_xlayers[3].target == 'cpu'
        assert p_xlayers[4].target == 'test'
        assert p_xlayers[5].target == 'test'
        assert p_xlayers[6].target == 'cpu'

        assert p_xlayers[0].subgraph is None
        assert p_xlayers[1].subgraph == 'xp2'
        assert p_xlayers[2].subgraph == 'xp2'
        assert p_xlayers[3].subgraph is None
        assert p_xlayers[4].subgraph == 'xp2'
        assert p_xlayers[5].subgraph == 'xp2'
        assert p_xlayers[6].subgraph is None

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert len(subgraphs) == 1
        xp2 = subgraphs[0]
        assert xp2.name == 'xp2'
        xp2_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp2.subgraph_data)

        assert xp2.bottoms == ['in1', 'in2']
        assert xp2.tops == ['dense1']
        assert xp2.shapes == [[1, 4, 2, 2]]
        assert xp2.sizes == [16]

        assert len(xp2_xgraph) == 6
        xp2_layers = xp2_xgraph.get_layers()

        assert (xp2_layers[0].type[0] == 'Input')
        assert (xp2_layers[0].layer[0] == 'conv1')
        assert (xp2_layers[1].type[0] == 'Convolution')
        assert (xp2_layers[2].type[0] == 'Pooling')
        assert (xp2_layers[3].type[0] == 'Input')
        assert (xp2_layers[3].layer[0] == 'conv2')
        assert (xp2_layers[4].type[0] == 'Convolution')
        assert (xp2_layers[5].type[0] == 'Concat')

        assert (xp2_layers[0].bottoms == [])
        assert (xp2_layers[0].tops == ['conv1'])
        assert (xp2_layers[1].bottoms == ['xinput0'])
        assert (xp2_layers[1].tops == ['pool1'])
        assert (xp2_layers[2].bottoms == ['conv1'])
        assert (xp2_layers[2].tops == ['concat1'])
        assert (xp2_layers[3].bottoms == [])
        assert (xp2_layers[3].tops == ['conv2'])
        assert (xp2_layers[4].bottoms == ['xinput1'])
        assert (xp2_layers[4].tops == ['concat1'])
        assert (xp2_layers[5].bottoms == ['pool1', 'conv2'])
        assert (xp2_layers[5].tops == [])
Example No. 18
    def test_multiple_partitions_largest_last(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['t1'],
                   layer=['conv1'],
                   data=ConvData(weights=np.array([1, 1], dtype=np.float32),
                                 biases=np.array([0, 0], dtype=np.float32)),
                   targets=[]),
            XLayer(name='t1',
                   type=['Transpose'],
                   shapes=[1, 3, 3, 2],
                   sizes=[18],
                   bottoms=['conv1'],
                   tops=['conv2'],
                   layer=['t1'],
                   targets=[],
                   attrs={'axes': [0, 2, 3, 1]}),
            XLayer(name='conv2',
                   type=['Convolution'],
                   shapes=[1, 3, 3, 2],
                   sizes=[18],
                   bottoms=['t1'],
                   tops=['pool1'],
                   layer=['conv2'],
                   data=ConvData(weights=np.array([1, 1], dtype=np.float32),
                                 biases=np.array([0, 0], dtype=np.float32)),
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv2'],
                   tops=[],
                   layer=['pool1'],
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert len(p_xgraph.get_layer_names()) == 5
        # ! Only xp1 because only one subgraph can exist for now (largest)
        assert set(p_xgraph.get_subgraph_names()) == set(['xp1'])

        p_xlayers = p_xgraph.get_layers()
        assert (p_xlayers[0].type[0] in ['Input'])
        assert (p_xlayers[1].type[0] in ['Convolution'])
        assert (p_xlayers[2].type[0] in ['Transpose'])
        assert (p_xlayers[3].type[0] in ['Convolution'])
        assert (p_xlayers[4].type[0] in ['Pooling'])

        assert (p_xlayers[0].target == 'cpu')
        assert (p_xlayers[1].target == 'cpu')
        assert (p_xlayers[2].target == 'cpu')
        assert (p_xlayers[3].target == 'test')
        assert (p_xlayers[4].target == 'test')

        assert (p_xlayers[0].subgraph is None)
        assert (p_xlayers[1].subgraph is None)
        assert (p_xlayers[2].subgraph is None)
        assert (p_xlayers[3].subgraph == 'xp1')
        assert (p_xlayers[4].subgraph == 'xp1')

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert (len(subgraphs) == 1)
        xp1 = subgraphs[0]
        assert (xp1.name == 'xp1')
        xp1_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp1.subgraph_data)

        assert (xp1.bottoms == ['t1'])
        assert (xp1.tops == [])
        assert (xp1.shapes == [[1, 2, 2, 2]])
        assert (xp1.sizes == [8])

        assert (len(xp1_xgraph) == 3)
        xp1_layers = xp1_xgraph.get_layers()

        assert (xp1_layers[0].type[0] == 'Input')
        assert (xp1_layers[0].layer[0] == 'conv2')
        assert (xp1_layers[1].type[0] == 'Convolution')
        assert (xp1_layers[2].type[0] == 'Pooling')

        assert (xp1_layers[0].bottoms == [])
        assert (xp1_layers[0].tops == ['conv2'])
        assert (xp1_layers[1].bottoms == ['xinput0'])
        assert (xp1_layers[1].tops == ['pool1'])
        assert (xp1_layers[2].bottoms == ['conv2'])
        assert (xp1_layers[2].tops == [])
Example No. 19
    def test_multiple_partitions(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=[],
                   tops=['add1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['conv1'],
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['add1'],
                   layer=['pool1'],
                   targets=[]),
            XLayer(name='add1',
                   type=['Eltwise'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['pool1', 'in2'],
                   tops=[],
                   layer=['add1'],
                   targets=[]),
            XLayer(name='bn1',
                   type=['BatchNorm'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['add1'],
                   tops=['pool2'],
                   data=BatchData(np.array([1, 1]), np.array([0, 0]),
                                  np.array([1, 1]), np.array([0, 0])),
                   layer=['bn1'],
                   targets=[]),
            XLayer(name='pool2',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['bn1'],
                   tops=[],
                   layer=['pool2'],
                   targets=[])
        ]
        xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
        TargetRegistry().annotate_ops(xgraph)
        p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
            xgraph, ['test'])

        assert (len(p_xgraph.get_layer_names()) == 7)
        # ! Only xp0 because only one subgraph can exist for now (largest)
        assert (set(p_xgraph.get_subgraph_names()) == set(['xp0']))

        p_xlayers = p_xgraph.get_layers()
        assert (p_xlayers[0].type[0] in ['Input'])
        assert (p_xlayers[1].type[0] in ['Convolution'])
        assert (p_xlayers[2].type[0] in ['Pooling'])
        assert (p_xlayers[3].type[0] in ['Input'])
        assert (p_xlayers[4].type[0] in ['Eltwise'])
        assert (p_xlayers[5].type[0] in ['BatchNorm'])
        assert (p_xlayers[6].type[0] in ['Pooling'])

        assert (p_xlayers[0].target == 'cpu')
        assert (p_xlayers[1].target == 'test')
        assert (p_xlayers[2].target == 'test')
        assert (p_xlayers[3].target == 'cpu')
        assert (p_xlayers[4].target == 'cpu')
        assert (p_xlayers[5].target == 'cpu')
        # ! CPU because only one subgraph can exist for now (largest)
        assert (p_xlayers[6].target == 'cpu')

        assert (p_xlayers[0].subgraph is None)
        assert (p_xlayers[1].subgraph == 'xp0')
        assert (p_xlayers[2].subgraph == 'xp0')
        assert (p_xlayers[3].subgraph is None)
        assert (p_xlayers[4].subgraph is None)
        assert (p_xlayers[5].subgraph is None)
        assert (p_xlayers[6].subgraph is None)

        subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
            p_xgraph)

        assert (len(subgraphs) == 1)
        xp0 = subgraphs[0]
        assert (xp0.name == 'xp0')
        xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
            .build_from_xlayer(xp0.subgraph_data)

        assert (xp0.bottoms == ['in1'])
        assert (xp0.tops == ['add1'])
        assert (xp0.shapes == [[1, 2, 2, 2]])
        assert (xp0.sizes == [8])

        assert (len(xp0_xgraph) == 3)
        xp0_layers = xp0_xgraph.get_layers()

        assert (xp0_layers[0].type[0] == 'Input')
        assert (xp0_layers[0].layer[0] == 'conv1')
        assert (xp0_layers[1].type[0] == 'Convolution')
        assert (xp0_layers[2].type[0] == 'Pooling')

        assert (xp0_layers[0].bottoms == [])
        assert (xp0_layers[0].tops == ['conv1'])
        assert (xp0_layers[1].bottoms == ['xinput0'])
        assert (xp0_layers[1].tops == ['pool1'])
        assert (xp0_layers[2].bottoms == ['conv1'])
        assert (xp0_layers[2].tops == [])
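
As the inline comments note, only one subgraph is kept for now (the largest), so pool2 stays on 'cpu' even though Pooling is supported. A quick way to inspect the final placement, assuming a p_xgraph produced as above:

for X in p_xgraph.get_layers():
    # e.g. "pool2 Pooling -> target=cpu subgraph=None"
    print(X.name, X.type[0], '-> target=%s subgraph=%s' % (X.target, X.subgraph))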
Example No. 20
class TestUltra96OpSupport(unittest.TestCase):

    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def test():
            raise NotImplementedError("")

        TestUltra96OpSupport.target_registry.register_target(
            'dpuv2-ultra96', {}, test, test, test, test)

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestUltra96OpSupport.target_registry.unregister_target('dpuv2-ultra96')
        # TestUltra96OpSupport.target_registry.unregister_target('DPUCZDX8G-ultra96')

    def test_batchnorm_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            batchnorm_op_support

        X = XLayer(type=['BatchNorm'],
                   name='bn1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert batchnorm_op_support(X, [], [])

        X = XLayer(type=['BatchNorm'],
                   name='bn1',
                   shapes=[-1, 2570, 4, 4],
                   sizes=[2570 * 16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert not batchnorm_op_support(X, [], [])

    def test_biasadd_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            biasadd_op_support

        X = XLayer(type=['BiasAdd'],
                   name='bn1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert biasadd_op_support(X, [], [])

        X = XLayer(type=['BiasAdd'],
                   name='bn1',
                   shapes=[-1, 2570, 4, 4],
                   sizes=[2570 * 16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert not biasadd_op_support(X, [], [])

    def test_concat_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            concat_op_support

        X = XLayer(type=['Concat'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert concat_op_support(X, [], [])

        X = XLayer(type=['Concat'],
                   name='layer1',
                   shapes=[-1, 2570, 4, 4],
                   sizes=[2570 * 16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert not concat_op_support(X, [], [])

    def test_conv2d_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            conv2d_op_support

        X = XLayer(type=['Convolution'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [1, 1],
                       'dilation': [1, 1],
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                       'channels': [4, 2],
                       'groups': 1
                   })

        assert conv2d_op_support(X, [], [])

        X = XLayer(type=['Convolution'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [1, 1],
                       'dilation': [1, 1],
                       'padding': [[0, 0], [0, 0], [3, 3], [1, 1]],
                       'channels': [4, 2],
                       'groups': 1
                   })

        assert not conv2d_op_support(X, [], [])

    def test_conv2d_transpose_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            conv2d_transpose_op_support

        X = XLayer(type=['Conv2DTranspose'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [1, 1],
                       'dilation': [1, 1],
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                       'channels': [4, 2],
                       'groups': 1
                   })

        assert conv2d_transpose_op_support(X, [], [])

        X = XLayer(type=['Conv2DTranspose'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [1, 1],
                       'dilation': [1, 1],
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                       'channels': [2570, 2],
                       'groups': 1
                   })

        assert not conv2d_transpose_op_support(X, [], [])

    def test_dpuv2_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            dpu_op_support

        X = XLayer(type=['DPU'],
                   name='layer1',
                   shapes=[[-1, 2, 4, 4], [-1, 1, 4, 4]],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={})

        assert dpu_op_support(X, [], [])

    def test_eltwise_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            eltwise_op_support

        X = XLayer(type=['Eltwise'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={})

        assert eltwise_op_support(X, [], [])

    def test_pad_pooling_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            pad_op_support

        X = XLayer(type=['Pad'],
                   name='pad1',
                   shapes=[-1, 2, 6, 6],
                   sizes=[72],
                   bottoms=[],
                   tops=['layer1'],
                   targets=[],
                   attrs={'padding': [[0, 0], [0, 0], [2, 2], [2, 2]]})

        tX = XLayer(type=['Pooling'],
                    name='layer1',
                    shapes=[-1, 2, 4, 4],
                    sizes=[32],
                    bottoms=['pad1'],
                    tops=[],
                    targets=[],
                    attrs={
                        'data_layout': 'NCHW',
                        'kernel_size': [2, 2],
                        'strides': [3, 3],
                        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]]
                    })

        assert pad_op_support(X, [], [tX])

        X = XLayer(type=['Pad'],
                   name='pad1',
                   shapes=[-1, 2, 6, 6],
                   sizes=[72],
                   bottoms=[],
                   tops=['layer1'],
                   targets=[],
                   attrs={'padding': [[0, 0], [0, 0], [5, 2], [5, 2]]})

        tX = XLayer(type=['Pooling'],
                    name='layer1',
                    shapes=[-1, 2, 4, 4],
                    sizes=[32],
                    bottoms=['pad1'],
                    tops=[],
                    targets=[],
                    attrs={
                        'data_layout': 'NCHW',
                        'kernel_size': [2, 2],
                        'strides': [3, 3],
                        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]]
                    })

        assert not pad_op_support(X, [], [tX])

    def test_pad_convolution_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            pad_op_support

        X = XLayer(type=['Pad'],
                   name='pad1',
                   shapes=[-1, 2, 6, 6],
                   sizes=[72],
                   bottoms=[],
                   tops=['layer1'],
                   targets=[],
                   attrs={'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]})

        tX = XLayer(type=['Convolution'],
                    name='layer1',
                    shapes=[-1, 2, 4, 4],
                    sizes=[32],
                    bottoms=['pad1'],
                    tops=[],
                    targets=[],
                    attrs={
                        'data_layout': 'NCHW',
                        'kernel_size': [2, 2],
                        'strides': [1, 1],
                        'dilation': [1, 1],
                        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                        'channels': [4, 2],
                        'groups': 1
                    })

        assert pad_op_support(X, [], [tX])

        X = XLayer(type=['Pad'],
                   name='pad1',
                   shapes=[-1, 2, 6, 6],
                   sizes=[72],
                   bottoms=[],
                   tops=['layer1'],
                   targets=[],
                   attrs={'padding': [[0, 0], [0, 0], [2, 2], [2, 2]]})

        tX = XLayer(type=['Convolution'],
                    name='layer1',
                    shapes=[-1, 2, 4, 4],
                    sizes=[32],
                    bottoms=['pad1'],
                    tops=[],
                    targets=[],
                    attrs={
                        'data_layout': 'NCHW',
                        'kernel_size': [2, 2],
                        'strides': [1, 1],
                        'dilation': [1, 1],
                        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                        'channels': [4, 2],
                        'groups': 1
                    })

        assert not pad_op_support(X, [], [tX])

    def test_pooling_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            pooling_op_support

        X = XLayer(type=['Pooling'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [3, 3],
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   })

        assert pooling_op_support(X, [], [])

        X = XLayer(type=['Pooling'],
                   name='layer1',
                   shapes=[-1, 2570, 4, 4],
                   sizes=[2570 * 16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'data_layout': 'NCHW',
                       'kernel_size': [2, 2],
                       'strides': [1, 1],
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
                   })

        assert not pooling_op_support(X, [], [])

    def test_mean_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            mean_op_support

        X = XLayer(type=['Mean'],
                   name='layer1',
                   shapes=[-1, 2, 1, 1],
                   sizes=[2],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'axes': [2, 3],
                       'keepdims': True,
                       'exclude': False
                   })

        assert mean_op_support(X, [], [])

        X = XLayer(type=['Mean'],
                   name='layer1',
                   shapes=[-1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={
                       'axes': [1],
                       'keepdims': True,
                       'exclude': False
                   })

        assert not mean_op_support(X, [], [])

    def test_prelu_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            prelu_op_support

        X = XLayer(type=['pReLU'],
                   name='layer1',
                   shapes=[-1, 2, 1, 1],
                   sizes=[2],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'alpha': 0.1})

        assert prelu_op_support(X, [], [])

        X = XLayer(type=['pReLU'],
                   name='layer1',
                   shapes=[-1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'alpha': 0.2})

        assert not prelu_op_support(X, [], [])

    def test_relu_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            relu_op_support

        X = XLayer(type=['ReLU'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={})

        assert relu_op_support(X, [], [])

    def test_relu6_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            relu6_op_support

        X = XLayer(type=['ReLU6'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={})

        assert relu6_op_support(X, [], [])

    def test_scale_support(self):
        from pyxir.contrib.dpuv2.ultra96_op_support import \
            scale_op_support

        X = XLayer(type=['Scale'],
                   name='layer1',
                   shapes=[-1, 2, 4, 4],
                   sizes=[32],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert scale_op_support(X, [], [])

        X = XLayer(type=['Scale'],
                   name='layer1',
                   shapes=[-1, 2570, 4, 4],
                   sizes=[2570 * 16],
                   bottoms=[],
                   tops=[],
                   targets=[],
                   attrs={'axis': 1})

        assert not scale_op_support(X, [], [])
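
Each check above takes the candidate layer X plus its bottom layers (bXs) and top layers (tXs) and returns a bool, with hardware limits expressed as bounds on layer attributes and shapes. A sketch of registering such a check for a hypothetical target; 'my-target' and the 256-channel bound are illustrative only, and the target is assumed to have been registered first via register_target:

from pyxir.target_registry import register_op_support_check  # assumed path

@register_op_support_check('my-target', 'ReLU')
def relu_op_support(X, bXs, tXs):
    # Accept ReLU layers whose channel dimension (NCHW axis 1) stays
    # within an illustrative bound; real targets encode hardware limits.
    channels = X.shapes[1]
    return 1 <= channels <= 256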
Example No. 21
class XGraph(object):
    """
    The XGraph data structure for storing the model graph, accessing
    properties and doing graph level transformations

    Arguments:
    ----------
    name: str
        the XGraph name
    """

    target_registry = TargetRegistry()

    @classmethod
    def _from_xgraph(cls, _xgraph: lpx.XGraph):
        xg = XGraph.__new__(cls)
        xg._xgraph = _xgraph
        xg.init()
        return xg

    def __init__(self, name='XGraph'):
        self._xgraph = lpx.XGraph(name)
        self.init()

    def init(self):
        # color map
        self.cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
                   "#fdb462", "#b3de69", "#fccde5")

        # Quantization
        self.quantizer_output = None

        # Compilation
        self.compiler_output = None

        self._reset()

    def _reset(self):
        # type: () -> None
        """
        Reset the dependent attributes (e.g. input and output layers) of this
        XGraph based on the current pydot graph
        """

        xlayers = self.get_layers()

        for X in xlayers:
            # Setup targets
            self.__setup_targets_for_X(X)

    def get_name(self):
        # type: () -> str
        return self._xgraph.get_name()

    def set_name(self, name: str):
        self._xgraph.set_name(name)

    @property
    def meta_attrs(self):
        return XAttrDict(self._xgraph.meta_attrs)

    @meta_attrs.setter
    def meta_attrs(self, d: dict):
        _xattr_dict = XAttrDict(lpx.XAttrMap())
        for key, value in d.items():
            _xattr_dict[key] = value

        self._xgraph.meta_attrs = _xattr_dict._get_xattr_map()

    ##########
    # LAYERS #
    ##########

    def get_input_names(self):
        # type: () -> List[str]
        return StrVector(self._xgraph.get_input_names())

    def get_input_layers(self):
        # type: () -> List[XLayer]
        return [self.get(il) for il in self.get_input_names()]

    def get_input_shapes(self):
        # type: () -> Dict[str, List[int]]
        ils = self.get_input_layers()
        return {il.name: il.shapes[:] for il in ils}

    def get_output_names(self):
        # type: () -> List[str]
        return StrVector(self._xgraph.get_output_names())

    def get_output_layers(self):
        # type: () -> List[XLayer]
        return [self.get(ol) for ol in self.get_output_names()]

    def get_output_shapes(self):
        # type: () -> Dict[str, List[int]]
        ols = self.get_output_layers()
        return {ol.name: ol.shapes[:] for ol in ols}

    def get(self, layer_name):
        # type: (str) -> XLayer
        """ Return an XLayer object by name """
        return XLayer._from_xlayer(self._xgraph.get(layer_name))

    def get_layer_names(self):
        # type: () -> List[str]
        """ Return all layer names in topological order """
        return StrVector(self._xgraph.get_layer_names())

    def get_layers(self):
        # type: () -> List[XLayer]
        """ Return all layers in topological order """
        return [self.get(ln) for ln in self.get_layer_names()]

    def get_bottom_layers(self, layer_name):
        # type: (str) -> List[XLayer]
        """
        Get the bottom layers of the provided layer
        """
        return [self.get(b) for b in self.get(layer_name).bottoms]

    def get_top_layers(self, layer_name):
        # type: (str) -> List[XLayer]
        """
        Get the top layers of the provided layer
        """
        return [self.get(t) for t in self.get(layer_name).tops]

    # CHECKS

    def exists_layer(self, layer_name):
        # type: (str) -> bool
        return layer_name in self._xgraph

    # SET

    def add(self, X):
        # type: (XLayer) -> None
        """ Add the provided XLayer object to the graph """
        # TODO: topological assumption here??

        if not isinstance(X, XLayer):
            raise ValueError("xlayer argument should be of type: XLayer but"
                             " was: {}".format(type(X)))

        self._xgraph.add(X._get_xlayer())

        # Setup targets
        X = self.get(X.name)
        self.__setup_targets_for_X(X)

        # Check bottom and top layers again
        bottom_Xs = self.get_bottom_layers(X.name)
        top_Xs = self.get_top_layers(X.name)
        for b_X in bottom_Xs:
            self.__setup_targets_for_X(b_X)
        for t_X in top_Xs:
            self.__setup_targets_for_X(t_X)

    def insert(self, X):
        # type: (XLayer) -> None
        """ Insert the provided XLayer object in the graph between
            two other layers """

        if len(X.bottoms) != 1 or len(X.tops) != 1:
            raise ValueError("Undefined behaviour: can't insert a node if"
                             " there are multiple bottom layers or multiple"
                             " top layers")

        bX = self.get(X.bottoms[0])
        tX = self.get(X.tops[0])

        new_tops = [(bXt if bXt != tX.name else X.name) for bXt in bX.tops]
        new_bottoms = [(tXb if tXb != bX.name else X.name)
                       for tXb in tX.bottoms]

        self.add(X)

        bX.tops = new_tops
        self.update(bX.name)

        tX.bottoms = new_bottoms
        self.update(tX.name)

    def update(self, X_name):
        # type: (str) -> None
        """
        Update the given xlayer
        """
        self._xgraph.update(X_name)
        X = self.get(X_name)

        # Setup targets
        self.__setup_targets_for_X(X)

        # Check bottom and top layers again
        bottom_Xs = self.get_bottom_layers(X.name)
        top_Xs = self.get_top_layers(X.name)

        for b_X in bottom_Xs:
            self.__setup_targets_for_X(b_X)
        for t_X in top_Xs:
            self.__setup_targets_for_X(t_X)

    def remove(self, layer_name):
        # type: (str) -> None
        """ Remove the layer with given name and link the bottom and top
            layers. """

        # Retrieve bottom and top layers before removal
        bottoms = self.get(layer_name).bottoms[:]
        tops = self.get(layer_name).tops[:]

        # Link bottom and top layers
        bottom_Xs = [self.get(b) for b in bottoms]
        top_Xs = [self.get(t) for t in tops]

        for bX in bottom_Xs:
            new_tops = [
                ([bXt] if bXt != layer_name else [tX.name for tX in top_Xs])
                for bXt in bX.tops
            ]

            # Flatten
            new_tops = [e for sl in new_tops for e in sl]
            bX.tops = new_tops

        for tX in top_Xs:
            new_bottoms = [
                ([tXb] if tXb != layer_name else [bX.name for bX in bottom_Xs])
                for tXb in tX.bottoms
            ]

            # Flatten
            new_bottoms = [e for sl in new_bottoms for e in sl]
            tX.bottoms = new_bottoms

        # Bottom and top links have changed so clear X bottoms and tops
        #   before removing
        X = self.get(layer_name)
        X.bottoms = []
        X.tops = []

        self._xgraph.remove(layer_name)

        # Re-run target setup for the re-linked bottom and top layers
        for b in bottoms:
            self.update(b)
        for t in tops:
            self.update(t)

    #############
    # SUBGRAPHS #
    #############

    def get_subgraph_names(self):
        # type: () -> List[str]
        """
        Return the names of all the subgraphs
        """
        return list(
            set([
                X.subgraph for X in self.get_layers() if X.subgraph is not None
            ]))

    ################
    # QUANTIZATION #
    ################

    def is_quantized(self) -> bool:
        return self.quantizer_output is not None or (
            "is_quantized" in self.meta_attrs
            and self.meta_attrs["is_quantized"])

    def set_quantizer_output(self, q_output: QuantizerOutput) -> None:
        self.quantizer_output = q_output

    def get_quantizer_output(self) -> QuantizerOutput:
        """
        Quantization information can be stored both in q_output attribute
        and in meta attributes
        TODO: Merge approaches
        """
        if not self.is_quantized():
            raise ValueError("No quantization output found. Quantize this"
                             " XGraph object before retrieving the"
                             " quantization output")

        if (self.quantizer_output is not None
                and "is_quantized" in self.meta_attrs
                and self.meta_attrs["is_quantized"]):
            warnings.warn("Quantization info found both in XGraph meta"
                          " attributes and q_output attribute")

        if self.quantizer_output is not None:
            return self.quantizer_output

        # Retrieve quantization output from meta attributes
        q_output = QuantizerOutput(self.get_name())
        if "quant_keys" not in self.meta_attrs:
            raise ValueError("Expected `quant_keys` attribute in meta"
                             " attributes")

        for q_key in self.meta_attrs["quant_keys"]:
            q_output.add(q_key=q_key,
                         q_file=self.meta_attrs[q_key]['q_file'],
                         q_info=self.meta_attrs[q_key]['q_info'],
                         orig_pb=self.meta_attrs[q_key]['orig_pb'])
            logger.debug("QOutput q_info: {}".format(
                self.meta_attrs[q_key]['q_info']))

        return q_output

    def save_quant_info_txt(self, filename) -> None:
        lines = []
        idx = 1
        for X in self.get_layers():
            if "vai_quant" in X.attrs:
                line = [str(idx), X.name]
                for quant_elem in X.attrs['vai_quant']:
                    line.extend([str(i) for i in X.attrs[quant_elem]])
                lines.append(line)
                idx += 1

        s = '\n'.join([' '.join(line) for line in lines])

        with open(filename, 'w') as f:
            f.write(s)

    ###############
    # COMPILATION #
    ###############

    def is_compiled(self):
        # type: () -> bool
        return self.compiler_output is not None

    def set_compiler_output(self, c_output):
        # type: (CompilerOutput) -> None
        self.compiler_output = c_output

    def get_compiler_output(self):
        # type: () -> CompilerOutput
        if not self.is_compiled():
            raise ValueError("No compilation output found. Compile this"
                             " XGraph object before retrieving the"
                             " compilation output")
        return self.compiler_output

    ##################
    # HELPER METHODS #
    ##################

    def copy(self):
        xg = XGraph(self.get_name())
        xg.meta_attrs = self.meta_attrs.to_dict()
        xg.quantizer_output = self.quantizer_output
        xg.compiler_output = self.compiler_output
        for X in self.get_layers():
            # Make sure top are empty to be able to add layer
            # TODO: slow? how many copies are made in total?
            X_copy = X.copy()
            X_copy.tops = []
            xg.add(X_copy)
        return xg

    def copy_from(self, xg: 'XGraph'):
        self._xgraph.copy(xg._xgraph)

    def visualize(self, outputfile):
        # type: (str) -> None
        """ Visualize this xgraph using pydot """
        try:
            from . import pydot_tools
            import pydot
        except ImportError:
            raise ImportError("XGraph functionality depends on the 'pydot'"
                              " package. Please make sure that Pydot is"
                              " installed before trying to visualize XGraphs")

        pdg = pydot.Dot(self.get_name(), graph_type='digraph', rankdir='BT')

        cm_idx = 1
        target_to_cm = {}
        for X in self.get_layers():
            pydot_attrs = copy.copy(pydot_tools.LAYER_STYLE_DEFAULT)

            if 'Input' in X.type:
                pydot_attrs["shape"] = "oval"
                pydot_attrs["fillcolor"] = self.cm[0]

            if X.target != 'cpu':
                if X.target not in target_to_cm:
                    target_to_cm[X.target] = cm_idx
                    if cm_idx < (len(self.cm) - 1):
                        cm_idx += 1
                pydot_attrs["fillcolor"] = self.cm[target_to_cm[X.target]]

            # Add '-pdg' to fix issues of pydot with names with format
            #   '[...]:0' where ':0' gets removed
            node = pydot.Node(pydot.quote_if_necessary(X.name + '-pdg'),
                              **pydot_attrs)

            pdg.add_node(node)

            for b in X.bottoms:
                src_nodes = pdg.get_node(pydot.quote_if_necessary(b + '-pdg'))

                if len(src_nodes) == 0:
                    raise ValueError(
                        "Pydot could not find layer with name: {}".format(b))
                assert len(src_nodes) == 1

                src_node = src_nodes[0]

                edge_label = b + "->" + X.name
                # logger.debug("--Add bottom edge: {}".format(edge_label))
                pdg.add_edge(pydot.Edge(src_node, node, label=edge_label))

        pydot_tools.visualize(pdg, outputfile)

    def __setup_targets_for_X(self, X):
        # type: (XLayer) -> None
        """
        Setup the supported targets for the provided XLayer
        """
        # Check with registered targets, which device can execute
        #   this XLayer and add those targets to the XLayer device
        #   attribute
        X.targets = []

        bottom_Xs = self.get_bottom_layers(X.name)
        top_Xs = self.get_top_layers(X.name)

        for device in XGraph.target_registry.get_targets():
            if device.can_execute(X, bottom_Xs, top_Xs):
                X.targets.append(device.name)

    #########################
    # __*__ IMPLEMENTATIONS #
    #########################

    def __contains__(self, layer_name: str):
        # type: (str) -> bool
        """ Reports whether a layer with given name exists in the XGraph """
        return layer_name in self._xgraph

    def __len__(self):
        # type: () -> int
        return len(self._xgraph)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        # type: (dict) -> XGraph
        """
        NOTE: We override the __deepcopy__ method because of internal C++
        XGraph data structure
        """
        xg_copy = self.copy()
        memo[id(xg_copy)] = xg_copy
        return xg_copy
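
A short sketch of driving the XGraph API above, assuming `xgraph` was built with XGraphFactory as in the test examples and using only the accessors defined in this class:

xg = xgraph.copy()                       # deep copy via copy()/__copy__
print(xg.get_name(), len(xg))
print(xg.get_input_shapes())             # e.g. {'in1': [1, 1, 4, 4]}
print(xg.get_output_shapes())
for X in xg.get_layers():                # topological order
    print(X.name, X.type[0], X.bottoms, X.tops, X.targets)
assert xg.get_layer_names()[0] in xg     # __contains__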
Example No. 22
class TestRelay(unittest.TestCase):

    target_registry = TargetRegistry()

    # INPUTS/OUTPUTS

    # BASIC NN OPS

    @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
    def test_simple_network(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 1, 4, 4), "float32")
        )
        weight = relay.var("weight")

        # simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=(2, 2),
            channels=2,
            padding=(0, 0)
        )

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        weight = np.reshape(np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]],
                                     dtype=np.float32),
                            (2, 1, 2, 2))

        xgraph = xf_relay.from_relay(mod, {'weight': weight})

        layers = xgraph.get_layers()

        inputs = {
            'data': np.reshape(np.array([
                [10, 10, 0, 40],
                [50, 10, 0, 80],
                [30, 50, 10, 0],
                [10, 90, 30, 40]]), (1, 1, 4, 4))
        }
        res = run._run_network_cpu(xgraph, inputs)
        # print(res[0])

        expected_output = np.array([[
            [[180., 40., 80.],
             [160., 160., 190.],
             [160., 340., 100.]],

            [[30., 10., 120.],
             [110., 20., 80.],
             [170., 90., 50.]]
        ]])

        np.testing.assert_array_equal(res[0], expected_output)
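
The expected values can be checked by hand: the top-left element of the first output channel is the 2x2 patch [[10, 10], [50, 10]] multiplied elementwise with the first filter [[1, 2], [3, 0]] and summed: 10*1 + 10*2 + 50*3 + 10*0 = 180. A small NumPy sketch of the same cross-correlation for the first filter:

import numpy as np

data = np.array([[10, 10, 0, 40],
                 [50, 10, 0, 80],
                 [30, 50, 10, 0],
                 [10, 90, 30, 40]], dtype=np.float32)
w0 = np.array([[1, 2], [3, 0]], dtype=np.float32)   # first filter above

out = np.zeros((3, 3), dtype=np.float32)
for i in range(3):
    for j in range(3):
        out[i, j] = np.sum(data[i:i + 2, j:j + 2] * w0)
# out[0, 0] == 180.0; out equals expected_output[0][0]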
Example No. 23
class TestTargetRegistry(unittest.TestCase):

    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):
        def xgraph_build_func(xgraph):
            raise NotImplementedError("")

        def xgraph_optimizer(xgraph):
            raise NotImplementedError("")

        def xgraph_quantizer(xgraph):
            raise NotImplementedError("")

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        cls.target_registry.register_target('test', xgraph_optimizer,
                                            xgraph_quantizer, xgraph_compiler,
                                            xgraph_build_func)

        @register_op_support_check('test', 'Convolution')
        def conv_op_support(X, bXs, tXs):
            return True

        @register_op_support_check('test', 'Pooling')
        def pooling_op_support(X, bXs, tXs):
            return True

    @classmethod
    def tearDownClass(cls):
        cls.target_registry.unregister_target('test')

    def test_initialization(self):
        self.assertTrue(
            set(TestTargetRegistry.target_registry.get_target_names()) == set(
                ['cpu', 'test']))

    def test_register_target_twice(self):

        with self.assertRaises(ValueError) as context:

            def xgraph_build_func(xgraph):
                raise NotImplementedError("")

            def xgraph_optimizer(xgraph):
                raise NotImplementedError("")

            def xgraph_quantizer(xgraph):
                raise NotImplementedError("")

            def xgraph_compiler(xgraph):
                raise NotImplementedError("")

            TestTargetRegistry.target_registry.register_target(
                'test', xgraph_optimizer, xgraph_quantizer, xgraph_compiler,
                xgraph_build_func)

            self.assertTrue("Target: test is already registered." in str(
                context.exception))

    def test_target_build_func(self):
        bf = TestTargetRegistry.target_registry.get_target_build_func('test')
        assert callable(bf)

    def test_target_optimizer(self):
        of = TestTargetRegistry.target_registry.get_target_optimizer('test')
        assert callable(of)

    def test_target_quantizer(self):
        qf = TestTargetRegistry.target_registry.get_target_quantizer('test')
        assert callable(qf)

    def test_target_compiler(self):
        cf = TestTargetRegistry.target_registry.get_target_compiler('test')
        assert callable(cf)

    def test_get_target(self):
        t = TestTargetRegistry.target_registry.get_target('test')
        assert isinstance(t, Target)

        with self.assertRaises(ValueError) as context:
            t = TestTargetRegistry.target_registry.get_target('notarget')

        self.assertTrue(
            "notarget is not registered" in str(context.exception))

    def test_register_op_support(self):

        test_ops = TestTargetRegistry.target_registry.\
            get_supported_op_check_names('test')
        assert set(test_ops) == set(['Convolution', 'Pooling'])

        @register_op_support_check('test', 'Test')
        def test_op_support(X, bXs, tXs):
            raise NotImplementedError("")

        test_ops = TestTargetRegistry.target_registry.\
            get_supported_op_check_names('test')
        assert set(test_ops) == set(['Convolution', 'Pooling', 'Test'])
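
These registered checks are what Target.can_execute consults when an XGraph annotates per-layer targets (see __setup_targets_for_X in the XGraph example above). A sketch combining the two, assuming the 'test' target from setUpClass is still registered:

t = TestTargetRegistry.target_registry.get_target('test')
X = XLayer(name='pool1', type=['Pooling'], shapes=[-1, 2, 2, 2], sizes=[8],
           bottoms=[], tops=[], targets=[], attrs={})
assert t.can_execute(X, [], [])   # the Pooling check above returns True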
Example No. 24
class TestDPUCZDX8G(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()
    rt_manager = RtManager()

    @classmethod
    def setUpClass(cls):

        # Import DPU module
        from pyxir.contrib.dpuv2 import dpuv2

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDPUCZDX8G.target_registry.unregister_target("dpuv2-zcu104")
        TestDPUCZDX8G.target_registry.unregister_target("dpuv2-zcu102")
        TestDPUCZDX8G.target_registry.unregister_target("DPUCZDX8G-zcu102")
        TestDPUCZDX8G.target_registry.unregister_target("DPUCZDX8G-zcu104")
        TestDPUCZDX8G.target_registry.unregister_target("dpuv2-ultra96")
        TestDPUCZDX8G.target_registry.unregister_target("DPUCZDX8G-ultra96")
        TestDPUCZDX8G.target_registry.unregister_target("dpuv2-som")
        TestDPUCZDX8G.target_registry.unregister_target("DPUCZDX8G-som")

    # @unittest.skipIf(skip_tf, "Skipping Tensorflow related test because tensorflow is"
    #                 "not available")
    # def test_import_ext_quantizer(self):
    #     if TestDPUCZDX8G.target_registry.is_target('DPUCZDX8G-ultra96'):
    #         TestDPUCZDX8G.target_registry.unregister_target('DPUCZDX8G-ultra96')
    #         TestDPUCZDX8G.target_registry.unregister_target('DPUCZDX8G-zcu104')
    #         TestDPUCZDX8G.target_registry.unregister_target('DPUCZDX8G-zcu102')
    #         TestDPUCZDX8G.target_registry.unregister_target('DPUCZDX8G-som')
    #     if TestDPUCZDX8G.rt_manager.exists_op('cpu-np', 'DPU'):
    #         TestDPUCZDX8G.rt_manager.unregister_op('cpu-np', 'DPU')
    #     from pyxir.contrib.target import DPUCZDX8G_external_quantizer

    @unittest.skipIf(not is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G VART test")
    def test_compile_conv2d_pool2d(self):
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Strided
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [2, 2],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [3, 3],
            [1, 1],
            "Avg",
            [2, 2],
            [1, 1],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Padded
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [1, 1],
            "Max",
            [4, 4],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 8, 8, 1),
            (2, 1, 3, 3),
            [2, 2],
            [1, 1],
            [1, 1],
            "Avg",
            [4, 4],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Dilated
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [2, 2],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 10, 10, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [4, 4],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 28, 28, 512),
            (512, 512, 3, 3),
            [2, 2, 2, 2],
            [1, 1],
            [2, 2],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )

    @unittest.skipIf(is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G DNNC/DNNDK test")
    def test_compile_conv2d_pool2d_dnnc(self):
        conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Strided
        conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [2, 2],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [3, 3],
            [1, 1],
            "Avg",
            [2, 2],
            [1, 1],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Padded
        conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [1, 1],
            "Max",
            [4, 4],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        conv2d_pool2d_nhwc_oihw_test(
            (1, 8, 8, 1),
            (2, 1, 3, 3),
            [2, 2],
            [1, 1],
            [1, 1],
            "Avg",
            [4, 4],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        # Dilated
        conv2d_pool2d_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [2, 2],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        conv2d_pool2d_nhwc_oihw_test(
            (1, 10, 10, 1),
            (2, 1, 2, 2),
            [1, 1],
            [1, 1],
            [4, 4],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )
        conv2d_pool2d_nhwc_oihw_test(
            (1, 28, 28, 512),
            (512, 512, 3, 3),
            [2, 2, 2, 2],
            [1, 1],
            [2, 2],
            "Max",
            [2, 2],
            [0, 0],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )

    @unittest.skipIf(not is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G VART test")
    def test_compile_depthwise_conv2d_pool2d(self):
        xcompiler_conv2d_pool2d_nhwc_oihw_test(
            (1, 3, 3, 8),
            (8, 1, 3, 3),
            [1, 1, 1, 1],
            [1, 1],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            conv_groups=8,
            targets=["DPUCZDX8G-zcu104", "DPUCZDX8G-zcu102", "DPUCZDX8G-som"],
            expected_nb_subgraphs=3,
        )

    @unittest.skipIf(not is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G VART test")
    def test_compile_scale_conv2d(self):
        # Standalone scale/batchnorm unsupported in DPUCAHX8H compiler
        xcompiler_scale_conv2d_nhwc_oihw_test(
            (1, 299, 299, 3),
            (64, 3, 7, 7),
            [3, 3],
            [2, 2],
            [1, 1],
            target="DPUCZDX8G-zcu104",
            expected_nb_subgraphs=3,
        )

    @unittest.skipIf(not is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G VART test")
    def test_compile_resnetv1_block(self):
        xcompiler_resnetv1_block_test(
            in_shape=(1, 112, 112, 64),
            pool_size=[3, 3],
            pool_strides=[2, 2],
            w1_shape=(256, 64, 1, 1),
            w2_shape=(64, 64, 1, 1),
            w3_shape=(64, 64, 3, 3),
            w4_shape=(256, 64, 1, 1),
            c3_padding=[1, 1, 1, 1],
            target="DPUCZDX8G-zcu104",
        )

    @unittest.skipIf(is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G DNNC/DNNDK test")
    def test_compile_conv2d_leaky_relu_dnnc(self):
        conv2d_leaky_relu_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )

    @unittest.skipIf(not is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G VART test")
    def test_compile_conv2d_leaky_relu(self):
        xcompiler_conv2d_leaky_relu_nhwc_oihw_test(
            (1, 4, 4, 1),
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            targets=[
                "DPUCZDX8G-zcu104",
                "DPUCZDX8G-zcu102",
                "DPUCZDX8G-ultra96",
                "DPUCZDX8G-som",
            ],
        )

    @unittest.skipIf(is_dpuczdx8g_vart_flow_enabled(),
                     "DPUCZDX8G DNNC/DNNDK test")
    def test_dnnc_out_names(self):
        multi_output_conv2d_naming_test(["nn.conv-134", "nn.relu-22"])
        multi_output_conv2d_naming_test(["nn..con.v--1.", "n.n.-relu-2-"])
        conv2d_pool2d_naming_test(["conv1", "nn.conv-1"],
                                  ["nn.pool1", "nn.pool-1"])

    def test_supported_ops(self):
        ultra96_ops = TestDPUCZDX8G.target_registry.get_supported_op_check_names(
            "dpuv2-ultra96")

        assert "BatchNorm" in ultra96_ops
        assert "BiasAdd" in ultra96_ops
        assert "Concat" in ultra96_ops
        assert "Convolution" in ultra96_ops
        assert "Conv2DTranspose" in ultra96_ops
        assert "DPU" in ultra96_ops
        assert "Eltwise" in ultra96_ops
        assert "Pad" in ultra96_ops
        assert "Pooling" in ultra96_ops
        assert "Mean" in ultra96_ops
        assert "pReLU" in ultra96_ops
        assert "ReLU" in ultra96_ops
        assert "ReLU6" in ultra96_ops
        assert "Scale" in ultra96_ops

        zcu102_ops = TestDPUCZDX8G.target_registry.get_supported_op_check_names(
            "dpuv2-zcu102")

        assert "BatchNorm" in zcu102_ops
        assert "BiasAdd" in zcu102_ops
        assert "Concat" in zcu102_ops
        assert "Convolution" in zcu102_ops
        assert "Conv2DTranspose" in zcu102_ops
        assert "DPU" in zcu102_ops
        assert "Eltwise" in zcu102_ops
        assert "Pad" in zcu102_ops
        assert "Pooling" in zcu102_ops
        assert "Mean" in zcu102_ops
        assert "pReLU" in zcu102_ops
        assert "ReLU" in zcu102_ops
        assert "ReLU6" in zcu102_ops
        assert "Scale" in zcu102_ops

        zcu104_ops = TestDPUCZDX8G.target_registry.get_supported_op_check_names(
            "dpuv2-zcu104")

        assert "BatchNorm" in zcu104_ops
        assert "BiasAdd" in zcu104_ops
        assert "Concat" in zcu104_ops
        assert "Convolution" in zcu104_ops
        assert "Conv2DTranspose" in zcu104_ops
        assert "DPU" in zcu104_ops
        assert "Eltwise" in zcu104_ops
        assert "Pad" in zcu104_ops
        assert "Pooling" in zcu104_ops
        assert "Mean" in zcu104_ops
        assert "pReLU" in zcu104_ops
        assert "ReLU" in zcu104_ops
        assert "ReLU6" in zcu104_ops
        assert "Scale" in zcu104_ops

        som_ops = TestDPUCZDX8G.target_registry.get_supported_op_check_names(
            "dpuv2-som")

        assert "BatchNorm" in som_ops
        assert "BiasAdd" in som_ops
        assert "Concat" in som_ops
        assert "Convolution" in som_ops
        assert "Conv2DTranspose" in som_ops
        assert "DPU" in som_ops
        assert "Eltwise" in som_ops
        assert "Pad" in som_ops
        assert "Pooling" in som_ops
        assert "Mean" in som_ops
        assert "pReLU" in som_ops
        assert "ReLU" in som_ops
        assert "ReLU6" in som_ops
        assert "Scale" in som_ops
        assert "Upsampling2D" in som_ops

    def test_small(self):
        net = [
            XLayer(
                name="in1",
                type=["Input"],
                shapes=[1, 1, 4, 4],
                sizes=[16],
                bottoms=[],
                tops=["conv1"],
                layer=["in1"],
                targets=[],
            ),
            XLayer(
                name="in2",
                type=["Input"],
                shapes=[1, 2, 2, 2],
                sizes=[8],
                bottoms=[],
                tops=["dense1"],
                layer=["in2"],
                targets=[],
            ),
            XLayer(
                name="conv1",
                type=["Convolution"],
                shapes=[1, 2, 3, 3],
                sizes=[18],
                bottoms=["in1"],
                tops=["pool1"],
                layer=["conv1"],
                data=ConvData(np.array([1, 1]), np.array([0, 0])),
                attrs={
                    "data_layout": "NCHW",
                    "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                    "kernel_size": [3, 3],
                    "strides": [1, 1],
                    "dilation": [1, 1],
                    "groups": 1,
                    "channels": [2, 2],
                },
                targets=[],
            ),
            XLayer(
                name="pool1",
                type=["Pooling"],
                shapes=[1, 2, 2, 2],
                sizes=[8],
                bottoms=["conv1"],
                tops=["dense1"],
                layer=["pool1"],
                attrs={
                    "data_layout": "NCHW",
                    "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                    "kernel_size": [3, 3],
                    "strides": [1, 1],
                },
                targets=[],
            ),
            XLayer(
                name="dense1",
                type=["Dense"],
                shapes=[1, 20],
                sizes=[20],
                bottoms=["pool1", "in2"],
                tops=[],
                data=ConvData(np.array([1, 1]), np.array([0, 0])),
                layer=["dense1"],
                targets=[],
            ),
        ]
        xgraph = TestDPUCZDX8G.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ["dpuv2-zcu104"])
        dpu_xgraph = TestDPUCZDX8G.target_registry.get_target_build_func(
            "dpuv2-zcu104")(p_xgraph)

        assert len(dpu_xgraph) == 6
        layers = dpu_xgraph.get_layers()

        assert layers[0].type[0] == "Input"

        assert layers[1].type[0] == "Transpose"
        assert layers[1].bottoms == ["in1"]
        assert layers[1].tops == ["xp0"]

        assert layers[2].type[0] == "DPU"
        assert layers[2].bottoms == ["conv1_bottom_NCHW-NHWC"]
        assert layers[2].tops == ["pool1"]
        assert layers[2].shapes == [[1, 2, 2, 2]]
        assert layers[2].attrs["target"] == "dpuv2-zcu104"
        assert layers[2].attrs["input_names"] == ["xinput0"]
        assert layers[2].attrs["output_names"] == ["pool1"]
        assert layers[2].attrs["input_layers"]["xinput0"] == ["conv1"]
        assert layers[2].attrs["output_layers"]["pool1"] == ["pool1"]
        assert layers[2].attrs["__top_tensors"] == {
            "pool1": ["pool1_top_NHWC-NCHW"]
        }
        assert layers[2].attrs["orig_top_tensors"] == {"pool1": ["dense1"]}
        assert layers[2].attrs["__bottom_tensors"] == {
            "xinput0": ["conv1_bottom_NCHW-NHWC"]
        }
        assert layers[2].attrs["orig_bottom_tensors"] == {"xinput0": ["in1"]}

        # Merged TupleGetItem and Transpose layer
        assert layers[3].type[0] == "TupleGetItem"
        assert layers[3].name == "pool1"
        assert layers[3].shapes == [1, 2, 2, 2]
        assert layers[3].bottoms == ["xp0"]
        assert layers[3].tops == ["dense1"]
        assert layers[3].attrs["transpose"] is True

        assert layers[4].type[0] == "Input"
        assert layers[4].name == "in2"
        assert layers[4].tops == ["dense1"]

        assert layers[5].type[0] == "Dense"
        assert layers[5].name == "dense1"
        assert layers[5].shapes == [1, 20]
        assert layers[5].bottoms == ["pool1", "in2"]
        assert layers[5].tops == []
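
test_small shows what the DPU build func does to a partitioned graph: the subgraph collapses into a single 'DPU' layer, and because the DPU runs in NHWC layout while the graph is NCHW, a Transpose is inserted on the input path ('conv1_bottom_NCHW-NHWC') while the output transpose is merged into the TupleGetItem layer (attrs['transpose'] is True). A sketch for locating those layout-transform layers, assuming a dpu_xgraph produced as in test_small:

for X in dpu_xgraph.get_layers():
    if 'Transpose' in X.type:
        print('layout transform:', X.name, X.bottoms, '->', X.tops)
    elif 'TupleGetItem' in X.type and 'transpose' in X.attrs:
        print('merged output transpose in:', X.name)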
Example No. 25
    def setUpClass(cls):
        def xgraph_build_func(xgraph):
            raise NotImplementedError("")

        def xgraph_optimizer(xgraph):
            raise NotImplementedError("")

        def xgraph_quantizer(xgraph):
            raise NotImplementedError("")

        def xgraph_compiler(xgraph):
            raise NotImplementedError("")

        target_registry = TargetRegistry()
        target_registry.register_target(
            "test-DPU",
            xgraph_optimizer,
            xgraph_quantizer,
            xgraph_compiler,
            xgraph_build_func,
        )

        @register_op_support_check("test-DPU", "Convolution")
        def conv_op_support(X, bXs, tXs):
            data_layout = X.attrs['data_layout']
            kernel_h, kernel_w = X.attrs['kernel_size']
            stride_h, stride_w = X.attrs['strides']
            dilation_h, dilation_w = X.attrs['dilation']
            padding_h = X.attrs['padding'][data_layout.index('H')]
            padding_w = X.attrs['padding'][data_layout.index('W')]
            padding_h_top, padding_h_bot = padding_h
            padding_w_left, padding_w_right = padding_w
            ch_in, ch_out = X.attrs['channels']
            groups = X.attrs['groups']

            return (
                1 <= kernel_h <= 16
                and 1 <= kernel_w <= 16
                and 1 <= stride_h <= 4
                and 1 <= stride_w <= 4
                and 0 <= padding_h_top <= kernel_h - 1
                and 0 <= padding_h_bot <= kernel_h - 1
                and 0 <= padding_w_left <= kernel_w - 1
                and 0 <= padding_w_right <= kernel_w - 1
                and 1 <= ch_in <= 4096
                and 1 <= ch_out <= 4096
                and dilation_h * ch_in <= 4096
                and (dilation_h == 1 or stride_h == 1)
                and dilation_w * ch_in <= 4096
                and (dilation_w == 1 or stride_w == 1)
            )

        @register_op_support_check("test-DPU", "Pooling")
        def pooling_op_support(X, bXs, tXs):
            data_layout = X.attrs['data_layout']

            kernel_h, kernel_w = X.attrs['kernel_size']
            stride_h, stride_w = X.attrs['strides']
            padding_h = X.attrs['padding'][data_layout.index('H')]
            padding_w = X.attrs['padding'][data_layout.index('W')]
            padding_h_top, padding_h_bot = padding_h
            padding_w_left, padding_w_right = padding_w

            channels = X.shapes[data_layout.index('C')]

            return (
                1 <= kernel_h <= 8
                and 1 <= kernel_w <= 8
                and 1 <= stride_h <= 4
                and 1 <= stride_w <= 4
                and 0 <= padding_h_top <= 4
                and 0 <= padding_h_bot <= 4
                and 0 <= padding_w_left <= 4
                and 0 <= padding_w_right <= 4
                and 1 <= channels <= 4096
            )
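
        # Sketch (not in the original example): a quick sanity check of the
        # convolution constraint envelope above. 'MockLayer' is a hypothetical
        # stand-in for the XLayer the partitioner passes to these checks, and
        # this assumes register_op_support_check returns the wrapped function.
        class MockLayer:
            def __init__(self, attrs, shapes=None):
                self.attrs = attrs
                self.shapes = shapes if shapes is not None else []

        conv = MockLayer({
            'data_layout': 'NCHW',
            'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
            'kernel_size': [3, 3],
            'strides': [1, 1],
            'dilation': [1, 1],
            'groups': 1,
            'channels': [2, 2],
        })
        assert conv_op_support(conv, [], [])   # 3x3 kernel, stride 1: fits
        conv.attrs['kernel_size'] = [17, 17]   # exceeds the 16x16 kernel cap
        assert not conv_op_support(conv, [], [])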
Example No. 26
class TestDPUContrib(unittest.TestCase):

    xgraph_partitioner = XGraphPartitioner()
    xgraph_factory = XGraphFactory()
    target_registry = TargetRegistry()

    @classmethod
    def setUpClass(cls):

        # Import DPU module; the import itself registers the DPU targets
        from pyxir.contrib.dpuv2 import dpuv2

    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestDPUContrib.target_registry.unregister_target('dpuv2-zcu104')
        TestDPUContrib.target_registry.unregister_target('dpuv2-zcu102')
        TestDPUContrib.target_registry.unregister_target('DPUCZDX8G-zcu102')
        TestDPUContrib.target_registry.unregister_target('DPUCZDX8G-zcu104')
        TestDPUContrib.target_registry.unregister_target('dpuv2-ultra96')
        TestDPUContrib.target_registry.unregister_target('DPUCZDX8G-ultra96')

    def test_supported_ops(self):
        ultra96_ops = TestDPUContrib.target_registry\
            .get_supported_op_check_names('dpuv2-ultra96')

        assert 'BatchNorm' in ultra96_ops
        assert 'BiasAdd' in ultra96_ops
        assert 'Concat' in ultra96_ops
        assert 'Convolution' in ultra96_ops
        assert 'Conv2DTranspose' in ultra96_ops
        assert 'DPU' in ultra96_ops
        assert 'Eltwise' in ultra96_ops
        assert 'Pad' in ultra96_ops
        assert 'Pooling' in ultra96_ops
        assert 'Mean' in ultra96_ops
        assert 'pReLU' in ultra96_ops
        assert 'ReLU' in ultra96_ops
        assert 'ReLU6' in ultra96_ops
        assert 'Scale' in ultra96_ops

        zcu102_ops = TestDPUContrib.target_registry\
            .get_supported_op_check_names('dpuv2-zcu102')

        assert 'BatchNorm' in zcu102_ops
        assert 'BiasAdd' in zcu102_ops
        assert 'Concat' in zcu102_ops
        assert 'Convolution' in zcu102_ops
        assert 'Conv2DTranspose' in zcu102_ops
        assert 'DPU' in zcu102_ops
        assert 'Eltwise' in zcu102_ops
        assert 'Pad' in zcu102_ops
        assert 'Pooling' in zcu102_ops
        assert 'Mean' in zcu102_ops
        assert 'pReLU' in zcu102_ops
        assert 'ReLU' in zcu102_ops
        assert 'ReLU6' in zcu102_ops
        assert 'Scale' in zcu102_ops

        zcu104_ops = TestDPUContrib.target_registry\
            .get_supported_op_check_names('dpuv2-zcu104')

        assert 'BatchNorm' in zcu104_ops
        assert 'BiasAdd' in zcu104_ops
        assert 'Concat' in zcu104_ops
        assert 'Convolution' in zcu104_ops
        assert 'Conv2DTranspose' in zcu104_ops
        assert 'DPU' in zcu104_ops
        assert 'Eltwise' in zcu104_ops
        assert 'Pad' in zcu104_ops
        assert 'Pooling' in zcu104_ops
        assert 'Mean' in zcu104_ops
        assert 'pReLU' in zcu104_ops
        assert 'ReLU' in zcu104_ops
        assert 'ReLU6' in zcu104_ops
        assert 'Scale' in zcu104_ops
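
    # Sketch (not part of the original tests): the three per-target assert
    # blocks above differ only in the target name, so they could be
    # collapsed into a single parametrized loop, e.g.:
    #
    #   expected = {'BatchNorm', 'BiasAdd', 'Concat', 'Convolution',
    #               'Conv2DTranspose', 'DPU', 'Eltwise', 'Pad', 'Pooling',
    #               'Mean', 'pReLU', 'ReLU', 'ReLU6', 'Scale'}
    #   for target in ('dpuv2-ultra96', 'dpuv2-zcu102', 'dpuv2-zcu104'):
    #       ops = TestDPUContrib.target_registry \
    #           .get_supported_op_check_names(target)
    #       assert expected <= set(ops), target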

    def test_small(self):
        net = [
            XLayer(name='in1',
                   type=['Input'],
                   shapes=[1, 1, 4, 4],
                   sizes=[16],
                   bottoms=[],
                   tops=['conv1'],
                   layer=['in1'],
                   targets=[]),
            XLayer(name='in2',
                   type=['Input'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=[],
                   tops=['dense1'],
                   layer=['in2'],
                   targets=[]),
            XLayer(name='conv1',
                   type=['Convolution'],
                   shapes=[1, 2, 3, 3],
                   sizes=[18],
                   bottoms=['in1'],
                   tops=['pool1'],
                   layer=['conv1'],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                       'kernel_size': [3, 3],
                       'strides': [1, 1],
                       'dilation': [1, 1],
                       'groups': 1,
                       'channels': [2, 2]
                   },
                   targets=[]),
            XLayer(name='pool1',
                   type=['Pooling'],
                   shapes=[1, 2, 2, 2],
                   sizes=[8],
                   bottoms=['conv1'],
                   tops=['dense1'],
                   layer=['pool1'],
                   attrs={
                       'data_layout': 'NCHW',
                       'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                       'kernel_size': [3, 3],
                       'strides': [1, 1],
                   },
                   targets=[]),
            XLayer(name='dense1',
                   type=['Dense'],
                   shapes=[1, 20],
                   sizes=[20],
                   bottoms=['pool1', 'in2'],
                   tops=[],
                   data=ConvData(np.array([1, 1]), np.array([0, 0])),
                   layer=['dense1'],
                   targets=[])
        ]
        xgraph = TestDPUContrib.xgraph_factory.build_from_xlayer(net)
        p_xgraph = partition(xgraph, ['dpuv2-zcu104'])
        dpu_xgraph = TestDPUContrib.target_registry\
            .get_target_build_func('dpuv2-zcu104')(p_xgraph)

        assert len(dpu_xgraph) == 6
        layers = dpu_xgraph.get_layers()

        assert layers[0].type[0] == 'Input'

        assert layers[1].type[0] == 'Transpose'
        assert layers[1].bottoms == ['in1']
        assert layers[1].tops == ['xp0']

        assert layers[2].type[0] == 'DPU'
        assert layers[2].bottoms == ['conv1_bottom_NCHW>NHWC']
        assert layers[2].tops == ['pool1']
        assert layers[2].shapes == [[1, 2, 2, 2]]
        assert layers[2].attrs['target'] == 'dpuv2-zcu104'
        assert layers[2].attrs['input_names'] == ['xinput0']
        assert layers[2].attrs['output_names'] == ['pool1']
        assert layers[2].attrs['input_layers']['xinput0'] == ['conv1']
        assert layers[2].attrs['output_layers']['pool1'] == ['pool1']
        assert layers[2].attrs['__top_tensors'] ==\
            {'pool1': ['pool1_top_NHWC>NCHW']}
        assert layers[2].attrs['orig_top_tensors'] ==\
            {'pool1': ['dense1']}
        assert layers[2].attrs['__bottom_tensors'] ==\
            {'xinput0': ['conv1_bottom_NCHW>NHWC']}
        assert layers[2].attrs['orig_bottom_tensors'] ==\
            {'xinput0': ['in1']}

        # Merged TupleGetItem and Transpose layer
        assert layers[3].type[0] == 'TupleGetItem'
        assert layers[3].name == 'pool1'
        assert layers[3].shapes == [1, 2, 2, 2]
        assert layers[3].bottoms == ['xp0']
        assert layers[3].tops == ['dense1']
        assert layers[3].attrs['transpose'] is True

        assert layers[4].type[0] == 'Input'
        assert layers[4].name == 'in2'
        assert layers[4].tops == ['dense1']

        assert layers[5].type[0] == 'Dense'
        assert layers[5].name == 'dense1'
        assert layers[5].shapes == [1, 20]
        assert layers[5].bottoms == ['pool1', 'in2']
        assert layers[5].tops == []
Example No. 27
"""Utilities for testing DPUCZDX8G compilation"""

import os
import xir
import shutil
import numpy as np
import pyxir as px

from pyxir.target_registry import TargetRegistry
from pyxir.graph import XGraph
from pyxir.graph.xgraph_factory import XGraphFactory

XGRAPH_FACTORY = XGraphFactory()
TARGET_REGISTRY = TargetRegistry()
FILE_PATH = os.path.dirname(os.path.realpath(__file__))


def remove_all_files_with_suffix(dir_path, suffix):
    """Remove all files in `dir_path` whose names end with `suffix`."""
    files_with_suffix = [f for f in os.listdir(dir_path) if f.endswith(suffix)]
    for f in files_with_suffix:
        os.remove(os.path.join(dir_path, f))
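
# Hypothetical usage (sketch; the ".xmodel" suffix is an assumption, not
# taken from this file): clean up generated compiler artifacts left in the
# test directory after a DPUCZDX8G compilation run, e.g.:
#
#   remove_all_files_with_suffix(FILE_PATH, ".xmodel")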


def get_child_subgraphs(graph: "Graph"):
    assert graph is not None, "'graph' should not be None."
    root_subgraph = graph.get_root_subgraph()
    assert (
        root_subgraph is not None
    ), "Failed to get root subgraph of input Graph object."
    if root_subgraph.is_leaf: