Example #1
    def create_sample_graph(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     QUANTIZED_PACKED(), {
                         'X': x,
                         'W': w1
                     },
                     kernel_shape=[2, 2])
        conv1.is_quantized = True

        pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})

        # One output
        y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
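Example #13 below feeds this helper with random float32 weights; a minimal driver is just:

    data1 = np.float32(np.random.rand(1, 2, 2, 3))  # random weights for conv1
    graph = create_sample_graph(data1)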
Example #2
    def make_simple_model(self) -> Model:
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3]),
            dimension_format='NHWC',
        )

        # Conv
        conv = Conv('conv', [1, 4, 4, 1],
                    Float32(), {
                        'X': x,
                        'W': w
                    },
                    kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 4, 4, 1], Float32(), {'input': conv})

        # add ops to the graph
        graph.add_op_and_inputs(y)
        model = Model()
        model.graph = graph
        return model
Example #3
    def test_dynamic_create_unary(self) -> None:
        """Test code for unary operators."""
        unary_ops = [
            'Identity', 'QTZ_binary_mean_scaling', 'Transpose',
            'QTZ_linear_mid_tread_half', 'MaxPool', 'AveragePool', 'Reshape',
            'Softmax'
        ]

        # unary input
        shape = [1, 3, 3, 3]
        x = Constant('const', Float32(), np.zeros(shape))

        name = 'test'
        dtype = Float32()

        for op in unary_ops:
            shape = [1, 3, 3, 3]
            module = importlib.import_module('core.operators')
            try:
                op_def = getattr(module, op)
                input_ops = {n: x for n in op_def.input_names}
                shape = self.reverse_shape(shape) if op == 'Transpose' \
                    else [1, 2, 2, 3] if op == 'MaxPool' or op == 'AveragePool' \
                    else shape
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic unary operator load test passed!")
Example #4
    def test_conv(self) -> None:
        """Test code for Conv."""
        # get Conv's input names
        i_names = Conv.input_names
        self.assertTrue({'X', 'W'}.issubset(set(i_names)))

        # set x and w as Conv c's inputs
        x = Input(
            'input',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 5])
        )
        inputs: Dict[str, Operator] = {i_names[0]: x, i_names[1]: w}
        c = Conv(
            "conv1",
            [1, 2, 2, 3],
            Float32(),
            inputs,
            kernel_shape=[2, 2]
        )

        self.assertEqual(c.batchsize, 1)
        self.assertEqual(c.height, 2)
        self.assertEqual(c.width, 2)
        self.assertEqual(c.channel, 3)
        self.assertEqual(c.kernel_height, 2)
        self.assertEqual(c.kernel_width, 2)

        print("Conv test passed!")
Example #5
    def test_dynamic_create_binary(self) -> None:
        """Test code for binary operators."""
        x = Constant('const1', Float32(), np.zeros([1, 3, 3, 3]))

        w = Constant('const2', Float32(), np.zeros([1, 2, 2, 3]))

        binary_ops = ['Conv', 'Add']

        name = 'test'
        dtype = Float32()

        for op in binary_ops:
            shape = [1, 3, 3, 3]
            module = importlib.import_module('core.operators')
            try:
                op_def = getattr(module, op)
                shape = [1, 2, 2, 3] if op == 'Conv' else shape
                input_ops = {n: opw for n, opw in zip(op_def.input_names, [x, w])} \
                    if op == 'Conv' else {n: x for n in op_def.input_names}
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic binary operator load test passed!")
Example #6
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Float32(), np.array(1))
        s2 = Constant('aq_const2', Float32(), np.array(2))
        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
        conv2.a_quantizer = [aq]
        conv2.quantizer = kq

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
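Example #16 below runs the quantization pass over this graph; the expected usage is:

    data1 = np.float32(np.random.rand(1, 2, 2, 3))
    data2 = np.float32(np.random.rand(1, 2, 2, 3))
    graph = create_sample_graph(data1, data2)
    pass_quantize_convolutions(graph)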
Example #7
    def test_add_consistency1(self) -> None:
        """Test code for 'Add', which succeeds."""
        a = Constant('const1', Float32(), np.zeros([1, 3, 3]))
        b = Constant('const2', Float32(), np.zeros([3]))
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        add = Add('add1', [1, 3, 3], Float32(), input_ops)

        print("Consistency test for 'Add' #1 passed!")
Example #8
    def test_add_consistency2(self) -> None:
        """Test code for 'Add', which fails."""
        a = Constant('const1', Float32(), np.zeros([1, 3, 3]))
        b = Constant('const2', Float32(), np.zeros([2]))
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        try:
            add = Add('add1', [1, 3, 3], Float32(), input_ops)
        except AssertionError:
            print("Consistency test for 'Add' #2 passed!")

        else:
            self.assertTrue(False, "Consistency test for 'Add' #2 failed.")
Example #9
    def test_pool_consistency(self) -> None:
        """Test code for Pool."""
        x = Constant('const1', Float32(), np.zeros([1, 3, 3, 3]))
        input_ops = {'X': cast(Operator, x)}

        pool = MaxPool('max_pool1', [1, 2, 2, 3],
                       Float32(),
                       input_ops,
                       kernel_shape=[3, 3],
                       pads=[1, 1, 1, 1],
                       strides=[2, 2])

        print("Consistency test for pooling operator passed!")
Example #10
    def create_sample_graph(data: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # constant and internal nodes
        w = Constant('weight', Float32(), data)
        i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w})
        q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3], Float32(),
                                    {'input': i1})

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2])

        # One output
        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv})
        rs = Reshape('reshape', [1, 48], Float32(), {'data': i2})
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #11
    def test_maxpool(self) -> None:
        """Test code for MaxPool."""
        # get MaxPool's input names
        i_names = MaxPool.input_names
        self.assertEqual(i_names, ['X'])

        # set x to MaxPool m's input
        x = Constant('const', Float32(), np.zeros([1, 3, 3, 3]))
        inputs: Dict[str, Operator] = {i_names[0]: x}
        m = MaxPool("MaxPool", [1, 2, 2, 3],
                    Float32(),
                    inputs,
                    kernel_shape=[2, 2])

        print("MaxPool test passed!")
Example #12
    def test_conv_consistency(self) -> None:
        """Test code for Conv."""
        x = Input(
            'const1',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))
        input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}

        conv = Conv('conv_under_test', [1, 3, 3, 3],
                    Float32(),
                    input_ops,
                    pads=[1, 1, 2, 2],
                    strides=[2, 2])

        print("Consistency test for conv operator passed!")
Example #13
    def test_pass_propagate_output_type_backward(self) -> None:
        """Test pass."""
        data1 = np.float32(np.random.rand(1, 2, 2, 3))
        graph1 = self.create_sample_graph(data1)

        pass_propagate_output_type_backward(graph1)

        self.assertEqual(graph1.get_op('conv1').dtype, Float32(),
                         '[Failed] dtype of SpaceToDepth was not propagated correctly')

        print("Test pass #7 propagate output type backward passed!")
Example #14
    def test_graph_conv(self) -> None:
        """Test code for making a simple graph with Conv."""
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))

        # Conv
        conv = Conv(
            'conv',
            [1, 4, 4, 3],
            Float32(),
            {
                'X': x,
                'W': w
            },  # you can get these keys by 'Conv.input_names'
            kernel_shape=[2, 2])

        # One output
        y = Output(
            'output',
            [1, 4, 4, 3],
            Float32(),
            {'input': conv}  # you can get this key by 'Output.input_names'
        )

        # add ops to the graph
        graph.add_op(x)
        graph.add_op(w)
        graph.add_op(conv)
        graph.add_op(y)

        self.assertTrue(graph.check_nodes(),
                        "All inputs of operators must match their outputs.")
        print("Graph test passed!")
Example #15
    def test_dynamic_create_batchnorm(self) -> None:
        """Test code for n-ary operators (BatchNormalization). """
        x = Constant('const', Float32(), np.zeros([1, 3, 3, 3]))

        nary_ops = ['BatchNormalization']

        name = 'test'
        shape = [1, 3, 3, 3]
        dtype = Float32()

        for op in nary_ops:
            module = importlib.import_module('core.operators')
            try:
                op_def = getattr(module, op)
                input_ops = {n: x for n in op_def.input_names}
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic batchnorm operator load test passed!")
Example #16
    def test_pass_quantize_convolutions(self) -> None:
        """Test pass."""
        data1 = np.float32(np.random.rand(1, 2, 2, 3))
        data2 = np.float32(np.random.rand(1, 2, 2, 3))
        graph1 = self.create_sample_graph(data1, data2)

        pass_quantize_convolutions(graph1)

        self.assertEqual(graph1.get_op('aqtz1').dtype, QUANTIZED_NOT_PACKED(),
                         '[Failed] output dtype of the activation quantizer is incorrect')
        self.assertEqual(graph1.get_op('kqtz1').dtype, PackedUint32(),
                         '[Failed] output dtype of the kernel quantizer is incorrect')
        self.assertEqual(graph1.get_op('conv2').dtype, Float32(),
                         '[Failed] output dtype of the convolution is incorrect')

        print("Test pass #5 quantize_convolutions passed!")
Example #17
    def create_transposed_graph(self, data: np.ndarray) -> Graph:
        graph = Graph()
        data = data.transpose([3, 2, 1, 0])

        # input
        x = Input('placeholder', [1, 5, 5, 3],
                  Float32(),
                  dimension_format='NHWC')

        # constant and internal nodes
        w = Constant('weight', Float32(), data, dimension_format='NHWC')

        i = Identity('identity1', [1, 2, 2, 3],
                     Float32(), {'input': w},
                     dimension_format='NHWC')

        q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3],
                                    Float32(), {'input': i},
                                    dimension_format='NHWC')

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2],
                    dimension_format='NHWC')

        rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})

        # One output
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #18
    def create_sample_graph() -> Graph:
        graph = Graph()

        x = Input('placeholder', [2], Float32())

        s1 = Constant('potato_1', Float32(), np.array([1, 2]))
        s2 = Constant('potato_2', Float32(), np.array([1, 3]))
        add1 = Add('potatoes', [2], Float32(), {'A': s1, 'B': s2})
        add2 = Add('more_potatoes', [2], Float32(), {'A': x, 'B': add1})

        # One output
        y = Output('output', [2], Float32(), {'input': add2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #19
    def create_sample_graph_2(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        s1 = Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))
        add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})

        y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #20
from typing import Dict, Optional

from core.data_types import DataType, Float32, Float64, Int8, Int16, Int32, \
    Int64, Uint8, Uint16, Uint32, Uint64, Bool, String
from core.exceptions import UnsupportedNode, UnsupportedDataType
from core.operators import Operator, Conv, Identity, QTZ_binary_mean_scaling, \
    BatchNormalization, QTZ_linear_mid_tread_half, Add, \
    Pool, MaxPool, AveragePool, Reshape, Softmax, Transpose
import core.operators as dlk_op
import numpy as np
import importlib
import functools

DLK_DTYPE_MAP: Dict[str, Optional[DataType]] = {
    # any
    'UNDEFINED': None,
    # primitives
    'FLOAT': Float32(),
    'INT': Int32(),
    'UINT8': Uint8(),
    'INT8': Int8(),
    'UINT16': Uint16(),
    'INT16': Int16(),
    'INT32': Int32(),
    'INT64': Int64(),
    'f': Float32(),
    'i': Int32(),

    # primitive vector
    'FLOATS': None,
    'INTS': None,

    # custom
Example #21
    def create_graph(self, graph):

        x1 = Input(
            'input1',
            [1, 4, 4, 3],
            Float32(),
        )

        w1 = Constant(
            'weight1',
            Float32(),
            np.zeros([1, 2, 2, 3])
        )

        conv1 = Conv(
            'conv1',
            [1, 3, 3, 3],
            Float32(),
            {'X': x1, 'W': w1},
            kernel_shape=[2, 2]
        )

        w2 = Constant(
            'weight2',
            Float32(),
            np.zeros([3, 2, 2, 3])
        )

        conv2 = Conv(
            'conv2',
            [1, 2, 2, 3],
            Float32(),
            {'X': conv1, 'W': w2},
            kernel_shape=[2, 2]
        )

        x2 = Input(
            'input2',
            [3, 3, 3, 3],
            Float32(),
        )

        x3 = Input(
            'input3',
            [3, 3, 3, 3],
            Float32(),
        )

        conv3 = Conv(
            'conv3',
            [3, 2, 2, 3],
            Float32(),
            {'X': x2, 'W': conv2},
            kernel_shape=[2, 2]
        )

        conv4 = Conv(
            'conv4',
            [1, 2, 2, 3],
            Float32(),
            {'X': x3, 'W': conv3},
            kernel_shape=[2, 2]
        )

        y = Output(
            'output',
            [1, 2, 2, 3],
            Float32(),
            {'input': conv4}
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)
Example #22
    def create_sample_graph(self, data1: np.ndarray, data2: np.ndarray,
                            data3: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        w = Constant('weight', Float32(), data1)

        i = Identity('identity1', [3, 2, 2, 3], Float32(), {'input': w})

        t = Transpose('transpose1', [3, 2, 2, 3],
                      Float32(), {'data': i},
                      perm=[3, 2, 1, 0])

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': t})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': q
                     },
                     kernel_shape=[2, 2])

        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv1})

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': i2,
            'Y': s1,
            'Z': s2
        })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          Float32(), {'data': aq},
                          perm=[0, 1, 2, 3])

        w2 = Constant('weight2', Float32(), data2)

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': q2
                     },
                     kernel_shape=[2, 2])

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        i3 = Identity('identity3', [1, 3, 3, 3], Float32(), {'input': aq2})

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': i3,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #23
    def create_precompute_graph(self, data1: np.ndarray, data2: np.ndarray,
                                data3: np.ndarray) -> Graph:
        graph = Graph()

        # two inputs
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        scaling1, qdata = self.binary_mean_scaling(
            data1.transpose([3, 2, 1, 0]))
        w = Constant('weight', Float32(), qdata * scaling1)

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w
                     },
                     kernel_shape=[2, 2])

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': conv1,
            'Y': s1,
            'Z': s2
        })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          Float32(), {'data': aq},
                          perm=[0, 1, 2, 3])

        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2', Float32(), qdata2 * scaling2)

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': w2
                     },
                     kernel_shape=[2, 2])

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': aq2,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
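The binary_mean_scaling helper is not part of this excerpt. Given how its results are used (qdata * scaling reconstructs an approximation of the weights, and qdata is later handed to a 1-bit Packer in Example #24), a plausible sketch is a sign/mean-absolute-value decomposition; treat this as an assumption, not the actual helper:

    def binary_mean_scaling(data: np.ndarray) -> Tuple[np.float32, np.ndarray]:
        # Assumed behaviour: scaling is the mean absolute value and qdata the
        # elementwise sign pattern, so that qdata * scaling approximates data.
        scaling = np.float32(np.mean(np.abs(data)))
        qdata = np.sign(data).astype(np.float32)
        return scaling, qdata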
Example #24
    def create_quantized_graph(self, data: np.ndarray, data2: np.ndarray, data3: np.ndarray) \
            -> Tuple[Graph, np.float32, np.float32]:
        graph = Graph()

        # two inputs
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        from modules.packer import Packer
        packer = Packer(1, 32)
        data = data.transpose([3, 2, 1, 0])
        scaling, qdata = self.binary_mean_scaling(data)
        shape = list(data.shape)
        w = Constant(
            'weight',
            Float32(),
            qdata * scaling,
        )

        q = QTZ_binary_mean_scaling('qtz1', shape, Float32(), {'input': w})
        q.scaling_factor = scaling

        # Conv
        conv1 = Conv(
            'conv1',
            [1, 4, 4, 3],
            Float32(),
            {
                'X': x,
                'W': w
            },
            kernel_shape=[2, 2],
        )

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3],
                                       QUANTIZED_NOT_PACKED(), {
                                           'X': conv1,
                                           'Y': s1,
                                           'Z': s2
                                       })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          QUANTIZED_NOT_PACKED(), {'data': aq},
                          perm=[0, 1, 2, 3])

        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2',
                      Uint32(),
                      packer.run(qdata2),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        # quantizer connected to conv2 as 'conv2.quantizer'
        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Uint32(),
                                     {'input': w2})
        q2.scaling_factor = scaling2

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': w2
                     },
                     kernel_shape=[2, 2],
                     quantized=True)
        conv2.quantizer = q2

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': aq2,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph, scaling, scaling2
Example #25
from typing import Dict, Optional

import core.operators as dlk_op
from core.data_types import DataType, Float32, Float64, Int8, Int16, Int32, \
    Int64, Uint8, Uint16, Uint32, Uint64, Bool, String
from core.exceptions import UnsupportedNode, UnsupportedDataType
from core.graph import Graph
from core.operators import Operator, Conv, Identity, QTZ_binary_mean_scaling, \
    BatchNormalization, QTZ_linear_mid_tread_half, Add, \
    MaxPool, AveragePool, Reshape, Softmax, Transpose, Relu, SpaceToDepth, \
    Mul, QTZ_binary_channel_wise_mean_scaling, ConcatOnDepth, Maximum, DepthToSpace, ResizeNearestNeighbor, \
    Split, Pad, MatMul, Gather, Unique, Cast, Minimum, StridedSlice, Prod, Shape, LeakyRelu

DLK_DTYPE_MAP: Dict[str, Optional[DataType]] = {
    # any
    'DT_INVALID': None,
    # primitives
    'DT_FLOAT': Float32(),
    'DT_INT32': Int32(),
    'DT_UINT8': Uint8(),
    'DT_INT8': Int8(),
    'DT_UINT16': Uint16(),
    'DT_INT16': Int16(),
    'DT_INT64': Int64(),
    'f': Float32(),
    'i': Int32(),

    # primitive vector
    'FLOATS': None,
    'INTS': None,

    # custom
    'DT_BOOL': Bool(),
Example #26
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w1
                     },
                     kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Int32(), np.array([2], dtype=np.int32))
        s2 = Constant('aq_const2', Float32(), np.array([2.0],
                                                       dtype=np.float32))
        aq1 = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': conv1,
            'Y': s1,
            'Z': s2
        })

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(),
                                     {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq1,
                         'W': kq
                     },
                     kernel_shape=[2, 2])
        conv2.a_quantizer = [aq1]
        conv2.quantizer = kq
        conv2.is_quantized = True

        sc = Constant('bn_scale', Float32(), np.random.rand(3))
        be = Constant('bn_b', Float32(), np.random.rand(3))
        mu = Constant('bn_mu', Float32(), np.random.rand(3))
        va = Constant('bn_var', Float32(), np.random.rand(3))
        bn = BatchNormalization('bn', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'scale': sc,
            'B': be,
            'mean': mu,
            'var': va
        })

        # activation quantizer
        s3 = Constant('aq_const3', Int32(), np.array([2], dtype=np.int32))
        s4 = Constant('aq_const4', Float32(), np.array([2.0],
                                                       dtype=np.float32))
        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': bn,
            'Y': s3,
            'Z': s4
        })

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': aq2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #27
    def create_sample_graph3(self, data1: np.ndarray, data2: np.ndarray,
                             data3: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        w = Constant('weight', Float32(), data1)

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': w})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': q
                     },
                     kernel_shape=[2, 2])

        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv1})

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': i2,
            'Y': s1,
            'Z': s2
        })

        w2 = Constant('weight2', Float32(), data2)

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': q2
                     },
                     kernel_shape=[2, 2])

        w3 = Constant('weight3', Float32(), data3)

        q3 = QTZ_binary_mean_scaling('qtz3', [3, 2, 2, 3], Float32(),
                                     {'input': w3})

        conv3 = Conv('conv3', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': q3
                     },
                     kernel_shape=[2, 2])

        y1 = Output('output1', [1, 3, 3, 3], Float32(), {'input': conv2})

        y2 = Output('output2', [1, 3, 3, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y1)
        graph.add_op_and_inputs(y2)

        return graph
Example #28
    def create_quantized_graph2(self, data1: np.ndarray, data2: np.ndarray, data3: np.ndarray) \
            -> Tuple[Graph, np.float32, np.float32]:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        scaling1, qdata1 = self.binary_mean_scaling(data1)
        w = Constant('weight', Float32(), qdata1 * scaling1)

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': w})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w
                     },
                     kernel_shape=[2, 2])

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3],
                                       QUANTIZED_NOT_PACKED(), {
                                           'X': conv1,
                                           'Y': s1,
                                           'Z': s2
                                       })

        from modules.packer import Packer
        packer = Packer(1, 32)
        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2',
                      Uint32(),
                      packer.run(qdata2),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})
        q2.scaling_factor = scaling2

        conv2 = Conv(
            'conv2',
            [1, 3, 3, 3],
            Float32(),
            {
                'X': aq,
                'W': w2
            },
            kernel_shape=[2, 2],
            quantized=True,
        )
        conv2.quantizer = q2

        scaling3, qdata3 = self.binary_mean_scaling(data3)
        w3 = Constant('weight3',
                      Uint32(),
                      packer.run(qdata3),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        q3 = QTZ_binary_mean_scaling('qtz3', [3, 2, 2, 3], Float32(),
                                     {'input': w3})
        q3.scaling_factor = scaling3

        conv3 = Conv('conv3', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': w3
                     },
                     kernel_shape=[2, 2],
                     quantized=True)
        conv3.quantizer = q3

        y1 = Output('output1', [1, 3, 3, 3], Float32(), {'input': conv2})

        y2 = Output('output2', [1, 3, 3, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y1)
        graph.add_op_and_inputs(y2)

        return graph, scaling2, scaling3
Example #29
class DTypeChanger(GraphRunner):
    """Optimization class that changes dypes.

    This runner must run before PrecomputeRunner.
    """
    class Path(Enum):
        INPUT = 1
        WEIGHT = 2
        OTHER = 3

    _packed_dtype = {
        Path.INPUT: QUANTIZED_NOT_PACKED(),
        Path.WEIGHT: Uint32(),
        Path.OTHER: Float32()
    }
    _a_quantizers = {'QTZ_linear_mid_tread_half'}
    _w_quantizers = {
        'QTZ_binary_mean_scaling', 'QTZ_binary_channel_wise_mean_scaling'
    }
    _conv = {'Conv'}

    def __init__(self, graph: Graph) -> None:
        """Set up internal varibles."""
        self._output_convs: Dict[Operator, List[Conv]] = {}
        self._packed_input_path: Dict[str, Any] = {}

        super().__init__(graph, depth_first=False)

    # 1st phase: find the nodes whose dtype must be changed

    def _check_dtype_state(self, node: Operator) -> None:
        """checks the state of each node regarding dtype.

        - whether the node is after conv and before activation quantizer
        - whether the node is after activation and before conv
        """
        outputs = node.output_op_list
        convs: List[Conv] = sum([
            self._output_convs[out]
            for out in outputs if self._output_convs.get(out) is not None
        ], [])

        # determine whether the node lies on an input path, a weight path, or another path
        path = self.Path.WEIGHT
        for out in outputs:
            p = self._packed_input_path[out.name] if out.op_type not in self._conv \
                else self.Path.INPUT if node == out.input_ops['X'] \
                else self.Path.WEIGHT
            if path == self.Path.WEIGHT:
                path = p
            elif path == p:
                pass
            else:  # outputs lie on different paths
                raise ValueError(
                    'multiple outputs must have the same kind of paths.')

        is_not_before_a_quantizer = all(
            out.op_type not in self._a_quantizers for out in outputs)
        if convs and is_not_before_a_quantizer:
            self._output_convs[node] = convs

        self._packed_input_path[node.name] = path

    def run_backward_by_default(self, node: Operator, **kwargs: Any) -> None:
        self._check_dtype_state(node)

    def run_backward_output(self, node: Output, **kwargs: Any) -> None:
        self._packed_input_path[node.name] = self.Path.OTHER

    def run_backward_conv(self, node: Conv, **kwargs: Any) -> None:
        self._output_convs[node] = [node]

    # 2nd phase: change data type

    def turn(self, **kwargs: Any) -> None:
        """Set up qconv list"""
        output_convs: List[Conv] = sum(list(self._output_convs.values()), [])
        for conv in output_convs:
            # get all ascendants of conv
            ascendants = [
                k for k in self._output_convs.keys()
                if conv in self._output_convs[k]
            ]

            # whether some weight quantizer is among the ascendants
            wqtz_in_asc = any(
                n.op_type in self._w_quantizers for n in ascendants)
            # whether some activation quantizer is among the ascendants
            aqtz_in_asc = any(
                n.op_type in self._a_quantizers for n in ascendants)
            # if both, add conv to the list
            if wqtz_in_asc and aqtz_in_asc:
                kwargs['qconv'].add(conv)

    def _set_dtype(self, node: Operator, qconv: List[Conv]) -> None:
        def before_qconv() -> bool:
            """Return if the node is before a quantized convolver"""
            convs: List[Conv] = self._output_convs.get(node) or []
            # consistency check
            is_qconv: List[bool] = [c in qconv for c in convs]
            all_is_qconv = all(is_qconv)
            some_is_qconv = any(is_qconv)
            assert convs == [] or (all_is_qconv == some_is_qconv), \
                f'{node.name} connects to both of a quantized convolver and non-quantized one.'

            return convs != [] and all_is_qconv

        def get_dtype() -> Optional[DataType]:
            """Return dtype along with which path the node is on: 'input' or 'weight' of a conv"""
            path = self._packed_input_path.get(node.name)
            return self._packed_dtype[path] if path is not None else None

        dtype = get_dtype()
        if dtype is not None and before_qconv():
            node.dtype = dtype

    def run_forward_by_default(self, node: Operator, **kwargs: Any) -> None:
        self._set_dtype(node, kwargs['qconv'])