Example #1
    def test_dynamic_create_unary(self) -> None:
        """Test code for unary operators."""
        unary_ops = [
            'Identity', 'BinaryMeanScalingQuantizer', 'Transpose',
            'LinearMidTreadHalfQuantizer', 'MaxPool', 'AveragePool', 'Reshape',
            'Softmax'
        ]

        # unary input
        shape = [1, 3, 3, 3]
        x = Constant('const', Float32(), np.zeros(shape))

        name = 'test'
        dtype = Float32()

        for op in unary_ops:
            shape = [1, 3, 3, 3]
            module = importlib.import_module(
                'blueoil.converter.core.operators')
            try:
                op_def = getattr(module, op)
                input_ops = {n: x for n in op_def.input_names}
                if op == 'Transpose':
                    shape = self.reverse_shape(shape)
                elif op in ('MaxPool', 'AveragePool'):
                    shape = [1, 2, 2, 3]
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic unary operator load test passed!")
Example #2
    def make_simple_model(self) -> Graph:
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3]),
            dimension_format='NHWC',
        )

        # Conv
        conv = Conv('conv', [1, 4, 4, 1],
                    Float32(), {
                        'X': x,
                        'W': w
                    },
                    kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 4, 4, 1], Float32(), {'input': conv})

        # add ops to the graph
        graph.add_op_and_inputs(y)
        return graph
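A quick sanity check on the returned graph, reusing the same check_nodes call as test_graph_conv (Example #13 below); a minimal sketch, assuming it runs inside the same test class:

    graph = self.make_simple_model()
    # check_nodes verifies that every operator's inputs are wired to nodes in the graph
    self.assertTrue(graph.check_nodes())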
Example #3
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Float32(), np.array(1))
        s2 = Constant('aq_const2', Float32(), np.array(2))
        aq = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
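        # record the activation and kernel quantizers on conv2 so the quantization passes can find them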
        conv2.a_quantizer = [aq]
        conv2.quantizer = kq

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #4
    def test_dynamic_create_batchnorm(self) -> None:
        """Test code for n-ary operators (BatchNormalization)."""
        x = Constant(
            'const',
            Float32(),
            np.zeros([1, 3, 3, 3])
        )

        nary_ops = [
            'BatchNormalization'
        ]

        name = 'test'
        shape = [1, 3, 3, 3]
        dtype = Float32()

        for op in nary_ops:
            module = importlib.import_module('blueoil.converter.core.operators')
            try:
                op_def = getattr(module, op)
                input_ops = {n: x for n in op_def.input_names}
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic batchnorm operator load test passed!")
Example #5
    def test_add_consistency2(self) -> None:
        """Test code for 'Add', which fails."""
        a = Constant(
            'const1',
            Float32(),
            np.zeros([1, 3, 3])
        )
        b = Constant(
            'const2',
            Float32(),
            np.zeros([2])
        )
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        try:
            Add(
                'add1',
                [1, 3, 3],
                Float32(),
                input_ops
            )
        except AssertionError:
            print("Consistency test for 'Add' #2 passed!")
        else:
            self.fail("Consistency test for 'Add' #2 failed.")
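The try/except/else arrangement above works, but unittest can state the expectation directly; a minimal equivalent sketch, assuming the shape mismatch surfaces as an AssertionError:

    with self.assertRaises(AssertionError):
        Add('add1', [1, 3, 3], Float32(), input_ops)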
Example #6
    def test_dynamic_create_binary(self) -> None:
        """Test code for binary operators."""
        x = Constant('const1', Float32(), np.zeros([1, 3, 3, 3]))

        w = Constant('const2', Float32(), np.zeros([1, 2, 2, 3]))

        binary_ops = ['Conv', 'Add']

        name = 'test'
        dtype = Float32()

        for op in binary_ops:
            shape = [1, 3, 3, 3]
            module = importlib.import_module(
                'blueoil.converter.core.operators')
            try:
                op_def = getattr(module, op)
                shape = [1, 2, 2, 3] if op == 'Conv' else shape
                if op == 'Conv':
                    input_ops = {n: opw for n, opw in zip(op_def.input_names, [x, w])}
                else:
                    input_ops = {n: x for n in op_def.input_names}
                args = [name, shape, dtype, input_ops]
                obj = op_def(*args)
                self.assertEqual(obj.name, name)
            except Exception as e:
                print(f'failed in testing {op}.')
                raise e

        print("Dynamic binary operator load test passed!")
Example #7
    def test_add_consistency1(self) -> None:
        """Test code for 'Add', which succeeds."""
        a = Constant('const1', Float32(), np.zeros([1, 3, 3]))
        b = Constant('const2', Float32(), np.zeros([3]))
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        add = Add('add1', [1, 3, 3], Float32(), input_ops)

        print("Consistency test for 'Add' #1 passed!")
Example #8
    def test_pool_consistency(self) -> None:
        """Test code for Pool."""
        x = Constant('const1', Float32(), np.zeros([1, 3, 3, 3]))
        input_ops = {'X': cast(Operator, x)}

        pool = MaxPool('max_pool1', [1, 2, 2, 3],
                      Float32(),
                      input_ops,
                      kernel_shape=[3, 3],
                      pads=[1, 1, 1, 1],
                      strides=[2, 2])

        print("Consistency test for pooling operator passed!")
Example #9
    def create_sample_graph(data: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # constant and internal nodes
        w = Constant('weight', Float32(), data)
        i1 = Identity('identity1', [1, 2, 2, 3], Float32(), {'input': w})
        q = BinaryMeanScalingQuantizer('qtz1', [1, 2, 2, 3], Float32(),
                                       {'input': i1})

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2])

        # One output
        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv})
        rs = Reshape('reshape', [1, 48], Float32(), {'data': i2})
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
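The two Identity nodes are the interesting part of this fixture: create_expected_graph in Example #18 below builds what looks like the intended result of a pass over this graph, with identity2 removed (Reshape consumes the Conv directly) and the weight data transposed, while identity1 survives.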
Example #10
    def test_maxpool(self) -> None:
        """Test code for MaxPool."""
        # get MaxPool's input names
        i_names = MaxPool.input_names
        self.assertEqual(i_names, ['X'])

        # set x to MaxPool m's input
        x = Constant('const', Float32(), np.zeros([1, 3, 3, 3]))
        inputs: Dict[str, Operator] = {i_names[0]: x}
        m = MaxPool("MaxPool", [1, 2, 2, 3],
                    Float32(),
                    inputs,
                    kernel_shape=[2, 2])

        print("MaxPool test passed!")
Example #11
    def test_conv_consistency(self) -> None:
        """Test code for Conv."""
        x = Input(
            'const1',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))
        input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}

        conv = Conv('conv_under_test', [1, 3, 3, 3],
                   Float32(),
                   input_ops,
                   pads=[1, 1, 2, 2],
                   strides=[2, 2])

        print("Consistency test for conv operator passed!")
Example #12
    def create_sample_graph(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], QUANTIZED_PACKED(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})

        # One output
        y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #13
    def test_graph_conv(self) -> None:
        """Test code for making a simple graph with Conv."""
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3])
        )

        # Conv
        conv = Conv(
            'conv',
            [1, 4, 4, 3],
            Float32(),
            {'X': x, 'W': w},  # you can get these keys by 'Conv.input_names'
            kernel_shape=[2, 2]
        )

        # One output
        y = Output(
            'output',
            [1, 4, 4, 3],
            Float32(),
            {'input': conv}  # you can get this key by 'Output.input_names'
        )

        # add ops to the graph
        graph.add_op(x)
        graph.add_op(w)
        graph.add_op(conv)
        graph.add_op(y)

        self.assertTrue(graph.check_nodes(), "All inputs of operators must match their outputs.")
        print("Graph test passed!")
Example #14
    def test_pass_propagate_output_type_backward(self) -> None:
        """Test pass."""
        data1 = np.float32(np.random.rand(1, 2, 2, 3))
        graph1 = self.create_sample_graph(data1)

        pass_propagate_output_type_backward(graph1)

        self.assertEqual(graph1.get_op('conv1').dtype, Float32(),
                         '[Failed] dtype of SpaceToDepth was not propagated correctly')

        print("Test pass #7 propagate output type backward passed!")
Example #15
def pass_insert_cast(graph: Graph) -> None:
    """Insert Cast Operator if needed

    Args:
        graph (Graph): The input graph. It will be modified in-place.

    """
    cast_idx = 0
    exec_list = sort_graph(graph)
    for m in exec_list:
        if m.dtype != QUANTIZED_PACKED():
            continue
        to_be_updated = {}
        to_be_updated_names = []
        for out_name, out_nodes in m.output_ops.items():
            cast_needed = []
            new_out_nodes = []
            channels = m.channels
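            # partition the consumers: those that can take packed data stay wired
            # to m directly, the rest get re-routed through a single new Cast node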
            for out_node in out_nodes:
                if out_node.preserve_quantization:
                    if out_node.op_type != 'Conv' or out_node.is_quantized:
                        new_out_nodes.append(out_node)
                        continue
                if isinstance(
                        out_node, Output
                ):  # need to shrink the number of output channels
                    channels = out_node.channels
                cast_needed.append(out_node)
            if not cast_needed:
                continue
            shape = [1, m.height, m.width, channels]
            cast_op = Cast(
                f'automatic_cast_{cast_idx}',
                shape,  # TODO(primenumber): fix shape
                Float32(),
                {'x': m},
                'NHWC')
            cast_idx += 1
            for out_node in cast_needed:
                cast_op.add_output('y', out_node)
                in_names = []
                for in_name, in_node in out_node.input_ops.items():
                    if m == in_node:
                        in_names.append(in_name)
                for in_name in in_names:
                    out_node.add_input(in_name, cast_op)
            new_out_nodes.append(cast_op)
            graph.add_op(cast_op)
            to_be_updated_names.append(out_name)
            to_be_updated.update({out_name: new_out_nodes})

        for out_name in to_be_updated_names:
            m.remove_output(out_name)
        m.add_outputs(to_be_updated)
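A hedged usage sketch for this pass (the pipeline order is an assumption; the pass and fixture names are taken from the other examples in this listing):

    # hypothetical driver: quantize first, then patch in the Casts
    graph = create_sample_graph(data1, data2)
    pass_quantize_convolutions(graph)  # packs quantizer dtypes, see Example #17
    pass_insert_cast(graph)            # re-routes non-packed consumers through Cast ops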
Example #16
    def test_conv(self) -> None:
        """Test code for Conv."""
        # get Conv's input names
        i_names = Conv.input_names
        self.assertTrue({'X', 'W'}.issubset(set(i_names)))

        # set x and w as Conv c's inputs
        x = Input(
            'input',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 5]))
        inputs: Dict[str, Operator] = {i_names[0]: x, i_names[1]: w}
        c = Conv("conv1", [1, 2, 2, 3], Float32(), inputs, kernel_shape=[2, 2])

        self.assertEqual(c.batchsize, 1)
        self.assertEqual(c.height, 2)
        self.assertEqual(c.width, 2)
        self.assertEqual(c.channel, 3)
        self.assertEqual(c.kernel_height, 2)
        self.assertEqual(c.kernel_width, 2)

        print("Conv test passed!")
Example #17
    def test_pass_quantize_convolutions(self) -> None:
        """Test pass."""
        data1 = np.float32(np.random.rand(1, 2, 2, 3))
        data2 = np.float32(np.random.rand(1, 2, 2, 3))
        graph1 = self.create_sample_graph(data1, data2)

        pass_quantize_convolutions(graph1)

        self.assertEqual(graph1.get_op('aqtz1').dtype, QUANTIZED_PACKED(),
                         '[Failed] wrong output dtype for activation quantizer')
        self.assertEqual(graph1.get_op('kqtz1').dtype, PackedUint32(),
                         '[Failed] wrong output dtype for kernel quantizer')
        self.assertEqual(graph1.get_op('conv2').dtype, Float32(),
                         '[Failed] wrong output dtype for conv')

        print("Test pass #5 quantize_convolutions passed!")
Example #18
    def create_expected_graph(data: np.ndarray) -> Graph:
        graph = Graph()

        data = data.transpose([3, 2, 1, 0])

        # input
        x = Input('placeholder', [1, 5, 5, 3],
                  Float32(),
                  dimension_format='NHWC')

        # constant and internal nodes
        w = Constant('weight', Float32(), data, dimension_format='NHWC')
        i1 = Identity('identity1', [1, 2, 2, 3],
                      Float32(), {'input': w},
                      dimension_format='NHWC')
        q = BinaryMeanScalingQuantizer('qtz1', [1, 2, 2, 3],
                                       Float32(), {'input': i1},
                                       dimension_format='NHWC')

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2],
                    dimension_format='NHWC')

        # One output
        rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #19
    def create_sample_graph() -> Graph:
        graph = Graph()

        x = Input('placeholder', [2], Float32())

        s1 = Constant('potato_1', Float32(), np.array([1, 2]))
        s2 = Constant('potato_2', Float32(), np.array([1, 3]))
        add1 = Add('potatoes', [2], Float32(), {'A': s1, 'B': s2})
        add2 = Add('more_potatoes', [2], Float32(), {'A': x, 'B': add1})

        # One output
        y = Output('output', [2], Float32(), {'input': add2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #20
    def create_sample_graph_2(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        s1 = Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))
        add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})

        y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #21
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w1
                     },
                     kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Int32(), np.array([2], dtype=np.int32))
        s2 = Constant('aq_const2', Float32(), np.array([2.0],
                                                       dtype=np.float32))
        aq1 = LinearMidTreadHalfQuantizer('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': conv1,
            'Y': s1,
            'Z': s2
        })

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = BinaryMeanScalingQuantizer('kqtz1', [1, 2, 2, 3], Float32(),
                                        {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq1,
                         'W': kq
                     },
                     kernel_shape=[2, 2])
        conv2.a_quantizer = [aq1]
        conv2.quantizer = kq
        conv2.is_quantized = True
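        # mark conv2 as already quantized; passes such as pass_insert_cast (Example #15) check this flag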

        sc = Constant('bn_scale', Float32(), np.random.rand(3))
        be = Constant('bn_b', Float32(), np.random.rand(3))
        mu = Constant('bn_mu', Float32(), np.random.rand(3))
        va = Constant('bn_var', Float32(), np.random.rand(3))
        bn = BatchNormalization('bn', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'scale': sc,
            'B': be,
            'mean': mu,
            'var': va
        })

        # activation quantizer
        s3 = Constant('aq_const3', Int32(), np.array([2], dtype=np.int32))
        s4 = Constant('aq_const4', Float32(), np.array([2.0],
                                                       dtype=np.float32))
        aq2 = LinearMidTreadHalfQuantizer('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': bn,
            'Y': s3,
            'Z': s4
        })

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': aq2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Example #22
File: tf.py Project: ytfksw/blueoil
from typing import Dict, Optional

import blueoil.converter.core.operators as dlk_op
from blueoil.converter.core.data_types import DataType, Float32, Float64, Int8, Int16, Int32, \
    Int64, Uint8, Uint16, Uint32, Uint64, Bool, String
from blueoil.converter.core.exceptions import UnsupportedNode, UnsupportedDataType
from blueoil.converter.core.graph import Graph
from blueoil.converter.core.operators import Operator, Conv, \
    Identity, BinaryMeanScalingQuantizer, \
    BatchNormalization, LinearMidTreadHalfQuantizer, Add, Sub, \
    MaxPool, AveragePool, Reshape, Softmax, Transpose, Relu, SpaceToDepth, \
    Mul, BinaryChannelWiseMeanScalingQuantizer, ConcatOnDepth, Maximum, \
    DepthToSpace, ResizeNearestNeighbor, \
    Split, Pad, MatMul, Gather, Unique, Cast, Minimum, StridedSlice, Prod, Shape, LeakyRelu

DLK_DTYPE_MAP: Dict[str, Optional[DataType]] = {
    # any
    'DT_INVALID': Float32(),
    # primitives
    'DT_FLOAT': Float32(),
    'DT_INT32': Int32(),
    'DT_UINT8': Uint8(),
    'DT_INT8': Int8(),
    'DT_UINT16': Uint16(),
    'DT_INT16': Int16(),
    'DT_INT64': Int64(),
    'f': Float32(),
    'i': Int32(),

    # primitive vector
    'FLOATS': None,
    'INTS': None,
Example #23
from typing import Dict, Optional

from blueoil.converter.core.data_types import DataType, Float32, Float64, Int8, Int16, Int32, \
    Int64, Uint8, Uint16, Uint32, Uint64, Bool, String
from blueoil.converter.core.exceptions import UnsupportedNode, UnsupportedDataType
from blueoil.converter.core.graph import Graph
from blueoil.converter.core.operators import Operator, Conv, \
    Identity, BinaryMeanScalingQuantizer, \
    BatchNormalization, QTZ_linear_mid_tread_half, Add, \
    MaxPool, AveragePool, Reshape, Softmax, Transpose, Relu, SpaceToDepth, \
    Mul, BinaryChannelWiseMeanScalingQuantizer, ConcatOnDepth, Maximum, \
    DepthToSpace, ResizeNearestNeighbor, \
    Split, Pad, MatMul, Gather, Unique, Cast, Minimum, StridedSlice, Prod, Shape, LeakyRelu

DLK_DTYPE_MAP: Dict[str, Optional[DataType]] = {
    # any
    'DT_INVALID': Float32(),
    # primitives
    'DT_FLOAT': Float32(),
    'DT_INT32': Int32(),
    'DT_UINT8': Uint8(),
    'DT_INT8': Int8(),
    'DT_UINT16': Uint16(),
    'DT_INT16': Int16(),
    'DT_INT64': Int64(),
    'f': Float32(),
    'i': Int32(),

    # primitive vector
    'FLOATS': None,
    'INTS': None,

    # custom
    'DT_BOOL': Bool(),
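Both snippets are cut off mid-dictionary, but the visible entries show the idea: string type tags from the TensorFlow protobuf are mapped to DLK DataType instances, with None for tags that need special handling. A minimal lookup sketch under that assumption (the error handling is hypothetical):

    dlk_dtype = DLK_DTYPE_MAP.get('DT_FLOAT')  # -> Float32()
    if dlk_dtype is None:
        raise UnsupportedDataType('DT_FLOAT')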