Code Example #1
    def create_sample_graph(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     QUANTIZED_PACKED(), {
                         'X': x,
                         'W': w1
                     },
                     kernel_shape=[2, 2])
        conv1.is_quantized = True

        pool1 = SpaceToDepth('s2d', [1, 2, 2, 12], Float32(), {'input': conv1})

        # One output
        y = Output('output', [1, 2, 2, 12], Float32(), {'input': pool1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
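A quick arithmetic check of the SpaceToDepth shape above: going from [1, 4, 4, 3] to [1, 2, 2, 12] is consistent with the standard space-to-depth semantics at block size 2 (each spatial dimension shrinks by the block size, and the channel count grows by its square). A minimal sketch of that shape rule, assuming those semantics (Example #1 does not pass block_size explicitly):

    # Shape rule only; the real SpaceToDepth operator also rearranges the data.
    def space_to_depth_shape(shape, block_size=2):
        n, h, w, c = shape
        assert h % block_size == 0 and w % block_size == 0
        return [n, h // block_size, w // block_size, c * block_size * block_size]

    assert space_to_depth_shape([1, 4, 4, 3]) == [1, 2, 2, 12]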
Code Example #2
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Float32(), np.array(1))
        s2 = Constant('aq_const2', Float32(), np.array(2))
        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq, 'W': kq}, kernel_shape=[2, 2])
        conv2.a_quantizer = [aq]
        conv2.quantizer = kq

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': conv2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
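Note the two quantizer attributes wired up at the end of Example #2: conv2.a_quantizer holds the list of activation quantizers feeding the Conv (here the QTZ_linear_mid_tread_half node), while conv2.quantizer holds the kernel-side QTZ_binary_mean_scaling node. The optimizer passes in Examples #6 and #18 populate these same attributes automatically.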
Code Example #3
    def create_expected_graph(data: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # constant and internal nodes
        w = Constant('weight', Float32(), data)
        q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3], Float32(),
                                    {'input': w})

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2])

        # One output
        rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Code Example #4
    def make_simple_model(self) -> Model:
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 3]),
            dimension_format='NHWC',
        )

        # Conv
        conv = Conv('conv', [1, 4, 4, 1],
                    Float32(), {
                        'X': x,
                        'W': w
                    },
                    kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 4, 4, 1], Float32(), {'input': conv})

        # add ops to the graph
        graph.add_op_and_inputs(y)
        model = Model()
        model.graph = graph
        return model
Code Example #5
    def test_conv(self) -> None:
        """Test code for Conv."""
        # get Conv's input names
        i_names = Conv.input_names
        self.assertTrue({'X', 'W'}.issubset(set(i_names)))

        # prepare the Conv operator's inputs: an image-like Input and a weight Constant
        x = Input(
            'input',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant(
            'weight',
            Float32(),
            np.zeros([1, 2, 2, 5])
        )
        inputs: Dict[str, Operator] = {i_names[0]: x, i_names[1]: w}
        c = Conv(
            "conv1",
            [1, 2, 2, 3],
            Float32(),
            inputs,
            kernel_shape=[2, 2]
        )

        self.assertEqual(c.batchsize, 1)
        self.assertEqual(c.height, 2)
        self.assertEqual(c.width, 2)
        self.assertEqual(c.channel, 3)
        self.assertEqual(c.kernel_height, 2)
        self.assertEqual(c.kernel_width, 2)

        print("Conv test passed!")
Code Example #6
File: optimizer.py Project: tkng/blueoil
    def run_forward_conv(self, node: Conv, **kwargs: Any) -> None:
        bits: List[int] = []
        aqtzers: List[Quantizer] = []
        if node_is_qconv(node):
            for x in self._qconv_qconv[node]:
                if node_is_activation_quantizer(x):
                    bits.append(x.nbit)
                    aqtzers.append(x)

        # all activation quantizers feeding a quantized Conv must agree on bit width
        if node_is_qconv(node):
            if len(set(bits)) != 1:
                raise ValueError('Values are not consistent')
            node.a_quantizer = aqtzers
Code Example #7
    def test_conv_consistency(self) -> None:
        """Test code for Conv."""
        x = Input(
            'const1',
            [1, 3, 3, 3],
            Float32(),
        )
        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))
        input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}

        conv = Conv('conv_under_test', [1, 3, 3, 3],
                    Float32(),
                    input_ops,
                    pads=[1, 1, 2, 2],
                    strides=[2, 2])

        print("Consistency test for conv operator passed!")
Code Example #8
    def create_transposed_graph(self, data: np.ndarray) -> Graph:
        graph = Graph()
        data = data.transpose([3, 2, 1, 0])

        # input
        x = Input('placeholder', [1, 5, 5, 3],
                  Float32(),
                  dimension_format='NHWC')

        # constant and internal nodes
        w = Constant('weight', Float32(), data, dimension_format='NHWC')

        i = Identity('identity1', [1, 2, 2, 3],
                     Float32(), {'input': w},
                     dimension_format='NHWC')

        q = QTZ_binary_mean_scaling('qtz1', [1, 2, 2, 3],
                                    Float32(), {'input': i},
                                    dimension_format='NHWC')

        # Conv
        conv = Conv('conv', [1, 4, 4, 3],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2],
                    dimension_format='NHWC')

        rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})

        # One output
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Code Example #9
    def create_sample_graph_2(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        s1 = Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))
        add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})

        y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Code Example #10
File: test_graph.py Project: yasumura-lm/blueoil
    def test_graph_conv(self) -> None:
        """Test code for making a simple graph with Conv."""
        graph = Graph()

        # two inputs
        x = Input(
            'input',
            [1, 5, 5, 3],
            Float32(),
        )

        w = Constant('weight', Float32(), np.zeros([1, 2, 2, 3]))

        # Conv
        conv = Conv(
            'conv',
            [1, 4, 4, 3],
            Float32(),
            {
                'X': x,
                'W': w
            },  # you can get these keys by 'Conv.input_names'
            kernel_shape=[2, 2])

        # One output
        y = Output(
            'output',
            [1, 4, 4, 3],
            Float32(),
            {'input': conv}  # you can get this key by 'Output.input_names'
        )

        # add ops to the graph
        graph.add_op(x)
        graph.add_op(w)
        graph.add_op(conv)
        graph.add_op(y)

        self.assertTrue(graph.check_nodes(),
                        "All inputs of operators must match their outputs.")
        print("Graph test passed!")
Code Example #11
    def create_sample_graph(data: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [3, 5, 5, 1],
                  Float32(),
                  dimension_format='CWHN')

        # constant and internal nodes
        w = Constant('weight', Float32(), data, dimension_format='CWHN')
        i1 = Identity('identity1', [3, 2, 2, 1],
                      Float32(), {'input': w},
                      dimension_format='CWHN')
        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 1],
                                    Float32(), {'input': i1},
                                    dimension_format='CWHN')

        # Conv
        conv = Conv('conv', [3, 4, 4, 1],
                    Float32(), {
                        'X': x,
                        'W': q
                    },
                    kernel_shape=[2, 2],
                    dimension_format='CWHN')

        # One output
        rs = Reshape('reshape', [1, 48], Float32(), {'data': conv})
        y = Output(
            'output',
            [1, 48],
            Float32(),
            {'input': rs},
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
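Example #11 is essentially the weight-quantized graph of Examples #3 and #8 declared in CWHN order: the input shape [3, 5, 5, 1] describes the same tensor as the NHWC shape [1, 5, 5, 3], and every node carries dimension_format='CWHN' so that downstream passes can interpret the axes correctly.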
Code Example #12
    def create_sample_graph(data1: np.ndarray, data2: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        # activation quantizer
        s1 = Constant('aq_const1', Int32(), np.array([2], dtype=np.int32))
        s2 = Constant('aq_const2', Float32(), np.array([2.0], dtype=np.float32))
        aq1 = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {'X': conv1, 'Y': s1, 'Z': s2})

        # Conv2
        w2 = Constant('weight2', Float32(), data2)
        kq = QTZ_binary_mean_scaling('kqtz1', [1, 2, 2, 3], Float32(), {'input': w2})
        conv2 = Conv('conv2', [1, 3, 3, 3], Float32(), {'X': aq1, 'W': kq}, kernel_shape=[2, 2])
        conv2.a_quantizer = [aq1]
        conv2.quantizer = kq
        conv2.is_quantized = True

        sc = Constant('bn_scale', Float32(), np.random.rand(3))
        be = Constant('bn_b', Float32(), np.random.rand(3))
        mu = Constant('bn_mu', Float32(), np.random.rand(3))
        va = Constant('bn_var', Float32(), np.random.rand(3))
        bn = BatchNormalization('bn', [1, 3, 3, 3], Float32(), {'X': conv2,
                                                                'scale': sc,
                                                                'B': be,
                                                                'mean': mu,
                                                                'var': va})

        # activation quantizer
        s3 = Constant('aq_const3', Int32(), np.array([2], dtype=np.int32))
        s4 = Constant('aq_const4', Float32(), np.array([2.0], dtype=np.float32))
        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {'X': bn, 'Y': s3, 'Z': s4})

        # One output
        y = Output('output', [1, 3, 3, 3], Float32(), {'input': aq2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
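The BatchNormalization node above takes the five conventional inputs X, scale, B, mean, and var. Assuming the standard inference-time definition, it computes scale * (X - mean) / sqrt(var + epsilon) + B per channel; a numpy sketch (the epsilon value is an assumption, not taken from the example):

    import numpy as np

    def batch_norm(x, scale, b, mean, var, epsilon=1e-5):
        # broadcast the per-channel statistics over the NHWC input
        return scale * (x - mean) / np.sqrt(var + epsilon) + b

    x = np.random.rand(1, 3, 3, 3).astype(np.float32)
    sc, be, mu, va = (np.random.rand(3).astype(np.float32) for _ in range(4))
    assert batch_norm(x, sc, be, mu, va).shape == (1, 3, 3, 3)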
Code Example #13
    def create_sample_graph3(self, data1: np.ndarray, data2: np.ndarray,
                             data3: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        w = Constant('weight', Float32(), data1)

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': w})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': q
                     },
                     kernel_shape=[2, 2])

        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv1})

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': i2,
            'Y': s1,
            'Z': s2
        })

        w2 = Constant('weight2', Float32(), data2)

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': q2
                     },
                     kernel_shape=[2, 2])

        w3 = Constant('weight3', Float32(), data3)

        q3 = QTZ_binary_mean_scaling('qtz3', [3, 2, 2, 3], Float32(),
                                     {'input': w3})

        conv3 = Conv('conv3', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': q3
                     },
                     kernel_shape=[2, 2])

        y1 = Output('output1', [1, 3, 3, 3], Float32(), {'input': conv2})

        y2 = Output('output2', [1, 3, 3, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y1)
        graph.add_op_and_inputs(y2)

        return graph
Code Example #14
File: tf.py Project: ki-lm/blueoil
    def create_new_node(self, node: Node, op_dic: Dict[str, Operator],
                        current_format: str, input_format_list: List[str],
                        nodes_to_remove) -> Operator:
        """Create a new operator node. This might be tooooo long code...
        Parameters
        ----------
        node : Node
            TF node corresponding to the operator
        op_dic : Dict from str to Operator
            Dict of preceding operators
        current_format : Dict from str to str
            Dict of data format of current node
        input_format_list : Dict from str to str
            Dict of data format of corresponding inputs of current node
        Returns
        -------
        new_op : Operator
            Newly created dlk operator
        """
        op_type = self.convert_operator(node.op_type)
        try:
            module = importlib.import_module('core.operators')
            class_def = getattr(module, op_type)
        except AttributeError:
            message = f'Operator {op_type} is not supported.'
            raise UnsupportedNode(message)

        new_op: Operator

        def get_inputs(cdef: Type[Operator],
                       current_node: Any) -> Tuple[Dict[str, Operator], List[str]]:
            input_names = cdef.input_names
            in_ops: Dict[str, Operator] = {}
            in_ops_order: List[str] = []
            for n, op in zip(input_names, current_node.inputs):
                in_ops[n] = op_dic[op]
                in_ops_order.append(n)
            return in_ops, in_ops_order

        input_ops, input_ops_order = get_inputs(class_def, node)

        # Here find the shape and data type for the op
        def infer_shape(attrs: Dict[str, Any]) -> List[int]:
            shape_dict = {
                n: input_ops[n].shape
                for n in class_def.input_names if input_ops.get(n)
            }
            return class_def.infer_shape(shape_dict, current_format,
                                         input_format_list, attrs)

        def infer_dtype() -> DataType:
            if node.get_dtype() is not None:
                return node.get_dtype()
            else:
                return list(input_ops.values())[0].dtype

        shape: List[int] = list(map(int, node.get_shape()))
        dtype = infer_dtype()

        if any(d < 0 for d in shape):
            shape = [1]

        if op_type == 'Conv':
            strides = node.attribute('strides')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            # calculated pads size for tf
            input_format = input_format_list[0]
            kernel_format = input_format_list[1]

            in_h = input_ops['X'].shape[input_format.index('H')]
            in_w = input_ops['X'].shape[input_format.index('W')]
            filt_h = input_ops['W'].shape[kernel_format.index('H')]
            filt_w = input_ops['W'].shape[kernel_format.index('W')]
            stride_h = strides[0]
            stride_w = strides[1]

            pads: List[int] = []
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': [filt_h, filt_w],
                    'strides': strides,
                    'pads': pads
                }
                shape = infer_shape(attributes)

            new_op = Conv(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                kernel_shape=[filt_h, filt_w],
                strides=strides,
                pads=pads,
            )
        elif op_type == 'BatchNormalization':
            epsilon = node.attribute('epsilon')[0]
            is_test = not node.attribute('is_training')

            if not shape:
                attributes = {'epsilon': epsilon, 'is_test': is_test}
                shape = infer_shape(attributes)

            new_op = BatchNormalization(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                epsilon=epsilon,
                is_test=is_test,
            )
        elif op_type == 'Add':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Add(node.name,
                         shape,
                         dtype,
                         input_ops,
                         dimension_format=current_format)
        elif op_type == 'Identity':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Identity(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'QTZ_linear_mid_tread_half':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = QTZ_linear_mid_tread_half(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'QTZ_binary_mean_scaling':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = QTZ_binary_mean_scaling(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Reshape':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Reshape(node.name,
                             shape,
                             dtype,
                             input_ops,
                             dimension_format=current_format)
        elif op_type == 'Softmax':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Softmax(node.name,
                             shape,
                             dtype,
                             input_ops,
                             dimension_format=current_format)
        elif op_type == 'MaxPool':

            kernel_shape = node.attribute('ksize')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            strides = node.attribute('strides')[0][1:3]

            in_h = input_ops['X'].height
            in_w = input_ops['X'].width
            filt_h = kernel_shape[0]
            filt_w = kernel_shape[1]
            stride_h = strides[0]
            stride_w = strides[1]

            pads = []
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': kernel_shape,
                    'pads': pads,
                    'strides': strides
                }
                shape = infer_shape(attributes)

            new_op = MaxPool(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                kernel_shape=kernel_shape,
                pads=pads,
                strides=strides,
            )
        elif op_type == 'AveragePool':

            kernel_shape = node.attribute('ksize')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            strides = node.attribute('strides')[0][1:3]

            in_h = input_ops['X'].height
            in_w = input_ops['X'].width
            filt_h = kernel_shape[0]
            filt_w = kernel_shape[1]
            stride_h = strides[0]
            stride_w = strides[1]

            pads = []
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': kernel_shape,
                    'pads': pads,
                    'strides': strides
                }
                shape = infer_shape(attributes)

            new_op = AveragePool(
                node.name,
                shape,
                dtype,
                input_ops,
                kernel_shape=kernel_shape,
                pads=pads,
                strides=strides,
            )
        elif op_type == 'Transpose':

            perm = node.attribute("perm")

            if not shape:
                attributes = {'perm': perm}
                shape = infer_shape(attributes)

            new_op = Transpose(
                node.name,
                shape,
                dtype,
                input_ops,
                perm=perm,
            )
        elif op_type == 'Relu':

            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Relu(node.name, shape, dtype, input_ops)
        elif op_type == 'LeakyRelu':

            alpha = node.attribute("alpha")[0]

            if not shape:
                attributes = {'alpha': alpha}
                shape = infer_shape(attributes)

            new_op = LeakyRelu(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                alpha=alpha,
            )
        elif op_type == 'SpaceToDepth':
            bs = node.attribute('block_size')
            if not bs:
                raise ValueError(
                    f'{op_type} {node.name} block size not specified')

            if not shape:
                attributes = {'block_size': bs[0]}
                shape = infer_shape(attributes)

            new_op = SpaceToDepth(node.name,
                                  shape,
                                  dtype,
                                  input_ops,
                                  dimension_format=current_format,
                                  block_size=bs[0])
        elif op_type == 'Mul':

            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Mul(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'QTZ_binary_channel_wise_mean_scaling':

            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = QTZ_binary_channel_wise_mean_scaling(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'ConcatOnDepth':
            # the last input is a constant node holding the concatenation axis
            axis = input_ops[input_ops_order[-1]]
            if current_format.index('C') != int(axis.data):
                raise ValueError(
                    f'{op_type} {node.name}: concatenation is only supported on the depth axis'
                )

            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = ConcatOnDepth(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )

            input_axis_name = input_ops_order[-1]
            nodes_to_remove.append(new_op.input_ops[input_axis_name])
            new_op.remove_input(input_axis_name)
        elif op_type == 'Maximum':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Maximum(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'DepthToSpace':
            bs = node.attribute('block_size')
            if not bs:
                raise ValueError(
                    f'{op_type} {node.name} block size not specified')

            if not shape:
                attributes = {'block_size': bs[0]}
                shape = infer_shape(attributes)

            new_op = DepthToSpace(node.name,
                                  shape,
                                  dtype,
                                  input_ops,
                                  dimension_format=current_format,
                                  block_size=bs[0])
        elif op_type == 'ResizeNearestNeighbor':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = ResizeNearestNeighbor(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Split':
            num_split = node.attribute('num_split')[0]

            if not isinstance(num_split, int):
                raise ValueError(
                    f'{op_type} {node.name} only supports integer value')

            if not shape:
                attributes = {'split': num_split}
                shape = infer_shape(attributes)

            new_op = Split(node.name,
                           shape,
                           dtype,
                           input_ops,
                           dimension_format=current_format,
                           num_split=num_split)
            input_axis_name = input_ops_order[0]
            nodes_to_remove.append(new_op.input_ops[input_axis_name])
            new_op.remove_input(input_axis_name)
        elif op_type == 'Pad':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Pad(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'MatMul':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = MatMul(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Gather':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Gather(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Unique':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Unique(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Cast':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Cast(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Minimum':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Minimum(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'StridedSlice':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = StridedSlice(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Prod':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Prod(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Shape':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Shape(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        else:
            raise UnsupportedNode(
                f'TensorFlow importer cannot convert {op_type} operator node!')

        return new_op
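The SAME-padding block duplicated across the Conv, MaxPool, and AveragePool branches above implements TensorFlow's rule: the total padding along a dimension is max(filter - stride, 0) when the input size divides evenly by the stride, and max(filter - (input mod stride), 0) otherwise, with the odd pixel going to the bottom/right. Extracted into a helper (a refactoring sketch, not part of the original file):

    # TF-style SAME padding for one spatial dimension.
    def same_pads(in_size: int, filt: int, stride: int):
        if in_size % stride == 0:
            pad_along = max(filt - stride, 0)
        else:
            pad_along = max(filt - (in_size % stride), 0)
        pad_begin = pad_along // 2
        return pad_begin, pad_along - pad_begin

    # e.g. the 5x5 inputs with 2x2 kernels and stride 1 used on this page:
    assert same_pads(5, 2, 1) == (0, 1)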
Code Example #15
    def create_graph(self, graph):

        x1 = Input(
            'input1',
            [1, 4, 4, 3],
            Float32(),
        )

        w1 = Constant(
            'weight1',
            Float32(),
            np.zeros([1, 2, 2, 3])
        )

        conv1 = Conv(
            'conv1',
            [1, 3, 3, 3],
            Float32(),
            {'X': x1, 'W': w1},
            kernel_shape=[2, 2]
        )

        w2 = Constant(
            'weight2',
            Float32(),
            np.zeros([3, 2, 2, 3])
        )

        conv2 = Conv(
            'conv2',
            [1, 2, 2, 3],
            Float32(),
            {'X': conv1, 'W': w2},
            kernel_shape=[2, 2]
        )

        x2 = Input(
            'input2',
            [3, 3, 3, 3],
            Float32(),
        )

        x3 = Input(
            'input3',
            [3, 3, 3, 3],
            Float32(),
        )

        conv3 = Conv(
            'conv3',
            [3, 2, 2, 3],
            Float32(),
            {'X': x2, 'W': conv2},
            kernel_shape=[2, 2]
        )

        conv4 = Conv(
            'conv4',
            [1, 2, 2, 3],
            Float32(),
            {'X': x3, 'W': conv3},
            kernel_shape=[2, 2]
        )

        y = Output(
            'output',
            [1, 2, 2, 3],
            Float32(),
            {'input': conv4}
        )

        # add ops to the graph
        graph.add_op_and_inputs(y)
Code Example #16
    def create_sample_graph(self, data1: np.ndarray, data2: np.ndarray,
                            data3: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        w = Constant('weight', Float32(), data1)

        i = Identity('identity1', [3, 2, 2, 3], Float32(), {'input': w})

        t = Transpose('transpose1', [3, 2, 2, 3],
                      Float32(), {'data': i},
                      perm=[3, 2, 1, 0])

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': t})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': q
                     },
                     kernel_shape=[2, 2])

        i2 = Identity('identity2', [1, 4, 4, 3], Float32(), {'input': conv1})

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': i2,
            'Y': s1,
            'Z': s2
        })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          Float32(), {'data': aq},
                          perm=[0, 1, 2, 3])

        w2 = Constant('weight2', Float32(), data2)

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': q2
                     },
                     kernel_shape=[2, 2])

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        i3 = Identity('identity3', [1, 3, 3, 3], Float32(), {'input': aq2})

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': i3,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
Code Example #17
    def create_precompute_graph(self, data1: np.ndarray, data2: np.ndarray,
                                data3: np.ndarray) -> Graph:
        graph = Graph()

        # two inputs
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        scaling1, qdata = self.binary_mean_scaling(
            data1.transpose([3, 2, 1, 0]))
        w = Constant('weight', Float32(), qdata * scaling1)

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w
                     },
                     kernel_shape=[2, 2])

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3], Float32(), {
            'X': conv1,
            'Y': s1,
            'Z': s2
        })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          Float32(), {'data': aq},
                          perm=[0, 1, 2, 3])

        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2', Float32(), qdata2 * scaling2)

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': w2
                     },
                     kernel_shape=[2, 2])

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': aq2,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
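Examples #17, #19, and #20 all call a self.binary_mean_scaling helper that is not shown on this page: it splits a weight tensor into a scalar scaling factor and binarized data, and the product qdata * scaling is then stored as the precomputed constant. A plausible sketch, assuming the mean-of-absolute-values binarization that the QTZ_binary_mean_scaling operator name suggests:

    import numpy as np

    # Sketch only; the real helper lives in the test class and may differ.
    def binary_mean_scaling(data: np.ndarray):
        scaling = np.mean(np.abs(data)).astype(np.float32)  # scalar scale
        qdata = np.sign(data).astype(np.float32)            # roughly +/-1 per element
        return scaling, qdata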
Code Example #18
File: optimizer.py Project: tkng/blueoil
    def run_forward_conv(self, node: Conv, **kwargs: Any) -> None:
        ops: List[Operator] = [
            node.input_ops[i] for i in node.input_names
            if node.input_ops.get(i)
        ]

        if self._hard_quantized and node in kwargs['qconv']:
            # data is to be packed
            ops_have_precomp_values = list(
                map(lambda x: self._has_precompute_value(x), ops))
            ops_are_prunable = list(map(lambda x: self._is_prunable(x), ops))

            # check which input node can be pruned
            if reduce(
                    lambda x, y: x and y,
                    ops_have_precomp_values):  # all input has concrete values
                node.run_forward()
                self._precomp_dic[node.name] = True  # this node can be pruned
                quantizers = {
                    op.name: self._quantizers[op.name]
                    for op in ops if self._quantizers.get(op.name)
                }
                if len(quantizers) > 1:
                    raise ValueError(
                        f'{node.name}: multiple quantized inputs with {node.op_type} are not supported.'
                    )
                self._quantizers[node.name] = list(quantizers.values())[0]

            else:  # an input (must be weight) is to be quantized and packed
                self._precomp_dic[node.name] = False
                node.is_quantized = True
                packer = Packer(self._quantized_bitwidth, self._wordsize)
                quantizers = {
                    op.name: self._quantizers[op.name]
                    for op in ops if self._quantizers.get(op.name)
                }
                if len(quantizers) > 1:
                    raise ValueError(
                        f'{node.name}: multiple quantized inputs with {node.op_type} are not supported.'
                    )
                node.quantizer = list(quantizers.values())[0]

                for key, op in zip(node.input_names, ops):

                    if self._is_prunable(op):
                        shape = op.shape
                        op_data = node.quantizer.binarizer(op.data)
                        data = packer.run(op_data.astype(np.float32),
                                          op.dimension)
                        dtype = op.dtype
                        new_op = Constant(op.name + '_new',
                                          dtype,
                                          data,
                                          packed=True,
                                          actual_shape=shape)
                        node.add_input(key, new_op)
                        self._graph.add_op(new_op)
                        self._prune(op)

        else:
            self._precompute_or_prune_inputs(node)
Code Example #19
    def create_quantized_graph2(self, data1: np.ndarray, data2: np.ndarray,
                                data3: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        # constant and internal nodes
        scaling1, qdata1 = self.binary_mean_scaling(data1)
        w = Constant('weight', Float32(), qdata1 * scaling1)

        q = QTZ_binary_mean_scaling('qtz1', [3, 2, 2, 3], Float32(),
                                    {'input': w})

        # Conv
        conv1 = Conv('conv1', [1, 4, 4, 3],
                     Float32(), {
                         'X': x,
                         'W': w
                     },
                     kernel_shape=[2, 2])

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3],
                                       QUANTIZED_NOT_PACKED(), {
                                           'X': conv1,
                                           'Y': s1,
                                           'Z': s2
                                       })

        from modules.packer import Packer
        packer = Packer(1, 32)
        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2',
                      Uint32(),
                      packer.run(qdata2),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Float32(),
                                     {'input': w2})
        q2.scaling_factor = scaling2

        conv2 = Conv(
            'conv2',
            [1, 3, 3, 3],
            Float32(),
            {
                'X': aq,
                'W': w2
            },
            kernel_shape=[2, 2],
            quantized=True,
        )
        conv2.quantizer = q2

        scaling3, qdata3 = self.binary_mean_scaling(data3)
        w3 = Constant('weight3',
                      Uint32(),
                      packer.run(qdata3),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        q3 = QTZ_binary_mean_scaling('qtz3', [3, 2, 2, 3], Float32(),
                                     {'input': w3})
        q3.scaling_factor = scaling3

        conv3 = Conv('conv3', [1, 3, 3, 3],
                     Float32(), {
                         'X': aq,
                         'W': w3
                     },
                     kernel_shape=[2, 2],
                     quantized=True)
        conv3.quantizer = q3

        y1 = Output('output1', [1, 3, 3, 3], Float32(), {'input': conv2})

        y2 = Output('output2', [1, 3, 3, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y1)
        graph.add_op_and_inputs(y2)

        return graph, scaling2, scaling3
Code Example #20
    def create_quantized_graph(self, data: np.ndarray, data2: np.ndarray, data3: np.ndarray) \
            -> Tuple[Graph, np.float32, np.float32]:
        graph = Graph()

        # two inputs
        x = Input(
            'placeholder',
            [1, 5, 5, 3],
            Float32(),
        )

        from modules.packer import Packer
        packer = Packer(1, 32)
        data = data.transpose([3, 2, 1, 0])
        scaling, qdata = self.binary_mean_scaling(data)
        shape = list(data.shape)
        w = Constant(
            'weight',
            Float32(),
            qdata * scaling,
        )

        q = QTZ_binary_mean_scaling('qtz1', shape, Float32(), {'input': w})
        q.scaling_factor = scaling

        # Conv
        conv1 = Conv(
            'conv1',
            [1, 4, 4, 3],
            Float32(),
            {
                'X': x,
                'W': w
            },
            kernel_shape=[2, 2],
        )

        s1 = Constant('aq_const1', Float32(), np.array(1))

        s2 = Constant('aq_const2', Float32(), np.array(2))

        aq = QTZ_linear_mid_tread_half('aqtz1', [1, 4, 4, 3],
                                       QUANTIZED_NOT_PACKED(), {
                                           'X': conv1,
                                           'Y': s1,
                                           'Z': s2
                                       })

        dummy = Transpose('dummy', [1, 4, 4, 3],
                          QUANTIZED_NOT_PACKED(), {'data': aq},
                          perm=[0, 1, 2, 3])

        scaling2, qdata2 = self.binary_mean_scaling(data2)
        w2 = Constant('weight2',
                      Uint32(),
                      packer.run(qdata2),
                      packed=True,
                      actual_shape=[3, 2, 2, 3])

        # quantizer connected to conv2 as 'conv2.quantizer'
        q2 = QTZ_binary_mean_scaling('qtz2', [3, 2, 2, 3], Uint32(),
                                     {'input': w2})
        q2.scaling_factor = scaling2

        conv2 = Conv('conv2', [1, 3, 3, 3],
                     Float32(), {
                         'X': dummy,
                         'W': w2
                     },
                     kernel_shape=[2, 2],
                     quantized=True)
        conv2.quantizer = q2

        s3 = Constant('aq_const1', Float32(), np.array(1))

        s4 = Constant('aq_const2', Float32(), np.array(2))

        aq2 = QTZ_linear_mid_tread_half('aqtz2', [1, 3, 3, 3], Float32(), {
            'X': conv2,
            'Y': s3,
            'Z': s4
        })

        w3 = Constant('weight3', Float32(), data3)

        conv3 = Conv('conv3', [1, 2, 2, 3],
                     Float32(), {
                         'X': aq2,
                         'W': w3
                     },
                     kernel_shape=[2, 2])

        # One output
        y = Output('output', [1, 2, 2, 3], Float32(), {'input': conv3})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph, scaling, scaling2
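Packer(1, 32), used in Examples #19 and #20, evidently packs 1-bit quantized values into 32-bit words: the packed constant is typed Uint32 and keeps its pre-packing shape in actual_shape. The real implementation is in modules.packer; a rough standalone sketch of the idea, assuming one bit per input element and least-significant-bit-first order (both assumptions):

    import numpy as np

    def pack_1bit(data: np.ndarray, wordsize: int = 32) -> np.ndarray:
        # map positive values to bit 1, everything else to bit 0
        flat = (data.ravel() > 0).astype(np.uint64)
        pad = (-flat.size) % wordsize  # zero-pad to a whole word
        flat = np.concatenate([flat, np.zeros(pad, dtype=np.uint64)])
        words = flat.reshape(-1, wordsize)
        weights = np.uint64(1) << np.arange(wordsize, dtype=np.uint64)
        return (words * weights).sum(axis=1).astype(np.uint32)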