Example #1
    def test_add_consistency2(self) -> None:
        """Test code for 'Add', which fails."""
        a = Constant(
            'const1',
            Float32(),
            np.zeros([1, 3, 3])
        )
        b = Constant(
            'const2',
            Float32(),
            np.zeros([2])
        )
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        try:
            Add(
                'add1',
                [1, 3, 3],
                Float32(),
                input_ops
            )
        except AssertionError:
            print("Consistency test for 'Add' #2 passed!")
        else:
            self.assertTrue(False, "Consistency test for 'Add' #2 failed.")
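
The failing case above pairs shapes (1, 3, 3) and (2,), which are not broadcast-compatible: the trailing dimensions 3 and 2 differ, so the Add constructor is expected to raise. A minimal sketch of the same check in plain NumPy (not part of the original test):

    import numpy as np

    # (1, 3, 3) and (2,) cannot be broadcast together: 3 != 2 on the last axis
    try:
        np.broadcast_shapes((1, 3, 3), (2,))
    except ValueError as e:
        print(e)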
Example #2
    def create_sample_graph() -> Graph:
        graph = Graph()

        x = Input('placeholder', [2], Float32())

        s1 = Constant('potato_1', Float32(), np.array([1, 2]))
        s2 = Constant('potato_2', Float32(), np.array([1, 3]))
        add1 = Add('potatoes', [2], Float32(), {'A': s1, 'B': s2})
        add2 = Add('more_potatoes', [2], Float32(), {'A': x, 'B': add1})

        # One output
        y = Output('output', [2], Float32(), {'input': add2})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
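
In plain NumPy terms the graph above computes x + ([1, 2] + [1, 3]), i.e. x + [2, 5]; a quick sanity check with a hypothetical placeholder value (not part of the original code):

    import numpy as np

    x = np.array([10.0, 20.0])  # hypothetical input for the 'placeholder' node
    print(x + (np.array([1, 2]) + np.array([1, 3])))  # -> [12. 25.]
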
    def test_add_consistency1(self) -> None:
        """Test code for 'Add', which succeeds."""
        a = Constant('const1', Float32(), np.zeros([1, 3, 3]))
        b = Constant('const2', Float32(), np.zeros([3]))
        input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}
        add = Add('add1', [1, 3, 3], Float32(), input_ops)

        print("Consistency test for 'Add' #1 passed!")
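
For contrast with the failing case in Example #1, shapes (1, 3, 3) and (3,) do broadcast, so the constructor succeeds; in NumPy terms:

    import numpy as np

    print(np.broadcast_shapes((1, 3, 3), (3,)))  # -> (1, 3, 3)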
Example #4
    def create_sample_graph_2(data1: np.ndarray) -> Graph:
        graph = Graph()

        # input
        x = Input('placeholder', [1, 5, 5, 3], Float32())

        # Conv1
        w1 = Constant('weight1', Float32(), data1)
        conv1 = Conv('conv1', [1, 4, 4, 3], Float32(), {'X': x, 'W': w1}, kernel_shape=[2, 2])

        s1 = Constant('const1', Float32(), np.zeros([1, 4, 4, 3]))
        add1 = Add('add', [1, 4, 4, 3], Float32(), {'A': conv1, 'B': s1})

        y = Output('output', [1, 4, 4, 3], Float32(), {'input': add1})

        # add ops to the graph
        graph.add_op_and_inputs(y)

        return graph
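
The conv1 output shape can be sanity-checked: assuming stride 1 and no padding (the apparent defaults here), a 2x2 kernel shrinks each spatial dimension from 5 to 5 - 2 + 1 = 4, matching [1, 4, 4, 3]:

    in_h, k_h, stride = 5, 2, 1  # assumed stride and no padding
    out_h = (in_h - k_h) // stride + 1
    print(out_h)  # -> 4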
Example #5
    def create_new_node(self, node: Node, op_dic: Dict[str, Operator],
                        current_format: str, input_format_list: List[str],
                        nodes_to_remove: List[Operator]) -> Operator:
        """Create a new operator node from a TF node.

        Args:
            node (Node): TF node corresponding to the operator
            op_dic (dict): Dict of preceding operators, keyed by name
            current_format (str): Data format of the current node
            input_format_list (list): Data formats of the corresponding inputs
                of the current node
            nodes_to_remove (list): Collects input operators (e.g. axis
                constants) that should later be removed from the graph

        Returns:
            Operator: Newly created dlk operator
        """
        op_type = self.convert_operator(node.op_type)
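        # Resolve the operator class dynamically: e.g. the string 'Conv' maps
        # to blueoil.converter.core.operators.Conv via getattr().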
        try:
            module = importlib.import_module(
                'blueoil.converter.core.operators')
            class_def = getattr(module, op_type)
        except AttributeError:
            message = f'Operator {op_type} is not supported.'
            raise UnsupportedNode(message)

        new_op: Operator

        def get_inputs(cdef: Type[Operator],
                       current_node: Any) -> Tuple[Dict[str, Operator], List[str]]:
            input_names = cdef.input_names
            in_ops: Dict[str, Operator] = {}
            in_ops_order: List[str] = []
            # Pair each declared input name with the corresponding preceding op
            for n, op in zip(input_names, current_node.inputs):
                in_ops[n] = op_dic[op]
                in_ops_order.append(n)
            return in_ops, in_ops_order

        input_ops, input_ops_order = get_inputs(class_def, node)

        # Helpers to infer the shape and data type of the op
        def infer_shape(attrs: Dict[str, Any]) -> List[int]:
            shape_dict = {
                n: input_ops[n].shape
                for n in class_def.input_names if input_ops.get(n)
            }
            return class_def.infer_shape(shape_dict, current_format,
                                         input_format_list, attrs)

        def infer_dtype() -> DataType:
            dtype = node.get_dtype()
            if dtype is not None:
                return dtype
            # Otherwise fall back to the dtype of the first input
            return list(input_ops.values())[0].dtype

        shape: List[int] = list(map(int, node.get_shape()))
        dtype = infer_dtype()

        # Fall back to a dummy shape when any dimension is unknown (negative)
        if any(d < 0 for d in shape):
            shape = [1]

        if op_type == 'Conv':
            strides = node.attribute('strides')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            # calculated pads size for tf
            input_format = input_format_list[0]
            kernel_format = input_format_list[1]

            in_h = input_ops['X'].shape[input_format.index('H')]
            in_w = input_ops['X'].shape[input_format.index('W')]
            filt_h = input_ops['W'].shape[kernel_format.index('H')]
            filt_w = input_ops['W'].shape[kernel_format.index('W')]
            stride_h = strides[0]
            stride_w = strides[1]

            pads: List[int] = []
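            # TensorFlow 'SAME' padding makes the output size ceil(in / stride);
            # any odd padding amount goes to the bottom/right side.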
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': [filt_h, filt_w],
                    'strides': strides,
                    'pads': pads
                }
                shape = infer_shape(attributes)

            new_op = Conv(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                kernel_shape=[filt_h, filt_w],
                strides=strides,
                pads=pads,
            )
        elif op_type == 'BatchNormalization':
            epsilon = node.attribute('epsilon')[0]
            is_test = not node.attribute('is_training')

            if not shape:
                attributes = {'epsilon': epsilon, 'is_test': is_test}
                shape = infer_shape(attributes)

            new_op = BatchNormalization(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                epsilon=epsilon,
                is_test=is_test,
            )
        elif op_type == 'Add':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Add(node.name,
                         shape,
                         dtype,
                         input_ops,
                         dimension_format=current_format)
        elif op_type == 'Sub':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Sub(node.name,
                         shape,
                         dtype,
                         input_ops,
                         dimension_format=current_format)
        elif op_type == 'Identity':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Identity(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'LinearMidTreadHalfQuantizer':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = LinearMidTreadHalfQuantizer(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'BinaryMeanScalingQuantizer':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = BinaryMeanScalingQuantizer(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Reshape':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Reshape(node.name,
                             shape,
                             dtype,
                             input_ops,
                             dimension_format=current_format)
        elif op_type == 'Softmax':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Softmax(node.name,
                             shape,
                             dtype,
                             input_ops,
                             dimension_format=current_format)
        elif op_type == 'MaxPool':
            kernel_shape = node.attribute('ksize')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            strides = node.attribute('strides')[0][1:3]

            in_h = input_ops['X'].height
            in_w = input_ops['X'].width
            filt_h = kernel_shape[0]
            filt_w = kernel_shape[1]
            stride_h = strides[0]
            stride_w = strides[1]

            pads = []
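            # Same TF 'SAME'/'VALID' padding arithmetic as in the Conv branch.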
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': kernel_shape,
                    'pads': pads,
                    'strides': strides
                }
                shape = infer_shape(attributes)

            new_op = MaxPool(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                kernel_shape=kernel_shape,
                pads=pads,
                strides=strides,
            )
        elif op_type == 'AveragePool':
            kernel_shape = node.attribute('ksize')[0][1:3]
            padding = node.attribute('padding')[0].decode(encoding='utf-8')
            strides = node.attribute('strides')[0][1:3]

            in_h = input_ops['X'].height
            in_w = input_ops['X'].width
            filt_h = kernel_shape[0]
            filt_w = kernel_shape[1]
            stride_h = strides[0]
            stride_w = strides[1]

            pads = []
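            # Same padding arithmetic again (see the Conv branch above).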
            if padding == 'SAME':
                if in_h % stride_h == 0:
                    pad_along_height = max(filt_h - stride_h, 0)
                else:
                    pad_along_height = max(filt_h - (in_h % stride_h), 0)
                if in_w % stride_w == 0:
                    pad_along_width = max(filt_w - stride_w, 0)
                else:
                    pad_along_width = max(filt_w - (in_w % stride_w), 0)

                pad_top = pad_along_height // 2
                pad_bottom = pad_along_height - pad_top
                pad_left = pad_along_width // 2
                pad_right = pad_along_width - pad_left

                pads = [pad_top, pad_bottom, pad_left, pad_right]

            elif padding == 'VALID':
                pads = [0, 0, 0, 0]

            else:
                raise ValueError(
                    f'{op_type} {node.name} doesn\'t have the supported padding.'
                )

            if not shape:
                attributes = {
                    'kernel_shape': kernel_shape,
                    'pads': pads,
                    'strides': strides
                }
                shape = infer_shape(attributes)

            new_op = AveragePool(
                node.name,
                shape,
                dtype,
                input_ops,
                kernel_shape=kernel_shape,
                pads=pads,
                strides=strides,
            )
        elif op_type == 'Transpose':
            perm = node.attribute("perm")

            if not shape:
                attributes = {'perm': perm}
                shape = infer_shape(attributes)

            new_op = Transpose(
                node.name,
                shape,
                dtype,
                input_ops,
                perm=perm,
            )
        elif op_type == 'Relu':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Relu(node.name, shape, dtype, input_ops)
        elif op_type == 'LeakyRelu':
            alpha = node.attribute("alpha")[0]

            if not shape:
                attributes = {'alpha': alpha}
                shape = infer_shape(attributes)

            new_op = LeakyRelu(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
                alpha=alpha,
            )
        elif op_type == 'SpaceToDepth':
            bs = node.attribute('block_size')
            if not bs:
                raise ValueError(
                    f'{op_type} {node.name} block size not specified')

            if not shape:
                attributes = {'block_size': bs[0]}
                shape = infer_shape(attributes)

            new_op = SpaceToDepth(node.name,
                                  shape,
                                  dtype,
                                  input_ops,
                                  dimension_format=current_format,
                                  block_size=bs[0])
        elif op_type == 'Mul':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Mul(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'BinaryChannelWiseMeanScalingQuantizer':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = BinaryChannelWiseMeanScalingQuantizer(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'ConcatOnDepth':
            # The last input is a Constant holding the concatenation axis, so
            # its value is available via .data
            axis_op = input_ops[input_ops_order[-1]]
            if current_format.index('C') != int(axis_op.data):
                raise ValueError(
                    f'{op_type} {node.name} concatenation is only supported on the depth axis'
                )

            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = ConcatOnDepth(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )

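            # The trailing axis-constant input is no longer needed (the axis is
            # fixed to the depth dimension), so detach it from the graph.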
            input_axis_name = input_ops_order[-1]
            nodes_to_remove.append(new_op.input_ops[input_axis_name])
            new_op.remove_input(input_axis_name)
        elif op_type == 'Maximum':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Maximum(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'DepthToSpace':
            bs = node.attribute('block_size')
            if not bs:
                raise ValueError(
                    f'{op_type} {node.name} block size not specified')

            if not shape:
                attributes = {'block_size': bs[0]}
                shape = infer_shape(attributes)

            new_op = DepthToSpace(node.name,
                                  shape,
                                  dtype,
                                  input_ops,
                                  dimension_format=current_format,
                                  block_size=bs[0])
        elif op_type == 'ResizeNearestNeighbor':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = ResizeNearestNeighbor(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Split':
            num_split = node.attribute('num_split')[0]

            if not isinstance(num_split, int):
                raise ValueError(
                    f'{op_type} {node.name} only supports integer value')

            if not shape:
                attributes = {'split': num_split}
                shape = infer_shape(attributes)

            new_op = Split(node.name,
                           shape,
                           dtype,
                           input_ops,
                           dimension_format=current_format,
                           num_split=num_split)
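            # The leading split-axis constant input is not needed in the graph
            # once the op is built, so detach it as well.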
            input_axis_name = input_ops_order[0]
            nodes_to_remove.append(new_op.input_ops[input_axis_name])
            new_op.remove_input(input_axis_name)
        elif op_type == 'Pad':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Pad(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'MatMul':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = MatMul(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Gather':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Gather(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Unique':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Unique(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Cast':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Cast(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Minimum':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Minimum(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'StridedSlice':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = StridedSlice(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Prod':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Prod(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        elif op_type == 'Shape':
            if not shape:
                attributes = {}
                shape = infer_shape(attributes)

            new_op = Shape(
                node.name,
                shape,
                dtype,
                input_ops,
                dimension_format=current_format,
            )
        else:
            raise UnsupportedNode(
                f'TensorFlow importer cannot convert {op_type} operator node!')

        return new_op
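
For reference, here is the 'SAME' padding arithmetic from the Conv branch above, worked through with concrete (arbitrarily chosen) numbers; this sketch is not part of the original source:

    # TF 'SAME' padding, worked through for in_h=5, filt_h=3, stride_h=2
    in_h, filt_h, stride_h = 5, 3, 2
    if in_h % stride_h == 0:
        pad_along_height = max(filt_h - stride_h, 0)
    else:
        pad_along_height = max(filt_h - (in_h % stride_h), 0)  # max(3 - 1, 0) = 2
    pad_top = pad_along_height // 2          # 1
    pad_bottom = pad_along_height - pad_top  # 1
    # The padded height is 5 + 2 = 7, so the output height is
    # (7 - 3) // 2 + 1 = 3 == ceil(5 / 2), as TF's 'SAME' mode guarantees.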