Example #1
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    transposed_a = tf_op.get_attr("transpose_a")
    transposed_b = tf_op.get_attr("transpose_b")

    if a.ndim > 2 or b.ndim > 2:
        raise NotImplementedError(
            "[TensorFlowConverter] Currently, MatMul is supported only 2D * 2D case."
        )

    # Collect the output axes of "c" and remember each input's contracted
    # (inner) axis so the two can be unified later.
    c_axes = []
    if transposed_a:
        c_axes.append(a.order.axes[-1])
        a_axis_K = a.order.axes[-2]

        if a.order != OrderCN:
            a, = ReinterpretAxis(None, in_order=a.order, out_order=OrderCN)(a)

    else:
        c_axes.append(a.order.axes[-2])
        a_axis_K = a.order.axes[-1]

        if a.order != OrderNC:
            a, = ReinterpretAxis(None, in_order=a.order, out_order=OrderNC)(a)

    if transposed_b:
        b_axis_K = b.order.axes[-1]

        c_axes.append(AxisVar())
        if b.order != OrderNC:
            b, = ReinterpretAxis(None, in_order=b.order, out_order=OrderNC)(b)

    else:
        # Capture the contracted axis before the reinterpretation, mirroring
        # the other branches above.
        b_axis_K = b.order.axes[-2]

        c_axes.append(AxisVar())
        if b.order != OrderCN:
            b, = ReinterpretAxis(None, in_order=b.order, out_order=OrderCN)(b)

    if flags.AGGRESSIVE_ORDER_INFERENCE:
        # Assumption: the two inner (contracted) axes are the same.
        unify(a_axis_K, b_axis_K)

    # Compute c = a @ b as a Linear operation, then restore the collected axes.
    c_normalized, = Linear(None)(a, b)
    c, = ReinterpretAxis(None,
                         in_order=c_normalized.order,
                         out_order=Order(c_axes))(c_normalized)

    converter.set_variable(tf_op.outputs[0], c)
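
A handler like this is never called directly; the converter dispatches on the TensorFlow operator type. A minimal wiring sketch, assuming the `register_handler` decorator and the import path used by the WebDNN TensorFlow frontend:

from webdnn.frontend.tensorflow import TensorFlowConverter

# Decorating the handler routes every "MatMul" node in the graph through it.
@TensorFlowConverter.register_handler("MatMul")
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    ...  # body as above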
Example #2
    def convert(self, inputs: List["tf.Tensor"], outputs: List["tf.Tensor"],
                order_hints: Optional[Dict[Union["tf.Tensor", "tf.Variable"], Order]] = None) -> Graph:
        """convert(model, input_orders=None)

        Args:
            inputs (list of `tf.Tensor`): tensorflow input tensors
            outputs (list of `tf.Tensor`): tensorflow output tensors
            order_hints: Order annotations that help WebDNN's optimizer.

        .. admonition:: Example

            .. code::

                # y = x @ W + b
                x = tf.placeholder(tf.float32, [None, 784])
                W = tf.Variable(tf.zeros([784, 10]))
                b = tf.Variable(tf.zeros([10]))
                y = tf.nn.softmax(tf.matmul(x, W) + b)

                webdnn_graph = TensorFlowConverter().convert([x], [y])

        Returns:
            (:class:`~webdnn.graph.graph.Graph`): WebDNN IR Graph
        """

        # Give each input a shape of placeholders (for unknown dimensions) and
        # an order of anonymous axis variables.
        for tensor in inputs:
            shape = [Placeholder() if dim.value is None else dim.value for dim in tensor.shape.dims]
            if isinstance(shape[0], Placeholder):
                shape[0] = self._batch_size
            self.set_variable(tensor, Variable(shape, Order([AxisVar() for _ in shape])))

        for op in _listup_operations(inputs, outputs):
            self._convert_operator(op)

        # Unify user-supplied order hints with the inferred axis variables.
        if order_hints:
            for tensor, order in order_hints.items():
                if isinstance(tensor, tf.Variable):
                    tensor = tensor.value()

                variable = self.get_variable(tensor)
                for axis1, axis2 in zip(variable.order.axes, order.axes):
                    unify(axis1, axis2)

        if flags.AGGRESSIVE_ORDER_INFERENCE:
            # The first dimension of each output variable is the batch size.
            for tensor in outputs:
                variable = self.get_variable(tensor)
                unify(variable.order.axes[0], Axis.N)

        # Remove redundant ReinterpretAxis operators
        graph = Graph([self.get_variable(tensor) for tensor in inputs],
                      [self.get_variable(tensor) for tensor in outputs])
        graph, _ = TensorFlowFrontendOptimizeRule().optimize(graph)

        return graph
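
The docstring example shows the basic call; `order_hints` merits one as well. A hedged sketch reusing `x`, `W`, and `y` from the docstring, and assuming `OrderNC`/`OrderCN` from `webdnn.graph.order`; each hint is unified axis-by-axis with the inferred anonymous axes:

from webdnn.graph.order import OrderCN, OrderNC

webdnn_graph = TensorFlowConverter().convert(
    [x], [y],
    order_hints={
        x: OrderNC,  # [batch, input features]
        W: OrderCN,  # [input features, output features]
    })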
Example #3
def _convert_reshape(converter: ChainerConverter,
                     c_op: "chainer.functions.Reshape"):
    x = converter.get_variable(c_op.inputs[0])

    out_shape = c_op.shape
    # The output layout is unknown: assign a fresh anonymous axis per dimension.
    # noinspection PyTypeChecker
    out_order = Order([AxisVar() for _ in out_shape])
    assert mul(out_shape) == x.size, \
        f"[ChainerConverter] Shape mismatch: mul(out_shape)={mul(out_shape)}, x.size={x.size}"

    y, = Reshape(None,
                 in_order=x.order,
                 out_order=out_order,
                 out_shape=out_shape)(x)

    converter.set_variable(c_op.outputs[0](), y)
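
For context, a handler like this runs as part of a whole-graph conversion. A minimal end-to-end sketch, assuming the documented `ChainerConverter().convert(inputs, outputs)` entry point:

import chainer.functions as F
import numpy as np
from chainer import Variable
from webdnn.frontend.chainer import ChainerConverter

x = Variable(np.zeros((1, 2, 3, 4), dtype=np.float32))
y = F.reshape(x, (1, 24))  # dispatched to _convert_reshape above

graph = ChainerConverter().convert([x], [y])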
Example #4
    def _convert_var(self, c_var: "VariableNode", constant=False):
        assert not self.has_variable(c_var), f"{c_var} is already converted"
        ndim = len(c_var.shape)
        order = Order([AxisVar() for _ in range(ndim)])

        if constant:
            data = c_var.data
            if chainer_v2 and data is None:
                # In Chainer v2, the VariableNode may not retain the data;
                # fetch it from the owning Variable instead.
                # noinspection PyProtectedMember
                data = c_var._variable().data

            n_var = ConstantVariable(chainer.cuda.to_cpu(data),
                                     order)  # force on CPU

        else:
            n_var = Variable(c_var.shape, order)

        self.set_variable(c_var, n_var)
        return n_var
Example #5
    def convert_to_constant_variable(self, tensor: "tf.Tensor", order: Optional[Order] = None) -> ConstantVariable:
        """convert_to_constant_variable(tf_var, order)

        Convert tf.Tensor into :class:`~webdnn.graph.variables.constant_variable.ConstantVariable`.

        This method also registers the mapping between the TensorFlow tensor and the WebDNN constant variable.
        If the specified tensor is already registered in the converter, the converter checks that the registered
        shape and order are consistent with the given ones.

        **This method is provided only for implementing custom converter handler.**

        Args:
            tensor (:code:`tf.Tensor`): TensorFlow tensor
            order (:class:`~webdnn.graph.order.Order`): data order. By default, an order consisting of anonymous
                axis variables is used.

        Returns:
            (:class:`~webdnn.graph.variables.constant_variable.ConstantVariable`): converted variable.
        """

        data, = self.session.run([tensor])

        if self.has_variable(tensor):
            variable = self.get_variable(tensor)
            assert variable.shape == tuple(data.shape), f"[TensorFlowConverter] {tensor} is already registered before, and " \
                                                        f"shape mismatch is detected: (registered shape)=" \
                                                        f"{variable.shape}, (given tensor's shape)=" \
                                                        f"{tensor.shape}"
            if order is not None:
                assert variable.order == order, f"[TensorFlowConverter] {tensor} is already registered before, and order " \
                                                f"mismatch is detected: (registered order)={variable.order}, (given " \
                                                f"order)={order}"

        else:
            if order is None:
                order = Order([AxisVar() for _ in range(data.ndim)])

            variable = ConstantVariable(data, order)
            self.set_variable(tensor, variable)

        return variable
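
Since the docstring reserves this method for custom handlers, here is a hedged sketch of one; the operator name "MyOp", its input layout, and the choice of `OrderCN` are hypothetical:

from webdnn.graph.operators.linear import Linear
from webdnn.graph.order import OrderCN

@TensorFlowConverter.register_handler("MyOp")  # hypothetical operator type
def my_op_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    # The second input is assumed to be a compile-time constant (e.g. a weight):
    # evaluate it in the session and register it with an explicit order.
    w = converter.convert_to_constant_variable(tf_op.inputs[1], OrderCN)

    y, = Linear(None)(x, w)
    converter.set_variable(tf_op.outputs[0], y)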