Example 1
def _convert_batch_normalization_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.normalization.batch_normalization.BatchNormalizationFunction"):
    x = converter.get_variable(c_op.inputs[0])
    unify(x.order.axes[0], Axis.N)
    unify(x.order.axes[1], Axis.C)

    gamma = converter.get_variable(c_op.inputs[1])
    unify_order(gamma.order, OrderC)

    beta = converter.get_variable(c_op.inputs[2])
    unify_order(beta.order, OrderC)

    if len(c_op.inputs) == 5:
        mean = converter.get_variable(c_op.inputs[3])
        unify_order(mean.order, OrderC)

        variance = converter.get_variable(c_op.inputs[4])
        unify_order(variance.order, OrderC)

    elif len(c_op.inputs) == 3:
        mean = 0 if c_op.running_mean is None else ConstantVariable(
            c_op.running_mean, OrderC)
        variance = 1 if c_op.running_var is None else ConstantVariable(
            c_op.running_var, OrderC)

    else:
        raise ValueError(
            "BatchNormalizationFunction must have either 3 or 5 inputs.")

    # y = gamma * (x - mean) / sqrt(variance + eps) + beta
    y = (x - mean) / ((variance + c_op.eps)**0.5) * gamma + beta
    converter.set_variable(c_op.outputs[0](), y)
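
For reference, the final line is the standard batch-normalization formula, broadcast over the channel axis. A minimal NumPy sketch of the same arithmetic for an NCHW input; the shapes and the eps value are illustrative assumptions, not taken from the converter:

import numpy as np

def batchnorm_nchw(x, gamma, beta, mean, variance, eps=2e-5):
    # Reshape per-channel vectors to (1, C, 1, 1) so they broadcast over N, H, W.
    s = (1, -1, 1, 1)
    return ((x - mean.reshape(s)) / np.sqrt(variance.reshape(s) + eps)
            * gamma.reshape(s) + beta.reshape(s))

x = np.random.randn(8, 16, 32, 32).astype(np.float32)
y = batchnorm_nchw(x, np.ones(16), np.zeros(16),
                   x.mean(axis=(0, 2, 3)), x.var(axis=(0, 2, 3)))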
Example 2
def softmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Softmax(None, axis=x.order.axes[-1])(x)

    if flags.AGGRESSIVE_ORDER_INFERENCE:
        # Assumption: softmax is computed along Axis.C
        unify(x.order.axes[-1], Axis.C)

    converter.set_variable(tf_op.outputs[0], y)
Example 3
def _convert_softmax(converter: ChainerConverter,
                     c_op: "chainer.functions.Softmax"):
    x = converter.get_variable(c_op.inputs[0])
    y, = Softmax(None, axis=x.order.axes[c_op.axis])(x)

    if flags.AGGRESSIVE_ORDER_INFERENCE:
        # In most cases, softmax is performed along Axis.C
        unify(y.order.axes[c_op.axis], Axis.C)

    converter.set_variable(c_op.outputs[0](), y)
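
Both softmax handlers lower to the same elementwise computation; the axis argument plays the role of c_op.axis, and the TensorFlow handler fixes it to the last axis. A minimal, numerically stable NumPy sketch (illustrative, not the converter's kernel):

import numpy as np

def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    # Subtract the per-slice maximum before exponentiating for numerical stability.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.randn(2, 3)
print(softmax(x).sum(axis=-1))  # each row sums to 1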
Example 4
    def convert(self, inputs: List["tf.Tensor"], outputs: List["tf.Tensor"],
                order_hints: Optional[Dict[Union["tf.Tensor", "tf.Variable"], Order]] = None) -> Graph:
        """convert(model, input_orders=None)

        Args:
            inputs (list of `tf.Tensor`): TensorFlow input tensors
            outputs (list of `tf.Tensor`): TensorFlow output tensors
            order_hints: order annotations that help WebDNN's optimizer.

        .. admonition:: Example

            .. code::

                # y = x @ W + b
                x = tf.placeholder(tf.float32, [None, 784])
                W = tf.Variable(tf.zeros([784, 10]))
                b = tf.Variable(tf.zeros([10]))
                y = tf.nn.softmax(tf.matmul(x, W) + b)

                webdnn_graph = TensorFlowConverter().convert([x], [y])

        Returns:
            (:class:`~webdnn.graph.graph.Graph`): WebDNN IR Graph
        """

        for tensor in inputs:
            shape = [Placeholder() if dim.value is None else dim.value for dim in tensor.shape.dims]
            if isinstance(shape[0], Placeholder):
                shape[0] = self._batch_size
            self.set_variable(tensor, Variable(shape, Order([AxisVar() for _ in shape])))

        for op in _listup_operations(inputs, outputs):
            self._convert_operator(op)

        if order_hints:
            for tensor, order in order_hints.items():
                if isinstance(tensor, tf.Variable):
                    tensor = tensor.value()

                variable = self.get_variable(tensor)
                for axis1, axis2 in zip(variable.order.axes, order.axes):
                    unify(axis1, axis2)

        if flags.AGGRESSIVE_ORDER_INFERENCE:
            # 1st dimension of output variable is batch size
            for tensor in outputs:
                variable = self.get_variable(tensor)
                unify(variable.order.axes[0], Axis.N)

        # Build the WebDNN IR graph, then remove redundant ReinterpretAxis operators
        graph = Graph([self.get_variable(tensor) for tensor in inputs], [self.get_variable(tensor) for tensor in outputs])
        graph, _ = TensorFlowFrontendOptimizeRule().optimize(graph)

        return graph
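
A sketch of how order_hints might be passed, extending the docstring example above. The import paths and the choice of OrderNC/OrderCN/OrderC for these tensors are assumptions for illustration:

import tensorflow as tf
from webdnn.frontend.tensorflow import TensorFlowConverter
from webdnn.graph.order import OrderC, OrderCN, OrderNC

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Hints let unify() resolve the anonymous AxisVar()s created for each input.
graph = TensorFlowConverter().convert(
    [x], [y], order_hints={x: OrderNC, W: OrderCN, b: OrderC, y: OrderNC})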
Example 5
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    transposed_a = tf_op.get_attr("transpose_a")
    transposed_b = tf_op.get_attr("transpose_b")

    if a.ndim > 2 or b.ndim > 2:
        raise NotImplementedError(
            "[TensorFlowConverter] Currently, MatMul is supported only 2D * 2D case."
        )

    c_axes = []
    if transposed_a:
        c_axes.append(a.order.axes[-1])
        a_axis_K = a.order.axes[-2]

        if a.order != OrderCN:
            a, = ReinterpretAxis(None, in_order=a.order, out_order=OrderCN)(a)

    else:
        c_axes.append(a.order.axes[-2])
        a_axis_K = a.order.axes[-1]

        if a.order != OrderNC:
            a, = ReinterpretAxis(None, in_order=a.order, out_order=OrderNC)(a)

    if transposed_b:
        b_axis_K = b.order.axes[-1]

        c_axes.append(AxisVar())
        if b.order != OrderNC:
            b, = ReinterpretAxis(None, in_order=b.order, out_order=OrderNC)(b)

    else:
        c_axes.append(AxisVar())
        if b.order != OrderCN:
            b, = ReinterpretAxis(None, in_order=b.order, out_order=OrderCN)(b)

        b_axis_K = b.order.axes[-2]

    if flags.AGGRESSIVE_ORDER_INFERENCE:
        # Assumption: the two contracted (inner) axes are the same.
        unify(a_axis_K, b_axis_K)

    c_normalized, = Linear(None)(a, b)
    c, = ReinterpretAxis(None,
                         in_order=c_normalized.order,
                         out_order=Order(c_axes))(c_normalized)

    converter.set_variable(tf_op.outputs[0], c)
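
Stripped of the axis bookkeeping and ReinterpretAxis plumbing, the handler computes an ordinary 2D matrix product with optional transposes. A NumPy sketch of the intended semantics (illustrative only):

import numpy as np

def matmul_2d(a, b, transpose_a=False, transpose_b=False):
    # Mirror tf.matmul's transpose flags: the contracted axis of a transposed
    # operand is axes[-2] instead of axes[-1].
    return (a.T if transpose_a else a) @ (b.T if transpose_b else b)

a = np.random.randn(4, 3)
b = np.random.randn(4, 5)
print(matmul_2d(a, b, transpose_a=True).shape)  # (3, 5)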
Example 6
def _convert_batch_normalization_function(
        converter: ChainerConverter,
        c_op: "chainer.functions.normalization.batch_normalization.BatchNormalizationFunction"):
    if chainer.__version__ >= "2.":
        # FIXME: Is it possible to detect in which mode this function was computed, train or test?
        pass

    else:
        if not c_op.test:
            raise NotImplementedError(
                "[ChainerConverter] BatchNormalization with train mode is not supported"
            )

    x = converter.get_variable(c_op.inputs[0])
    unify(x.order.axes[0], Axis.N)
    unify(x.order.axes[1], Axis.C)

    gamma = converter.get_variable(c_op.inputs[1])
    unify_order(gamma.order, OrderC)

    beta = converter.get_variable(c_op.inputs[2])
    unify_order(beta.order, OrderC)

    if len(c_op.inputs) == 5:
        mean = converter.get_variable(c_op.inputs[3])
        unify_order(mean.order, OrderC)

        variance = converter.get_variable(c_op.inputs[4])
        unify_order(variance.order, OrderC)

    # 3 inputs: chainer.functions.batch_normalization(x, gamma, beta)
    elif len(c_op.inputs) == 3:
        mean = 0 if c_op.running_mean is None else ConstantVariable(
            c_op.running_mean, OrderC)
        variance = 1 if c_op.running_var is None else ConstantVariable(
            c_op.running_var, OrderC)

    else:
        raise ValueError(
            "BatchNormalizationFunction must have either 3 or 5 inputs.")

    y = (x - mean) / ((variance + c_op.eps)**0.5) * gamma + beta
    converter.set_variable(c_op.outputs[0](), y)
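
The version check exists because Chainer 2 moved the train/test switch from the function's test attribute to the global chainer.config, so the recorded function node no longer says which mode produced it (hence the FIXME). A sketch of running a model in test mode before conversion under Chainer >= 2; the model and input are placeholders:

import numpy as np
import chainer

model = ...  # placeholder: a chainer.Chain containing BatchNormalization links
x = np.zeros((1, 3, 224, 224), dtype=np.float32)  # illustrative input shape

# With chainer.config.train set to False, BatchNormalization uses its
# running statistics instead of batch statistics.
with chainer.using_config('train', False):
    y = model(x)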
Example 7
def check_broadcast_constraints(a: Variable, b: Variable):
    i_a = a.ndim - 1
    i_b = b.ndim - 1
    while i_a >= 0 and i_b >= 0:
        if (a.shape[i_a] == b.shape[i_b]
                or a.shape[i_a] == 1 or b.shape[i_b] == 1):
            unify(a.order.axes[i_a], b.order.axes[i_b])

            if ((a.shape[i_a] == 1 and b.shape[i_b] == 1)
                    or (a.shape[i_a] != 1 and b.shape[i_b] != 1)):
                # If broadcasting does not occur, the sizes must be equal
                add_placeholder_constraint(a.shape[i_a], b.shape[i_b])

            i_a -= 1
            i_b -= 1

        else:
            raise ValueError(
                f"Broadcast failed: a.shape={a.shape}, b.shape={b.shape}")
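
The loop walks both shapes from the trailing axis, mirroring NumPy's broadcasting rule. The same compatibility check on plain integer shapes, without the axis-unification and placeholder machinery (a self-contained sketch):

def broadcastable(shape_a, shape_b) -> bool:
    # Compare dimensions right to left; a missing or size-1 dimension broadcasts.
    for dim_a, dim_b in zip(reversed(shape_a), reversed(shape_b)):
        if dim_a != dim_b and dim_a != 1 and dim_b != 1:
            return False
    return True

print(broadcastable((8, 3, 32, 32), (3, 1, 1)))  # True
print(broadcastable((8, 3, 32), (4, 32)))        # False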