Example #1
def _convert_reduce_sum(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    axes = attrs["axes"].ints
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1
    for a in axes:
        x, = Sum(None, axis=x.order.axes[a])(x)

    if not keepdims:
        x = x.squeeze(axis=[x.order.axes[i] for i in axes])

    converter.set_variable(onnx_op.output[0], x)
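
The handler lowers a multi-axis ReduceSum to a chain of single-axis Sum operators and, when keepdims is 0, squeezes the reduced axes afterwards. The NumPy sketch below (reduce_sum_sketch is a hypothetical helper, not part of WebDNN) illustrates the same decomposition:

import numpy as np

def reduce_sum_sketch(x: np.ndarray, axes, keepdims: bool = True) -> np.ndarray:
    # Sum over each axis in turn; keepdims=True at every step so the positional
    # indices in `axes` stay valid, then squeeze the size-1 axes at the end.
    y = x
    for a in axes:
        y = y.sum(axis=a, keepdims=True)
    if not keepdims:
        y = y.squeeze(axis=tuple(axes))
    return y

x = np.arange(24.0).reshape(2, 3, 4)
assert np.allclose(reduce_sum_sketch(x, [1, 2], keepdims=False), x.sum(axis=(1, 2)))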
Example #2
def _convert_sum(converter: ChainerConverter, c_op: "chainer.functions.Sum"):
    x = converter.get_variable(c_op.inputs[0])
    for axis in list(x.order.axes) if c_op.axis is None else [
            x.order.axes[i] for i in c_op.axis
    ]:
        x, = Sum(None, axis=axis)(x)

        # chainer.functions.sum has supported the "keepdims" parameter since v1.24
        if chainer.__version__ >= "1.24" and c_op.keepdims and x.ndim > 1:
            pass

        else:
            x = x.squeeze(axis)

    converter.set_variable(c_op.outputs[0](), x)
Example #3
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    # TODO: Conversion result is wrong when the size of any reduced axis is a placeholder.
    if any(not Placeholder.check_resolved(x.shape_dict[axis])
           for axis in axes):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSumExp\" for dynamic number of categories is not supported"
        )

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
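
The max subtraction above is the usual numerically stable evaluation of log-sum-exp, based on the identity log sum(exp(x)) = max(x) + log sum(exp(x - max(x))). A plain-NumPy sketch of the identity (logsumexp_sketch is a hypothetical name, unrelated to the converter):

import numpy as np

def logsumexp_sketch(x: np.ndarray, axes) -> np.ndarray:
    # Shift by the maximum so exp() never sees large positive arguments.
    axes = tuple(axes)
    max_x = x.max(axis=axes, keepdims=True)
    return np.log(np.exp(x - max_x).sum(axis=axes, keepdims=True)) + max_x

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 3, 4)) * 50
reference = np.log(np.exp(x).sum(axis=(1, 2), keepdims=True))
assert np.allclose(logsumexp_sketch(x, (1, 2)), reference)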
Example #4
def sum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    assert isinstance(
        axis, ConstantVariable
    ), "[TensorFlowConverter] Operation 'Sum' with dynamic axis  is not supported yet."

    for axis in [
            x.order.axes[i] for i in axis.data.astype(int).flatten().tolist()
    ]:
        x, = Sum(None, axis=axis)(x)

        if not tf_op.get_attr("keep_dims") and x.ndim > 1:
            x = x.squeeze(axis)

    converter.set_variable(tf_op.outputs[0], x)
Example #5
def _convert_log_softmax(converter: ChainerConverter,
                         c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)

    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
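
In closed form the graph computes log_softmax(x) = x - (max(x) + log sum(exp(x - max(x)))) along x.order.axes[1]. A short NumPy check of that formula (illustrative only; log_softmax_sketch is not a WebDNN function):

import numpy as np

def log_softmax_sketch(x: np.ndarray, axis: int = 1) -> np.ndarray:
    max_x = x.max(axis=axis, keepdims=True)
    log_sum = np.log(np.exp(x - max_x).sum(axis=axis, keepdims=True))
    return x - (log_sum + max_x)

x = np.random.randn(2, 5)
assert np.allclose(np.exp(log_softmax_sketch(x)).sum(axis=1), 1.0)  # softmax rows sum to 1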
Example #6
def sum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    v = x

    assert isinstance(
        axis, ConstantVariable
    ), "[TensorFlowConverter] Operation 'Sum' with dynamic axis  is not supported yet."
    for i_axis in sorted(axis.data.astype(int).flatten().tolist(),
                         reverse=True):
        axis = v.order.axes[i_axis]

        v, = Sum(None, axis=axis)(v)

    if tf_op.get_attr("keep_dims") or x.ndim == 1:
        v = v.reshape(order=x.order,
                      shape=[
                          v.shape_dict[a] if a in v.order.axes else 1
                          for a in x.order.axes
                      ])

    converter.set_variable(tf_op.outputs[0], v)
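
Two details are worth noting here: the axis indices are visited in descending order so that removing an axis from v does not shift the indices of the axes still to be reduced, and keep_dims is honoured at the end by one reshape that reinserts size-1 dimensions. The NumPy sketch below (sum_keepdims_sketch is a hypothetical helper) mirrors both points:

import numpy as np

def sum_keepdims_sketch(x: np.ndarray, axis_indices, keep_dims: bool) -> np.ndarray:
    v = x
    # Descending order: each sum() drops an axis, so reducing a lower index first
    # would invalidate the higher indices.
    for i in sorted(axis_indices, reverse=True):
        v = v.sum(axis=i)
    if keep_dims:
        # Put the reduced dimensions back as size 1, like the reshape above.
        v = v.reshape([1 if i in axis_indices else d for i, d in enumerate(x.shape)])
    return v

x = np.arange(24.0).reshape(2, 3, 4)
assert np.allclose(sum_keepdims_sketch(x, [0, 2], keep_dims=True),
                   x.sum(axis=(0, 2), keepdims=True))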
Example #7
def _convert_global_average_pool(converter: ONNXConverter,
                                 onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    if x.ndim == 4:
        x.order.unify(OrderNCHW)

    reduction_size = mul(x.shape[2:])
    reduction_axis = Axis()

    x = x.reshape([x.shape[0], x.shape[1], reduction_size],
                  Order([x.order.axes[0], x.order.axes[1], reduction_axis]))
    y, = Sum(None, axis=reduction_axis)(x)
    y /= reduction_size

    converter.set_variable(onnx_op.output[0], y)
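
GlobalAveragePool is thus lowered to a reshape that flattens every axis after N and C into a single reduction axis, a Sum over that axis, and a division by the number of reduced elements. The same computation in plain NumPy (a sketch, not the converter itself):

import numpy as np

def global_average_pool_sketch(x: np.ndarray) -> np.ndarray:
    # Flatten the spatial axes, sum over them, then divide by their total size.
    n, c = x.shape[:2]
    reduction_size = int(np.prod(x.shape[2:]))
    return x.reshape(n, c, reduction_size).sum(axis=2) / reduction_size

x = np.random.randn(2, 3, 4, 5)
assert np.allclose(global_average_pool_sketch(x), x.mean(axis=(2, 3)))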
Example #8
def template(x_order=OrderNHWC,
             y_order=OrderNHW,
             axis=Axis.C,
             description: str = ""):
    vx = np.arange(120).reshape(2, 3, 4, 5)
    vy = np.sum(vx, axis=OrderNHWC.axes_dict[axis])

    x = Variable(vx.shape, order=OrderNHWC)
    y, = Sum(None, axis=axis)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Sum {description}",
        graph=Graph([x], [y]),
        backend=["webgl"],
        inputs={
            x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])
        },
        expected={
            y: np.transpose(vy, [OrderNHW.axes_dict[a] for a in y.order.axes])
        },
    )
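
The test data is generated in NHWC and then transposed into whatever memory order each variable was changed to, using axes_dict to map axis names to positions. A self-contained NumPy illustration of that transpose (the NHWC_POS dictionary below is a stand-in for OrderNHWC.axes_dict, not WebDNN code):

import numpy as np

NHWC_POS = {"N": 0, "H": 1, "W": 2, "C": 3}    # stand-in for OrderNHWC.axes_dict
target_axes = ["N", "C", "H", "W"]             # e.g. the variable was changed to NCHW

vx = np.arange(120).reshape(2, 3, 4, 5)                         # NHWC layout
vx_nchw = np.transpose(vx, [NHWC_POS[a] for a in target_axes])  # same values, NCHW layout
assert vx_nchw.shape == (2, 5, 3, 4)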
Example #9
def _convert_log_softmax(converter: ChainerConverter,
                         c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    # TODO: Conversion result is wrong when x.shape[1] is a placeholder.
    if not Placeholder.check_resolved(x.shape[1]):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSoftMax\" for dynamic number of cateogries is not supported"
        )

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)

    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
Example #10
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        flag_changed = False
        for softmax in traverse.filter_nodes(traverse.listup_operators(graph),
                                             Softmax):
            x = softmax.inputs["x"]
            y = softmax.outputs["y"]
            axis = softmax.parameters["axis"]
            softmax.remove_all()
            flag_changed = True

            max_x, = Max(None, axis=axis)(x)
            delta_x = x - max_x
            exp_delta_x, = Exp(None)(delta_x)
            sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
            new_y = exp_delta_x / sum_exp_delta_x

            new_y.change_order(y.order)
            OptimizeRule.replace_variable(graph, new_y, y)

        return graph, flag_changed
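
The pass rewrites each Softmax node into Max, Exp, Sum and an element-wise division, i.e. softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))); the shift by the maximum keeps exp() well conditioned. A NumPy check that the decomposed form matches a naive softmax (sketch only):

import numpy as np

def softmax_decomposed_sketch(x: np.ndarray, axis: int = -1) -> np.ndarray:
    max_x = x.max(axis=axis, keepdims=True)
    exp_delta_x = np.exp(x - max_x)          # same intermediate as the pass builds
    return exp_delta_x / exp_delta_x.sum(axis=axis, keepdims=True)

x = np.random.randn(4, 10) * 30
naive = np.exp(x) / np.exp(x).sum(axis=-1, keepdims=True)
assert np.allclose(softmax_decomposed_sketch(x), naive)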
Example #11
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
Example #12
def _convert_reduce_logsumexp(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x

    if not keepdims:
        y = y.squeeze(axis=axes)

    converter.set_variable(onnx_op.output[0], y)