Example #1
def log1p_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Log1p(x)\" is converted into \"Log(1+x)\", which is not as accurate as Log1p when "
        "x is so small that \"1 + x == 1\" in floating point arithmetic.")
    x = converter.get_variable(tf_op.inputs[0])
    y, = Log(None)(1 + x)
    converter.set_variable(tf_op.outputs[0], y)
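
A note on the warning above: the accuracy loss is plain floating-point behavior, not specific to WebDNN. A minimal NumPy demonstration (illustrative only, not part of the handler):

import numpy as np

x = 1e-20
print(np.log(1.0 + x))   # 0.0 -- 1 + 1e-20 rounds to exactly 1.0 in float64
print(np.log1p(x))       # 1e-20 -- log1p never forms the intermediate 1 + x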
Example #2
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    # TODO: The conversion result is wrong when the size of any reduced axis is a placeholder.
    if any(not Placeholder.check_resolved(x.shape_dict[axis])
           for axis in axes):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSumExp\" with a dynamic number of categories is not supported"
        )

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
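
The Max/Exp/Sum/Log chain above is the standard log-sum-exp stabilization: logsumexp(x) = max(x) + log(sum(exp(x - max(x)))), which keeps every exponent argument at or below zero. A plain-NumPy sketch of the same identity (illustrative, not WebDNN code):

import numpy as np

def logsumexp(x, axis):
    # Subtracting the maximum first keeps exp() from overflowing.
    m = np.max(x, axis=axis, keepdims=True)
    return np.max(x, axis=axis) + np.log(np.sum(np.exp(x - m), axis=axis))

x = np.array([1000.0, 1000.5, 999.0])
print(logsumexp(x, axis=0))        # ~1001.1, finite
print(np.log(np.sum(np.exp(x))))   # inf: exp(1000) overflows float64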
Example #3
def _convert_log1p(converter: ChainerConverter,
                   c_op: "chainer.functions.Log1p"):
    console.warning(
        "[ChainerConverter] In WebDNN, \"Log1p(x)\" is converted into \"Log(1+x)\", which is not as accurate as Log1p when "
        "x is so small that \"1 + x == 1\" in floating point arithmetic.")
    x = converter.get_variable(c_op.inputs[0])
    y, = Log(None)(x + 1)
    converter.set_variable(c_op.outputs[0](), y)
Example #4
def template(x_order=OrderNHWC, y_order=OrderNHWC, description: str = ""):
    vx = np.random.rand(2, 3, 4, 5) + 0.5
    vy = np.log(vx)

    x = Variable(vx.shape, order=OrderNHWC)
    y, = Log(None)(x)

    x.change_order(x_order)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Log {description}",
        graph=Graph([x], [y]),
        inputs={
            x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])
        },
        expected={
            y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])
        },
    )
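
Hypothetical invocations of this template (the test names are illustrative, and OrderNCHW is assumed to be importable from webdnn.graph.order alongside OrderNHWC):

def test():
    template()

def test_different_order():
    # Same values flow through; only the memory layout of x and y changes.
    template(x_order=OrderNCHW, y_order=OrderNCHW, description="different order")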
Example #5
def _convert_log_softmax(converter: ChainerConverter,
                         c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)

    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
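
This applies the same stabilization to log-softmax: log_softmax(x) = x - (max(x) + log(sum(exp(x - max(x))))). A plain-NumPy sketch (illustrative, not WebDNN code):

import numpy as np

def log_softmax(x, axis=1):
    # x - logsumexp(x) along `axis`, with the max subtracted for stability.
    m = np.max(x, axis=axis, keepdims=True)
    return x - (m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True)))

x = np.array([[1000.0, 1001.0, 1002.0]])
print(log_softmax(x))                          # finite: [[-2.408, -1.408, -0.408]]
print(x - np.log(np.sum(np.exp(x), axis=1)))   # -inf everywhere: exp(1000) overflows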
Example #6
def _convert_reduce_logsumexp(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    axes = [x.order.axes[i] for i in attrs["axes"].ints]
    keepdims = (attrs["keepdims"].i if "keepdims" in attrs else 1) == 1

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x

    if not keepdims:
        y = y.squeeze(axis=axes)

    converter.set_variable(onnx_op.output[0], y)
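
A NumPy reference for the reduce-and-squeeze behavior this handler implements, including the keepdims flag (sketch only, not ONNX or WebDNN code):

import numpy as np

def reduce_logsumexp(x, axes, keepdims=True):
    m = np.max(x, axis=axes, keepdims=True)
    y = m + np.log(np.sum(np.exp(x - m), axis=axes, keepdims=True))
    return y if keepdims else np.squeeze(y, axis=axes)

x = np.random.rand(2, 3, 4)
print(reduce_logsumexp(x, axes=(1, 2)).shape)                  # (2, 1, 1)
print(reduce_logsumexp(x, axes=(1, 2), keepdims=False).shape)  # (2,)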
Example #7
def _convert_log_softmax(converter: ChainerConverter,
                         c_op: "chainer.functions.LogSoftmax"):
    x = converter.get_variable(c_op.inputs[0])
    axis = x.order.axes[1]

    # TODO: The conversion result is wrong when x.shape[1] is a placeholder.
    if not Placeholder.check_resolved(x.shape[1]):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSoftmax\" with a dynamic number of categories is not supported"
        )

    max_x, = Max(None, axis=axis)(x)
    exp_delta_x, = Exp(None)(x - max_x)
    sum_exp_delta_x, = Sum(None, axis=axis)(exp_delta_x)
    log_sum_delta_exp, = Log(None)(sum_exp_delta_x)

    y = x - (log_sum_delta_exp + max_x)
    converter.set_variable(c_op.outputs[0](), y)
Example #8
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
Example #9
def _convert_log(converter: ONNXConverter, onnx_op: INodeProto):
    x0 = converter.get_variable(onnx_op.input[0])
    y, = Log(None)(x0)
    converter.set_variable(onnx_op.output[0], y)
Example #10
def _convert_log2(converter: ChainerConverter, c_op: "chainer.functions.Log2"):
    x = converter.get_variable(c_op.inputs[0])
    y = Log(None)(x)[0] / np.log(2)
    converter.set_variable(c_op.outputs[0](), y)
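
The handler uses the change-of-base identity log2(x) = ln(x) / ln(2); a quick NumPy check (illustrative):

import numpy as np

x = np.random.rand(100) + 0.5
assert np.allclose(np.log2(x), np.log(x) / np.log(2))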
Example #11
def log_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Log(None)(x)
    converter.set_variable(tf_op.outputs[0], y)