Example #1
def slice_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    begin = converter.get_variable(tf_op.inputs[1])
    size = converter.get_variable(tf_op.inputs[2])

    assert isinstance(
        begin, ConstantVariable
    ), "[TensorFlowConverter] op 'Slice' with dynamic position is not supported yet."
    assert isinstance(
        size, ConstantVariable
    ), "[TensorFlowConverter] op 'Slice' with dynamic size is not supported yet."

    begin = begin.data.flatten().astype(np.int32).tolist()
    size = size.data.flatten().astype(np.int32).tolist()
    y, = Slice(
        None,
        indices=AxisKeyDict(x.order.axes,
                            [slice(b, b + s) for b, s in zip(begin, size)]))(x)
    converter.set_variable(tf_op.outputs[0], y)
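
A minimal NumPy sketch (independent of WebDNN) of the begin/size-to-Python-slice mapping the handler performs; the array and values are illustrative:

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
begin, size = [0, 1, 2], [2, 2, 2]
# TF 'Slice' semantics: take size[i] elements starting at begin[i],
# i.e. slice(begin, begin + size) along each axis.
y = x[tuple(slice(b, b + s) for b, s in zip(begin, size))]
assert y.shape == (2, 2, 2)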
Example #2
def mean_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME: currently supports only the case where this operation means
    # global average pooling, e.g. (1, 7, 7, 2048) -> (1, 1, 1, 2048)
    assert tf_op.get_attr("keep_dims") is True

    in_var = converter.get_variable(tf_op.inputs[0])
    in_var.order.unify(OrderNHWC)  # FIXME: assuming input order as NHWC
    out_tf_var = tf_op.outputs[0]
    in_shape = in_var.shape
    out_shape = [s.value for s in out_tf_var.shape.dims]
    assert len(in_shape) == len(out_shape)
    assert out_shape[1] == 1
    assert out_shape[2] == 1
    assert out_shape[0] == in_shape[0]
    assert out_shape[3] == in_shape[3]

    out_var, = AveragePooling2D(None,
                                ksize=tuple(in_shape[1:3]),
                                stride=tuple(in_shape[1:3]),
                                padding=(0, 0))(in_var)
    converter.set_variable(out_tf_var, out_var)
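
A sketch of the equivalence this handler relies on: a keep_dims mean over H and W is the same as average pooling with a kernel (and stride) spanning the whole spatial extent. Checked with plain NumPy on illustrative shapes:

import numpy as np

x = np.random.rand(1, 7, 7, 2048)           # NHWC
mean = x.mean(axis=(1, 2), keepdims=True)   # (1, 1, 1, 2048)
# One pooling window covering all 7x7 positions == the global average.
pooled = x.reshape(1, 49, 2048).mean(axis=1).reshape(1, 1, 1, 2048)
assert np.allclose(mean, pooled)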
Example #3
def sum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    v = x

    assert isinstance(
        axis, ConstantVariable
    ), "[TensorFlowConverter] Operation 'Sum' with dynamic axis is not supported yet."
    for i_axis in sorted(axis.data.astype(int).flatten().tolist(),
                         reverse=True):
        axis = v.order.axes[i_axis]

        v, = Sum(None, axis=axis)(v)

    if tf_op.get_attr("keep_dims") or x.ndim == 1:
        v = v.reshape(order=x.order,
                      shape=[
                          v.shape_dict[a] if a in v.order.axes else 1
                          for a in x.order.axes
                      ])

    converter.set_variable(tf_op.outputs[0], v)
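
Why the loop reduces axes in descending index order: each Sum removes one axis, so starting from the highest index keeps the remaining indices valid. A plain-Python illustration:

import numpy as np

x = np.ones((2, 3, 4))
v = x
for i_axis in sorted([0, 2], reverse=True):  # reduce axis 2 first, then 0
    v = v.sum(axis=i_axis)                   # indices below i_axis stay valid
assert v.shape == (3,)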
Example #4
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    transposed_a = tf_op.get_attr("transpose_a")
    transposed_b = tf_op.get_attr("transpose_b")

    if a.ndim > 2 or b.ndim > 2:
        raise NotImplementedError(
            "[TensorFlowConverter] Currently, MatMul is supported only for the 2D x 2D case."
        )

    c_axes = []
    if transposed_a:
        c_axes.append(a.order.axes[-1])

        if a.order != OrderCN:
            a = a.reinterpret_axes(OrderCN)

    else:
        c_axes.append(a.order.axes[-2])

        if a.order != OrderNC:
            a = a.reinterpret_axes(OrderNC)

    if transposed_b:
        c_axes.append(Axis())
        if b.order != OrderNC:
            b = b.reinterpret_axes(OrderNC)

    else:
        c_axes.append(Axis())
        if b.order != OrderCN:
            b = b.reinterpret_axes(OrderCN)

    c_normalized, = Linear(None)(a, b)
    c = c_normalized.reinterpret_axes(Order(c_axes))

    converter.set_variable(tf_op.outputs[0], c)
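
A hedged NumPy restatement of what the transpose flags mean: the contraction axis of a is its last axis unless transpose_a is set (then it is the first), and symmetrically for b; the reinterpret_axes calls above just relabel the stored layout accordingly.

import numpy as np

def tf_matmul(a, b, transpose_a=False, transpose_b=False):
    # Mirrors TF MatMul semantics on 2-D arrays.
    if transpose_a:
        a = a.T
    if transpose_b:
        b = b.T
    return a @ b

a = np.random.rand(3, 2)   # stored (K, M): contraction axis first
b = np.random.rand(3, 4)   # stored (K, N)
assert tf_matmul(a, b, transpose_a=True).shape == (2, 4)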
Example #5
def conv2_d_backprop_input_handler(converter: TensorFlowConverter,
                                   tf_op: "tf.Operation"):
    input_sizes = converter.get_variable(
        tf_op.inputs[0])  # used only to compute the padding below
    assert input_sizes.size == 4 and isinstance(input_sizes, ConstantVariable)
    x_shape = input_sizes.data.flatten().astype(
        int).tolist()  # type: List[int]

    w = converter.get_variable(tf_op.inputs[1])  # HWNC
    gy = converter.get_variable(tf_op.inputs[2])  # NHWC

    assert tf_op.get_attr("data_format") == b"NHWC"
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))
    gy.order.unify(OrderNHWC)
    ksize_hw = (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]

    padding_name = tf_op.get_attr("padding")  # type: str
    if padding_name == b"SAME":
        padding = (padding_same(x_shape[gy.order.axes_dict[Axis.H]],
                                ksize_hw[0], stride_hw[0]),
                   padding_same(x_shape[gy.order.axes_dict[Axis.W]],
                                ksize_hw[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(
            f"[TensorFlowConverter] Conv2D: padding '{padding_name}' is not supported yet."
        )

    x, = Deconvolution2D(None,
                         ksize=ksize_hw,
                         stride=stride_hw,
                         padding=padding)(gy, w)
    converter.set_variable(tf_op.outputs[0], x)
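
For reference, a sketch of what padding_same presumably computes; this assumes TensorFlow's usual 'SAME' rule (the actual WebDNN helper may split odd totals differently):

import math

def padding_same(in_size: int, ksize: int, stride: int) -> int:
    # Assumed definition: one-sided padding under TF 'SAME',
    # where the output size is ceil(in_size / stride).
    out_size = math.ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + ksize - in_size, 0)
    return pad_total // 2

assert padding_same(7, 3, 1) == 1  # 3x3 kernel, stride 1: pad 1 per side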
Example #6
def pad_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] PadV2 with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    constant_values = converter.get_variable(tf_op.inputs[2]).change_order(
        x.order)

    for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
        xs = []

        if pad_begin > 0:
            multiplier = AxisKeyDict(x.order.axes, [
                pad_begin if a == axis else x.shape_dict[a]
                for a in x.order.axes
            ])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        xs.append(x)

        if pad_end > 0:
            multiplier = AxisKeyDict(x.order.axes, [
                pad_end if a == axis else x.shape_dict[a] for a in x.order.axes
            ])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(tf_op.outputs[0], x)
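
A NumPy sketch of the Tile-plus-Concat construction above: along each padded axis, constant blocks of the pad widths are built and concatenated around x (values illustrative):

import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)
constant_value, pad_begin, pad_end = -1.0, 1, 2   # padding along axis 0
before = np.full((pad_begin, 3), constant_value)  # the "tiled" constant block
after = np.full((pad_end, 3), constant_value)
y = np.concatenate([before, x, after], axis=0)
assert np.array_equal(
    y, np.pad(x, ((pad_begin, pad_end), (0, 0)),
              constant_values=constant_value))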
Example #7
def strided_slice_handler(converter: TensorFlowConverter,
                          tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    begin = converter.get_variable(tf_op.inputs[1])
    end = converter.get_variable(tf_op.inputs[2])
    strides = converter.get_variable(tf_op.inputs[3])

    assert isinstance(
        begin, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."
    assert isinstance(
        end, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."
    assert isinstance(
        strides, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."

    begin_mask = tf_op.get_attr("begin_mask")
    end_mask = tf_op.get_attr("end_mask")
    ellipsis_mask = tf_op.get_attr("ellipsis_mask")
    new_axis_mask = tf_op.get_attr("new_axis_mask")
    shrink_axis_mask = tf_op.get_attr("shrink_axis_mask")

    begin = begin.data.flatten().astype(np.int32).tolist()  # type: List[int]
    end = end.data.flatten().astype(np.int32).tolist()  # type: List[int]
    strides = strides.data.flatten().astype(
        np.int32).tolist()  # type: List[int]

    for i in range(x.ndim):
        if begin_mask & (1 << i):
            begin[i] = None

        if end_mask & (1 << i):
            end[i] = None

    slices = []
    for i in range(len(begin)):
        if ellipsis_mask & (1 << i):
            slices.append(Ellipsis)

        elif new_axis_mask & (1 << i):
            # insert new axis
            slices.append(None)

        elif shrink_axis_mask & (1 << i):
            # shrink axis
            slices.append(begin[i])

        else:
            # general slice
            slices.append(slice(begin[i], end[i], strides[i]))

    y = x[slices]
    converter.set_variable(tf_op.outputs[0], y)
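
A hedged reading of the mask decoding: bit i of each mask governs slice position i, and the handler maps them onto Python's indexing vocabulary (None inserts an axis, a bare integer shrinks one). For example, with shrink_axis_mask bit 0 and end_mask bit 2 set:

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
# begin=[0, 1, 0], end=[_, 3, _], strides all 1:
# axis 0 collapses to the index begin[0]; axis 2 runs to its end.
slices = (0, slice(1, 3, 1), slice(0, None, 1))
assert x[slices].shape == (2, 4)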
Example #8
def square_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y = x**2
    converter.set_variable(tf_op.outputs[0], y)
Example #9
def relu6_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = ClippedRelu(None, cap=6)(x)
    converter.set_variable(tf_op.outputs[0], y)
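
For reference, ClippedRelu with cap=6 computes min(max(x, 0), 6), matching tf.nn.relu6:

import numpy as np

x = np.array([-1.0, 3.0, 9.0])
assert np.array_equal(np.clip(x, 0.0, 6.0), [0.0, 3.0, 6.0])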
Example #10
def stop_gradient_handler(converter: TensorFlowConverter,
                          tf_op: "tf.Operation"):
    console.warning("[TensorFlowConverter] StopGradient is ignored.")
    converter.set_variable(tf_op.outputs[0],
                           converter.get_variable(tf_op.inputs[0]))
Example #11
def conv2_d_backprop_input_handler(converter: TensorFlowConverter,
                                   tf_op: "tf.Operation"):
    input_sizes = converter.get_variable(tf_op.inputs[0])
    if not isinstance(input_sizes, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Conv2DBackpropInput with a dynamic output shape (i.e. the input shape of the forward convolution) is not supported."
        )
    input_sizes = tuple(input_sizes.data.astype(np.int32).tolist())

    w = converter.get_variable(tf_op.inputs[1])  # HWNC
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))

    gy = converter.get_variable(tf_op.inputs[2])  # NHWC
    data_format = tf_op.get_attr("data_format")
    check_data_format(gy, data_format)

    input_size = np.array([
        input_sizes[gy.order.axes_dict[Axis.H]],
        input_sizes[gy.order.axes_dict[Axis.W]]
    ])

    ksize = np.array([w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]])

    stride = np.array(tf_op.get_attr("strides"))
    assert stride[gy.order.axes_dict[Axis.N]] == 1
    assert stride[gy.order.axes_dict[Axis.C]] == 1
    stride = stride[[gy.order.axes_dict[Axis.H], gy.order.axes_dict[Axis.W]]]

    padding = np.array([
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1)
    ])

    x, = Deconvolution2D(None,
                         ksize=ksize.tolist(),
                         stride=stride.tolist(),
                         padding=0)(gy, w)

    # The actual padding size depends on two factors:
    # 1. the padding mode
    # 2. the extra apron size (= (input size of the convolution) - (size of the tensor expanded by the deconvolution))

    expanded_size = np.array([x.shape_dict[Axis.H], x.shape_dict[Axis.W]])
    apron_size = input_size - (expanded_size - padding.sum(axis=1))

    # cancel padding by apron if possible
    for i in (0, 1):
        if padding[i, 0] > apron_size[i]:
            padding[i, 0] -= apron_size[i]
            apron_size[i] = 0

        else:
            apron_size[i] -= padding[i, 0]
            padding[i, 0] = 0

        if padding[i, 1] > apron_size[i]:
            padding[i, 1] -= apron_size[i]
            apron_size[i] = 0

        else:
            apron_size[i] -= padding[i, 1]
            padding[i, 1] = 0

    # append extra apron
    for i, axis in enumerate((Axis.H, Axis.W)):
        if apron_size[i] == 0:
            continue

        data = np.zeros([
            apron_size[i] if a == axis else x.shape_dict[a]
            for a in x.order.axes
        ])
        x, = Concat(None, axis=axis)(x, ConstantVariable(data, x.order))

    # crop without padding
    padding = padding.tolist()  # type: List[List[int]]
    slice_h = slice(None) if padding[0] == [0, 0] else slice(
        padding[0][0], -padding[0][1])
    slice_w = slice(None) if padding[1] == [0, 0] else slice(
        padding[1][0], -padding[1][1])
    if data_format == b"NCHW":
        x = x[:, :, slice_h, slice_w]

    elif data_format == b"NHWC":
        x = x[:, slice_h, slice_w, :]

    else:
        raise NotImplementedError(f"Unknown data format: {data_format}")

    converter.set_variable(tf_op.outputs[0], x)
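
Illustrative numbers for the apron bookkeeping above (assuming the usual SAME-padding definition): a forward convolution with input size 7, ksize 3, stride 2 and SAME padding gives output size 4 with per-side padding 1. The zero-padding deconvolution then expands gy back to (4 - 1) * 2 + 3 = 9, so:

    expanded_size = 9, padding.sum() = 2
    apron_size    = 7 - (9 - 2) = 0   # no extra apron; crop 1 from each side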
Example #12
def variable_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    converter.convert_to_constant_variable(tf_op.outputs[0])
Example #13
def rsqrt_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # Used for batch normalization
    x = converter.get_variable(tf_op.inputs[0])
    y, = Rsqrt(None)(x)
    # noinspection PyTypeChecker
    converter.set_variable(tf_op.outputs[0], y)
Example #14
def bias_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    unify_order(b.order, OrderC)
    y = x + b
    converter.set_variable(tf_op.outputs[0], y)
Example #15
def identity_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    converter.set_variable(tf_op.outputs[0],
                           converter.get_variable(tf_op.inputs[0]))
Example #16
def mirror_pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] MirrorPad with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    mode = tf_op.get_attr("mode")  # type: bytes

    for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
        xs = []

        if pad_begin > 0:
            if mode == b'SYMMETRIC':
                slices = [
                    slice(pad_begin - 1, None, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            elif mode == b'REFLECT':
                slices = [
                    slice(pad_begin, 0, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            else:
                raise NotImplementedError(
                    f"[TensorFlowConverter] Unknown mirror pad mode: {mode}")

            xs.append(x[slices])

        xs.append(x)

        if pad_end > 0:
            if mode == b'SYMMETRIC':
                slices = [
                    slice(-1, -1 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            elif mode == b'REFLECT':
                slices = [
                    slice(-2, -2 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            else:
                raise NotImplementedError(
                    f"[TensorFlowConverter] Unknown mirror pad mode: {mode}")

            xs.append(x[slices])

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(tf_op.outputs[0], x)
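
A quick NumPy check of the two reflection slices above for pad_begin = 2 (illustrative):

import numpy as np

x = np.array([1, 2, 3, 4, 5])
# SYMMETRIC repeats the edge element: slice(pad_begin - 1, None, -1)
assert np.array_equal(x[slice(2 - 1, None, -1)], [2, 1])  # prefix -> 2 1 | 1 2 3 ...
# REFLECT excludes it: slice(pad_begin, 0, -1)
assert np.array_equal(x[slice(2, 0, -1)], [3, 2])         # prefix -> 3 2 | 1 2 3 ...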
Example #17
def softmax_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Softmax(None, axis=x.order.axes[-1])(x)

    converter.set_variable(tf_op.outputs[0], y)
Example #18
def rank_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y = ConstantVariable(np.array([x.ndim]), Order([None]))

    converter.set_variable(tf_op.outputs[0], y)
Example #19
from webdnn.graph.operators.max import Max
from webdnn.graph.operators.min import Min
from webdnn.graph.operators.prod import Prod
from webdnn.graph.operators.rsqrt import Rsqrt
from webdnn.graph.operators.scalar_add import ScalarAdd
from webdnn.graph.operators.scalar_mul import ScalarMul
from webdnn.graph.operators.select import Select
from webdnn.graph.operators.sigmoid import Sigmoid
from webdnn.graph.operators.sum import Sum
from webdnn.graph.operators.tanh import Tanh
from webdnn.graph.operators.tensordot import Tensordot
from webdnn.graph.order import Order
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console

TensorFlowConverter.register_handler("Abs")(unary_op_handler(Abs))


@TensorFlowConverter.register_handler("Acos")
def acos_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Acosh")
def acosh_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Add")(elementwise_binary_op_handler(
Example #20
@TensorFlowConverter.register_handler("Dilation2DBackpropFilter")
def dilation2_d_backprop_filter_handler(converter: TensorFlowConverter,
                                        tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Dilation2DBackpropInput")
def dilation2_d_backprop_input_handler(converter: TensorFlowConverter,
                                       tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Elu")(unary_op_handler(Elu))


@TensorFlowConverter.register_handler("EluGrad")
def elu_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("FractionalAvgPoolGrad")
def fractional_avg_pool_grad_handler(converter: TensorFlowConverter,
                                     tf_op: "tf.Operation"):
    raise NotImplementedError(
        f"[TensorFlowConverter] {tf_op.type} is not supported yet.")

Example #21
def expm1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Exp(None)(x)
    y = y - 1
    converter.set_variable(tf_op.outputs[0], y)
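
Note: building Expm1 as Exp followed by a subtraction is correct at graph level, but it forfeits the extra floating-point accuracy a fused expm1 has near zero:

import numpy as np

print(np.exp(1e-10) - 1)  # ~1.000000083e-10: cancellation error visible
print(np.expm1(1e-10))    # ~1e-10: accurate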
Example #22
def softplus_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Softplus(None, beta=1)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #23
def rsqrt_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y, = Rsqrt(None)(x)
    converter.set_variable(tf_op.outputs[0], y)