Example #1
def fused_batch_norm_handler(converter: TensorFlowConverter,
                             tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    scale = converter.get_variable(tf_op.inputs[1])
    offset = converter.get_variable(tf_op.inputs[2])
    mean = converter.get_variable(tf_op.inputs[3])
    variance = converter.get_variable(tf_op.inputs[4])
    epsilon = tf_op.get_attr("epsilon")
    data_format = tf_op.get_attr("data_format")

    if data_format == b"NHWC":
        channel_axis = x.order.axes[3]

    elif data_format == b"NCHW":
        channel_axis = x.order.axes[1]

    else:
        raise NotImplementedError("Unknown data format")

    scale.order.axes[0].unify(channel_axis)
    offset.order.axes[0].unify(channel_axis)
    mean.order.axes[0].unify(channel_axis)
    variance.order.axes[0].unify(channel_axis)

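    # y = scale * (x - mean) / sqrt(variance + epsilon) + offset, broadcast along the channel axis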
    y = (x - mean) / ((variance + epsilon)**0.5) * scale + offset

    converter.set_variable(tf_op.outputs[0], y)
Example #2
def conv2_d_backprop_input_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    input_sizes = converter.get_variable(tf_op.inputs[0])  # static shape of the convolution input
    assert input_sizes.size == 4 and isinstance(input_sizes, ConstantVariable)
    x_shape = input_sizes.data.flatten().astype(int).tolist()  # type: List[int]

    w = converter.get_variable(tf_op.inputs[1])  # HWNC
    gy = converter.get_variable(tf_op.inputs[2])  # NHWC
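    # Conv2DBackpropInput is the gradient of Conv2D w.r.t. its input; WebDNN realizes it as a transposed convolution (Deconvolution2D).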

    assert tf_op.get_attr("data_format") == b"NHWC"
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))
    gy.order.unify(OrderNHWC)
    ksize_hw = (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]

    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x_shape[gy.order.axes_dict[Axis.H]], ksize_hw[0], stride_hw[0]),
                   padding_same(x_shape[gy.order.axes_dict[Axis.W]], ksize_hw[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(f"[TensorFlowConverter] Conv2D: padding '{padding_name}' is not supported yet.")

    x, = Deconvolution2D(None, ksize=ksize_hw, stride=stride_hw, padding=padding)(gy, w)
    converter.set_variable(tf_op.outputs[0], x)
Example #3
def range_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    start = converter.get_variable(tf_op.inputs[0])
    limit = converter.get_variable(tf_op.inputs[1])
    delta = converter.get_variable(tf_op.inputs[2])

    if not isinstance(start, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] 'Range' operator with dynamic range is not supported yet"
        )

    if not isinstance(limit, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] 'Range' operator with dynamic range is not supported yet"
        )

    if not isinstance(delta, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] 'Range' operator with dynamic range is not supported yet"
        )

    start = start.data.flatten()[0]
    limit = limit.data.flatten()[0]
    delta = delta.data.flatten()[0]

    y = ConstantVariable(np.arange(start, limit, delta), Order([None]))
    converter.set_variable(tf_op.outputs[0], y)
Example #4
def conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)

    w = converter.get_variable(tf_op.inputs[1])  # HWCN
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.C, Axis.N]))

    ksize = (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW])

    stride = tuple(tf_op.get_attr("strides"))  # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]],
              stride[x.order.axes_dict[Axis.W]])

    x, padding = convolution_handler_preprocess(
        x,
        ksize=ksize,
        padding=tf_op.get_attr("padding"),
        dilation_rate=(1, 1),
        data_format=data_format)

    y, = Convolution2D(None, ksize=ksize, stride=stride, padding=padding)(x, w)
    converter.set_variable(tf_op.outputs[0], y)
Example #5
def pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] Pad with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    if x.order.check_same_axes(OrderNHWC) and all([
            paddings[x.order.axes_dict[Axis.N]][0] ==
            paddings[x.order.axes_dict[Axis.N]][1] == 0,
            paddings[x.order.axes_dict[Axis.H]][0]
            == paddings[x.order.axes_dict[Axis.H]][1],
            paddings[x.order.axes_dict[Axis.W]][0]
            == paddings[x.order.axes_dict[Axis.W]][1],
            paddings[x.order.axes_dict[Axis.C]][0] ==
            paddings[x.order.axes_dict[Axis.C]][1] == 0
    ]):
        # Padding for only spatial axes: use ZeroPadding2D
        y, = ZeroPadding2D(None,
                           padding=(paddings[x.order.axes_dict[Axis.H]][0],
                                    paddings[x.order.axes_dict[Axis.W]][0]))(x)

    else:
        # General case: Use Concat
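        # Along each padded axis, concatenate zero-filled constant blocks before and after x.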
        for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
            xs = []

            if pad_begin > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_begin if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            xs.append(x)

            if pad_end > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_end if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

        y = x

    converter.set_variable(tf_op.outputs[0], y)
Example #6
def strided_slice_handler(converter: TensorFlowConverter,
                          tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    begin = converter.get_variable(tf_op.inputs[1])
    end = converter.get_variable(tf_op.inputs[2])
    strides = converter.get_variable(tf_op.inputs[3])

    assert isinstance(
        begin, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."
    assert isinstance(
        end, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."
    assert isinstance(
        strides, ConstantVariable
    ), "[TensorFlowConverter] op 'StridedSlice' with dynamic index is not supported yet."

    begin_mask = tf_op.get_attr("begin_mask")
    end_mask = tf_op.get_attr("end_mask")
    ellipsis_mask = tf_op.get_attr("ellipsis_mask")
    new_axis_mask = tf_op.get_attr("new_axis_mask")
    shrink_axis_mask = tf_op.get_attr("shrink_axis_mask")
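    # Each mask attribute is a bit field; bit i controls how entry i of the slice specification is interpreted.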

    begin = begin.data.flatten().astype(np.int32).tolist()  # type: List[int]
    end = end.data.flatten().astype(np.int32).tolist()  # type: List[int]
    strides = strides.data.flatten().astype(
        np.int32).tolist()  # type: List[int]

    for i in range(x.ndim):
        if begin_mask & (1 << i):
            begin[i] = None

        if end_mask & (1 << i):
            end[i] = None

    slices = []
    for i in range(len(begin)):
        if ellipsis_mask & (1 << i):
            slices.append(Ellipsis)

        elif new_axis_mask & (1 << i):
            # insert new axis
            slices.append(None)

        elif shrink_axis_mask & (1 << i):
            # shrink axis
            slices.append(begin[i])

        else:
            # general slice
            slices.append(slice(begin[i], end[i], strides[i]))

    y = x[slices]
    converter.set_variable(tf_op.outputs[0], y)
Example #7
def select_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    cond = converter.get_variable(tf_op.inputs[0])
    x1 = converter.get_variable(tf_op.inputs[1])
    x2 = converter.get_variable(tf_op.inputs[2])

    check_broadcast_constraints(cond, x1)
    check_broadcast_constraints(cond, x2)
    check_broadcast_constraints(x1, x2)

    y, = Select(None)(cond, x1, x2)
    converter.set_variable(tf_op.outputs[0], y)
Example #8
def transpose_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    indices = converter.get_variable(tf_op.inputs[1])

    if not isinstance(indices, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Operator 'Transpose' with dynamic indices is not supported yet."
        )

    indices = indices.data.astype(int).flatten().tolist()  # type: List[int]
    y, = Transpose(None)(x)
    y.change_order(Order([x.order.axes[i] for i in indices]))

    converter.set_variable(tf_op.outputs[0], y)
Example #9
def tile_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    multiplier = converter.get_variable(tf_op.inputs[1])

    if not isinstance(multiplier, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Operator 'Tile' with dynamic multiplier is not supported yet."
        )

    multiplier = AxisKeyDict(x.order.axes,
                             multiplier.data.astype(int).flatten().tolist())
    y, = Tile(None, multiplier=multiplier)(x)

    converter.set_variable(tf_op.outputs[0], y)
Example #10
def reshape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # input: data, output_shape
    # output: reshaped_data
    # Currently, the output_shape input is ignored.
    in_var = converter.get_variable(tf_op.inputs[0])
    out_tf_var = tf_op.outputs[0]
    # calculate output shape from out_tf_var.shape and in_var.shape
    # out_tf_var.shape can have at most one placeholder.
    out_placeholder_count = 0
    out_placeholder_idx = None
    out_constant_prod = 1
    out_shape = []
    for i, dim_size in enumerate(out_tf_var.shape.dims):
        out_shape.append(dim_size.value)
        if dim_size.value is None:
            out_placeholder_count += 1
            out_placeholder_idx = i
        else:
            out_constant_prod *= dim_size.value
    if out_placeholder_count > 1:
        raise NotImplementedError(
            "[TensorFlowConverter] Reshape: output with more than one placeholder is not supported yet."
        )
    elif out_placeholder_count == 1:
        if in_var.size % out_constant_prod != 0:
            raise ValueError(
                "[TensorFlowConverter] Reshape: invalid reshape output value.")
        out_shape[out_placeholder_idx] = in_var.size // out_constant_prod
    out_var = in_var.reshape(out_shape, Order([None] * len(out_shape)))
    converter.set_variable(out_tf_var, out_var)
Example #11
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    transposed_a = tf_op.get_attr("transpose_a")
    transposed_b = tf_op.get_attr("transpose_b")

    if a.ndim > 2 or b.ndim > 2:
        raise NotImplementedError(
            "[TensorFlowConverter] Currently, MatMul is supported only 2D * 2D case."
        )

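    # Choose the contracted (inner) axis of each operand according to its transpose flag.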
    reduced_axes = []
    if transposed_a:
        reduced_axes.append(a.order.axes[0])

    else:
        reduced_axes.append(a.order.axes[1])

    if transposed_b:
        reduced_axes.append(b.order.axes[1])

    else:
        reduced_axes.append(b.order.axes[0])

    c, = Tensordot(None, axes=reduced_axes)(a, b)
    converter.set_variable(tf_op.outputs[0], c)
Example #12
def max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)

    ksize = tuple(tf_op.get_attr("ksize"))  # type: Tuple[int,...]
    assert ksize[x.order.axes_dict[Axis.N]] == 1
    assert ksize[x.order.axes_dict[Axis.C]] == 1
    ksize = (ksize[x.order.axes_dict[Axis.H]],
             ksize[x.order.axes_dict[Axis.W]])

    stride = tuple(tf_op.get_attr("strides"))  # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]],
              stride[x.order.axes_dict[Axis.W]])

    x, padding = convolution_handler_preprocess(
        x,
        ksize=ksize,
        padding=tf_op.get_attr("padding"),
        dilation_rate=(1, 1),
        data_format=data_format)

    y, = MaxPooling2D(None,
                      ksize=ksize,
                      stride=stride,
                      padding=padding,
                      cover_all=False)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #13
def max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)

    ksize = tuple(tf_op.get_attr("ksize"))  # type: Tuple[int,...]
    assert ksize[x.order.axes_dict[Axis.N]] == 1
    assert ksize[x.order.axes_dict[Axis.C]] == 1
    ksize = (ksize[x.order.axes_dict[Axis.H]],
             ksize[x.order.axes_dict[Axis.W]])

    stride = tuple(tf_op.get_attr("strides"))  # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]],
              stride[x.order.axes_dict[Axis.W]])

    padding = (
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1),
    )
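    # Realize the padding by concatenation, filling with a very negative value so padded cells can never become the max.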
    x, padding = convert_odd_padding_to_concat(x,
                                               padding=padding,
                                               value=-1.0e10)

    y, = MaxPooling2D(None,
                      ksize=ksize,
                      stride=stride,
                      padding=padding,
                      cover_all=False)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #14
def expm1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Expm1(x)\" is converted into \"Exp(x)-1\", which is not as accurate as Expm1 "
        "when x is so small that \"Exp(x) == 1\" in floating point accuracy.")
    x = converter.get_variable(tf_op.inputs[0])
    y = Exp(None)(x)[0] - 1
    converter.set_variable(tf_op.outputs[0], y)
Example #15
def avg_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    data_format = tf_op.get_attr("data_format")
    check_data_format(x, data_format)

    ksize = tuple(tf_op.get_attr("ksize"))  # type: Tuple[int,...]
    assert ksize[x.order.axes_dict[Axis.N]] == 1
    assert ksize[x.order.axes_dict[Axis.C]] == 1
    ksize = (ksize[x.order.axes_dict[Axis.H]],
             ksize[x.order.axes_dict[Axis.W]])

    stride = tuple(tf_op.get_attr("strides"))  # type: Tuple[int,...]
    assert stride[x.order.axes_dict[Axis.N]] == 1
    assert stride[x.order.axes_dict[Axis.C]] == 1
    stride = (stride[x.order.axes_dict[Axis.H]],
              stride[x.order.axes_dict[Axis.W]])

    padding = (
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1),
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding)

    if any(p > 0 for p in padding):
        console.warning(
            "[TensorFlowConverter] TensorFlow's AvgPool computes the average over only the valid elements in each window "
            "(padding excluded), but WebDNN divides by the number of elements including padding, so the results will "
            "differ at the edges.")

    y, = AveragePooling2D(None,
                          ksize=ksize,
                          stride=stride,
                          padding=padding,
                          cover_all=False)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #16
def pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # Zero padding
    # FIXME: currently the padding is derived from the input/output shapes; properly it should be read from inputs[1].

    in_var = converter.get_variable(tf_op.inputs[0])
    in_var.order.unify(OrderNHWC)  # FIXME: assuming input order as NHWC
    out_tf_var = tf_op.outputs[0]
    # calculate output shape from out_tf_var.shape and in_var.shape
    # ZeroPadding2D operator only accepts padding for H and W axes.
    padding = [0, 0]
    for dim in range(in_var.ndim):
        in_size = in_var.shape[dim]
        out_size = out_tf_var.shape.dims[dim].value
        assert isinstance(
            in_size, int
        ), "[TensorFlowConverter] Pad: Placeholder for input shape is not supported yet."
        assert isinstance(
            out_size, int
        ), "[TensorFlowConverter] Pad: Placeholder for output shape is not supported yet."
        axis = in_var.order.axes[dim]
        if axis in [Axis.H, Axis.W]:
            assert (
                out_size - in_size
            ) % 2 == 0, "[TensorFlowConverter] Pad: Uneven padding is not supported yet."
            pad_size = (out_size - in_size) // 2
            if axis == Axis.H:
                padding[0] = pad_size
            elif axis == Axis.W:
                padding[1] = pad_size
        else:
            assert out_size == in_size, "[TensorFlowConverter] Pad: padding for axis other than H and W is not supported yet."
    out_var, = ZeroPadding2D(None, padding=tuple(padding))(in_var)
    converter.set_variable(out_tf_var, out_var)
Example #17
def avg_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])  # NHWC
    assert tf_op.get_attr("data_format") == b"NHWC"
    x.order.unify(OrderNHWC)

    ksize_nhwc = tf_op.get_attr("ksize")  # type: List[int]
    assert ksize_nhwc[0] == 1
    assert ksize_nhwc[3] == 1
    ksize_hw = ksize_nhwc[1:3]

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]

    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x.shape_dict[Axis.H], ksize_hw[0], stride_hw[0]),
                   padding_same(x.shape_dict[Axis.W], ksize_hw[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(f"[TensorFlowConverter] AvgPool: padding '{padding_name}' is not supported yet.")

    y, = AveragePooling2D(None, ksize=ksize_hw, stride=stride_hw, padding=padding)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #18
def log1p_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Log1p(x)\" is converted into \"Log(1+x)\", which is not as accurate as Log1p "
        "when x is so small that \"1 + x == 1\" in floating point accuracy.")
    x = converter.get_variable(tf_op.inputs[0])
    y, = Log(None)(1 + x)
    converter.set_variable(tf_op.outputs[0], y)
Example #19
def sum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    axis_var = converter.get_variable(tf_op.inputs[1])
    assert isinstance(
        axis_var, ConstantVariable
    ), "[TensorFlowConverter] Operation 'Sum' with dynamic axis is not supported yet."

    for axis in [
            x.order.axes[i] for i in axis_var.data.astype(int).flatten().tolist()
    ]:
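        # Reduce over this axis; unless keep_dims is set, the reduced axis is squeezed away
        # (while more than one axis remains).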
        x, = Sum(None, axis=axis)(x)

        if not tf_op.get_attr("keep_dims") and x.ndim > 1:
            x = x.squeeze(axis)

    converter.set_variable(tf_op.outputs[0], x)
Example #20
def conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    x = converter.get_variable(tf_op.inputs[0])  # NHWC
    w = converter.get_variable(tf_op.inputs[1])  # HWCN
    assert tf_op.get_attr("data_format") == b"NHWC"
    unify_order(x.order, OrderNHWC)
    unify_order(w.order, OrderHWCN)
    ksize = (w.shape_dict[Axis.H], w.shape_dict[Axis.W])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]
    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x.shape_dict[Axis.H], ksize[0], stride_hw[0]),
                   padding_same(x.shape_dict[Axis.W], ksize[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(
            f"[TensorFlowConverter] Conv2D: padding '{padding_name}' is not supported yet."
        )

    y, = Convolution2D(None, ksize=ksize, stride=stride_hw, padding=padding)(x, w)
    converter.set_variable(tf_op.outputs[0], y)
Example #21
def space_to_depth_handler(converter: TensorFlowConverter,
                           tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    x.order.unify(OrderNHWC)

    y, = Space2Depth(None, r=tf_op.get_attr("block_size"))(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #22
def max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # padding: https://www.tensorflow.org/api_guides/python/nn#Notes_on_SAME_Convolution_Padding

    x = converter.get_variable(tf_op.inputs[0])  # NHWC
    assert tf_op.get_attr("data_format") == b"NHWC"
    unify_order(x.order, OrderNHWC)
    ksize_nhwc = tf_op.get_attr("ksize")  # type: List[int]
    assert ksize_nhwc[0] == 1
    assert ksize_nhwc[3] == 1
    ksize = (ksize_nhwc[1], ksize_nhwc[2])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]
    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x.shape_dict[Axis.H], ksize[0], stride_hw[0]),
                   padding_same(x.shape_dict[Axis.W], ksize[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(
            f"[TensorFlowConverter] MaxPool: padding '{padding_name}' is not supported yet."
        )

    y, = MaxPooling2D(None, ksize=ksize, stride=stride_hw, padding=padding)(x)
    converter.set_variable(tf_op.outputs[0], y)
Example #23
def reshape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    shape = converter.get_variable(tf_op.inputs[1])

    if not isinstance(shape, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] 'Reshape' operator with dynamic shape is not supported yet."
        )

    shape = shape.data.flatten().tolist()  # type: List[int]
    if -1 in shape:
        i = shape.index(-1)
        shape.remove(-1)
        shape.insert(i, x.size // (mul(shape)))

    y = x.reshape(shape, Order([None] * len(shape)))
    converter.set_variable(tf_op.outputs[0], y)
Example #24
def expand_dims_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    dim = converter.get_variable(tf_op.inputs[1])

    if not isinstance(dim, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Operator 'ExpandDims' with dynamic dimension is not supported."
        )

    dim = dim.data.astype(np.int32).flatten()[0]
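    # Insert a size-1 dimension with a fresh anonymous Axis at position "dim".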
    new_shape = list(x.shape)
    new_shape.insert(dim, 1)
    new_axes = list(x.order.axes)
    new_axes.insert(dim, Axis())
    converter.set_variable(tf_op.outputs[0],
                           x.reshape(order=Order(new_axes), shape=new_shape))
Example #25
def less_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

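    # x <= y is expressed as GreaterEqual(y, x) with the operands swapped.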
    z, = GreaterEqual(None)(y, x)
    converter.set_variable(tf_op.outputs[0], z)
Example #26
def less_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

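    # x < y is expressed as y > x with the operands swapped.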
    z = y > x
    converter.set_variable(tf_op.outputs[0], z)
Example #27
def cast_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    dst_t = tf_op.get_attr("DstT")

    if dst_t != DT_FLOAT:
        console.warning("[TensorFlowConverter] Operator 'Cast' is ignored.")

    x = converter.get_variable(tf_op.inputs[0])
    converter.set_variable(tf_op.outputs[0], x)
Example #28
def shape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
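    # The shape is folded into a constant, so every dimension must be resolved at conversion time.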
    assert all(
        Placeholder.check_resolved(s) for s in x.shape
    ), "[TensorFlowConverter] op 'Shape' with dynamic shape is not supported yet."

    y = ConstantVariable(np.array(x.shape), Order([None]))
    converter.set_variable(tf_op.outputs[0], y)
Example #29
def squared_difference_handler(converter: TensorFlowConverter,
                               tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    y = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(x, y)

    converter.set_variable(tf_op.outputs[0], (x - y)**2)
Example #30
def sub_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])

    check_broadcast_constraints(a, b)

    c = a - b
    converter.set_variable(tf_op.outputs[0], c)