Example 1
def max_pool_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # padding: https://www.tensorflow.org/api_guides/python/nn#Notes_on_SAME_Convolution_Padding

    x = converter.get_variable(tf_op.inputs[0])  # NHWC
    assert tf_op.get_attr("data_format") == b"NHWC"
    unify_order(x.order, OrderNHWC)
    ksize_nhwc = tf_op.get_attr("ksize")  # type: List[int]
    assert ksize_nhwc[0] == 1
    assert ksize_nhwc[3] == 1
    ksize = (ksize_nhwc[1], ksize_nhwc[2])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]
    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x.shape_dict[Axis.H], ksize[0], stride_hw[0]),
                   padding_same(x.shape_dict[Axis.W], ksize[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(
            f"[TensorFlowConverter] MaxPool: padding '{padding_name}' is not supported yet."
        )

    y, = MaxPooling2D(None, ksize=ksize, stride=stride_hw, padding=padding)(x)
    converter.set_variable(tf_op.outputs[0], y)
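
The padding_same helper is not shown in these snippets. As a minimal sketch, assuming it implements TensorFlow's documented SAME rule (out_size = ceil(in_size / stride)) and returns one per-side padding value as the call sites suggest:

import math

def padding_same(in_size: int, ksize: int, stride: int) -> int:
    # SAME rule: choose the total padding so that out_size == ceil(in_size / stride).
    out_size = math.ceil(in_size / stride)
    total_padding = max((out_size - 1) * stride + ksize - in_size, 0)
    # WebDNN takes one symmetric padding value per axis; when total_padding is odd,
    # a symmetric split cannot reproduce TensorFlow's asymmetric SAME padding exactly.
    return total_padding // 2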
Example 2
def conv2_d_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME
    x = converter.get_variable(tf_op.inputs[0])  # NHWC
    w = converter.get_variable(tf_op.inputs[1])  # HWCN
    assert tf_op.get_attr("data_format") == b"NHWC"
    unify_order(x.order, OrderNHWC)
    unify_order(w.order, OrderHWCN)
    ksize = (w.shape_dict[Axis.H], w.shape_dict[Axis.W])

    stride_nhwc = tf_op.get_attr("strides")  # type: List[int]
    assert stride_nhwc[0] == 1
    assert stride_nhwc[3] == 1
    stride_hw = stride_nhwc[1:3]
    padding_name = tf_op.get_attr("padding")  # type: bytes
    if padding_name == b"SAME":
        padding = (padding_same(x.shape_dict[Axis.H], ksize[0], stride_hw[0]),
                   padding_same(x.shape_dict[Axis.W], ksize[1], stride_hw[1]))
    elif padding_name == b"VALID":
        padding = (0, 0)
    else:
        raise NotImplementedError(
            f"[TensorFlowConverter] Conv2D: padding '{padding_name}' is not supported yet."
        )

    y, = Convolution2D(None, ksize=ksize, stride=stride_hw, padding=padding)(x, w)
    converter.set_variable(tf_op.outputs[0], y)
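
For reference, with symmetric padding p, kernel size k, and stride s, the spatial output size of such a convolution follows the standard floor-mode formula; a quick arithmetic check (illustration only, not WebDNN API):

def conv_output_size(in_size: int, ksize: int, stride: int, padding: int) -> int:
    # Standard floor-mode convolution output size.
    return (in_size + 2 * padding - ksize) // stride + 1

assert conv_output_size(224, 7, 2, 3) == 112  # e.g. ResNet's stem convolution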
Example 3
def _convert_dilated_convolution2d(
    converter: ChainerConverter, c_op:
    "chainer.functions.connection.dilated_convolution_2d.DilatedConvolution2DFunction"
):
    x = converter.get_variable(c_op.inputs[0])
    w = converter.get_variable(c_op.inputs[1])

    unify_order(x.order, OrderNCHW)
    unify_order(w.order, OrderNCHW)

    # When dx == dy == 1, this is an ordinary convolution.
    conv_opr = Convolution2D(None,
                             ksize=(w.shape_dict[Axis.H],
                                    w.shape_dict[Axis.W]),
                             stride=(c_op.sy, c_op.sx),
                             padding=(c_op.ph, c_op.pw),
                             # (H, W) ordering, consistent with stride and padding
                             dilation_rate=(c_op.dy, c_op.dx))

    y, = conv_opr(x, w)

    if len(c_op.inputs) == 3:
        # with bias
        bias = converter.get_variable(c_op.inputs[2])
        y = y + bias

    converter.set_variable(c_op.outputs[0](), y)
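
A dilation rate of 1 reduces to an ordinary convolution; larger rates enlarge the receptive field without adding weights. A small arithmetic helper illustrating the effective kernel size (not part of the converter):

def effective_ksize(ksize: int, dilation: int) -> int:
    # A k-tap kernel with dilation d spans (k - 1) * d + 1 input positions.
    return (ksize - 1) * dilation + 1

assert effective_ksize(3, 1) == 3  # ordinary convolution
assert effective_ksize(3, 2) == 5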
Example 4
def _convert_concat(converter: ChainerConverter,
                    c_op: "chainer.functions.Concat"):
    xs = [converter.get_variable(x) for x in c_op.inputs]

    for x1, x2 in combinations(xs, 2):
        unify_order(x1.order, x2.order)

    y, = Concat(None, axis=xs[0].order.axes[c_op.axis])(*xs)
    converter.set_variable(c_op.outputs[0](), y)
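
Note that c_op.axis is a positional index into the (already unified) input order, which the handler resolves to a symbolic axis: with NCHW inputs, for instance, a Chainer concat over axis=1 becomes a WebDNN concat over Axis.C.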
Example 5
def _convert_average_pooling2d(converter: ChainerConverter, c_op: "chainer.functions.AveragePooling2D"):
    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)

    pool_opr = AveragePooling2D(None,
                                ksize=(c_op.kh, c_op.kw),
                                stride=(c_op.sy, c_op.sx),
                                padding=(c_op.ph, c_op.pw))

    y, = pool_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
Example 6
def _convert_batch_normalization_function(
    converter: ChainerConverter, c_op:
    "chainer.functions.normalization.batch_normalization.BatchNormalizationFunction"
):
    x = converter.get_variable(c_op.inputs[0])
    unify(x.order.axes[0], Axis.N)
    unify(x.order.axes[1], Axis.C)

    gamma = converter.get_variable(c_op.inputs[1])
    unify_order(gamma.order, OrderC)

    beta = converter.get_variable(c_op.inputs[2])
    unify_order(beta.order, OrderC)

    if len(c_op.inputs) == 5:
        mean = converter.get_variable(c_op.inputs[3])
        unify_order(mean.order, OrderC)

        variance = converter.get_variable(c_op.inputs[4])
        unify_order(variance.order, OrderC)

    elif len(c_op.inputs) == 3:
        mean = 0 if c_op.running_mean is None else ConstantVariable(
            c_op.running_mean, OrderC)
        variance = 1 if c_op.running_var is None else ConstantVariable(
            c_op.running_var, OrderC)

    else:
        raise ValueError(
            "The number of inputs to BatchNormalizationFunction must be 3 or 5.")

    y = (x - mean) / ((variance + c_op.eps)**0.5) * gamma + beta
    converter.set_variable(c_op.outputs[0](), y)
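
The closing expression relies on Python's left-to-right evaluation: it computes gamma * (x - mean) / sqrt(variance + eps) + beta, the standard inference-time batch normalization. A quick scalar check of that precedence (illustration only):

import math

x, mean, var, gamma, beta, eps = 3.0, 1.0, 4.0, 2.0, 0.5, 1e-5
y = (x - mean) / ((var + eps) ** 0.5) * gamma + beta
assert math.isclose(y, gamma * (x - mean) / math.sqrt(var + eps) + beta)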
Example 7
def _convert_max_pooling2d(converter: ChainerConverter, c_op: "chainer.functions.MaxPooling2D"):
    if not c_op.cover_all:
        raise NotImplementedError("'cover_all=False' property in 'MaxPooling2D' is not supported.")

    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)

    pool_opr = MaxPooling2D(None,
                            ksize=(c_op.kh, c_op.kw),
                            stride=(c_op.sy, c_op.sx),
                            padding=(c_op.ph, c_op.pw))

    y, = pool_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
Example 8
def _convert_local_response_normalization(
    converter: ChainerConverter, c_op:
    "chainer.functions.normalization.local_response_normalization.LocalResponseNormalization"
):
    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)

    n_opr = LocalResponseNormalization(None,
                                       n=c_op.n,
                                       k=c_op.k,
                                       alpha=c_op.alpha,
                                       beta=c_op.beta)

    y, = n_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
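
For reference, local response normalization computes y[c] = x[c] / (k + alpha * sum of x[c']^2)^beta, where the sum runs over an n-channel window centered on c. A naive NumPy reference, assuming this Krizhevsky-style formulation (edge handling may differ from chainer's exact kernel):

import numpy as np

def lrn_nchw(x: np.ndarray, n: int, k: float, alpha: float, beta: float) -> np.ndarray:
    sq = x ** 2
    half = n // 2
    channels = x.shape[1]
    y = np.empty_like(x)
    for c in range(channels):
        lo, hi = max(0, c - half), min(channels, c + half + 1)
        y[:, c] = x[:, c] / (k + alpha * sq[:, lo:hi].sum(axis=1)) ** beta
    return y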
Example 9
def _convert_average_pooling2d(converter: ChainerConverter, c_op: "chainer.functions.AveragePooling2D"):
    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)

    pool_opr = AveragePooling2D(None,
                                ksize=(c_op.kh, c_op.kw),
                                stride=(c_op.sy, c_op.sx),
                                padding=(c_op.ph, c_op.pw))

    y, = pool_opr(x)

    if ((x.shape_dict[Axis.H] + c_op.ph * 2 - c_op.kh) % c_op.sy != 0) or ((x.shape_dict[Axis.W] + c_op.pw * 2 - c_op.kw) % c_op.sx != 0):
        console.warning(
            "[AveragePooling2D] AveragePooling2D in chainer is performed in cover_all=False mode. "
            "However, AveragePooling2D in WebDNN is always calculated in cover_all=True mode. "
            "Therefore the result may be different from chainer's output.")

    converter.set_variable(c_op.outputs[0](), y)
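
The modulo test on the input shape flags exactly the cases where floor-mode (cover_all=False) and ceil-mode (cover_all=True) pooling produce different output sizes; a sketch of the two formulas (plain arithmetic, not converter API):

import math

def pool_output_size(in_size: int, ksize: int, stride: int, padding: int, cover_all: bool) -> int:
    span = in_size + 2 * padding - ksize
    # cover_all=True rounds up so every input element is covered by some window.
    return (math.ceil(span / stride) if cover_all else span // stride) + 1

# The sizes agree iff (in_size + 2 * padding - ksize) % stride == 0:
assert pool_output_size(7, 3, 2, 0, False) == pool_output_size(7, 3, 2, 0, True) == 3
assert pool_output_size(8, 3, 2, 0, False) == 3 and pool_output_size(8, 3, 2, 0, True) == 4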
Example 10
def mean_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME: currently supports only the case where this Mean operation performs global average pooling, e.g.
    # (1, 7, 7, 2048) -> (1, 1, 1, 2048)
    assert tf_op.get_attr("keep_dims") is True

    in_var = converter.get_variable(tf_op.inputs[0])
    unify_order(in_var.order, OrderNHWC)  # FIXME: assumes the input order is NHWC
    out_tf_var = tf_op.outputs[0]
    in_shape = in_var.shape
    out_shape = [s.value for s in out_tf_var.shape.dims]
    assert len(in_shape) == len(out_shape)
    assert out_shape[1] == 1
    assert out_shape[2] == 1
    assert out_shape[0] == in_shape[0]
    assert out_shape[3] == in_shape[3]

    out_var, = AveragePooling2D(None, ksize=tuple(in_shape[1:3]), stride=tuple(in_shape[1:3]), padding=(0, 0))(in_var)
    converter.set_variable(out_tf_var, out_var)
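
This handler matches the Mean node that a TF1-style graph produces for global average pooling; a minimal sketch of the originating pattern (assuming the TensorFlow 1.x API, where the attribute is named keep_dims):

import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 7, 7, 2048])      # NHWC feature map
y = tf.reduce_mean(x, axis=[1, 2], keep_dims=True)   # -> (1, 1, 1, 2048)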
Example 11
def _convert_max_pooling2d(converter: ChainerConverter, c_op: "chainer.functions.MaxPooling2D"):
    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)

    pool_opr = MaxPooling2D(None,
                            ksize=(c_op.kh, c_op.kw),
                            stride=(c_op.sy, c_op.sx),
                            padding=(c_op.ph, c_op.pw))
    if not c_op.cover_all:
        console.warning(
            "[MaxPooling2D] MaxPooling2D in WebDNN is always calculated in cover_all=True mode. "
            "Therefore the result may be different from chainer's output.")

    y, = pool_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
Example 12
def _convert_batch_normalization_function(
    converter: ChainerConverter, c_op:
    "chainer.functions.normalization.batch_normalization.BatchNormalizationFunction"
):
    if chainer.__version__ >= "2.":
        # FIXME: Is it possible to detect in which mode this function was computed, train or test?
        pass

    else:
        if not c_op.test:
            raise NotImplementedError(
                "[ChainerConverter] BatchNormalization with train mode is not supported"
            )

    x = converter.get_variable(c_op.inputs[0])
    unify(x.order.axes[0], Axis.N)
    unify(x.order.axes[1], Axis.C)

    gamma = converter.get_variable(c_op.inputs[1])
    unify_order(gamma.order, OrderC)

    beta = converter.get_variable(c_op.inputs[2])
    unify_order(beta.order, OrderC)

    if len(c_op.inputs) == 5:
        mean = converter.get_variable(c_op.inputs[3])
        unify_order(mean.order, OrderC)

        variance = converter.get_variable(c_op.inputs[4])
        unify_order(variance.order, OrderC)

    elif len(c_op.inputs) == 3:
        mean = 0 if c_op.running_mean is None else ConstantVariable(
            c_op.running_mean, OrderC)
        variance = 1 if c_op.running_var is None else ConstantVariable(
            c_op.running_var, OrderC)
    else:
        raise ValueError(
            "The number of inputs to BatchNormalizationFunction must be 3 or 5.")

    y = (x - mean) / ((variance + c_op.eps)**0.5) * gamma + beta
    converter.set_variable(c_op.outputs[0](), y)
Example 13
def _convert_deconvolution_2d(converter: ChainerConverter, c_op: "chainer.functions.connection.deconvolution_2d.Deconvolution2DFunction"):
    x = converter.get_variable(c_op.inputs[0])
    w = converter.get_variable(c_op.inputs[1])

    unify_order(x.order, OrderNCHW)
    unify_order(w.order, OrderCNHW)

    deconv_opr = Deconvolution2D(None,
                                 ksize=(w.shape_dict[Axis.H], w.shape_dict[Axis.W]),
                                 stride=(c_op.sy, c_op.sx),
                                 padding=(c_op.ph, c_op.pw))

    y, = deconv_opr(x, w)

    if len(c_op.inputs) == 3:
        # with bias
        b = converter.get_variable(c_op.inputs[2])
        unify_order(b.order, OrderC)
        y = y + b

    converter.set_variable(c_op.outputs[0](), y)
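
For reference, the spatial output size of such a deconvolution is the inverse of the floor-mode convolution formula (plain arithmetic, not WebDNN API):

def deconv_output_size(in_size: int, ksize: int, stride: int, padding: int) -> int:
    # Inverse of: in_size == (out_size + 2 * padding - ksize) // stride + 1
    return stride * (in_size - 1) + ksize - 2 * padding

assert deconv_output_size(112, 4, 2, 1) == 224  # a common 2x upsampling configuration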
Example 14
def _convert_depth2space(converter: ChainerConverter,
                         c_op: "chainer.functions.Depth2Space"):
    x = converter.get_variable(c_op.inputs[0])
    unify_order(x.order, OrderNCHW)
    y, = Depth2Space(None, r=c_op.r)(x)
    converter.set_variable(c_op.outputs[0](), y)
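
Depth2Space with block size r rearranges an (N, C*r*r, H, W) tensor into (N, C, H*r, W*r). A NumPy sketch of one such rearrangement (illustration only; the exact channel-to-block layout may differ from chainer's kernel):

import numpy as np

def depth2space_nchw(x: np.ndarray, r: int) -> np.ndarray:
    n, c, h, w = x.shape
    assert c % (r * r) == 0
    x = x.reshape(n, c // (r * r), r, r, h, w)  # split channels into r*r blocks
    x = x.transpose(0, 1, 4, 2, 5, 3)           # interleave block rows/cols with H, W
    return x.reshape(n, c // (r * r), h * r, w * r)

assert depth2space_nchw(np.zeros((1, 8, 3, 3)), 2).shape == (1, 2, 6, 6)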
Example 15
def bias_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    unify_order(b.order, OrderC)
    y = x + b
    converter.set_variable(tf_op.outputs[0], y)