Example #1
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
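The example above handles 1D global average pooling by viewing the NTC input as an NHWC tensor with a singleton W axis and reusing the 2D pooling operator with ksize=(T, 1). A minimal NumPy sketch of that equivalence (shapes are made up purely for illustration, not taken from WebDNN):

import numpy as np

# Hypothetical shapes, chosen only for illustration.
N, T, C = 2, 5, 3
x = np.random.rand(N, T, C)

# Global average pooling over the time axis ...
reference = x.mean(axis=1)                        # shape (N, C)

# ... equals a (T, 1) average pool applied to the same data viewed as
# NHWC with a singleton W axis, followed by flattening to NC.
pooled = x.reshape(N, T, 1, C).mean(axis=(1, 2))  # shape (N, C)

assert np.allclose(reference, pooled)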
Example #2
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    y, = AveragePooling2D(None, ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]), stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #3
    def __call__(self, inputs: List[Variable]) -> Tuple[Variable]:
        pool_opr = AveragePooling2D(generate_unique_name(self.cfunc.label),
                                    ksize=(self.cfunc.kh, self.cfunc.kw),
                                    stride=(self.cfunc.sy, self.cfunc.sx),
                                    padding=(self.cfunc.ph, self.cfunc.pw))

        opr_out, = pool_opr(inputs[0])
        opr_out.change_order(OrderNCHW)

        return opr_out,
Example #4
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling1D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: more efficient implementation
    y, = Reshape(None, in_order=OrderNTC, out_order=OrderNHWC, out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z, = Reshape(None, in_order=y.order, out_order=OrderNC, out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #5
def _convert_average_pooling2d(converter: ChainerConverter,
                               c_op: chainer.functions.AveragePooling2D):
    x = converter.get_variable(c_op.inputs[0])

    pool_opr = AveragePooling2D(None,
                                ksize=(c_op.kh, c_op.kw),
                                stride=(c_op.sy, c_op.sx),
                                padding=(c_op.ph, c_op.pw))

    y, = pool_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
Example #6
def _convert_average_pooling1d(converter: KerasConverter, k_op: "keras.layers.AveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    ksize = (k_op.pool_size[0], 1)
    stride = (k_op.strides[0], 1)
    padding = (parse_padding(k_op.padding, ksize[0], 1)[0], 0)

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(y)
    z = y.reshape([y.shape[0], y.shape[1], y.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #7
def main(k, s, p, n, h1, w1, c1, expected_shape_dict: Dict[Axis, int]):
    orders_x = [OrderNHWC, OrderHWNC, OrderHWCN, OrderNCHW, OrderCNHW, OrderCHWN]

    for order_x in orders_x:
        op = AveragePooling2D(None, ksize=k, stride=s, padding=p)

        x = Variable((n, h1, w1, c1), OrderNHWC)
        x.change_order(order_x)

        y, = op(x)

        for axis in y.order.axes:
            assert y.shape_dict[axis] == expected_shape_dict[axis]
Example #8
def _convert_average_pooling2d(converter: ChainerConverter,
                               c_op: "chainer.functions.AveragePooling2D"):
    x = converter.get_variable(c_op.inputs[0])
    x.order.unify(OrderNCHW)

    pool_opr = AveragePooling2D(None,
                                ksize=(c_op.kh, c_op.kw),
                                stride=(c_op.sy, c_op.sx),
                                padding=(c_op.ph, c_op.pw),
                                cover_all=c_op.cover_all)

    y, = pool_opr(x)

    converter.set_variable(c_op.outputs[0](), y)
Example #9
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling2D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(f"[KerasConverter] Unknown data format: {k_op.data_format}")

    y, = AveragePooling2D(None, ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]), stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    z, = Reshape(None, in_order=y.order, out_order=OrderNC, out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #10
def _convert_max_pooling2d(converter: KerasConverter, k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    padding = (
        parse_padding(k_op.padding, k_op.pool_size[0], 1),
        parse_padding(k_op.padding, k_op.pool_size[1], 1)
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding)

    divide_without_padding = any(p > 0 for p in padding)
    # handling tensorflow style padding https://github.com/mil-tokyo/webdnn/issues/694

    y, = AveragePooling2D(None, ksize=k_op.pool_size, stride=k_op.strides, padding=padding, cover_all=False,
                          divide_without_padding=divide_without_padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #11
def _convert_average_pooling2d(converter: ChainerConverter, c_op: "chainer.functions.AveragePooling2D"):
    x = converter.get_variable(c_op.inputs[0])
    x.order.unify(OrderNCHW)

    pool_opr = AveragePooling2D(None,
                                ksize=(c_op.kh, c_op.kw),
                                stride=(c_op.sy, c_op.sx),
                                padding=(c_op.ph, c_op.pw))

    y, = pool_opr(x)

    if ((x.shape_dict[Axis.H] + c_op.ph * 2 - c_op.kh) % c_op.sy != 0) or \
            ((x.shape_dict[Axis.W] + c_op.pw * 2 - c_op.kw) % c_op.sx != 0):
        console.warning(
            "[AveragePooling2D] AveragePooling2D in chainer is performed as cover_all=False mode. "
            "However, AveragePooling2D in WebDNN is always calculated as cover_all=True mode. "
            "Therefore the result may be difference from chainer's output.")

    converter.set_variable(c_op.outputs[0](), y)
Example #12
def mean_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # FIXME: currently supports only the case where the operation amounts to global average pooling,
    # e.g. (1, 7, 7, 2048) -> (1, 1, 1, 2048)
    assert tf_op.get_attr("keep_dims") is True

    in_var = converter.get_variable(tf_op.inputs[0])
    unify_order(in_var.order, OrderNHWC)  # FIXME: assuming input order as NHWC
    out_tf_var = tf_op.outputs[0]
    in_shape = in_var.shape
    out_shape = [s.value for s in out_tf_var.shape.dims]
    assert len(in_shape) == len(out_shape)
    assert out_shape[1] == 1
    assert out_shape[2] == 1
    assert out_shape[0] == in_shape[0]
    assert out_shape[3] == in_shape[3]

    out_var, = AveragePooling2D(None, ksize=tuple(in_shape[1:3]), stride=tuple(in_shape[1:3]), padding=(0, 0))(in_var)
    converter.set_variable(out_tf_var, out_var)
Example #13
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    padding = (parse_padding(k_op.padding, ksize[0], 1),
               parse_padding(k_op.padding, ksize[1], 1))

    if k_op.padding == "same":
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling computes average by dividing number of valid elements in window "
            "(without padding element), but WebDNN divides it by the number of elements including padding element, so different "
            "result will be generated on the edge.")

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
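The warning in this example describes a concrete numerical difference at padded borders: according to the message, Keras divides each window sum by the number of valid (non-padded) elements, while WebDNN divides by the full window size. A small sketch of the two conventions for one window that overlaps a single column of zero padding (numbers are made up for illustration):

# A row [1.0, 2.0, 3.0] pooled with ksize=2, stride=2 needs one column of
# right padding, so the second window covers [3.0, <pad>].
window_sum = 3.0 + 0.0

keras_style = window_sum / 1   # divide by valid elements only -> 3.0
webdnn_style = window_sum / 2  # divide by the full window size -> 1.5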
Example #14
def _convert_average_pooling1d(converter: KerasConverter, k_op: keras.layers.AveragePooling1D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: more efficient implementation
    y, = Reshape(None, in_order=x.order, out_order=OrderNHWC, out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)

    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (k_op.pool_size[0] // 2, k_op.pool_size[0] // 2)

    else:
        raise NotImplementedError(f"Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None, ksize=(k_op.pool_size[0], 1), stride=(1, 1), padding=padding)(y)
    z, = Reshape(None, in_order=y.order, out_order=OrderNTC, out_shape=[y.shape[0], y.shape[1], y.shape[3]])(y)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #15
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.data_format == "channels_first":
        x.order.unify(OrderNCHW)

    elif k_op.data_format == "channels_last":
        x.order.unify(OrderNHWC)

    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        # https://www.tensorflow.org/api_guides/python/nn#convolution
        if x.shape_dict[Axis.H] % stride[0] == 0:
            pad_h = max(ksize[0] - stride[0], 0)
        else:
            pad_h = max(ksize[0] - (x.shape_dict[Axis.H] % stride[0]), 0)

        if x.shape_dict[Axis.W] % stride[1] == 0:
            pad_w = max(ksize[1] - stride[1], 0)
        else:
            pad_w = max(ksize[1] - (x.shape_dict[Axis.W] % stride[1]), 0)

        padding = (pad_h // 2, pad_w // 2)
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling computes average by dividing number of valid elements in window "
            "(without padding element), but WebDNN divides it by the number of elements including padding element, so different "
            "result will be generated on the edge.")

    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
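The 'same' branch above reproduces TensorFlow's padding rule with plain integer arithmetic. A short trace with made-up numbers (H=7, pool size 3, stride 2) shows how pad_h and the final symmetric padding come out; the same steps apply to the W axis:

# Hypothetical values, only to trace the 'same'-padding arithmetic above.
H, ksize_h, stride_h = 7, 3, 2

if H % stride_h == 0:
    pad_h = max(ksize_h - stride_h, 0)        # e.g. H=8, k=2, s=2 -> 0
else:
    pad_h = max(ksize_h - (H % stride_h), 0)  # 7 % 2 = 1 -> max(3 - 1, 0) = 2

padding_h = pad_h // 2                        # 2 // 2 = 1 row of padding per side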
Example #16
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    x, padding = convolution_handler_preprocess(x,
                                                ksize=k_op.pool_size,
                                                padding=k_op.padding,
                                                dilation_rate=(1, 1),
                                                data_format=k_op.data_format)
    if any(p > 0 for p in padding):
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling computes average by dividing number of valid elements in window "
            "(without padding element), but WebDNN divides it by the number of elements including padding element, so different "
            "result will be generated on the edge.")

    y, = AveragePooling2D(None,
                          ksize=k_op.pool_size,
                          stride=k_op.strides,
                          padding=padding,
                          cover_all=False)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #17
def template(x_order=OrderNHWC, y_order=OrderNHWC, description: str = ""):
    vx = np.random.rand(2, 4, 6, 8)
    vy = np.empty((2, 2, 3, 8))
    KH, KW = (2, 2)
    SH, SW = (2, 2)
    PH, PW = (0, 0)

    for n, h2, w2, c in itertools.product(range(vy.shape[0]),
                                          range(vy.shape[1]),
                                          range(vy.shape[2]),
                                          range(vy.shape[3])):
        v = 0
        for (kh, kw) in itertools.product(range(KH), range(KW)):
            h1 = (h2 * SH - PH) + kh
            w1 = (w2 * SW - PW) + kw

            v += 0 if (h1 < 0 or h1 >= 4 or w1 < 0 or w1 >= 6) else vx[n, h1, w1, c]

        vy[n, h2, w2, c] = v / (KH * KW)

    x = Variable(vx.shape, order=x_order)
    y, = AveragePooling2D(None,
                          ksize=(KH, KW),
                          stride=(SH, SW),
                          padding=(PH, PW))(x)
    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Average Pooling {description}",
        backend=["webgpu", "webgl", "webassembly", "fallback"],
        graph=Graph([x], [y]),
        inputs={
            x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])
        },
        expected={
            y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])
        },
    )
Example #18
    def convert_layer_averagepooling2d(self, layer_config: Dict[str, object],
                                       inputs: List[Variable]) -> List[Variable]:
        """
        Example:
 {'class_name': 'AveragePooling2D',
  'config': {'data_format': 'channels_last',
   'name': 'avg_pool',
   'padding': 'valid',
   'pool_size': [7, 7],
   'strides': [7, 7],
   'trainable': True},
  'inbound_nodes': [[['activation_49', 0, 0, {}]]],
  'name': 'avg_pool'},

        :param layer_config: 
        :param inputs: 
        :return: 
        """
        assert len(inputs) == 1
        input = inputs[0]
        name: str = layer_config["name"]
        ksize: Tuple[int, int] = tuple(layer_config["pool_size"])
        stride: Tuple[int, int] = tuple(layer_config["strides"])
        padding_keras: str = layer_config["padding"]  # valid or same
        if padding_keras == "valid":
            padding = (0, 0)
        elif padding_keras == "same":
            padding = (ksize[0] // 2, ksize[1] // 2)
        else:
            raise ValueError("Unknown padding")
        # NOTE: ksize is overridden with the input's full spatial extent (H, W), discarding the configured pool_size
        ksize = (input.shape_dict[Axis.H], input.shape_dict[Axis.W])

        average_pooling_2d_opr = AveragePooling2D(name,
                                                  ksize=ksize,
                                                  stride=stride,
                                                  padding=padding)
        y, = average_pooling_2d_opr(input)

        return [y]