Example #1
def _convert_conv2d(converter: KerasConverter, k_op: "keras.layers.Conv2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.C, Axis.N]))

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    padding = (parse_padding(k_op.padding, ksize[0], dilation_rate[0]),
               parse_padding(k_op.padding, ksize[1], dilation_rate[1]))

    y, = Convolution2D(None,
                       ksize=ksize,
                       stride=stride,
                       padding=padding,
                       dilation_rate=dilation_rate)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
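The kernel Order above matches how Keras lays out Conv2D weights, (KH, KW, C_in, C_out), read here as Order([Axis.KH, Axis.KW, Axis.C, Axis.N]); check_data_format, parse_padding and do_activation are helpers defined elsewhere in the WebDNN Keras frontend. A minimal sketch, assuming a standard Keras/TensorFlow install, that verifies the layout this ordering relies on:

import keras

layer = keras.layers.Conv2D(filters=8, kernel_size=(3, 3), padding="same")
layer.build(input_shape=(None, 32, 32, 4))   # NHWC input with 4 channels
print(layer.kernel.shape)                    # -> (3, 3, 4, 8) == (KH, KW, C, N)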
Example #2
def _convert_conv2d_transpose(converter: KerasConverter,
                              k_op: "keras.layers.Conv2DTranspose"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    if dilation_rate != (1, 1):
        raise NotImplementedError(
            "[KerasConverter] keras.layers.Convolution2DTranspose with large dilation_rate is not supported"
        )

    padding = (parse_padding(k_op.padding, ksize[0], dilation_rate[0]),
               parse_padding(k_op.padding, ksize[1], dilation_rate[1]))

    y, = Deconvolution2D(None, ksize=ksize, stride=stride,
                         padding=padding)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
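Note that the kernel Order is swapped relative to the Conv2D handler: Keras stores Conv2DTranspose weights as (KH, KW, C_out, C_in), hence Order([Axis.KH, Axis.KW, Axis.N, Axis.C]). A minimal sketch, assuming a standard Keras/TensorFlow install:

import keras

layer = keras.layers.Conv2DTranspose(filters=8, kernel_size=(3, 3))
layer.build(input_shape=(None, 32, 32, 4))   # NHWC input with 4 channels
print(layer.kernel.shape)                    # -> (3, 3, 8, 4) == (KH, KW, C_out, C_in)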
Example #3
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    padding = (parse_padding(k_op.padding, ksize[0], 1),
               parse_padding(k_op.padding, ksize[1], 1))

    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
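parse_padding maps Keras' "valid"/"same" strings to explicit per-axis pad amounts; its implementation lives elsewhere in the WebDNN frontend. The sketch below reimplements the usual arithmetic for reference (the helper names and behaviour here are an assumption, not WebDNN's code):

def same_padding(ksize: int, dilation: int = 1) -> int:
    # Hypothetical stand-in for parse_padding("same", ksize, dilation):
    # pad so that a stride-1 window keeps the spatial size unchanged.
    effective_ksize = (ksize - 1) * dilation + 1
    return (effective_ksize - 1) // 2

def pooled_size(in_size: int, ksize: int, stride: int, pad: int) -> int:
    # Standard output-size formula for 2D pooling/convolution.
    return (in_size + 2 * pad - ksize) // stride + 1

assert pooled_size(224, ksize=2, stride=2, pad=0) == 112                # "valid" 2x2/2 pooling
assert pooled_size(224, ksize=3, stride=1, pad=same_padding(3)) == 224  # "same" keeps the size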
Example #4
def _convert_global_max_pooling2d(converter: KerasConverter,
                                  k_op: "keras.layers.GlobalMaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    y, = MaxPooling2D(None,
                      ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]),
                      stride=(1, 1),
                      padding=(0, 0))(x)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
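There is no dedicated global-pooling operator in this path, so the handler runs MaxPooling2D with a window covering the whole H x W extent and then flattens the resulting (N, 1, 1, C) tensor to OrderNC. A small NumPy sketch of the same computation:

import numpy as np

x = np.random.rand(2, 7, 5, 3)             # NHWC
y = x.max(axis=(1, 2), keepdims=True)      # one H x W window per channel -> (2, 1, 1, 3)
z = y.reshape(y.shape[0], -1)              # flatten to OrderNC without reordering data
assert z.shape == (2, 3)
assert np.allclose(z, x.max(axis=(1, 2)))  # identical to Keras GlobalMaxPooling2D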
Example #5
def _convert_average_pooling2d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    padding = (parse_padding(k_op.padding, ksize[0], 1),
               parse_padding(k_op.padding, ksize[1], 1))

    if k_op.padding == "same":
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling computes average by dividing number of valid elements in window "
            "(without padding element), but WebDNN divides it by the number of elements including padding element, so different "
            "result will be generated on the edge.")

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
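The warning concerns the divisor used for windows that overlap the padding: Keras averages over the valid elements only, while WebDNN divides by the full window size. A tiny 1-D NumPy sketch of the discrepancy:

import numpy as np

row = np.array([1.0, 2.0, 3.0])                       # pool_size=2, stride=2, padding="same"
# windows: [1, 2] and [3, <pad>]
keras_style = np.array([(1 + 2) / 2, 3 / 1])          # divide by the number of valid elements
webdnn_style = np.array([(1 + 2) / 2, (3 + 0) / 2])   # divide by the full window size
print(keras_style, webdnn_style)                      # [1.5 3. ] vs [1.5 1.5]: they differ at the edge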
Example #6
def _convert_zero_padding2d(converter: KerasConverter,
                            k_op: "keras.layers.ZeroPadding2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    padding = k_op.padding
    top = padding[0][0]
    if top != padding[0][1]:
        # FIXME: This condition should be checked in each backend
        raise NotImplementedError(
            "[KerasConverter] In current implementation, Padding size of top and bottom must be same."
        )

    left = padding[1][0]
    if left != padding[1][1]:
        # FIXME: This condition should be checked in each backend
        raise NotImplementedError(
            "[KerasConverter] In current implementation, Padding size of left and right must be same."
        )

    y, = ZeroPadding2D(None, (top, left))(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
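Keras normalises ZeroPadding2D's padding argument to ((top, bottom), (left, right)), so the checks above only accept the symmetric case. A quick sketch, assuming a standard Keras install:

import keras

sym = keras.layers.ZeroPadding2D(padding=1)
print(sym.padding)        # ((1, 1), (1, 1)): accepted by the handler above

asym = keras.layers.ZeroPadding2D(padding=((1, 0), (2, 2)))
print(asym.padding)       # ((1, 0), (2, 2)): top != bottom, so the handler raises NotImplementedError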
Example #7
def _convert_separable_conv2d(converter: KerasConverter,
                              k_op: "keras.layers.SeparableConv2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)
    axis_c_in = Axis.C
    axis_c_out = Axis()
    axis_depth_multiplier = Axis()

    w_depthwise = converter.convert_to_constant_variable(
        k_op.depthwise_kernel,
        Order([Axis.KH, Axis.KW, axis_c_in, axis_depth_multiplier]))

    w_pointwise = converter.convert_to_constant_variable(
        k_op.pointwise_kernel,
        Order([Axis.KH, Axis.KW, axis_c_in, axis_c_out]))
    w_pointwise = w_pointwise.reshape(
        shape=[
            x.shape_dict[axis_c_in], k_op.depth_multiplier,
            w_pointwise.shape_dict[axis_c_out]
        ],
        order=Order([axis_c_in, axis_depth_multiplier, axis_c_out]))

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    padding = (parse_padding(k_op.padding, ksize[0], dilation_rate[0]),
               parse_padding(k_op.padding, ksize[1], dilation_rate[1]))

    h, = Im2Col(None,
                ksize=ksize,
                stride=stride,
                padding=padding,
                dilation_rate=dilation_rate)(x)

    # TODO: Support depth-wise convolution natively
    # Currently, depth-wise convolution is not supported natively, so it is emulated by a composition of smaller convolution operations.
    ys = []
    for i in range(h.shape_dict[axis_c_in]):
        # 1. Depthwise convolution
        #
        # Ideal                             | Current implementation
        # ----------------------------------+----------------------------------------------------
        # h.axes=[N, H, W, KH, KW, C_in]    | h_sub.axes=[N, H, W, KH, KW]
        # w.axes=[KH, KW, C_in, DM]         | w_sub.axes=[KH, KW, DM]
        # g.axes=[N, H, W, C_in, DM]        | g_sub.axes=[N, H, W, DM]

        h_sub, = Slice(
            None,
            indices=AxisKeyDict(
                h.order.axes,
                [i if a == axis_c_in else slice(None)
                 for a in h.order.axes]))(h)
        w_depthwise_sub = w_depthwise[:, :, i, :]
        g_sub, = Tensordot(None,
                           axes=((Axis.KH, Axis.KW),
                                 (Axis.KH, Axis.KW)))(h_sub, w_depthwise_sub)

        # 2. Pointwise (projection) convolution
        #
        # Ideal                             | Current implementation
        # ----------------------------------+----------------------------------------------------
        # g.axes=[N, H, W, C_in, DM]        | g_sub.axes=[N, H, W, DM]
        # w.axes=[DM, C_in, C_out]          | w_sub.axes=[DM, C_out]
        # y.axes=[N, H, W, C_out]           | y_sub.axes=[N, H, W, C_out]

        w_pointwise_sub = w_pointwise[i, :, :]
        y_sub, = Tensordot(None,
                           axes=((axis_depth_multiplier,),
                                 (axis_depth_multiplier,)))(g_sub, w_pointwise_sub)
        ys.append(y_sub)

    # Sum the per-channel partial results into a single variable (pairwise reduction)
    while len(ys) > 1:
        ys.append(ys.pop(0) + ys.pop(0))

    y = ys[0]

    # reinterpret axis "C_out" as C
    axes = list(y.order.axes)
    i = axes.index(axis_c_out)
    axes.pop(i)
    axes.insert(i, Axis.C)
    y = y.reinterpret_axes(Order(axes))

    if k_op.data_format == "channels_last":
        y = y.transpose(OrderNHWC)

    elif k_op.data_format == "channels_first":
        y = y.transpose(OrderNCHW)

    else:
        raise NotImplementedError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
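Keras stores the pointwise kernel with the C_in and depth-multiplier axes fused into one dimension, which is why the handler reshapes it to [C_in, DM, C_out] before slicing per input channel. A sketch of the Keras-side shapes, assuming a standard Keras/TensorFlow install:

import keras

layer = keras.layers.SeparableConv2D(filters=16, kernel_size=(3, 3), depth_multiplier=2)
layer.build(input_shape=(None, 32, 32, 4))     # NHWC input, C_in = 4
print(layer.depthwise_kernel.shape)            # -> (3, 3, 4, 2)  == (KH, KW, C_in, DM)
print(layer.pointwise_kernel.shape)            # -> (1, 1, 8, 16) == (1, 1, C_in * DM, C_out)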