Example #1
def _convert_average_pooling2d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (ksize[0] // 2, ksize[1] // 2)
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling2D with 'same' padding divides the summed values in each "
            "window by the number of valid elements, but WebDNN divides them by the number of elements including "
            "zero padding, so results will differ at the edges.")

    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #2
def _convert_batch_normalization(converter: KerasConverter,
                                 k_op: keras.layers.BatchNormalization):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    axis = x.order.axes[k_op.axis]

    variance_data, mean_data = K.batch_get_value(
        [k_op.moving_variance, k_op.moving_mean])

    if k_op.scale:
        gamma_data, = K.batch_get_value([k_op.gamma])
    else:
        gamma_data = np.ones_like(variance_data)

    if k_op.center:
        beta_data, = K.batch_get_value([k_op.beta])
    else:
        beta_data = np.zeros_like(mean_data)

    gamma_div_std_data = gamma_data / np.sqrt(variance_data + k_op.epsilon)
    beta_scaled_data = beta_data - mean_data * gamma_div_std_data

    gamma_div_std = ConstantVariable(gamma_div_std_data, Order([axis]))
    beta_scaled = ConstantVariable(beta_scaled_data, Order([axis]))

    y, = AxiswiseScale(None, axis=axis)(x, gamma_div_std)
    y, = AxiswiseBias(None, axis=axis)(y, beta_scaled)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
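
The folding above rests on a small identity: gamma * (x - mean) / sqrt(var + eps) + beta equals x * s + t, where s = gamma / sqrt(var + eps) and t = beta - mean * s. A minimal NumPy check of that identity (not part of WebDNN):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 4))
mean, var = rng.standard_normal(4), rng.random(4) + 0.1
gamma, beta, eps = rng.standard_normal(4), rng.standard_normal(4), 1e-3

# direct inference-mode batch normalization
direct = gamma * (x - mean) / np.sqrt(var + eps) + beta

# folded into one axiswise scale and one axiswise bias, as in the converter above
gamma_div_std = gamma / np.sqrt(var + eps)
beta_scaled = beta - mean * gamma_div_std
folded = x * gamma_div_std + beta_scaled

assert np.allclose(direct, folded)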
Example #3
def _convert_conv2d_transpose(converter: KerasConverter,
                              k_op: "keras.layers.Conv2DTranspose"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    if dilation_rate != (1, 1):
        raise NotImplementedError(
            "[KerasConverter] keras.layers.Conv2DTranspose with dilation_rate != (1, 1) is not supported"
        )

    padding = (parse_padding(k_op.padding, ksize[0], dilation_rate[0]),
               parse_padding(k_op.padding, ksize[1], dilation_rate[1]))

    y, = Deconvolution2D(None, ksize=ksize, stride=stride, padding=padding)(x, w)
    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #4
def _convert_conv2d(converter: KerasConverter, k_op: "keras.layers.Conv2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.C, Axis.N]))

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    padding = (parse_padding(k_op.padding, ksize[0], dilation_rate[0]),
               parse_padding(k_op.padding, ksize[1], dilation_rate[1]))

    y, = Convolution2D(None,
                       ksize=ksize,
                       stride=stride,
                       padding=padding,
                       dilation_rate=dilation_rate)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #5
def _convert_conv2d(converter: KerasConverter, k_op: "keras.layers.Conv2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(f"[KerasConverter] Unknown data format is detected: {k_op.data_format}")

    w = converter.convert_to_constant_variable(k_op.kernel, OrderHWCN)

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)
    dilation_rate = tuple(k_op.dilation_rate)
    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (ksize[0] // 2, ksize[1] // 2)

    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = Convolution2D(None, ksize=ksize, stride=stride, padding=padding, dilation_rate=dilation_rate)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #6
def _convert_conv2d(converter: KerasConverter, k_op: "keras.layers.Conv2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.C, Axis.N]))

    x, padding = convolution_handler_preprocess(
        x,
        ksize=k_op.kernel_size,
        padding=k_op.padding,
        dilation_rate=k_op.dilation_rate,
        data_format=k_op.data_format)
    y, = Convolution2D(None,
                       ksize=k_op.kernel_size,
                       stride=k_op.strides,
                       padding=padding,
                       dilation_rate=k_op.dilation_rate)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #7
def convert_layer_global_average_pooling2d(
        converter: KerasConverter,
        k_op: "keras.layers.GlobalAveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    y, = AveragePooling2D(None,
                          ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]),
                          stride=(1, 1),
                          padding=(0, 0))(x)

    # flatten without changing memory layout
    z, = Reshape(None,
                 in_order=y.order,
                 out_order=OrderNC,
                 out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #8
def _convert_concatenate(converter: KerasConverter, k_op: "keras.layers.Concatenate"):
    xs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y, = Concat(None, axis=xs[0].order.axes[k_op.axis])(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #9
def _convert_average(converter: KerasConverter, k_op: "keras.layers.Average"):
    xs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y = ElementwiseAdd(None)(*xs)[0] / len(xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #10
def _convert_reshape(converter: KerasConverter, k_op: "keras.layers.Reshape"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    target_shape = [x.shape[0]] + list(k_op.target_shape)
    if len(target_shape) == 2:
        target_order = OrderNC

    elif len(target_shape) == 3:
        target_order = OrderNTC

    elif len(target_shape) == 4:
        target_order = OrderNHWC

    else:
        raise NotImplementedError(
            f"[KerasConverter] Unknown default order: shape={target_shape}")

    console.warning(
        "[KerasConverter] keras.layers.Reshape is parsed assuming the new data order is the default order (OrderNC "
        "in 2D, OrderNTC in 3D, OrderNHWC in 4D). To change this behavior, please override the keras.layers.Reshape "
        "converter handler.")

    y, = Reshape(None,
                 in_order=x.order,
                 out_order=target_order,
                 out_shape=target_shape)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #11
def _convert_average_pooling1d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More efficient implementation
    y, = Reshape(None,
                 in_order=x.order,
                 out_order=OrderNHWC,
                 out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)

    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (k_op.pool_size[0] // 2, 0)

    else:
        raise NotImplementedError(f"Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None,
                          ksize=(k_op.pool_size[0], 1),
                          stride=(k_op.strides[0], 1),
                          padding=padding)(y)
    z, = Reshape(None,
                 in_order=y.order,
                 out_order=OrderNTC,
                 out_shape=[y.shape[0], y.shape[1], y.shape[3]])(y)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #12
def _convert_subtract(converter: KerasConverter,
                      k_op: "keras.layers.Subtract"):
    x0 = converter.get_variable(converter.get_input_tensor(k_op)[0])
    x1 = converter.get_variable(converter.get_input_tensor(k_op)[1])
    x0.order.unify(x1.order)

    converter.set_variable(converter.get_output_tensor(k_op)[0], x0 - x1)
Example #13
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (ksize[0] // 2, ksize[1] // 2)

    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #14
def _convert_multiply(converter: KerasConverter, k_op: "keras.layers.Multiply"):
    xs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y, = ElementwiseMul(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #15
def _convert_model(converter: KerasConverter, k_op: keras.models.Model):
    graph = converter.convert(k_op)

    # Initial state of nested model
    #
    #    Global Model : [layer] -> tensor(A) -> [...........Model..........] -> tensor(C) -> [layer] ->
    #                 :
    #     Local Model :            tensor(B) -> [layer] -> tensor -> [layer] -> tensor(D)
    #

    # 1. Replace local input variable (converted from tensor(B)) into global input variable (converted from tensor(A))
    #
    #    Global Model : [layer] -> tensor(A) -> [...........Model..........] -> tensor(C) -> [layer] ->
    #                 :             |
    #     Local Model :             +---------> [layer] -> tensor -> [layer] -> tensor(D)
    #
    global_inputs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for global_variable, local_variable in zip(global_inputs, graph.inputs):
        local_variable.replace(global_variable)

    # 2. Register local output variable (converted from tensor(D)) as the variable converted from tensor(C)
    #
    #    Global Model : [layer] -> tensor(A)                                     +---------> [layer] ->
    #                 :             |                                            |
    #     Local Model :             +---------> [layer] -> tensor -> [layer] -> tensor(D)
    #
    global_outputs = converter.get_output_tensor(k_op)
    for global_tensor, local_variable in zip(global_outputs, graph.outputs):
        converter.set_variable(global_tensor, local_variable)
Example #16
def _convert_repeat_vector(converter: KerasConverter,
                           k_op: "keras.layers.RepeatVector"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    assert x.order == OrderNC, f"[KerasConverter] Currently only OrderNC is supported for input variable order of " \
                               f"keras.layers.RepeatVector: x.order={x.order}"

    N = x.shape_dict[Axis.N]
    n = k_op.n
    C = x.shape_dict[Axis.C]

    # TODO: Implement more efficient version
    # ex) x.shape=(N=2, C=3), n=2
    #
    #  x(N, C)  *      w(C, n*C)     =      y(N, n*C)     =       y(N, n, C)
    # -----------------------------------------------------------------------------
    # [1, 2, 3]   [1, 0, 0, 1, 0, 0]   [1, 2, 3, 1, 2, 3]   [[1, 2, 3], [1, 2, 3]]
    # [4, 5, 6] * [0, 1, 0, 0, 1, 0] = [4, 5, 6, 4, 5, 6] = [[4, 5, 6], [4, 5, 6]]
    #             [0, 0, 1, 0, 0, 1]
    #

    w = ConstantVariable(np.tile(np.eye(C), (1, n)), OrderCN)

    y, = Linear(None)(x, w)
    y, = Reshape(None,
                 in_order=OrderNC,
                 out_order=OrderNTC,
                 out_shape=[N, n, C])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
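
A quick NumPy check of the tiled-identity trick above (not part of WebDNN), using the shapes from the comment (N=2, C=3, n=2):

import numpy as np

N, C, n = 2, 3, 2
x = np.array([[1., 2., 3.], [4., 5., 6.]])  # (N, C)
w = np.tile(np.eye(C), (1, n))              # (C, n*C) = [I | I]

y = (x @ w).reshape(N, n, C)
assert (y == np.stack([x, x], axis=1)).all()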
Example #17
def _convert_zero_padding1d(converter: KerasConverter, k_op: "keras.layers.ZeroPadding1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    x.order.unify(OrderNTC)

    y, = ZeroPadding1D(None, padding=tuple(k_op.padding))(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #18
def _convert_average_pooling1d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More efficient implementation
    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    ksize = (k_op.pool_size[0], 1)
    stride = (k_op.strides[0], 1)

    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        # https://www.tensorflow.org/api_guides/python/nn#convolution
        if x.shape_dict[Axis.H] % stride[0] == 0:
            padding = (max(ksize[0] - stride[0], 0) // 2, 0)
        else:
            padding = (max(ksize[0] - (x.shape_dict[Axis.H] % stride[0]), 0) // 2, 0)

    else:
        raise NotImplementedError(f"Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(y)
    z = y.reshape([y.shape[0], y.shape[1], y.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
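
The "same" branch above follows the TensorFlow SAME rule linked in the comment. A standalone sketch of that rule (an assumption mirroring the code above, not a WebDNN API):

def same_padding(input_size: int, ksize: int, stride: int) -> int:
    """Per-side padding under the TensorFlow SAME rule (total padding halved)."""
    if input_size % stride == 0:
        return max(ksize - stride, 0) // 2
    return max(ksize - (input_size % stride), 0) // 2

# e.g. a length-7 sequence pooled with pool_size=3, stride=2 needs 1 element of padding
assert same_padding(7, 3, 2) == 1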
Example #19
def _convert_add(converter: KerasConverter, k_op: "keras.layers.Add"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]

    y, = ElementwiseAdd(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #20
def _convert_average(converter: KerasConverter, k_op: "keras.layers.Average"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]

    # FIXME: More efficient implementation
    y = ElementwiseAdd(None)(*xs)[0] / len(xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #21
def _convert_reshape(converter: KerasConverter, k_op: "keras.layers.Reshape"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    target_shape = [x.shape[0]] + list(k_op.target_shape)
    # noinspection PyTypeChecker
    target_order = Order([x.order.axes[0]] + [None] * len(k_op.target_shape))

    y = x.reshape(target_shape, target_order)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #22
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    y, = AveragePooling2D(None, ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]), stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #23
def _convert_repeat_vector(converter: KerasConverter, k_op: "keras.layers.RepeatVector"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    new_axis = Axis()
    multiplier = AxisKeyDict(x.order.axes, [1, 1])
    multiplier[new_axis] = k_op.n

    x = x.reshape(shape=(x.shape[0], 1, x.shape[1]), order=Order([x.order.axes[0], new_axis, x.order.axes[1]]))
    y, = Tile(None, multiplier=multiplier)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
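
The same RepeatVector semantics in plain NumPy (not part of WebDNN): insert a length-1 axis, then tile it n times:

import numpy as np

x = np.array([[1., 2., 3.], [4., 5., 6.]])  # (N=2, C=3)
y = np.tile(x[:, None, :], (1, 2, 1))       # repeat n=2 times along the new axis
assert y.shape == (2, 2, 3) and (y[:, 0] == x).all() and (y[:, 1] == x).all()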
Example #24
def _convert_flatten(converter: KerasConverter, k_op: "keras.layers.Flatten"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # flatten without changing memory layout
    y, = Reshape(None,
                 in_order=x.order,
                 out_order=OrderNC,
                 out_shape=[x.shape[0], mul(x.shape[1:])])(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
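
The mul helper used above is, presumably, a product over the remaining dimension sizes; a minimal sketch of that assumption:

from functools import reduce
from operator import mul as _mul

def mul(xs):
    """Product of an iterable of dimension sizes; 1 for an empty iterable."""
    return reduce(_mul, xs, 1)

assert mul([3, 4, 5]) == 60  # e.g. flattening (N, 3, 4, 5) -> (N, 60)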
Example #25
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #26
def _convert_leaky_relu(converter: KerasConverter,
                        k_op: "keras.layers.LeakyReLU"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.alpha == 0:
        y, = Relu(None)(x)
    else:
        y, = LeakyRelu(None, slope=k_op.alpha)(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
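
For reference, the activation applied above, in plain NumPy (not part of WebDNN): y = x for x > 0 and alpha * x otherwise, so alpha == 0 reduces to an ordinary ReLU:

import numpy as np

def leaky_relu(x, alpha):
    return np.where(x > 0, x, alpha * x)

x = np.array([-2.0, -0.5, 0.0, 1.5])
assert (leaky_relu(x, 0.0) == np.maximum(x, 0.0)).all()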
Example #27
def _convert_embedding(converter: KerasConverter,
                       k_op: "keras.layers.Embedding"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    x = x.reinterpret_axes(OrderNT)

    w = converter.convert_to_constant_variable(k_op.embeddings, OrderCN)

    y, = Embedding(None)(x, w)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #28
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling1D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More efficient implementation
    y, = Reshape(None, in_order=OrderNTC, out_order=OrderNHWC, out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z, = Reshape(None, in_order=y.order, out_order=OrderNC, out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Example #29
def _convert_embedding(converter: KerasConverter, k_op: keras.layers.Embedding):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if x.order == OrderNC:
        x, = ReinterpretAxis(None, in_order=OrderNC, out_order=OrderNT)(x)

    w = converter.convert_to_constant_variable(k_op.embeddings, OrderCN)

    y, = Embedding(None)(x, w)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Example #30
def _convert_thresholded_relu(converter: KerasConverter,
                              k_op: "keras.layers.ThresholdedReLU"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.theta == 0:
        y, = Relu(None)(x)
    else:
        y, = ThresholdRelu(None, threshold=k_op.theta)(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)