コード例 #1
0
def _convert_batch_normalization(converter: KerasConverter,
                                 k_op: keras.layers.BatchNormalization):
    """Convert keras.layers.BatchNormalization into scale + bias operators.

    At inference time batch normalization is an affine transform along one
    axis:

        y = gamma / sqrt(var + eps) * x + (beta - mean * gamma / sqrt(var + eps))

    so it is folded into two constant vectors and applied with
    AxiswiseScale followed by AxiswiseBias.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    target_axis = x.order.axes[k_op.axis]

    variance, mean = K.batch_get_value(
        [k_op.moving_variance, k_op.moving_mean])

    # gamma/beta are optional in Keras; substitute identity values when the
    # layer was built without them.
    gamma = K.batch_get_value([k_op.gamma])[0] if k_op.scale else np.ones_like(variance)
    beta = K.batch_get_value([k_op.beta])[0] if k_op.center else np.zeros_like(mean)

    scale_data = gamma / np.sqrt(variance + k_op.epsilon)
    bias_data = beta - mean * scale_data

    scale = ConstantVariable(scale_data, Order([target_axis]))
    bias = ConstantVariable(bias_data, Order([target_axis]))

    y, = AxiswiseScale(None, axis=target_axis)(x, scale)
    y, = AxiswiseBias(None, axis=target_axis)(y, bias)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #2
0
def _convert_repeat_vector(converter: KerasConverter,
                           k_op: "keras.layers.RepeatVector"):
    """Convert keras.layers.RepeatVector.

    The repeat is expressed as a matrix product with a tiled identity
    matrix followed by a reshape to (N, n, C).
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    assert x.order == OrderNC, f"[KerasConverter] Currently only OrderNC is supported for input variable order of " \
                               f"keras.layers.RepeatVector: x.order={x.order}"

    batch_size = x.shape_dict[Axis.N]
    repeats = k_op.n
    channels = x.shape_dict[Axis.C]

    # TODO: Implement more efficient version
    # ex) x.shape=(N=2, C=3), n=2
    #
    #  x(N, C)  *      w(C, n*C)     =      y(N, n*C)     =       y(N, n, C)
    # -----------------------------------------------------------------------------
    # [1, 2, 3]   [1, 0, 0, 1, 0, 0]   [1, 2, 3, 1, 2, 3]   [[1, 2, 3], [1, 2, 3]]
    # [4, 5, 6] * [0, 1, 0, 0, 1, 0] = [4, 5, 6, 4, 5, 6] = [[4, 5, 6], [4, 5, 6]]
    #             [0, 0, 1, 0, 0, 1]
    #
    repeat_matrix = ConstantVariable(np.tile(np.eye(channels), (1, repeats)), OrderCN)

    y, = Linear(None)(x, repeat_matrix)
    y, = Reshape(None,
                 in_order=OrderNC,
                 out_order=OrderNTC,
                 out_shape=[batch_size, repeats, channels])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #3
0
def _convert_subtract(converter: KerasConverter,
                      k_op: "keras.layers.Subtract"):
    """Convert keras.layers.Subtract: output = first input - second input."""
    tensors = converter.get_input_tensor(k_op)
    minuend = converter.get_variable(tensors[0])
    subtrahend = converter.get_variable(tensors[1])

    # Align axis orders so the elementwise subtraction is well-defined.
    minuend.order.unify(subtrahend.order)

    converter.set_variable(converter.get_output_tensor(k_op)[0],
                           minuend - subtrahend)
コード例 #4
0
ファイル: pooling.py プロジェクト: wathela/webdnn
def _convert_average_pooling1d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling1D"):
    """Convert keras.layers.AveragePooling1D.

    The 1-D pooling is implemented by lifting the input to 4-D
    (N, H=time, W=1, C) and applying a 2-D average pooling with a
    unit-width window, then reshaping back to (N, T, C).

    Bug fix: the original hard-coded stride=(1, 1), ignoring k_op.strides.
    Keras AveragePooling1D defaults strides to pool_size, so the previous
    code produced outputs of the wrong length for any downsampling layer.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    y, = Reshape(None,
                 in_order=x.order,
                 out_order=OrderNHWC,
                 out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)

    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        padding = (k_op.pool_size[0] // 2, 0)

    else:
        raise NotImplementedError(f"Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None,
                          ksize=(k_op.pool_size[0], 1),
                          # Honor the layer's stride (defaults to pool_size
                          # in Keras); only the time axis strides.
                          stride=(k_op.strides[0], 1),
                          padding=padding)(y)
    z, = Reshape(None,
                 in_order=y.order,
                 out_order=OrderNTC,
                 out_shape=[y.shape[0], y.shape[1], y.shape[3]])(y)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
コード例 #5
0
ファイル: merge.py プロジェクト: fossabot/hash2face
def _convert_multiply(converter: KerasConverter, k_op: "keras.layers.Multiply"):
    """Convert keras.layers.Multiply: elementwise product of all inputs."""
    tensors = converter.get_input_tensor(k_op)
    xs = [converter.get_variable(t) for t in tensors]

    # Align every input's axis order with the first one.
    base = xs[0]
    for other in xs[1:]:
        base.order.unify(other.order)

    y, = ElementwiseMul(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #6
0
ファイル: merge.py プロジェクト: fossabot/hash2face
def _convert_maximum(converter: KerasConverter, k_op: "keras.layers.Concatenate"):
    """Convert a concatenate merge layer into WebDNN's Concat operator.

    NOTE(review): the function is named `_convert_maximum` but both the
    type hint and the implementation handle keras.layers.Concatenate —
    confirm which layer this handler is registered for.
    """
    xs = [converter.get_variable(t) for t in converter.get_input_tensor(k_op)]
    base = xs[0]
    for other in xs[1:]:
        base.order.unify(other.order)

    concat_axis = base.order.axes[k_op.axis]
    y, = Concat(None, axis=concat_axis)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #7
0
ファイル: pooling.py プロジェクト: wathela/webdnn
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.AveragePooling2D"):
    """Convert an average-pooling layer into WebDNN's AveragePooling2D.

    NOTE(review): despite the `_convert_max_pooling2d` name, both the type
    hint and the emitted operator are average pooling — confirm the handler
    registration matches.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # The variable's axis order must match the layer's declared data format.
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW
    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC
    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    window = tuple(k_op.pool_size)
    step = tuple(k_op.strides)

    if k_op.padding == "valid":
        pad = (0, 0)
    elif k_op.padding == "same":
        pad = (window[0] // 2, window[1] // 2)
        console.warning(
            "[KerasConverter] keras.layers.AveragePooling with padding divides summed values in window by the number "
            "of valid elements, but WebDNN divides it by the number of elements including zero padding, so different "
            "result will be generated on the edge.")
    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = AveragePooling2D(None, ksize=window, stride=step, padding=pad)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #8
0
ファイル: pooling.py プロジェクト: wathela/webdnn
def convert_layer_global_average_pooling2d(
        converter: KerasConverter,
        k_op: "keras.layers.GlobalAveragePooling2D"):
    """Convert keras.layers.GlobalAveragePooling2D.

    A pooling window covering the whole H*W plane implements the global
    average; the result is then flattened to (N, C) without moving data.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW
    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC
    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    full_window = (x.shape_dict[Axis.H], x.shape_dict[Axis.W])
    y, = AveragePooling2D(None,
                          ksize=full_window,
                          stride=(1, 1),
                          padding=(0, 0))(x)

    # flatten without changing memory layout
    flat, = Reshape(None,
                    in_order=y.order,
                    out_order=OrderNC,
                    out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], flat)
コード例 #9
0
ファイル: merge.py プロジェクト: fossabot/hash2face
def _convert_average(converter: KerasConverter, k_op: "keras.layers.Average"):
    """Convert keras.layers.Average: elementwise mean of all inputs."""
    xs = [converter.get_variable(t) for t in converter.get_input_tensor(k_op)]

    # Align every input's axis order with the first one.
    base = xs[0]
    for other in xs[1:]:
        base.order.unify(other.order)

    total, = ElementwiseAdd(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0],
                           total / len(xs))
コード例 #10
0
def _convert_reshape(converter: KerasConverter, k_op: "keras.layers.Reshape"):
    """Convert keras.layers.Reshape, guessing a default output order by rank.

    Keras does not carry axis semantics, so the output order is assumed
    from the output rank (2D -> NC, 3D -> NTC, 4D -> NHWC); a warning is
    emitted because this guess can be wrong.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    target_shape = [x.shape[0]] + list(k_op.target_shape)
    order_for_rank = {2: OrderNC, 3: OrderNTC, 4: OrderNHWC}
    if len(target_shape) not in order_for_rank:
        raise NotImplementedError(
            f"[KerasConverter] Unknown default order: shape={target_shape}")
    target_order = order_for_rank[len(target_shape)]

    console.warning(
        "[KerasConverter] keras.layers.Reshape is parsed new data order as default order (OrderNC in 2D, "
        "OrderNTC in 3D, OrderNHWC in 4D). To handle this, please overwrite keras.layers.Reshape converter "
        "handler.")

    y, = Reshape(None,
                 in_order=x.order,
                 out_order=target_order,
                 out_shape=target_shape)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #11
0
ファイル: convolutional.py プロジェクト: zhangaz1/webdnn
def _convert_zero_padding1d(converter: KerasConverter, k_op: "keras.layers.ZeroPadding1D"):
    """Convert keras.layers.ZeroPadding1D into WebDNN's ZeroPadding1D."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # The input must be interpretable as (N, T, C).
    x.order.unify(OrderNTC)

    pad = tuple(k_op.padding)
    y, = ZeroPadding1D(None, padding=pad)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #12
0
def _convert_average_pooling1d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling1D"):
    """Convert keras.layers.AveragePooling1D.

    The input is lifted to 4-D (N, H=time, W=1, C) so a 2-D average
    pooling with a unit-width window can be used, then reshaped back to
    (N, T, C). "same" padding follows TensorFlow's convention.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    lifted = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    window = (k_op.pool_size[0], 1)
    step = (k_op.strides[0], 1)

    if k_op.padding == "valid":
        pad = (0, 0)

    elif k_op.padding == "same":
        # https://www.tensorflow.org/api_guides/python/nn#convolution
        remainder = x.shape_dict[Axis.H] % step[0]
        if remainder == 0:
            pad = (max(window[0] - step[0], 0) // 2, 0)
        else:
            pad = (max(window[0] - remainder, 0) // 2, 0)

    else:
        raise NotImplementedError(f"Unknown padding: {k_op.padding}")

    pooled, = AveragePooling2D(None, ksize=window, stride=step, padding=pad)(lifted)
    z = pooled.reshape([pooled.shape[0], pooled.shape[1], pooled.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
コード例 #13
0
ファイル: pooling.py プロジェクト: wathela/webdnn
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    """Convert keras.layers.MaxPooling2D into WebDNN's MaxPooling2D."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # The variable's axis order must match the layer's declared data format.
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW
    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC
    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format: {k_op.data_format}")

    window = tuple(k_op.pool_size)
    step = tuple(k_op.strides)

    if k_op.padding == "valid":
        pad = (0, 0)
    elif k_op.padding == "same":
        pad = (window[0] // 2, window[1] // 2)
    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = MaxPooling2D(None, ksize=window, stride=step, padding=pad)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #14
0
def _convert_add(converter: KerasConverter, k_op: "keras.layers.Add"):
    """Convert keras.layers.Add: elementwise sum of all inputs.

    Consistency fix: unify every input's axis order with the first input
    before the elementwise operation, as the other merge-layer converters
    (Multiply, Average, Minimum) do — otherwise inputs whose orders were
    not already aligned may not be combined element-by-element.
    """
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y, = ElementwiseAdd(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #15
0
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling1D"):
    """Convert keras.layers.GlobalAveragePooling1D.

    Lifts (N, T, C) to (N, H=T, W=1, C), averages over the full time axis
    with a 2-D pooling, then flattens to (N, C) without moving data.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    lifted = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    pooled, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(lifted)

    # flatten without changing memory layout
    flat = pooled.reshape([pooled.shape[0], mul(pooled.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], flat)
コード例 #16
0
def _convert_average(converter: KerasConverter, k_op: "keras.layers.Average"):
    """Convert keras.layers.Average: elementwise mean of all inputs.

    Consistency fix: unify every input's axis order with the first input
    before the elementwise addition, matching the other merge-layer
    converters (Multiply, Minimum) — otherwise inputs whose orders were
    not already aligned may not be combined element-by-element.
    """
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    # FIXME: More effective implementation
    y = ElementwiseAdd(None)(*xs)[0] / len(xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #17
0
def _convert_reshape(converter: KerasConverter, k_op: "keras.layers.Reshape"):
    """Convert keras.layers.Reshape.

    Keeps the batch axis and assigns fresh anonymous axes to every other
    output dimension, since Keras carries no axis semantics.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    new_shape = [x.shape[0], *k_op.target_shape]
    # noinspection PyTypeChecker
    new_order = Order([x.order.axes[0]] + [None] * len(k_op.target_shape))

    converter.set_variable(converter.get_output_tensor(k_op)[0],
                           x.reshape(new_shape, new_order))
コード例 #18
0
def _convert_leaky_relu(converter: KerasConverter,
                        k_op: "keras.layers.LeakyReLU"):
    """Convert keras.layers.LeakyReLU; a zero slope degenerates to plain ReLU."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    slope = k_op.alpha
    op = Relu(None) if slope == 0 else LeakyRelu(None, slope=slope)
    y, = op(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #19
0
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling2D"):
    """Convert keras.layers.GlobalAveragePooling2D.

    A pooling window covering the full H*W plane computes the global
    average; the result is flattened to (N, C) without moving data.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    full_window = (x.shape_dict[Axis.H], x.shape_dict[Axis.W])
    pooled, = AveragePooling2D(None, ksize=full_window, stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    flat = pooled.reshape([pooled.shape[0], mul(pooled.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], flat)
コード例 #20
0
ファイル: core.py プロジェクト: zhangaz1/webdnn
def _convert_repeat_vector(converter: KerasConverter, k_op: "keras.layers.RepeatVector"):
    """Convert keras.layers.RepeatVector.

    Inserts a fresh axis of size 1 between the two existing axes and tiles
    it n times, producing an (N, n, C)-shaped output.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    repeat_axis = Axis()
    tiling = AxisKeyDict(x.order.axes, [1, 1])
    tiling[repeat_axis] = k_op.n

    expanded = x.reshape(shape=(x.shape[0], 1, x.shape[1]),
                         order=Order([x.order.axes[0], repeat_axis, x.order.axes[1]]))
    y, = Tile(None, multiplier=tiling)(expanded)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #21
0
def _convert_flatten(converter: KerasConverter, k_op: "keras.layers.Flatten"):
    """Convert keras.layers.Flatten: collapse all non-batch axes into one."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # flatten without changing memory layout
    flat_shape = [x.shape[0], mul(x.shape[1:])]
    y, = Reshape(None,
                 in_order=x.order,
                 out_order=OrderNC,
                 out_shape=flat_shape)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #22
0
def _convert_up_sampling2d(converter: KerasConverter,
                           k_op: "keras.layers.UpSampling2D"):
    """Reject keras.layers.UpSampling2D: conversion is not implemented yet."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    # TODO: implement (a concat-based construction was sketched here)
    raise NotImplementedError(
        '[KerasConverter] keras.layers.UpSampling2D is not supported')
コード例 #23
0
def _convert_thresholded_relu(converter: KerasConverter,
                              k_op: "keras.layers.ThresholdedReLU"):
    """Convert keras.layers.ThresholdedReLU; a zero threshold is plain ReLU."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    theta = k_op.theta
    op = Relu(None) if theta == 0 else ThresholdRelu(None, threshold=theta)
    y, = op(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #24
0
ファイル: pooling.py プロジェクト: 255BITS/webdnn
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling1D):
    """Convert keras.layers.GlobalAveragePooling1D.

    Lifts the (N, T, C) input to (N, H=T, W=1, C), averages over the full
    time axis, then flattens to (N, C) without moving data.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    lifted, = Reshape(None, in_order=OrderNTC, out_order=OrderNHWC,
                      out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)
    pooled, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(lifted)

    # flatten without changing memory layout
    flat, = Reshape(None, in_order=pooled.order, out_order=OrderNC,
                    out_shape=[pooled.shape[0], mul(pooled.shape[1:])])(pooled)
    converter.set_variable(converter.get_output_tensor(k_op)[0], flat)
コード例 #25
0
ファイル: square.py プロジェクト: zhangaz1/webdnn
def square_converter_handler(converter: KerasConverter,
                             keras_layer: SquareLayer):
    """Convert the custom SquareLayer into its WebDNN SquareOperator."""
    input_tensor = converter.get_input_tensor(keras_layer)[0]
    x = converter.get_variable(input_tensor)

    y, = SquareOperator(None)(x)

    output_tensor = converter.get_output_tensor(keras_layer)[0]
    converter.set_variable(output_tensor, y)
コード例 #26
0
ファイル: merge.py プロジェクト: fossabot/hash2face
def _convert_minimum(converter: KerasConverter, k_op: "keras.layers.Minimum"):
    """Convert keras.layers.Minimum as a chain of elementwise selections."""
    xs = [converter.get_variable(t) for t in converter.get_input_tensor(k_op)]

    # Align every input's axis order with the first one.
    base = xs[0]
    for other in xs[1:]:
        base.order.unify(other.order)

    # Running minimum: wherever the current result exceeds the candidate,
    # keep the candidate instead.
    result = xs[0]
    for candidate in xs[1:]:
        is_greater = result > candidate
        result, = Select(None)(is_greater, candidate, result)

    converter.set_variable(converter.get_output_tensor(k_op)[0], result)
コード例 #27
0
def _convert_max_pooling2d(converter: KerasConverter, k_op: "keras.layers.MaxPooling2D"):
    """Convert keras.layers.MaxPooling2D.

    Odd "same" padding is realized by concatenating a border filled with a
    very large negative value, which can never win the max.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    pad_h = parse_padding(k_op.padding, k_op.pool_size[0], 1)
    pad_w = parse_padding(k_op.padding, k_op.pool_size[1], 1)
    x, pad = convert_odd_padding_to_concat(x, padding=(pad_h, pad_w), value=-1.0e10)

    y, = MaxPooling2D(None, ksize=k_op.pool_size, stride=k_op.strides,
                      padding=pad, cover_all=False)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #28
0
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    """Convert keras.layers.MaxPooling2D into WebDNN's MaxPooling2D."""
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    window = tuple(k_op.pool_size)
    step = tuple(k_op.strides)
    pad = (parse_padding(k_op.padding, window[0], 1),
           parse_padding(k_op.padding, window[1], 1))

    y, = MaxPooling2D(None, ksize=window, stride=step, padding=pad)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
コード例 #29
0
def _convert_max_pooling1d(converter: KerasConverter, k_op: "keras.layers.MaxPooling1D"):
    """Convert keras.layers.MaxPooling1D.

    Lifts the (N, T, C) input to (N, H=T, W=1, C) and applies a 2-D max
    pooling with a unit-width window, then reshapes back to (N, T, C).
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    lifted = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    window = (k_op.pool_size[0], 1)
    step = (k_op.strides[0], 1)
    # NOTE(review): parse_padding's result is indexed with [0] here —
    # presumably it yields a pair and only its first component applies to
    # the pooled axis; the dummy width axis gets no padding. Confirm.
    pad = (parse_padding(k_op.padding, window[0], 1)[0], 0)

    pooled, = MaxPooling2D(None, ksize=window, stride=step, padding=pad)(lifted)
    z = pooled.reshape([pooled.shape[0], pooled.shape[1], pooled.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
コード例 #30
0
def _convert_global_max_pooling1d(converter: KerasConverter,
                                  k_op: "keras.layers.GlobalMaxPooling1D"):
    """Convert keras.layers.GlobalMaxPooling1D.

    Lifts (N, T, C) to (N, H=T, W=1, C), takes the max over the full time
    axis with a 2-D pooling, then flattens to (N, C) without moving data.
    """
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    lifted = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    pooled, = MaxPooling2D(None,
                           ksize=(x.shape[1], 1),
                           stride=(1, 1),
                           padding=(0, 0))(lifted)

    # flatten without changing memory layout
    flat = pooled.reshape([pooled.shape[0], mul(pooled.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], flat)