Code Example #1
File: convolutional.py Project: fossabot/hash2face
def _convert_zero_padding1d(converter: KerasConverter,
                            k_op: "keras.layers.ZeroPadding1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    x.order.unify(OrderNTC)

    y, = ZeroPadding1D(None, padding=tuple(k_op.padding))(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #2
File: merge.py Project: wathela/webdnn
def _convert_average(converter: KerasConverter, k_op: "keras.layers.Average"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]

    y = ElementwiseAdd(None)(*xs)[0] / len(xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #3
File: merge.py Project: wathela/webdnn
def _convert_concatenate(converter: KerasConverter,
                         k_op: "keras.layers.Concatenate"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]
    y, = Concat(None, axis=xs[0].order.axes[k_op.axis])(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #4
def _convert_leaky_relu(converter: KerasConverter,
                        k_op: "keras.layers.LeakyReLU"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.alpha == 0:
        y, = Relu(None)(x)
    else:
        y, = LeakyRelu(None, slope=k_op.alpha)(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #5
def _convert_multiply(converter: KerasConverter,
                      k_op: "keras.layers.Multiply"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]

    y, = ElementwiseMul(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #6
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    y, = AveragePooling2D(None, ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]), stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
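
Global average pooling over 2D feature maps is expressed here as an ordinary AveragePooling2D whose window covers the whole H x W extent, followed by a flatten that keeps the memory layout. A minimal NumPy sketch of the same computation (illustration only, not WebDNN API):

# Sketch: averaging over a window that spans the full H x W extent is exactly
# a global average pool; the final reshape only flattens the result to (N, C).
import numpy as np

x = np.random.rand(2, 4, 5, 3)                # NHWC
y = x.mean(axis=(1, 2), keepdims=True)        # AveragePooling2D with ksize=(H, W)
z = y.reshape(y.shape[0], -1)                 # flatten to (N, C)
assert np.allclose(z, x.mean(axis=(1, 2)))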
Code Example #7
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: "keras.layers.GlobalAveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
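
The 1D variant reuses the 2D pooling operator: since there is no dedicated 1D pooling in this path, the handler reinterprets the (N, T, C) tensor as NHWC with height T and width 1. A small NumPy sketch of why that is equivalent (plain arrays stand in for WebDNN variables):

# Sketch (illustration only): viewing (N, T, C) as (N, H=T, W=1, C) and
# pooling over the full window reduces the temporal axis like a 1D global pool.
import numpy as np

x = np.random.rand(2, 5, 3)                               # (N, T, C)
x4 = x.reshape(x.shape[0], x.shape[1], 1, x.shape[2])     # (N, H=T, W=1, C)
y = x4.mean(axis=(1, 2))                                  # full-window pooling
assert np.allclose(y, x.mean(axis=1))                     # == global average over T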
Code Example #8
File: embeddings.py Project: saibabanadh/webdnn
def _convert_embedding(converter: KerasConverter,
                       k_op: "keras.layers.Embedding"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    x = x.reinterpret_axes(OrderNT)

    w = converter.convert_to_constant_variable(k_op.embeddings, OrderCN)

    y, = Embedding(None)(x, w)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
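
The input is reinterpreted as an (N, T) sequence of integer ids and the weight matrix is converted as a constant; the Embedding operator then performs a row lookup. A toy NumPy sketch of that lookup, assuming Keras' (vocabulary, dimension) weight layout:

# Sketch (illustration only): an embedding layer is a row lookup into the
# weight matrix; each integer id in x selects one row of w.
import numpy as np

w = np.random.rand(10, 4)         # (vocabulary_size, embedding_dim)
x = np.array([[1, 3, 3, 7]])      # (N, T) integer ids
y = w[x]                          # (N, T, embedding_dim)
assert y.shape == (1, 4, 4)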
Code Example #9
File: core.py Project: zhangaz1/webdnn
def _convert_repeat_vector(converter: KerasConverter, k_op: "keras.layers.RepeatVector"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    new_axis = Axis()
    multiplier = AxisKeyDict(x.order.axes, [1, 1])
    multiplier[new_axis] = k_op.n

    x = x.reshape(shape=(x.shape[0], 1, x.shape[1]), order=Order([x.order.axes[0], new_axis, x.order.axes[1]]))
    y, = Tile(None, multiplier=multiplier)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
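
keras.layers.RepeatVector(n) repeats a 2D input n times along a new middle axis. The handler models this with a reshape that introduces a fresh length-1 axis plus a Tile whose multiplier is 1 for the original axes and n for the new one. A NumPy sketch of the same shape transformation (illustration only):

# Sketch: RepeatVector(n) maps an (N, C) input to (N, n, C) by inserting a
# length-1 axis and tiling only that axis.
import numpy as np

n = 3
x = np.array([[1.0, 2.0], [3.0, 4.0]])                 # (N, C) = (2, 2)
y = np.tile(x.reshape(x.shape[0], 1, x.shape[1]),      # insert the new axis
            (1, n, 1))                                 # repeat it n times
assert y.shape == (2, 3, 2)
assert np.array_equal(y[:, 0, :], x) and np.array_equal(y[:, 2, :], x)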
Code Example #10
def _convert_flatten(converter: KerasConverter, k_op: "keras.layers.Flatten"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # flatten without changing memory layout
    y, = Reshape(None,
                 in_order=x.order,
                 out_order=OrderNC,
                 out_shape=[x.shape[0], mul(x.shape[1:])])(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #11
def _convert_reshape(converter: KerasConverter, k_op: "keras.layers.Reshape"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    target_shape = [x.shape[0]] + list(k_op.target_shape)
    # noinspection PyTypeChecker
    target_order = Order([x.order.axes[0]] + [None] * len(k_op.target_shape))

    y = x.reshape(target_shape, target_order)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #12
def _convert_embedding(converter: KerasConverter, k_op: keras.layers.Embedding):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if x.order == OrderNC:
        x, = ReinterpretAxis(None, in_order=OrderNC, out_order=OrderNT)(x)

    w = converter.convert_to_constant_variable(k_op.embeddings, OrderCN)

    y, = Embedding(None)(x, w)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #13
def _convert_thresholded_relu(converter: KerasConverter,
                              k_op: "keras.layers.ThresholdedReLU"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    if k_op.theta == 0:
        y, = Relu(None)(x)
    else:
        y, = ThresholdRelu(None, threshold=k_op.theta)(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #14
def _convert_add(converter: KerasConverter, k_op: "keras.layers.Add"):
    xs = [
        converter.get_variable(tensor)
        for tensor in converter.get_input_tensor(k_op)
    ]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y, = ElementwiseAdd(None)(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #15
File: pooling.py Project: 255BITS/webdnn
def _convert_global_average_pooling1d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling1D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    y, = Reshape(None, in_order=OrderNTC, out_order=OrderNHWC, out_shape=[x.shape[0], x.shape[1], 1, x.shape[2]])(x)
    y, = AveragePooling2D(None, ksize=(x.shape[1], 1), stride=(1, 1), padding=(0, 0))(y)

    # flatten without changing memory layout
    z, = Reshape(None, in_order=y.order, out_order=OrderNC, out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Code Example #16
File: convolutional.py Project: unixnme/webdnn
def _convert_conv2d_transpose(converter: KerasConverter,
                              k_op: "keras.layers.Conv2DTranspose"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(
            f"[KerasConverter] Unknown data format is detected: {k_op.data_format}"
        )

    w = converter.convert_to_constant_variable(k_op.kernel, OrderHWNC)

    ksize = tuple(k_op.kernel_size)
    stride = tuple(k_op.strides)

    if k_op.padding == "valid":
        padding = (0, 0)

    elif k_op.padding == "same":
        # @see https://github.com/tensorflow/tensorflow/blob/e5cf6f0c13b6053e4c58af6a951b204fde263172/tensorflow/python/ops/nn_ops.py#L507-L519
        pad_extra_shape = [k - 1 for k in ksize]

        if any(p % 2 != 0 for p in pad_extra_shape):
            raise NotImplementedError(
                f"[KerasConverter] Currently WebDNN doesn't support different size padding: "
                f"  (pad_extra_shape)={pad_extra_shape}")

        padding = tuple(p // 2 for p in pad_extra_shape)

    else:
        raise ValueError(f"[KerasConverter] Unknown padding: {k_op.padding}")

    y, = Deconvolution2D(None, ksize=ksize, stride=stride,
                         padding=padding)(x, w)
    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
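
For the "same" branch, the handler follows the TensorFlow rule that the total extra padding per spatial dimension is kernel_size - 1, and it only accepts the symmetric case where that total is even. A minimal sketch of that check (the helper same_padding is hypothetical, introduced just for illustration):

# Sketch (illustration only) of the "same"-padding arithmetic used above.
def same_padding(ksize):
    pad_extra_shape = [k - 1 for k in ksize]
    if any(p % 2 != 0 for p in pad_extra_shape):
        raise NotImplementedError("asymmetric padding is not supported here")
    return tuple(p // 2 for p in pad_extra_shape)

assert same_padding((3, 3)) == (1, 1)      # 3x3 kernel -> pad 1 on each side
assert same_padding((5, 5)) == (2, 2)
# same_padding((4, 4)) would raise: 3 extra rows/columns cannot be split evenly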
Code Example #17
def _convert_dense(converter: KerasConverter, k_op: "keras.layers.Dense"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    w = converter.convert_to_constant_variable(k_op.kernel, OrderCN)
    y, = Linear(None)(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #18
File: square.py Project: zhangaz1/webdnn
def square_converter_handler(converter: KerasConverter,
                             keras_layer: SquareLayer):
    keras_x = converter.get_input_tensor(keras_layer)[0]
    webdnn_x = converter.get_variable(keras_x)

    webdnn_operator = SquareOperator(None)

    webdnn_y, = webdnn_operator(webdnn_x)
    keras_y = converter.get_output_tensor(keras_layer)[0]

    converter.set_variable(keras_y, webdnn_y)
Code Example #19
File: merge.py Project: fossabot/hash2face
def _convert_minimum(converter: KerasConverter, k_op: "keras.layers.Minimum"):
    xs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y = xs[0]
    for x in xs[1:]:
        cond = y > x
        y, = Select(None)(cond, x, y)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
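
There is no dedicated elementwise-minimum operator in this path, so the handler folds the inputs pairwise with Select: wherever the running result is greater than the next input, the next input is chosen. A NumPy sketch of the same reduction (pairwise_min is a hypothetical helper for illustration):

# Sketch: the Select-based loop above is an elementwise minimum folded
# pairwise over the inputs, i.e. np.where(y > x, x, y).
import numpy as np
from functools import reduce

def pairwise_min(xs):
    return reduce(lambda y, x: np.where(y > x, x, y), xs)

xs = [np.array([1.0, 5.0, 3.0]),
      np.array([2.0, 4.0, 3.0]),
      np.array([0.0, 6.0, 9.0])]
assert np.array_equal(pairwise_min(xs), np.minimum.reduce(xs))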
Code Example #20
def _convert_max_pooling1d(converter: KerasConverter, k_op: "keras.layers.MaxPooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    ksize = (k_op.pool_size[0], 1)
    stride = (k_op.strides[0], 1)
    padding = (parse_padding(k_op.padding, ksize[0], 1)[0], 0)

    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=padding)(y)
    z = y.reshape([y.shape[0], y.shape[1], y.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Code Example #21
def _convert_max_pooling2d(converter: KerasConverter, k_op: "keras.layers.MaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    padding = (
        parse_padding(k_op.padding, k_op.pool_size[0], 1),
        parse_padding(k_op.padding, k_op.pool_size[1], 1)
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding, value=-1.0e10)

    y, = MaxPooling2D(None, ksize=k_op.pool_size, stride=k_op.strides, padding=padding, cover_all=False)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
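
convert_odd_padding_to_concat handles padding that cannot be split evenly between the two sides of an axis by concatenating an explicit constant border instead of passing the padding to the pooling kernel; for max pooling the filler is -1.0e10, so padded positions can never win the maximum. A toy 1D illustration (assumptions: window 2, stride 2):

# Sketch (illustration only): pad one extra column with a very negative value,
# then max-pool with window 2 / stride 2. The filler never wins the max.
import numpy as np

row = np.array([3.0, 7.0, 2.0, 5.0, 4.0])
padded = np.concatenate([row, [-1.0e10]])            # explicit right-side border
windows = [padded[i:i + 2] for i in range(0, len(padded), 2)]
assert [float(w.max()) for w in windows] == [7.0, 5.0, 4.0]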
Code Example #22
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    ksize = tuple(k_op.pool_size)
    stride = tuple(k_op.strides)
    padding = (parse_padding(k_op.padding, ksize[0], 1),
               parse_padding(k_op.padding, ksize[1], 1))

    y, = MaxPooling2D(None, ksize=ksize, stride=stride, padding=padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #23
File: core.py Project: zhangaz1/webdnn
def _convert_dense(converter: KerasConverter, k_op: "keras.layers.Dense"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    w = converter.convert_to_constant_variable(k_op.kernel, Order([None, None]))
    y, = Tensordot(None, axes=[x.order.axes[-1], w.order.axes[0]])(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, Order([None]))
        b.order.axes[0].unify(w.order.axes[1])
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
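
This variant expresses Dense as a Tensordot contracting the last axis of the input with the first axis of the kernel, which generalizes the Linear version above to inputs with more than two axes; for the usual 2D case it is an ordinary matrix product. NumPy sketch of the contraction (illustration only):

# Sketch: Dense as a tensordot over (last axis of x, first axis of w).
import numpy as np

x = np.random.rand(4, 8)       # (N, C_in)
w = np.random.rand(8, 16)      # (C_in, C_out)
b = np.random.rand(16)

y = np.tensordot(x, w, axes=1) + b     # axes=1 contracts last axis of x with first of w
assert np.allclose(y, x @ w + b)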
Code Example #24
File: pooling.py Project: fossabot/hash2face
def _convert_max_pooling2d(converter: KerasConverter,
                           k_op: "keras.layers.MaxPooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    x, padding = convolution_handler_preprocess(x,
                                                ksize=k_op.pool_size,
                                                padding=k_op.padding,
                                                dilation_rate=(1, 1),
                                                data_format=k_op.data_format)
    y, = MaxPooling2D(None,
                      ksize=k_op.pool_size,
                      stride=k_op.strides,
                      padding=padding,
                      cover_all=False)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #25
def _convert_conv2d_transpose(converter: KerasConverter,
                              k_op: "keras.layers.Conv2DTranspose"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    w = converter.convert_to_constant_variable(
        k_op.kernel, Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))

    if tuple(k_op.dilation_rate) != (1, 1):
        raise NotImplementedError(
            "[KerasConverter] keras.layers.Convolution2DTranspose with large dilation_rate is not supported"
        )

    padding = (parse_padding(k_op.padding, k_op.kernel_size[0],
                             k_op.dilation_rate[0]),
               parse_padding(k_op.padding, k_op.kernel_size[1],
                             k_op.dilation_rate[1]))

    if any(p[0] != p[1] for p in padding):
        pad_col2im = tuple(p[0] if p[0] == p[1] else 0 for p in padding)
        pad_extra = tuple((0, 0) if p[0] == p[1] else p for p in padding)
        y, = Deconvolution2D(None,
                             ksize=k_op.kernel_size,
                             stride=k_op.strides,
                             padding=pad_col2im)(x, w)

        if k_op.data_format == "channels_first":
            y = y[:, :, pad_extra[0][0]:-pad_extra[0][1],
                  pad_extra[1][0]:-pad_extra[1][1]]

        elif k_op.data_format == "channels_last":
            y = y[:, pad_extra[0][0]:-pad_extra[0][1],
                  pad_extra[1][0]:-pad_extra[1][1], :]

        else:
            raise NotImplementedError(
                f"Unknown data format: {k_op.data_format}")

    else:
        y, = Deconvolution2D(None,
                             ksize=k_op.kernel_size,
                             stride=k_op.strides,
                             padding=tuple(p[0] for p in padding))(x, w)

    if k_op.use_bias:
        b = converter.convert_to_constant_variable(k_op.bias, OrderC)
        y = y + b

    y = do_activation(k_op.activation, y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
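
When the computed padding is asymmetric in some dimension, the handler runs the deconvolution with zero padding on that dimension and then crops the surplus border with the slicing shown above. A toy NumPy sketch of that crop for one asymmetric axis (illustration only; the shapes are made up):

# Sketch: cropping an asymmetric border after the fact.
import numpy as np

full = np.arange(7 * 6, dtype=float).reshape(7, 6)   # stand-in for the un-padded deconv output (H, W)
pad_extra = (1, 2)                                    # 1 row at the top, 2 rows at the bottom
cropped = full[pad_extra[0]:-pad_extra[1], :]         # crop only the asymmetric H axis
assert cropped.shape == (4, 6)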
Code Example #26
def _convert_average_pooling1d(converter: KerasConverter,
                               k_op: "keras.layers.AveragePooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    ksize = (k_op.pool_size[0], 1)
    stride = (k_op.strides[0], 1)
    padding = (parse_padding(k_op.padding, ksize[0], 1), 0)

    y, = AveragePooling2D(None, ksize=ksize, stride=stride, padding=padding)(y)
    z = y.reshape([y.shape[0], y.shape[1], y.shape[3]], OrderNTC)

    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Code Example #27
def _convert_global_max_pooling1d(converter: KerasConverter,
                                  k_op: "keras.layers.GlobalMaxPooling1D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # FIXME: More effective implementation
    y = x.reshape([x.shape[0], x.shape[1], 1, x.shape[2]], OrderNHWC)
    y, = MaxPooling2D(None,
                      ksize=(x.shape[1], 1),
                      stride=(1, 1),
                      padding=(0, 0))(y)

    # flatten without changing memory layout
    z = y.reshape([y.shape[0], mul(y.shape[1:])], OrderNC)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Code Example #28
def _convert_average_pooling2d(converter: KerasConverter, k_op: "keras.layers.AveragePooling2D"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    check_data_format(x, k_op.data_format)

    padding = (
        parse_padding(k_op.padding, k_op.pool_size[0], 1),
        parse_padding(k_op.padding, k_op.pool_size[1], 1)
    )
    x, padding = convert_odd_padding_to_concat(x, padding=padding)

    # handling TensorFlow-style padding: https://github.com/mil-tokyo/webdnn/issues/694
    divide_without_padding = any(p > 0 for p in padding)

    y, = AveragePooling2D(None, ksize=k_op.pool_size, stride=k_op.strides, padding=padding, cover_all=False,
                          divide_without_padding=divide_without_padding)(x)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
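
The divide_without_padding flag reproduces TensorFlow-style "same" average pooling (see the linked issue #694): padded positions contribute nothing to the sum and are also excluded from the divisor. A toy 1D illustration of the difference, assuming window 2 and stride 2:

# Sketch (illustration only): TensorFlow-style averaging divides each window
# by the number of real (non-padded) elements, not by the window size.
import numpy as np

row = np.array([2.0, 4.0, 6.0])
padded = np.concatenate([row, [0.0]])                      # "same" padding on the right
windows = [padded[i:i + 2] for i in range(0, len(padded), 2)]
valid = [min(2, len(row) - i) for i in range(0, len(padded), 2)]
tf_style = [float(w.sum()) / v for w, v in zip(windows, valid)]
assert tf_style == [3.0, 6.0]       # a plain mean over the window would give [3.0, 3.0]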
Code Example #29
File: pooling.py Project: 255BITS/webdnn
def convert_layer_global_average_pooling2d(converter: KerasConverter, k_op: keras.layers.GlobalAveragePooling2D):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    if k_op.data_format == "channels_first":
        assert x.order == OrderNCHW

    elif k_op.data_format == "channels_last":
        assert x.order == OrderNHWC

    else:
        raise ValueError(f"[KerasConverter] Unknown data format: {k_op.data_format}")

    y, = AveragePooling2D(None, ksize=(x.shape_dict[Axis.H], x.shape_dict[Axis.W]), stride=(1, 1), padding=(0, 0))(x)

    # flatten without changing memory layout
    z, = Reshape(None, in_order=y.order, out_order=OrderNC, out_shape=[y.shape[0], mul(y.shape[1:])])(y)
    converter.set_variable(converter.get_output_tensor(k_op)[0], z)
Code Example #30
def _convert_elu(converter: KerasConverter, k_op: "keras.layers.ELU"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])
    alpha = float(k_op.alpha)

    if alpha == 1.0:
        y, = Elu(None)(x)

    elif alpha == 0.0:
        y, = Relu(None)(x)

    else:
        y1, = Elu(None)(x)
        y2, = Relu(None)(x)
        y = y1 * alpha + y2 * (1 - alpha)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
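
The only non-trivial case is alpha outside {0, 1}: a Keras ELU with parameter alpha equals alpha * ELU(x) + (1 - alpha) * ReLU(x), because both terms reduce to x for positive inputs while only the ELU term survives for negative inputs. A minimal NumPy check of that identity (illustration only):

# Sketch: verifying the blend identity used above.
import numpy as np

def elu(x):                      # standard ELU (alpha = 1)
    return np.where(x > 0, x, np.exp(x) - 1)

def relu(x):
    return np.maximum(x, 0.0)

alpha = 0.3
x = np.linspace(-3.0, 3.0, 7)
blended = elu(x) * alpha + relu(x) * (1.0 - alpha)
expected = np.where(x > 0, x, alpha * (np.exp(x) - 1))     # Keras ELU(alpha)
assert np.allclose(blended, expected)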