Code Example #1
File: concat_test.py Project: VislaLabs/webdnn-1
def test_mix_order():
    vx1 = np.random.rand(2, 3, 4, 5)
    vx2 = np.random.rand(2, 3, 4, 5)
    vx3 = np.random.rand(2, 3, 4, 5)
    vx4 = np.random.rand(2, 3, 4, 5)
    vy = np.concatenate((vx1, vx2, vx3, vx4), 1)

    x1 = Variable(vx1.shape, order=OrderNHWC)
    x2 = Variable(vx2.shape, order=OrderNHWC)
    x3 = Variable(vx3.shape, order=OrderNHWC)
    x4 = Variable(vx4.shape, order=OrderNHWC)

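    # Give each input a different memory order; np.rollaxis permutes the
    # backing numpy arrays so the test data stays consistent with each order.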
    x2.change_order(OrderCNHW)
    vx2 = np.rollaxis(vx2, 3, 0)

    x3.change_order(OrderCHWN)
    vx3 = np.rollaxis(np.rollaxis(vx3, 3, 0), 1, 4)

    x4.change_order(OrderNCHW)
    vx4 = np.rollaxis(vx4, 3, 1)

    y, = Concat(None, axis=Axis.H)(x1, x2, x3, x4)
    y.change_order(OrderNHWC)

    generate_kernel_test_case(description=f"concat_mix_order",
                              graph=Graph([x1, x2, x3, x4], [y]),
                              inputs={
                                  x1: vx1,
                                  x2: vx2,
                                  x3: vx3,
                                  x4: vx4
                              },
                              expected={y: vy})
Code Example #2
File: split_variable.py Project: zhangaz1/webdnn
def _split_pooling_2d(graph: Graph, op: Pooling2D, v: Variable,
                      v_pair: Sequence[Variable], axis: Axis):
    s1 = v_pair[0].shape_dict[axis]
    x = op.inputs["x"]
    y = op.outputs["y"]
    op.remove_all()

    if v == x:
        x_0, x_1 = v_pair
        s, k, p = (op.SH, op.KH, op.PH) if axis == Axis.H else (op.SW, op.KW,
                                                                op.PW)

        raise NotImplementedError

    elif v == y:
        y_0, y_1 = v_pair
        s, k, p = (op.SH, op.KH, op.PH) if axis == Axis.H else (op.SW, op.KW,
                                                                op.PW)

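        # Index range in the input that each half of the output reads along
        # `axis`; the slices below clamp these ranges to the input bounds.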
        x_0_range = (0 * s - k // 2, (y_0.shape_dict[axis] - 1) * s + k)
        x_1_range = (y_0.shape_dict[axis] * s - k // 2,
                     (y.shape_dict[axis] - 1) * s + k)

        indices = AxisKeyDict(OrderNHWC.axes,
                              [slice(None) for _ in OrderNHWC.axes])

        indices_0 = AxisKeyDict(indices)
        indices_0[axis] = slice(max(x_0_range[0], 0),
                                min(x_0_range[1], x.shape_dict[axis]))

        indices_1 = AxisKeyDict(indices)
        indices_1[axis] = slice(max(x_1_range[0], 0),
                                min(x_1_range[1], x.shape_dict[axis]))

        x_0, = Slice(None, indices=indices_0)(x)
        x_1, = Slice(None, indices=indices_1)(x)

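        # Re-apply the original padding explicitly: zeros are prepended to the
        # first piece and appended to the second, and the copied operators get
        # zero padding along the split axis.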
        if p > 0:
            data = ConstantVariable(
                np.zeros([
                    p if a == axis else x.shape_dict[a] for a in x.order.axes
                ]), x.order)
            x_0, = Concat(None, axis=axis)(data, x_0)
            x_1, = Concat(None, axis=axis)(x_1, data)

        op_0, op_1 = op.copy(), op.copy()
        new_padding = (0, op.PW) if axis == Axis.H else (op.PH, 0)
        op_0.parameters["padding"] = new_padding
        op_1.parameters["padding"] = new_padding

        y_0_new, = op_0(x_0)
        y_1_new, = op_1(x_1)

        OptimizeRule.replace_variable(graph, y_0_new.transpose_like(y_0), y_0)
        OptimizeRule.replace_variable(graph, y_1_new.transpose_like(y_1), y_1)

    else:
        raise UnexpectedAndPleaseReportError()
Code Example #3
def convert_odd_padding_to_concat(x: Variable,
                                  padding: Sequence[Tuple[int, int]],
                                  value: float = 0.0):
    # Currently WebDNN does not support padding whose begin and end sizes differ.
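    # Asymmetric padding is therefore emulated here: constant blocks are
    # concatenated before/after `x`, and only the symmetric remainder is
    # returned as padding.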
    for i, ((pad_begin, pad_end),
            axis) in enumerate(zip(padding, (Axis.H, Axis.W))):
        if pad_begin != pad_end:
            xs = []
            if pad_begin > 0:
                data = np.full([
                    pad_begin if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ],
                               value,
                               dtype=np.float32)
                xs.append(ConstantVariable(data, x.order))

            xs.append(x)

            if pad_end > 0:
                data = np.full([
                    pad_end if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ],
                               value,
                               dtype=np.float32)
                xs.append(ConstantVariable(data, x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

            padding = tuple(
                (0, 0) if j == i else padding[j] for j in range(len(padding)))

    return x, tuple(p[0] for p in padding)
Code Example #4
File: concat_test.py Project: VislaLabs/webdnn-1
def test_2d_odd():
    vx1 = np.random.rand(2, 3)
    vx2 = np.random.rand(2, 3)
    vx3 = np.random.rand(2, 3)
    vx4 = np.random.rand(2, 3)
    vx5 = np.random.rand(2, 3)
    vy = np.concatenate((vx1, vx2, vx3, vx4, vx5), 0)

    x1 = Variable(vx1.shape, order=OrderNC)
    x2 = Variable(vx2.shape, order=OrderNC)
    x3 = Variable(vx3.shape, order=OrderNC)
    x4 = Variable(vx4.shape, order=OrderNC)
    x5 = Variable(vx5.shape, order=OrderNC)
    y, = Concat(None, axis=Axis.N)(x1, x2, x3, x4, x5)

    generate_kernel_test_case(description=f"concat_2d_odd",
                              graph=Graph([x1, x2, x3, x4, x5], [y]),
                              inputs={
                                  x1: vx1,
                                  x2: vx2,
                                  x3: vx3,
                                  x4: vx4,
                                  x5: vx5
                              },
                              expected={y: vy})
Code Example #5
File: concat_test.py Project: unixnme/webdnn
def test_middle_axis():
    vx1 = np.random.rand(2, 3, 4, 5)
    vx2 = np.random.rand(2, 3, 4, 5)
    vx3 = np.random.rand(2, 3, 4, 5)
    vx4 = np.random.rand(2, 3, 4, 5)
    vy = np.concatenate((vx1, vx2, vx3, vx4), 1)

    x1 = Variable(vx1.shape, order=OrderNHWC)
    x2 = Variable(vx2.shape, order=OrderNHWC)
    x3 = Variable(vx3.shape, order=OrderNHWC)
    x4 = Variable(vx4.shape, order=OrderNHWC)
    y, = Concat(None, axis=Axis.H)(x1, x2, x3, x4)

    generate_kernel_test_case(
        description=f"concat_in_middle_axis",
        graph=Graph([x1, x2, x3, x4], [y]),
        inputs={
            x1: vx1,
            x2: vx2,
            x3: vx3,
            x4: vx4
        },
        expected={y: vy},
        EPS=1e-10,
        ABS_EPS=1e-10
    )
Code Example #6
File: unroll_concat.py Project: VislaLabs/webdnn-1
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        flag_changed = False
        for concat in traverse.filter_nodes(traverse.listup_operators(graph), Concat):
            if len(concat.inputs) == 2:
                # Unrolling is not needed
                continue

            flag_changed = True
            xs = [concat.inputs[f"x{i}"] for i in range(len(concat.inputs))]
            y = concat.outputs["y"]
            concat.remove_all()

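            # Reduce the N-ary concat to a tree of binary concats, pairing the
            # remaining inputs level by level until one variable is left.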
            while len(xs) > 1:
                hs = []
                while len(xs) > 0:
                    if len(xs) == 1:
                        hs.append(xs.pop(0))

                    else:
                        x0, x1 = xs.pop(0), xs.pop(0)
                        h, = Concat(None, axis=concat.axis)(x0, x1)
                        hs.append(h)

                xs = hs

            OptimizeRule.replace_variable(graph, y, xs[0].transpose_like(y))

        return graph, flag_changed
Code Example #7
File: activation.py Project: LabBros/webdnn
def _convert_crelu(converter: ChainerConverter,
                   c_op: "chainer.functions.CReLU"):
    x = converter.get_variable(c_op.inputs[0])
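    # CReLU: concatenate relu(x) and relu(-x) along the axis given by the op.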
    y1, = Relu(None)(x)
    y2, = Relu(None)(-x)
    y, = Concat(None, axis=x.order.axes[c_op.axis])(y1, y2)
    converter.set_variable(c_op.outputs[0](), y)
Code Example #8
File: merge.py Project: fossabot/hash2face
def _convert_concatenate(converter: KerasConverter, k_op: "keras.layers.Concatenate"):
    xs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    y, = Concat(None, axis=xs[0].order.axes[k_op.axis])(*xs)
    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
Code Example #9
def _convert_concat(converter: ChainerConverter, c_op: "chainer.functions.Concat"):
    xs = [converter.get_variable(x) for x in c_op.inputs]

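    # Unify the orders of every input pair so all inputs share one axis order.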
    for x1, x2 in combinations(xs, 2):
        x1.order.unify(x2.order)

    y, = Concat(None, axis=xs[0].order.axes[c_op.axis])(*xs)
    converter.set_variable(c_op.outputs[0](), y)
Code Example #10
    def optimize_pair(self, graph: Graph, op1: Concat, op2: ElementwiseMul):
        x0, x1 = op1.inputs["x0"], op1.inputs["x1"]
        c, _ = _get_constant_and_variable(op2, "x0", "x1")
        if c is None:
            return False
        if c.order != Order([op1.axis]):
            return False

        y2 = op2.outputs["y"]
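        # Split the constant multiplier at the concat boundary so each input
        # can be scaled before concatenation.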
        c0 = ConstantVariable(c.data[:x0.shape_dict[op1.axis]], c.order)
        c1 = ConstantVariable(c.data[x0.shape_dict[op1.axis]:], c.order)

        op1.remove_all()
        op2.remove_all()

        y, = Concat(None, axis=op1.axis)((x0 * c0), (x1 * c1))
        OptimizeRule.replace_variable(graph, y2, y.change_order(y2.order))
        return True
Code Example #11
def pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] Pad with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    if x.order.check_same_axes(OrderNHWC) and all([
            paddings[x.order.axes_dict[Axis.N]][0] ==
            paddings[x.order.axes_dict[Axis.N]][1] == 0,
            paddings[x.order.axes_dict[Axis.H]][0]
            == paddings[x.order.axes_dict[Axis.H]][1],
            paddings[x.order.axes_dict[Axis.W]][0]
            == paddings[x.order.axes_dict[Axis.W]][1],
            paddings[x.order.axes_dict[Axis.C]][0] ==
            paddings[x.order.axes_dict[Axis.C]][1] == 0
    ]):
        # Padding for only spatial axes: use ZeroPadding2D
        y, = ZeroPadding2D(None,
                           padding=(paddings[x.order.axes_dict[Axis.H]][0],
                                    paddings[x.order.axes_dict[Axis.W]][0]))(x)

    else:
        # General case: Use Concat
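        # Constant zero blocks are concatenated before/after x on each axis.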
        for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
            xs = []

            if pad_begin > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_begin if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            xs.append(x)

            if pad_end > 0:
                xs.append(
                    ConstantVariable(
                        np.zeros([
                            pad_end if a is axis else x.shape_dict[a]
                            for a in x.order.axes
                        ]), x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

        y = x

    converter.set_variable(tf_op.outputs[0], y)
Code Example #12
File: tensor.py Project: you74674/webdnn
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    axis = xs[0].order.axes[attrs["axis"].i]

    y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Code Example #13
File: split_variable.py Project: iver56/webdnn
def _split_tensorwise(graph: Graph, op: Operator, v: Variable,
                      v_pair: Sequence[Variable], axis: Axis):
    s1 = v_pair[0].shape_dict[axis]
    xs = dict(op.inputs)
    ys = dict(op.outputs)
    op.remove_all()

    op_0 = op.copy()
    op_1 = op.copy()

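    # Feed each half into one copy of the operator: inputs carrying the split
    # axis are split with SplitAxis, broadcastable inputs are shared.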
    for key in xs.keys():
        x = xs[key]
        if x == v:
            x_0, x_1 = v_pair

        else:
            if axis not in x.order.axes or x.shape_dict[axis] == 1:
                # broadcasting
                x_0 = x_1 = x

            else:
                x_0, x_1 = SplitAxis(None, axis=axis, sections=[s1])(x)

        op_0.append_input(key, x_0)
        op_1.append_input(key, x_1)

    op_0.exec()
    op_1.exec()

    for key in ys.keys():
        y = ys[key]
        if y == v:
            OptimizeRule.replace_variable(
                graph, op_0.outputs[key].transpose_like(v_pair[0]), v_pair[0])
            OptimizeRule.replace_variable(
                graph, op_1.outputs[key].transpose_like(v_pair[1]), v_pair[1])

        else:
            y_0 = op_0.outputs[key]
            y_1 = op_1.outputs[key]
            y_new, = Concat(None, axis=axis)(y_0, y_1)
            OptimizeRule.replace_variable(graph, y_new.transpose_like(y), y)
Code Example #14
def test_attach():
    x = Variable((2, 3, 4, 5), OrderNCHW)
    y, = Concat(None, axis=Axis.N)(x)

    graph = ChainerConverter().convert([x], [y])

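    # The rule should attach one extra graph input (presumably the concat
    # workspace) exactly once and be idempotent on a second run.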
    assert len(graph.inputs) == 1
    AttachConcatWorkspace().optimize(graph)
    assert len(graph.inputs) == 2
    AttachConcatWorkspace().optimize(graph)
    assert len(graph.inputs) == 2
Code Example #15
def concat_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    xs = [converter.get_variable(tf_tensor) for tf_tensor in tf_op.inputs]
    axis = xs.pop()
    # TODO
    assert isinstance(
        axis, ConstantVariable
    ), "[TensorFlowConverter] Dynamic axis concatenation is not supported yet."
    axis = xs[0].order.axes[int(axis.data.flatten()[0])]

    for x0, x1 in itertools.permutations(xs, 2):
        x0.order.unify(x1.order)

    y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(tf_op.outputs[0], y)
Code Example #16
File: split_variable.py Project: zhangaz1/webdnn
def _split_tensorwise(graph: Graph, op: Operator, v: Variable,
                      v_pair: Sequence[Variable], axis: Axis):
    s1 = v_pair[0].shape_dict[axis]
    s2 = v_pair[1].shape_dict[axis]
    xs = dict(op.inputs)
    ys = dict(op.outputs)
    op.remove_all()

    op_0 = op.copy()
    op_1 = op.copy()

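    # Feed each half into one copy of the operator; inputs lacking the split
    # axis are shared between both copies.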
    for key, x in xs.items():
        if x == v:
            x_0, x_1 = v_pair

        else:
            if axis in x.order.axes:
                x_0, x_1 = SplitAxis(None, axis=axis, sections=[s1])(x)

            else:
                # no splitting occurs: x does not have this axis
                x_0 = x_1 = x

        op_0.append_input(key, x_0)
        op_1.append_input(key, x_1)

    for key, y in ys.items():
        if y == v:
            y_0, y_1 = v_pair

        else:
            if axis in y.order.axes:
                # TODO (Kiikurage)
                # Attributes attached to "y" are copied to neither "y_0" nor "y_1"
                y_0 = Variable([
                    s1 if a == axis else y.shape_dict[a] for a in y.order.axes
                ], y.order)
                y_1 = Variable([
                    s2 if a == axis else y.shape_dict[a] for a in y.order.axes
                ], y.order)
                y_new, = Concat(None, axis=axis)(y_0, y_1)
                OptimizeRule.replace_variable(graph, y, y_new)

            else:
                raise UnexpectedAndPleaseReportError

        op_0.append_output(key, y_0)
        op_1.append_output(key, y_1)
Code Example #17
File: keras.py Project: zilongzhong/webdnn
    def convert_layer_concatenate(self, layer_config: Dict[str, object], inputs: List[Variable]) -> List[Variable]:
        """
        Example:
          {'name': 'mixed0', 'trainable': True, 'axis': 3}

        :param layer_config:
        :param inputs:
        :return:
        """
        name: str = layer_config["name"]

        axis = inputs[0].order.axes[layer_config["axis"]]
        concat_opr = Concat(name, axis=axis)
        y, = concat_opr(*inputs)

        return [y]
Code Example #18
File: tensor.py Project: VislaLabs/webdnn-1
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]
    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    if all(isinstance(x, ConstantVariable) for x in xs):
        # generate actual data as constant
        concat_data = np.concatenate([x.data for x in xs],
                                     axis=attrs["axis"].i)
        y = ConstantVariable(concat_data, xs[0].order)
    else:
        axis = xs[0].order.axes[attrs["axis"].i]

        y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Code Example #19
def main(order1: Order, order2: Order, concat_axis: Axis):
    default_order = {1: OrderC, 2: OrderNC, 4: OrderNHWC}

    op = Concat(None, axis=concat_axis)
    x1 = Variable(np.arange(order1.ndim) + 1, default_order[order1.ndim])
    x2 = Variable(np.arange(order2.ndim) + 1, default_order[order2.ndim])
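    # Shapes (1, 2, ..., ndim) give every axis a distinct size, which keeps
    # the per-axis assertions below unambiguous after reordering.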

    x1.change_order(order1)
    x2.change_order(order2)

    y, = op(x1, x2)
    for axis in y.order.axes:
        if axis == concat_axis:
            assert (y.shape_dict[axis] ==
                    x1.shape_dict[axis] + x2.shape_dict[axis])

        else:
            assert y.shape_dict[axis] == x1.shape_dict[axis]
Code Example #20
def _convert_concat(converter: ONNXConverter, onnx_op: INodeProto):
    xs = [converter.get_variable(v) for v in onnx_op.input]

    for x in xs[1:]:
        xs[0].order.unify(x.order)

    attrs = attribute_dict(onnx_op)
    axis = xs[0].order.axes[attrs["axis"].i]

    if all(isinstance(x, ConstantVariable) for x in xs):
        data = []
        for x in xs:
            data.append(x.data)
        data = np.concatenate(data, attrs["axis"].i)
        y = ConstantVariable(data, Order([None] * len(data.shape)))
    else:
        y, = Concat(None, axis=axis)(*xs)
    converter.set_variable(onnx_op.output[0], y)
Code Example #21
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        flag_changed = False
        for match in traverse.search_sub_structure(graph, [LSTM]):
            lstm = match[0]  # type: LSTM

            if lstm.has_attribute(LSTMOptimized):
                continue

            x = lstm.inputs["x"]
            w_input = lstm.inputs["w_input"]
            w_hidden = lstm.inputs["w_hidden"]
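            # Merge input and hidden weights so the optimized kernel multiplies
            # the concatenated [x; h] by a single weight matrix.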
            if isinstance(w_input, ConstantVariable) and isinstance(w_hidden, ConstantVariable):
                w_input.change_order(OrderCN)
                w_hidden.change_order(OrderCN)
                w_all = ConstantVariable(np.vstack([w_input.data, w_hidden.data]), OrderCN)
            else:
                w_all, = Concat(None, axis=Axis.C)(w_input, w_hidden)  # type: Variable
                w_all.change_order(OrderCN)

            attr = LSTMOptimized(lstm)

            N = x.shape_dict[Axis.N]
            C1 = attr.C1
            C2 = attr.C2

            x_and_h = Variable([C1 + C2, N], OrderCN)
            workspace = Variable([N, 4 * C2], OrderNC)

            lstm.remove_input(w_input)
            lstm.remove_input(w_hidden)
            lstm.append_input("x_and_h", x_and_h)
            lstm.append_input("workspace", workspace)
            lstm.append_input("w_all", w_all)
            lstm.attributes.add(attr)

            flag_changed = True

        return graph, flag_changed
Code Example #22
def test_minor_axis():
    vx1 = np.random.rand(2, 3, 4, 5)
    vx2 = np.random.rand(2, 3, 4, 5)
    vx3 = np.random.rand(2, 3, 4, 5)
    vx4 = np.random.rand(2, 3, 4, 5)
    vy = np.concatenate((vx1, vx2, vx3, vx4), 3)

    x1 = Variable(vx1.shape, order=OrderNHWC)
    x2 = Variable(vx2.shape, order=OrderNHWC)
    x3 = Variable(vx3.shape, order=OrderNHWC)
    x4 = Variable(vx4.shape, order=OrderNHWC)
    y, = Concat(None, axis=Axis.C)(x1, x2, x3, x4)

    generate_kernel_test_case(description=f"concat_in_minor_axis",
                              backend=["fallback", "webassembly", "webgpu"],
                              graph=Graph([x1, x2, x3, x4], [y]),
                              inputs={
                                  x1: vx1,
                                  x2: vx2,
                                  x3: vx3,
                                  x4: vx4
                              },
                              expected={y: vy})
Code Example #23
def convolution_handler_preprocess(x: Variable, ksize: Tuple[int, int], padding: str, dilation_rate: Tuple[int, int],
                                   data_format: Union[str, bytes]):
    check_data_format(x, data_format)

    padding = (parse_padding(padding, ksize[0], dilation_rate[0]), parse_padding(padding, ksize[1], dilation_rate[1]))

    # Currently WebDNN does not support padding whose begin and end sizes differ.
    for i, ((pad_begin, pad_end), axis) in enumerate(zip(padding, (Axis.H, Axis.W))):
        if pad_begin != pad_end:
            xs = []
            if pad_begin > 0:
                xs.append(ConstantVariable(np.zeros([pad_begin if a == axis else x.shape_dict[a] for a in x.order.axes]), x.order))

            xs.append(x)

            if pad_end > 0:
                xs.append(ConstantVariable(np.zeros([pad_end if a == axis else x.shape_dict[a] for a in x.order.axes]), x.order))

            if len(xs) > 1:
                x, = Concat(None, axis=axis)(*xs)

            padding = tuple((0, 0) if j == i else padding[j] for j in range(len(padding)))

    return x, tuple(p[0] for p in padding)
Code Example #24
def pad_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] PadV2 with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    constant_values = converter.get_variable(tf_op.inputs[2]).change_order(
        x.order)

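    # Pad each axis by tiling the scalar pad value into constant blocks and
    # concatenating them around x.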
    for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
        xs = []

        if pad_begin > 0:
            multiplier = AxisKeyDict(x.order.axes, [
                pad_begin if a == axis else x.shape_dict[a]
                for a in x.order.axes
            ])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        xs.append(x)

        if pad_end > 0:
            multiplier = AxisKeyDict(x.order.axes, [
                pad_end if a == axis else x.shape_dict[a] for a in x.order.axes
            ])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(tf_op.outputs[0], x)
Code Example #25
    def optimize_pair(self, op1: Concat, op2: ElementwiseMul):
        x0, x1 = op1.inputs["x0"], op1.inputs["x1"]
        c, _ = _get_constant_and_variable(op2, "x0", "x1")
        if c is None:
            return False
        if c.order != Order([op1.axis]):
            return False

        y2 = op2.outputs["y"]
        c0 = ConstantVariable(c.data[:x0.shape_dict[op1.axis]], c.order)
        c1 = ConstantVariable(c.data[x0.shape_dict[op1.axis]:], c.order)

        op1.remove_all()
        op2.remove_all()

        y, = Concat(None, axis=op1.axis)((x0 * c0),
                                         (x1 * c1))  # type: Variable
        y.replace(y2, with_assert=False)
        return True
Code Example #26
def conv2_d_backprop_input_handler(converter: TensorFlowConverter,
                                   tf_op: "tf.Operation"):
    input_sizes = converter.get_variable(tf_op.inputs[0])
    if not isinstance(input_sizes, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Conv2DBackpropInput with dynamic shape of output (input of convolution) variable is not supported."
        )
    input_sizes = tuple(input_sizes.data.astype(np.int32).tolist())

    w = converter.get_variable(tf_op.inputs[1])  # HWNC
    w.order.unify(Order([Axis.KH, Axis.KW, Axis.N, Axis.C]))

    gy = converter.get_variable(tf_op.inputs[2])  # NHWC
    data_format = tf_op.get_attr("data_format")
    check_data_format(gy, data_format)

    input_size = np.array([
        input_sizes[gy.order.axes_dict[Axis.H]],
        input_sizes[gy.order.axes_dict[Axis.W]]
    ])

    ksize = np.array([w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]])

    stride = np.array(tf_op.get_attr("strides"))
    assert stride[gy.order.axes_dict[Axis.N]] == 1
    assert stride[gy.order.axes_dict[Axis.C]] == 1
    stride = stride[[gy.order.axes_dict[Axis.H], gy.order.axes_dict[Axis.W]]]

    padding = np.array([
        parse_padding(tf_op.get_attr("padding"), ksize[0], 1),
        parse_padding(tf_op.get_attr("padding"), ksize[1], 1)
    ])

    x, = Deconvolution2D(None,
                         ksize=ksize.tolist(),
                         stride=stride.tolist(),
                         padding=0)(gy, w)

    # The actual padding size depends on two factors:
    # 1. the padding mode
    # 2. the extra apron size (= (input size of the convolution) - (size of the tensor expanded by the deconvolution))

    expanded_size = np.array([x.shape_dict[Axis.H], x.shape_dict[Axis.W]])
    apron_size = input_size - (expanded_size - padding.sum(axis=1))
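    # The apron is the extra size still needed to reach the requested input
    # size; it is cancelled against the padding below, and any remainder is
    # appended as zero-filled rows/columns.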

    # cancel padding by apron if possible
    for i in (0, 1):
        if padding[i, 0] > apron_size[i]:
            padding[i, 0] -= apron_size[i]
            apron_size[i] = 0

        else:
            apron_size[i] -= padding[i, 0]
            padding[i, 0] = 0

        if padding[i, 1] > apron_size[i]:
            padding[i, 1] -= apron_size[i]
            apron_size[i] = 0

        else:
            apron_size[i] -= padding[i, 1]
            padding[i, 1] = 0

    # append extra apron
    for i, axis in enumerate((Axis.H, Axis.W)):
        if apron_size[i] == 0:
            continue

        data = np.zeros([
            apron_size[i] if a == axis else x.shape_dict[a]
            for a in x.order.axes
        ])
        x, = Concat(None, axis=axis)(x, ConstantVariable(data, x.order))

    # crop without padding
    padding = padding.tolist()  # type: List[List[int]]
    slice_h = slice(None) if padding[0] == [0, 0] else slice(
        padding[0][0], -padding[0][1])
    slice_w = slice(None) if padding[1] == [0, 0] else slice(
        padding[1][0], -padding[1][1])
    if data_format == b"NCHW":
        x = x[:, :, slice_h, slice_w]

    elif data_format == b"NHWC":
        x = x[:, slice_h, slice_w, :]

    else:
        raise NotImplementedError(f"Unknown data format: {data_format}")

    converter.set_variable(tf_op.outputs[0], x)
Code Example #27
File: tensor.py Project: VislaLabs/webdnn-1
def _convert_pad(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)

    pads = attrs["pads"].ints
    if len(pads) != 2 * x.ndim:
        raise ValueError(
            "[ONNXConverter] The length of parameter \"pads\" in \"Pad\" node must be double of input tensor's dimension"
        )
    pads_begin = pads[:x.ndim]
    pads_end = pads[x.ndim:]

    mode = attrs["mode"].s if "mode" in attrs else b"constant"
    value = attrs["value"].f if "value" in attrs else 0
    constant_values = ConstantVariable(np.full([1] * x.ndim, value), x.order)
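    # A one-element tensor tiled below to build constant padding blocks.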

    for pad_begin, pad_end, axis in zip(pads_begin, pads_end, x.order.axes):
        xs = []

        if pad_begin > 0:
            if mode == b"constant":
                multiplier = AxisKeyDict(x.order.axes, [
                    pad_begin if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ])
                xs.append(Tile(None, multiplier)(constant_values)[0])

            elif mode == b"reflect":
                slices = [
                    slice(pad_begin, 0, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]
                xs.append(x[slices])

            elif mode == b"edge":
                slices = [
                    slice(pad_begin -
                          1, None, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]
                xs.append(x[slices])

            else:
                raise NotImplementedError(
                    f"[ONNXConverter] Unknown mode \"{mode}\"")

        xs.append(x)

        if pad_end > 0:
            if mode == b"constant":
                multiplier = AxisKeyDict(x.order.axes, [
                    pad_end if a == axis else x.shape_dict[a]
                    for a in x.order.axes
                ])
                xs.append(Tile(None, multiplier)(constant_values)[0])

            elif mode == b"reflect":
                slices = [
                    slice(-2, -2 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]
                xs.append(x[slices])

            elif mode == b"edge":
                slices = [
                    slice(-1, -1 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]
                xs.append(x[slices])

            else:
                raise NotImplementedError(
                    f"[ONNXConverter] Unknown mode \"{mode}\"")

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(onnx_op.output[0], x)
Code Example #28
File: array.py Project: xczhanjun/webdnn
def _convert_concat(converter: ChainerConverter,
                    c_op: "chainer.functions.Concat"):
    xs = [converter.get_variable(x) for x in c_op.inputs]
    y, = Concat(None, axis=xs[0].order.axes[c_op.axis])(*xs)
    converter.set_variable(c_op.outputs[0](), y)
Code Example #29
def test_invalid_size():
    op = Concat(None, axis=Axis.C)

    v1 = Variable((2, 3, 4, 5), OrderNHWC)
    v2 = Variable((2, 3, 7, 6), OrderNHWC)
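    # v1 and v2 disagree on a non-concatenation axis (H: 4 vs. 7), so this
    # call is expected to fail; the original test presumably asserts the error.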
    v3, = op(v1, v2)
Code Example #30
def mirror_pad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])

    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] MirrorPad with dynamic padding size is not supported'
        )

    paddings = paddings.data.astype(int).tolist()

    mode = tf_op.get_attr("mode")  # type: bytes
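    # SYMMETRIC includes the edge element in the mirrored block; REFLECT
    # excludes it.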

    for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
        xs = []

        if pad_begin > 0:
            if mode == b'SYMMETRIC':
                slices = [
                    slice(pad_begin -
                          1, None, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            elif mode == b'REFLECT':
                slices = [
                    slice(pad_begin, 0, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            else:
                raise NotImplementedError(
                    f"[TensorFlowConverter] Unknown mirror pad mode: {mode}")

            xs.append(x[slices])

        xs.append(x)

        if pad_end > 0:
            if mode == b'SYMMETRIC':
                slices = [
                    slice(-1, -1 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            elif mode == b'REFLECT':
                slices = [
                    slice(-2, -2 - pad_end, -1) if a == axis else slice(None)
                    for a in x.order.axes
                ]

            else:
                raise NotImplementedError(
                    f"[TensorFlowConverter] Unknown mirror pad mode: {mode}")

            xs.append(x[slices])

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(tf_op.outputs[0], x)