Code example #1
    def __call__(self, *xs: Variable):
        axis = self.axis
        axis_index = xs[0].order.axes_dict[axis]
        axes = xs[0].order.axes

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0

        for i, x in enumerate(xs):
            assert x.order.check_same_axes(xs[0].order), "Input variable of Concat operator must have same axes: " \
                                                         f"x0.order.axes={xs[0].order.axes}, x{i}.order.axes={xs[i].order.axes}"

            for other_axis in [
                    other_axis for other_axis in axes if other_axis != axis
            ]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]
                                              ) and Placeholder.check_resolved(
                                                  x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis], "Input variable of Concat operator must be same " \
                                                                                     f"shape except the specified axis: " \
                                                                                     f"x0.shape_dict[{other_axis}]={xs[0].shape_dict[other_axis]}, " \
                                                                                     f"x{i}.shape_dict[{other_axis}]={xs[i].shape_dict[other_axis]}"

            self.append_input(f"x{i}", x)
            y_shape[axis_index] += x.shape_dict[axis]

        y = Variable(y_shape, xs[0].order)
        self.append_output("y", y)
        return y,
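Every snippet on this page follows the same guard pattern: a shape assertion is wrapped in Placeholder.check_resolved on both operands, so validation is skipped while either side is still symbolic and enforced once values are assigned. A minimal stand-alone sketch of that guard (check_resolved, force_int, label and the .value setter all appear in the snippets below; the module path and the helper name assert_same_size are assumptions):

from webdnn.graph.placeholder import Placeholder

def assert_same_size(a, b):
    # Compare only once both values are concrete; an unresolved
    # placeholder is validated later, after its .value is assigned.
    if Placeholder.check_resolved(a) and Placeholder.check_resolved(b):
        assert Placeholder.force_int(a) == Placeholder.force_int(b), \
            f"Dimension mismatch: {a} != {b}"

N = Placeholder(label="N")
assert_same_size(N, 10)  # skipped: N is still unresolved
N.value = 10
assert_same_size(N, 10)  # now enforced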
Code example #2
    def exec(self):
        x = self.inputs["x"]
        w = self.inputs["w"]

        assert x.order.check_same_axes(OrderNCHW), f"""
[Convolution2D] Input variable of Convolution2D must have N, C, H, and W axes:
    (x.order.axes) = {x.order.axes}"""

        assert w.order.check_same_axes(Order([Axis.N, Axis.KH, Axis.KW, Axis.C])), f"""
[Convolution2D] Kernel variable of Convolution2D must have N, C, KH, and KW axes:
    (w.order.axes) = {w.order.axes}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.KH]) and Placeholder.check_resolved(w.shape_dict[Axis.KW]):
            assert (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]) == self.ksize, f"""
[Convolution2D] Kernel variable of Convolution2D must be same spatial size as ksize parameter:
    (w.shape_dict[Axis.KH]) = {w.shape_dict[Axis.KH]}
    (w.shape_dict[Axis.KW]) = {w.shape_dict[Axis.KW]}
    (self.ksize) = {self.ksize}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], f"""
[Convolution2D] Input and Kernel variables of Convolution2D must be same channel size:
    (x.shape_dict[Axis.C]) = {x.shape_dict[Axis.C]}
    (w.shape_dict[Axis.C]) = {w.shape_dict[Axis.C]}"""

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] + 2 * self.PH - self.WH) // self.SH + 1
        W2 = (x.shape_dict[Axis.W] + 2 * self.PW - self.WW) // self.SW + 1
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_output("y", y)
        return y,
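For reference, H2 and W2 above are the standard convolution output size floor((H + 2*P - K)/S) + 1, with self.WH/self.WW playing the role of the kernel height/width that the ksize assertion checks. A quick numeric check, assuming a 16x16 input with a 3x3 kernel, stride 1 and padding 1:

H2 = (16 + 2 * 1 - 3) // 1 + 1  # = 16: this configuration preserves the spatial size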
Code example #3
File: concat.py Project: fossabot/hash2face
    def exec(self):
        xs = [self.inputs[f"x{i}"] for i in range(len(self.inputs))]
        axis = self.axis
        axis_index = xs[0].order.axes_dict[axis]
        axes = xs[0].order.axes

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0
        y_order = xs[0].order

        for a in y_order.axes:
            if a == axis:
                continue

            self.attributes.add(Tensorwise(self, a))

        for i, x in enumerate(xs):
            assert x.order.check_same_axes(xs[0].order), f"""
[Concat] Input variable of Concat operator must have same axes
  (x0.order.axes) = {xs[0].order.axes}
  (x{i}.order.axes) = {xs[i].order.axes}"""

            for other_axis in [other_axis for other_axis in axes if other_axis != axis]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]) and Placeholder.check_resolved(x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis], f"""
[Concat] Input variable of Concat operator must be same shape except the specified axis:
  (x0.shape_dict[{other_axis}]) = {xs[0].shape_dict[other_axis]}
  (x{i}.shape_dict[{other_axis}]) = {xs[i].shape_dict[other_axis]}"""

            y_shape[axis_index] += x.shape_dict[axis]

        y = Variable(y_shape, y_order)
        self.append_output("y", y)
        return y,
Code example #4
File: elementwise.py Project: newpouy/webdnn
    def __call__(self, *xs: "variable.Variable"):
        y_axes = []
        y_shape_dict = AxisKeyDict()
        for i, x in enumerate(xs):
            for axis in x.order.axes:
                if axis in y_axes:
                    if y_shape_dict[axis] == 1:
                        # broadcast
                        y_shape_dict[axis] = x.shape_dict[axis]
                else:
                    y_axes.append(axis)
                    y_shape_dict[axis] = x.shape_dict[axis]

                if Placeholder.check_resolved(x.shape_dict[axis]):
                    if Placeholder.check_resolved(y_shape_dict[axis]):
                        assert y_shape_dict[axis] == x.shape_dict[axis] or x.shape_dict[axis] == 1, \
                            "All input variables of elementwise operator should be same shape: " \
                            f"y.shape_dict[{axis}]={y_shape_dict[axis]}, " \
                            f"x{i}.shape_dict[{axis}]={x.shape_dict[axis]}"
                    else:
                        y_shape_dict[axis] = x.shape_dict[axis]

            self.append_input(f"x{i}", x)

        y = variable.Variable([y_shape_dict[axis] for axis in y_axes],
                              Order(y_axes))
        self.append_output("y", y)
        return y,
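The loop above implements a broadcasting rule: a new axis is appended on first sight, a unit axis is widened, and anything else must match. A simplified stand-alone sketch of the same rule, using plain ints and a plain dict instead of placeholders and AxisKeyDict (broadcast_shape is a hypothetical helper, not part of webdnn):

def broadcast_shape(shapes):
    # shapes: list of {axis_name: size} dicts, mirroring x.shape_dict
    axes, out = [], {}
    for shape in shapes:
        for axis, size in shape.items():
            if axis not in out:
                axes.append(axis)  # first variable to mention the axis fixes its position
                out[axis] = size
            elif out[axis] == 1:
                out[axis] = size   # broadcast a unit axis up
            else:
                assert size == out[axis] or size == 1, f"mismatch on axis {axis}"
    return [out[axis] for axis in axes]

broadcast_shape([{"N": 2, "C": 1}, {"N": 2, "C": 3}])  # -> [2, 3]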
Code example #5
File: deconvolution2d.py Project: zhangaz1/webdnn
    def __call__(self, x: Variable, w: Variable):
        assert x.order.check_same_axes(OrderNCHW), f"""
[Deconvolution2D] Input variable of Deconvolution2D must have N, C, H, and W axes:
    (x.order.axes) = {x.order.axes}"""

        assert w.order.check_same_axes(Order([Axis.N, Axis.KH, Axis.KW, Axis.C])), f"""
[Deconvolution2D] Kernel variable of Deconvolution2D must have N, C, KH, and KW axes:
    (w.order.axes) = {w.order.axes}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.KH]) and Placeholder.check_resolved(w.shape_dict[Axis.KW]):
            assert (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]) == self.ksize, f"""
[Deconvolution2D] Kernel variable of Deconvolution2D must be same spatial size as ksize parameter:
    (w.shape_dict[Axis.KH]) = {w.shape_dict[Axis.KH]}
    (w.shape_dict[Axis.KW]) = {w.shape_dict[Axis.KW]}
    (self.ksize) = {self.ksize}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], f"""
[Deconvolution2D] Input and Kernel variables of Deconvolution2D must be same channel size:
    (x.shape_dict[Axis.C]) = {x.shape_dict[Axis.C]}
    (w.shape_dict[Axis.C]) = {w.shape_dict[Axis.C]}"""

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x.shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
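H2 and W2 here invert the convolution size formula: (H - 1)*S - 2*P + K. With the parameters of the Deconvolution2D test in code example #12 below (16x16 input, ksize=3, stride=1, pad=1):

H2 = (16 - 1) * 1 - 2 * 1 + 3  # = 16, matching the 16x16 output checked by that test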
Code example #6
File: convert_rgba_to_r.py Project: zhangaz1/webdnn
    def __call__(self, *xs: "variable.Variable"):
        y_axes = []
        y_shape_dict = AxisKeyDict()

        # Check variables in descending order of the number of dimensions.
        # Without this step, if x0.order=C and x1.order=NC, the output order would be CN; the expected result is NC.
        xs_order = [(i, x) for i, x in enumerate(xs)]
        xs_order.sort(key=lambda d: d[1].ndim, reverse=True)

        for i, x in xs_order:
            for axis in x.order.axes:
                if axis in y_axes:
                    if y_shape_dict[axis] == 1:
                        # broadcast
                        y_shape_dict[axis] = x.shape_dict[axis]
                else:
                    y_axes.append(axis)
                    y_shape_dict[axis] = x.shape_dict[axis]

                if Placeholder.check_resolved(x.shape_dict[axis]):
                    if Placeholder.check_resolved(y_shape_dict[axis]):
                        assert y_shape_dict[axis] == x.shape_dict[axis] or x.shape_dict[axis] == 1, \
                            "All input variables of elementwise operator should be same shape: " \
                            f"y.shape_dict[{axis}]={y_shape_dict[axis]}, " \
                            f"x{i}.shape_dict[{axis}]={x.shape_dict[axis]}"
                    else:
                        y_shape_dict[axis] = x.shape_dict[axis]

        y = variable.Variable([y_shape_dict[axis] for axis in y_axes], Order(y_axes))
        ChannelMode.set(y, ChannelModeEnum.R)

        for i, x in enumerate(xs):
            self.append_input(f"x{i}", x)
        self.append_output("y", y)
        return y,
Code example #7
File: convolution2d.py Project: wathela/webdnn
    def __call__(self, x: Variable, w: Variable) -> Tuple[Variable]:
        assert x.order.check_same_axes(OrderNCHW), \
            "Input variable of Convolution2D must have N, C, H, and W axes.: " \
            f"x.order.axes={x.order.axes}"

        assert w.order.check_same_axes(OrderNCHW), \
            "Kernel variable of Convolution2D must have N, C, H, and W axes.: " \
            f"w.order.axes={w.order.axes}"

        if Placeholder.check_resolved(w.shape_dict[Axis.H]) and Placeholder.check_resolved(w.shape_dict[Axis.W]):
            assert (w.shape_dict[Axis.H], w.shape_dict[Axis.W]) == self.ksize, \
                "Kernel variable of Convolution2D must be same spatial size as ksize parameter: " \
                f"w.shape_dict[Axis.H]={w.shape_dict[Axis.H]}, " \
                f"w.shape_dict[Axis.W]={w.shape_dict[Axis.W]}, " \
                f"self.ksize={self.ksize}"

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], \
                "Input and Kernel variables of Convolution2D must be same channel size: " \
                f"x.shape_dict[Axis.C]={x.shape_dict[Axis.C]}, " \
                f"w.shape_dict[Axis.C]={w.shape_dict[Axis.C]}"

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] + 2 * self.PH - self.WH) // self.SH + 1
        W2 = (x.shape_dict[Axis.W] + 2 * self.PW - self.WW) // self.SW + 1
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
Code example #8
    def __call__(self, x: Variable, w: Variable):
        assert set(x.order.axes) == {Axis.N, Axis.C, Axis.H, Axis.W}, \
            "Input variable of Deconvolution2D must have N, C, H, and W axes.: " \
            f"x.order.axes={x.order.axes}"

        assert set(w.order.axes) == {Axis.N, Axis.C, Axis.H, Axis.W}, \
            "Kernel variable of Deconvolution2D must have N, C, H, and W axes.: " \
            f"w.order.axes={w.order.axes}"

        if Placeholder.check_resolved(w.shape_dict[Axis.H]) and Placeholder.check_resolved(w.shape_dict[Axis.W]):
            assert (w.shape_dict[Axis.H], w.shape_dict[Axis.W]) == self.ksize, \
                "Kernel variable of Deconvolution2D must be same spatial size as ksize parameter: " \
                f"w.shape_dict[Axis.H]={w.shape_dict[Axis.H]}, " \
                f"w.shape_dict[Axis.W]={w.shape_dict[Axis.W]}, " \
                f"self.ksize={self.ksize}"

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], \
                "Input and Kernel variables of Deconvolution2D must be same channel size: " \
                f"x.shape_dict[Axis.C]={x.shape_dict[Axis.C]}, " \
                f"w.shape_dict[Axis.C]={w.shape_dict[Axis.C]}"

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x.shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
Code example #9
    def __call__(self, *xs: Variable):
        """
        Args:
            *xs (:class:`~webdnn.graph.variable.Variable`): Inputs

        Returns:
            tuple of :class:`~webdnn.graph.variable.Variable`: Output
        """
        concat_axis = self.parameters["axis"]  # type: Axis
        axis_index = xs[0].order.axes_dict[concat_axis]
        axes_set = set(xs[0].order.axes)

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0

        for i, x in enumerate(xs):
            assert set(x.order.axes) == axes_set
            for other_axis in [other_axis for other_axis in axes_set if other_axis != concat_axis]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]) and Placeholder.check_resolved(x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis]

            self.append_input(f"x{i}", x)
            y_shape[axis_index] += x.shape_dict[concat_axis]

        y = Variable(y_shape, xs[0].order)
        self.append_output("y", y)
        return y,
Code example #10
    def __call__(self, x: Variable, w: Variable):
        """
        Args:
            x (:class:`~webdnn.graph.variable.Variable`): Input
            w (:class:`~webdnn.graph.variable.Variable`): Filter

        Returns:
            tuple of :class:`~webdnn.graph.variable.Variable`: Output
        """
        x_shape_dict = x.shape_dict
        w_shape_dict = w.shape_dict
        if Placeholder.check_resolved(
                w_shape_dict[Axis.H]) and Placeholder.check_resolved(
                    w_shape_dict[Axis.W]):
            assert (w_shape_dict[Axis.H], w_shape_dict[Axis.W]) == self.ksize
        if Placeholder.check_resolved(
                w_shape_dict[Axis.C]) and Placeholder.check_resolved(
                    x_shape_dict[Axis.C]):
            assert w_shape_dict[Axis.C] == x_shape_dict[Axis.C]

        N = x_shape_dict[Axis.N]
        H2 = (x_shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x_shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w_shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
Code example #11
    def append(self, v: Variable, offset: IntLike = -1):
        if offset == -1:
            if Placeholder.check_resolved(
                    offset) and Placeholder.check_resolved(v.size):
                offset = self.static_size
            else:
                offset = self.dynamic_size

        self.allocations[v] = Allocation(offset, v.size)
Code example #12
def test_with_placeholder():
    link = chainer.links.Deconvolution2D(None, 16, ksize=3, stride=1, pad=1)
    vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
    vy = link(vx)

    N = Placeholder(label="N")
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([N, 3, H, W])
    py = link(px)

    graph = ChainerConverter().convert([px], [py])

    x = graph.inputs[0]
    y = graph.outputs[0]

    N.value = 1
    H.value = 16
    W.value = 16
    generate_kernel_test_case(
        description=f"[chainer] L.Deconvolution2D with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={x: vx.data},
        expected={y: vy.data},
        EPS=1e-2
    )
Code example #13
def test_with_placeholder():
    link = chainer.links.BatchNormalization(size=3)
    vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
    with chainer.using_config('train', False):
        vy = link(vx)

    N = Placeholder(label="N")
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([N, 3, H, W])
    with chainer.using_config('train', False):
        py = link(px)

    graph = ChainerConverter().convert([px], [py])

    x = graph.inputs[0]
    y = graph.outputs[0]

    N.value = 1
    H.value = 16
    W.value = 16
    generate_kernel_test_case(
        description=f"[chainer] L.FixedBatchNormalization with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={x: vx.data},
        expected={y: vy.data},
    )
Code example #14
File: matmul_test.py Project: zhangaz1/webdnn
def test_with_placeholder():
    vx1 = chainer.Variable(np.random.rand(10, 12).astype(np.float32) * 2 - 1)
    vx2 = chainer.Variable(np.random.rand(12, 14).astype(np.float32) * 2 - 1)
    vy = chainer.functions.matmul(vx1, vx2, False, False)

    M = Placeholder(label="M")
    K = Placeholder(label="K")
    N = Placeholder(label="N")
    px1 = PlaceholderVariable([M, K])
    px2 = PlaceholderVariable([K, N])
    py = chainer.functions.matmul(px1, px2, False, False)

    graph = ChainerConverter().convert([px1, px2], [py])

    M.value = 10
    K.value = 12
    N.value = 14
    generate_kernel_test_case(
        description=f"[chainer] F.matmul with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={
            graph.inputs[0]: vx1.data,
            graph.inputs[1]: vx2.data
        },
        expected={graph.outputs[0]: vy.data})
Code example #15
File: split_axis_test.py Project: zhangaz1/webdnn
def test_with_placeholder():
    vx = chainer.Variable(np.random.rand(2, 20, 4, 5).astype(np.float32))
    vy1, vy2, vy3 = chainer.functions.split_axis(vx, [5, 15], 1)

    N = Placeholder(label="N")
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([N, 20, H, W])
    py1, py2, py3 = chainer.functions.split_axis(px, [5, 15], 1)

    graph = ChainerConverter().convert([px], [py1, py2, py3])

    N.value = 2
    H.value = 4
    W.value = 5
    generate_kernel_test_case(
        description=f"[chainer] F.split_axis with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={graph.inputs[0]: vx.data},
        expected={
            graph.outputs[0]: vy1.data,
            graph.outputs[1]: vy2.data,
            graph.outputs[2]: vy3.data
        },
    )
Code example #16
File: maximum_test.py Project: zhangaz1/webdnn
def test_with_placeholder():
    vx0 = chainer.Variable(np.random.rand(10, 11, 12).astype(np.float32))
    vx1 = chainer.Variable(np.random.rand(10, 11, 12).astype(np.float32))
    vy = chainer.functions.maximum(vx0, vx1)

    A = Placeholder(label="A")
    B = Placeholder(label="B")
    C = Placeholder(label="C")
    px0 = PlaceholderVariable([A, B, C])
    px1 = PlaceholderVariable([A, B, C])
    py = chainer.functions.maximum(px0, px1)

    graph = ChainerConverter().convert([px0, px1], [py])

    A.value = 10
    B.value = 11
    C.value = 12
    generate_kernel_test_case(
        description=f"[chainer] F.maximum with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={
            graph.inputs[0]: vx0.data,
            graph.inputs[1]: vx1.data
        },
        expected={graph.outputs[0]: vy.data})
Code example #17
def add_placeholder_constraint(p1: Union[int, Placeholder], p2: Union[int, Placeholder]):
    if Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
        assert Placeholder.force_int(p1) == Placeholder.force_int(p2), f"Conflict is detected: " \
                                                                       f"(p1)={Placeholder.force_int(p1)}, " \
                                                                       f"(p2)={Placeholder.force_int(p2)}"

    elif Placeholder.check_resolved(p1) and not Placeholder.check_resolved(p2):
        p2.value = Placeholder.force_int(p1)

    elif not Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
        p1.value = Placeholder.force_int(p2)
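A short usage sketch of the constraint above (check_resolved, force_int, label and the .value setter all appear on this page; the webdnn.graph.placeholder module path is an assumption):

from webdnn.graph.placeholder import Placeholder

N = Placeholder(label="N")
add_placeholder_constraint(N, 10)  # p1 unresolved, p2 resolved -> N.value becomes 10
add_placeholder_constraint(N, 10)  # both resolved and equal -> passes silently
assert Placeholder.force_int(N) == 10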
Code example #18
def placeholder_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    shape = [
        Placeholder() if dim.size() == -1 else dim.size()
        for dim in tf_op.get_attr("shape").dim
    ]
    if any(not Placeholder.check_resolved(s) for s in shape):
        raise NotImplementedError(
            f"[TensorFlowConverter] Operator \"Placeholder\" with dynamic shape variable is not supported yet."
        )

    converter.set_variable(tf_op.outputs[0],
                           Variable(shape, Order([None for _ in shape])))
Code example #19
File: kernel.py Project: xczhanjun/webdnn
    def unresolved_placeholders(self):
        result = []

        if not Placeholder.check_resolved(self.width):
            result += [self.width]

        if not Placeholder.check_resolved(self.height):
            result += [self.height]

        if not Placeholder.check_resolved(self.depth):
            result += [self.depth]

        return result
Code example #20
File: kernel.py Project: fossabot/hash2face
    def get_depend_placeholders(self) -> Set[Placeholder]:
        result = set()

        if not Placeholder.check_resolved(self.width):
            result.update(self.width.get_depend_placeholders())

        if not Placeholder.check_resolved(self.height):
            result.update(self.height.get_depend_placeholders())

        if not Placeholder.check_resolved(self.depth):
            result.update(self.depth.get_depend_placeholders())

        return result
Code example #21
File: axiswise_bias.py Project: xczhanjun/webdnn
    def __call__(self, x: Variable, b: Variable):
        assert b.ndim == 1, f"Bias variable of AxiswiseBias operator should be 1D variable: b.ndim={b.ndim}"

        axis = self.axis
        if Placeholder.check_resolved(
                x.shape_dict[axis]) and Placeholder.check_resolved(b.size):
            assert x.shape_dict[axis] == b.size, f"Dimension mismatch: x.shape_dict[{axis}]={x.shape_dict[axis]}, " \
                                                 f"b.size={b.size}"

        y = Variable(x.shape, x.order)
        self.append_input("x", x)
        self.append_input("b", b)
        self.append_output("y", y)
        return y,
Code example #22
    def __call__(self, x: Variable, s: Variable):
        assert s.ndim == 1, f"Scale variable of AxiswiseScale operator should be 1D variable: s.ndim={s.ndim}"

        axis = self.axis
        if Placeholder.check_resolved(
                x.shape_dict[axis]) and Placeholder.check_resolved(s.size):
            assert x.shape_dict[axis] == s.size, f"Dimension mismatch: x.shape_dict[{axis}]={x.shape_dict[axis]}, " \
                                                 f"s.size={s.size}"

        y = Variable(x.shape, x.order)
        self.append_input("x", x)
        self.append_input("s", s)
        self.append_output("y", y)
        return y,
Code example #23
    def append(self, var: Variable, offset: Union[int, Placeholder] = -1, buffer_type: Optional[BufferType] = None):
        if buffer_type is None:
            if Placeholder.check_resolved(offset) and Placeholder.check_resolved(var.size):
                buffer_type = BufferType.Static
            else:
                buffer_type = BufferType.Dynamic

        if offset == -1:
            if buffer_type is BufferType.Static:
                offset = self.static_size
            else:
                offset = self.dynamic_size

        self.allocations[var.name] = Allocation(var, offset, buffer_type)
Code example #24
File: sgemm.py Project: fossabot/hash2face
    def exec(self):
        A = self.inputs["A"]
        B = self.inputs["B"]
        if Placeholder.check_resolved(A.size) and Placeholder.check_resolved(
                self.M * self.K):
            assert A.size == self.M * self.K
        if Placeholder.check_resolved(B.size) and Placeholder.check_resolved(
                self.N * self.K):
            assert B.size == self.N * self.K

        C = Variable(self.parameters["out_shape"],
                     self.parameters["out_order"])
        self.append_output("C", C)

        return C,
Code example #25
    def __call__(self, A: Variable, B: Variable):
        if Placeholder.check_resolved(A.size) and Placeholder.check_resolved(
                self.M * self.K):
            assert A.size == self.M * self.K
        if Placeholder.check_resolved(B.size) and Placeholder.check_resolved(
                self.N * self.K):
            assert B.size == self.N * self.K

        C = Variable(self.parameters["out_shape"],
                     self.parameters["out_order"])

        self.append_input("A", A)
        self.append_input("B", B)
        self.append_output("C", C)
        return C,
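Both sgemm snippets pass the product self.M * self.K straight to Placeholder.check_resolved, which suggests that placeholders support arithmetic and that an expression stays unresolved until every operand is. A hedged sketch of that behaviour (implied by the sgemm checks and by get_depend_placeholders in code example #20, not verified against the library; the module path is an assumption):

from webdnn.graph.placeholder import Placeholder

M = Placeholder(label="M")
K = Placeholder(label="K")
MK = M * K
Placeholder.check_resolved(MK)  # False: both operands are still unresolved
M.value = 10
K.value = 12
Placeholder.check_resolved(MK)  # True
Placeholder.force_int(MK)       # 120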
Code example #26
File: allocator.py Project: fossabot/hash2face
def allocate(graph: Graph) -> WebGLMemoryLayout:
    nodes = traverse.listup_nodes(graph)
    operators = traverse.filter_nodes(nodes, Operator)  # type: List[Operator]
    variables = traverse.filter_nodes(nodes, Variable)  # type: List[Variable]

    for i, v in enumerate(variables):
        if v.name is None:
            v.name = _name("v")

    dynamic_constants = traverse.filter_nodes([v for v in variables if not Placeholder.check_resolved(v.size)], ConstantVariable)
    assert len(dynamic_constants) == 0, f"ConstantVariable with unresolved placeholder shape is detected: {dynamic_constants}"

    allocations = _get_allocations(graph, operators, variables)
    _optimize_buffer_reuse(allocations)

    variable_allocations = {v: allocations[v] for v in variables if not isinstance(v, ConstantVariable)}
    constant_allocations = {v: allocations[v] for v in variables if isinstance(v, ConstantVariable)}

    data = _update_constant_offset(constant_allocations)

    allocations = variable_allocations
    allocations.update(constant_allocations)

    layout = WebGLMemoryLayout(allocations, data)
    return layout
Code example #27
    def change_order(self, order: Order) -> "Variable":
        """change_order_statement(order)

        Change variable order.

        When the number of dimensions increases, new axes of size one are created.
        Conversely, when the number of dimensions decreases, the axes to be removed must have size one.

        Args:
            order: new order
        """
        old_shape_dict = self.shape_dict
        new_shape = [old_shape_dict.get(axis, 1) for axis in order.axes]
        for axis, size in old_shape_dict.items():
            if axis not in order.axes:
                if Placeholder.check_resolved(size):
                    assert size == 1, f"""
[Variable.change_order()] The size of axes which will be removed must be one:
    (variable) = {self}
    (shape[{axis}]) = {size}
    (new order) = {order}
"""
        self._order = order
        self._shape = new_shape

        return self
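A usage sketch of the rule in the docstring (OrderNC is assumed to be available alongside the OrderNCHW/OrderNHWC constants used elsewhere on this page, and the module paths are assumptions):

from webdnn.graph.order import OrderNC, OrderNCHW
from webdnn.graph.variable import Variable

v = Variable([2, 3], OrderNC)
v.change_order(OrderNCHW)
print(v.shape)  # [2, 3, 1, 1]: the missing H and W axes are created with size 1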
Code example #28
    def _convert_tensors(self, tf_tensors: List["tf.Tensor"],
                         orders: List[Order]):
        if orders is None:
            orders = [None for _ in tf_tensors]

        orders = [
            get_default_order(tf_tensor) if order is None else order
            for tf_tensor, order in zip(tf_tensors, orders)
        ]

        assert len(tf_tensors) == len(orders), f"[KerasConverter] Number of specified orders does not match the number " \
                                               f"of tensors: tensors={tf_tensors} orders={orders}"

        variables = []
        for tf_tensor, order in zip(tf_tensors, orders):
            shape = []
            for s, axis in zip(tf_tensor.shape, order.axes):
                if s.value is None:
                    if axis not in self._placeholders:
                        self._placeholders[axis] = Placeholder(label=axis.name)
                    shape.append(self._placeholders[axis])

                else:
                    shape.append(s.value)

            variable = Variable(shape, order)
            self.set_variable(tf_tensor, variable)
            variables.append(variable)

        return variables
Code example #29
File: math.py Project: zhangaz1/webdnn
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    # TODO: Conversion result is wrong when x.shape_dict[axis] is an unresolved placeholder.
    if any(not Placeholder.check_resolved(x.shape_dict[axis])
           for axis in axes):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSumExp\" for dynamic number of categories is not supported"
        )

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
Code example #30
File: allocator.py Project: cys4/webdnn
    def allocate_variables(cls, graph: Graph, variables: List[Variable]):
        # check if constant variable with shape with unresolved placeholder.
        dynamic_constants = traverse.filter_nodes(
            [v for v in variables if not Placeholder.check_resolved(v.size)],
            ConstantVariable)
        assert len(
            dynamic_constants
        ) == 0, f"ConstantVariable with unresolved placeholder shape is detected: {dynamic_constants}"

        ops = traverse.listup_operators(graph)
        layout = MemoryLayout()

        lifetime = get_lifetime(
            graph, ops, variables)  # type: Dict[Variable, Tuple[int, int]]
        offsets = generate_allocation_info(
            variables,
            lifetime)  # type: Dict[Variable, Union[int, Placeholder]]
        for variable, offset in offsets.items():
            layout.append(variable, offset)

        layout.data = np.zeros(layout.static_size, dtype=np.float32)
        constant_size = 0
        for var in variables:
            if not isinstance(var, ConstantVariable):
                continue

            allocation = layout[var]
            layout.data[allocation.offset:allocation.offset +
                        allocation.size] = var.data.flatten()
            constant_size += var.data.size
        layout.data = layout.data[:constant_size]
        if flags.VISUALIZE_MEMORY_ALLOCATION:
            _visualize_allocation(ops, variables, layout, lifetime, offsets)

        return layout