Example #1
    def __call__(self, *xs: Variable):
        """
        Args:
            *xs (:class:`~webdnn.graph.variable.Variable`): Inputs

        Returns:
            tuple of :class:`~webdnn.graph.variable.Variable`: Output
        """
        concat_axis = self.parameters["axis"]  # type: Axis
        axis_index = xs[0].order.axes_dict[concat_axis]
        axes_set = set(xs[0].order.axes)

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0

        for i, x in enumerate(xs):
            assert set(x.order.axes) == axes_set
            for other_axis in [other_axis for other_axis in axes_set if other_axis != concat_axis]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]) and Placeholder.check_resolved(x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis]

            self.append_input(f"x{i}", x)
            y_shape[axis_index] += x.shape_dict[concat_axis]

        y = Variable(y_shape, xs[0].order)
        self.append_output("y", y)
        return y,
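Note: every guard in these examples follows the same pattern: a shape is only asserted when both sides are already concrete. A minimal sketch of the Placeholder.check_resolved() contract, assuming webdnn's documented Placeholder API (module path taken from the docstring in Example #1; the label/value usage is an assumption, not shown in the snippets):

    from webdnn.graph.placeholder import Placeholder

    N = Placeholder(label="N")                 # symbolic batch size
    assert not Placeholder.check_resolved(N)   # still unresolved
    assert Placeholder.check_resolved(3)       # plain ints always count as resolved

    N.value = 8                                # assumption: assigning .value resolves the placeholder
    assert Placeholder.check_resolved(N)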
Example #2
    def exec(self):
        xs = [self.inputs[f"x{i}"] for i in range(len(self.inputs))]
        axis = self.axis
        axis_index = xs[0].order.axes_dict[axis]
        axes = xs[0].order.axes

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0
        y_order = xs[0].order

        for a in y_order.axes:
            if a == axis:
                continue

            self.attributes.add(Tensorwise(self, a))

        for i, x in enumerate(xs):
            assert x.order.check_same_axes(xs[0].order), f"""
[Concat] Input variable of Concat operator must have same axes
  (x0.order.axes) = {xs[0].order.axes}
  (x{i}.order.axes) = {xs[i].order.axes}"""

            for other_axis in [other_axis for other_axis in axes if other_axis != axis]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]) and Placeholder.check_resolved(x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis], f"""
[Concat] Input variable of Concat operator must be same shape except the specified axis:
  (x0.shape_dict[{other_axis}]) = {xs[0].shape_dict[other_axis]}
  (x{i}.shape_dict[{other_axis}]) = {xs[i].shape_dict[other_axis]}"""

            y_shape[axis_index] += x.shape_dict[axis]

        y = Variable(y_shape, y_order)
        self.append_output("y", y)
        return y,
Example #3
    def __call__(self, x: Variable, w: Variable) -> Tuple[Variable]:
        assert x.order.check_same_axes(OrderNCHW), \
            "Input variable of Convolution2D must have N, C, H, and W axes.: " \
            f"x.order.axes={x.order.axes}"

        assert w.order.check_same_axes(OrderNCHW), \
            "Kernel variable of Convolution2D must have N, C, H, and W axes.: " \
            f"w.order.axes={w.order.axes}"

        if Placeholder.check_resolved(w.shape_dict[Axis.H]) and Placeholder.check_resolved(w.shape_dict[Axis.W]):
            assert (w.shape_dict[Axis.H], w.shape_dict[Axis.W]) == self.ksize, \
                "Kernel variable of Convolution2D must be same spatial size as ksize parameter: " \
                f"w.shape_dict[Axis.H]={w.shape_dict[Axis.H]}, " \
                f"w.shape_dict[Axis.W]={w.shape_dict[Axis.W]}, " \
                f"self.ksize={self.ksize}"

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], \
                "Input and Kernel variables of Convolution2D must be same channel size: " \
                f"x.shape_dict[Axis.C]={x.shape_dict[Axis.C]}, " \
                f"w.shape_dict[Axis.C]={w.shape_dict[Axis.C]}"

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] + 2 * self.PH - self.WH) // self.SH + 1
        W2 = (x.shape_dict[Axis.W] + 2 * self.PW - self.WW) // self.SW + 1
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
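The H2/W2 computed above is the standard convolution output-size formula. A quick pure-Python check with hypothetical numbers (no webdnn dependency; PH, WH, SH mirror the attribute names used above):

    H, PH, WH, SH = 28, 1, 3, 1                # hypothetical input height, padding, kernel height, stride
    H2 = (H + 2 * PH - WH) // SH + 1
    assert H2 == 28                            # a 3x3 kernel with padding 1 and stride 1 preserves the size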
Example #4
    def __call__(self, x: Variable, w: Variable):
        """
        Args:
            x (:class:`~webdnn.graph.variable.Variable`): Input
            w (:class:`~webdnn.graph.variable.Variable`): Filter

        Returns:
            tuple of :class:`~webdnn.graph.variable.Variable`: Output
        """
        x_shape_dict = x.shape_dict
        w_shape_dict = w.shape_dict
        if Placeholder.check_resolved(w_shape_dict[Axis.H]) and Placeholder.check_resolved(w_shape_dict[Axis.W]):
            assert (w_shape_dict[Axis.H], w_shape_dict[Axis.W]) == self.ksize
        if Placeholder.check_resolved(w_shape_dict[Axis.C]) and Placeholder.check_resolved(x_shape_dict[Axis.C]):
            assert w_shape_dict[Axis.C] == x_shape_dict[Axis.C]

        N = x_shape_dict[Axis.N]
        H2 = (x_shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x_shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w_shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
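The formula above is the inverse of the convolution formula in Example #3: it recovers the input size that a convolution with the same stride, padding, and kernel would have consumed. A worked check with hypothetical numbers:

    H, SH, PH, KH = 14, 2, 1, 4                # hypothetical input height, stride, padding, kernel height
    H2 = (H - 1) * SH - 2 * PH + KH
    assert H2 == 28                            # (14-1)*2 - 2*1 + 4 = 28: stride-2 deconvolution upsamples 14 -> 28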
Example #5
    def __call__(self, x: Variable, w: Variable):
        assert x.order.check_same_axes(OrderNCHW), f"""
[Deconvolution2D] Input variable of Deconvolution2D must have N, C, H, and W axes:
    (x.order.axes) = {x.order.axes}"""

        assert w.order.check_same_axes(Order([Axis.N, Axis.KH, Axis.KW, Axis.C])), f"""
[Deconvolution2D] Kernel variable of Deconvolution2D must have N, C, KH, and KW axes:
    (w.order.axes) = {w.order.axes}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.KH]) and Placeholder.check_resolved(w.shape_dict[Axis.KW]):
            assert (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]) == self.ksize, f"""
[Deconvolution2D] Kernel variable of Deconvolution2D must be same spatial size as ksize parameter:
    (w.shape_dict[Axis.KH]) = {w.shape_dict[Axis.KH]}
    (w.shape_dict[Axis.KW]) = {w.shape_dict[Axis.KW]}
    (self.ksize) = {self.ksize}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], f"""
[Deconvolution2D] Input and Kernel variables of Deconvolution2D must be same channel size:
    (x.shape_dict[Axis.C]) = {x.shape_dict[Axis.C]}
    (w.shape_dict[Axis.C]) = {w.shape_dict[Axis.C]}"""

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x.shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
Example #6
    def __call__(self, *xs: "variable.Variable"):
        y_axes = []
        y_shape_dict = AxisKeyDict()

        # Check variables in descending order of number of dimensions.
        # Without this, when x0.order=C and x1.order=NC, the output order would be CN; the expected result is NC.
        xs_order = [(i, x) for i, x in enumerate(xs)]
        xs_order.sort(key=lambda d: d[1].ndim, reverse=True)

        for i, x in xs_order:
            for axis in x.order.axes:
                if axis in y_axes:
                    if y_shape_dict[axis] == 1:
                        # broadcast
                        y_shape_dict[axis] = x.shape_dict[axis]
                else:
                    y_axes.append(axis)
                    y_shape_dict[axis] = x.shape_dict[axis]

                if Placeholder.check_resolved(x.shape_dict[axis]):
                    if Placeholder.check_resolved(y_shape_dict[axis]):
                        assert y_shape_dict[axis] == x.shape_dict[axis] or x.shape_dict[axis] == 1, \
                            "All input variables of elementwise operator should be same shape: " \
                            f"y.shape_dict[{axis}]={y_shape_dict[axis]}, " \
                            f"x{i}.shape_dict[{axis}]={x.shape_dict[axis]}"
                    else:
                        y_shape_dict[axis] = x.shape_dict[axis]

        y = variable.Variable([y_shape_dict[axis] for axis in y_axes], Order(y_axes))
        ChannelMode.set(y, ChannelModeEnum.R)

        for i, x in enumerate(xs):
            self.append_input(f"x{i}", x)
        self.append_output("y", y)
        return y,
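The ndim-descending sort is what makes the output order follow the highest-rank input. The axis-merge step can be reproduced with plain lists (hypothetical string stand-ins for webdnn's Axis objects):

    def merge_axes(orders):
        # visit inputs with more dimensions first, as the sort above does
        y_axes = []
        for axes in sorted(orders, key=len, reverse=True):
            for a in axes:
                if a not in y_axes:
                    y_axes.append(a)
        return y_axes

    assert merge_axes([["C"], ["N", "C"]]) == ["N", "C"]   # without the sort, the result would be ["C", "N"]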
Example #7
    def __call__(self, x: Variable, w: Variable):
        assert set(x.order.axes) == {Axis.N, Axis.C, Axis.H, Axis.W}, \
            "Input variable of Deconvolution2D must have N, C, H, and W axes.: " \
            f"x.order.axes={x.order.axes}"

        assert set(w.order.axes) == {Axis.N, Axis.C, Axis.H, Axis.W}, \
            "Kernel variable of Deconvolution2D must have N, C, H, and W axes.: " \
            f"w.order.axes={w.order.axes}"

        if Placeholder.check_resolved(w.shape_dict[Axis.H]) and Placeholder.check_resolved(w.shape_dict[Axis.W]):
            assert (w.shape_dict[Axis.H], w.shape_dict[Axis.W]) == self.ksize, \
                "Kernel variable of Deconvolution2D must be same spatial size as ksize parameter: " \
                f"w.shape_dict[Axis.H]={w.shape_dict[Axis.H]}, " \
                f"w.shape_dict[Axis.W]={w.shape_dict[Axis.W]}, " \
                f"self.ksize={self.ksize}"

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], \
                "Input and Kernel variables of Deconvolution2D must be same channel size: " \
                f"x.shape_dict[Axis.C]={x.shape_dict[Axis.C]}, " \
                f"w.shape_dict[Axis.C]={w.shape_dict[Axis.C]}"

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] - 1) * self.SH - 2 * self.PH + self.KH
        W2 = (x.shape_dict[Axis.W] - 1) * self.SW - 2 * self.PW + self.KW
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_input("x", x)
        self.append_input("w", w)
        self.append_output("y", y)
        return y,
Example #8
    def __call__(self, *xs: Variable):
        axis = self.axis
        axis_index = xs[0].order.axes_dict[axis]
        axes = xs[0].order.axes

        y_shape = list(xs[0].shape)  # type: List[Placeholder]
        y_shape[axis_index] = 0

        for i, x in enumerate(xs):
            assert x.order.check_same_axes(xs[0].order), "Input variable of Concat operator must have same axes: " \
                                                         f"x0.order.axes={xs[0].order.axes}, x{i}.order.axes={xs[i].order.axes}"

            for other_axis in [other_axis for other_axis in axes if other_axis != axis]:
                if Placeholder.check_resolved(xs[0].shape_dict[other_axis]) and Placeholder.check_resolved(x.shape_dict[other_axis]):
                    assert xs[0].shape_dict[other_axis] == x.shape_dict[other_axis], "Input variable of Concat operator must be same " \
                                                                                     f"shape except the specified axis: " \
                                                                                     f"x0.shape_dict[{other_axis}]={xs[0].shape_dict[other_axis]}, " \
                                                                                     f"x{i}.shape_dict[{other_axis}]={xs[i].shape_dict[other_axis]}"

            self.append_input(f"x{i}", x)
            y_shape[axis_index] += x.shape_dict[axis]

        y = Variable(y_shape, xs[0].order)
        self.append_output("y", y)
        return y,
Example #9
    def __call__(self, *xs: "variable.Variable"):
        y_axes = []
        y_shape_dict = AxisKeyDict()
        for i, x in enumerate(xs):
            for axis in x.order.axes:
                if axis in y_axes:
                    if y_shape_dict[axis] == 1:
                        # broadcast
                        y_shape_dict[axis] = x.shape_dict[axis]
                else:
                    y_axes.append(axis)
                    y_shape_dict[axis] = x.shape_dict[axis]

                if Placeholder.check_resolved(x.shape_dict[axis]):
                    if Placeholder.check_resolved(y_shape_dict[axis]):
                        assert y_shape_dict[axis] == x.shape_dict[axis] or x.shape_dict[axis] == 1, \
                            "All input variables of elementwise operator should be same shape: " \
                            f"y.shape_dict[{axis}]={y_shape_dict[axis]}, " \
                            f"x{i}.shape_dict[{axis}]={x.shape_dict[axis]}"
                    else:
                        y_shape_dict[axis] = x.shape_dict[axis]

            self.append_input(f"x{i}", x)

        y = variable.Variable([y_shape_dict[axis] for axis in y_axes],
                              Order(y_axes))
        self.append_output("y", y)
        return y,
Example #10
    def exec(self):
        x = self.inputs["x"]
        w = self.inputs["w"]

        assert x.order.check_same_axes(OrderNCHW), f"""
[Convolution2D] Input variable of Convolution2D must have N, C, H, and W axes:
    (x.order.axes) = {x.order.axes}"""

        assert w.order.check_same_axes(Order([Axis.N, Axis.KH, Axis.KW, Axis.C])), f"""
[Convolution2D] Kernel variable of Convolution2D must have N, C, KH, and KW axes:
    (w.order.axes) = {w.order.axes}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.KH]) and Placeholder.check_resolved(w.shape_dict[Axis.KW]):
            assert (w.shape_dict[Axis.KH], w.shape_dict[Axis.KW]) == self.ksize, f"""
[Convolution2D] Kernel variable of Convolution2D must be same spatial size as ksize parameter:
    (w.shape_dict[Axis.KH]) = {w.shape_dict[Axis.KH]}
    (w.shape_dict[Axis.KW]) = {w.shape_dict[Axis.KW]}
    (self.ksize) = {self.ksize}"""

        if Placeholder.check_resolved(w.shape_dict[Axis.C]) and Placeholder.check_resolved(x.shape_dict[Axis.C]):
            assert w.shape_dict[Axis.C] == x.shape_dict[Axis.C], f"""
[Convolution2D] Input and Kernel variables of Convolution2D must be same channel size:
    (x.shape_dict[Axis.C]) = {x.shape_dict[Axis.C]}
    (w.shape_dict[Axis.C]) = {w.shape_dict[Axis.C]}"""

        N = x.shape_dict[Axis.N]
        H2 = (x.shape_dict[Axis.H] + 2 * self.PH - self.WH) // self.SH + 1
        W2 = (x.shape_dict[Axis.W] + 2 * self.PW - self.WW) // self.SW + 1
        C2 = w.shape_dict[Axis.N]

        y = Variable([N, H2, W2, C2], OrderNHWC)
        y.change_order(x.order)  # output same order as input to preserve following reshape semantics

        self.append_output("y", y)
        return y,
Example #11
    def append(self, v: Variable, offset: IntLike = -1):
        if offset == -1:
            if Placeholder.check_resolved(offset) and Placeholder.check_resolved(v.size):
                offset = self.static_size
            else:
                offset = self.dynamic_size

        self.allocations[v] = Allocation(offset, v.size)
Example #12
File: kernel.py Project: xczhanjun/webdnn
    def unresolved_placeholders(self):
        result = []

        if not Placeholder.check_resolved(self.width):
            result += [self.width]

        if not Placeholder.check_resolved(self.height):
            result += [self.height]

        if not Placeholder.check_resolved(self.depth):
            result += [self.depth]

        return result
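A plausible consumer of this method, sketched under assumptions: kernel and runtime_sizes are hypothetical names, and each returned object is a webdnn Placeholder exposing .label and a .value setter (suggested by its API elsewhere, not shown in this file):

    runtime_sizes = {"N": 8}                   # hypothetical mapping from placeholder label to concrete size
    for p in kernel.unresolved_placeholders():
        p.value = runtime_sizes[p.label]       # assumption: assigning .value resolves the placeholder
    assert kernel.unresolved_placeholders() == []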
Example #13
    def get_depend_placeholders(self) -> Set[Placeholder]:
        result = set()

        if not Placeholder.check_resolved(self.width):
            result.update(self.width.get_depend_placeholders())

        if not Placeholder.check_resolved(self.height):
            result.update(self.height.get_depend_placeholders())

        if not Placeholder.check_resolved(self.depth):
            result.update(self.depth.get_depend_placeholders())

        return result
Example #14
    def __call__(self, x: Variable, s: Variable):
        assert s.ndim == 1, f"Scale variable of AxiswiseScale operator should be 1D variable: s.ndim={s.ndim}"

        axis = self.axis
        if Placeholder.check_resolved(x.shape_dict[axis]) and Placeholder.check_resolved(s.size):
            assert x.shape_dict[axis] == s.size, f"Dimension mismatch: x.shape_dict[{axis}]={x.shape_dict[axis]}, " \
                                                 f"s.size={s.size}"

        y = Variable(x.shape, x.order)
        self.append_input("x", x)
        self.append_input("s", s)
        self.append_output("y", y)
        return y,
Example #15
    def append(self, var: Variable, offset: Union[int, Placeholder] = -1, buffer_type: Optional[BufferType] = None):
        if buffer_type is None:
            if Placeholder.check_resolved(offset) and Placeholder.check_resolved(var.size):
                buffer_type = BufferType.Static
            else:
                buffer_type = BufferType.Dynamic

        if offset == -1:
            if buffer_type is BufferType.Static:
                offset = self.static_size
            else:
                offset = self.dynamic_size

        self.allocations[var.name] = Allocation(var, offset, buffer_type)
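The branch above encodes the key invariant: an allocation is Static only when both its offset and the variable's size are concrete at compile time; anything touched by an unresolved placeholder falls back to Dynamic. A pure-Python rendering of that rule (the enum is a stand-in for the BufferType imported by the allocator):

    from enum import Enum

    class BufferType(Enum):                    # stand-in for the allocator's BufferType
        Static = 0
        Dynamic = 1

    def classify(offset_resolved: bool, size_resolved: bool) -> BufferType:
        return BufferType.Static if offset_resolved and size_resolved else BufferType.Dynamic

    assert classify(True, False) is BufferType.Dynamic   # a placeholder-sized variable cannot be static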
Example #16
    def __call__(self, x: Variable, b: Variable):
        assert b.ndim == 1, f"Bias variable of AxiswiseBias operator should be 1D variable: b.ndim={b.ndim}"

        axis = self.axis
        if Placeholder.check_resolved(x.shape_dict[axis]) and Placeholder.check_resolved(b.size):
            assert x.shape_dict[axis] == b.size, f"Dimension mismatch: x.shape_dict[{axis}]={x.shape_dict[axis]}, " \
                                                 f"b.size={b.size}"

        y = Variable(x.shape, x.order)
        self.append_input("x", x)
        self.append_input("b", b)
        self.append_output("y", y)
        return y,
Example #17
File: sgemm.py Project: fossabot/hash2face
    def exec(self):
        A = self.inputs["A"]
        B = self.inputs["B"]
        if Placeholder.check_resolved(A.size) and Placeholder.check_resolved(self.M * self.K):
            assert A.size == self.M * self.K
        if Placeholder.check_resolved(B.size) and Placeholder.check_resolved(self.N * self.K):
            assert B.size == self.N * self.K

        C = Variable(self.parameters["out_shape"],
                     self.parameters["out_order"])
        self.append_output("C", C)

        return C,
Example #18
    def __call__(self, A: Variable, B: Variable):
        if Placeholder.check_resolved(A.size) and Placeholder.check_resolved(self.M * self.K):
            assert A.size == self.M * self.K
        if Placeholder.check_resolved(B.size) and Placeholder.check_resolved(self.N * self.K):
            assert B.size == self.N * self.K

        C = Variable(self.parameters["out_shape"],
                     self.parameters["out_order"])

        self.append_input("A", A)
        self.append_input("B", B)
        self.append_output("C", C)
        return C,
Example #19
def _convert_get_item(converter: ChainerConverter, c_op: "chainer.functions.GetItem"):
    x = converter.get_variable(c_op.inputs[0])
    if any(not Placeholder.check_resolved(v) for v in x.shape):
        raise NotImplementedError("[ChainerConverter] \"GetItem\" for dynamic shape variable is not supported ")

    y = x[c_op.slices]
    converter.set_variable(c_op.outputs[0](), y)
Example #20
File: math.py Project: zhangaz1/webdnn
def _convert_logsumexp(converter: ChainerConverter,
                       c_op: "chainer.functions.LogSumExp"):
    x = converter.get_variable(c_op.inputs[0])

    if c_op.axis is None:
        axes = list(x.order.axes)
    else:
        axes = [x.order.axes[i] for i in c_op.axis]

    # TODO: Conversion result is wrong in case x.shape[category_axis] is placeholder.
    if any(not Placeholder.check_resolved(x.shape_dict[axis])
           for axis in axes):
        raise NotImplementedError(
            "[ChainerConverter] \"LogSumExp\" for dynamic number of categories is not supported"
        )

    max_x = x
    for axis in axes:
        max_x, = Max(None, axis=axis)(max_x)
    exp_delta_x, = Exp(None)(x - max_x)

    sum_exp_delta_x = exp_delta_x
    for axis in axes:
        sum_exp_delta_x, = Sum(None, axis=axis)(sum_exp_delta_x)

    y = Log(None)(sum_exp_delta_x)[0] + max_x
    converter.set_variable(c_op.outputs[0](), y)
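The max-subtraction in this conversion is the numerically stable form of the identity logsumexp(x) = max(x) + log(sum(exp(x - max(x)))). A NumPy check with hypothetical data:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0])
    stable = x.max() + np.log(np.exp(x - x.max()).sum())
    naive = np.log(np.exp(x).sum())
    assert np.isclose(stable, naive)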
Example #21
def allocate(graph: Graph) -> MemoryLayout:
    nodes = traverse.listup_nodes(graph)
    operators = traverse.filter_nodes(nodes, Operator)  # type: List[Operator]
    variables = traverse.filter_nodes(nodes, Variable)  # type: List[Variable]

    for v in variables:
        if v.name is None:
            v.name = _name("v")

    dynamic_constants = traverse.filter_nodes([v for v in variables if not Placeholder.check_resolved(v.size)], ConstantVariable)
    assert len(dynamic_constants) == 0, f"ConstantVariable with unresolved placeholder shape is detected: {dynamic_constants}"

    allocations = _get_allocations(graph, operators, variables)
    _optimize_inplace(operators, allocations)

    variable_allocations = {v: allocations[v] for v in variables if not isinstance(v, ConstantVariable)}
    constant_allocations = {v: allocations[v] for v in variables if isinstance(v, ConstantVariable)}

    _update_offset(variable_allocations)
    _optimize_buffer_reuse(variable_allocations)

    data = _update_constant_offset(constant_allocations)

    for allocation in set(variable_allocations.values()):
        allocation.offset += data.size

    allocations = variable_allocations
    allocations.update(constant_allocations)

    layout = MemoryLayout(allocations, data)

    if flags.VISUALIZE_MEMORY_ALLOCATION:
        _visualize_allocation(operators, variables, layout)

    return layout
Example #22
    def change_order(self, order: Order) -> "Variable":
        """change_order_statement(order)

        Change variable order.

        When number of dimension will be increased, axes whose size is one are created.
        Conversely when number of dimension will be decreased, the size of axes which will be removed must be one.

        Args:
            order: new order
        """
        old_shape_dict = self.shape_dict
        new_shape = [old_shape_dict.get(axis, 1) for axis in order.axes]
        for axis, size in old_shape_dict.items():
            if axis not in order.axes:
                if Placeholder.check_resolved(size):
                    assert size == 1, f"""
[Variable.change_order()] The size of axes which will be removed must be one:
    (variable) = {self}
    (shape[{axis}]) = {size}
    (new order) = {order}
"""
        self._order = order
        self._shape = new_shape

        return self
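A minimal usage sketch of the semantics documented above, assuming webdnn's order constants OrderNC and OrderNCHW (import paths as in webdnn's graph package):

    from webdnn.graph.order import OrderNC, OrderNCHW
    from webdnn.graph.variable import Variable

    v = Variable([2, 3], OrderNC)
    v.change_order(OrderNCHW)                  # H and W did not exist, so they are created with size 1
    assert v.shape == [2, 3, 1, 1]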
Example #23
def allocate(graph: Graph) -> WebGLMemoryLayout:
    nodes = traverse.listup_nodes(graph)
    operators = traverse.filter_nodes(nodes, Operator)  # type: List[Operator]
    variables = traverse.filter_nodes(nodes, Variable)  # type: List[Variable]

    for v in variables:
        if v.name is None:
            v.name = _name("v")

    dynamic_constants = traverse.filter_nodes([v for v in variables if not Placeholder.check_resolved(v.size)], ConstantVariable)
    assert len(dynamic_constants) == 0, f"ConstantVariable with unresolved placeholder shape is detected: {dynamic_constants}"

    allocations = _get_allocations(graph, operators, variables)
    _optimize_buffer_reuse(allocations)

    variable_allocations = {v: allocations[v] for v in variables if not isinstance(v, ConstantVariable)}
    constant_allocations = {v: allocations[v] for v in variables if isinstance(v, ConstantVariable)}

    data = _update_constant_offset(constant_allocations)

    allocations = variable_allocations
    allocations.update(constant_allocations)

    layout = WebGLMemoryLayout(allocations, data)
    return layout
Example #24
File: allocator.py Project: cys4/webdnn
    def allocate_variables(cls, graph: Graph, variables: List[Variable]):
        # Check for constant variables whose shape contains an unresolved placeholder.
        dynamic_constants = traverse.filter_nodes(
            [v for v in variables if not Placeholder.check_resolved(v.size)],
            ConstantVariable)
        assert len(dynamic_constants) == 0, \
            f"ConstantVariable with unresolved placeholder shape is detected: {dynamic_constants}"

        ops = traverse.listup_operators(graph)
        layout = MemoryLayout()

        lifetime = get_lifetime(graph, ops, variables)  # type: Dict[Variable, Tuple[int, int]]
        offsets = generate_allocation_info(variables, lifetime)  # type: Dict[Variable, Union[int, Placeholder]]
        for variable, offset in offsets.items():
            layout.append(variable, offset)

        layout.data = np.zeros(layout.static_size, dtype=np.float32)
        constant_size = 0
        for var in variables:
            if not isinstance(var, ConstantVariable):
                continue

            allocation = layout[var]
            layout.data[allocation.offset:allocation.offset + allocation.size] = var.data.flatten()
            constant_size += var.data.size
        layout.data = layout.data[:constant_size]
        if flags.VISUALIZE_MEMORY_ALLOCATION:
            _visualize_allocation(ops, variables, layout, lifetime, offsets)

        return layout
Example #25
    def __call__(self):
        # noinspection PyTypeChecker
        if Placeholder.check_resolved(self.expression):
            return ["(", float(self.expression), ")"]
        else:
            return ["(float(", self.expression, "))"]
Example #26
def _convert_broadcast_to(converter: ChainerConverter, c_op: "chainer.functions.BroadcastTo"):
    x = converter.get_variable(c_op.inputs[0])
    if any(not Placeholder.check_resolved(v) for v in x.shape):
        raise NotImplementedError("[ChainerConverter] \"BroadcastTo\" for dynamic shape variable is not supported ")

    # noinspection PyProtectedMember
    y, = Broadcast(None, out_shape=c_op._shape, out_order=x.order)(x)
    converter.set_variable(c_op.outputs[0](), y)
Example #27
def shape_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    assert all(
        Placeholder.check_resolved(s) for s in x.shape
    ), "[TensorFlowConverter] op 'Shape' with dynamic shape is not supported yet. "

    y = ConstantVariable(np.array(x.shape), Order([None]))
    converter.set_variable(tf_op.outputs[0], y)
Example #28
def _optimize_inplace(operators: Sequence[Operator],
                      allocations_dict: AllocationDict):
    if not (flags.optimize.OPTIMIZE
            and flags.optimize.OPTIMIZE_MEMORY_ALLOCATION
            and flags.optimize.OPTIMIZE_INPLACE_OPERATION):
        console.debug('_optimize_inplace is skipped')
        return

    for op in operators:
        for attr in op.get_attribute(Inplace):  # type: Inplace
            a1 = allocations_dict[attr.get_input()]
            a2 = allocations_dict[attr.get_output()]
            if not Placeholder.check_resolved(a1.size) or not Placeholder.check_resolved(a2.size):
                continue

            _merge_allocation(allocations_dict, a1, a2)
Example #29
    def __call__(self, *xs: "variable.Variable"):
        y = variable.Variable(xs[0].shape, xs[0].order)
        for i, x in enumerate(xs):
            for axis in x.order.axes:
                assert axis in y.order.axes, f"All input variables of elementwise operator should be same shape. x[{i}] does not have " \
                                             f"{axis}: x0.order={xs[0].order}, x{i}.order={xs[i].order}"

                if Placeholder.check_resolved(x.shape_dict[axis]) and Placeholder.check_resolved(y.shape_dict[axis]):
                    assert y.shape_dict[axis] == x.shape_dict[axis], "All input variables of elementwise operator should be " \
                                                                     f"same shape: x0.shape_dict={xs[0].shape_dict}, x{i}" \
                                                                     f".shape_dict={xs[i].shape_dict}"

            self.append_input(f"x{i}", x)
        self.append_output("y", y)
        return y,
Example #30
File: array.py Project: VislaLabs/webdnn-1
def _convert_flip_ud(converter: ChainerConverter,
                     c_op: "chainer.functions.FlipUD"):
    x = converter.get_variable(c_op.inputs[0])
    if any(not Placeholder.check_resolved(v) for v in x.shape):
        raise NotImplementedError(
            "[ChainerConverter] \"FlipUD\" for dynamic shape variable is not supported "
        )

    converter.set_variable(c_op.outputs[0](), x[::-1, :])
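The conversion maps FlipUD to a reversed slice along the first axis; the NumPy counterpart shows the equivalence:

    import numpy as np

    a = np.arange(6).reshape(3, 2)
    assert (a[::-1, :] == np.flipud(a)).all()  # flipud is exactly a reversed slice on axis 0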