# Example #1 (score: 0)
    def optimize(self, graph: Graph):
        """Split variables marked with ``SplitTarget`` along a chosen axis.

        Some operators do not support splitting but only appear as
        constants.  As a workaround for such cases, ``ConstantFolding`` is
        applied for a limited set of operators (currently only
        ``Transpose``) even if constant folding itself is turned off.

        Returns:
            Tuple of the (possibly mutated) graph and a flag telling
            whether the graph was changed.

        Raises:
            Exception: if the total size of constants decreases after a
                split (sanity check on the split machinery).
        """
        flag_changed = False

        # Fold constant Transpose operators first (see docstring).
        cf = ConstantFolding()
        graph, flag_changed_in_cf = cf.optimize(graph, (Transpose, ))
        flag_changed |= flag_changed_in_cf

        # Total size of all constants before any split; baseline for the
        # sanity check below.
        c_before = traverse.filter_nodes(traverse.listup_variables(graph),
                                         ConstantVariable)
        c_size_before = sum(c.size for c in c_before)

        for v in traverse.filter_nodes(traverse.listup_variables(graph),
                                       SplitTarget):
            axis = _choose_split_axis(v)
            _split_axis(v, axis, graph)
            flag_changed = True

            c_after = traverse.filter_nodes(traverse.listup_variables(graph),
                                            ConstantVariable)
            c_size_after = sum(c.size for c in c_after)

            # NOTE(review): raises when the total constant size shrank —
            # presumably splitting must never drop constant data; confirm
            # the intended invariant.
            if c_size_before > c_size_after:
                raise Exception(
                    f"[SplitVariable] total constant size decreased after "
                    f"split: before={c_size_before}, after={c_size_after}")

        return graph, flag_changed
# Example #2 (score: 0)
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        """Merge duplicated operators (common-subexpression elimination).

        Two operators are merged when they share the same class, consume
        exactly the same input variables (matched by input name), and have
        equal parameters.  One operator of the pair is removed and each of
        its outputs is redirected to the corresponding output of the
        survivor, with a Transpose inserted when two matched outputs
        disagree on data order.

        Returns:
            Tuple of the (mutated) graph and a flag telling whether at
            least one pair of operators was merged.
        """
        flag_changed = False
        variables = traverse.listup_variables(graph)

        while len(variables) > 0:
            x = variables.pop()
            # Examine every ordered pair of operators that consume `x`.
            for op1, op2 in itertools.permutations(x.input_to, 2):
                if op2 is op1:
                    # defensive: permutations over distinct consumers should
                    # never yield the same object twice
                    continue

                if op2.__class__ != op1.__class__:
                    # class is not same
                    continue

                if any((x_name not in op2.inputs) or (
                        op2.inputs[x_name] != op1.inputs[x_name])
                       for x_name in op1.inputs.keys()):
                    # input is not same
                    continue

                if any((key not in op2.parameters) or (
                        op2.parameters[key] != op1.parameters[key])
                       for key in op1.parameters.keys()):
                    # parameter is not same
                    continue

                # op1 and op2 compute the same thing: keep op1, drop op2.
                flag_changed = True

                vs_1 = dict(op1.outputs)
                vs_2 = dict(op2.outputs)

                op2.remove_all()

                for v_name, v1 in vs_1.items():
                    v2 = vs_2[v_name]
                    if v1.order == v2.order:
                        """
                                    +-{op3}-
                        -{op1}- v1 -+
                                    +-{op4}-
                        """
                        OptimizeRule.replace_variable(graph, v2, v1)

                    else:
                        """
                                    +-{op3}-
                        -{op1}- v1 -+
                                    +-{Transpose}- v2 -{op4}-
                        """
                        v2_dummy, = Transpose(None)(v1)
                        v2_dummy.change_order(v2.order)
                        OptimizeRule.replace_variable(graph, v2_dummy, v2)

                # Graph topology changed: restart the scan over a fresh
                # variable list.
                variables = traverse.listup_variables(graph)
                break

        return graph, flag_changed
# Example #3 (score: 0)
    def optimize(self, graph: Graph):
        """Split every variable marked with ``SplitTarget``.

        For each tagged variable a split axis is chosen and the variable is
        split along it.  Returns the graph together with a flag telling
        whether any variable was actually split.
        """
        changed = False

        targets = traverse.filter_nodes(traverse.listup_variables(graph),
                                        SplitTarget)
        for target in targets:
            _split_axis(target, _choose_split_axis(target), graph)
            changed = True

        return graph, changed
# Example #4 (score: 0)
    def optimize(self, graph: Graph):
        """Verify that every variable's texture fits the WebGL size limit.

        This pass performs no transformation: it dumps the graph for
        debugging and asserts that neither texture dimension of any
        variable exceeds ``WEBGL_MAX_TEXTURE_SIZE``.  Always returns
        ``(graph, False)``.
        """
        traverse.dump(graph)
        MAX_SIZE = config.WEBGL_MAX_TEXTURE_SIZE

        for v in traverse.listup_variables(graph):
            height, width = TextureShape.get(v)
            # NOTE(review): `assert` is stripped under `python -O`, so this
            # check is diagnostic only.
            assert height <= MAX_SIZE and width <= MAX_SIZE, f"Texture size is invalid: {v.name} \n" \
                                                             f"  (variable shape)={v.shape}, \n" \
                                                             f"  (channel mode)={ChannelMode.get(v).name}, \n" \
                                                             f"  (texture shape)=(width={width}, height={height}), \n" \
                                                             f"  (WEBGL_MAX_TEXTURE_SIZE)={config.WEBGL_MAX_TEXTURE_SIZE}"
        return graph, False
# Example #5 (score: 0)
    def optimize(self, graph: Graph):
        """Split variables marked with ``SplitTarget`` along a chosen axis.

        Returns:
            Tuple of the (possibly mutated) graph and a flag telling
            whether the graph was changed.

        Raises:
            Exception: if the total size of constants decreases after a
                split (sanity check on the split machinery).
        """
        flag_changed = False

        # Total size of all constants before any split; baseline for the
        # sanity check below.
        c_before = traverse.filter_nodes(traverse.listup_variables(graph),
                                         ConstantVariable)
        c_size_before = sum(c.size for c in c_before)

        for v in traverse.filter_nodes(traverse.listup_variables(graph),
                                       SplitTarget):
            axis = _choose_split_axis(v)
            _split_axis(v, axis, graph)
            flag_changed = True

            c_after = traverse.filter_nodes(traverse.listup_variables(graph),
                                            ConstantVariable)
            c_size_after = sum(c.size for c in c_after)

            # NOTE(review): raises when the total constant size shrank —
            # presumably splitting must never drop constant data; confirm
            # the intended invariant.
            if c_size_before > c_size_after:
                raise Exception(
                    f"[SplitVariable] total constant size decreased after "
                    f"split: before={c_size_before}, after={c_size_after}")

        return graph, flag_changed
# Example #6 (score: 0)
    def allocate(cls,
                 graph: Graph) -> Tuple[MemoryLayout, MemoryLayout, np.array]:
        """Allocate memory layouts for the whole graph.

        Constants and ordinary variables are laid out separately.

        Returns:
            (variables_layout, constants_layout, constant data buffer)
        """
        all_vars = set(traverse.listup_variables(graph))
        const_vars = set(traverse.filter_nodes(
            all_vars, Constant))  # type: Set[ConstantVariable]

        # Everything that is not a constant is a plain runtime variable.
        plain_vars = list(all_vars - const_vars)

        constants_layout, data = cls.allocate_constants(list(const_vars))
        variables_layout = cls.allocate_variables(graph, plain_vars)
        return variables_layout, constants_layout, data
    def optimize(self, graph: Graph):
        """Tag oversized variables for splitting.

        A variable whose texture exceeds ``WEBGL_MAX_TEXTURE_SIZE`` in
        either dimension is given a ``SplitTarget`` attribute (unless it
        already has one).  Returns the graph and a flag telling whether any
        new tag was added.
        """
        limit = config.WEBGL_MAX_TEXTURE_SIZE
        changed = False

        for variable in traverse.listup_variables(graph):
            height, width = TextureShape.get(variable)
            if height > limit or width > limit:
                # Oversized: mark for splitting unless already marked.
                if not variable.has_attribute(SplitTarget):
                    changed = True
                    variable.attributes.add(SplitTarget(variable))

        return graph, changed
# Example #8 (score: 0)
    def optimize(self, graph: Graph):
        """Verify texture sizes after splitting.

        Dumps the graph for debugging, then asserts that every variable
        whose size is already resolved fits within
        ``WEBGL_MAX_TEXTURE_SIZE`` in both texture dimensions.  Variables
        with unresolved (placeholder) sizes are skipped.  Always returns
        ``(graph, False)``.
        """
        traverse.dump(graph)
        MAX_SIZE = config.WEBGL_MAX_TEXTURE_SIZE

        for v in traverse.listup_variables(graph):
            if not Placeholder.check_resolved(v.size):
                # size still symbolic; texture shape cannot be checked yet
                continue

            height, width = TextureShape.get(v)
            # NOTE(review): `assert` is stripped under `python -O`, so this
            # check is diagnostic only.
            assert height <= MAX_SIZE and width <= MAX_SIZE, f"""
[SplitTexture] Texture size is invalid: {v.name}
    (variable shape)={v.shape}
    (channel mode)={ChannelMode.get(v).name}
    (texture shape)=(width={width}, height={height})
    (WEBGL_MAX_TEXTURE_SIZE)={config.WEBGL_MAX_TEXTURE_SIZE}"""

        return graph, False
# Example #9 (score: 0)
    def _transpose_vars(self, graph: Graph):
        """Transpose every variable's order into the typical WebDNN order.

        Constants are ordered by how their single consumer uses them
        (OrderC / OrderCN, and for 4-D weights OrderHWNC for convolutions
        or OrderHWCN for Linear); other variables get the standard
        OrderC / OrderNC / OrderNHWC by rank.
        """
        for n_var in traverse.listup_variables(graph):
            if isinstance(n_var.output_from, ReinterpretAxis):
                # workaround for MatMulVarVar
                continue

            if not isinstance(n_var, ConstantVariable):
                # Non-constant variables: pick the standard order by rank.
                if n_var.ndim == 1:
                    n_var.change_order(OrderC)
                elif n_var.ndim == 2:
                    n_var.change_order(OrderNC)
                elif n_var.ndim == 4:
                    n_var.change_order(OrderNHWC)
                continue

            # Constant variables: order depends on the consuming operator.
            if n_var.ndim == 1:
                n_var.change_order(OrderC)
            elif n_var.ndim == 2:
                n_var.change_order(OrderCN)
            elif n_var.ndim == 4:
                assert len(n_var.input_to) == 1
                first_input_to = next(iter(n_var.input_to))

                if isinstance(first_input_to,
                              (Convolution2D, Deconvolution2D)):
                    n_var.change_order(OrderHWNC)
                elif isinstance(first_input_to, Linear):
                    n_var.change_order(OrderHWCN)
                else:
                    raise NotImplementedError(
                        f"Unknown data format: {n_var}")
# Example #10 (score: 0)
    def allocate(cls, graph: Graph) -> MemoryLayout:
        """Assign sequential names to all variables and lay them out.

        Each unique variable is renamed ``v0``, ``v1``, ... before
        allocation; returns the resulting memory layout.
        """
        unique_vars = list(set(traverse.listup_variables(graph)))
        for index, variable in enumerate(unique_vars):
            variable.name = f"v{index}"

        return cls.allocate_variables(graph, unique_vars)
# Example #11 (score: 0)
def test_listup_variables_residual():
    """listup_variables enumerates a residual graph's variables in order."""
    global graph, v0, v1, v2, v3
    result = listup_variables(graph)

    assert tuple(result) == (v0, v1, v2, v3)
# Example #12 (score: 0)
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        """Remove redundant conversions from every variable in the graph.

        Returns the graph and a flag telling whether any conversion was
        actually removed.
        """
        changed = False
        for variable in traverse.listup_variables(graph):
            if _remove_redundant_conversion(variable):
                changed = True

        return graph, changed