Example #1
0
 def __call__(self, xgraph: XGraph):
     """Run the mutation pass over every layer of the given XGraph.

     Each layer is fed through `self.visit`; layers for which the visitor
     returns a truthy result are added (with cleared tops) to a fresh
     XGraph that carries over the original graph's name and meta
     attributes.
     """
     self.xgraph = xgraph
     mutated = XGraph(xgraph.get_name())
     mutated.copy_meta_attrs(xgraph)
     for layer in xgraph.get_layers():
         visited = self.visit(layer)
         if not visited:
             continue
         # Tops must be empty before insertion into the new graph
         visited.tops = []
         mutated.add(visited)
     return mutated
Example #2
0
def xgraph_build_func(xgraph: XGraph,
                      target: str,
                      xtype,
                      layout='NCHW',
                      **kwargs) -> XGraph:
    """Build a runtime XGraph for the given target.

    Replaces every partition/subgraph in `xgraph` whose 'target' attribute
    matches `target` by a single layer of type `xtype`, inserts
    TupleGetItem layers to expose the subgraph outputs, possibly merges
    trailing Transpose layers into those TupleGetItem layers, and returns
    a new, topologically sorted XGraph.

    Arguments
    ---------
    xgraph: XGraph
        the (partitioned, possibly compiled) graph to transform
    target: str
        the build target; only subgraphs with a matching 'target'
        attribute are replaced
    xtype
        the layer type assigned to the generated subgraph layers
    layout: str
        the data layout to transform the graph to; 'NCHW' or 'NHWC'
    kwargs
        extra attributes copied onto each subgraph layer; a key clashing
        with an internally computed attribute raises a ValueError
    """

    fancy_logger.banner("Subgraph build func, target: {}, layout: {}".format(
        target, layout))

    compiler_output = xgraph.get_compiler_output() if xgraph.is_compiled() \
        else None
    compiler_output_keys = list(compiler_output.keys()) \
        if compiler_output else []
    logger.debug("Compiler output keys: {}".format(compiler_output_keys))

    if layout not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Supported layouts are [NCHW, NHWC] but got: {}".format(layout))

    layout_transform_pass = \
        XGraphLayoutTransformationPass(layout, target=target)
    xgraph = layout_transform_pass.execute(xgraph, subgraphs_only=False)

    xgraph_factory = XGraphFactory()
    xgraph_partitioner = XGraphPartitioner()

    subgraphs = {
        xp.name: xp
        for xp in xgraph_partitioner.get_subgraphs(xgraph)
    }

    # Retrieve CompilerOutput if available
    # compiler_output = xgraph.get_compiler_output() if xgraph.is_compiled() \
    #     else None
    # compiler_output_keys = list(compiler_output.keys()) \
    #     if compiler_output else []
    # logger.debug("Compiler output keys: {}".format(compiler_output_keys))
    # Keep track of the visited partitions/subgraphs and the layers
    #   inside the partition
    visited_xps = {}

    # Keep track of the subgraph output tensors and the corresponding
    #   new layers (TupleGetItem or Transpose)
    xp_out_tensors_2_layers = {}

    name_changes = {}
    net_map = {}
    net = []
    for X in xgraph.get_layers():

        if X.subgraph is not None and X.subgraph not in visited_xps:

            Xp = subgraphs[X.subgraph]

            if 'target' in Xp.attrs and Xp.attrs['target'] == target:

                visited_xps[Xp.name] = set([X.name])

                logger.debug("XSHAPES: {}".format(X.shapes))

                bottoms = Xp.bottoms

                # Keep track of subgraph input and output names
                sub_xgraph = xgraph_factory.build_from_xlayer(Xp.subgraph_data)

                input_names = Xp.attrs['input_names'][:]
                output_names = Xp.attrs['output_names'][:]
                input_layers = \
                    [sub_xgraph.get(in_name) for in_name in input_names]
                output_layers = \
                    [sub_xgraph.get(out_name) for out_name in output_names]

                attrs = {
                    'input_names': input_names,
                    'output_names': output_names,
                    'input_layers':
                    {il.name: il.layer[:]
                     for il in input_layers},
                    'output_layers':
                    {ol.name: ol.layer[:]
                     for ol in output_layers}
                }
                # Merge caller-supplied attributes; clashes are an error
                for k, v in kwargs.items():
                    if k in attrs:
                        raise ValueError("Provided claimed subgraph layer"
                                         " key: {}".format(k))
                    attrs[k] = v

                # Propagate compiler runtime in/out maps to merged layers
                if Xp.name in compiler_output_keys:
                    attrs['rt_in_map'] = compiler_output.get_in_map(Xp.name)
                    for in_name in input_names:
                        for merged_layer in attrs['input_layers'][in_name]:
                            attrs['rt_in_map'][merged_layer] = \
                                attrs['rt_in_map'][in_name]
                    attrs['rt_out_map'] = compiler_output.get_out_map(Xp.name)
                    for out_name in output_names:
                        for merged_layer in attrs['output_layers'][out_name]:
                            attrs['rt_out_map'][merged_layer] = \
                                attrs['rt_out_map'][out_name]

                Xp.attrs.update(attrs)

                shapes = Xp.shapes[:]

                subgraph_X = Xp._replace(
                    # name = X.name,
                    type=[xtype],
                    shapes=shapes,
                    bottoms=bottoms,
                    # Fill tops later
                    tops=[],
                    subgraph_data=[])
                net.append(subgraph_X.name)
                net_map[Xp.name] = subgraph_X

                # Subgraph layers have multiple outputs (Tuple) so we
                #   retrieve the different subgraph outputs
                #   (see output_names variable) using a TupleGetItem
                #   layer
                top_tensors = Xp.attrs['__top_tensors']

                for i, output_name in enumerate(output_names):
                    # Handle merged layers
                    out_tensor = Xp.attrs['output_layers'][output_name][-1]
                    tgi_name = out_tensor
                    # tgi_name = subgraph_X.name + '_tgi' + str(i)

                    top_tensor = top_tensors[output_name]

                    shapes = subgraph_X.shapes[i][:]
                    X_tgi = defaultXLayer()
                    X_tgi = X_tgi._replace(name=tgi_name,
                                           type=['TupleGetItem'],
                                           shapes=shapes,
                                           sizes=shapes.get_size(),
                                           layer=[tgi_name],
                                           tops=top_tensor[:],
                                           bottoms=[subgraph_X.name],
                                           internal=1,
                                           attrs={'index': i})
                    net.append(X_tgi.name)
                    # Keep track of TGI layer for both last merged layer and output name
                    net_map[tgi_name] = X_tgi
                    net_map[output_name] = X_tgi

                    subgraph_X.tops.append(tgi_name)

                    xp_out_tensors_2_layers[output_name] = tgi_name

            else:
                net.append(X.name)
                net_map[X.name] = X

        elif X.subgraph is not None and X.subgraph in visited_xps:
            # Remove layer
            visited_xps[X.subgraph].add(X.name)
        elif 'Transpose' in X.type:
            # Possibly merge transpose in TupleGetItem layer
            bX = net_map[X.bottoms[0]]
            new_tops = []
            for t in bX.tops:
                if t != X.name:
                    new_tops.append(t)
                elif len(X.tops) > 0:
                    new_tops.append(X.tops[0])
            if 'TupleGetItem' in bX.type:
                # NOTE(review): attrs/shapes are mutated in place on the
                #   _replace result, which presumably shares those containers
                #   with bX — confirm this aliasing is intended
                new_X = bX._replace(tops=new_tops)
                new_X.attrs['transpose'] = True
                new_X.attrs['axes'] = X.attrs['axes']
                new_X.shapes[:] = TensorShape(X.shapes[:])
                net_map[new_X.name] = new_X
                name_changes[X.name] = bX.name
            else:
                net.append(X.name)
                net_map[X.name] = X
        else:
            net.append(X.name)
            net_map[X.name] = X

        # Reflect possibly merged layers
        new_bottoms = [
            b if b not in name_changes else name_changes[b] for b in X.bottoms
        ]
        if new_bottoms != X.bottoms:
            new_X = X._replace(bottoms=new_bottoms)
            net_map[X.name] = new_X

    # Set tops and bottoms  & enforce topological sequence
    for xp in visited_xps.keys():
        Xp = subgraphs[xp]

        for b in Xp.bottoms:
            top_name = Xp.name
            bX = xgraph.get(b)
            bX.tops = [(bXt if bXt not in visited_xps[Xp.name] else top_name)
                       for bXt in bX.tops]

        for t in Xp.tops:
            tX = xgraph.get(t)
            tX.bottoms = [(tXb if tXb not in visited_xps[Xp.name] else
                           xp_out_tensors_2_layers[tXb]) for tXb in tX.bottoms]

    # Topological sorting
    X_net = [net_map[e] for e in net]
    top_net = sort_topologically(X_net)

    sub_xgraph = xgraph_factory.build_from_xlayer(top_net)

    # Merge transposes if they are cancelling out
    # optimizer = XGraphTransposesOptimizer(sub_xgraph)
    # optimizer.optimize()

    return sub_xgraph
Example #3
0
    def test_simple_model_opaque_func(self):
        """End-to-end test: build a small Conv + AveragePool ONNX model,
        import it through the 'pyxir.onnx.from_onnx' opaque function and
        partition it with 'pyxir.partition', checking layer names, types,
        shapes and attributes at each stage."""
        # Model input: 1 channel, 4x4 spatial, dynamic batch dimension
        x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
                                          [None, 1, 4, 4])
        x_val = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                            [13, 14, 15, 16]]]]).astype(np.float32)
        # x_init = helper.make_tensor('x', TensorProto.FLOAT, (1, 1, 4, 4),
        #                             list(x_val.reshape(-1)))

        # Create one output (ValueInfoProto)
        z = helper.make_tensor_value_info('z', TensorProto.FLOAT,
                                          [None, 2, 2, 2])

        # Convolution weights (2 output channels, 1 input channel, 2x2)
        W_val = np.array([[[[1, 1], [1, 1]]], [[[1, -1],
                                                [1, 1]]]]).astype(np.float32)
        W = helper.make_tensor('W', TensorProto.FLOAT, (2, 1, 2, 2),
                               list(W_val.reshape(-1)))

        # Convolution bias, one value per output channel
        B_val = np.array([1, -1]).astype(np.float32)
        B = helper.make_tensor('B', TensorProto.FLOAT, (2, ),
                               list(B_val.reshape((-1))))

        conv_node = onnx.helper.make_node('Conv',
                                          inputs=['x', 'W', 'B'],
                                          outputs=['y'],
                                          kernel_shape=[2, 2],
                                          pads=[1, 1, 0, 0])

        pool_node = onnx.helper.make_node('AveragePool',
                                          inputs=['y'],
                                          outputs=['z'],
                                          kernel_shape=[2, 2],
                                          pads=[0, 0, 0, 0],
                                          strides=[2, 2])

        # Create the graph (GraphProto)
        graph_def = onnx.helper.make_graph(
            [conv_node, pool_node],
            'test-model',
            [x],
            [z],
            [W, B]  # x_init]
        )

        # Create the model (ModelProto)
        model_def = onnx.helper.make_model(graph_def,
                                           producer_name='onnx-example')
        test_file = os.path.join(FILE_DIR, 'test.onnx')
        onnx.save(model_def, test_file)

        # Import the saved ONNX file into the XGraph via the opaque func
        xgraph = XGraph(name='test')
        of = OpaqueFuncRegistry.Get('pyxir.onnx.from_onnx')
        of(xgraph, test_file)

        assert xgraph.get_name() == 'test-model'

        # Expect: Input, Convolution, BiasAdd and Pooling layers
        xlayers = xgraph.get_layers()
        assert len(xlayers) == 4

        assert xlayers[0].name == 'x'
        assert xlayers[0].type[0] == 'Input'
        assert xlayers[0].shapes == [-1, 1, 4, 4]
        assert xlayers[0].attrs['onnx_id'] == 'x'

        assert xlayers[1].name == 'y_Conv'
        assert xlayers[1].type[0] == 'Convolution'
        assert xlayers[1].shapes == [-1, 2, 4, 4]
        assert xlayers[1].attrs['padding'] == [(0, 0), (0, 0), (1, 0), (1, 0)]
        assert xlayers[1].attrs['strides'] == [1, 1]
        assert xlayers[1].attrs['dilation'] == [1, 1]
        assert xlayers[1].attrs['kernel_size'] == [2, 2]
        assert xlayers[1].attrs['channels'] == [1, 2]
        assert xlayers[1].attrs['data_layout'] == 'NCHW'
        assert xlayers[1].attrs['kernel_layout'] == 'OIHW'
        assert xlayers[1].attrs['groups'] == 1
        assert xlayers[1].attrs['onnx_id'] == 'y'

        assert xlayers[2].name == 'y'
        assert xlayers[2].shapes == [-1, 2, 4, 4]
        assert xlayers[2].attrs['axis'] == 1
        assert xlayers[2].attrs['onnx_id'] == 'y'

        assert xlayers[3].name == 'z'
        assert xlayers[3].shapes == [-1, 2, 2, 2]
        assert xlayers[3].type[0] == 'Pooling'
        assert xlayers[3].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert xlayers[3].attrs['strides'] == [2, 2]
        assert xlayers[3].attrs['kernel_size'] == [2, 2]
        assert xlayers[3].attrs['data_layout'] == 'NCHW'
        assert xlayers[3].attrs['type'] == 'Avg'
        assert xlayers[3].attrs['onnx_id'] == 'z'

        # Partition for the 'test_dpu' target via the opaque func
        of = OpaqueFuncRegistry.Get('pyxir.partition')
        of(xgraph, ['test_dpu'], "")

        assert xgraph.get_name() == 'test-model'
        assert len(xgraph) == 4

        # The input stays on CPU; all compute layers go to the DPU partition
        xlayers = xgraph.get_layers()
        assert xlayers[0].name == 'x'
        assert xlayers[0].target == 'cpu'
        assert xlayers[0].subgraph is None

        assert xlayers[1].name == 'y_Conv'
        assert xlayers[1].target == 'test_dpu'
        assert xlayers[1].subgraph == 'xp0'

        assert xlayers[2].name == 'y'
        assert xlayers[2].type == ['BiasAdd']
        assert xlayers[2].target == 'test_dpu'
        assert xlayers[2].subgraph == 'xp0'

        assert xlayers[3].name == 'z'
        assert xlayers[3].type == ['Pooling']
        assert xlayers[3].target == 'test_dpu'
        assert xlayers[3].subgraph == 'xp0'

        os.remove(test_file)
Example #4
0
    def partition(self,
                  xgraph: XGraph,
                  targets: List[str],
                  last_layer: str = None) -> XGraph:
        """
        Partition the provided XGraph according to the provided targets

        NOTE: This is a naive approach which only handles one target
        and just partitions everything it can

        Arguments
        ---------
        xgraph: XGraph
            the XGraph to be partitioned
        targets: List[int]
            the targets to be partitioned for, for now only one target can
            be passed
        last_layer: str
            the last layer to be partitioned; partitioning stops after
            this layer is processed

        Returns
        -------
        XGraph
            a rebuilt XGraph in which the layers of the largest partition
            carry the target and a subgraph name
        """
        # TODO Add Base partitioning support for multiple algorithms
        if len(targets) != 1:
            raise NotImplementedError("XGraph partitioning is only supported"
                                      " for one target at the moment but got:"
                                      " {}".format(len(targets)))
        target = targets[0]

        # ALGO:
        # 1. loop through all the XLayers
        # 2. For every layer, if it's a target layer, either add it to the
        #   corresponding Partition layer if a bottom is already added to
        #   this layer, else start a new Partition layer
        # TODO set bottoms and tops to [] for partition input respectively
        #   output layers
        name, idx = 'xp', 0
        # Maps layer name -> list of candidate partition names
        name_2_pars = {}
        # Maps partition name -> list of layer names in that partition
        par_2_names = {}
        xlayers = []
        # Keep track of layers that have a non-target bottom
        #  e.g. T1 --> NT --> T3 --> T4
        #        `--->  T2 ----^
        # Here, T3 has a non-target bottom layer and a new partition has to be
        #   started
        non_target_bottom_layers = set([])
        # Keep track of layer partition dependencies
        #  e.g. T1 --> NT --> T2 -->
        #        `------------^
        # Here, T1 and T2 can't belong to the same partition because of NT in between
        # partition_dep_map = {}

        logger.debug("Partition for target: {}".format(target))

        stop_partitioning = False

        for X in xgraph.get_layers():

            logger.debug("----------------")
            logger.debug(X.name)

            # partition_dependencies = set([e for b in X.bottoms
            #                               if b in partition_dep_map
            #                               for e in partition_dep_map[b]])
            # if X.name in partition_dep_map:
            #     partition_dep_map[X.name] |= partition_dependencies
            # else:
            #     partition_dep_map[X.name] = partition_dependencies

            if target not in X.targets or stop_partitioning:

                if X.name in name_2_pars:
                    # Cap partition(s)

                    for p in name_2_pars[X.name]:
                        par_2_names[p].remove(X.name)

                    del name_2_pars[X.name]

                # Successors of a non-target layer must start new partitions
                for t in X.tops:
                    non_target_bottom_layers.add(t)

                # partition_dependencies = set([e for b in X.bottoms
                #                               if b in name_2_pars
                #                               for e in name_2_pars[b]])
                # if X.name in partition_dep_map:
                #     partition_dep_map[X.name] |= partition_dependencies

                continue

            elif X.name in name_2_pars\
                    and X.name not in non_target_bottom_layers:
                # and not partition_dep_map[X.name]\
                #     .isdisjoint(set(name_2_pars[X.name])):

                # Check whether only one partition added this element
                if len(name_2_pars[X.name]) == 1:
                    # -- p_0 --> A
                    pass
                else:
                    # -- p_0 --> A
                    # -- p_1 ----^
                    # Multiple partitions reach this layer: merge them all
                    #   into one newly created partition
                    new_p_name = name + str(idx)
                    idx += 1
                    logger.debug(
                        "Merge into new partition: {}".format(new_p_name))

                    par_2_names[new_p_name] = []

                    for p in name_2_pars[X.name]:

                        par_2_names[new_p_name].extend(par_2_names[p])

                        for layer in par_2_names[p]:
                            name_2_pars[layer] = [new_p_name]

                        # xlayers.remove(pX)
                        del par_2_names[p]

                    name_2_pars[X.name] = [new_p_name]

            else:
                # Create new partition
                # Also,
                # -- p_0 --> A --> p_1 --> B
                #     `--------------------^

                new_p_name = name + str(idx)
                idx += 1
                logger.debug("Create new partition: {}".format(new_p_name))

                par_2_names[new_p_name] = [X.name]
                name_2_pars[X.name] = [new_p_name]

            # Propagate this layer's partition to its successors
            # NOTE(review): when `t` is already tracked and `p` is already in
            #   name_2_pars[t], the else branch resets name_2_pars[t] to [p],
            #   dropping any other partitions recorded for `t` — confirm this
            #   overwrite is intended
            p = name_2_pars[X.name][0]
            for t in X.tops:
                if t in name_2_pars and p not in name_2_pars[t]:
                    name_2_pars[t].append(p)
                else:
                    name_2_pars[t] = [p]

                if t not in par_2_names[p]:
                    par_2_names[p].append(t)

            if X.name == last_layer:
                stop_partitioning = True

            # logger.debug(name_2_pars)
            # logger.debug(par_2_names)

        logger.debug("----------------")

        # ALGO: keep only largest subgraph, prune all others
        # TODO Make more generic and support multiple criteria

        largest_xp, largest_xp_size = '', 0
        for xp in sorted(par_2_names.keys()):
            if len(par_2_names[xp]) > largest_xp_size:
                largest_xp = xp
                largest_xp_size = len(par_2_names[xp])

        for xp in list(par_2_names.keys()):
            if xp != largest_xp:
                del par_2_names[xp]

        for xname, xp_lst in list(name_2_pars.items()):
            if xp_lst[0] != largest_xp:
                del name_2_pars[xname]

        # Set target and group attributes
        for X in xgraph.get_layers():

            if X.name in name_2_pars:
                xlayers.append(
                    X._replace(target=target, subgraph=name_2_pars[X.name][0]))
            else:
                xlayers.append(copy.deepcopy(X))

        # TODO Sort xlayers in topological order
        xgraph = XGraphPartitioner.xgraph_factory.build_from_xlayer(
            net=xlayers,
            name=xgraph.get_name(),
            output_png='tvm_partitioned_graph.png'
            if logger.getEffectiveLevel() <= 10 else None)

        # Transpose optimizer
        optimizer = XGraphTransposesOptimizer(xgraph,
                                              target=target,
                                              opt_name='partitioning')
        optimizer.optimize()

        return xgraph
Example #5
0
    def get_subgraphs(self, xgraph: XGraph) -> List[XGraph]:
        """Return a list of subgraphs for the given xgraph in XGraph format."""

        # ALGO:
        # 1. loop through all the XLayers
        # 2. For every layer, if it's a target layer, either add it to the
        #   corresponding partition if a bottom is already added to
        #   this layer, else start a new partition
        # TODO set bottoms and tops to [] for partition input respectively
        #   output layers

        # Global counter used to generate unique 'xinput<N>' names
        in_idx = 0

        # subgraph name -> set of layer names already added to that subgraph
        visited = {}
        # subgraph name -> the SubGraph XLayer being assembled
        subgraphs = {}

        for X in xgraph.get_layers():

            if X.subgraph is not None:

                X_copy = copy.deepcopy(X)

                # Lazily create the SubGraph container layer on first hit
                if X.subgraph not in subgraphs:
                    new_subgraph = xlayer.defaultXLayer()
                    new_subgraph = new_subgraph._replace(
                        name=X.subgraph,
                        type=["SubGraph"],
                        data=[],
                        shapes=TupleShape([]),
                        sizes=[],
                        internal=1,
                        attrs={
                            "target": X.target,
                            "__bottom_tensors": {},
                            "orig_bottom_tensors": {},
                            "__top_tensors": {},
                            "orig_top_tensors": {},
                        },
                    )
                    subgraphs[X.subgraph] = new_subgraph
                    visited[X.subgraph] = set()

                # First check if this layer is a subgraph input layer
                #   by looking at the visited subgraph layers
                for b in X.bottoms:

                    if b not in visited[X.subgraph]:

                        bX = xgraph.get(b)

                        x_in_name = "xinput" + str(in_idx)

                        def find_original_bottom_layers(rX):
                            # Walk down through internal layers to the first
                            #   original (non-internal) model layers
                            if not bool(rX.internal):
                                return [rX.name]

                            bottom_layers = []
                            for r_bottom_name in rX.bottoms:
                                rbX = xgraph.get(r_bottom_name)
                                rec_bottom_layers = find_original_bottom_layers(
                                    rbX)
                                bottom_layers.extend(rec_bottom_layers)

                            return bottom_layers

                        orig_bottoms = find_original_bottom_layers(bX)

                        if "input_names" not in subgraphs[X.subgraph].attrs:
                            subgraphs[X.subgraph].attrs["input_names"] = [
                                x_in_name
                            ]
                        else:
                            subgraphs[X.subgraph].attrs["input_names"].append(
                                x_in_name)

                        # Keep track of input - bottom connections
                        # NOTE(review): the membership check below keys on
                        #   X.name but the dict is updated under x_in_name —
                        #   confirm the intended key (same pattern for the
                        #   orig_bottom_tensors check further down)
                        sg_bottoms_ext = subgraphs[
                            X.subgraph].attrs["__bottom_tensors"]
                        if X.name not in sg_bottoms_ext:
                            sg_bottoms_ext.update({x_in_name: [b]})
                        else:
                            new_bottoms_ext = sg_bottoms_ext[x_in_name] + [b]
                            sg_bottoms_ext.update({x_in_name: new_bottoms_ext})

                        # Keep track of input - original (model) bottom
                        #   connections, i.e. exclude internally added
                        #   operations here
                        sg_orig_bottoms_ext = subgraphs[
                            X.subgraph].attrs["orig_bottom_tensors"]
                        if X.name not in sg_orig_bottoms_ext:
                            sg_orig_bottoms_ext.update(
                                {x_in_name: orig_bottoms})
                        else:
                            new_orig_bottoms_ext = (
                                sg_orig_bottoms_ext[x_in_name] + orig_bottoms)
                            sg_orig_bottoms_ext.update(
                                {x_in_name: new_orig_bottoms_ext})

                        # Insert an internal Input layer at the subgraph border
                        new_in_X = xlayer.defaultXLayer()
                        new_in_X = new_in_X._replace(
                            name=x_in_name,
                            type=["Input"],
                            shapes=bX.shapes[:],
                            sizes=bX.sizes[:],
                            # Keep track of the first original layer of the
                            #   operation in front of which we are adding an
                            #   input layer
                            layer=[X.layer[0]],
                            tops=[X.name],
                            bottoms=[],
                            internal=1,
                            attrs={},
                            targets=[],
                        )
                        in_idx += 1

                        # Rewire the copied layer to the new input layer
                        X_copy.bottoms[:] = [(bc if bc != b else new_in_X.name)
                                             for bc in X_copy.bottoms]

                        subgraphs[X.subgraph].subgraph_data = subgraphs[
                            X.subgraph].subgraph_data + [new_in_X]
                        # subgraphs[X.subgraph].shapes[:] = new_in_X.shapes[:]
                        # subgraphs[X.subgraph].sizes[:] = new_in_X.sizes[:]
                        subgraphs[X.subgraph].bottoms.append(b)

                        visited[X.subgraph].add(new_in_X.name)

                # A layer without tops is a subgraph (and graph) output
                if X.tops == []:
                    sg_tops_ext = subgraphs[X.subgraph].attrs["__top_tensors"]
                    sg_orig_tops_ext = subgraphs[
                        X.subgraph].attrs["orig_top_tensors"]
                    sg_tops_ext.update({X.name: []})
                    sg_orig_tops_ext.update({X.name: []})

                    if "output_names" not in subgraphs[X.subgraph].attrs:
                        subgraphs[X.subgraph].attrs["output_names"] = [X.name]
                    else:
                        subgraphs[X.subgraph].attrs["output_names"].append(
                            X.name)

                for t in X.tops:
                    tX = xgraph.get(t)

                    # A top outside this subgraph makes X a subgraph output
                    if tX.subgraph != X.subgraph:

                        def find_original_top_layers(rX):
                            # Walk up through internal layers to the first
                            #   original (non-internal) model layers
                            if not bool(rX.internal):
                                return [rX.name]

                            top_layers = []
                            for r_top_name in rX.tops:
                                rtX = xgraph.get(r_top_name)
                                rec_top_layers = find_original_top_layers(rtX)
                                top_layers.extend(rec_top_layers)

                            return top_layers

                        orig_tops = find_original_top_layers(tX)

                        if "output_names" not in subgraphs[X.subgraph].attrs:
                            subgraphs[X.subgraph].attrs["output_names"] = [
                                X.name
                            ]
                        else:
                            subgraphs[X.subgraph].attrs["output_names"].append(
                                X.name)

                        # Keep track of output - top connections
                        sg_tops_ext = subgraphs[
                            X.subgraph].attrs["__top_tensors"]
                        if X.name not in sg_tops_ext:
                            sg_tops_ext.update({X.name: [t]})  # X.tops[:]
                        else:
                            new_tops_ext = sg_tops_ext[X.name] + [t]  # X.tops
                            sg_tops_ext.update({X.name: new_tops_ext})

                        # Keep track of output - original (model) top
                        #   connections, i.e. exclude internally added
                        #   operations here
                        sg_orig_tops_ext = subgraphs[
                            X.subgraph].attrs["orig_top_tensors"]
                        if X.name not in sg_orig_tops_ext:
                            sg_orig_tops_ext.update({X.name: orig_tops})
                        else:
                            new_orig_tops_ext = sg_orig_tops_ext[
                                X.name] + orig_tops
                            sg_orig_tops_ext.update(
                                {X.name: new_orig_tops_ext})

                        X_copy.tops.remove(t)
                        subgraphs[X.subgraph].tops.append(t)
                        subgraphs[X.subgraph].shapes.append(X.shapes[:])
                        subgraphs[X.subgraph].sizes.extend(X.sizes[:])

                # If no tops
                if X.tops == []:
                    subgraphs[X.subgraph].shapes.append(X.shapes[:])
                    subgraphs[X.subgraph].sizes.extend(X.sizes[:])

                subgraphs[X.subgraph].subgraph_data = subgraphs[
                    X.subgraph].subgraph_data + [X_copy]
                visited[X.subgraph].add(X_copy.name)

        sg_list = []
        for sg, sgX in subgraphs.items():
            # (len(sgX.tops) == len(sgX.shapes))
            # if len(sgX.tops) != 1:
            #    raise ValueError("Subgraphs are only supported for one output"
            #        " but got: {}".format(sgX.tops))

            # TODO Sort xlayers in topological order
            # sub_xgraph = XGraphPartitioner.xgraph_factory.build_from_xlayer(
            #     net=sgX.data,
            #     name=sg
            # )

            sg_list.append(
                sgX._replace(
                    # shapes = sgX.shapes[0],
                    # sizes=[sum([s[0] for s in sgX.sizes])],
                    subgraph_data=sgX.subgraph_data))

        return sg_list
Example #6
0
    def quantize_subgraph(
        self,
        xgraph: XGraph,
        inputs: Dict[str, np.ndarray],
        input_names: List[str],
        output_names: List[str],
    ) -> None:
        """Quantize a subgraph with the provided calibration inputs.

        Loads the frozen TF graph previously registered for this subgraph,
        runs decent_q calibration/quantization over the inputs in batches
        of at most 32 samples, saves the quantization info and registers
        the resulting artifacts in `self.q_output`.

        Arguments
        ---------
        xgraph: XGraph
            the subgraph to be quantized; its name selects the frozen
            graph from `self.partition_graphs`
        inputs: Dict[str, np.ndarray]
            calibration data per input tensor name; axis 0 is the batch
            dimension
        input_names: List[str]
            names of the subgraph input tensors
        output_names: List[str]
            names of the subgraph output tensors
        """

        # Import Tensorflow only when needed to avoid strict dependency
        import tensorflow as tf

        frozen_graph = self.partition_graphs[xgraph.get_name()]
        logger.info("Load frozen graph from: {}".format(frozen_graph))
        input_graph_def = tf.compat.v1.GraphDef()
        with tf.io.gfile.GFile(frozen_graph, "rb") as f:
            input_graph_def.ParseFromString(f.read())

        logger.info("Quantization input: {} and output names: {}".format(
            input_names, output_names))
        input_shapes = [X.shapes.tolist() for X in xgraph.get_input_layers()]

        # Calibrate in batches of at most 32 samples
        in_batch_size = inputs[input_names[0]].shape[0]
        quant_batch_size = min(32, in_batch_size)
        nb_quant_iters = in_batch_size // quant_batch_size

        def inputs_func(it):
            # Return the `it`-th calibration batch for every input tensor.
            # (Parameter renamed from `iter` to avoid shadowing the builtin;
            # the unused numpy import and the redundant `nonlocal`
            # declarations — the closure only reads these names — were
            # removed.)
            return {
                in_name: in_data[it * quant_batch_size:(it + 1) *
                                 quant_batch_size]
                for in_name, in_data in inputs.items()
            }

        logger.info(
            "START decent quantization for graph partition: {}, nb_iters: {}, batch_size: {}"
            .format(xgraph.get_name(), nb_quant_iters, quant_batch_size))
        q_config = self.decent_q.QuantizeConfig(
            input_nodes=input_names,
            output_nodes=output_names,
            input_shapes=input_shapes,
            output_dir=self.work_dir,
            method="1",
            calib_iter=nb_quant_iters,
        )
        self.decent_q.quantize_frozen(input_graph_def, inputs_func, q_config)

        netcfg = os.path.join(self.work_dir, "deploy_model.pb")
        q_eval_file = os.path.join(self.work_dir, "quantize_eval_model.pb")
        quant_info_file = os.path.join(
            self.work_dir, "quant_info_{}.txt".format(xgraph.get_name()))
        self._save_quant_info(netcfg, quant_info_file)

        self.q_output.add(xgraph.get_name(), netcfg, quant_info_file,
                          frozen_graph, q_eval_file)

        # TODO
        # Add quantization info to corresponding XLayers
        self._add_quant_info_to_xgraph(netcfg)
Example #7
0
def _from_onnx(onnx_model, xgraph=None, postprocessing=None):
    # type: (onnx.onnx_ONNX_RELEASE_ml_pb2.ModelProto, XGraph, List[str])
    #   -> XGraph
    """
    Transform ONNX model into XGraph

    Arguments
    ---------
    onnx_model: onnx.onnx_ONNX_RELEASE_ml_pb2.ModelProto
        The ONNX model to be transformed into a XGraph
    xgraph: XGraph (Optional)
        The XGraph object to be used for storing the transformed ONNX model
    postprocessing: List[str] (Optional)
        a list of postprocessing layers to be added

    Returns
    -------
    xgraph: XGraph
        the created xgraph model
    """

    onnx_graph = onnx_model.graph

    # Reuse the provided XGraph (renamed after the ONNX graph) or start fresh
    if xgraph is None:
        xgraph = XGraph(name=onnx_graph.name)
    else:
        xgraph.set_name(onnx_graph.name)

    if postprocessing is None:
        postprocessing = []

    registry = ONNX2XLayerRegistry()
    xgraph_factory = XGraphFactory()

    # Metadata: collect Vitis-AI quantization info stored in the ONNX
    #   metadata props; keys look like "vitis_ai_quant--<attr>--...--<qkey>"
    quant_info = {}
    for meta in onnx_model.metadata_props:
        meta_key_split = meta.key.split("--")
        if meta_key_split[0] == "vitis_ai_quant":
            qkey = meta_key_split[-1]
            quant_info.setdefault(qkey, {})[meta_key_split[1]] = meta.value
    quant_keys = list(quant_info.keys())
    xgraph.meta_attrs['quant_keys'] = quant_keys
    for qkey in quant_keys:
        xgraph.meta_attrs[qkey] = quant_info[qkey]

    # Trained parameters (initializers) as numpy arrays, keyed by ONNX name
    params = {e.name: numpy_helper.to_array(e)
              for e in onnx_model.graph.initializer}
    logger.debug("ONNX params size: {}".format(len(params)))

    net = []
    xmap = {}

    # Setup parameters layers: one Constant XLayer per initializer.
    # These are registered in xmap (by ONNX id) but not appended to `net`;
    # they get pulled in by the operations that consume them.
    for p_name in params.keys():
        cX = xlf.get_xop_factory_func('Constant')(
            op_name=px.stringify(p_name),
            value=params[p_name],
            onnx_id=p_name
        )
        xmap[cX.attrs['onnx_id']] = cX

    # Setup input xlayers: graph inputs that are not initializers are
    #   actual network inputs
    for input_proto in onnx_graph.input:
        name = input_proto.name
        if name not in params:
            logger.debug("input_proto: {}".format(name))
            t_type = TensorTypeWrapper(input_proto.type.tensor_type)
            dtype = t_type.get_dtype()
            shape = t_type.get_shape()
            X = xlf.get_xop_factory_func('Input')(
                px.stringify(name),
                list(shape),
                dtype=dtype,
                onnx_id=name
            )

            net.append(X)
            xmap[X.attrs['onnx_id']] = X

    # Translate every ONNX node through the op-type registry; a node may
    #   expand into multiple XLayers
    for node in onnx_graph.node:
        wrapped_node = NodeWrapper(node)
        op_type = wrapped_node.get_op_type()
        Xs = registry[op_type](wrapped_node, params, xmap)
        net.extend(Xs)

    # Postprocessing
    OP_2_XLAYER = {
        'Softmax': xlf.get_xop_factory_func('Softmax',
                                            internal=True)
    }

    # Add additional output layers to the network that are not specified
    #   in the network file (usually only used for adding softmax layers)
    for i, output in enumerate(postprocessing):
        if output not in OP_2_XLAYER:
            # Unknown postprocessing ops are silently skipped (deliberate:
            #   see the disabled NotImplementedError in history)
            continue
        op_name = output + str(i)

        # Update tops of current last layer
        X = net[-1]
        X.tops.append(op_name)
        X = OP_2_XLAYER[output](op_name, [X])

        # NOTE: fixed — the original tested `X.name in net`, comparing a
        #   string against XLayer objects, so the guard could never trigger.
        #   Compare against the layer *names* instead.
        if any(lX.name == X.name for lX in net):
            raise ValueError("This should never happen. Error because the"
                             " generated output name already exists in the"
                             " network dictionary used for setup.")

        net.append(X)
        xmap[X.name] = X

    xgraph_factory.build_from_xlayer(
        net=net,
        xgraph=xgraph,
        name=onnx_graph.name,
        blobs=False
    )

    return xgraph
# Пример #8 (Example #8)
# 0
    def _get_net_and_params(self, xgraph: XGraph, last_layers: List[str]):
        """Collect the runtime submodel as a list of XLayers together with
        a dict of their parameters, cutting the graph off once all of the
        given last layers have been visited.

        Layers reachable only through an already-visited last layer are
        excluded from the returned network.
        """
        # TODO Remove hardcoding parameter retrieval

        def _extract_params(X, params):
            # Pull X's trainable/statistical data into `params`, keyed by
            #   layer name + parameter suffix
            if 'Convolution' in X.type or 'Conv2DTranspose' in X.type:
                if not isinstance(X.data, xlayer.ConvData):
                    raise ValueError(
                        "Invalid convolution data type: {}, should be "
                        " xlayer.ConvData".format(type(X.data)))
                # OIHW
                params[X.name + '_kernel'] = X.data.weights
                params[X.name + '_biases'] = X.data.biases
            elif 'Dense' in X.type:
                if not isinstance(X.data, xlayer.ConvData):
                    raise ValueError(
                        "Invalid inner product data type: {}, should be "
                        " xlayer.ConvData".format(type(X.data)))
                # OIHW
                params[X.name + '_weights'] = X.data.weights
                params[X.name + '_biases'] = X.data.biases
            elif 'BatchNorm' in X.type:
                if not isinstance(X.data, xlayer.BatchData):
                    raise ValueError(
                        "Invalid batchnorm data type: {}, should be"
                        " xlayer.BatchData".format(type(X.data)))
                # channels
                params[X.name + '_mu'] = X.data.mu
                params[X.name + '_variance'] = X.data.sigma_square
                params[X.name + '_gamma'] = X.data.gamma
                params[X.name + '_beta'] = X.data.beta
            elif 'Scale' in X.type:
                if not isinstance(X.data, xlayer.ScaleData):
                    raise ValueError(
                        "Invalid scale data type: {}, should be"
                        " xlayer.ScaleData".format(type(X.data)))
                # channels
                params[X.name + '_gamma'] = X.data.gamma
                params[X.name + '_beta'] = X.data.beta
            elif 'BiasAdd' in X.type:
                assert X.data is not None
                params[X.name + '_bias'] = X.data[0]
            elif 'Eltwise' in X.type:
                if X.data != []:
                    params[X.name + '_beta'] = X.data[0]

        net = []
        params = {}
        nb_last_found = 0
        skip_tops = set()

        for X in xgraph.get_layers():
            if X.name in skip_tops:
                # Behind a cut-off point: skip this layer and propagate the
                #   skip to its consumers as well
                skip_tops = skip_tops.union(tuple(X.tops))
                continue

            _extract_params(X, params)
            net.append(X)

            if last_layers is not None and X.name in last_layers:
                nb_last_found += 1
                if nb_last_found == len(last_layers):
                    # All requested last layers visited: the submodel is done
                    break
                skip_tops = skip_tops.union(tuple(X.tops))

        return net, params