Example #1
def insert_resizer(G, out_edge, resize_op, from_shape):
    input_node = out_edge.from_node
    net_in_dim = input_node.in_dims[0]
    from_dim = deepcopy(net_in_dim)
    from_dim.h = from_shape[0]
    from_dim.w = from_shape[1]
    if resize_op == 'bilinear':
        resize_node = BilinearResizerParameters(input_node.name + "_resizer",
                                                (net_in_dim.h, net_in_dim.w))
    elif resize_op == 'nearest':
        resize_node = NearestNeighborResizerParameters(
            input_node.name + "_resizer", (net_in_dim.h, net_in_dim.w))
    else:
        raise ValueError(f"unknown resize_op '{resize_op}'")
    to_node = out_edge.to_node
    to_idx = out_edge.to_idx
    resize_node.in_dims = [from_dim]
    input_node.dims.h = from_shape[0]
    input_node.dims.w = from_shape[1]

    # qrec updated to reflect resizer
    input_qrec = G.quantization and G.quantization.get(NodeId(input_node))
    if input_qrec:
        resizer_qrec = deepcopy(input_qrec)
        resizer_qrec.in_qs = resizer_qrec.out_qs
        G.quantization[NodeId(resize_node)] = resizer_qrec

    G.remove_edge(out_edge)
    G.add_node(resize_node)
    G.add_edge(NNEdge(input_node, resize_node))
    G.add_edge(NNEdge(resize_node, to_node, to_idx=to_idx))
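
A minimal usage sketch; the graph `G`, the input node name, and the target shape are assumptions for illustration, using only accessors that appear in these examples.

# Hypothetical: make a graph trained at one resolution accept 128x128
# images by resizing right after the (assumed) input node 'input_1'.
input_node = next(n for n in G.nodes() if n.name == 'input_1')
out_edge = G.out_edges(input_node.name)[0]
insert_resizer(G, out_edge, 'bilinear', (128, 128))
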
Example #2
    def _import_as_matmul(cls, node, inputs, x, y, real_x_shape, real_y_shape, trans_a, trans_b, alpha, beta, **kwargs):
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        all_nodes = kwargs['all_nodes']
        if trans_a:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tinx'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=x[0], to_node=tparams, from_idx=x[1]))
            x = (tparams, 0)
        if trans_b:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tiny'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=y[0], to_node=tparams, from_idx=y[1]))
            y = (tparams, 0)
        params = MatMulOpParameters(G.unique_name(valid_name))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(
            NNEdge(from_node=y[0], to_node=params, from_idx=y[1], to_idx=1))

        out_dims = params.get_output_size(
            [Dim.unnamed(real_x_shape), Dim.unnamed(real_y_shape)])

        biases = cls.get_constant(inputs[2]) if len(inputs) > 2 else np.zeros(out_dims[0].shape[1])
        biases_params = ConstantInputParameters(
            G.unique_name(f'{valid_name}_biases'), dims=Dim.unnamed(biases.shape), value=biases)
        G.add_edge(
            NNEdge(from_node=biases_params, to_node=params, to_idx=2))
        cls.record_constant_qrec(inputs[2], biases_params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, out_dims[0], None)
        return params
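
For reference, what the inserted (1, 0) transpose realizes, traced in numpy; alpha/beta scaling is outside this excerpt and the shapes are made up for illustration.

import numpy as np
a = np.random.randn(2, 3)          # x
b = np.random.randn(4, 3)          # y, with trans_b set
b_t = np.transpose(b, (1, 0))      # the inserted TransposeParameters node
out = np.matmul(a, b_t)            # the MatMulOpParameters node, shape (2, 4)
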
Example #3
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        has_modified_graph = False

        for node in G.nodes(node_classes=tuple(VALID_FUSIONS.keys())):
            node_list = self.get_node_list(G, node,
                                           FusionMatch(self._default_ktype))
            if node_list is None or len(node_list.order) < 2:
                continue
            LOG.info("fusing nodes %s", ",".join(
                (node.name for node in node_list.order)))
            has_modified_graph = True
            subgraph = GraphView()
            last_node = None
            for snode in node_list.order:
                if last_node is not None:
                    subgraph.add_edge(
                        NNEdge(from_node=last_node, to_node=snode))
                last_node = snode
            # the assumption here is that the first node may have multiple inputs
            # but definitely has only one output
            input_mapping = [[
                (node_list.node, idx)
            ] for idx in range(G.num_in_edges(node_list.node.name))]
            output_mapping = [(last_node, 0)]
            pnode = node_list.fusions_class(node_list.node.name + '_fusion',
                                            fusion_type=node_list.fusion_type,
                                            subgraph=subgraph,
                                            input_mapping=input_mapping,
                                            output_mapping=output_mapping)
            if G.quantization:
                # TODO - stats
                qrecs = G.quantization.get_all(pnode.contained_nodes())
                if qrecs:
                    prec = QRec.copy_ktype(qrecs[0],
                                           in_qs=qrecs[0].in_qs,
                                           out_qs=qrecs[-1].out_qs)
                    for fnode in pnode.contained_nodes():
                        G.quantization.move_to_fusion(fnode, pnode)
                    G.quantization[NodeId(pnode)] = prec
            in_edges = G.in_edges(node_list.node.name)
            out_edges = G.out_edges(last_node.name)
            for snode in node_list.order:
                G.remove(snode)
            for edge in in_edges:
                G.add_edge(
                    NNEdge(edge.from_node,
                           pnode,
                           from_idx=edge.from_idx,
                           to_idx=edge.to_idx))
            for edge in out_edges:
                G.add_edge(
                    NNEdge(pnode,
                           edge.to_node,
                           from_idx=edge.from_idx,
                           to_idx=edge.to_idx))

        if set_identity:
            self.set_identity(G)

        return has_modified_graph
Example #4
 def insert_copy_on_common_concat_in(self, G, concat_nodes):
     # for every concat node, collect all the in edges (from_node, from_idx).
     # if a tuple is repeated, insert a copy at each repetition.
     # different concats cannot share the same in edge (from_node, from_idx)
     concat_in_edges = []
     has_modified_graph = False
     for concat_node in concat_nodes:
         for idx, in_edge in enumerate(G.indexed_in_edges(
                 concat_node.name)):
             real_in_edge = find_real_in_edge(G, in_edge)
             if real_in_edge in concat_in_edges:
                 has_modified_graph = True
                 copy_node = CopyParameters("%s_copy_%s" %
                                            (concat_node.name, idx))
                 G.remove_edge(in_edge)
                 LOG.info(
                     'common_concat: inserting copy between %s/%s and %s/%s',
                     in_edge.from_node.name, idx, concat_node.name,
                     in_edge.to_idx)
                 G.add_edge(
                     NNEdge(in_edge.from_node,
                            copy_node,
                            from_idx=in_edge.from_idx))
                 G.add_edge(
                     NNEdge(copy_node, concat_node, to_idx=in_edge.to_idx))
                 if G.quantization:
                     qrec = G.quantization[NodeId(concat_node)]
                     G.quantization[NodeId(copy_node)] = QRec.copy_ktype(
                         qrec,
                         in_qs=[deepcopy(qrec.in_qs[idx])],
                         out_qs=[deepcopy(qrec.in_qs[idx])])
             else:
                 concat_in_edges.append(real_in_edge)
     return has_modified_graph
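
A hypothetical setup showing why the copy is needed (the producer node, the matcher instance, and the shapes are assumptions): if the same tensor feeds two inputs of one concat, presumably the code generator would alias one buffer twice, so the inserted CopyParameters gives the repeated input its own buffer.

concat = ConcatParameters('cat', axis=0)
G.add_edge(NNEdge(producer, concat, to_idx=0))
G.add_edge(NNEdge(producer, concat, to_idx=1))   # repeated (from_node, from_idx)
matcher.insert_copy_on_common_concat_in(G, [concat])   # inserts one copy node
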
Example #5
 def match(self, G: GraphView, set_identity: bool = True):
     split_nodes = [
         node for node in G.nodes() if isinstance(node, SplitParameters)
     ]
     has_modified_graph = False
     for node in split_nodes:
         # traverse reshapes or transposes that do nothing - check gen
         # find edges connected to concats
         res = self.find_split_concat(G, node)
         if res is None:
             continue
         # TODO(martin) - group edges that have adjacent inputs and outputs
         if G.quantization:
             qrec = G.quantization[NodeId(node)]
         for idx, bundle in enumerate(res):
             if not bundle:
                 continue
             has_modified_graph = True
             copy_node = CopyParameters("%s_copy_%s" % (node.name, idx))
             for edge_set in bundle:
                 first_edge = edge_set[0]
                 G.remove_edge(first_edge)
                 G.add_edge(
                     NNEdge(copy_node,
                            first_edge.to_node,
                            to_idx=first_edge.to_idx))
             G.add_edge(NNEdge(node, copy_node, from_idx=idx))
             if G.quantization:
                 G.quantization[NodeId(copy_node)] = qrec.__class__(
                     in_qs=[deepcopy(qrec.out_qs[idx])],
                     out_qs=[deepcopy(qrec.out_qs[idx])])
     return has_modified_graph
Example #6
    def _common(cls, node, scales, sizes, nearest_mode='round_prefer_ceil', **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] if inp else None for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_rank = len(x_shape)
        spatial_size = x_rank - 2
        in_c = x_shape[1]
        in_w = x_shape[-1]
        if scales is not None:
            sizes = np.array(x_shape) * np.array(scales)
        sizes = [None if x_shape[idx] is None else dim
                 for idx, dim in enumerate(sizes)]
        if spatial_size == 1:
            sizes.insert(-1, 1)

        if nearest_mode != 'round_prefer_ceil':
            logger.warning('only round_prefer_ceil is supported for nearest mode')

        if spatial_size != 2 and spatial_size != 1:
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        if not all(x_dim == size_dim for x_dim, size_dim in zip(x_shape[:2:], sizes[:2:])):
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        mode = node.attrs.get('mode', 'nearest')
        if mode != 'nearest' and mode != 'linear':
            raise ValueError('resize only supports nearest and linear modes')

        params_class = BilinearResizerParameters if mode == 'linear' else NearestNeighborResizerParameters

        params = params_class(valid_name,
                              new_shape=tuple(sizes[2::]),
                              align_corners=False,
                              halfpixel_centers=False,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])

        if spatial_size == 1:
            r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                          old_shape=Dim.unnamed([in_c, in_w]),
                                          shape=Dim.unnamed([in_c, 1, in_w]))
            r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                          old_shape=Dim.unnamed([in_c, 1, sizes[-1]]),
                                          shape=Dim.unnamed([in_c, sizes[-1]]))
            G.add_edge(NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
            G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
            G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
            pout_dims = ProvisionalDim(sizes[:-2:] + sizes[-1::])
            params = r2_params
        else:
            pout_dims = ProvisionalDim(sizes)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
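
How sizes is derived when scales is given, traced for a fully specified NCHW input (a sketch; the values are made up):

import numpy as np
x_shape = [1, 3, 32, 32]
scales = [1, 1, 2, 2]
sizes = list(np.array(x_shape) * np.array(scales))   # [1, 3, 64, 64]
# only the spatial tail reaches the resizer: new_shape=tuple(sizes[2:]) -> (64, 64)
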
Example #7
 def _common(cls, node, v13=False, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     G = kwargs['G']
     inputs = [all_nodes[inp] for inp in node.input]
     axis = node.attrs.get('axis', None)
     # may have more than one input, e.g. clip
     x = inputs[0]
     x_shape = x[2].shape
     if axis and axis < 0:
         axis += len(x_shape)
     axis = cls._trim_axis(axis, x_shape)
     if axis != 0 and not v13:
         raise ValueError(
             'LogSoftmax does not support ONNX version < 13 with axis not first'
         )
     if cls.is_constant(x):
         logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(
             valid_name,
             value=np.log(softmax_func(cls.get_constant(x), axis=axis)))
     else:
         softmax_params = SoftMaxParameters(f'{valid_name}_softmax',
                                            axis=axis)
         G.add_edge(
             NNEdge(from_node=x[0],
                    to_node=softmax_params,
                    from_idx=x[1],
                    to_idx=0))
         params = LogOpParameters(f'{valid_name}_log')
         G.add_edge(NNEdge(from_node=softmax_params, to_node=params))
     all_nodes[node.output[0]] = (params, 0, copy.deepcopy(x[2]), None)
     return params
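
Numerically, the two inserted nodes compute log-softmax; a quick numpy check of the decomposition:

import numpy as np
x = np.random.randn(4, 10)
e = np.exp(x - x.max(axis=-1, keepdims=True))
softmax = e / e.sum(axis=-1, keepdims=True)   # the SoftMaxParameters node
log_softmax = np.log(softmax)                 # the LogOpParameters node
assert np.allclose(log_softmax,
                   x - x.max(axis=-1, keepdims=True)
                   - np.log(e.sum(axis=-1, keepdims=True)))
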
Example #8
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=False,
                                  in_dims_hint=SparseList([['c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            params.weights = weights
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
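
The [1, 0] weight transpose in the linear path, checked in numpy; the fully connected layer is assumed to store weights as (out_features, in_features), while MatMul's second input is (in_features, out_features):

import numpy as np
x = np.random.randn(8)             # input, (in_features,)
y = np.random.randn(8, 4)          # constant second input, (in_features, out_features)
weights = np.transpose(y, [1, 0])  # (out_features, in_features)
assert np.allclose(x @ y, weights @ x)
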
Example #9
def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G
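
The trailing yield suggests this is registered as a pytest fixture elsewhere; a hypothetical test consuming it, checking only the wiring built above:

def test_two_conv_graph(two_conv_graph):
    G = two_conv_graph
    assert len(G.out_edges('node1')) == 1
    assert G.out_edges('node1')[0].to_node.name == 'node2'
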
Example #10
 def find_direct_connects(self,
                          G,
                          node,
                          has_modified_graph,
                          find_output=True):
     # traverse reshapes or transposes that do nothing - check gen
     # find edges connected to concats
     res = self.find_split_concat(G, node, find_output=find_output)
     if res is None:
         return has_modified_graph
     if G.quantization:
         qrec = G.quantization[NodeId(node)]
     for idx, bundle in enumerate(res):
         if not bundle:
             continue
         has_modified_graph = True
         copy_node = CopyParameters("%s_copy_%s" % (node.name, idx))
         for edge_set in bundle:
             first_edge = edge_set[0]
             G.remove_edge(first_edge)
             LOG.info('inserting copy between %s/%s and %s/%s', node.name,
                      idx, first_edge.to_node.name, first_edge.to_idx)
             G.add_edge(
                 NNEdge(copy_node,
                        first_edge.to_node,
                        to_idx=first_edge.to_idx))
         G.add_edge(NNEdge(node, copy_node, from_idx=idx))
         if G.quantization:
             G.quantization[NodeId(copy_node)] = QRec.copy_ktype(
                 qrec,
                 in_qs=[deepcopy(qrec.out_qs[idx])],
                 out_qs=[deepcopy(qrec.out_qs[idx])])
     return True
Example #11
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        rnn_nodes = [
            self.find_unpack(G, node) for node in G.nodes()
            if isinstance(node, RNNBaseParameters) and node.n_output_cells > 1
        ]
        rnn_nodes_by_slice = self.validate_slices(G, rnn_nodes)
        rnn_nodes_by_slice = self.validate_multi_branch(G, rnn_nodes_by_slice)
        if not rnn_nodes_by_slice:
            return False

        for unpack_node, rnn_unpacks in rnn_nodes_by_slice.items():
            modified_nodes = set()
            for rnn_unpack in rnn_unpacks:
                self.process_path(G, rnn_unpack, modified_nodes)
            # since process path will have removed all unnecessary nodes the edges will be correct here
            out_edges = G.out_edges(unpack_node.name)
            in_edges = G.in_edges(unpack_node.name)
            assert len(in_edges
                       ) == 1, "expecting unpack node to have only one in edge"
            in_edge = in_edges[0]
            changes_shape = unpack_node.changes_shape if isinstance(
                unpack_node, StridedSliceParameters) else False

            LOG.info("Eliminating last cell unpack: %s", unpack_node.name)
            G.remove(unpack_node)

            # Here the strided slice can change the output shape of the RNN
            # so insert a reshape to do the shape change
            if changes_shape:
                reshape = ReshapeParameters(
                    unpack_node.name + '_reshape',
                    old_shape=Dim.unnamed(unpack_node.post_slice_shape),
                    shape=Dim.unnamed(unpack_node.out_shape))
                G.add_edge(
                    NNEdge(from_node=in_edge.from_node,
                           to_node=reshape,
                           from_idx=in_edge.from_idx))
                for out_edge in out_edges:
                    G.add_edge(
                        NNEdge(from_node=reshape,
                               to_node=out_edge.to_node,
                               to_idx=out_edge.to_idx))
                if G.quantization:
                    G.quantization[NodeId(reshape)] = G.quantization[NodeId(
                        unpack_node)]
            else:
                for out_edge in out_edges:
                    G.add_edge(
                        NNEdge(from_node=in_edge.from_node,
                               to_node=out_edge.to_node,
                               from_idx=in_edge.from_idx,
                               to_idx=out_edge.to_idx))
            if G.quantization:
                del G.quantization[NodeId(unpack_node)]

        if set_identity:
            self.set_identity(G)

        return True
Example #12
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(TransposeConvOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[2]
        x_shape = x[2].shape
        in_b, in_h, in_w, in_c = tuple(x_shape)
        pout_shape = [
            dim if x_shape[idx] is not None else None
            for idx, dim in enumerate(cls.get_constant(inputs[0]))
        ]
        out_b, out_h, out_w, out_c = tuple(pout_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # filter order: ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        stride_w = node_opts.StrideW()
        stride_h = node_opts.StrideH()
        # compute padding
        pad = node_opts.Padding()
        if pad == Padding.SAME:
            pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
            pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
            pad_top = pad_h // 2
            pad_left = pad_w // 2
            pad = PadDim(pad_top,
                         pad_h - pad_top,
                         pad_left,
                         pad_w - pad_left,
                         same_type='balanced_right')
        else:
            pad = PadDim(0)

        params = TransposeConv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(stride_h, stride_w),
            padding=pad,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy()],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        pout_dims = ProvisionalDim(pout_shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
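
The SAME padding arithmetic, worked for concrete values (made up for illustration):

in_h, stride_h, filt_h, out_h = 16, 2, 3, 32
pad_h = (in_h - 1) * stride_h + filt_h - out_h   # = 1
pad_top = pad_h // 2                             # = 0
# the remaining odd row goes to the bottom, hence same_type='balanced_right'
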
Example #13
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        has_modified_graph = False
        group_identity = kwargs.get('group_identity')
        if group_identity == 'pow2_match_group':
            valid_activations = VALID_ACTIVATIONS_POW2
        else:
            valid_activations = VALID_ACTIVATIONS_SQ8
        for fc_node in [params for params in G.nodes() if isinstance(params, FcParameters)]:
            node_list = self.get_node_list(G, fc_node, valid_activations)
            if node_list is None or len(node_list.order) < 2:
                continue
            LOG.info("fusing nodes %s", ",".join(
                (node.name for node in node_list.order)))
            has_modified_graph = True
            subgraph = GraphView()
            last_node = None
            for node in node_list.order:
                if last_node is not None:
                    subgraph.add_edge(
                        NNEdge(from_node=last_node, to_node=node))
                last_node = node
            input_mapping = [[(node_list.linear, idx)] for idx in range(3)]
            output_mapping = [(last_node, 0)]
            pnode = LinearFusionParameters(
                node_list.linear.name + '_fusion',
                fusion_type=node_list.fusion_type,
                subgraph=subgraph,
                input_mapping=input_mapping,
                output_mapping=output_mapping)
            if G.quantization:
                # TODO - stats
                qrecs = G.quantization.get_all(pnode.contained_nodes())
                if qrecs:
                    prec = QRec.copy_ktype(
                        qrecs[0], in_qs=qrecs[0].in_qs, out_qs=qrecs[-1].out_qs)
                    for node in pnode.contained_nodes():
                        G.quantization.move_to_fusion(node, pnode)
                    G.quantization[NodeId(pnode)] = prec
            in_edges = G.in_edges(node_list.linear.name)
            out_edges = G.out_edges(last_node.name)
            for node in node_list.order:
                G.remove(node)
            for edge in in_edges:
                G.add_edge(NNEdge(edge.from_node, pnode,
                                  from_idx=edge.from_idx, to_idx=edge.to_idx))
            for edge in out_edges:
                G.add_edge(NNEdge(pnode, edge.to_node,
                                  from_idx=edge.from_idx, to_idx=edge.to_idx))

        if set_identity:
            self.set_identity(G)

        return has_modified_graph
Example #14
    def get_state(cls, G, inputs, idx, name, hidden_size, num_directions=1):
        if not inputs[idx]:
            state = np.zeros((num_directions, hidden_size))
        elif cls.is_constant(inputs[idx]):
            state = cls.get_constant(inputs[idx])
        else:
            state_inp = inputs[idx]
            if num_directions == 2:
                act_slices = (((0, 1, 1), (0, hidden_size, 1)),
                              ((1, 2, 1), (0, hidden_size, 1)))
                out_shapes = ((1, hidden_size), (1, hidden_size))
                split = SplitParameters(G.unique_name(f'{name}_split'),
                                        act_slices=act_slices,
                                        out_shapes=out_shapes,
                                        axis=0)
                G.add_edge(
                    NNEdge(from_node=state_inp[0],
                           to_node=split,
                           from_idx=state_inp[1]))
                return {
                    'forward': {
                        name: (split, 0)
                    },
                    'backward': {
                        name: (split, 0)
                    },
                }
            else:
                reshape = ReshapeParameters(G.unique_name(f'{name}_reshape'),
                                            old_shape=(1, hidden_size),
                                            shape=(hidden_size, ))
                G.add_edge(
                    NNEdge(from_node=state_inp[0],
                           to_node=reshape,
                           from_idx=state_inp[1]))
                return {
                    'forward': {
                        name: (reshape, 0)
                    },
                }

        return {
            'forward' if dir == 0 else 'backward': {
                name: dir_arr.reshape((hidden_size, ))
            }
            for dir, dir_arr in enumerate(
                np.split(state, num_directions, axis=0))
        }
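
The constant path's return value, traced for a bidirectional case (the state name 'h_state' is a made-up placeholder):

import numpy as np
hidden_size, num_directions = 3, 2
state = np.zeros((num_directions, hidden_size))
out = {('forward' if d == 0 else 'backward'): {'h_state': arr.reshape((hidden_size,))}
       for d, arr in enumerate(np.split(state, num_directions, axis=0))}
# -> {'forward': {'h_state': array([0., 0., 0.])},
#     'backward': {'h_state': array([0., 0., 0.])}}
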
Example #15
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        indices = cls.get_constant(y)
        axis = node.attrs.get('axis', 0)

        pshape = ProvisionalDim(x_shape[:axis:] + list(indices.shape) +
                                x_shape[axis + 1:])
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=np.take(x_val,
                                                           indices,
                                                           axis=axis))
        else:
            axis = cls._trim_axis(axis, x_shape)
            params = GatherParametters(valid_name, axis=axis, indices=indices)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
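
The constant-folding branch is plain numpy take; for example:

import numpy as np
x_val = np.arange(12).reshape(3, 4)
indices = np.array([2, 0])
out = np.take(x_val, indices, axis=0)   # rows 2 then 0, shape (2, 4)
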
Example #16
    def _common(cls, node: TFLiteNode, **kwargs):
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        axes = list(cls._verify_constant(inputs[1]))
        node.input[1].used = True
        if len(axes) > 1:
            raise ValueError(
                "reverses of more than one dimension are not supported")
        axis = axes[0]
        if x_shape[axis] is None:
            params = NoOPParameters(node.name,
                                    desc="reversed removed dimension")
        else:
            axis -= sum([1 if dim is None else 0 for dim in x_shape[:axis:]])
            params = ReverseParameters(node.name, axis=axis)
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
        return params
Example #17
def reverse_matmul(G, params):
    in_edges = G.indexed_in_edges(params.name)
    for edge in in_edges[0:2:]:
        G.remove_edge(edge)
    other_idx = 1
    for edge in in_edges[0:2:]:
        G.add_edge(
            NNEdge(from_node=edge.from_node,
                   to_node=params,
                   from_idx=edge.from_idx,
                   to_idx=other_idx))
        other_idx = 1 - other_idx
    trans_in = params.transpose_in if params.transpose_in is not None else [
        None, None
    ]
    for idx in range(2):
        if trans_in[idx] is None:
            trans_in[idx] = (1, 0)
        else:
            trans_in[idx] = tuple(trans_in[idx][jdx] for jdx in (1, 0))
    params.transpose_in = trans_in
    trans_out = params.transpose_out if params.transpose_out is not None else [
        None
    ]
    if trans_out[0] is None:
        trans_out[0] = (1, 0)
    else:
        trans_out[0] = tuple(trans_out[0][idx] for idx in (1, 0))
    params.transpose_out = trans_out
    nid = NodeId(params)
    if G.quantization and nid in G.quantization:
        qrec = G.quantization[nid]
        # swap qrecs
        qrec.in_qs[0], qrec.in_qs[1] = qrec.in_qs[1], qrec.in_qs[0]
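
The transpose bookkeeping rests on the identity (A @ B).T == B.T @ A.T: once the two MatMul inputs are swapped, both inputs and the output must be transposed to preserve the result. A quick numpy check:

import numpy as np
A = np.random.randn(2, 3)
B = np.random.randn(3, 4)
assert np.allclose((A @ B).T, B.T @ A.T)
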
Example #18
    def _common(cls, node, **kwargs):
        params_class = kwargs['params_class']
        params_args = kwargs.get('params_args', {})
        flatten = kwargs.get('flatten')
        if params_args is None:
            params_args = {}
        all_nodes = kwargs['all_nodes']
        opts = kwargs['opts']
        G = kwargs['G']
        inputs = [all_nodes[inp] for inp in node.input]
        assert len(inputs) == 1
        inp = inputs[0]
        pout = inp[2].flatten if flatten else copy.deepcopy(inp[2])

        params = params_class(node.name, **params_args)
        if opts.get('load_quantization'):
            in_qs = kwargs['in_qs'] if "in_qs" in kwargs else None
            out_qs = kwargs['out_qs'] if "out_qs" in kwargs else None
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output, in_qs=in_qs, out_qs=out_qs)
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout)
        return params
Example #19
    def _import_nodes(self, G, graph, handlers, all_nodes, outputs, opts):
        for node in graph.nodes:
            handler = handlers.get(node.op_name, None)
            if not handler:
                raise ValueError("no handler found for %s" % node.op_type)
            if node.is_custom and handler:
                handler = handler.get(node.custom_op_name, None)
                if not handler:
                    raise ValueError(
                        "no handler found for custom operation %s" %
                        node.custom_op_name)

            params = handler.handle(node,
                                    all_nodes=all_nodes,
                                    G=G,
                                    opts=opts,
                                    importer=self)
            if params is None:
                continue
            for idx, out_tensor in enumerate(node.output):
                output = outputs.get(out_tensor)
                if not output:
                    continue
                G.add_edge(
                    NNEdge(from_node=params,
                           to_node=output[0],
                           from_idx=idx,
                           to_idx=output[1]))
                if opts.get('load_quantization'):
                    qtype = deepcopy(
                        G.quantization[NodeId(params)].out_qs[idx])
                    G.quantization[NodeId(output[0])] = QRec.scaled(
                        in_qs=[qtype], out_qs=[qtype])
Example #20
    def _common(cls, node: TFLiteNode, **kwargs):
        custom_opts = node.get_custom_options()
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']
        importer = kwargs['importer']

        inputs = [all_nodes[t] for t in node.input]
        outputs = [
            all_nodes.get(node.output[idx]) if idx < len(node.output) else None
            for idx in range(4)
        ]
        # inp_shapes = [input[2].shape for input in inputs]

        if 'max_bb_before_nms' not in custom_opts:
            custom_opts['max_bb_before_nms'] = 300

        params = SSDDetectorParameters(node.name, parameters=custom_opts)

        overriden_outputs = []
        for idx, output in enumerate(outputs):
            if output:
                overriden_outputs.append(node.output[idx])
                continue
            oparams = G.add_output()
            otensor = TensorBase("Detect_%s" % idx)
            overriden_outputs.append(otensor)
            importer.provisional_outputs[otensor] = (oparams, 0, None)
        # covers the case where not all outputs are generated by the conversion tool
        node.override_outputs(overriden_outputs)

        for idx, inp in enumerate(inputs):
            G.add_edge(
                NNEdge(from_node=inp[0],
                       to_node=params,
                       from_idx=inp[1],
                       to_idx=idx))

        if opts.get('load_quantization'):
            in_qtypes = [
                QType.from_min_max_sq(tensor.qtype.min_val,
                                      tensor.qtype.max_val) if
                (tensor.qtype.is_asymmetric
                 or not tensor.qtype.signed) else tensor.qtype
                for tensor in node.input
            ]
            o_boxes_qtype = QType(min_val=-2,
                                  max_val=2,
                                  dtype=np.int16,
                                  scale=2**(-14))
            o_scores_qtype = node.input[1].qtype
            o_class_qtype = QType(scale=1, dtype=np.int8)
            qrec = QRec.scaled(in_qs=in_qtypes,
                               out_qs=[
                                   o_boxes_qtype, o_class_qtype,
                                   o_scores_qtype, o_class_qtype
                               ])
            G.quantization[NodeId(params)] = qrec

        return params
Example #21
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if len(x_shape) == 0:
            assert len(axes) == 1 and axes[0] == 0
            new_shape = [1]
        else:
            new_shape = [
                item for sublist in [[1, dim] if idx in axes else [dim]
                                     for idx, dim in enumerate(x_shape)]
                for item in sublist
            ]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=x_val.reshape(new_shape))
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name,
                                       old_shape=old_shape,
                                       shape=shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
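
The nested comprehension that builds new_shape, traced by hand for a small case:

x_shape = (3, 4)
axes = {1}
new_shape = [item
             for sublist in [[1, dim] if idx in axes else [dim]
                             for idx, dim in enumerate(x_shape)]
             for item in sublist]
assert new_shape == [3, 1, 4]   # a 1 is inserted before each axis in axes
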
Example #22
 def move_node(G, node, edges):
     nid = NodeId(node)
     qrec = G.quantization[nid] if G.quantization and nid in G.quantization else None
     node_in_edge = G.in_edges(node.name)[0]
     node_out_edges = G.out_edges(node.name)
     G.remove(node)
     for node_out_edge in node_out_edges:
         new_edge = NNEdge(from_node=node_in_edge.from_node, to_node=node_out_edge.to_node,
                           from_idx=node_in_edge.from_idx, to_idx=node_out_edge.to_idx)
         G.add_edge(new_edge)
     cnt = 0
     original_node = node
     for edge in edges:
         LOG.info("Moving node %s between %s and %s",
                  node.name, edge.from_node.name, edge.to_node.name)
         if cnt > 0:
             new_node = deepcopy(node)
             new_node.name = f'{original_node.name}_{cnt}'
         else:
             new_node = node
         cnt += 1
         new_node.in_dims = [edge.from_node.out_dims[edge.from_idx].clone()]
         new_node.out_dims = [edge.to_node.in_dims[edge.to_idx].clone()]
         G.insert_node(new_node, edge.from_node, edge.to_node,
                       from_idx=edge.from_idx, to_idx=edge.to_idx,
                       edge_class=NNEdge)
         if qrec:
             from_qrec = G.quantization[NodeId(edge.from_node)]
             new_qrec = deepcopy(qrec)
             new_qrec.in_qs[0] = deepcopy(from_qrec.out_qs[edge.from_idx])
             G.quantization[NodeId(new_node)] = new_qrec
             G.quantization.propagate(
                 G, new_node, node_in_edge.from_node, qtype=new_qrec.out_qs[0])
Example #23
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        axis = kwargs['axis']
        splits = kwargs.get('splits')
        opts = kwargs['opts']
        input_idx = kwargs.get('input_idx', 0)
        num_splits = kwargs.get('num_splits')

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[input_idx]

        x_shape = x[2].shape
        act_slices, pout_shapes, axis = SplitParameters.get_splits(
            x_shape, axis, splits=splits, num_splits=num_splits)
        out_shapes = [
            BackendHandler.remove_unspecified_dim(shape)
            for shape in pout_shapes
        ]
        params = SplitParameters(node.name,
                                 act_slices=act_slices,
                                 out_shapes=out_shapes,
                                 axis=axis)

        if opts.get('load_quantization'):
            G.quantization[NodeId(
                params)] = BackendHandler.load_tf_quantization([node.input[0]],
                                                               node.output)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        for idx, tensor in enumerate(node.output):
            all_nodes[tensor] = (params, idx, ProvisionalDim(pout_shapes[idx]))
        return params
Example #24
    def _common(cls, node: TFLiteNode, **kwargs):
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        ptranspose = cls._verify_constant(inputs[1])
        pout_shape = [x_shape[dim] for dim in ptranspose]
        transpose = [
            new_axes[axis] for axis in ptranspose if x_shape[axis] is not None
        ]
        node.input[1].used = True

        params = TransposeParameters(node.name, transpose=transpose)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
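
The axis remapping after dropping unknown (None) dimensions, traced for a made-up shape:

x_shape = [None, 4, 8]
ptranspose = [0, 2, 1]
new_axes = {1: 0, 2: 1}   # index in full shape -> index with None dims dropped
transpose = [new_axes[a] for a in ptranspose if x_shape[a] is not None]
assert transpose == [1, 0]   # the real (4, 8) tensor is transposed to (8, 4)
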
Example #25
def add_node(G: NNGraph, node: Node, anode: Node = None) -> Tuple[str, str]:
    G.add_node(node)
    if not anode:
        return (node.name, node.name)
    G.add_node(anode)
    G.add_edge(NNEdge(node, anode))
    return (node.name, anode.name)
Example #26
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ReshapeOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # TF2 seems to use the second input whereas TF1 uses the opts
        new_shape = None
        if node_opts:
            new_shape = list(node_opts.NewShapeAsNumpy())
        elif len(inputs) > 1:
            set_shape_tensor = list(cls._verify_constant(inputs[1]))
            node.input[1].used = True
            new_shape = list(set_shape_tensor)
        else:
            raise ValueError(
                f"Cannot determine new_shape for Reshape node: {node.name}")

        if -1 in new_shape:
            new_shape_size = reduce(lambda x, y: x * 1
                                    if y == -1 else x * y, new_shape, 1)
            inp_size = reduce(lambda x, y: x * y
                              if y is not None else x, x_shape, 1)
            new_shape[new_shape.index(-1)] = inp_size // new_shape_size

        if None in x_shape:
            if 1 in new_shape:
                old_batch_dim = x_shape.index(None)
                new_batch_dim = new_shape.index(1)
                if old_batch_dim != new_batch_dim:
                    LOG.info(
                        "node %s moved batch dimension for axis %s to axis %s",
                        node.name, old_batch_dim, new_batch_dim)
                new_shape[new_batch_dim] = None
            else:
                raise ValueError(
                    "unable to determine movement of unspcified axis in node %s"
                    % node.name)

        pnew_shape = ProvisionalDim(new_shape)
        old_shape = Dim.unnamed(cls.remove_unspecified_dim(x_shape),
                                is_ordered=True)
        new_shape = Dim.unnamed(cls.remove_unspecified_dim(new_shape),
                                is_ordered=True)

        params = ReshapeParameters(node.name,
                                   old_shape=old_shape,
                                   shape=new_shape)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pnew_shape)
        return params
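
The -1 inference above, traced for a made-up shape:

from functools import reduce
new_shape = [4, -1]
x_shape = [2, 2, 3]
new_shape_size = reduce(lambda x, y: x * 1 if y == -1 else x * y, new_shape, 1)  # 4
inp_size = reduce(lambda x, y: x * y if y is not None else x, x_shape, 1)        # 12
new_shape[new_shape.index(-1)] = inp_size // new_shape_size
assert new_shape == [4, 3]
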
Example #27
 def gen_concat(cls, node, inputs, axis, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     input_shapes = [inp[2].shape for inp in inputs]
     axis_sum = sum(shape[axis] for shape in input_shapes)
     axis = axis if axis >= 0 else len(input_shapes[0]) + axis
     output_shape = [
         axis_sum if idx == axis else dim
         for idx, dim in enumerate(input_shapes[0])
     ]
     pout_dim = ProvisionalDim(output_shape)
     none_dims = sum(
         [1 if dim is None else 0 for dim in output_shape[:axis:]])
     if all(cls.is_constant(inp) for inp in inputs):
         value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                axis=axis)
         logger.info(
             f"reducing {valid_name} to a constant {print_small(value)}")
         params = ConstantInputParameters(valid_name, value=value)
     else:
         params = ConcatParameters(valid_name, axis=axis - none_dims)
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
     all_nodes[node.output[0]] = (params, 0, pout_dim, inputs[0][3])
     return params
Example #28
 def _common(cls,
             node,
             pool_type="max",
             constant_operation=None,
             copy_qtype=False,
             **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     unknown_dims = sum(1 if dim is None else 0 for dim in x_shape)
     params = GlobalPoolingParameters(
         valid_name,
         pool_type=pool_type,
         axis=tuple(range(1,
                          len(x_shape) - unknown_dims)),
         keep_dims=True)
     pout_dims = ProvisionalDim([x_shape[0], x_shape[1]] +
                                ([1] * (len(x_shape) - 2)))
     G.add_edge(
         NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
     all_nodes[node.output[0]] = (params, 0, pout_dims,
                                  x[3] if copy_qtype else None)
     return params
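
The pooling axes, traced for an NCHW shape with an unknown batch dimension:

x_shape = [None, 8, 16, 16]
unknown_dims = 1
axis = tuple(range(1, len(x_shape) - unknown_dims))   # (1, 2)
# with the None dim dropped the real shape is (8, 16, 16),
# so axes (1, 2) are the spatial h and w dimensions
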
Example #29
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     to_dtype = node.attrs['to']
     if cls.is_constant(x):
         x_val = cls.get_constant(x)
         x_val = x_val.astype(to_dtype)
         if x_val.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, x_val)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          dims=Dim.unnamed(x_val.shape),
                                          value=x_val)
     else:
         params = QuantizeParameters(valid_name,
                                     to_qtype=QType(dtype=to_dtype))
         G.add_edge(
             NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                    to_idx=0))
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape), None)
     return params
Example #30
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if axes:
            if any(x_shape[axis] != 1 for axis in axes):
                raise ValueError("axis parameter in node %s is invalid %s" % (valid_name, axes))
            new_shape = [dim for idx, dim in enumerate(x_shape) if idx not in axes]
        else:
            new_shape = [dim for dim in x_shape if dim != 1]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=x_val.reshape(new_shape),
                                             constant_store=G.constant_store)
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name, old_shape=old_shape, shape=shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params