Code Example #1
def adjust_order(G, reshape_weights=True, postprocess=True, debug_function=None, steps=None, single_step=False):
    if steps is None:
        opts = {'reshape_weights': reshape_weights}
        selector = AdjusterBase.get_all_handlers(opts)
        LOG.info("adding transposes to correct tensor order for AT kernels")
        ConstantInputParameters.clear_compression_state(G)
        for node in G.nodes(node_classes=tuple(selector)):
            adjusters = selector[node.__class__]
            for adjuster, attrs in adjusters:
                if attrs:
                    not_selected = False
                    for attr, val in attrs.items():
                        if not hasattr(node, attr):
                            not_selected = True
                            break
                        if callable(val):
                            if not val(getattr(node, attr)):
                                not_selected = True
                                break
                        elif getattr(node, attr) != val:
                            not_selected = True
                            break
                    if not_selected:
                        continue
                adjuster.adjust(G, node)
                break
        add_dimensions(G)
    if debug_function:
        debug_function(G)
    if steps is not None or postprocess:
        eliminate_transposes(G, debug_function=debug_function, steps=steps, single_step=single_step)
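The attribute filter inside the selection loop above deserves a closer look: each handler carries an attrs constraint dict whose values are either literals to compare against or predicates to call. A minimal standalone sketch of that matching rule (plain Python, not the gap_sdk API; FakeNode and its attribute are hypothetical):

def matches(node, attrs):
    # selected only if every constraint holds; a constraint value may be
    # a literal to compare or a callable predicate applied to the attribute
    for attr, val in attrs.items():
        if not hasattr(node, attr):
            return False
        if callable(val):
            if not val(getattr(node, attr)):
                return False
        elif getattr(node, attr) != val:
            return False
    return True

class FakeNode:  # hypothetical node with a single attribute
    ker_in_order = [[0, 1, 2]]

assert matches(FakeNode(), {'ker_in_order': lambda v: len(v) == 1})
assert not matches(FakeNode(), {'missing_attr': 1})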
Code Example #2
    def _common(cls, node, copy_qtype=False, quantized_args=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        valid_name = kwargs['valid_name']
        G = kwargs['G']
        constant_operation = kwargs.get('constant_operation')
        constant_int_operation = kwargs.get('constant_int_operation')
        inputs = [all_nodes[inp] for inp in node.input]
        if quantized_args:
            args = [inputs[quantized_args[0][0]], inputs[quantized_args[1][0]]]
            inp_qtypes = [
                cls.get_qtype(inputs[quantized_args[0][1]], inputs[quantized_args[0][2]]),
                cls.get_qtype(inputs[quantized_args[1][1]], inputs[quantized_args[1][2]])
            ]
            out_qtype = cls.get_qtype(inputs[quantized_args[2][0]], inputs[quantized_args[2][1]])
        else:
            args = inputs
            assert len(args) == 2
            out_qtype = None

        if all(cls.is_constant(inp) for inp in args) and constant_operation:
            values = [cls.get_constant(inp) for inp in args]
            if quantized_args:
                values = [inp_qtype.dequantize(val) for inp_qtype, val in zip(inp_qtypes, values)]
            outputs = cls.implied_broadcast(inputs)
            if constant_int_operation and all(np.issubdtype(val.dtype, np.integer) for val in values):
                res = constant_int_operation(*values)
            else:
                res = constant_operation(*values)
            if quantized_args:
                res = out_qtype.quantize(res)
            if res.size < 10:
                logger.info("reducing %s to a constant %s", valid_name, res)
            else:
                logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name, value=res,
                                             dims=Dim.unnamed(outputs[0].known_shape),
                                             qtype=out_qtype)
        else:
            params_args = kwargs.get('params_args', {})
            params = kwargs['params_class'](valid_name, **params_args)
            outputs = cls.implied_broadcast(inputs)
            shapes = []
            for idx, inp in enumerate(args):
                G.add_edge(NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1], to_idx=idx))
                shapes.append(inp[2].known_shape)
            if isinstance(params, Broadcastable):
                params.set_broadcast(shapes)
            if quantized_args:
                for qtype, inp in zip(inp_qtypes, args):
                    if cls.is_constant(inp):
                        inp[0].qtype = qtype
                qrecs = kwargs['qrecs']
                qrecs[NodeId(params)] = QRec.scaled(in_qs=inp_qtypes, out_qs=[out_qtype])

        if copy_qtype:
            out_qtype = inputs[0][3] if inputs[0][3] is not None else inputs[1][3]

        all_nodes[node.output[0]] = (params, 0, outputs[0], out_qtype)
        return params
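The quantized fold above runs dequantize → float op → requantize. A rough plain-numpy sketch of that order of operations, with a simple affine scheme standing in for the QType quantize/dequantize methods (scales and values are invented):

import numpy as np

def dequantize(q, scale, zero_point=0):
    return (q.astype(np.float32) - zero_point) * scale

def quantize(x, scale, zero_point=0, dtype=np.int8):
    q = np.round(x / scale) + zero_point
    info = np.iinfo(dtype)
    return np.clip(q, info.min, info.max).astype(dtype)

scale = 0.02
a = quantize(np.array([0.5, -0.25]), scale)
b = quantize(np.array([0.1, 0.1]), scale)
# fold the binary op at import time, then requantize with the output qtype
res = quantize(dequantize(a, scale) + dequantize(b, scale), scale)
print(res)  # the folded constant that would back ConstantInputParameters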
Code Example #3
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     opts = kwargs['opts']
     node_opts = kwargs.get("node_opts", None)
     params_args = kwargs.get('params_args', {})
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         LOG.info("reducing %s to a constant", node.name)
         values = [cls.get_constant(inp) for inp in inputs]
         output_shapes = cls.implied_broadcast(inputs)
         params = ConstantInputParameters(node.name,
                                          value=constant_operation(*values),
                                          dims=Dim.unnamed(
                                              output_shapes[0].known_shape))
     else:
         params = kwargs['params_class'](node.name, **params_args)
         output_shapes = cls.implied_broadcast(inputs)
         shapes = []
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
             shapes.append(inp[2].known_shape)
         if isinstance(params, Broadcastable):
             for idx, shape in enumerate(shapes.copy()):
                 len_diff = len(shape) - len(output_shapes[0].known_shape)
                 if len_diff > 0:
                     if not all(dim is None or dim == 1
                                for dim in shape[:len_diff:]):
                         in_shapes = ",".join(
                             str(shape) for shape in shapes)
                         raise ValueError(
                             f'strange broadcast {in_shapes} -> {output_shapes[0].shape}'
                         )
                     shapes[idx] = shape[len_diff::]
             params.set_broadcast(shapes)
     if opts.get('load_quantization'):
         G.quantization[NodeId(params)] = cls.load_tf_quantization(
             node.input, node.output)
     if node_opts is not None:
         params = cls.fuse_activation(node_opts, node.name, params,
                                      **kwargs)
     all_nodes[node.output[0]] = (params, 0, output_shapes[0])
     return params
Code Example #4
File: constant.py Project: bot-motion/gap_sdk
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     value = numpy_helper.to_array(node.attrs['value'])
     params = ConstantInputParameters(valid_name, dims=Dim.unnamed(value.shape), value=value)
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape))
     return params
Code Example #5
    def _common(cls, node, starts, ends, axes, steps, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        x = all_nodes[node.input[0]]
        x_shape = np.array(x[2].shape)
        x_rank = len(x_shape)
        axes = cls._resolve_negative_ranks(
            axes, len(x_shape)) if axes else tuple(range(x_rank))
        axes_rank = len(axes)
        steps = steps if steps else [1] * axes_rank
        slices = np.stack([starts, ends, steps]).transpose((1, 0))
        p_slices = []
        p_shape = []
        for idx, dim in enumerate(x_shape):
            try:
                if dim is None:
                    p_slices.append(None)
                    p_shape.append(None)
                else:
                    slice_idx = axes.index(idx)
                    begin, end, step = slices[slice_idx]
                    begin = max(min(begin if begin >= 0 else dim + begin, dim),
                                0)
                    end = max(min(end if end >= 0 else dim + end, dim), -1)
                    # -sys.maxsize is used to indicate 0 in the reverse slice direction
                    # this makes it compatible with the numpy slice
                    p_slices.append(
                        (begin, -sys.maxsize if end == -1 else end, step))
                    if step < 0:
                        p_shape.append((begin - end) // -step)
                    else:
                        p_shape.append((end - begin) // step)

            except ValueError:
                p_slices.append((0, dim, 1))
                p_shape.append(dim)
        slices = cls._get_real_dim(p_slices)
        shape = cls._get_real_dim(p_shape)

        params = StridedSliceParameters(valid_name,
                                        act_slice=slices,
                                        out_shape=shape)
        if cls.is_constant(x):
            x_val = cls.get_constant(x)
            x_val = params.numpy_slice(x_val)
            if x_val.size < 10:
                logger.info("reducing %s to a constant %s", valid_name, x_val)
            else:
                logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             dims=Dim.unnamed(x_val.shape),
                                             value=x_val,
                                             constant_store=G.constant_store)
        else:
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(p_shape))
        return params
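The begin/end normalization above can be exercised against numpy slicing directly: negative indices wrap once, begins clamp to [0, dim], ends clamp to [-1, dim], and an end of -1 is replaced by -sys.maxsize so a negative-step slice can run all the way down to element 0. A standalone check (plain numpy, not nntool):

import sys
import numpy as np

def normalize(begin, end, step, dim):
    begin = max(min(begin if begin >= 0 else dim + begin, dim), 0)
    end = max(min(end if end >= 0 else dim + end, dim), -1)
    return begin, -sys.maxsize if end == -1 else end, step

x = np.arange(10)
b, e, s = normalize(-1, -11, -1, len(x))
assert list(x[b:e:s]) == list(x[::-1])   # full reverse slice reaches element 0
b, e, s = normalize(2, -2, 1, len(x))
assert list(x[b:e:s]) == [2, 3, 4, 5, 6, 7]
assert (e - b) // s == 6                 # matches the p_shape computation above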
Code Example #6
File: cast.py Project: bot-motion/gap_sdk
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(CastOptions)
        G = kwargs['G']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]

        if node_opts:
            in_dtype = TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[
                node_opts.InDataType()]
            out_dtype = TFLiteTensorWrapper.TF_TO_NUMPY_TYPE[
                node_opts.OutDataType()]
        else:
            in_dtype = out_dtype = None
        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            val = cls.get_constant(x)
            if out_dtype:
                val = val.astype(out_dtype)
            params = ConstantInputParameters(node.name, value=val)
        else:
            params = CastParameters(node.name,
                                    in_dtype=in_dtype,
                                    out_dtype=out_dtype)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))

        all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
        return params
Code Example #7
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        out_rank = len(x_shape) + len(kwargs['axes'])
        axes = cls._resolve_negative_ranks(kwargs['axes'], out_rank)

        old_shape = x_shape.copy()
        new_shape = [
            1 if new_idx in axes else old_shape.pop(0)
            for new_idx in range(out_rank)
        ]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            x_val = cls.get_constant(x)
            logger.info(
                f"reducing {valid_name} to a constant {cls.print_small(x_val)}"
            )
            params = ConstantInputParameters(valid_name,
                                             value=x_val.reshape(new_shape))
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name,
                                       old_shape=old_shape,
                                       shape=shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape, x[3])
        return params
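The new_shape comprehension above splices a 1 at every output index listed in axes and consumes the input dims in order everywhere else. The same computation in isolation (pure Python, no nntool imports):

def expand_shape(shape, axes):
    # one slot per output index: 1 where an axis is inserted,
    # otherwise the next unconsumed input dim
    it = iter(shape)
    return [1 if idx in axes else next(it)
            for idx in range(len(shape) + len(axes))]

assert expand_shape([3, 4], {0}) == [1, 3, 4]
assert expand_shape([3, 4], {1, 3}) == [3, 1, 4, 1]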
Code Example #8
 def _common(cls, node, v13=False, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     G = kwargs['G']
     inputs = [all_nodes[inp] for inp in node.input]
     axis = node.attrs.get('axis', None)
     # may have more than one input, e.g. Clip
     x = inputs[0]
     x_shape = x[2].shape
     if axis and axis < 0:
         axis += len(x_shape)
     axis = cls._trim_axis(axis, x_shape)
     if axis != 0 and not v13:
         raise ValueError(
             'LogSoftmax with axis not on the first dimension is only '
             'supported from ONNX opset 13')
     if cls.is_constant(x):
         logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(
             valid_name,
             value=np.log(softmax_func(cls.get_constant(x), axis=axis)))
     else:
         softmax_params = SoftMaxParameters(f'{valid_name}_softmax',
                                            axis=axis)
         G.add_edge(
             NNEdge(from_node=x[0],
                    to_node=softmax_params,
                    from_idx=x[1],
                    to_idx=0))
         params = LogOpParameters(f'{valid_name}_log')
         G.add_edge(NNEdge(from_node=softmax_params, to_node=params))
     all_nodes[node.output[0]] = (params, 0, copy.deepcopy(x[2]), None)
     return params
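Numerically, the Softmax→Log chain built above is the standard log-softmax decomposition. A quick plain-numpy check, with a local softmax standing in for the imported softmax_func:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

x = np.random.randn(4, 8).astype(np.float32)
shifted = x - np.max(x, axis=-1, keepdims=True)
ref = shifted - np.log(np.sum(np.exp(shifted), axis=-1, keepdims=True))
assert np.allclose(np.log(softmax(x)), ref, atol=1e-5)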
Code Example #9
def add_constants(G, sub_g):
    """ adds scalar constants to the subgraphs. If a constant is used in more than one place then
    it is duplicated """
    for node in sub_g.nodes():
        for edge in G.in_edges(node.name):
            if not isinstance(edge.from_node, ConstantInputParameters
                              ) or edge.from_node.out_dims[0].size() > 1:
                continue
            const_node = edge.from_node
            out_edges = G.out_edges(const_node.name)
            # if constant is connected to more than one node then duplicate it
            if len(out_edges) > 1:
                new_const = ConstantInputParameters(
                    G.unique_name(f'{const_node}_dup'),
                    value=const_node.value.copy(),
                    dims=const_node.dims.clone())
                G.remove_edge(edge)
                G.add_edge(
                    NNEdge(from_node=new_const,
                           to_node=edge.to_node,
                           to_idx=edge.to_idx))
                sub_g.add_edge(
                    NNEdge(from_node=new_const,
                           to_node=edge.to_node,
                           to_idx=edge.to_idx))
            else:
                sub_g.add_edge(
                    NNEdge(from_node=const_node,
                           to_node=edge.to_node,
                           to_idx=edge.to_idx))
Code Example #10
 def gen_concat(cls, node, inputs, axis, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     input_shapes = [inp[2].shape for inp in inputs]
     axis_sum = sum(shape[axis] for shape in input_shapes)
     axis = axis if axis >= 0 else len(input_shapes[0]) + axis
     output_shape = [
         axis_sum if idx == axis else dim
         for idx, dim in enumerate(input_shapes[0])
     ]
     pout_dim = ProvisionalDim(output_shape)
     none_dims = sum(
         [1 if dim is None else 0 for dim in output_shape[:axis:]])
     if all(cls.is_constant(inp) for inp in inputs):
         value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                axis=axis)
         logger.info(
             f"reducing {valid_name} to a constant {print_small(value)}")
         params = ConstantInputParameters(valid_name, value=value)
     else:
         params = ConcatParameters(valid_name, axis=axis - none_dims)
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
     all_nodes[node.output[0]] = (params, 0, pout_dim, inputs[0][3])
     return params
Code Example #11
File: fill.py Project: mfkiwl/gap_sdk
    def _common(cls, node: TFLiteNode, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        if len(x_shape) != 1:
            raise ValueError(f'FILL {node.name} expecting 1D tensor for shape')

        shape = list(cls._verify_constant(inputs[0]))

        if cls._is_constant(inputs[1]):
            val = cls._get_constant(inputs[1])

            params = ConstantInputParameters(node.name,
                                             dims=Dim.unnamed(shape),
                                             value=np.full(shape, val),
                                             constant_store=G.constant_store)
            all_nodes[node.output[0]] = (params, 0, ProvisionalDim(shape))
            return params
        else:
            raise ValueError(
                f'FILL {node.name} non constant fill values are not currently supported'
            )
Code Example #12
    def match(self, G: GraphView, set_identity: bool = True):
        has_modified = False
        for node in G.nodes(node_classes=ConstantInputParameters):
            out_edges = G.out_edges(node.name)
            if len(out_edges) <= 1:
                continue
            has_modified = True
            LOG.info(
                'node %s has more than one out edge and will be duplicated',
                node.name)
            idx = 1
            for out_edge in out_edges[1::]:
                new_constant = ConstantInputParameters(f'{node.name}_{idx}',
                                                       dims=Dim.unnamed(
                                                           node.dims.shape),
                                                       value=node.value.copy())
                G.remove_edge(out_edge)
                G.add_edge(
                    NNEdge(from_node=new_constant,
                           to_node=out_edge.to_node,
                           to_idx=out_edge.to_idx))
                idx += 1

        if set_identity:
            self.set_identity(G)

        return has_modified
Code Example #13
    def common_quantize(cls, in_qtype, out_qtype, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        opts = kwargs['opts']
        G = kwargs['G']
        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            if out_qtype:
                val = x[0].value_as(out_qtype)
            else:
                val = cls.get_constant(x)
            params = ConstantInputParameters(node.name,
                                             value=val,
                                             dims=Dim.unnamed(val.shape),
                                             qtype=out_qtype,
                                             constant_store=G.constant_store)
            if opts.get('load_quantization'):
                G.quantization[NodeId(params)] = MultQuantizationRecord(
                    in_qs=[out_qtype], out_qs=[out_qtype])
        else:
            params = QuantizeParameters(node.name, from_qtype=in_qtype, to_qtype=out_qtype)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

            if opts.get('load_quantization'):
                G.quantization[NodeId(params)] = MultQuantizationRecord(
                    in_qs=[in_qtype], out_qs=[out_qtype])
        all_nodes[node.output[0]] = (params, 0, deepcopy(x[2]))
        return params
Code Example #14
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        inputs = [all_nodes[inp] for inp in node.input]

        if not all(cls.is_constant(inp) for inp in inputs):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )

        importer = kwargs['importer']
        sub_G = NNGraph()
        all_nodes_clone = all_nodes.copy()
        importer.import_subgraph(sub_G,
                                 node.attrs['body'], {},
                                 all_nodes=all_nodes_clone)
        if not all(
                isinstance(inp, (InputParameters, ConstantInputParameters))
                for inp in sub_G.inputs()):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )
        sub_G.add_dimensions()
        for idx, inp in enumerate(sub_G.inputs()):
            inp.index = idx

        logger.info(f"reducing loop {valid_name} to a constant")
        count = inputs[0][0].value
        keep_going = inputs[1][0].value
        loop_carried = [inp[0].value for inp in inputs[2:]]
        outputs = [None] * len(node.output)  # scan outputs are accumulated below
        while keep_going and count > 0:
            executer = GraphExecuter(sub_G)
            output_tensors = executer.execute([count, keep_going] +
                                              loop_carried,
                                              silent=True)
            outp_vals = [
                output_tensors[node.step_idx][0] for node in sub_G.outputs()
                if not isinstance(node, InputParameters)
            ]
            keep_going = outp_vals[0]
            for idx, val in enumerate(outp_vals[1:]):
                if idx < len(loop_carried):
                    loop_carried[idx] = outputs[idx] = val
                elif outputs[idx] is None:
                    outputs[idx] = val
                else:
                    outputs[idx] = np.concatenate((outputs[idx], val))
            count -= 1
        for idx, outp in enumerate(node.output):
            params = ConstantInputParameters(
                G.unique_name(f'{valid_name}_out{idx}'),
                value=outputs[idx],
                dims=Dim.unnamed(outputs[idx].shape))
            all_nodes[outp] = (params, 0, ProvisionalDim(outputs[idx].shape),
                               None)

        return None
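Stripped of the graph machinery, the unrolling above follows ONNX Loop semantics: the body receives the trip value, the condition, and the loop-carried values; it returns a new condition plus updated carried values, and any extra outputs are scan outputs concatenated across iterations. A toy model of that control flow (the body function here is hypothetical):

import numpy as np

def body(i, cond, acc):
    # hypothetical loop body: accumulate i, emit the running total as a scan output
    new_acc = acc + i
    return new_acc < 10, new_acc, np.array([new_acc])

count, keep_going, carried = 5, True, 0
scans = None
while keep_going and count > 0:
    keep_going, carried, scan = body(count, keep_going, carried)
    scans = scan if scans is None else np.concatenate((scans, scan))
    count -= 1
print(carried, scans)  # 12 [ 5  9 12]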
Code Example #15
 def gen_dct_matrix(self):
     norm_factor = np.ones((self.n_dct, self.n_dct))
     if self.dct_norm == "ortho" and self.dct_type == 2:
         norm_factor *= np.sqrt(1 / (2 * self.n_dct))
         norm_factor[0] = np.sqrt(1 / (4 * self.n_dct))
     if self.dct_norm == "ortho" and self.dct_type == 3:
         norm_factor *= np.sqrt(1 / (2 * self.n_dct))
         norm_factor[0] = np.sqrt(1 / (self.n_dct))
     DCT_Coeff = np.zeros((self.n_dct, self.n_dct))
     for k in range(self.n_dct):
         for i in range(self.n_dct):
             if self.dct_type == 2:
                 coeff = 2 * np.cos(np.pi / (2 * self.n_dct) * k *
                                    (2 * i + 1))
             elif self.dct_type == 3:
                 coeff = 1 if i == 0 else 2 * np.cos(np.pi /
                                                     (2 * self.n_dct) * i *
                                                     (2 * k + 1))
             else:
                 raise NotImplementedError(
                     "only DCT types 2 and 3 are supported")
             DCT_Coeff[k, i] = coeff
     return ConstantInputParameters(self.name + "_DCT_Matrix",
                                    value=DCT_Coeff * norm_factor,
                                    dims=Dim.unnamed(
                                        [self.n_dct, self.n_dct]))
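A quick way to validate the generator above: with the "ortho" factors applied, the DCT-II matrix is orthogonal. A plain-numpy sanity check using the same formula (n is an arbitrary size):

import numpy as np

n = 8
k, i = np.meshgrid(np.arange(n), np.arange(n), indexing='ij')
M = 2 * np.cos(np.pi / (2 * n) * k * (2 * i + 1))  # DCT-II, as in the loop above
norm = np.full((n, n), np.sqrt(1 / (2 * n)))
norm[0] = np.sqrt(1 / (4 * n))                     # first row gets the smaller factor
assert np.allclose((M * norm) @ (M * norm).T, np.eye(n), atol=1e-9)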
Code Example #16
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     to_dtype = node.attrs['to']
     if cls.is_constant(x):
         x_val = cls.get_constant(x)
         x_val = x_val.astype(to_dtype)
         if x_val.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, x_val)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          dims=Dim.unnamed(x_val.shape),
                                          value=x_val)
     else:
         params = QuantizeParameters(valid_name,
                                     to_qtype=QType(dtype=to_dtype))
         G.add_edge(
             NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                    to_idx=0))
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape), None)
     return params
Code Example #17
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        if 'value' in node.attrs:
            value = numpy_helper.to_array(node.attrs['value'])
        elif 'value_float' in node.attrs:
            value = np.array([node.attrs['value_float']], dtype=np.float32)
        elif 'value_floats' in node.attrs:
            value = np.array(node.attrs['value_floats'], dtype=np.float32)
        elif 'value_int' in node.attrs:
            value = np.array([node.attrs['value_int']], dtype=np.int32)
        elif 'value_ints' in node.attrs:
            value = np.array(node.attrs['value_ints'], dtype=np.int32)
        elif 'value_string' in node.attrs or 'value_strings' in node.attrs:
            raise NotImplementedError(
                'NNTOOL does not support string constants')
        elif 'sparse_value' in node.attrs:
            raise NotImplementedError(
                'NNTOOL does not support sparse constants')
        else:
            raise ValueError('ONNX constant has no value')

        params = ConstantInputParameters(valid_name,
                                         dims=Dim.unnamed(value.shape),
                                         value=value)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape),
                                     None)
        return params
Code Example #18
File: squeeze.py Project: mfkiwl/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if axes:
            if any(x_shape[axis] != 1 for axis in axes):
                raise ValueError("axis parameter in node %s is invalid %s" % (valid_name, axes))
            new_shape = [dim for idx, dim in enumerate(x_shape) if idx not in axes]
        else:
            new_shape = [dim for dim in x_shape if dim != 1]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=x_val.reshape(new_shape),
                                             constant_store=G.constant_store)
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name, old_shape=old_shape, shape=shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Code Example #19
File: unsqueeze.py Project: bot-motion/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if len(x_shape) == 0:
            assert len(axes) == 1 and axes[0] == 0
            new_shape = [1]
        else:
            new_shape = [
                item for sublist in [[1, dim] if idx in axes else [dim]
                                     for idx, dim in enumerate(x_shape)]
                for item in sublist
            ]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=x_val.reshape(new_shape))
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name,
                                       old_shape=old_shape,
                                       shape=shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Code Example #20
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        transpose = node.attrs.get('perm', list(range(len(x_shape) - 1, -1, -1)))
        transpose = tuple(transpose)
        pout_shape = [x_shape[i] for i in transpose]

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=x_val.transpose(transpose))
        else:
            transpose = [new_axes[axis] for axis in transpose if x_shape[axis] is not None]
            if transpose == sorted(transpose):
                params = NoOPParameters(valid_name, desc="transpose does nothing")
            else:
                params = TransposeParameters(valid_name, transpose=transpose)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
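The axis remapping above is easiest to see on a concrete shape: dropping the None batch dim renumbers the surviving axes densely, and the permutation is rewritten in the new numbering. A small worked example (pure Python):

x_shape = [None, 12, 7, 3]
transpose = (0, 3, 1, 2)                 # pout_shape would be [None, 3, 12, 7]
real_axes = [i for i, d in enumerate(x_shape) if d is not None]
new_axes = {axis: pos for pos, axis in enumerate(real_axes)}
real_transpose = [new_axes[a] for a in transpose if x_shape[a] is not None]
assert real_transpose == [2, 0, 1]       # (12, 7, 3) -> (3, 12, 7)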
Code Example #21
    def _import_as_matmul(cls, node, inputs, x, y, real_x_shape, real_y_shape, trans_a, trans_b, alpha, beta, **kwargs):
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        all_nodes = kwargs['all_nodes']
        if trans_a:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tinx'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=x[0], to_node=tparams, from_idx=x[1]))
            x = (tparams, 0)
        if trans_b:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tiny'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=y[0], to_node=tparams, from_idx=y[1]))
            y = (tparams, 0)
        params = MatMulOpParameters(G.unique_name(valid_name))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(
            NNEdge(from_node=y[0], to_node=params, from_idx=y[1], to_idx=1))

        out_dims = params.get_output_size(
            [Dim.unnamed(real_x_shape), Dim.unnamed(real_y_shape)])

        biases = cls.get_constant(inputs[2]) if len(inputs) > 2 else np.zeros(out_dims[0].shape[1])
        biases_params = ConstantInputParameters(
            G.unique_name(f'{valid_name}_biases'), dims=Dim.unnamed(biases.shape), value=biases)
        G.add_edge(
            NNEdge(from_node=biases_params, to_node=params, to_idx=2))
        if len(inputs) > 2:
            # only record a constant qrec when a real bias input exists
            cls.record_constant_qrec(inputs[2], biases_params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, out_dims[0], None)
        return params
Code Example #22
    def do_fusions(self, args):
        """
Carry out the default set of fusions on the graph"""
        if args.list:
            table = texttable.Texttable()
            table.set_cols_align(['l', 'l'])
            table.set_max_width(120)
            table.add_rows([['Name', 'Description']] + get_fusions())
            self.ppaged(table.draw())
            return
        self._check_graph()
        state = ConstantInputParameters.save_compression_state(self.G)
        try:
            if args.apply:
                fusions = [get_fusion(name) for name in args.apply]
                invalid_names = [
                    args.apply[idx] for idx, fusion in enumerate(fusions)
                    if fusion is None
                ]
                if invalid_names:
                    self.perror(
                        f'fusion{"s" if len(invalid_names) > 1 else ""} {", ".join(invalid_names)} not found'
                    )
                    return
            elif args.pow2:
                fusions = [get_pow2_match_group()]
            elif args.scale8:
                fusions = [get_scale8_match_group()]
            else:
                self.perror(
                    "No fusion set selected. Nothing to do. Select --pow2 or --scale8."
                )
                return
            for fusion in fusions:
                fusion.match(self.G)
            self.G.add_dimensions()
            if self.G.quantization and verify_quantization(self.G):
                quantizer = NewQuantizer(self.G)
                quantizer.quantize()
                problems = verify_quantization(self.G)
                if problems:
                    self.perror('quantization issue after fusions')
                    for problem in problems:
                        self.perror(problem)
        finally:
            ConstantInputParameters.restore_compression_state(self.G, state)
Code Example #23
    def rescale_constant(cls,
                         node: ConstantInputParameters,
                         scale,
                         qrecs,
                         dtype=None):
        qrec = qrecs[NodeId(node)]
        qtype = qrec.out_qs[0]
        if (qtype.scale == scale.astype(qtype.scale.dtype)
                and (dtype is None or dtype == qtype.dtype)):
            return
        if node.qtype:
            node.value = node.dqvalue
            node.qtype = None

        qtype.scale = scale
        if dtype:
            qtype.dtype = dtype
Code Example #24
File: onnx.py Project: brupa9/gap_sdk
 def _get_initializers(self, G, initializer):
     return {
         init.name: (ConstantInputParameters(
             f'constant_{self._validate_name(init.name)}',
             dims=Dim.unnamed(init.dims or [1]),
             value=self._get_numpy_array(init),
             constant_store=G.constant_store), 0, ProvisionalDim(init.dims))
         for init in initializer
     }
Code Example #25
File: pad.py Project: brupa9/gap_sdk
    def _common(cls,
                node,
                mode='constant',
                pads=None,
                constant_value=0,
                **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        apads = np.array(pads).reshape((-1, 2))
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            val = cls.get_constant(x)
            if mode == 'constant':
                val = np.pad(val,
                             apads,
                             mode=mode,
                             constant_values=constant_value)
            else:
                val = np.pad(val, apads, mode=mode)
            params = ConstantInputParameters(valid_name,
                                             value=val,
                                             constant_store=G.constant_store)
            all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape))
            return params

        if mode != 'constant':
            raise ValueError('%s - pad mode %s is not supported' %
                             (valid_name, mode))
        if constant_value != 0:
            raise ValueError('%s - only zero padding is supported' %
                             valid_name)

        trimmed_pads = tuple(
            [pad for idx, pad in enumerate(apads) if x_shape[idx] is not None])

        if all(sum(trimmed_pad) == 0 for trimmed_pad in trimmed_pads):
            params = NoOPParameters(valid_name, desc="eliminated pad of 0")
            pshape = x_shape
        else:
            pshape = [
                dim + sum(apads[idx]) if dim is not None else None
                for idx, dim in enumerate(x_shape)
            ]
            padvals = [(constant_value, constant_value)] * len(trimmed_pads)
            params = PadParameters(valid_name,
                                   padding=trimmed_pads,
                                   pad_vals=padvals)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape))
        return params
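For the constant fold at the top of this handler, the apads rows are (before, after) pairs per axis, which is exactly the layout np.pad consumes. A minimal plain-numpy illustration (values invented):

import numpy as np

apads = np.array([1, 1, 0, 2]).reshape((-1, 2))  # axis 0: (1, 1), axis 1: (0, 2)
val = np.ones((2, 3))
out = np.pad(val, apads, mode='constant', constant_values=0)
assert out.shape == (4, 5)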
Code Example #26
 def _get_initializers(self, initializer):
     return {
         init.name:
         (ConstantInputParameters(self._validate_name(init.name),
                                  dims=Dim.unnamed(init.dims or [1]),
                                  value=self._get_numpy_array(init)), 0,
          Dim.unnamed(init.dims))
         for init in initializer
     }
Code Example #27
 def _get_initializers(self, G, initializer):
     return {
         init.name: (ConstantInputParameters(
             f'constant_{self._validate_name(init.name)}',
             dims=Dim.unnamed(init.dims or [1]),
             value=self._get_numpy_array(init),
             imported_dtype=self.get_onnx_tensor_dtype(init)), 0,
                     ProvisionalDim(init.dims), None)
         for init in initializer
     }
Code Example #28
File: reshape.py Project: mfkiwl/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]

        if cls.SINCE_VERSION == 1:
            shape = np.array(node.attrs["shape"])
        else:  # since_version >= 5
            shape = cls.get_constant(inputs[1])

        input_shape = np.array(inputs[0][2].shape)
        shape = [
            dim if dim != 0 else input_shape[idx]
            for idx, dim in enumerate(shape)
        ]
        if -1 in shape:
            wild_index = shape.index(-1)
            in_size = prod([1 if dim is None else dim for dim in input_shape])
            shape_size = prod(
                [1 if dim is None or dim <= 0 else dim for dim in shape])
            if in_size % shape_size != 0:
                raise ValueError('invalid reshape')
            shape[wild_index] = in_size // shape_size
        shape = np.array(shape)

        if cls.is_constant(inputs[0]):
            logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             value=cls.get_constant(
                                                 inputs[0]).reshape(shape),
                                             dims=Dim.unnamed(shape),
                                             constant_store=G.constant_store)
            pshape = ProvisionalDim(shape)
            all_nodes[node.output[0]] = (params, 0, pshape)
            return params

        # TODO - There must be a better way of doing this
        # This hacks around the fact that the batch dimension will be in the reshape
        if input_shape[0] is None and shape[0] == 1:
            shape = np.array([None] + list(shape[1::]))

        pshape = ProvisionalDim(shape)
        # pylint: disable=singleton-comparison
        old_shape = Dim.unnamed(list(input_shape[input_shape != None]))
        shape = Dim.unnamed(list(shape[shape != None]))
        params = ReshapeParameters(valid_name,
                                   old_shape=old_shape,
                                   shape=shape)
        inp = inputs[0]
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
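The shape fixing at the top of this handler mirrors ONNX Reshape semantics: a 0 copies the input dim at that position and a single -1 absorbs whatever size remains. A standalone version of that resolution (pure Python/numpy, ignoring the None batch handling):

import numpy as np

def resolve_shape(input_shape, shape):
    shape = [d if d != 0 else input_shape[i] for i, d in enumerate(shape)]
    if -1 in shape:
        known = int(np.prod([d for d in shape if d > 0]))
        shape[shape.index(-1)] = int(np.prod(input_shape)) // known
    return shape

assert resolve_shape([2, 3, 4], [0, -1]) == [2, 12]
assert resolve_shape([2, 3, 4], [4, -1, 2]) == [4, 3, 2]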
Code Example #29
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ConcatenationOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        non_zero_idxes = [idx for idx in buffer_idxes if idx != 0]
        if len(set(non_zero_idxes)) != len(non_zero_idxes):
            raise NotImplementedError(
                "concats with multiple versions of the same input are not supported. "
                "This is normally a graph design problem.")

        axis = node_opts.Axis()
        if any(inp_shape[axis] is None for inp_shape in inp_shapes):
            raise ValueError("concat on undefined axis in node %s" % node.name)

        def red_func(x, y):
            return y.copy() if x is None else [
                (elem if y[idx] is not None and elem is not None else None)
                if idx != axis else elem + y[axis]
                for idx, elem in enumerate(x)
            ]

        pout_shape = reduce(red_func, inp_shapes)

        if all(cls.is_constant(inp) for inp in inputs):
            # cls.remove_none_from_constants(inputs, pout_shape)
            LOG.info("reducing %s to a constant", node.name)
            value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                   axis=axis)
            params = ConstantInputParameters(node.name,
                                             value=value,
                                             constant_store=G.constant_store)
        else:
            axis -= sum(1 if dim is None else 0 for dim in pout_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            for idx, inp in enumerate(inputs):
                inp_node, inp_idx = cls._maybe_insert_reshape(
                    G, inp, inp_shapes[idx], pout_shape)
                G.add_edge(
                    NNEdge(from_node=inp_node,
                           to_node=params,
                           from_idx=inp_idx,
                           to_idx=idx))
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)
        cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
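The red_func reduction above merges the input shapes axis by axis: the concat axis accumulates, and every other axis survives only if it is defined (not None) in all inputs. A worked example in isolation:

from functools import reduce

axis = 1

def red_func(x, y):
    return y.copy() if x is None else [
        (elem if y[idx] is not None and elem is not None else None)
        if idx != axis else elem + y[axis]
        for idx, elem in enumerate(x)
    ]

shapes = [[None, 4, 8], [None, 6, 8]]
assert reduce(red_func, shapes, None) == [None, 10, 8]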
Code Example #30
    def do_aquant(self, args: argparse.Namespace):
        """
Attempt to calculate quantization for graph using one or more sample input files."""
        self._check_graph()
        stats_collector = ActivationRangesCollector()
        # if replaying state file then load the activation stats if they are present
        opts = get_options_from_args(args)
        state = ConstantInputParameters.save_compression_state(self.G)
        try:
            if self.replaying_history and self.history_stats:
                astats = self.history_stats
            else:
                input_args = self._get_input_args(args)
                processed_input = False
                for file_per_input in glob_input_files(args.input_files,
                                                       self.G.num_inputs):
                    LOG.info("input file %s", file_per_input)
                    processed_input = True
                    data = [
                        import_data(input_file, **input_args)
                        for input_file in file_per_input
                    ]
                    stats_collector.collect_stats(self.G, data)
                if not processed_input:
                    self.perror("No input files found")
                    return
                astats = stats_collector.stats
                self._record_stats(astats)

            if args.force_width:
                opts['bits'] = args.force_width

            quantizer = NewQuantizer(self.G, reset_all=True)
            quantizer.schemes.append(args.scheme)
            quantizer.set_stats(astats, opts)
            quantizer.quantize()

            self.G.add_dimensions()
            LOG.info("Quantization set. Use qshow command to see it.")
        finally:
            ConstantInputParameters.restore_compression_state(self.G, state)