Example #1
File: resize.py Project: brupa9/gap_sdk
    def _common(cls, node, scales, sizes, nearest_mode='round_prefer_ceil', **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] if inp else None for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_rank = len(x_shape)
        spatial_size = x_rank - 2
        in_c = x_shape[1]
        in_w = x_shape[-1]
        if scales is not None:
            # guard the None (batch) dims before multiplying by the scales
            sizes = [None if dim is None else int(dim * scale)
                     for dim, scale in zip(x_shape, scales)]
        else:
            sizes = [None if x_shape[idx] is None else dim
                     for idx, dim in enumerate(sizes)]
        if spatial_size == 1:
            sizes.insert(-1, 1)

        if nearest_mode != 'round_prefer_ceil':
            logger.warning('only round_prefer_ceil is supported for nearest mode')

        if spatial_size != 2 and spatial_size != 1:
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        if not all(x_dim == size_dim for x_dim, size_dim in zip(x_shape[:2:], sizes[:2:])):
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        mode = node.attrs.get('mode', 'nearest')
        if mode != 'nearest' and mode != 'linear':
            raise ValueError('resize only supports nearest and linear modes')

        params_class = BilinearResizerParameters if mode == 'linear' else NearestNeighborResizerParameters

        params = params_class(valid_name,
                              new_shape=tuple(sizes[2::]),
                              align_corners=False,
                              halfpixel_centers=False,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])

        if spatial_size == 1:
            r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                          old_shape=Dim.unnamed([in_c, in_w]),
                                          shape=Dim.unnamed([in_c, 1, in_w]))
            r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                          old_shape=Dim.unnamed([in_c, 1, sizes[-1]]),
                                          shape=Dim.unnamed([in_c, sizes[-1]]))
            G.add_edge(NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
            G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
            G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
            pout_dims = ProvisionalDim(sizes[:-2:] + sizes[-1::])
            params = r2_params
        else:
            pout_dims = ProvisionalDim(sizes)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
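
A standalone sketch (plain Python, nothing from gap_sdk) of the size arithmetic above: scales become absolute sizes with None (batch) dims preserved, and a single spatial dim is widened to two so a 2D resizer can be reused.

def resize_sizes(x_shape, scales=None, sizes=None):
    if scales is not None:
        sizes = [None if dim is None else int(dim * scale)
                 for dim, scale in zip(x_shape, scales)]
    else:
        sizes = [None if x_shape[idx] is None else dim
                 for idx, dim in enumerate(sizes)]
    if len(x_shape) - 2 == 1:            # one spatial dim: [N, C, W] -> [N, C, 1, W]
        sizes.insert(-1, 1)
    return sizes

print(resize_sizes([None, 3, 10], scales=[1, 1, 2]))        # [None, 3, 1, 20]
print(resize_sizes([None, 3, 8, 8], sizes=[1, 3, 16, 16]))  # [None, 3, 16, 16]
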
Example #2
File: pad.py Project: brupa9/gap_sdk
    def _common(cls,
                node,
                mode='constant',
                pads=None,
                constant_value=0,
                **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        apads = np.array(pads).reshape((-1, 2))
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            val = cls.get_constant(x)
            if mode == 'constant':
                val = np.pad(val,
                             apads,
                             mode=mode,
                             constant_values=constant_value)
            else:
                val = np.pad(val, apads, mode=mode)
            params = ConstantInputParameters(valid_name,
                                             value=val,
                                             constant_store=G.constant_store)
            # the pad may have changed the shape so report the padded value's shape
            all_nodes[node.output[0]] = (params, 0, ProvisionalDim(val.shape))
            return params

        if mode != 'constant':
            raise ValueError('%s - pad mode %s is not supported' %
                             (valid_name, mode))
        if constant_value != 0:
            raise ValueError('%s - only zero padding is supported' %
                             valid_name)

        trimmed_pads = tuple(
            [pad for idx, pad in enumerate(apads) if x_shape[idx] is not None])

        if all(sum(trimmed_pad) == 0 for trimmed_pad in trimmed_pads):
            params = NoOPParameters(valid_name, desc="eliminated pad of 0")
            pshape = x_shape
        else:
            pshape = [
                dim + sum(apads[idx]) if dim is not None else None
                for idx, dim in enumerate(x_shape)
            ]
            # pshape = [None if dim is None else dim + sum(apads[idx]) for idx, dim in enumerate(x_shape)]
            padvals = [(constant_value, constant_value)] * len(trimmed_pads)
            params = PadParameters(valid_name,
                                   padding=trimmed_pads,
                                   pad_vals=padvals)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape))
        return params
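
A minimal numpy sketch of the constant-folding branch above. It assumes `pads` arrives flattened as per-axis (begin, end) pairs, which is what the reshape((-1, 2)) implies.

import numpy as np

pads = [0, 1, 0, 2]                      # axis 0: (0, 1), axis 1: (0, 2)
apads = np.array(pads).reshape((-1, 2))
val = np.ones((2, 3))
padded = np.pad(val, apads, mode='constant', constant_values=0)
print(padded.shape)                      # (3, 5)
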
Example #3
File: reshape.py Project: mfkiwl/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]

        if cls.SINCE_VERSION == 1:
            shape = np.array(node.attrs["shape"])
        else:  # since_version >= 5
            shape = cls.get_constant(inputs[1])

        input_shape = np.array(inputs[0][2].shape)
        shape = [
            dim if dim != 0 else input_shape[idx]
            for idx, dim in enumerate(shape)
        ]
        if -1 in shape:
            wild_index = shape.index(-1)
            in_size = prod([1 if dim is None else dim for dim in input_shape])
            shape_size = prod(
                [1 if dim is None or dim <= 0 else dim for dim in shape])
            if in_size % shape_size != 0:
                raise ValueError('invalid reshape')
            shape[wild_index] = in_size // shape_size
        shape = np.array(shape)

        if cls.is_constant(inputs[0]):
            logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             value=cls.get_constant(
                                                 inputs[0]).reshape(shape),
                                             dims=Dim.unnamed(shape),
                                             constant_store=G.constant_store)
            pshape = ProvisionalDim(shape)
            all_nodes[node.output[0]] = (params, 0, pshape)
            return params

        # TODO - There must be a better way of doing this
        # This hacks around the fact that the batch dimension will be in the reshape
        if input_shape[0] is None and shape[0] == 1:
            shape = np.array([None] + list(shape[1::]))

        pshape = ProvisionalDim(shape)
        # pylint: disable=singleton-comparison
        old_shape = Dim.unnamed(list(input_shape[input_shape != None]))
        shape = Dim.unnamed(list(shape[shape != None]))
        params = ReshapeParameters(valid_name,
                                   old_shape=old_shape,
                                   shape=shape)
        inp = inputs[0]
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
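
A standalone sketch (plain Python) of the wildcard handling above: 0 copies the input dimension and a single -1 absorbs whatever is left, with None dims counted as 1.

from math import prod

def resolve_shape(input_shape, shape):
    shape = [dim if dim != 0 else input_shape[idx]
             for idx, dim in enumerate(shape)]
    if -1 in shape:
        in_size = prod(1 if dim is None else dim for dim in input_shape)
        known = prod(1 if dim is None or dim <= 0 else dim for dim in shape)
        if in_size % known != 0:
            raise ValueError('invalid reshape')
        shape[shape.index(-1)] = in_size // known
    return shape

print(resolve_shape([4, 3, 2], [0, -1]))  # [4, 6]
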
Example #4
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        input_shapes = [inp[2].shape for inp in inputs]
        axis = node.attrs['axis']
        new_axis = node.attrs.get('new_axis', 0)
        # if new_axis is false this is the same as concat
        if not new_axis:
            return cls.gen_concat(node, inputs, axis, **kwargs)
        # if it is true then it's different
        if not all(shape == input_shapes[0] for shape in input_shapes[1::]):
            raise ValueError(
                'all shapes must be the same in ConcatFromSequence with new axis'
            )

        # reduce to a constant if we can
        if all(cls.is_constant(inp) for inp in inputs):
            logger.info("reducing %s to a constant", valid_name)
            value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                   axis=axis)
            params = ConstantInputParameters(valid_name, value=value)
            all_nodes[node.output[0]] = (params, 0,
                                         ProvisionalDim(value.shape),
                                         inputs[0][3])
            return params

        # add the axis into the shape
        new_shape = input_shapes[0].copy()
        new_shape = new_shape[:axis:] + [1] + new_shape[axis::]
        old_shape = cls._get_real_dim(input_shapes[0])
        shape = cls._get_real_dim(new_shape)
        # create a reshape on each input and pass the outputs to the concat mixin
        #pylint: disable=consider-using-enumerate
        for idx in range(len(inputs)):
            inp = inputs[idx]
            rparams = ReshapeParameters("%s_reshape_%s" % (valid_name, idx),
                                        old_shape=old_shape,
                                        shape=shape)
            G.add_edge(
                NNEdge(from_node=inp[0],
                       to_node=rparams,
                       from_idx=inp[1],
                       to_idx=0))
            inputs[idx] = (rparams, 0, ProvisionalDim(new_shape), inp[3])

        return cls.gen_concat(node, inputs, axis, **kwargs)
Example #5
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     value = numpy_helper.to_array(node.attrs['value'])
     params = ConstantInputParameters(valid_name, dims=Dim.unnamed(value.shape), value=value)
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape))
     return params
Example #6
    def _common(cls, node, starts, ends, axes, steps, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        x = all_nodes[node.input[0]]
        x_shape = np.array(x[2].shape)
        x_rank = len(x_shape)
        axes = cls._resolve_negative_ranks(
            axes, len(x_shape)) if axes else tuple(range(x_rank))
        axes_rank = len(axes)
        steps = steps if steps else [1] * axes_rank
        slices = np.stack([starts, ends, steps]).transpose((1, 0))
        p_slices = []
        p_shape = []
        for idx, dim in enumerate(x_shape):
            try:
                if dim is None:
                    p_slices.append(None)
                    p_shape.append(None)
                else:
                    slice_idx = axes.index(idx)
                    begin, end, step = slices[slice_idx]
                    begin = max(min(begin if begin >= 0 else dim + begin, dim),
                                0)
                    end = max(min(end if end >= 0 else dim + end, dim), -1)
                    # -sys.maxsize is used to indicate 0 in the reverse slice direction
                    # this makes it compatible with the numpy slice
                    p_slices.append(
                        (begin, -sys.maxsize if end == -1 else end, step))
                    if step < 0:
                        p_shape.append((begin - end) // -step)
                    else:
                        p_shape.append((end - begin) // step)

            except ValueError:
                p_slices.append((0, dim, 1))
                p_shape.append(dim)
        slices = cls._get_real_dim(p_slices)
        shape = cls._get_real_dim(p_shape)

        params = StridedSliceParameters(valid_name,
                                        act_slice=slices,
                                        out_shape=shape)
        if cls.is_constant(x):
            x_val = cls.get_constant(x)
            x_val = params.numpy_slice(x_val)
            if x_val.size < 10:
                logger.info("reducing %s to a constant %s", valid_name, x_val)
            else:
                logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             dims=Dim.unnamed(x_val.shape),
                                             value=x_val,
                                             constant_store=G.constant_store)
        else:
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(p_shape))
        return params
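
A standalone sketch (plain Python/numpy) of the begin/end/step normalisation above, including the -sys.maxsize trick that lets a reversed slice run all the way down to index 0.

import sys
import numpy as np

def clamp_slice(dim, begin, end, step):
    begin = max(min(begin if begin >= 0 else dim + begin, dim), 0)
    end = max(min(end if end >= 0 else dim + end, dim), -1)
    np_end = -sys.maxsize if end == -1 else end   # a bare -1 would mean "last element" to numpy
    out_len = (begin - end) // -step if step < 0 else (end - begin) // step
    return (begin, np_end, step), out_len

x = np.arange(10)
(b, e, s), n = clamp_slice(10, -1, -11, -1)       # reverse the whole axis
print(x[b:e:s], n)                                # [9 8 7 6 5 4 3 2 1 0] 10
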
Example #7
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        out_rank = len(x_shape) + len(kwargs['axes'])
        axes = cls._resolve_negative_ranks(kwargs['axes'], out_rank)

        old_shape = x_shape.copy()
        new_shape = [
            1 if new_idx in axes else old_shape.pop(0)
            for new_idx in range(out_rank)
        ]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            x_val = cls.get_constant(x)
            logger.info(
                f"reducing {valid_name} to a constant {cls.print_small(x_val)}"
            )
            params = ConstantInputParameters(valid_name,
                                             value=x_val.reshape(new_shape))
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name,
                                       old_shape=old_shape,
                                       shape=shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape, x[3])
        return params
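
A standalone sketch (plain Python) of the shape expansion above: each axis in `axes` receives a 1 and the remaining output positions consume the input dims in order.

def unsqueeze_shape(x_shape, axes):
    out_rank = len(x_shape) + len(axes)
    old = list(x_shape)
    return [1 if idx in axes else old.pop(0) for idx in range(out_rank)]

print(unsqueeze_shape([4, 8], (0, 2)))   # [1, 4, 1, 8]
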
Example #8
    def _common(cls, node: TFLiteNode, **kwargs):
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        ptranspose = cls._verify_constant(inputs[1])
        pout_shape = [x_shape[dim] for dim in ptranspose]
        transpose = [new_axes[axis] for axis in ptranspose if x_shape[axis] is not None]
        node.input[1].used = True

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            val = np.transpose(cls.get_constant(x), ptranspose)
            params = ConstantInputParameters(node.name, value=val,
                                             dims=Dim.unnamed(val.shape), constant_store=G.constant_store)
        else:
            params = TransposeParameters(node.name, transpose=transpose)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization([node.input[0]], node.output)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
Example #9
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        transpose = node.attrs.get('perm', list(range(len(x_shape) - 1, -1, -1)))
        transpose = tuple(transpose)
        pout_shape = [x_shape[i] for i in transpose]

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=x_val.transpose(transpose))
        else:
            transpose = [new_axes[axis] for axis in transpose if x_shape[axis] is not None]
            if transpose == sorted(transpose):
                params = NoOPParameters(valid_name, desc="transpose does nothing")
            else:
                params = TransposeParameters(valid_name, transpose=transpose)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
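
A standalone sketch (plain Python) of how the transpose handlers above rewrite a permutation once None (batch) axes are dropped from the shape.

def remap_transpose(x_shape, transpose):
    new_axes = {}
    for idx, dim in enumerate(x_shape):
        if dim is not None:
            new_axes[idx] = len(new_axes)
    return [new_axes[axis] for axis in transpose if x_shape[axis] is not None]

# NCHW -> NHWC on a shape whose batch dim is unknown:
print(remap_transpose([None, 3, 8, 8], (0, 2, 3, 1)))  # [1, 2, 0]
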
Example #10
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        inputs = [all_nodes[inp] for inp in node.input]

        if not all(cls.is_constant(inp) for inp in inputs):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )

        importer = kwargs['importer']
        sub_G = NNGraph()
        all_nodes_clone = all_nodes.copy()
        importer.import_subgraph(sub_G,
                                 node.attrs['body'], {},
                                 all_nodes=all_nodes_clone)
        if not all(
                isinstance(inp, (InputParameters, ConstantInputParameters))
                for inp in sub_G.inputs()):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )
        sub_G.add_dimensions()
        for idx, inp in enumerate(sub_G.inputs()):
            inp.index = idx

        logger.info(f"reducing loop {valid_name} to a constant")
        count = inputs[0][0].value
        keep_going = inputs[1][0].value
        loop_carried = [inp[0].value for inp in inputs[2:]]
        outputs = [None] * len(node.output)  # scan outputs are filled on the first iteration
        while keep_going and count > 0:
            executer = GraphExecuter(sub_G)
            output_tensors = executer.execute([count, keep_going] +
                                              loop_carried,
                                              silent=True)
            outp_vals = [
                output_tensors[node.step_idx][0] for node in sub_G.outputs()
                if not isinstance(node, InputParameters)
            ]
            keep_going = outp_vals[0]
            for idx, val in enumerate(outp_vals[1:]):
                if idx < len(loop_carried):
                    loop_carried[idx] = outputs[idx] = val
                elif outputs[idx] is None:
                    outputs[idx] = val
                else:
                    outputs[idx] = np.concatenate((outputs[idx], val))
            count -= 1
        for idx, outp in enumerate(node.output):
            params = ConstantInputParameters(
                G.unique_name(f'{valid_name}_out{idx}'),
                value=outputs[idx],
                dims=Dim.unnamed(outputs[idx].shape))
            all_nodes[outp] = (params, 0, ProvisionalDim(outputs[idx].shape),
                               None)

        return None
Example #11
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if len(x_shape) == 0:
            assert len(axes) == 1 and axes[0] == 0
            new_shape = [1]
        else:
            new_shape = [
                item for sublist in [[1, dim] if idx in axes else [dim]
                                     for idx, dim in enumerate(x_shape)]
                for item in sublist
            ]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=x_val.reshape(new_shape))
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name,
                                       old_shape=old_shape,
                                       shape=shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Example #12
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        indices = cls.get_constant(y)
        axis = node.attrs.get('axis', 0)

        pshape = ProvisionalDim(x_shape[:axis:] + list(indices.shape) +
                                x_shape[axis + 1:])
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=np.take(x_val,
                                                           indices,
                                                           axis=axis))
        else:
            axis = cls._trim_axis(axis, x_shape)
            params = GatherParameters(valid_name, axis=axis, indices=indices)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
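
A minimal numpy sketch of the constant branch above: Gather over a constant is exactly np.take along the given axis, and the output shape is the input shape with the indexed axis replaced by the indices' shape.

import numpy as np

x = np.arange(12).reshape(3, 4)
indices = np.array([2, 0])
out = np.take(x, indices, axis=0)
print(out.shape)   # (2, 4) == x.shape[:0] + indices.shape + x.shape[1:]
print(out[0])      # row 2 of x
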
Example #13
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        axis = kwargs['axis']
        splits = kwargs.get('splits')
        opts = kwargs['opts']
        input_idx = kwargs.get('input_idx', 0)
        num_splits = kwargs.get('num_splits')

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[input_idx]

        x_shape = x[2].shape
        act_slices, pout_shapes, axis = SplitParameters.get_splits(
            x_shape, axis, splits=splits, num_splits=num_splits)
        out_shapes = [
            BackendHandler.remove_unspecified_dim(shape)
            for shape in pout_shapes
        ]
        params = SplitParameters(node.name,
                                 act_slices=act_slices,
                                 out_shapes=out_shapes,
                                 axis=axis)

        if opts.get('load_quantization'):
            G.quantization[NodeId(
                params)] = BackendHandler.load_tf_quantization([node.input[0]],
                                                               node.output)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        for idx, tensor in enumerate(node.output):
            all_nodes[tensor] = (params, idx, ProvisionalDim(pout_shapes[idx]))
        return params
Example #14
    def _common(cls, node: TFLiteNode, **kwargs):
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        ptranspose = cls._verify_constant(inputs[1])
        pout_shape = [x_shape[dim] for dim in ptranspose]
        transpose = [
            new_axes[axis] for axis in ptranspose if x_shape[axis] is not None
        ]
        node.input[1].used = True

        params = TransposeParameters(node.name, transpose=transpose)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
Example #15
    def _common1_11(cls, node, **kwargs):
        axis = node.attrs.get('axis', 1)
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        x = all_nodes[node.input[0]]
        x_shape = x[2].shape
        if axis < 0:
            axis += len(x_shape)
        old_shape = cls._get_real_dim(x_shape)
        # v1 and v11 work differently from v13. In v1 and v11 the input is collected into a 2D tensor
        # based on the axis: [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] with axis k
        # becomes [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]
        # The softmax is applied to this 2D tensor
        new_pshape = [condense(x_shape[:axis:]), condense(x_shape[axis::])]
        new_shape = cls._get_real_dim(new_pshape)
        reshape_1 = ReshapeParameters(valid_name + "_reshape1",
                                      old_shape=old_shape,
                                      shape=new_shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=reshape_1, from_idx=x[1], to_idx=0))
        # operation axis will either be 1 or 0
        softmax = SoftMaxParameters(valid_name, axis=len(new_shape) - 1)
        G.add_edge(NNEdge(from_node=reshape_1, to_node=softmax))
        reshape_2 = ReshapeParameters(valid_name + "_reshape2",
                                      old_shape=new_shape,
                                      shape=old_shape)
        G.add_edge(NNEdge(from_node=softmax, to_node=reshape_2))

        all_nodes[node.output[0]] = (reshape_2, 0, ProvisionalDim(x_shape))
        return softmax
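
A standalone numpy sketch (hypothetical helper name, nothing from gap_sdk) of the opset 1/11 behaviour above: flatten to 2D around `axis`, softmax the last dimension, reshape back.

import numpy as np
from math import prod

def softmax_v11(x, axis=1):
    two_d = x.reshape(prod(x.shape[:axis]), prod(x.shape[axis:]))
    e = np.exp(two_d - two_d.max(axis=-1, keepdims=True))
    return (e / e.sum(axis=-1, keepdims=True)).reshape(x.shape)

x = np.random.rand(2, 3, 4)
print(softmax_v11(x, axis=1).sum(axis=(1, 2)))  # ~[1. 1.]: one distribution per batch row
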
Example #16
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        if node.attrs.get('value'):
            value = numpy_helper.to_array(node.attrs['value'])
        elif node.attrs.get('value_float'):
            value = np.array([node.attrs['value_float']], dtype=np.float32)
        elif node.attrs.get('value_floats'):
            value = np.array(node.attrs['value_floats'], dtype=np.float32)
        elif node.attrs.get('value_int'):
            value = np.array([node.attrs['value_int']], dtype=np.int32)
        elif node.attrs.get('value_ints'):
            value = np.array(node.attrs['value_ints'], dtype=np.int32)
        elif node.attrs.get('value_string') or node.attrs.get('value_strings'):
            raise NotImplementedError(
                'NNTOOL does not support string constants')
        elif node.attrs.get('sparse_value'):
            raise NotImplementedError(
                'NNTOOL does not support sparse constants')
        else:
            raise ValueError('ONNX constant has no value')

        params = ConstantInputParameters(valid_name,
                                         dims=Dim.unnamed(value.shape),
                                         value=value)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape),
                                     None)
        return params
Example #17
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     to_dtype = node.attrs['to']
     if cls.is_constant(x):
         x_val = cls.get_constant(x)
         x_val = x_val.astype(to_dtype)
         if x_val.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, x_val)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          dims=Dim.unnamed(x_val.shape),
                                          value=x_val)
     else:
         params = QuantizeParameters(valid_name,
                                     to_qtype=QType(dtype=to_dtype))
         G.add_edge(
             NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                    to_idx=0))
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape), None)
     return params
Example #18
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ReshapeOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # TF2 seems to use the second input whereas TF1 uses the opts
        new_shape = None
        if node_opts:
            new_shape = list(node_opts.NewShapeAsNumpy())
        elif len(inputs) > 1:
            set_shape_tensor = list(cls._verify_constant(inputs[1]))
            node.input[1].used = True
            new_shape = list(set_shape_tensor)
        else:
            raise ValueError(
                f"Cannot assess new_shape for Reshape parameter: {node.name}")

        if -1 in new_shape:
            new_shape_size = reduce(lambda x, y: x * 1
                                    if y == -1 else x * y, new_shape, 1)
            inp_size = reduce(lambda x, y: x * y
                              if y is not None else x, x_shape, 1)
            new_shape[new_shape.index(-1)] = inp_size // new_shape_size

        if None in x_shape:
            if 1 in new_shape:
                old_batch_dim = x_shape.index(None)
                new_batch_dim = new_shape.index(1)
                if old_batch_dim != new_batch_dim:
                    LOG.info(
                        "node %s moved batch dimension for axis %s to axis %s",
                        node.name, old_batch_dim, new_batch_dim)
                new_shape[new_batch_dim] = None
            else:
                raise ValueError(
                    "unable to determine movement of unspecified axis in node %s"
                    % node.name)

        pnew_shape = ProvisionalDim(new_shape)
        old_shape = Dim.unnamed(cls.remove_unspecified_dim(x_shape),
                                is_ordered=True)
        new_shape = Dim.unnamed(cls.remove_unspecified_dim(new_shape),
                                is_ordered=True)

        params = ReshapeParameters(node.name,
                                   old_shape=old_shape,
                                   shape=new_shape)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pnew_shape)
        return params
Example #19
File: squeeze.py Project: mfkiwl/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        axes = cls._resolve_negative_ranks(kwargs['axes'], len(x_shape))
        if axes:
            if any(x_shape[axis] != 1 for axis in axes):
                raise ValueError("axis parameter in node %s is invalid %s" % (valid_name, axes))
            new_shape = [dim for idx, dim in enumerate(x_shape) if idx not in axes]
        else:
            new_shape = [dim for dim in x_shape if dim != 1]

        pshape = ProvisionalDim(new_shape)
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=x_val.reshape(new_shape),
                                             constant_store=G.constant_store)
        else:
            old_shape = cls._get_real_dim(x_shape)
            shape = cls._get_real_dim(new_shape)
            params = ReshapeParameters(valid_name, old_shape=old_shape, shape=shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Example #20
File: fill.py Project: mfkiwl/gap_sdk
    def _common(cls, node: TFLiteNode, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        if len(x_shape) != 1:
            raise ValueError(f'FILL {node.name} expecting 1D tensor for shape')

        shape = list(cls._verify_constant(inputs[0]))

        if cls._is_constant(inputs[1]):
            val = cls._get_constant(inputs[1])

            params = ConstantInputParameters(node.name,
                                             dims=Dim.unnamed(shape),
                                             value=np.full(shape, val),
                                             constant_store=G.constant_store)
            all_nodes[node.output[0]] = (params, 0, ProvisionalDim(shape))
            return params
        else:
            raise ValueError(
                f'FILL {node.name} non constant fill values are not currently supported'
            )
Example #21
 def gen_concat(cls, node, inputs, axis, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     # `inputs` arrives already resolved; the caller may have inserted reshapes
     input_shapes = [inp[2].shape for inp in inputs]
     axis_sum = sum(shape[axis] for shape in input_shapes)
     axis = axis if axis >= 0 else len(input_shapes[0]) + axis
     output_shape = [
         axis_sum if idx == axis else dim
         for idx, dim in enumerate(input_shapes[0])
     ]
     pout_dim = ProvisionalDim(output_shape)
     none_dims = sum(
         [1 if dim is None else 0 for dim in output_shape[:axis:]])
     if all(cls.is_constant(inp) for inp in inputs):
         value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                axis=axis)
         logger.info(
             f"reducing {valid_name} to a constant {print_small(value)}")
         params = ConstantInputParameters(valid_name, value=value)
     else:
         params = ConcatParameters(valid_name, axis=axis - none_dims)
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
     all_nodes[node.output[0]] = (params, 0, pout_dim, inputs[0][3])
     return params
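
A standalone sketch (plain Python) of the axis bookkeeping in gen_concat above: the concat axis is shifted left by the number of None dims that precede it, since those dims are absent from the real (known) shape.

def real_concat_axis(shape, axis):
    axis = axis if axis >= 0 else len(shape) + axis
    return axis - sum(1 for dim in shape[:axis] if dim is None)

print(real_concat_axis([None, 4, 8], 2))    # 1
print(real_concat_axis([None, 4, 8], -1))   # 1
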
Example #22
 def _common(cls,
             node,
             pool_type="max",
             constant_operation=None,
             copy_qtype=False,
             **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     unknown_dims = sum(1 if dim is None else 0 for dim in x_shape)
     params = GlobalPoolingParameters(
         valid_name,
         pool_type=pool_type,
         axis=tuple(range(1,
                          len(x_shape) - unknown_dims)),
         keep_dims=True)
     pout_dims = ProvisionalDim([x_shape[0], x_shape[1]] +
                                ([1] * (len(x_shape) - 2)))
     G.add_edge(
         NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
     all_nodes[node.output[0]] = (params, 0, pout_dims,
                                  x[3] if copy_qtype else None)
     return params
Example #23
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(TransposeConvOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[2]
        x_shape = x[2].shape
        in_b, in_h, in_w, in_c = tuple(x_shape)
        pout_shape = [
            dim if x_shape[idx] is not None else None
            for idx, dim in enumerate(cls.get_constant(inputs[0]))
        ]
        out_b, out_h, out_w, out_c = tuple(pout_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # TFLite transpose conv filter order: ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        stride_w = node_opts.StrideW()
        stride_h = node_opts.StrideH()
        # compute padding
        pad = node_opts.Padding()
        if pad == Padding.SAME:
            pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
            pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
            pad_top = pad_h // 2
            pad_left = pad_w // 2
            pad = PadDim(pad_top,
                         pad_h - pad_top,
                         pad_left,
                         pad_w - pad_left,
                         same_type='balanced_right')
        else:
            pad = PadDim(0)

        params = TransposeConv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(stride_h, stride_w),
            padding=pad,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy()],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        pout_dims = ProvisionalDim(pout_shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
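
A standalone sketch (plain Python) of the SAME-padding arithmetic above: the total pad is whatever makes the transposed convolution hit the requested output size, split with the odd pixel going to the bottom/right.

def same_pad(in_sz, out_sz, stride, filt):
    total = (in_sz - 1) * stride + filt - out_sz
    before = total // 2
    return before, total - before            # (top, bottom) or (left, right)

print(same_pad(in_sz=4, out_sz=8, stride=2, filt=3))  # (0, 1)
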
Example #24
    def conv(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape
        spatial_size = x_rank - 2
        assert spatial_size <= 2, "only 1D and 2D convolutions supported"

        # M x C/group x kH x kW
        weights = cls.get_constant(inputs[1])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        # kH x kW; a 1D convolution has no kH so treat it as 1
        filt_h = weights.shape[2] if spatial_size > 1 else 1
        filt_w = weights.shape[-1]
        h = 1 if spatial_size <= 1 else x_shape[2]
        w = 1 if spatial_size == 0 else (x_shape[2] if spatial_size == 1 else x_shape[3])

        filt_dim = Conv2DFilterDim(filt_h, filt_w,
                                   out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([out_c])

        dilations = cls.pad_start_with(node.attrs.get("dilations", [1] * spatial_size), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", [1] * spatial_size), [1], 2)
        pad_dim = cls.calc_pad_dim(node, spatial_size)

        params = Conv2DParameters(valid_name,
                                  filt=filt_dim,
                                  stride=StrideDim(strides[0],
                                                   strides[1]),
                                  dilation=DilationDim(dilations[0],
                                                       dilations[1]),
                                  groups=group,
                                  padding=pad_dim,
                                  has_bias=True,
                                  in_dims_hint=SparseList([['c', 'h', 'w']]),
                                  out_dims_hint=SparseList([['c', 'h', 'w']]),
                                  constant_store=G.constant_store)
        params.weights = weights
        params.biases = biases
        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #25
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ConcatenationOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        non_zero_idxes = [idx for idx in buffer_idxes if idx != 0]
        if len(set(non_zero_idxes)) != len(non_zero_idxes):
            raise NotImplementedError(
                "concats with multiple versions of the same input are not supported. "
                "This is normally a graph design problem.")

        axis = node_opts.Axis()
        if any(inp_shape[axis] is None for inp_shape in inp_shapes):
            raise ValueError("concat on undefined axis in node %s" % node.name)

        def red_func(x, y):
            return y.copy() if x is None else [
                (elem if y[idx] is not None and elem is not None else None)
                if idx != axis else elem + y[axis]
                for idx, elem in enumerate(x)
            ]

        pout_shape = reduce(red_func, inp_shapes)

        if all(cls.is_constant(inp) for inp in inputs):
            # cls.remove_none_from_constants(inputs, pout_shape)
            LOG.info("reducing %s to a constant", node.name)
            value = np.concatenate([cls.get_constant(inp) for inp in inputs],
                                   axis=axis)
            params = ConstantInputParameters(node.name,
                                             value=value,
                                             constant_store=G.constant_store)
        else:
            axis -= sum(1 if dim is None else 0 for dim in pout_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            for idx, inp in enumerate(inputs):
                inp_node, inp_idx = cls._maybe_insert_reshape(
                    G, inp, inp_shapes[idx], pout_shape)
                G.add_edge(
                    NNEdge(from_node=inp_node,
                           to_node=params,
                           from_idx=inp_idx,
                           to_idx=idx))
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)
        cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
Example #26
    def pool(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_feature_shape = x_shape[2::]
        in_c = x_shape[1]

        kernel_shape = node.attrs["kernel_shape"]
        spatial_size = len(kernel_shape)
        x_rank = spatial_size + 2
        if spatial_size != 2:
            raise ValueError(valid_name + " with {}D input".format(x_rank))

        h = x_shape[2]
        w = x_shape[3]

        strides = node.attrs.get("strides", [1] * spatial_size)
        stride_is_one = all(stride == 1 for stride in strides)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        if any(dilation > 1 for dilation in dilations):
            raise ValueError(valid_name + " with dilation not supported")
        # ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pad_dim = cls.calc_pad_dim(node, spatial_size)
        # Note: This needs to check dilation if it is added
        filter_matches_input = (all(
            k_dim >= (x_dim + pad) for k_dim, x_dim, pad in zip(
                kernel_shape, x_feature_shape, [pad_dim.h, pad_dim.w])))

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(valid_name,
                                          pool_type=pool_type,
                                          axis=[1, 2],
                                          keep_dims=True,
                                          in_dims_hint=[['c', 'h', 'w']],
                                          out_dims_hint=[['c', 'h', 'w']])
        else:
            params = PoolingParameters(
                valid_name,
                filt=PoolFilterDim(kernel_shape[0], kernel_shape[1]),
                stride=StrideDim(strides[0], strides[1]),
                padding=pad_dim,
                pool_type=pool_type,
                in_dims_hint=[['c', 'h', 'w']],
                out_dims_hint=[['c', 'h', 'w']])

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
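
A standalone sketch (plain Python) of the test above that turns a pooling layer into a global pool, assuming `pads` holds the total padding per spatial dim: if the padded kernel covers the whole feature map and the stride is 1, the pool reduces each channel to a single value.

def is_global_pool(kernel_shape, feature_shape, pads, strides):
    covers = all(k >= x + p for k, x, p in zip(kernel_shape, feature_shape, pads))
    return covers and all(s == 1 for s in strides)

print(is_global_pool([8, 8], [8, 8], [0, 0], [1, 1]))  # True
print(is_global_pool([2, 2], [8, 8], [0, 0], [2, 2]))  # False
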
Example #27
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(node.input) > 2:
            node.input[2].used = True

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([in_hint]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Example #28
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(ReducerOptions)
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        reduce_type = kwargs['reduce_type']
        opts = kwargs['opts']

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]

        x_shape = x[2].shape

        axes = cls._verify_constant(inputs[1])
        node.input[1].used = True

        if len(axes.shape) == 0:
            axes = list([int(axes)])
        else:
            axes = sorted(list(axes))

        # convert all negative axis to their true value
        axes = sorted(
            [elem if elem >= 0 else len(x_shape) + elem for elem in axes])

        if not BackendHandler.remove_unspecified_dim(axes):
            params = NoOPParameters(node.name)
            pout_shape = x_shape.copy()
        else:
            pout_shape = [
                1 if idx in axes and dim is not None else dim
                for idx, dim in enumerate(x_shape)
            ]
            # subtract 1 from axis for all Nones preceding it and remove
            # axes that are not defined
            axes = [
                ax - sum([1 if dim is None else 0 for dim in x_shape[:ax:]])
                for ax in axes if x_shape[ax] is not None
            ]
            params = GlobalPoolParameters(node.name,
                                          pool_type=reduce_type,
                                          axis=tuple(axes),
                                          keep_dims=node_opts.KeepDims())
            # the reduced axes are set to 1 in the output shape

        if opts.get('load_quantization'):
            G.quantization[NodeId(
                params)] = BackendHandler.load_tf_quantization([node.input[0]],
                                                               node.output)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
Example #29
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        reduce_type = kwargs['reduce_type']
        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_rank = len(x_shape)

        axes = node.attrs['axes']

        # convert all negative axis to their true value
        axes = set([elem if elem >= 0 else x_rank + elem for elem in axes])
        assert all(axis >= 0 and axis < x_rank
                   for axis in axes), "axis out of bounds"
        keep_dims = node.attrs.get('keepdims', 1)

        stripped_axes = [axis for axis in axes if x_shape[axis] is not None]

        if not stripped_axes:
            params = NoOPParameters(valid_name)
            pout_shape = x_shape.copy()
        else:
            if keep_dims:
                pout_shape = [
                    dim if idx not in axes else 1
                    for idx, dim in enumerate(x_shape)
                ]
            else:
                pout_shape = [
                    dim for idx, dim in enumerate(x_shape) if idx not in axes
                ]
                if all(dim is None for dim in pout_shape):
                    pout_shape.append(1)

            # subtract 1 from axis for all Nones preceding it and remove
            # axes that are not defined
            axes = [
                ax - sum([1 if dim is None else 0 for dim in x_shape[:ax:]])
                for ax in stripped_axes
            ]
            params = GlobalPoolParameters(valid_name,
                                          pool_type=reduce_type,
                                          axis=tuple(axes),
                                          keep_dims=keep_dims)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
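
A standalone sketch (plain Python) of the axis remapping both reducer handlers above share: axes that land on None dims are dropped, and each survivor is shifted left by the number of None dims before it.

def remap_reduce_axes(x_shape, axes):
    axes = sorted(ax if ax >= 0 else len(x_shape) + ax for ax in axes)
    return [ax - sum(1 for dim in x_shape[:ax] if dim is None)
            for ax in axes if x_shape[ax] is not None]

print(remap_reduce_axes([None, 4, 8, 8], [-1, -2]))  # [1, 2]
print(remap_reduce_axes([None, 4, 8, 8], [0, 1]))    # [0]
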
Example #30
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     logger.info("reducing %s to a constant", valid_name)
     x_shape = [dim if dim else 1 for dim in x[2].shape]
     sz = np.array(prod(x_shape))
     params = ConstantInputParameters(valid_name, value=sz)
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim([]), None)
     return params