Example 1
    def _common(cls, node, scales, sizes, nearest_mode='round_prefer_ceil', **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] if inp else None for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        x_rank = len(x_shape)
        spatial_size = x_rank - 2
        in_c = x_shape[1]
        in_w = x_shape[-1]
        if scales is not None:
            sizes = np.array(x_shape) * np.array(scales)
        sizes = [None if x_shape[idx] is None else dim
                 for idx, dim in enumerate(sizes)]
        if spatial_size == 1:
            sizes.insert(-1, 1)

        if nearest_mode != 'round_prefer_ceil':
            logger.warning('only round_prefer_ceil is supported for nearest mode')

        if spatial_size != 2 and spatial_size != 1:
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        if not all(x_dim == size_dim for x_dim, size_dim in zip(x_shape[:2:], sizes[:2:])):
            raise ValueError('resize only supports 4D tensor in NCHW mode or 3D tensor in NCF mode'
                             f' - input shape is {x_shape} sizes is {sizes}')

        mode = node.attrs.get('mode', 'nearest')
        if mode != 'nearest' and mode != 'linear':
            raise ValueError('resize only supports nearest and linear modes')

        params_class = BilinearResizerParameters if mode == 'linear' else NearestNeighborResizerParameters

        params = params_class(valid_name,
                              new_shape=tuple(sizes[2::]),
                              align_corners=False,
                              halfpixel_centers=False,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])

        if spatial_size == 1:
            r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                          old_shape=Dim.unnamed([in_c, in_w]),
                                          shape=Dim.unnamed([in_c, 1, in_w]))
            r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                          old_shape=Dim.unnamed([in_c, 1, sizes[-1]]),
                                          shape=Dim.unnamed([in_c, sizes[-1]]))
            G.add_edge(NNEdge(from_node=x[0], to_node=r1_params, from_idx=x[1], to_idx=0))
            G.add_edge(NNEdge(from_node=r1_params, to_node=params, from_idx=0, to_idx=0))
            G.add_edge(NNEdge(from_node=params, to_node=r2_params, from_idx=0, to_idx=0))
            pout_dims = ProvisionalDim(sizes[:-2:] + sizes[-1::])
            params = r2_params
        else:
            pout_dims = ProvisionalDim(sizes)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example 2
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=False,
                                  in_dims_hint=SparseList([['c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            params.weights = weights
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example 3
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(ReshapeOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # TF2 seems to use the second input whereas TF1 uses the opts
        new_shape = None
        if node_opts:
            new_shape = list(node_opts.NewShapeAsNumpy())
        elif len(inputs) > 1:
            set_shape_tensor = list(cls._verify_constant(inputs[1]))
            node.input[1].used = True
            new_shape = list(set_shape_tensor)
        else:
            raise ValueError(
                f"Cannot assess new_shape for Reshape Parameter: {node.name}")

        if -1 in new_shape:
            new_shape_size = reduce(lambda x, y: x * 1
                                    if y == -1 else x * y, new_shape, 1)
            inp_size = reduce(lambda x, y: x * y
                              if y is not None else x, x_shape, 1)
            new_shape[new_shape.index(-1)] = inp_size // new_shape_size

        if None in x_shape:
            if 1 in new_shape:
                old_batch_dim = x_shape.index(None)
                new_batch_dim = new_shape.index(1)
                if old_batch_dim != new_batch_dim:
                    LOG.info(
                        "node %s moved batch dimension for axis %s to axis %s",
                        node.name, old_batch_dim, new_batch_dim)
                new_shape[new_batch_dim] = None
            else:
                raise ValueError(
                    "unable to determine movement of unspecified axis in node %s"
                    % node.name)

        pnew_shape = ProvisionalDim(new_shape)
        old_shape = Dim.unnamed(cls.remove_unspecified_dim(x_shape),
                                is_ordered=True)
        new_shape = Dim.unnamed(cls.remove_unspecified_dim(new_shape),
                                is_ordered=True)

        params = ReshapeParameters(node.name,
                                   old_shape=old_shape,
                                   shape=new_shape)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], node.output)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pnew_shape)
        return params
Example 4
def test_combine2():
    dim1 = Dim.unnamed((1, 12800, 2))
    dim2 = Dim.unnamed((1, 3200, 2))
    dim3 = Dim.unnamed((1, 800, 2))
    dim4 = Dim.unnamed((1, 200, 2))
    res = Dim.combine((dim1, dim2, dim3, dim4), 1)
    assert res.shape == [1, 17000, 2]
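For reference, a small sketch of the arithmetic behind the asserted shape above, assuming Dim.combine concatenates the given dims along the stated axis; expected_combined_shape and the test name are hypothetical helpers, not part of the original suite:

def expected_combined_shape(shapes, axis):
    # Sum the sizes along the combined axis; all other axes are assumed equal.
    out = list(shapes[0])
    out[axis] = sum(shape[axis] for shape in shapes)
    return out

def test_combine2_expected_shape():
    shapes = [(1, 12800, 2), (1, 3200, 2), (1, 800, 2), (1, 200, 2)]
    # 12800 + 3200 + 800 + 200 == 17000
    assert expected_combined_shape(shapes, 1) == [1, 17000, 2]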
Example 5
def test_broadcast1():
    dim1 = Dim.unnamed((1, 12800, 2))
    dim2 = Dim.unnamed((1, 3200, 2))
    dim3 = Dim.unnamed((1, 800, 2))
    dim4 = Dim.unnamed((1, 200, 2))
    res = Dim.broadcast((dim1, dim2, dim3, dim4))
    assert res.shape == [1, 17000, 2]
Example 6
def add_reshape(G,
                tensors,
                name,
                subgraph,
                _,
                op,
                load_tensors=False,
                dequantize=False):
    reshape_opts = ReshapeOptions.ReshapeOptions()
    reshape_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
    inp = get_input_size(tensors, subgraph, op, 0)
    set_shape = get_tensor(G.model, tensors, subgraph, op, 1)
    # TODO - Which to use? Attribute or input? TFLITE seems to set both
    del set_shape
    new_shape = list(reshape_opts.NewShapeAsNumpy())
    if -1 in new_shape:
        new_shape_size = reduce(lambda x, y: x * 1
                                if y == -1 else x * y, new_shape, 1)
        inp_size = reduce(lambda x, y: x * y, inp, 1)
        new_shape[new_shape.index(-1)] = inp_size // new_shape_size

    old_shape = Dim.unnamed(remove_batch_dim(inp), is_ordered=True)
    new_shape = Dim.unnamed(remove_batch_dim(new_shape), is_ordered=True)
    node = ReshapeParameters(name, old_shape=old_shape, shape=new_shape)
    return add_node(G, node)
Example 7
    def _import_as_matmul(cls, node, inputs, x, y, real_x_shape, real_y_shape, trans_a, trans_b, alpha, beta, **kwargs):
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        all_nodes = kwargs['all_nodes']
        if trans_a:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tinx'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=x[0], to_node=tparams, from_idx=x[1]))
            x = (tparams, 0)
        if trans_b:
            tparams = TransposeParameters(G.unique_name(
                f'{valid_name}_tiny'), transpose=(1, 0))
            G.add_edge(NNEdge(from_node=y[0], to_node=tparams, from_idx=y[1]))
            y = (tparams, 0)
        params = MatMulOpParameters(G.unique_name(valid_name))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(
            NNEdge(from_node=y[0], to_node=params, from_idx=y[1], to_idx=1))

        out_dims = params.get_output_size(
            [Dim.unnamed(real_x_shape), Dim.unnamed(real_y_shape)])

        biases = cls.get_constant(inputs[2]) if len(inputs) > 2 else np.zeros(out_dims[0].shape[1])
        biases_params = ConstantInputParameters(
            G.unique_name(f'{valid_name}_biases'), dims=Dim.unnamed(biases.shape), value=biases)
        G.add_edge(
            NNEdge(from_node=biases_params, to_node=params, to_idx=2))
        if len(inputs) > 2:
            cls.record_constant_qrec(inputs[2], biases_params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, out_dims[0], None)
        return params
Example 8
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        rnn_nodes = [
            self.find_unpack(G, node) for node in G.nodes()
            if isinstance(node, RNNBaseParameters) and node.n_output_cells > 1
        ]
        rnn_nodes_by_slice = self.validate_slices(G, rnn_nodes)
        rnn_nodes_by_slice = self.validate_multi_branch(G, rnn_nodes_by_slice)
        if not rnn_nodes_by_slice:
            return False

        for unpack_node, rnn_unpacks in rnn_nodes_by_slice.items():
            modified_nodes = set()
            for rnn_unpack in rnn_unpacks:
                self.process_path(G, rnn_unpack, modified_nodes)
            # since process_path will have removed all unnecessary nodes, the edges will be correct here
            out_edges = G.out_edges(unpack_node.name)
            in_edges = G.in_edges(unpack_node.name)
            assert len(in_edges
                       ) == 1, "expecting unpack node to have only one in edge"
            in_edge = in_edges[0]
            changes_shape = unpack_node.changes_shape if isinstance(
                unpack_node, StridedSliceParameters) else False

            LOG.info("Eliminating last cell unpack: %s", unpack_node.name)
            G.remove(unpack_node)

            # Here the strided slice can change the output shape of the RNN
            # so insert a reshape to do the shape change
            if changes_shape:
                reshape = ReshapeParameters(
                    unpack_node.name + '_reshape',
                    old_shape=Dim.unnamed(unpack_node.post_slice_shape),
                    shape=Dim.unnamed(unpack_node.out_shape))
                G.add_edge(
                    NNEdge(from_node=in_edge.from_node,
                           to_node=reshape,
                           from_idx=in_edge.from_idx))
                for out_edge in out_edges:
                    G.add_edge(
                        NNEdge(from_node=reshape,
                               to_node=out_edge.to_node,
                               to_idx=out_edge.to_idx))
                if G.quantization:
                    G.quantization[NodeId(reshape)] = G.quantization[NodeId(
                        unpack_node)]
            else:
                for out_edge in out_edges:
                    G.add_edge(
                        NNEdge(from_node=in_edge.from_node,
                               to_node=out_edge.to_node,
                               from_idx=in_edge.from_idx,
                               to_idx=out_edge.to_idx))
            if G.quantization:
                del G.quantization[NodeId(unpack_node)]

        if set_identity:
            self.set_identity(G)

        return True
Example 9
 def _get_initializers(self, initializer):
     return {
         init.name:
         (ConstantInputParameters(self._validate_name(init.name),
                                  dims=Dim.unnamed(init.dims or [1]),
                                  value=self._get_numpy_array(init)), 0,
          Dim.unnamed(init.dims))
         for init in initializer
     }
Example 10
 def __init__(self, *args, old_shape=None, shape=None, **kwargs):
     super(ReshapeParameters, self).__init__(*args, **kwargs)
     if not isinstance(shape, Dim):
         shape = Dim.unnamed(shape)
     if old_shape is not None and not isinstance(old_shape, Dim):
         old_shape = Dim.unnamed(old_shape)
     assert shape.is_ordered and (old_shape is None or old_shape.is_ordered)
     self._shape = shape
     self._old_shape = old_shape
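Based on this constructor and the call sites in Examples 3 and 6, a minimal usage sketch; the import paths below are assumptions about the nntool source tree and may need adjusting:

from graph.dim import Dim                  # assumed module path
from graph.types import ReshapeParameters  # assumed module path

# Flatten a (4, 5, 6) tensor into (4, 30); a plain list or tuple is also
# accepted for either argument and is wrapped in Dim.unnamed by the
# constructor above.
reshape = ReshapeParameters('flatten',
                            old_shape=Dim.unnamed([4, 5, 6], is_ordered=True),
                            shape=Dim.unnamed([4, 30], is_ordered=True))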
Example 11
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(StridedSliceOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # begin end stride
        vec_begin = list(cls._verify_constant(inputs[1]))
        vec_end = list(cls._verify_constant(inputs[2]))
        vec_stride = list(cls._verify_constant(inputs[3]))
        for i in range(1, 4):
            node.input[i].used = True
        if any([vec is None for vec in [vec_begin, vec_end, vec_stride]]):
            raise NotImplementedError(
                "strided slice with variable begin end or stride is not supported")
        spec = zip(vec_begin, vec_end, vec_stride)
        begin_mask = node_opts.BeginMask()
        ellipsis_mask = node_opts.EllipsisMask()
        end_mask = node_opts.EndMask()
        new_axis_mask = node_opts.NewAxisMask()
        shrink_axis_mask = node_opts.ShrinkAxisMask()

        act_slice, out_shape, can_reshape = StridedSliceParameters.get_slice(
            x_shape, spec,
            begin_mask,
            end_mask, ellipsis_mask,
            new_axis_mask, shrink_axis_mask)

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            x_val = cls.get_constant(x)
            params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            x_val = params.numpy_slice(x_val)
            params = ConstantInputParameters(node.name, value=x_val)
        else:
            if can_reshape:
                if list(x_shape) == list(out_shape):
                    LOG.info("converting strided slice %s to a noop", node.name)
                    params = NoOPParameters(node.name)
                else:
                    LOG.info("converting strided slice %s to a reshape", node.name)
                    in_shape = Dim.unnamed(x[2].known_shape, is_ordered=True)
                    out_shape = Dim.unnamed(out_shape, is_ordered=True)
                    params = ReshapeParameters(node.name, old_shape=in_shape, shape=out_shape)
            else:
                params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization([node.input[0]], node.output)
        all_nodes[node.output[0]] = (params, 0, x[2].infer_mapping(out_shape, allow_bad_length=True))
        return params
Example 12
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]

        if cls.SINCE_VERSION == 1:
            shape = np.array(node.attrs["shape"])
        else:  # since_version >= 5
            shape = cls.get_constant(inputs[1])

        input_shape = np.array(inputs[0][2].shape)
        shape = [
            dim if dim != 0 else input_shape[idx]
            for idx, dim in enumerate(shape)
        ]
        if -1 in shape:
            wild_index = shape.index(-1)
            in_size = prod([1 if dim is None else dim for dim in input_shape])
            shape_size = prod(
                [1 if dim is None or dim <= 0 else dim for dim in shape])
            if in_size % shape_size != 0:
                raise ValueError('invalid reshape')
            shape[wild_index] = in_size // shape_size
        shape = np.array(shape)

        if cls.is_constant(inputs[0]):
            logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             value=cls.get_constant(
                                                 inputs[0]).reshape(shape),
                                             dims=Dim.unnamed(shape),
                                             constant_store=G.constant_store)
            pshape = ProvisionalDim(shape)
            all_nodes[node.output[0]] = (params, 0, pshape)
            return params

        # TODO - There must be a better way of doing this
        # This hacks around the fact that the batch dimension will be in the reshape
        if input_shape[0] is None and shape[0] == 1:
            shape = np.array([None] + list(shape[1::]))

        pshape = ProvisionalDim(shape)
        # pylint: disable=singleton-comparison
        old_shape = Dim.unnamed(list(input_shape[input_shape != None]))
        shape = Dim.unnamed(list(shape[shape != None]))
        params = ReshapeParameters(valid_name,
                                   old_shape=old_shape,
                                   shape=shape)
        inp = inputs[0]
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Example 13
 def __init__(self, node, in_shape=None, out_shape=None, **kwargs) -> None:
     super(InsertReshapeAction, self).__init__(node, **kwargs)
     assert in_shape is not None and out_shape is not None, 'find test'
     if isinstance(in_shape, (list, tuple)):
         self.in_shape = Dim.unnamed(in_shape)
     else:
         self.in_shape = in_shape.clone() if in_shape is not None else None
     if isinstance(out_shape, (list, tuple)):
         self.out_shape = Dim.unnamed(out_shape)
     else:
         self.out_shape = out_shape.clone() if out_shape is not None else None
Example 14
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        if node.attrs.get('value'):
            value = numpy_helper.to_array(node.attrs['value'])
        elif node.attrs.get('value_float'):
            value = np.atleast_1d(np.array(node.attrs['value_float'], dtype=np.float32))
        elif node.attrs.get('value_floats'):
            value = np.array(node.attrs['value_floats'], dtype=np.float32)
        elif node.attrs.get('value_int'):
            value = np.atleast_1d(np.array(node.attrs['value_int'], dtype=np.int32))
        elif node.attrs.get('value_ints'):
            value = np.array(node.attrs['value_ints'], dtype=np.int32)
        elif node.attrs.get('value_string') or node.attrs.get('value_strings'):
            raise NotImplementedError(
                'NNTOOL does not support string constants')
        elif node.attrs.get('sparse_value'):
            raise NotImplementedError(
                'NNTOOL does not support sparse constants')
        else:
            raise ValueError('ONNX constant has no value')

        params = ConstantInputParameters(valid_name,
                                         dims=Dim.unnamed(value.shape),
                                         value=value)
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape),
                                     None)
        return params
Example 15
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            has_bias = True
            biases = cls.get_constant(inputs[2])
        else:
            biases = None
            has_bias = False

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        params = FcParameters(valid_name,
                              filt=filt_dim,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['c']]),
                              out_dims_hint=SparseList([['c']]),
                              constant_store=G.constant_store)
        params.weights = weights
        params.biases = biases * beta if has_bias else None
        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
Example 16
 def _remove_none_dims_on_constants(cls, inputs):
     const_inputs = set([
         inp[0] for inp in inputs
         if isinstance(inp[0], ConstantInputParameters)
     ])
     if not const_inputs:
         return
     non_const_shapes = [
         inp[2].shape for inp in inputs if inp[0] not in const_inputs
     ]
     max_len = max(len(shape) for shape in non_const_shapes)
     non_const_shapes = [[1] * (max_len - len(shape)) + shape
                         for shape in non_const_shapes]
     none_axes = [
         any(dim is None for dim in dims) for dims in zip(*non_const_shapes)
     ]
     for inp in const_inputs:
         start = max_len - len(inp.value.shape)
         del_axes = tuple([
             idx for idx in range(len(inp.value.shape))
             if none_axes[start + idx]
         ])
         if not del_axes:
             continue
         inp.value = np.squeeze(inp.value, axis=del_axes)
         inp.dims = Dim.unnamed(inp.value.shape)
Example 17
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     value = numpy_helper.to_array(node.attrs['value'])
     params = ConstantInputParameters(valid_name, dims=Dim.unnamed(value.shape), value=value)
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(value.shape))
     return params
Example 18
 def __init__(self, *args, old_shape=None, shape=None, **kwargs):
     super(ReshapeParameters, self).__init__(
         *args, eliminate_transposes_pass_down=True, eliminate_transposes_pass_up=True, **kwargs)
     if not isinstance(shape, Dim):
         shape = Dim.unnamed(shape)
     self._shape = shape
     self._old_shape = old_shape
Example 19
    def _common(cls, node, **kwargs):
        params_class = kwargs['params_class']
        opts_class = kwargs['opts_class']
        node_opts = node.get_options(opts_class)
        all_nodes = kwargs['all_nodes']
        opts = kwargs['opts']
        G = kwargs['G']

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        new_shape = tuple(cls._verify_constant(inputs[1]))
        params = params_class(node.name,
                              new_shape=new_shape,
                              align_corners=node_opts.AlignCorners(),
                              halfpixel_centers=node_opts.HalfPixelCenters(),
                              in_dims_hint=[['h', 'w', 'c']],
                              out_dims_hint=[['h', 'w', 'c']])

        out_shape = params.get_output_size([Dim.unnamed(x[2].known_shape)])[0]
        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                [node.input[0]], [node.output[0]])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0,
                                     x[2].infer_mapping(out_shape.shape))
        return params
Example 20
def get_all_output_dims(subgraph, elem, order=None):
    outputs = []
    for idx in range(elem.OutputsLength()):
        tf_idx = elem.Outputs(idx)
        outputs.append(
            Dim.unnamed(remove_batch_dim(get_shape(subgraph, tf_idx, order))))
    return outputs
Example 21
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        inputs = [all_nodes[inp] for inp in node.input]

        if not all(cls.is_constant(inp) for inp in inputs):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )

        importer = kwargs['importer']
        sub_G = NNGraph()
        all_nodes_clone = all_nodes.copy()
        importer.import_subgraph(sub_G,
                                 node.attrs['body'], {},
                                 all_nodes=all_nodes_clone)
        if not all(
                isinstance(inp, (InputParameters, ConstantInputParameters))
                for inp in sub_G.inputs()):
            raise NotImplementedError(
                "nntool does not support import of graphs with evaluated loops"
            )
        sub_G.add_dimensions()
        for idx, inp in enumerate(sub_G.inputs()):
            inp.index = idx

        logger.info(f"reducing loop {valid_name} to a constant")
        count = inputs[0][0].value
        keep_going = inputs[1][0].value
        loop_carried = [inp[0].value for inp in inputs[2:]]
        outputs = [np.array([])] * len(node.output)
        while keep_going and count > 0:
            executer = GraphExecuter(sub_G)
            output_tensors = executer.execute([count, keep_going] +
                                              loop_carried,
                                              silent=True)
            outp_vals = [
                output_tensors[node.step_idx][0] for node in sub_G.outputs()
                if not isinstance(node, InputParameters)
            ]
            keep_going = outp_vals[0]
            for idx, val in enumerate(outp_vals[1:]):
                if idx < len(loop_carried):
                    loop_carried[idx] = outputs[idx] = val
                elif outputs[idx] is None:
                    outputs[idx] = val
                else:
                    outputs[idx] = np.concatenate((outputs[idx], val))
            count -= 1
        for idx, outp in enumerate(node.output):
            params = ConstantInputParameters(
                G.unique_name(f'{valid_name}_out{idx}'),
                value=outputs[idx],
                dims=Dim.unnamed(outputs[idx].shape))
            all_nodes[outp] = (params, 0, ProvisionalDim(outputs[idx].shape),
                               None)

        return None
Example 22
    def _common(cls, node, starts, ends, axes, steps, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        x = all_nodes[node.input[0]]
        x_shape = np.array(x[2].shape)
        x_rank = len(x_shape)
        axes = cls._resolve_negative_ranks(
            axes, len(x_shape)) if axes else tuple(range(x_rank))
        axes_rank = len(axes)
        steps = steps if steps else [1] * axes_rank
        slices = np.stack([starts, ends, steps]).transpose((1, 0))
        p_slices = []
        p_shape = []
        for idx, dim in enumerate(x_shape):
            try:
                if dim is None:
                    p_slices.append(None)
                    p_shape.append(None)
                else:
                    slice_idx = axes.index(idx)
                    begin, end, step = slices[slice_idx]
                    begin = max(min(begin if begin >= 0 else dim + begin, dim),
                                0)
                    end = max(min(end if end >= 0 else dim + end, dim), -1)
                    # -sys.maxsize is used to indicate 0 in the reverse slice direction
                    # this makes it compatible with the numpy slice
                    p_slices.append(
                        (begin, -sys.maxsize if end == -1 else end, step))
                    if step < 0:
                        p_shape.append((begin - end) // -step)
                    else:
                        p_shape.append((end - begin) // step)

            except ValueError:
                p_slices.append((0, dim, 1))
                p_shape.append(dim)
        slices = cls._get_real_dim(p_slices)
        shape = cls._get_real_dim(p_shape)

        params = StridedSliceParameters(valid_name,
                                        act_slice=slices,
                                        out_shape=shape)
        if cls.is_constant(x):
            x_val = cls.get_constant(x)
            x_val = params.numpy_slice(x_val)
            if x_val.size < 10:
                logger.info("reducing %s to a constant %s", valid_name, x_val)
            else:
                logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             dims=Dim.unnamed(x_val.shape),
                                             value=x_val,
                                             constant_store=G.constant_store)
        else:
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(p_shape))
        return params
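A worked check of the per-axis begin/end normalization and size arithmetic used above; slice_len is a hypothetical standalone helper that mirrors that logic for one axis, not part of the handler:

import sys

def slice_len(dim, begin, end, step):
    # Same clamping and length computation as the per-axis loop in the handler above.
    begin = max(min(begin if begin >= 0 else dim + begin, dim), 0)
    end = max(min(end if end >= 0 else dim + end, dim), -1)
    if step < 0:
        return (begin - end) // -step
    return (end - begin) // step

# dim=10, begin=-3 clamps to 7, end clamps to 10: length (10 - 7) // 1 == 3
assert slice_len(10, -3, sys.maxsize, 1) == 3
# full reverse slice: begin=-1 clamps to 9, end clamps to -1: (9 - (-1)) // 1 == 10
assert slice_len(10, -1, -sys.maxsize, -1) == 10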
Example 23
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     opts = kwargs['opts']
     qrec_class = kwargs.get('qrec_class')
     params_args = kwargs.get('params_args', {})
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         LOG.info("reducing %s to a constant", node.name)
         values = [cls.get_constant(inp) for inp in inputs]
         output_shapes = cls.implied_broadcast(inputs)
         params = ConstantInputParameters(node.name, value=constant_operation(*values),
                                          dims=Dim.unnamed(output_shapes[0].known_shape), constant_store=G.constant_store)
     else:
         params = kwargs['params_class'](node.name, **params_args)
         output_shapes = cls.implied_broadcast(inputs)
         shapes = []
         for idx, inp in enumerate(inputs):
             G.add_edge(NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1], to_idx=idx))
             shapes.append(inp[2].known_shape)
         if isinstance(params, Broadcastable):
             params.set_broadcast(shapes)
     if opts.get('load_quantization'):
         G.quantization[NodeId(params)] = cls.load_tf_quantization(
             node.input, node.output, qrec_class=qrec_class)
     all_nodes[node.output[0]] = (params, 0, output_shapes[0])
     return params
Example 24
    def _common(cls, node: TFLiteNode, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        if len(x_shape) != 1:
            raise ValueError(f'FILL {node.name} expecting 1D tensor for shape')

        shape = list(cls._verify_constant(inputs[0]))

        if cls._is_constant(inputs[1]):
            val = cls._get_constant(inputs[1])

            params = ConstantInputParameters(node.name,
                                             dims=Dim.unnamed(shape),
                                             value=np.full(shape, val),
                                             constant_store=G.constant_store)
            all_nodes[node.output[0]] = (params, 0, ProvisionalDim(shape))
            return params
        else:
            raise ValueError(
                f'FILL {node.name} non constant fill values are not currently supported'
            )
Example 25
 def _fix_constant_inputs(cls, inputs, shape):
     #TODO - This should be checked again
     # this fixes constant inputs to the broadcasted shape
     # this may not be a good thing to do if the input is connected to more than one node
     # since the shape change could cause problems
     # Two possible solutions:
     # 1) insert a reshape in between the constant and the broadcasted node
     # 2) make the broadcast node adjustment more complete
     none_axes = tuple(
         [idx for idx, dim in enumerate(shape) if dim is None])
     const_inputs = list([
         inp for inp in inputs
         if isinstance(inp[0], ConstantInputParameters)
     ])
     if not const_inputs:
         return
     for inp in const_inputs:
         node = inp[0]
         node.value = np.reshape(node.value, [1] *
                                 (len(shape) - len(node.value.shape)) +
                                 list(node.value.shape))
         if none_axes:
             node.value = np.squeeze(node.value, axis=none_axes)
         # setting the provisional shape here is a little dangerous
         # if the unknown axis is first then it works but if it is
         # in the middle and this value is connected to another node then
         # it could be problematic (but it is problematic anyway since it won't
         # expect something broadcasted)
         inp[2].shape = list(node.value.shape)
         node.dims = Dim.unnamed(node.value.shape)
Example 26
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     valid_name = kwargs['valid_name']
     inputs = [all_nodes[inp] for inp in node.input]
     x = inputs[0]
     x_shape = x[2].shape
     to_dtype = node.attrs['to']
     if cls.is_constant(x):
         x_val = cls.get_constant(x)
         x_val = x_val.astype(to_dtype)
         if x_val.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, x_val)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          dims=Dim.unnamed(x_val.shape),
                                          value=x_val)
     else:
         params = QuantizeParameters(valid_name,
                                     to_qtype=QType(dtype=to_dtype))
         G.add_edge(
             NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                    to_idx=0))
     all_nodes[node.output[0]] = (params, 0, ProvisionalDim(x_shape), None)
     return params
Example 27
 def __init__(self,
              *args,
              adjust_transpose=None,
              is_mutated=False,
              is_intermediate=False,
              always_copy=False,
              value: np.ndarray = None,
              qtype: QType = None,
              dims: Dim = None,
              **kwargs):
     if dims is None:
         dims = Dim.unnamed(value.shape)
     super(ConstantInputParameters, self).__init__(*args,
                                                   dims=dims,
                                                   **kwargs)
     self._value = value
     del self.at_options.valid_options['FIXED_ORDER']
     self.at_options.valid_options['RESET_NAME'] = str
     self._adjust_transpose = adjust_transpose
     self._is_mutated = is_mutated
     self._is_intermediate = is_intermediate
     self._is_constant = True
     self._is_global = True
     self._always_copy = always_copy
     self._use_fake = False
     self._use_compressed = False
     self._compressed_value = None
     self._qtype = qtype
Example 28
def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G
Example 29
    def match(self, G: GraphView, set_identity: bool = True):
        has_modified = False
        for node in G.nodes(node_classes=ConstantInputParameters):
            out_edges = G.out_edges(node.name)
            if len(out_edges) <= 1:
                continue
            has_modified = True
            LOG.info(
                'node %s has more than one out edge and will be duplicated',
                node.name)
            idx = 1
            for out_edge in out_edges[1::]:
                new_constant = ConstantInputParameters(f'{node.name}_{idx}',
                                                       dims=Dim.unnamed(
                                                           node.dims.shape),
                                                       value=node.value.copy())
                G.remove_edge(out_edge)
                G.add_edge(
                    NNEdge(from_node=new_constant,
                           to_node=out_edge.to_node,
                           to_idx=out_edge.to_idx))
                idx += 1

        if set_identity:
            self.set_identity(G)

        return has_modified
Example 30
    def _get_input_nodes(self, G, inputs, constants, batch_hint=None):
        prov_dims = {
            idx: ProvisionalDim.from_onnx_shape(input.type.tensor_type.shape,
                                                check_for_batch=0)
            for idx, input in enumerate(inputs) if input.name not in constants
        }
        if batch_hint is None and any(
                len(pshape.shape) >= 4 and pshape.shape[0] != 1
                for pshape in prov_dims.values()):
            logger.warning(
                "unable to determine batch dimension. If the graph fails to import properly, set it to 1 or a variable."
            )
            batch_hint = 0

        if batch_hint is not None:
            prov_dims = {
                idx: dim.eliminate_dimension(batch_hint)
                for idx, dim in prov_dims.items()
            }

        hints = {
            idx: (['c', 'h', 'w'] if
                  (len(dim.shape) == 4 and
                   (dim.shape[1] == 1 or dim.shape[1] == 3)) else None)
            for idx, dim in prov_dims.items()
        }
        return {
            input.name:
            (G.add_input(Dim.unnamed(
                prov_dims[idx].known_shape).apply_naming_hints(hints[idx]),
                         in_dim_hint=[hints[idx]] if hints[idx] else None,
                         out_dim_hint=[hints[idx]] if hints[idx] else None), 0,
             prov_dims[idx])
            for idx, input in enumerate(inputs) if input.name not in constants
        }