Example #1
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     opts = kwargs['opts']
     qrec_class = kwargs.get('qrec_class')
     params_args = kwargs.get('params_args', {})
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         LOG.info("reducing %s to a constant", node.name)
         values = [cls.get_constant(inp) for inp in inputs]
         output_shapes = cls.implied_broadcast(inputs)
         params = ConstantInputParameters(node.name, value=constant_operation(*values),
                                          dims=Dim.unnamed(output_shapes[0].known_shape), constant_store=G.constant_store)
     else:
         params = kwargs['params_class'](node.name, **params_args)
         output_shapes = cls.implied_broadcast(inputs)
         shapes = []
         for idx, inp in enumerate(inputs):
             G.add_edge(NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1], to_idx=idx))
             shapes.append(inp[2].known_shape)
         if isinstance(params, Broadcastable):
             params.set_broadcast(shapes)
     if opts.get('load_quantization'):
         G.quantization[NodeId(params)] = cls.load_tf_quantization(
             node.input, node.output, qrec_class=qrec_class)
     all_nodes[node.output[0]] = (params, 0, output_shapes[0])
     return params
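
When both inputs are already constant, the handler folds the binary operation into a single ConstantInputParameters node; otherwise it wires each input into the new node and records the broadcast output shape. A minimal numpy-only sketch of that constant-folding idea (the fold_binary_op helper is hypothetical, not part of the importer):

    import numpy as np

    def fold_binary_op(a, b, op):
        # Fold op(a, b) into one constant and report the broadcast output shape.
        out_shape = np.broadcast_shapes(a.shape, b.shape)  # NumPy >= 1.20
        value = op(a, b)
        assert value.shape == out_shape
        return value, out_shape

    value, shape = fold_binary_op(np.ones((1, 4)), np.arange(4.0), np.add)
    print(shape)   # -> (1, 4)
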
Example #2
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     G = kwargs['G']
     opts = kwargs['opts']
     node_opts = kwargs.get("node_opts", None)
     params_args = kwargs.get('params_args', {})
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         LOG.info("reducing %s to a constant", node.name)
         values = [cls.get_constant(inp) for inp in inputs]
         output_shapes = cls.implied_broadcast(inputs)
         params = ConstantInputParameters(node.name,
                                          value=constant_operation(*values),
                                          dims=Dim.unnamed(
                                              output_shapes[0].known_shape),
                                          constant_store=G.constant_store)
     else:
         params = kwargs['params_class'](node.name, **params_args)
         output_shapes = cls.implied_broadcast(inputs)
         shapes = []
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
             shapes.append(inp[2].known_shape)
         if isinstance(params, Broadcastable):
             for idx, shape in enumerate(shapes.copy()):
                 len_diff = len(shape) - len(output_shapes[0].known_shape)
                 if len_diff > 0:
                     if not all(dim is None or dim == 1
                                for dim in shape[:len_diff:]):
                         in_shapes = ",".join(
                             str(shape) for shape in shapes)
                         raise ValueError(
                             f'strange broadcast {in_shapes} -> {output_shapes[0].shape}'
                         )
                     shapes[idx] = shape[len_diff::]
             params.set_broadcast(shapes)
     if opts.get('load_quantization'):
         G.quantization[NodeId(params)] = cls.load_tf_quantization(
             node.input, node.output)
     if node_opts is not None:
         params = cls.fuse_activation(node_opts, node.name, params,
                                      **kwargs)
     all_nodes[node.output[0]] = (params, 0, output_shapes[0])
     return params
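
Example #2 differs from Example #1 mainly in how it normalises the per-input shapes before set_broadcast: an input whose rank exceeds the output rank may only carry leading dimensions of 1 (or None), and those are stripped. A rough illustration of that trimming rule (trim_leading_ones is a hypothetical helper, not the library API):

    def trim_leading_ones(shape, out_rank):
        # Drop leading axes of size 1/None so the rank matches the output rank.
        len_diff = len(shape) - out_rank
        if len_diff <= 0:
            return list(shape)
        if not all(dim is None or dim == 1 for dim in shape[:len_diff]):
            raise ValueError(f'strange broadcast {shape}')
        return list(shape[len_diff:])

    print(trim_leading_ones((1, 1, 8, 3), 2))   # -> [8, 3]
    print(trim_leading_ones((None, 8, 3), 2))   # -> [8, 3]
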
Example #3
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        indices = cls.get_constant(y)
        axis = node.attrs.get('axis', 0)

        pshape = ProvisionalDim(x_shape[:axis:] + list(indices.shape) +
                                x_shape[axis + 1:])
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name,
                                             value=np.take(x_val,
                                                           indices,
                                                           axis=axis))
        else:
            axis = cls._trim_axis(axis, x_shape)
            params = GatherParametters(valid_name, axis=axis, indices=indices)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
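
When the data input of Gather is constant, the handler evaluates it immediately with np.take; otherwise it emits a gather node with the axis trimmed to the known dimensions. The constant path reduces to a plain numpy call:

    import numpy as np

    x = np.arange(12).reshape(3, 4)
    indices = np.array([2, 0])
    print(np.take(x, indices, axis=0))   # rows 2 and 0, shape (2, 4)
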
Example #4
    def _common(cls, node: TFLiteNode, **kwargs):
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        ptranspose = cls._verify_constant(inputs[1])
        pout_shape = [x_shape[dim] for dim in ptranspose]
        transpose = [new_axes[axis] for axis in ptranspose if x_shape[axis] is not None]
        node.input[1].used = True

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            val = np.transpose(cls.get_constant(x), ptranspose)
            params = ConstantInputParameters(node.name, value=val,
                                             dims=Dim.unnamed(val.shape), constant_store=G.constant_store)
        else:
            params = TransposeParameters(node.name, transpose=transpose)

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization([node.input[0]], node.output)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
        return params
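
The transpose handler has to translate the permutation from the provisional shape (which may still carry a None batch dimension) to the known shape: axes whose size is None are dropped and the remaining axes are renumbered, as in this standalone sketch (remap_transpose is a hypothetical name):

    def remap_transpose(ptranspose, x_shape):
        # Renumber a permutation after removing axes whose size is None.
        new_axes = {idx: pos for pos, idx in
                    enumerate(i for i, dim in enumerate(x_shape) if dim is not None)}
        return [new_axes[axis] for axis in ptranspose if x_shape[axis] is not None]

    # provisional NHWC shape with an unknown batch, permuted to NCHW
    print(remap_transpose([0, 3, 1, 2], [None, 16, 16, 8]))   # -> [2, 0, 1]
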
Example #5
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     G = kwargs['G']
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     # may have more than one input i.e. clip
     x = inputs[0]
     if cls.is_constant(x) and constant_operation:
         res = constant_operation(cls.get_constant(x))
         if res.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, res)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          value=res,
                                          constant_store=G.constant_store)
     else:
         params_args = kwargs.get('params_args', {})
         params = kwargs['params_class'](valid_name, **params_args)
         G.add_edge(
             NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                    to_idx=0))
     all_nodes[node.output[0]] = (params, 0, copy.deepcopy(x[2]))
     return params
Example #6
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            weights_params = ConstantInputParameters(
                f'{valid_name}_weights',
                dims=Dim.unnamed([y_shape[1], x_shape[0]]),
                value=weights)
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=True,
                                  in_dims_hint=SparseList([['c'],
                                                           ['out_c', 'in_c'],
                                                           ['out_c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
            biases_params = ConstantInputParameters(
                f'{valid_name}_biases',
                dims=Dim.unnamed([y_shape[1]]),
                value=np.zeros((y_shape[1]), dtype=np.float32))
            G.add_edge(
                NNEdge(from_node=weights_params, to_node=params, to_idx=1))
            G.add_edge(
                NNEdge(from_node=biases_params, to_node=params, to_idx=2))
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
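
Example #6 lowers a MatMul with a constant 2D right-hand matrix onto a fully connected kernel: the weights are the transposed matrix and a zero bias is attached. Numerically the two forms agree, as this small check sketches (shapes chosen arbitrarily):

    import numpy as np

    x = np.random.rand(8).astype(np.float32)       # activation, shape [in_c]
    y = np.random.rand(8, 4).astype(np.float32)    # constant matrix, [in_c, out_c]

    weights = np.transpose(y, [1, 0])              # FC weights, [out_c, in_c]
    biases = np.zeros(4, dtype=np.float32)         # zero bias added by the handler

    assert np.allclose(x @ y, weights @ x + biases, atol=1e-6)
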
Example #7
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(StridedSliceOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        # begin end stride
        vec_begin = list(cls._verify_constant(inputs[1]))
        vec_end = list(cls._verify_constant(inputs[2]))
        vec_stride = list(cls._verify_constant(inputs[3]))
        for i in range(1, 4):
            node.input[i].used = True
        if any([vec is None for vec in [vec_begin, vec_end, vec_stride]]):
            raise NotImplementedError(
                "strided slice with variable begin end or stride is not supported")
        spec = zip(vec_begin, vec_end, vec_stride)
        begin_mask = node_opts.BeginMask()
        ellipsis_mask = node_opts.EllipsisMask()
        end_mask = node_opts.EndMask()
        new_axis_mask = node_opts.NewAxisMask()
        shrink_axis_mask = node_opts.ShrinkAxisMask()

        act_slice, out_shape, can_reshape = StridedSliceParameters.get_slice(
            x_shape, spec,
            begin_mask,
            end_mask, ellipsis_mask,
            new_axis_mask, shrink_axis_mask)

        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            x_val = cls.get_constant(x)
            params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            x_val = params.numpy_slice(x_val)
            params = ConstantInputParameters(node.name, value=x_val)
        else:
            if can_reshape:
                if list(x_shape) == list(out_shape):
                    LOG.info("converting strided slice %s to a noop", node.name)
                    params = NoOPParameters(node.name)
                else:
                    LOG.info("converting strided slice %s to a reshape", node.name)
                    in_shape = Dim.unnamed(x[2].known_shape, is_ordered=True)
                    out_shape = Dim.unnamed(out_shape, is_ordered=True)
                    params = ReshapeParameters(node.name, old_shape=in_shape, shape=out_shape)
            else:
                params = StridedSliceParameters(node.name, act_slice=act_slice, out_shape=out_shape)
            G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization([node.input[0]], node.output)
        all_nodes[node.output[0]] = (params, 0, x[2].infer_mapping(out_shape, allow_bad_length=True))
        return params
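
The strided-slice handler computes an act_slice of (begin, end, stride) triples, one per remaining axis, and for a constant input applies it directly. Applying such a spec with numpy looks roughly like this (apply_slice is a hypothetical stand-in for what numpy_slice presumably does):

    import numpy as np

    def apply_slice(value, act_slice):
        # Apply a list of (begin, end, stride) triples, one per axis.
        return value[tuple(slice(b, e, s) for b, e, s in act_slice)]

    x = np.arange(24).reshape(4, 6)
    print(apply_slice(x, [(1, 3, 1), (0, 6, 2)]).shape)   # -> (2, 3)
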
Example #8
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     G = kwargs['G']
     constant_operation = kwargs.get('constant_operation')
     constant_int_operation = kwargs.get('constant_int_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         values = [cls.get_constant(inp) for inp in inputs]
         outputs = cls.implied_broadcast(inputs)
         if constant_int_operation and all(
                 np.issubdtype(val.dtype, np.integer) for val in values):
             res = constant_int_operation(*values)
         else:
             res = constant_operation(*values)
         if res.size < 10:
             logger.info("reducing %s to a constant %s", valid_name, res)
         else:
             logger.info("reducing %s to a constant", valid_name)
         params = ConstantInputParameters(valid_name,
                                          value=res,
                                          dims=Dim.unnamed(
                                              outputs[0].known_shape),
                                          constant_store=G.constant_store)
     else:
         params_args = kwargs.get('params_args', {})
         params = kwargs['params_class'](valid_name, **params_args)
         outputs = cls.implied_broadcast(inputs)
         shapes = []
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
             shapes.append(inp[2].known_shape)
         if isinstance(params, Broadcastable):
             params.set_broadcast(shapes)
     all_nodes[node.output[0]] = (params, 0, outputs[0])
     return params
Example #9
 def _common(cls, node, **kwargs):
     all_nodes = kwargs['all_nodes']
     valid_name = kwargs['valid_name']
     G = kwargs['G']
     constant_operation = kwargs.get('constant_operation')
     inputs = [all_nodes[inp] for inp in node.input]
     assert len(inputs) == 2
     if all(cls.is_constant(inp) for inp in inputs) and constant_operation:
         logger.info("reducing %s to a constant", valid_name)
         values = [cls.get_constant(inp) for inp in inputs]
         params = ConstantInputParameters(valid_name,
                                          value=constant_operation(*values))
     else:
         params_args = kwargs.get('params_args', {})
         params = kwargs['params_class'](valid_name, **params_args)
         for idx, inp in enumerate(inputs):
             G.add_edge(
                 NNEdge(from_node=inp[0],
                        to_node=params,
                        from_idx=inp[1],
                        to_idx=idx))
     outputs = cls.implied_broadcast(inputs)
     all_nodes[node.output[0]] = (params, 0, outputs[0])
     return params
Example #10
    def get_all_const_inputs(cls, G, all_nodes, opts, node, params,
                             exclude=None, names=None,
                             short_names=None,
                             adjust_transposes=None,
                             load_quantization_if_present=False,
                             skip_empty_tensors=True):
        if exclude is None:
            exclude = []
        if names is None:
            names = [None] * len(node.inputs)
        if short_names is None:
            short_names = [None] * len(node.inputs)
        if adjust_transposes is None:
            adjust_transposes = [None] * len(node.inputs)
        const_params = []

        # TODO - this should just be picking up the existing constant nodes not creating new ones.
        for idx, tensor in enumerate(node.input):
            if tensor is None or idx in exclude or (skip_empty_tensors and not tensor.is_constant):
                const_params.append(None)
                continue

            tensor.used = True

            if tensor not in all_nodes:
                # this can occur for RNN/LSTM state nodes that have a buffer idx of 0
                const_param = ConstantInputParameters(
                    tensor.name,
                    dims=Dim.unnamed(tensor.shape),
                    value=tensor.value,
                    constant_store=G.constant_store)
                all_nodes[tensor] = (
                    const_param,
                    0,
                    ProvisionalDim.from_tflite_shape(tensor.shape)
                )
            else:
                const_param = all_nodes[tensor][0]

            # some constant nodes can be connected to multiple nodes
            # changing their name is not a good idea
            if const_param not in G.nodes():
                const_param.name = names[idx]
                const_param.adjust_transpose = adjust_transposes[idx]
                const_param.is_mutated = node.is_mutated(idx)
                const_param.is_intermediate = node.is_intermediate(idx)
                const_param.short_name = short_names[idx]

                const_param.value = np.reshape(tensor.value, tensor.shape)

                # if opts.get('load_quantization'):
                #     G.quantization[NodeId(const_param)] = MultConstantQuantizationRecord(
                #         in_qs=[tensor.qtype],
                #         out_qs=[tensor.qtype])

            # if load_quantization_if_present and tensor.qtype:
            #     const_param.value_quantization = tensor.qtype

            const_params.append(const_param)
            G.add_edge(NNEdge(const_param, params, to_idx=idx))

        return const_params
Example #11
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        assert len(weights_shape
                   ) == 2, f'bad filter shape {weights_shape} in {node.name}'
        out_c = weights_shape[0]
        batch_size = inp_sz // weights_shape[1]
        if batch_size > 1:
            filt_dim = FcFilterDim(weights_shape[0], weights_shape[1])
        else:
            filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)

        node.input[1].used = True
        check(filt_dim.sz * batch_size == inp_sz,
              "filter doesn't match input size")

        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([out_c]),
                value=np.zeros([out_c], dtype=np.float32))  # TODO - check

        keep_dims = node_opts.KeepNumDims()

        if batch_size > 1:
            if keep_dims:
                raise ValueError(
                    f'keep dims on Fully Connected {node.name} with batch size > 1 is not supported'
                )

            # add a reshape to force the size of the input to batch * in_c
            input_shape = (batch_size, weights_shape[1])
            if x_known_shape != input_shape:
                rparams = ReshapeParameters(
                    G.unique_name(f'{node.name}_batch'),
                    old_shape=Dim.unnamed(x_known_shape),
                    shape=Dim.unnamed(input_shape))
                G.add_edge(
                    NNEdge(from_node=x[0],
                           to_node=rparams,
                           from_idx=x[1],
                           to_idx=0))
                link = (rparams, 0)
            else:
                link = x

            # the batched linear is transpose(weights . transpose(input))
            params = MatMulOpParameters(node.name)
            params.transpose_in = [None, (1, 0), None]
            params.transpose_out = [(1, 0)]
            cls.new_load_filter_parameters(G, params, weights_shape, 0,
                                           node.input[0], weights_node,
                                           bias_node, node.output[0], opts)
            G.add_edge(
                NNEdge(from_node=link[0],
                       to_node=params,
                       from_idx=link[1],
                       to_idx=1))
            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=0))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            out_shape = [batch_size, out_c]
        else:
            # in_hint = [[str(i) for i in range(len(x_known_shape) - 1)] + ['c'],
            #            ['out_c', 'in_c'], ['out_c']]
            in_hint = [None, ['out_c', 'in_c'], ['out_c']]
            out_hint = in_hint.copy() if keep_dims else ['c']
            ker_in_order = None
            ker_out_order = None
            link = (x[0], x[1])

            params = FcParameters(node.name,
                                  filt=filt_dim,
                                  has_bias=True,
                                  in_dims_hint=in_hint,
                                  out_dims_hint=[out_hint],
                                  ker_in_order=ker_in_order,
                                  ker_out_order=ker_out_order,
                                  batch_size=batch_size,
                                  constant_store=G.constant_store,
                                  keep_dims=keep_dims)
            cls.new_load_filter_parameters(
                G, params, params.filter.actual_shape,
                params.filter.get_order_idx('out_c'), node.input[0],
                weights_node, bias_node, node.output[0], opts)

            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=1))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            G.add_edge(
                NNEdge(from_node=link[0],
                       to_node=params,
                       from_idx=link[1],
                       to_idx=0))
            # handle keep_dims
            if x_shape[0] is None:
                if keep_dims:
                    out_shape = x_shape[:-1:] + [out_c]
                else:
                    out_shape = [None, out_c]
            else:
                if keep_dims:
                    out_shape = [None] + x_shape[1:-1:] + [out_c]
                else:
                    out_shape = [None, out_c]

        pout_dims = ProvisionalDim(out_shape)

        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
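
For a batch size greater than 1 the handler replaces the fully connected layer with a MatMul whose input and output are transposed, i.e. out = transpose(W @ transpose(x)), which equals x @ W.T plus the bias. A quick numpy check of that identity (random shapes for illustration):

    import numpy as np

    batch, in_c, out_c = 3, 8, 4
    x = np.random.rand(batch, in_c).astype(np.float32)
    w = np.random.rand(out_c, in_c).astype(np.float32)   # TFLite weight layout [out_c, in_c]
    b = np.random.rand(out_c).astype(np.float32)

    batched = np.transpose(w @ np.transpose(x)) + b      # what the MatMul form computes
    direct = x @ w.T + b                                 # per-row fully connected
    assert np.allclose(batched, direct, atol=1e-5)
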
Example #12
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        axis = node.attrs.get('axis', 0)

        if axis < 0:
            axis += len(x_shape)

        assert axis < len(x_shape) and axis >= 0,\
            "axis %s is out of bounds - input dims %s in node %s" % (axis, x_shape, valid_name)

        split_dim = x_shape[axis]
        assert split_dim is not None, "split dimension must be defined"

        split = None
        if cls.SINCE_VERSION >= 13:
            if len(inputs) > 1:
                split = cls.get_constant(inputs[1])
        else:
            split = node.attrs.get('split')
            if split:
                split = np.array(split)
                assert sum(
                    split
                ) == split_dim, "split sizes should add up to total size %s" % valid_name
                assert np.all(
                    split > 0
                ), "split sizes should be greater than zero %s" % valid_name
            else:
                num_outputs = len(node.output)
                assert split_dim % num_outputs == 0,\
                    "no split attribute or value and dimension is not divisible by number of outputs %s" % valid_name
                split = np.array([split_dim // num_outputs] * num_outputs)

        split = split.tolist()
        act_slices = []
        out_shapes = []
        out_pshapes = []
        cur = 0
        for idx, split_dim in enumerate(split):
            act_slices.append(
                tuple(
                    (cur, cur + split_dim, 1) if didx == axis else (0, dim, 1)
                    for didx, dim in enumerate(x_shape) if dim is not None))
            out_pshape = tuple(split_dim if didx == axis else dim
                               for didx, dim in enumerate(x_shape))
            out_shapes.append(
                tuple(dim for dim in out_pshape if dim is not None))
            out_pshapes.append(ProvisionalDim(out_pshape))
            cur += split_dim
        axis -= sum(1 if dim is None else 0 for dim in x_shape[:axis:])
        params = SplitParameters(valid_name,
                                 act_slices=act_slices,
                                 out_shapes=out_shapes,
                                 axis=axis)
        if cls.is_constant(x):
            logger.info("reducing %s to %s constant(s)", valid_name,
                        len(out_shapes))
            values = params.numpy_split(cls.get_constant(x))
            for idx, out_pshape in enumerate(out_pshapes):
                cparams = ConstantInputParameters(
                    valid_name,
                    value=values[idx],
                    constant_store=G.constant_store)
                all_nodes[node.output[idx]] = (cparams, 0, out_pshape)
            return None

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        for idx, out_pshape in enumerate(out_pshapes):
            all_nodes[node.output[idx]] = (params, idx, out_pshape)
        return params
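
The split handler turns the split sizes into one (begin, end, stride) slice per output along the chosen axis; for a constant input it simply evaluates them. The same partition can be reproduced with numpy by splitting at the cumulative offsets:

    import numpy as np

    x = np.arange(24).reshape(6, 4)
    split = [2, 3, 1]                       # sizes along axis 0, summing to 6
    offsets = np.cumsum(split)[:-1]         # -> [2, 5]
    parts = np.split(x, offsets, axis=0)
    print([p.shape for p in parts])         # -> [(2, 4), (3, 4), (1, 4)]
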
Example #13
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([out_c]),
                value=np.zeros([out_c], dtype=np.float32))  # TODO - check

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList(
                                  [in_hint, ['out_c', 'in_c'], ['out_c']]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))

        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)

        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(
        #         G, params, node.input, bias_node.value, node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Example #14
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(PackOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        inp_shapes = [input[2].shape for input in inputs]

        values_count = node_opts.ValuesCount()
        check(
            len(inputs) == values_count,
            "invalid tflite file - values count not equal to inputs")

        buffer_idxes = [tensor.buffer_idx for tensor in node.input]
        check(
            len(set(buffer_idxes)) == len(buffer_idxes),
            "packs with multiple versions of the same input are not supported. This is normally a graph design problem."
        )

        axis = node_opts.Axis()
        dimension_size = len(inp_shapes)
        if axis < 0:
            axis += dimension_size

        check(all(shape == inp_shapes[0] for shape in inp_shapes[1::]),
              "invalid tflite file - pack inputs not the same")

        # prepare shapes of all tensors
        pconcat_out_shape = inp_shapes[0].copy()
        pconcat_out_shape.insert(axis, values_count)

        pconcat_in_shape = inp_shapes[0].copy()
        pconcat_in_shape.insert(axis, 1)

        preshape_in_shape = inp_shapes[0].copy()

        # remove nones from constants
        cls.remove_none_from_constants(inputs, preshape_in_shape)

        # remove nones from reshape shapes
        reshape_in_shape = cls.remove_unspecified_dim(preshape_in_shape)
        concat_in_shape = cls.remove_unspecified_dim(pconcat_in_shape)

        if all(cls.is_constant(inp) for inp in inputs):
            LOG.info("reducing %s to a constant", node.name)
            value = np.stack([cls.get_constant(inp) for inp in inputs],
                             axis=axis)
            params = ConstantInputParameters(node.name,
                                             value=value,
                                             constant_store=G.constant_store)
        else:
            axis -= sum(1 if dim is None else 0
                        for dim in pconcat_out_shape[:axis:])
            params = ConcatParameters(node.name, axis=axis, axis_hint=None)

            # insert reshapes on each input to add concat axis
            for idx, inp in enumerate(inputs):
                rparams = ReshapeParameters(node.name + "_%s" % idx,
                                            old_shape=reshape_in_shape,
                                            shape=concat_in_shape)
                G.add_edge(
                    NNEdge(from_node=inp[0], to_node=rparams, from_idx=inp[1]))
                G.add_edge(NNEdge(from_node=rparams, to_node=params))
                if opts.get('load_quantization'):
                    G.quantization[NodeId(rparams)] = cls.load_tf_quantization(
                        [node.input[idx]], [node.input[idx]])

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(
                node.input, node.output)

        all_nodes[node.output[0]] = (params, 0,
                                     ProvisionalDim(pconcat_out_shape))
        return params
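
Pack is implemented above as a reshape on every input (inserting a size-1 axis at the pack position) followed by a concatenation on that axis, which is exactly what np.stack does in one step:

    import numpy as np

    inputs = [np.full((2, 3), i) for i in range(4)]
    axis = 1

    via_concat = np.concatenate([np.expand_dims(a, axis) for a in inputs], axis=axis)
    via_stack = np.stack(inputs, axis=axis)
    assert via_concat.shape == via_stack.shape == (2, 4, 3)
    assert np.array_equal(via_concat, via_stack)
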
Example #15
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([real_y_shape[1]], dtype=np.float32)

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])

        # always create new constants since they may be modified by this node and could be linked elsewhere
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        weights_params = ConstantInputParameters(f'{valid_name}_weights',
                                                 dims=Dim.unnamed(
                                                     weights.shape),
                                                 value=weights)
        biases = biases * beta
        biases_params = ConstantInputParameters(f'{valid_name}_biases',
                                                dims=Dim.unnamed(biases.shape),
                                                value=biases)

        params = FcParameters(
            valid_name,
            filt=filt_dim,
            has_bias=True,
            #   in_dims_hint=[['c']],
            in_dims_hint=[None, ['out_c', 'in_c'], ['out_c']],
            out_dims_hint=[['c']],
            constant_store=G.constant_store)

        G.add_edge(NNEdge(from_node=weights_params, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=biases_params, to_node=params, to_idx=2))

        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
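
The GEMM handler folds alpha and beta into fresh weight and bias constants so the node maps onto a plain linear kernel: y = alpha * (x @ B) + beta * C becomes y = x @ (alpha * B) + (beta * C). A small numpy check of that rewrite, assuming transA == transB == 0:

    import numpy as np

    alpha, beta = 0.5, 2.0
    x = np.random.rand(8).astype(np.float32)
    B = np.random.rand(8, 4).astype(np.float32)
    C = np.random.rand(4).astype(np.float32)

    gemm = alpha * (x @ B) + beta * C
    folded_w = np.transpose(B * alpha, [1, 0])   # new weight constant, [out_c, in_c]
    folded_b = C * beta                          # new bias constant
    assert np.allclose(gemm, folded_w @ x + folded_b, atol=1e-5)
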
Example #16
    def conv(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape
        spatial_size = x_rank - 2
        assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

        # M x C/group x kH x kW
        weights_node = inputs[1][0]
        weights_node.name = f'{valid_name}_weights'
        weights = cls.get_constant(inputs[1])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        if in_c != weights.shape[1] * group:
            raise ValueError(
                f'node {valid_name} has incorrect input channel '
                f'dimension {in_c} expecting {weights.shape[1] * group}')
        if spatial_size == 1:
            filt_w = weights.shape[-1]
            filt_h = 1
            # create a new constant node since we are changing the shape
            weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
            weights_node = ConstantInputParameters(
                f'{valid_name}_weights',
                value=weights,
                dims=Dim.unnamed(weights.shape),
                constant_store=G.constant_store)
        else:
            filt_h = weights.shape[-2]
            filt_w = weights.shape[-1]
        h = 1 if spatial_size == 1 else x_shape[-2]
        w = x_shape[-1]

        filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        if len(inputs) > 2:
            biases_node = inputs[2][0]
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([out_c], dtype=np.float32)
            biases_node = ConstantInputParameters(
                f'{valid_name}_biases',
                value=biases,
                dims=Dim.unnamed(biases.shape),
                constant_store=G.constant_store)

        dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
        pad_dim = cls.calc_pad_dim(node, 4)

        params = Conv2DParameters(valid_name,
                                  filt=filt_dim,
                                  stride=StrideDim(strides[0], strides[1]),
                                  dilation=DilationDim(dilations[0],
                                                       dilations[1]),
                                  groups=group,
                                  padding=pad_dim,
                                  has_bias=True,
                                  in_dims_hint=[['c', 'h', 'w'],
                                                cls.ONNX_FILTER_ORDER, ['c']],
                                  out_dims_hint=[['c', 'h', 'w']],
                                  constant_store=G.constant_store)

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        w_dim = Dim.named_ordered(out_c=out_c,
                                  in_c=filt_in_c,
                                  h=filt_h,
                                  w=filt_w)
        b_dim = Dim.named_ordered(c=out_c)
        out_dims = params.get_output_size([in_dim, w_dim, b_dim])
        G.add_edge(
            NNEdge(from_node=weights_node,
                   to_node=params,
                   from_idx=0,
                   to_idx=1))
        G.add_edge(
            NNEdge(from_node=biases_node, to_node=params, from_idx=0,
                   to_idx=2))
        if spatial_size == 1:
            oned_in_shape = [in_c, w]
            twod_in_shape = [in_c, 1, w]
            oned_out_shape = [out_dims[0].c, out_dims[0].w]
            r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                          old_shape=Dim.unnamed(oned_in_shape),
                                          shape=Dim.unnamed(twod_in_shape))
            r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                          old_shape=out_dims[0],
                                          shape=Dim.unnamed(oned_out_shape))
            G.add_edge(
                NNEdge(from_node=x[0],
                       to_node=r1_params,
                       from_idx=x[1],
                       to_idx=0))
            G.add_edge(
                NNEdge(from_node=r1_params,
                       to_node=params,
                       from_idx=0,
                       to_idx=0))
            G.add_edge(
                NNEdge(from_node=params,
                       to_node=r2_params,
                       from_idx=0,
                       to_idx=0))
            pout_dims = ProvisionalDim([x_shape[0]] + oned_out_shape)
            all_nodes[node.output[0]] = (r2_params, 0, pout_dims)
            return r2_params
        else:
            pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
            all_nodes[node.output[0]] = (params, 0, pout_dims)
            return params
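
For a 1D convolution the handler reuses the 2D kernel by inserting a height of 1 around the operation: the weights are reshaped to (out_c, in_c, 1, filt_w), the input from [in_c, w] to [in_c, 1, w], and the output back to [out_c, out_w]. The shape bookkeeping alone, assuming stride 1 and no padding, looks like this:

    import numpy as np

    out_c, in_c, filt_w, w = 16, 8, 3, 32
    weights_1d = np.zeros((out_c, in_c, filt_w), dtype=np.float32)

    weights_2d = np.reshape(weights_1d, (out_c, in_c, 1, filt_w))   # add filt_h = 1
    twod_in_shape = [in_c, 1, w]                                    # reshape before Conv2D
    out_w = w - filt_w + 1                                          # stride 1, no padding
    oned_out_shape = [out_c, out_w]                                 # reshape after Conv2D
    print(weights_2d.shape, twod_in_shape, oned_out_shape)
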
Example #17
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=SparseList([['h', 'w', 'c'],
                                     cls.TF_LITE_FILTER_ORDER.copy(),
                                     ['out_c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)
        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(G, params, node.input, bias_node.value,
        #                                                                            node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
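
TFLite only encodes the padding mode (SAME or VALID); explicit values are derived from it. For reference, TensorFlow's SAME rule computes the total padding per spatial dimension as below. This is a standalone sketch of that rule, not the library's get_tf_padding:

    import math

    def same_pad(in_size, filt_size, stride, dilation=1):
        # Total padding so that out = ceil(in / stride) (TensorFlow SAME rule).
        eff_filt = (filt_size - 1) * dilation + 1
        out_size = math.ceil(in_size / stride)
        total = max((out_size - 1) * stride + eff_filt - in_size, 0)
        return total // 2, total - total // 2   # (pad_before, pad_after)

    print(same_pad(16, 3, 1))   # -> (1, 1)
    print(same_pad(15, 3, 2))   # -> (1, 1)
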