Example #1
    # Map an ONNX Gemm node onto a fully connected (linear) layer
    @classmethod
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        # ONNX Gemm attributes: Y = alpha * (A' @ B') + beta * C, where A' and
        # B' are A and B optionally transposed according to transA/transB
        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        # reduce to the real (known) dimensions, then swap the axes of any
        # 2-D shape whose transpose flag is set
        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is currently only supported for operations that map onto a linear kernel"
            )

        # the optional third input is a constant bias
        if len(inputs) > 2:
            has_bias = True
            biases = cls.get_constant(inputs[2])
        else:
            biases = None
            has_bias = False

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])
        # fold alpha into the weights; the linear layer expects them laid out
        # as (out_c, in_c), so transpose unless transB already provides that
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        params = FcParameters(valid_name,
                              filt=filt_dim,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['c']]),
                              out_dims_hint=SparseList([['c']]),
                              constant_store=G.constant_store)
        params.weights = weights
        # fold beta into the biases, skipping the case with no bias input
        if has_bias:
            params.biases = biases * beta
        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        # wire the activation input; the constant weights are stored on the node
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        # map the computed shape back onto any provisional (unknown) dimensions
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
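
A minimal numpy sketch (not part of the importer, all values made up) of why folding alpha into the weights and beta into the biases, as the handler above does, preserves the ONNX Gemm semantics when transA is 0:

import numpy as np

# ONNX Gemm with transA = transB = 0: Y = alpha * (A @ B) + beta * C
alpha, beta = 0.5, 2.0
A = np.random.rand(1, 4)   # activations
B = np.random.rand(4, 3)   # constant weights
C = np.random.rand(3)      # constant biases

gemm_out = alpha * (A @ B) + beta * C

# what the handler builds: weights pre-scaled by alpha, biases by beta
fc_out = A @ (B * alpha) + (C * beta)

assert np.allclose(gemm_out, fc_out)
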
Example #2
    # Map an ONNX MatMul node onto either a linear layer or a generic matmul
    @classmethod
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            # the second operand can be folded into a fully connected layer;
            # transpose the constant weights into (out_c, in_c) order
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=False,
                                  in_dims_hint=SparseList([['c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            params.weights = weights
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
        else:
            # fall back to a generic matmul and wire the second operand as a
            # real input edge
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
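
A small numpy sketch (illustrative shapes only) of why the linear branch above transposes the constant before assigning it to params.weights: the MatMul constant arrives as (in_c, out_c), while the linear kernel applies its weights in (out_c, in_c) order, and the two layouts compute the same result:

import numpy as np

x = np.random.rand(8)        # flattened activations, shape (in_c,)
W = np.random.rand(8, 16)    # MatMul constant, shape (in_c, out_c)

matmul_out = x @ W

# the linear kernel applies weights laid out as (out_c, in_c)
fc_weights = np.transpose(W, [1, 0])
fc_out = fc_weights @ x

assert np.allclose(matmul_out, fc_out)
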
def add_fully_connected(G,
                        tensors,
                        name,
                        subgraph,
                        _,
                        op,
                        load_tensors=False,
                        dequantize=False):
    fc_opts = FullyConnectedOptions.FullyConnectedOptions()
    fc_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get the input and filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0)
    check(inp[0] == 1, "Multi batch not supported")
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FC_ORDER)
    check(filt['sz'] == reduce(lambda i, j: i * j, inp, 1),
          "filter doesn't match input size")
    # a 2-D input is a single batch with everything flattened, so fill h and w with 1
    if len(inp) == 2:
        inp = {'h': 1, 'w': 1, 'c': inp[1]}
    elif len(inp) == 4:
        inp = {'h': inp[1], 'w': inp[2], 'c': inp[3]}
    else:
        raise NotImplementedError('FC input size not implemented')

    filt_dim = FcFilterDim(inp['h'],
                           inp['w'],
                           filt['out_c'],
                           in_c=inp['c'],
                           order=TF_LITE_FC_EXP_ORDER)

    # does the op have a bias input
    has_bias = op.InputsLength() > 2

    node = FcParameters(name,
                        filt=filt_dim,
                        has_bias=has_bias,
                        in_dims_hint=SparseList([['h', 'w', 'c']]),
                        out_dims_hint=SparseList([['c']]),
                        constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)

    return fuse_activation(G, fc_opts, name, node)
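
For reference, a standalone sketch (made-up shapes, no importer calls) of the input-shape normalisation performed above: a 4-D [batch, h, w, c] input and an already flattened 2-D input both reduce to the same per-sample size that the filter has to cover:

from functools import reduce

inp = (1, 4, 4, 8)                 # [batch, h, w, c]; the batch must be 1
assert inp[0] == 1, "Multi batch not supported"
inp_dict = {'h': inp[1], 'w': inp[2], 'c': inp[3]}

flat = (1, 128)                    # the same data already flattened
flat_dict = {'h': 1, 'w': 1, 'c': flat[1]}

per_sample = reduce(lambda i, j: i * j, inp[1:], 1)
assert per_sample == flat_dict['h'] * flat_dict['w'] * flat_dict['c'] == 128
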