Code Example #1
File: test_operations.py  Project: hasetz/gap_sdk
def test_fc():
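    # 16-bit quantized fully connected test: 3x3 filter, 3 output channels, 1 input
    # channel (FcFilterDim argument order appears to be h, w, out_c, in_c; cf. Example #5)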
    filt = FcFilterDim(3, 3, 3, 1)
    params = FcParameters("test", filt=filt)
    weights_q = QType(16, 2, True)
    in_q = QType(16, 2, True)
    acc_q = QType(16, 4, True)
    calc_q = QType(16, 4, True)
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[in_q],
                                    calc_q=calc_q,
                                    acc_q=acc_q,
                                    biases_q=None,
                                    weights_q=weights_q)
    weights = weights_q.quantize(np.full([3, 1, 3, 3], 1.0))
    input_ = in_q.quantize(np.arange(9)).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])

    output_ = linear(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     qrec=qrec)
    output_ = in_q.dequantize(output_)
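    # all-one weights over the inputs 0..8 give 0+1+...+8 = 36 on each of the 3 output channels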
    assert np.array_equal(output_, [[[36]], [[36]], [[36]]])
Code Example #2
File: gemm.py  Project: bot-motion/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)
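        # ONNX Gemm computes Y = alpha * op(A) @ op(B) + beta * C, where the
        # transA/transB attributes select whether A and B are transposed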

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            has_bias = True
            biases = cls.get_constant(inputs[2])
        else:
            biases = None
            has_bias = False

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        params = FcParameters(valid_name,
                              filt=filt_dim,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['c']]),
                              out_dims_hint=SparseList([['c']]),
                              constant_store=G.constant_store)
        params.weights = weights
        params.biases = biases * beta if has_bias else None
        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
Code Example #3
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
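        # if the second operand can act as constant weights (is_linear), map the MatMul
        # onto a fully connected kernel; otherwise emit a generic MatMul node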
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=False,
                                  in_dims_hint=SparseList([['c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            params.weights = weights
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Code Example #4
def add_fully_connected(out_graph, routes, idx, l):
    activation = get_str(l, 'activation', default="logistic")
    filter_c = get_int(l, 'output')
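    # 'output' in the Darknet cfg is the number of outputs of the connected layer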
    node_name = "{}_{}".format(l['type'], idx)
    if activation is None:
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operator(node_name, FcParameters(FcFilterDim(filter_c), has_bias=True))
    else:
        activation = DARKNET_ACTIVATION_TYPES[activation]
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operators(
                node_name,
                [FcParameters(FcFilterDim(filter_c), has_bias=True),\
                    ActivationParameters(activation)]
            )

    return True
Code Example #5
def add_fully_connected(G,
                        tensors,
                        name,
                        subgraph,
                        _,
                        op,
                        load_tensors=False,
                        dequantize=False):
    fc_opts = FullyConnectedOptions.FullyConnectedOptions()
    fc_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
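    # fc_opts now holds the TFLite FullyConnectedOptions decoded from the operator's flatbuffer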

    # get filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0)
    check(inp[0] == 1, "Multi batch not supported")
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FC_ORDER)
    check(filt['sz'] == reduce(lambda i, j: i * j, inp, 1),
          "filter doesn't match input size")
    # if we get a single-batch input with everything flattened, fill h and w with 1
    if len(inp) == 2:
        inp = {'h': 1, 'w': 1, 'c': inp[1]}
    elif len(inp) == 4:
        inp = {'h': inp[1], 'w': inp[2], 'c': inp[3]}
    else:
        raise NotImplementedError('FC input size not implemented')

    filt_dim = FcFilterDim(inp['h'],
                           inp['w'],
                           filt['out_c'],
                           in_c=inp['c'],
                           order=TF_LITE_FC_EXP_ORDER)

    # does it have biases
    has_bias = op.InputsLength() > 2

    node = FcParameters(name,
                        filt=filt_dim,
                        has_bias=has_bias,
                        in_dims_hint=SparseList([['h', 'w', 'c']]),
                        out_dims_hint=SparseList([['c']]),
                        constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)

    return fuse_activation(G, fc_opts, name, node)
Code Example #6
File: fully_connected.py  Project: bot-motion/gap_sdk
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]
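        # TFLite stores fully connected weights as [out_c, in_c]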

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(node.input) > 2:
            node.input[2].used = True

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([in_hint]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Code Example #7
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            weights_params = ConstantInputParameters(f'{valid_name}_weights',
                                                     dims=Dim.unnamed(
                                                         [y_shape[1], x_shape[0]]),
                                                     value=weights)
            params = FcParameters(valid_name, filt=filt_dim, has_bias=True,
                                #   in_dims_hint=[
                                #       ['c'], ['out_c', 'in_c'], ['out_c']],
                                  in_dims_hint=[
                                      None, ['out_c', 'in_c'], ['out_c']],
                                  out_dims_hint=[['c']],
                                  constant_store=G.constant_store)
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
            biases_params = ConstantInputParameters(f'{valid_name}_biases', dims=Dim.unnamed([y_shape[1]]),
                                                    value=np.zeros((y_shape[1]), dtype=np.float32))
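            # the FC node is created with has_bias=True, so an all-zero bias constant is wired to input 2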
            G.add_edge(NNEdge(from_node=weights_params,
                              to_node=params, to_idx=1))
            G.add_edge(NNEdge(from_node=biases_params,
                              to_node=params, to_idx=2))
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape), Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1], to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Code Example #8
    def _handle(cls, node, quantized=False, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y_idx = 3 if quantized else 1
        y = inputs[y_idx]
        y_shape = cls._get_real_dim(y[2].shape)

        if quantized:
            qrecs = kwargs['qrecs']
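            # the quantized form appears to follow the ONNX QLinearMatMul input layout:
            # a, a_scale, a_zero_point, b, b_scale, b_zero_point, y_scale, y_zero_point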
            x_zp = cls.get_constant(inputs[2])
            x_scale = cls.get_constant(inputs[1])
            if len(x_scale) > 1:
                raise NotImplementedError('QMatMul scales must be scalar')
            x_qtype = QType(dtype=x_zp.dtype, scale=x_scale, zero_point=x_zp)
            y_zp = cls.get_constant(inputs[5])
            y_scale = cls.get_constant(inputs[4])
            if len(y_scale) > 1:
                raise NotImplementedError('QMatMul scales must be scalar')
            y_qtype = QType(dtype=y_zp.dtype, scale=y_scale, zero_point=y_zp)
            o_zp = cls.get_constant(inputs[7])
            o_scale = cls.get_constant(inputs[6])
            o_qtype = QType(dtype=o_zp.dtype, scale=o_scale, zero_point=o_zp)
        else:
            o_qtype = None

        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            weights_params = ConstantInputParameters(
                f'{valid_name}_weights',
                dims=Dim.unnamed([y_shape[1], x_shape[0]]),
                value=weights)
            cls.record_constant_qrec(y, weights_params, **kwargs)
            params = FcParameters(
                valid_name,
                filt=filt_dim,
                has_bias=True,
                #   in_dims_hint=[
                #       ['c'], ['out_c', 'in_c'], ['out_c']],
                in_dims_hint=[None, ['out_c', 'in_c'], ['out_c']],
                out_dims_hint=[['c']])

            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
            biases_params = ConstantInputParameters(
                f'{valid_name}_biases',
                dims=Dim.unnamed([y_shape[1]]),
                value=np.zeros((y_shape[1]), dtype=np.float32))
            G.add_edge(
                NNEdge(from_node=weights_params, to_node=params, to_idx=1))
            G.add_edge(
                NNEdge(from_node=biases_params, to_node=params, to_idx=2))
            if quantized:
                weights_params.qtype = y_qtype
                qrecs[NodeId(params)] = QRec.scaled(
                    in_qs=[x_qtype, y_qtype, None],
                    out_qs=[o_qtype],
                )
        else:
            params = MatMulTransposedParameters(valid_name)
            trans_shape = [i for i in range(len(y_shape))]
            temp = trans_shape[-1]
            trans_shape[-1] = trans_shape[-2]
            trans_shape[-2] = temp
            trans2 = TransposeParameters(f'{valid_name}_tin2',
                                         transpose=tuple(trans_shape))
            out_dims = params.get_output_size([
                Dim.unnamed(x_shape),
                Dim.unnamed(y_shape[:-2] + y_shape[-2:][::-1])
            ])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=trans2, from_idx=y[1],
                       to_idx=0))
            G.add_edge(
                NNEdge(from_node=trans2, to_node=params, from_idx=0, to_idx=1))
            biases_params = ConstantInputParameters(
                f'{valid_name}_biases',
                dims=Dim.unnamed([out_dims[0].shape[1]]),
                value=np.zeros((out_dims[0].shape[1]), dtype=np.float32))
            G.add_edge(
                NNEdge(from_node=biases_params, to_node=params, to_idx=2))

            if quantized:
                qrecs[NodeId(trans2)] = QRec.scaled(
                    in_qs=[y_qtype],
                    out_qs=[y_qtype],
                )
                qrecs[NodeId(params)] = QRec.scaled(
                    in_qs=[x_qtype, y_qtype],
                    out_qs=[o_qtype],
                )

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims, o_qtype)
        return params
Code Example #9
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
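            # no bias tensor in the model: wire an all-zero bias constant so the FC node always has 3 inputs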
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([out_c]),
                value=np.zeros([out_c], dtype=np.float32))  # TODO - check

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList(
                                  [in_hint, ['out_c', 'in_c'], ['out_c']]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))

        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)

        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(
        #         G, params, node.input, bias_node.value, node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Code Example #10
File: gemm.py  Project: mfkiwl/gap_sdk
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([real_y_shape[1]], dtype=np.float32)

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])

        # always create new constants since they may be modified by this node and could be linked elsewhere
        weights = cls.get_constant(y) * alpha
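        # Gemm B is [in_c, out_c] unless transB is set; the FC kernel expects weights as [out_c, in_c]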
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        weights_params = ConstantInputParameters(f'{valid_name}_weights',
                                                 dims=Dim.unnamed(
                                                     weights.shape),
                                                 value=weights)
        biases = biases * beta
        biases_params = ConstantInputParameters(f'{valid_name}_biases',
                                                dims=Dim.unnamed(biases.shape),
                                                value=biases)

        params = FcParameters(
            valid_name,
            filt=filt_dim,
            has_bias=True,
            #   in_dims_hint=[['c']],
            in_dims_hint=[None, ['out_c', 'in_c'], ['out_c']],
            out_dims_hint=[['c']],
            constant_store=G.constant_store)

        G.add_edge(NNEdge(from_node=weights_params, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=biases_params, to_node=params, to_idx=2))

        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
Code Example #11
    def _common(cls, node, **kwargs):

        all_nodes = kwargs['all_nodes']
        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [real_x_shape[1], real_x_shape[0]] if len(
            real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [real_y_shape[1], real_y_shape[0]] if len(
            real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            if alpha != 1.0 or beta != 1.0:
                raise NotImplementedError('Alpha and Beta not implemented on pure matmul GEMM')
            return cls._import_as_matmul(node, inputs, x, y, real_x_shape, real_y_shape,
                                         trans_a, trans_b, alpha, beta, **kwargs)

        G = kwargs['G']
        valid_name = kwargs['valid_name']
        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([real_y_shape[1]], dtype=np.float32)
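            # Gemm with no C input: use a zero bias of length out_c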

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])

        # always create new constants since they may be modified by this node and could be linked elsewhere
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        weights_params = ConstantInputParameters(
            f'{valid_name}_weights', dims=Dim.unnamed(weights.shape), value=weights)
        if y[3]:
            if alpha == 1.0:
                cls.record_constant_qrec(y, weights_params, **kwargs)
            else:
                raise NotImplementedError("qtype on Gemm with alpha != 1.0")

        biases = biases * beta
        biases_params = ConstantInputParameters(
            f'{valid_name}_biases', dims=Dim.unnamed(biases.shape), value=biases)
        if len(inputs) > 2 and inputs[2][3]:
            if beta == 1.0:
                cls.record_constant_qrec(inputs[2], biases_params, **kwargs)
            else:
                raise NotImplementedError("qtype on Gemm with beta != 1.0")

        params = FcParameters(valid_name, filt=filt_dim, has_bias=True,
                              #   in_dims_hint=[['c']],
                              in_dims_hint=[
                                  None, ['out_c', 'in_c'], ['out_c']],
                              out_dims_hint=[['c']])

        G.add_edge(NNEdge(from_node=weights_params, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=biases_params, to_node=params, to_idx=2))

        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim, None)
        return params
Code Example #12
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        assert len(weights_shape) == 2, f'bad filter shape {weights_shape} in {node.name}'
        out_c = weights_shape[0]
        batch_size = inp_sz // weights_shape[1]
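        # more than one row of in_c input elements means the fully connected layer is batched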
        if batch_size > 1:
            filt_dim = FcFilterDim(weights_shape[0], weights_shape[1])
        else:
            filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)

        node.input[1].used = True
        check(filt_dim.sz * batch_size == inp_sz,
              "filter doesn't match input size")

        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([out_c]),
                value=np.zeros([out_c], dtype=np.float32))  # TODO - check

        keep_dims = node_opts.KeepNumDims()

        if batch_size > 1:
            if keep_dims:
                raise ValueError(
                    f'keep dims on Fully Connected {node.name} with batch size > 1 is not supported'
                )

            # add a reshape to force the size of the input to batch * in_c
            input_shape = (batch_size, weights_shape[1])
            if x_known_shape != input_shape:
                rparams = ReshapeParameters(
                    G.unique_name(f'{node.name}_batch'),
                    old_shape=Dim.unnamed(x_known_shape),
                    shape=Dim.unnamed(input_shape))
                G.add_edge(
                    NNEdge(from_node=x[0],
                           to_node=rparams,
                           from_idx=x[1],
                           to_idx=0))
                link = (rparams, 0)
            else:
                link = x

            # the batched linear is transpose(weights . transpose(input))
            params = MatMulOpParameters(node.name)
            params.transpose_in = [None, (1, 0), None]
            params.transpose_out = [(1, 0)]
            cls.new_load_filter_parameters(G, params, weights_shape, 0,
                                           node.input[0], weights_node,
                                           bias_node, node.output[0], opts)
            G.add_edge(
                NNEdge(from_node=link[0],
                       to_node=params,
                       from_idx=link[1],
                       to_idx=1))
            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=0))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            out_shape = [batch_size, out_c]
        else:
            # in_hint = [[str(i) for i in range(len(x_known_shape) - 1)] + ['c'],
            #            ['out_c', 'in_c'], ['out_c']]
            in_hint = [None, ['out_c', 'in_c'], ['out_c']]
            out_hint = in_hint.copy() if keep_dims else ['c']
            ker_in_order = None
            ker_out_order = None
            link = (x[0], x[1])

            params = FcParameters(node.name,
                                  filt=filt_dim,
                                  has_bias=True,
                                  in_dims_hint=in_hint,
                                  out_dims_hint=[out_hint],
                                  ker_in_order=ker_in_order,
                                  ker_out_order=ker_out_order,
                                  batch_size=batch_size,
                                  constant_store=G.constant_store,
                                  keep_dims=keep_dims)
            cls.new_load_filter_parameters(
                G, params, params.filter.actual_shape,
                params.filter.get_order_idx('out_c'), node.input[0],
                weights_node, bias_node, node.output[0], opts)

            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=1))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            G.add_edge(
                NNEdge(from_node=link[0],
                       to_node=params,
                       from_idx=link[1],
                       to_idx=0))
            # handle keep_dims
            if x_shape[0] is None:
                if keep_dims:
                    out_shape = x_shape[:-1:] + [out_c]
                else:
                    out_shape = [None, out_c]
            else:
                if keep_dims:
                    out_shape = [None] + x_shape[1:-1:] + [out_c]
                else:
                    out_shape = [None, out_c]

        pout_dims = ProvisionalDim(out_shape)

        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Code Example #13
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        keep_dims = node_opts.KeepNumDims()
        # check(not keep_dims,
        #       f'keep dims on Fully Connected {node.name} is not supported')

        inputs = [all_nodes[t] if t is not None else None for t in node.input]

        x = inputs[0]
        x_shape = x[2]
        x_known_shape = x_shape.known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        check(
            len(weights_shape) == 2,
            f'bad filter shape {weights_shape} in {node.name}')
        out_c = weights_shape[0]
        batch_size = inp_sz // weights_shape[1]

        keep_dims = node_opts.KeepNumDims()
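        # with KeepNumDims the output keeps the full input rank, with the last dimension replaced by out_c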
        if keep_dims:
            if x_shape.shape[-1] != weights_shape[1]:
                raise ValueError(
                    f'Keep dims set on {node.name} but last input dimension does not match weights'
                )
            out_shape = x_shape.shape.copy()
            out_shape[-1] = out_c
        elif batch_size > 1:
            out_shape = (batch_size, out_c)
        else:
            out_shape = (None, out_c)
        real_out_shape = tuple(dim for dim in out_shape if dim is not None)

        filt_dim = FcFilterDim(weights_shape[0], weights_shape[1])

        node.input[1].used = True
        check(filt_dim.sz * batch_size == inp_sz,
              "filter doesn't match input size")

        if len(inputs) > 2 and inputs[2] is not None:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(f'{node.name}_bias',
                                                dims=Dim.unnamed([out_c]),
                                                value=np.zeros(
                                                    [out_c], dtype=np.float32))

        if batch_size > 1:
            # add a reshape to force the size of the input to batch * in_c
            input_shape = (batch_size, weights_shape[1])
            if x_known_shape != input_shape:
                rparams = ReshapeParameters(
                    G.unique_name(f'{node.name}_batch'),
                    old_shape=Dim.unnamed(x_known_shape),
                    shape=Dim.unnamed(input_shape))
                G.add_edge(
                    NNEdge(from_node=x[0],
                           to_node=rparams,
                           from_idx=x[1],
                           to_idx=0))
                link = (rparams, 0)
            else:
                link = x

            # the batched linear is ([NxM] . [MxK]) + [K]
            params = MatMulTransposedParameters(node.name)
            cls.new_load_filter_parameters(G, params, weights_shape, 0,
                                           node.input[0], weights_node,
                                           bias_node, node.output[0], opts)
            trans2 = TransposeParameters(G.unique_name(f'{node.name}_tin2'),
                                         transpose=(1, 0))
            G.add_edge(
                NNEdge(from_node=link[0], to_node=params, from_idx=link[1]))
            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=1))
            #G.add_edge(NNEdge(from_node=trans2, to_node=params, to_idx=1))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            fc_shape = (batch_size, out_c)
        else:
            ker_in_order = None
            ker_out_order = None
            link = (x[0], x[1])

            params = FcParameters(node.name,
                                  filt=filt_dim,
                                  has_bias=True,
                                  ker_in_order=ker_in_order,
                                  ker_out_order=ker_out_order,
                                  batch_size=batch_size,
                                  keep_dims=keep_dims)
            cls.new_load_filter_parameters(
                G, params, params.filter.actual_shape,
                params.filter.get_order_idx('out_c'), node.input[0],
                weights_node, bias_node, node.output[0], opts)

            G.add_edge(NNEdge(from_node=weights_node, to_node=params,
                              to_idx=1))
            G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
            G.add_edge(
                NNEdge(from_node=link[0],
                       to_node=params,
                       from_idx=link[1],
                       to_idx=0))
            fc_shape = (out_c, )

        pout_dims = ProvisionalDim(out_shape)
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)

        if real_out_shape != fc_shape:
            rparams = ReshapeParameters(G.unique_name(f'{node.name}_keepdims'),
                                        old_shape=fc_shape,
                                        shape=real_out_shape)
            G.add_edge(NNEdge(from_node=aparams, to_node=rparams))
            aparams = rparams

        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params