Example #1
def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G
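
A quick sanity check on the weight construction above: the nested lists build a tensor whose shape matches the imposed ['out_c', 'h', 'w', 'in_c'] filter order. A minimal standalone numpy check:

import numpy as np

w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]   # one 3x2 (w, in_c) slice
w1 = [w1, w1, w1]                                  # stack to (h, w, in_c)
w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
w2 = [w2, w2, w2]
weights = np.array([w1, w2])                       # stack to (out_c, h, w, in_c)
assert weights.shape == (2, 3, 3, 2)               # matches Conv2DFilterDim(3, 3, 2, in_c=2)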
Example #2
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            has_bias = True
            biases = cls.get_constant(inputs[2])
        else:
            biases = None
            has_bias = False

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        params = FcParameters(valid_name,
                              filt=filt_dim,
                              has_bias=has_bias,
                              in_dims_hint=SparseList([['c']]),
                              out_dims_hint=SparseList([['c']]),
                              constant_store=G.constant_store)
        params.weights = weights
        if has_bias:
            params.biases = biases * beta
        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
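
The mapping this handler relies on can be checked with plain numpy: for a vector input, ONNX Gemm Y = alpha*A.B + beta*C is the same as a fully connected layer whose weights are alpha*B transposed (when transB is 0) and whose biases are scaled by beta. A minimal sketch under those assumptions:

import numpy as np

K, N = 4, 3
x = np.arange(K, dtype=np.float32)                # A with M == 1
B = np.arange(K * N, dtype=np.float32).reshape(K, N)
C = np.ones(N, dtype=np.float32)
alpha, beta = 0.5, 2.0

gemm_out = alpha * (x @ B) + beta * C             # ONNX Gemm, transA == transB == 0
W = np.transpose(B * alpha, [1, 0])               # fold alpha into the weights, as above
fc_out = W @ x + C * beta                         # linear kernel: weights @ x + biases
assert np.allclose(gemm_out, fc_out)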
Example #3
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = cls._get_real_dim(x[2].shape)
        y = inputs[1]
        y_shape = cls._get_real_dim(y[2].shape)
        if cls.is_linear(y, x_shape, y_shape):
            filt_dim = FcFilterDim(y_shape[1], x_shape[0])
            weights = np.transpose(cls.get_constant(y), [1, 0])
            params = FcParameters(valid_name,
                                  filt=filt_dim,
                                  has_bias=False,
                                  in_dims_hint=SparseList([['c']]),
                                  out_dims_hint=SparseList([['c']]),
                                  constant_store=G.constant_store)
            params.weights = weights
            out_dims = params.get_output_size([Dim.unnamed(x_shape)])
        else:
            params = MatMulOpParameters(valid_name)
            out_dims = params.get_output_size(
                [Dim.unnamed(x_shape),
                 Dim.unnamed(y_shape)])
            G.add_edge(
                NNEdge(from_node=y[0], to_node=params, from_idx=y[1],
                       to_idx=1))
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))

        pout_dims = x[2].infer_mapping(out_dims[0].shape)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #4
def add_pool(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    pool_opts = Pool2DOptions.Pool2DOptions()
    pool_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
    pad = get_tf_padding(pool_opts.Padding())
    pool_type = TF_POOL_OPS[op_name]

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    node = PoolingParameters(name,
                             filt=PoolFilterDim(pool_opts.FilterHeight(),
                                                pool_opts.FilterWidth()),
                             stride=StrideDim(pool_opts.StrideH(),
                                              pool_opts.StrideW()),
                             padding=pad,
                             pool_type=pool_type,
                             in_dims_hint=SparseList([['h', 'w', 'c']]),
                             out_dims_hint=SparseList([['h', 'w', 'c']]))

    return fuse_activation(G, pool_opts, name, node)
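
For reference, the output size a VALID-padded pool produces follows the usual arithmetic; a small standalone check (this mirrors the standard formula, not a specific helper in this codebase):

def pool_out_size(in_size, filt, stride):
    # standard VALID-padding formula: floor((in - filt) / stride) + 1
    return (in_size - filt) // stride + 1

assert pool_out_size(10, 2, 2) == 5   # 10x10 input, 2x2 filter, stride 2
assert pool_out_size(10, 3, 1) == 8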
Example #5
def add_mean(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    check(op.InputsLength() == 2,
          "Very odd " + str(op.InputsAsNumpy()))
    mean_dims = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=False)
    if len(mean_dims) != 2 or mean_dims[0] != 1 or mean_dims[1] != 2:
        LOG.warning(
            "MEAN operator seen but can't convert to global average pool")
        return add_unconverted(G, name, subgraph, op_name, op, load_tensors,
                               dequantize)
    else:
        LOG.info("MEAN operator converted to global average pool")

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    return add_node(
        G,
        PoolingParameters(name,
                          filt=PoolFilterDim(inp['h'], inp['w']),
                          stride=StrideDim(1, 1),
                          padding=PadDim.valid(),
                          pool_type="average",
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']])))
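
The conversion is valid because a MEAN over axes [1, 2] of an NHWC tensor is exactly an average pool whose filter covers the whole h x w plane. A numpy check of that equivalence:

import numpy as np

x = np.random.rand(1, 4, 4, 3)                # NHWC
mean_out = x.mean(axis=(1, 2))                # MEAN over h and w, as the tflite op requests
pool_out = x.reshape(1, -1, 3).mean(axis=1)   # average pool with filt == (h, w), stride 1
assert np.allclose(mean_out, pool_out)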
Example #6
def add_fully_connected(G,
                        tensors,
                        name,
                        subgraph,
                        _,
                        op,
                        load_tensors=False,
                        dequantize=False):
    fc_opts = FullyConnectedOptions.FullyConnectedOptions()
    fc_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0)
    check(inp[0] == 1, "Multi batch not supported")
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FC_ORDER)
    check(filt['sz'] == reduce(lambda i, j: i * j, inp, 1),
          "filter doesn't match input size")
    # in the case we get an input of 1 batch with everything flattened fill h and w with 1
    if len(inp) == 2:
        inp = {'h': 1, 'w': 1, 'c': inp[1]}
    elif len(inp) == 4:
        inp = {'h': inp[1], 'w': inp[2], 'c': inp[3]}
    else:
        raise NotImplementedError('FC input size not implemented')

    filt_dim = FcFilterDim(inp['h'],
                           inp['w'],
                           filt['out_c'],
                           in_c=inp['c'],
                           order=TF_LITE_FC_EXP_ORDER)

    # does it have biases
    has_bias = op.InputsLength() > 2

    node = FcParameters(name,
                        filt=filt_dim,
                        has_bias=has_bias,
                        in_dims_hint=SparseList([['h', 'w', 'c']]),
                        out_dims_hint=SparseList([['c']]),
                        constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)

    return fuse_activation(G, fc_opts, name, node)
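
The shape normalization above is easy to mirror in isolation: a flattened 2D input gets h = w = 1, a 4D input keeps its spatial dims. A small standalone sketch of that logic:

def normalize_fc_input(inp):
    # mirrors the branch above: 2D means (batch, features), 4D means NHWC
    if len(inp) == 2:
        return {'h': 1, 'w': 1, 'c': inp[1]}
    if len(inp) == 4:
        return {'h': inp[1], 'w': inp[2], 'c': inp[3]}
    raise NotImplementedError('FC input size not implemented')

assert normalize_fc_input([1, 128]) == {'h': 1, 'w': 1, 'c': 128}
assert normalize_fc_input([1, 4, 4, 8]) == {'h': 4, 'w': 4, 'c': 8}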
Example #7
    def conv(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape
        spatial_size = x_rank - 2
        assert spatial_size <= 2, "only 1D and 2D convolutions supported"

        # M x C/group x kH x kW
        weights = cls.get_constant(inputs[1])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        # kernel height is 1 for 1D convolutions; kernel width is the last weight dim
        filt_h = 1 if spatial_size <= 1 else weights.shape[2]
        filt_w = weights.shape[-1]
        h = 1 if spatial_size <= 1 else x_shape[2]
        w = 1 if spatial_size == 0 else (x_shape[2] if spatial_size == 1 else x_shape[3])

        filt_dim = Conv2DFilterDim(filt_h, filt_w,
                                   out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([out_c])

        dilations = cls.pad_start_with(node.attrs.get("dilations", [1] * spatial_size), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", [1] * spatial_size), [1], 2)
        pad_dim = cls.calc_pad_dim(node, spatial_size)

        params = Conv2DParameters(valid_name,
                                  filt=filt_dim,
                                  stride=StrideDim(strides[0],
                                                   strides[1]),
                                  dilation=DilationDim(dilations[0],
                                                       dilations[1]),
                                  groups=group,
                                  padding=pad_dim,
                                  has_bias=True,
                                  in_dims_hint=SparseList([['c', 'h', 'w']]),
                                  out_dims_hint=SparseList([['c', 'h', 'w']]),
                                  constant_store=G.constant_store)
        params.weights = weights
        params.biases = biases
        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
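
pad_start_with is used here to promote 1D attributes to the 2D form the kernel expects; assuming it left-pads the list to the requested length, its effect looks like this (a hypothetical re-implementation for illustration only):

def pad_start_with(lst, pad, length):
    # hypothetical: left-pad lst with the pad items until it holds `length` entries
    return pad * (length - len(lst)) + lst

assert pad_start_with([2], [1], 2) == [1, 2]      # 1D strides [2] -> [1, 2]
assert pad_start_with([3, 2], [1], 2) == [3, 2]   # 2D attributes pass through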
Example #8
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(node.input) > 2:
            node.input[2].used = True

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([in_hint]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Example #9
    def pool2d(cls, node, pool_type=None, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        opts = kwargs['opts']
        node_opts = node.get_options(Pool2DOptions)

        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        in_b, h, w, in_c = tuple(x_shape)

        filt_h = node_opts.FilterHeight()
        filt_w = node_opts.FilterWidth()
        stride_h = node_opts.StrideH()
        stride_w = node_opts.StrideW()

        pad = cls.get_tf_padding(node_opts.Padding())

        filter_matches_input = h == filt_h and w == filt_w
        stride_is_one = stride_h == 1 and stride_w == 1

        if filter_matches_input and stride_is_one:
            params = GlobalPoolParameters(node.name,
                                          pool_type=pool_type,
                                          axis=[0, 1],
                                          keep_dims=True,
                                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                                          out_dims_hint=SparseList([['h', 'w', 'c']]))
        else:
            params = PoolingParameters(node.name,
                                       filt=PoolFilterDim(filt_h, filt_w),
                                       stride=StrideDim(stride_h, stride_w),
                                       padding=pad,
                                       pool_type=pool_type,
                                       in_dims_hint=SparseList([['h', 'w', 'c']]),
                                       out_dims_hint=SparseList([['h', 'w', 'c']]))

        if opts.get('load_quantization'):
            G.quantization[NodeId(params)] = cls.load_tf_quantization(node.input, node.output)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
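
The GlobalPoolParameters branch is justified by a simple identity: when the filter covers the whole input and the stride is 1 there is exactly one window, so the pool is just a reduction over h and w. Checked with numpy:

import numpy as np

h, w = 6, 6
filt_h, filt_w, stride = 6, 6, 1
assert (h - filt_h) // stride + 1 == 1            # a single pooling window in each dim
assert (w - filt_w) // stride + 1 == 1

x = np.random.rand(h, w, 4)                       # h, w, c
global_max = x.max(axis=(0, 1))                   # so the pool is a plain reduction
assert global_max.shape == (4,)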
Example #10
def add_convolution(G,
                    tensors,
                    name,
                    subgraph,
                    _,
                    op,
                    load_tensors=False,
                    dequantize=False):
    conv_opts = Conv2DOptions.Conv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    filt = get_input_size(tensors, subgraph, op, 1, order=TF_LITE_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=filt['in_c'])
    filt = filt.impose_order(TF_LITE_FILTER_ORDER)
    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    node = Conv2DParameters(name,
                            filt=filt,
                            stride=StrideDim(conv_opts.StrideH(),
                                             conv_opts.StrideW()),
                            padding=pad,
                            has_bias=has_bias,
                            in_dims_hint=SparseList([['h', 'w', 'c']]),
                            out_dims_hint=SparseList([['h', 'w', 'c']]),
                            constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)
    return fuse_activation(G, conv_opts, name, node)
Example #11
def propagate_upwards(G: NNGraph):
    for node in G.dfs(reverse=True):
        # First propagate the out dim hints to the in dim hints
        # Any node that does not want this to happen should set its in dim hints

        if node.out_dims_hint is not None:
            if isinstance(node, ReshapeParameters):
                if len(node.shape) < len(node.out_dims_hint[0]):
                    node.shape = Dim.unnamed((
                        [1] * (len(node.out_dims_hint[0]) - len(node.shape))) +
                                             node.shape.shape)
                node.shape.apply_naming_hints(node.out_dims_hint[0])
                if node.in_dims_hint is None:
                    node.in_dims_hint = SparseList(
                        [["%s" % i for i in range(len(node.old_shape))]])
            elif isinstance(node, MatrixBroadcastedLinearOpParameters):
                node.in_dims_hint = [node.out_dims_hint[0]] * 2
            elif isinstance(node, MatrixMulParameters):
                continue
            elif isinstance(node, GlobalPoolParameters):
                if node.keep_dims:
                    node.in_dims_hint = deepcopy(node.out_dims_hint)
            elif isinstance(
                    node, ConstantInputParameters) and not node.dims.is_named:
                node.dims.apply_naming_hints(node.out_dims_hint[0])
            else:
                if node.in_dims_hint is None:
                    node.in_dims_hint = deepcopy(node.out_dims_hint)

        # if we have an in dim hint then propagate it to upstream nodes
        if node.in_dims_hint is not None:
            for edge in G.in_edges(node.name):
                hint = node.in_dims_hint[edge.to_idx]
                if hint is None:
                    continue
                if edge.from_node.out_dims_hint is None:
                    edge.from_node.out_dims_hint = SparseList()
                if edge.from_node.out_dims_hint[edge.from_idx] is None:
                    edge.from_node.out_dims_hint[edge.from_idx] = hint
                    if isinstance(edge.from_node, InputParameters):
                        assert edge.from_idx == 0, "input node should only have one output"
                        dims_len = len(edge.from_node.dims)
                        hint_len = len(hint)
                        if dims_len < hint_len:
                            edge.from_node.dims = Dim.unnamed(
                                [1] * (hint_len - dims_len) +
                                edge.from_node.dims.shape)
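
The reshape and input-node branches both rely on the same trick: when a shape is shorter than its naming hint, it is left-padded with 1s so the hint can be applied positionally. In isolation:

hint = ['h', 'w', 'c']
shape = [10, 2]                         # two dims, three hint names
if len(shape) < len(hint):
    shape = [1] * (len(hint) - len(shape)) + shape
assert shape == [1, 10, 2]              # now 'h' -> 1, 'w' -> 10, 'c' -> 2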
Example #12
def actfusion_graph():
    G = NNGraph(name='actfusion_graph')
    ti1 = G.add_input(Dim.unnamed([10, 10, 2])).name
    ti2 = G.add_input(Dim.unnamed([10, 10, 2])).name
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    n1a = ReluActivationParameters("node1a")
    G.add_node(n1a)
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    n3 = MatrixAddParameters("node3")
    G.add_node(n3)
    n4 = ReluActivationParameters("node4")
    G.add_node(n4)
    to = G.add_output()
    G.add_edge(NNEdge(ti1, n1))
    G.add_edge(NNEdge(n1, n1a))
    G.add_edge(NNEdge(ti2, n2))
    G.add_edge(NNEdge(n1a, n3, to_idx=0))
    G.add_edge(NNEdge(n2, n3, to_idx=1))
    G.add_edge(NNEdge(n3, n4))
    G.add_edge(NNEdge(n4, to))
    G.add_dimensions()
    yield G
Example #13
def propagate_downwards(G: NNGraph):
    for node in G.dfs():
        # First propagate the in dim hints to the out dim hints
        # Any node that does not want this to happen should set its out dim hints

        if node.in_dims_hint is not None:
            if isinstance(node, ReshapeParameters):
                if len(node.old_shape) == len(node.in_dims_hint[0]):
                    LOG.debug("set reshape %s in dims hint %s", node.name,
                              node.in_dims_hint[0])
                    node.old_shape.apply_naming_hints(node.in_dims_hint[0])
            elif isinstance(node, GlobalPoolParameters):
                if node.keep_dims:
                    node.out_dims_hint = deepcopy(node.in_dims_hint)
            elif isinstance(node, MatrixBroadcastedLinearOpParameters):
                max_hint = None
                for hint in node.in_dims_hint:
                    if hint is not None and (max_hint is None
                                             or len(hint) > len(max_hint)):
                        max_hint = hint
                if max_hint is not None:
                    node.out_dims_hint = [max_hint]
            elif isinstance(node, ConcatParameters):
                # if any incoming edge of the concat doesn't have a hint
                # set it the same as the others
                any_in_hint = next(
                    (hint for hint in node.in_dims_hint if hint is not None),
                    None)
                if any_in_hint:
                    LOG.debug("set concat %s in dims hint %s", node.name,
                              any_in_hint)
                    for edge in G.in_edges(node.name):
                        if not node.in_dims_hint[edge.to_idx]:
                            node.in_dims_hint[edge.to_idx] = any_in_hint
                    node.out_dims_hint = [any_in_hint]
            else:
                if node.out_dims_hint is None:
                    node.out_dims_hint = deepcopy(node.in_dims_hint)

        # if we have an out dim hint then propagate it to downstream nodes
        if node.out_dims_hint is not None:
            LOG.debug("propagate down hint from %s", node.name)
            for edge in G.out_edges(node.name):
                hint = node.out_dims_hint[edge.from_idx]
                if hint is None:
                    continue
                if edge.to_node.in_dims_hint is None:
                    edge.to_node.in_dims_hint = SparseList()
                if edge.to_node.in_dims_hint[edge.to_idx] is None:
                    edge.to_node.in_dims_hint[edge.to_idx] = hint
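
For broadcasted ops the longest available input hint wins, since broadcasting extends the shorter operands. The selection loop above behaves like this on its own:

in_dims_hint = [['c'], ['h', 'w', 'c'], None]
max_hint = None
for hint in in_dims_hint:
    if hint is not None and (max_hint is None or len(hint) > len(max_hint)):
        max_hint = hint
assert max_hint == ['h', 'w', 'c']      # the broadcast output follows the longest hint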
Example #14
def propagate_upwards(G: NNGraph):
    for node in G.dfs(reverse=True):
        # First propagate the out dim hints to the in dim hints
        # Any node that does not want this to happen should set its in dim hints

        if node.out_dims_hint is not None:
            if isinstance(node, ReshapeParameters):
                assert len(node.shape) == len(node.out_dims_hint[0])
                node.shape.apply_naming_hints(node.out_dims_hint[0])
                if node.in_dims_hint is None:
                    node.in_dims_hint = SparseList([["%s" % i for i in range(len(node.old_shape))]])
            else:
                if node.in_dims_hint is None:
                    node.in_dims_hint = deepcopy(node.out_dims_hint)

        # if we have an in dim hint then propagate it to upstream nodes
        if node.in_dims_hint is not None:
            for edge in G.in_edges(node.name):
                hint = node.in_dims_hint[edge.to_idx]
                if edge.from_node.out_dims_hint is None:
                    edge.from_node.out_dims_hint = SparseList()
                if edge.from_node.out_dims_hint[edge.from_idx] is None:
                    edge.from_node.out_dims_hint[edge.from_idx] = hint
Example #15
def test1():
    sl = SparseList()
    sl[2] = True
    assert sl[1] is None
    assert len(sl) == 3
    assert sl[2] is True
    sl[5] = False
    assert len(sl) == 6
    assert sl[5] is False
    del sl[2]
    assert len(sl) == 5
    assert sl[2] is None
    assert sl[4] is False
    tl = [v for v in sl]
    assert tl == [None, None, None, None, False]
Example #16
def test1():
    sparse_list = SparseList()
    sparse_list[2] = True
    assert sparse_list[1] is None
    assert len(sparse_list) == 3
    assert sparse_list[2]
    sparse_list[5] = False
    assert len(sparse_list) == 6
    assert not sparse_list[5]
    del sparse_list[2]
    assert len(sparse_list) == 5
    assert sparse_list[2] is None
    assert not sparse_list[4]
    iter_sparse_list = [v for v in sparse_list]
    assert iter_sparse_list == [None, None, None, None, False]
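
Both tests pin down the same contract: assigning past the end grows the list, unset slots read as None, and deletion shifts like a normal list. A minimal sketch that satisfies them (an illustrative stand-in, not nntool's actual implementation):

class SparseList:
    def __init__(self, items=None):
        self._items = list(items) if items else []

    def _grow(self, idx):
        # extend with None so slot idx exists
        if idx >= len(self._items):
            self._items += [None] * (idx + 1 - len(self._items))

    def __setitem__(self, idx, val):
        self._grow(idx)
        self._items[idx] = val

    def __getitem__(self, idx):
        # unset or out-of-range slots read as None
        return self._items[idx] if idx < len(self._items) else None

    def __delitem__(self, idx):
        del self._items[idx]            # shifts later items down, like a plain list

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        return iter(self._items)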
Example #17
def propagate_downwards(G: NNGraph):
    for node in G.dfs():
        # First propagate the in dim hints to the out dim hints
        # Any node that does not want this to happen should set its out dim hints

        if node.in_dims_hint is not None:
            if isinstance(node, ReshapeParameters):
                assert len(node.old_shape) == len(node.in_dims_hint[0]), "reshape doesn't match input"
                node.old_shape.apply_naming_hints(node.in_dims_hint[0])
            else:
                if node.out_dims_hint is None:
                    node.out_dims_hint = deepcopy(node.in_dims_hint)

        # if we have an out dim hint then propagate it to downstream nodes
        if node.out_dims_hint is not None:
            for edge in G.out_edges(node.name):
                hint = node.out_dims_hint[edge.from_idx]
                if edge.to_node.in_dims_hint is None:
                    edge.to_node.in_dims_hint = SparseList()
                if edge.to_node.in_dims_hint[edge.to_idx] is None:
                    edge.to_node.in_dims_hint[edge.to_idx] = hint
Example #18
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(FullyConnectedOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        x_known_shape = x[2].known_shape
        inp_sz = np.prod(np.array(x_known_shape))
        weights = inputs[1]
        weights_node = weights[0]
        weights_shape = weights[2].shape
        out_c = weights_shape[0]

        filt_dim = FcFilterDim(weights_shape[0], *x_known_shape)
        node.input[1].used = True
        check(filt_dim.sz == inp_sz, "filter doesn't match input size")

        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([out_c]),
                value=np.zeros([out_c], dtype=np.float32))  # TODO - check

        keep_dims = node_opts.KeepNumDims()

        in_hint = [str(i) for i in range(len(x_known_shape) - 1)] + ['c']
        out_hint = in_hint.copy() if keep_dims else ['c']

        params = FcParameters(node.name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList(
                                  [in_hint, ['out_c', 'in_c'], ['out_c']]),
                              out_dims_hint=SparseList([out_hint]),
                              constant_store=G.constant_store,
                              keep_dims=keep_dims)

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))

        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)

        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(
        #         G, params, node.input, bias_node.value, node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        if x_shape[0] is None:
            out_shape = x_shape[:-1:] + [out_c] if keep_dims else [
                x_shape[0], out_c
            ]
        else:
            out_shape = x_known_shape[:-1:] + [out_c] if keep_dims else [out_c]
        pout_dims = ProvisionalDim(out_shape)

        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        aparams = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (aparams, 0, pout_dims)
        return params
Example #19
    def _common(cls, node, **kwargs):
        node_opts = node.get_options(UnidirectionalSequenceLSTMOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes.get(t) for t in node.input]
        x = inputs[0]
        x_shape = x[2].shape

        time_major = node_opts.TimeMajor()
        max_time = int(x_shape[0 if time_major else 1])
        n_cells = int(node.input[2].shape[0])
        n_inputs = int(x_shape[2])
        pout_dims = ProvisionalDim([x_shape[0], x_shape[1], n_cells])
        params = LSTMParameters(node.name,
                                in_dims_hint=SparseList([['sz', 'c']]),
                                out_dims_hint=SparseList([['sz', 'c']]),
                                constant_store=G.constant_store,
                                cell_clip=node_opts.CellClip(),
                                proj_clip=node_opts.ProjClip(),
                                n_input_cells=max_time,
                                n_cells=max_time,  # TF says max_time - we say cells
                                n_inputs=n_inputs,  # Input will be n_input_cells, n_inputs
                                n_output_cells=max_time,  # Output will be n_output_cells, n_states
                                n_states=n_cells,  # TF says cells - we say states
                                activation=cls.TF_ACTIVATIONS[node_opts.FusedActivationFunction()])

        constant_nodes = cls.get_all_const_inputs(
            G,
            all_nodes,
            opts,
            node,
            params,
            exclude=[0],
            names=["%s_%s" % (in_name, node.name)
                   for in_name in LSTMParameters.INPUT_NAMES],
            short_names=LSTMParameters.INPUT_NAMES,
            adjust_transposes=[False] * len(node.input),
            load_quantization_if_present=True,
            skip_empty_tensors=False)

        # trim batch dimension from state values
        for state_node_name in ['i_state', 'c_state']:
            state_node = constant_nodes[LSTMParameters.INPUT_NAMES.index(state_node_name)]
            if opts.get('load_tensors'):
                state_node.value = state_node.value[0]
                state_node.dims = Dim(list(state_node.value.shape), is_ordered=True)
            # set by default as allocated
            state_node.at_options.allocate = True
            state_node.is_constant = False
            # reset state after each invocation
            state_node.always_copy = True
            # add a single reset
            state_node.reset_name = "Reset"

        # Link the state weights to the input weights
        # The autotiler expects the state and input weights to be
        # concatenated. This tells the constant code generator to do this
        for gate in ['i', 'o', 'c', 'f']:
            i_w_node = constant_nodes[LSTMParameters.INPUT_NAMES.index('i_2_%s_w' % gate)]
            r_w_node = constant_nodes[LSTMParameters.INPUT_NAMES.index('r_2_%s_w' % gate)]
            r_w_node.concated_nodes.append(i_w_node)
            i_w_node.generate_value = False

        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #20
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        params = Conv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
            dilation=DilationDim(node_opts.DilationHFactor(),
                                 node_opts.DilationWFactor()),
            padding=pad,
            has_bias=True,
            in_dims_hint=SparseList([['h', 'w', 'c'],
                                     cls.TF_LITE_FILTER_ORDER.copy(),
                                     ['out_c']]),
            out_dims_hint=SparseList([['h', 'w', 'c']]),
            constant_store=G.constant_store)
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G, params, node.input[0], weights_node,
                                       bias_node, node.output[0], opts)
        # if opts.get('load_dequantized'):
        #     weights_node.value, bias_node.value = cls.load_dequantized_filter_parameters(
        #         node.input, bias_node.value)
        # else:
        #     qrec, weights_node.value, bias_node.value = cls.load_filter_parameters(G, params, node.input, bias_node.value,
        #                                                                            node.output, opts)
        #     if qrec:
        #         G.quantization[NodeId(weights_node)].out_qs[0] = qrec.in_qs[1]
        #         G.quantization[NodeId(bias_node)].out_qs[0] = qrec.in_qs[2]

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #21
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(DepthwiseConv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

        # get filter dimensions
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

        # multiplier should match filter
        check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c,
              "invalid multiplier")

        groups = filt_dim.out_c // node_opts.DepthMultiplier()

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        if len(inputs) > 2:
            bias = inputs[2]
            bias_node = bias[0]
        else:
            bias_node = ConstantInputParameters(
                f'{node.name}_bias',
                dims=Dim.unnamed([filt_out_c]),
                value=np.zeros([filt_out_c], dtype=np.float32))  # TODO - check

        # TFLITE produces single channel input DW convolutions with the
        # multiplier equal to the number of out channels. This is just
        # a normal convolution and since we don't handle the channel
        # multiplier at present (but can) just convert them to normal
        # convolutions
        convert_to_conv = in_c == 1 and groups == 1

        if convert_to_conv:
            filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
            # TODO - reorder weights for node converted to convolution (perhaps just dequantize)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                has_bias=True,
                in_dims_hint=SparseList([['h', 'w', 'c'],
                                         cls.TF_LITE_FILTER_ORDER.copy(),
                                         ['out_c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)
        else:
            filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                groups=groups,
                multiplier=node_opts.DepthMultiplier(),
                has_bias=True,
                tf_depthwise=True,
                in_dims_hint=SparseList([['h', 'w', 'c'],
                                         cls.TF_LITE_DW_FILTER_ORDER.copy(),
                                         ['out_c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)

        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))
        cls.new_load_filter_parameters(G,
                                       params,
                                       node.input[0],
                                       weights_node,
                                       bias_node,
                                       node.output[0],
                                       opts,
                                       dw_to_pw=convert_to_conv)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size(
            [in_dim,
             Dim.unnamed(filt_dim.shape),
             Dim.unnamed([filt_out_c])])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
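
The multiplier check and the groups computation above reduce to simple arithmetic: a TFLite depthwise filter carries in_c * depth_multiplier output channels, and grouping by the multiplier recovers one group per input channel. Concretely:

in_c = 8
depth_multiplier = 2
filt_out_c = in_c * depth_multiplier        # what the check above enforces
groups = filt_out_c // depth_multiplier     # one group per input channel
assert groups == in_c

# the convert_to_conv case: with in_c == 1 every output channel sees the
# whole (single-channel) input, which is just a normal convolution
in_c = 1
assert (in_c * depth_multiplier) // depth_multiplier == 1   # groups == 1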
Example #22
    def conv(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        inputs = [all_nodes[inp] for inp in node.input]
        # input N x C x H x W
        x = inputs[0]
        x_rank = len(x[2].shape)
        x_shape = x[2].shape
        spatial_size = x_rank - 2
        assert spatial_size == 2 or spatial_size == 1, "only 1D and 2D convolutions supported"

        # M x C/group x kH x kW
        weights_node = inputs[1][0]
        weights_node.name = f'{valid_name}_weights'
        weights = cls.get_constant(inputs[1])
        out_c = weights.shape[0]
        group = node.attrs.get("group", 1)
        in_c = x_shape[1]
        filt_in_c = in_c // group
        if in_c != weights.shape[1] * group:
            raise ValueError(
                f'node {valid_name} has incorrect input channel '
                f'dimension {in_c} expecting {weights.shape[1] * group}')
        if spatial_size == 1:
            filt_w = weights.shape[-1]
            filt_h = 1
            # create a new constant node since we are changing the shape
            weights = np.reshape(weights, (out_c, filt_in_c, filt_h, filt_w))
            weights_node = ConstantInputParameters(
                f'{valid_name}_weights',
                value=weights,
                dims=Dim.unnamed(weights.shape),
                constant_store=G.constant_store)
        else:
            filt_h = weights.shape[-2]
            filt_w = weights.shape[-1]
        h = 1 if spatial_size == 1 else x_shape[-2]
        w = x_shape[-1]

        filt_dim = Conv2DFilterDim(filt_h, filt_w, out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.ONNX_FILTER_ORDER)

        if len(inputs) > 2:
            biases_node = inputs[2][0]
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([out_c], dtype=np.float32)
            biases_node = ConstantInputParameters(
                f'{valid_name}_biases',
                value=biases,
                dims=Dim.unnamed(biases.shape),
                constant_store=G.constant_store)

        dilations = cls.pad_start_with(node.attrs.get("dilations", []), [1], 2)
        strides = cls.pad_start_with(node.attrs.get("strides", []), [1], 2)
        pad_dim = cls.calc_pad_dim(node, 4)

        params = Conv2DParameters(
            valid_name,
            filt=filt_dim,
            stride=StrideDim(strides[0], strides[1]),
            dilation=DilationDim(dilations[0], dilations[1]),
            groups=group,
            padding=pad_dim,
            has_bias=True,
            in_dims_hint=SparseList([['c', 'h', 'w'], cls.ONNX_FILTER_ORDER,
                                     ['c']]),
            out_dims_hint=SparseList([['c', 'h', 'w']]),
            constant_store=G.constant_store)

        in_dim = Dim.named_ordered(c=in_c, h=h, w=w)
        w_dim = Dim.named_ordered(out_c=out_c,
                                  in_c=filt_in_c,
                                  h=filt_h,
                                  w=filt_w)
        b_dim = Dim.named_ordered(c=out_c)
        out_dims = params.get_output_size([in_dim, w_dim, b_dim])
        G.add_edge(
            NNEdge(from_node=weights_node,
                   to_node=params,
                   from_idx=0,
                   to_idx=1))
        G.add_edge(
            NNEdge(from_node=biases_node, to_node=params, from_idx=0,
                   to_idx=2))
        if spatial_size == 1:
            oned_in_shape = [in_c, w]
            twod_in_shape = [in_c, 1, w]
            oned_out_shape = [out_dims[0].c, out_dims[0].w]
            r1_params = ReshapeParameters(f'{valid_name}_reshape2d',
                                          old_shape=Dim.unnamed(oned_in_shape),
                                          shape=Dim.unnamed(twod_in_shape))
            r2_params = ReshapeParameters(f'{valid_name}_reshape1d',
                                          old_shape=out_dims[0],
                                          shape=Dim.unnamed(oned_out_shape))
            G.add_edge(
                NNEdge(from_node=x[0],
                       to_node=r1_params,
                       from_idx=x[1],
                       to_idx=0))
            G.add_edge(
                NNEdge(from_node=r1_params,
                       to_node=params,
                       from_idx=0,
                       to_idx=0))
            G.add_edge(
                NNEdge(from_node=params,
                       to_node=r2_params,
                       from_idx=0,
                       to_idx=0))
            pout_dims = ProvisionalDim([x_shape[0]] + oned_out_shape)
            all_nodes[node.output[0]] = (r2_params, 0, pout_dims)
            return r2_params
        else:
            pout_dims = ProvisionalDim([x_shape[0]] + out_dims[0].shape)
            G.add_edge(
                NNEdge(from_node=x[0], to_node=params, from_idx=x[1],
                       to_idx=0))
            all_nodes[node.output[0]] = (params, 0, pout_dims)
            return params
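
The 1D branch above only shuffles shapes: the kernel gets a height of 1 and the input a matching singleton dim, so a 2D conv over them computes the 1D conv. The bookkeeping in numpy terms:

import numpy as np

out_c, in_c, k_w, w = 4, 3, 5, 32
weights_1d = np.zeros((out_c, in_c, k_w))
weights_2d = weights_1d.reshape(out_c, in_c, 1, k_w)    # kH == 1, as above

x_1d = np.zeros((in_c, w))
x_2d = x_1d.reshape(in_c, 1, w)                         # reshape2d: (c, w) -> (c, 1, w)

out_w = w - k_w + 1                                     # a VALID conv keeps height at 1
out_2d_shape = (out_c, 1, out_w)
out_1d_shape = (out_c, out_w)                           # reshape1d drops the height again
assert out_2d_shape[0] == out_1d_shape[0] and out_2d_shape[2] == out_1d_shape[1]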
Example #23
def add_depthwise_convolution(G,
                              tensors,
                              name,
                              subgraph,
                              _,
                              op,
                              load_tensors=False,
                              dequantize=False):
    conv_opts = DepthwiseConv2DOptions.DepthwiseConv2DOptions()
    conv_opts.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)

    # get filter dimensions
    inp = get_input_size(tensors, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    filt = get_input_size(tensors,
                          subgraph,
                          op,
                          1,
                          order=TF_LITE_DW_FILTER_ORDER)
    filt = Conv2DFilterDim(filt['h'], filt['w'],
                           filt['out_c'], in_c=1)

    # multiplier should match filter
    check(filt.out_c == conv_opts.DepthMultiplier() * inp['c'],
          "invalid multiplier")

    groups = filt.out_c // conv_opts.DepthMultiplier()

    # compute padding
    pad = get_tf_padding(conv_opts.Padding())

    # does it have biases
    has_bias = op.InputsLength() > 2

    # TFLITE produces single channel input DW convolutions with the
    # multiplier equal to the number of out channels. This is just
    # a normal convolution and since we don't handle the channel
    # multiplier at present (but can) just convert them to normal
    # convolutions
    convert_to_conv = inp['c'] == 1 and groups == 1

    if convert_to_conv:
        filt.impose_order(TF_LITE_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(),
                                                 conv_opts.StrideW()),
                                padding=pad,
                                has_bias=has_bias,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)
    else:
        filt.impose_order(TF_LITE_DW_FILTER_ORDER)
        node = Conv2DParameters(name,
                                filt=filt,
                                stride=StrideDim(conv_opts.StrideH(),
                                                 conv_opts.StrideW()),
                                padding=pad,
                                groups=groups,
                                multiplier=conv_opts.DepthMultiplier(),
                                has_bias=has_bias,
                                tf_depthwise=True,
                                in_dims_hint=SparseList([['h', 'w', 'c']]),
                                out_dims_hint=SparseList([['h', 'w', 'c']]),
                                constant_store=G.constant_store)

    if load_tensors:
        node.weights = get_tensor(G.model,
                                  tensors,
                                  subgraph,
                                  op,
                                  1,
                                  dequantize=dequantize)
        # If we've converted to a normal conv then change the weight order
        if convert_to_conv:
            node.weights = node.weights.transpose(TF_LITE_DW_FILTER_TRANSPOSE)
        if has_bias:
            node.biases = get_tensor(G.model,
                                     tensors,
                                     subgraph,
                                     op,
                                     2,
                                     dequantize=dequantize)

    return fuse_activation(G, conv_opts, name, node)
Example #24
    def _common(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(DepthwiseConv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        filt_tensor = node.input[1]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_in_c, filt_h, filt_w, filt_out_c = tuple(filt_shape)

        # get filter dimensions
        filt_tensor.used = True
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)

        # multiplier should match filter
        check(filt_dim.out_c == node_opts.DepthMultiplier() * in_c,
              "invalid multiplier")

        groups = filt_dim.out_c // node_opts.DepthMultiplier()

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        has_bias = len(inputs) > 2
        if has_bias:
            node.input[2].used = True

        # TFLITE produces single channel input DW convolutions with the
        # multiplier equal to the number of out channels. This is just
        # a normal convolution and since we don't handle the channel
        # multiplier at present (but can) just convert them to normal
        # convolutions
        convert_to_conv = in_c == 1 and groups == 1

        if convert_to_conv:
            filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                has_bias=has_bias,
                in_dims_hint=SparseList([['h', 'w', 'c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)
        else:
            filt_dim.impose_order(cls.TF_LITE_DW_FILTER_ORDER)
            params = Conv2DParameters(
                node.name,
                filt=filt_dim,
                stride=StrideDim(node_opts.StrideH(), node_opts.StrideW()),
                dilation=DilationDim(node_opts.DilationHFactor(),
                                     node_opts.DilationWFactor()),
                padding=pad,
                groups=groups,
                multiplier=node_opts.DepthMultiplier(),
                has_bias=has_bias,
                tf_depthwise=True,
                in_dims_hint=SparseList([['h', 'w', 'c']]),
                out_dims_hint=SparseList([['h', 'w', 'c']]),
                constant_store=G.constant_store)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params,
                                                   node.input,
                                                   convert_to_conv,
                                                   is_dw=True)
        else:
            cls.load_filter_parameters(G,
                                       params,
                                       node.input,
                                       node.output,
                                       opts,
                                       converted_to_conv=convert_to_conv)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
Example #25
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']

        trans_a = node.attrs.get('transA', 0)
        trans_b = node.attrs.get('transB', 0)
        alpha = node.attrs.get('alpha', 1.0)
        beta = node.attrs.get('beta', 1.0)

        inputs = [all_nodes[inp] for inp in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        y_shape = y[2].shape

        real_x_shape = cls._get_real_dim(x_shape)
        real_y_shape = cls._get_real_dim(y_shape)

        real_x_shape = [
            real_x_shape[1], real_x_shape[0]
        ] if len(real_x_shape) == 2 and trans_a else real_x_shape
        real_y_shape = [
            real_y_shape[1], real_y_shape[0]
        ] if len(real_y_shape) == 2 and trans_b else real_y_shape

        if not cls.is_linear(y, real_x_shape, real_y_shape) or trans_a:
            raise ValueError(
                "GEMM is only currently supported for operations that map onto a linear kernel"
            )

        if len(inputs) > 2:
            biases = cls.get_constant(inputs[2])
        else:
            biases = np.zeros([real_y_shape[1]], dtype=np.float32)

        filt_dim = FcFilterDim(real_y_shape[1], real_x_shape[0])

        # always create new constants since they may be modified by this node and could be linked elsewhere
        weights = cls.get_constant(y) * alpha
        if not trans_b:
            weights = np.transpose(weights, [1, 0])
        weights_params = ConstantInputParameters(f'{valid_name}_weights',
                                                 dims=Dim.unnamed(
                                                     weights.shape),
                                                 value=weights)
        biases = biases * beta
        biases_params = ConstantInputParameters(f'{valid_name}_biases',
                                                dims=Dim.unnamed(biases.shape),
                                                value=biases)

        params = FcParameters(valid_name,
                              filt=filt_dim,
                              has_bias=True,
                              in_dims_hint=SparseList([['c']]),
                              out_dims_hint=SparseList([['c']]),
                              constant_store=G.constant_store)

        G.add_edge(NNEdge(from_node=weights_params, to_node=params, to_idx=1))
        G.add_edge(NNEdge(from_node=biases_params, to_node=params, to_idx=2))

        out_dims = params.get_output_size([Dim.unnamed(real_x_shape)])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        if isinstance(x[2], ProvisionalDim):
            out_dim = x[2].infer_mapping(out_dims[0].shape)
        else:
            out_dim = out_dims[0]
        all_nodes[node.output[0]] = (params, 0, out_dim)
        return params
Example #26
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(Conv2DOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]

        x = inputs[0]
        x_shape = x[2].shape
        in_b, h, w, in_c = tuple(x_shape)

        filt = inputs[1]
        filt_tensor = node.input[1]
        filt_shape = filt[2].shape
        # ['in_c', 'h', 'w', 'out_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        # get filter dimensions
        filt_tensor.used = True
        if filt_h > h or filt_w > w:
            LOG.warning(
                "Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]",
                node.name, filt_h, filt_w, h, w)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        # compute padding
        pad = cls.get_tf_padding(node_opts.Padding())

        # does it have biases
        has_bias = len(inputs) > 2
        if has_bias:
            node.input[2].used = True

        params = Conv2DParameters(node.name,
                                  filt=filt_dim,
                                  stride=StrideDim(node_opts.StrideH(),
                                                   node_opts.StrideW()),
                                  dilation=DilationDim(
                                      node_opts.DilationHFactor(),
                                      node_opts.DilationWFactor()),
                                  padding=pad,
                                  has_bias=has_bias,
                                  in_dims_hint=SparseList([['h', 'w', 'c']]),
                                  out_dims_hint=SparseList([['h', 'w', 'c']]),
                                  constant_store=G.constant_store)

        if opts.get('load_dequantized'):
            cls.load_dequantized_filter_parameters(params, node.input)
        else:
            cls.load_filter_parameters(G, params, node.input, node.output,
                                       opts)

        in_dim = Dim.named_ordered(h=h, w=w, c=in_c)
        out_dims = params.get_output_size([in_dim])
        pout_dims = ProvisionalDim([in_b] + out_dims[0].shape)
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        params = cls.fuse_activation(node_opts, node.name, params, **kwargs)
        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params