Example #1
def two_conv_graph():
    G = NNGraph(name='two_conv_graph')
    ti = G.add_input(Dim.unnamed([10, 10, 2]))
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    to = G.add_output()
    G.add_edge(NNEdge(ti, n1))
    G.add_edge(NNEdge(n1, n2))
    G.add_edge(NNEdge(n2, to))
    G.add_dimensions()
    yield G
Example #2
    def version_1(cls, node: TFLiteNode, **kwargs):
        node_opts = node.get_options(TransposeConvOptions)
        G = kwargs['G']
        opts = kwargs['opts']
        all_nodes = kwargs['all_nodes']

        inputs = [all_nodes[t] for t in node.input]
        x = inputs[2]
        x_shape = x[2].shape
        in_b, in_h, in_w, in_c = tuple(x_shape)
        pout_shape = [
            dim if x_shape[idx] is not None else None
            for idx, dim in enumerate(cls.get_constant(inputs[0]))
        ]
        out_b, out_h, out_w, out_c = tuple(pout_shape)

        filt = inputs[1]
        weights_node = filt[0]
        filt_shape = filt[2].shape
        # TFLite filter order: ['out_c', 'h', 'w', 'in_c']
        filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)

        filt_dim = Conv2DFilterDim(filt_h, filt_w, filt_out_c, in_c=filt_in_c)
        filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)

        stride_w = node_opts.StrideW()
        stride_h = node_opts.StrideH()
        # compute padding
        pad = node_opts.Padding()
        if pad == Padding.SAME:
            pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
            pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
            pad_top = pad_h // 2
            pad_left = pad_w // 2
            pad = PadDim(pad_top,
                         pad_h - pad_top,
                         pad_left,
                         pad_w - pad_left,
                         same_type='balanced_right')
        else:
            pad = PadDim(0)

        params = TransposeConv2DParameters(
            node.name,
            filt=filt_dim,
            stride=StrideDim(stride_h, stride_w),
            padding=pad,
            in_dims_hint=[['h', 'w', 'c'],
                          cls.TF_LITE_FILTER_ORDER.copy()],
            out_dims_hint=[['h', 'w', 'c']])
        G.add_edge(
            NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
        pout_dims = ProvisionalDim(pout_shape)

        all_nodes[node.output[0]] = (params, 0, pout_dims)
        return params
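The SAME branch above derives the explicit pad amounts from the input size, stride, filter size and requested output size. Below is a minimal standalone sketch of that arithmetic in plain Python (variable meanings assumed to match the handler; this is not the nntool implementation):

def same_pad(in_size, stride, filt_size, out_size):
    # total padding needed so the transpose conv produces out_size
    total = (in_size - 1) * stride + filt_size - out_size
    before = total // 2           # pad_top / pad_left
    after = total - before        # pad_bottom / pad_right ("balanced_right")
    return before, after

# e.g. a 2x upsampling transpose conv: 4 -> 8 with a 4x4 filter, stride 2
assert same_pad(4, 2, 4, 8) == (1, 1)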
Example #3
    def calc_pad_dim(cls, node, expected_len):
        if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
            pad_dim = PadDim(*cls.mix_pads(node.attrs.get("pads", [])))
        elif node.attrs["auto_pad"] == "VALID":
            pad_dim = PadDim.valid()
        elif node.attrs["auto_pad"] == "SAME_UPPER":
            pad_dim = PadDim.same(same_type="balanced_left")
        elif node.attrs["auto_pad"] == "SAME_LOWER":
            pad_dim = PadDim.same(same_type="balanced_right")
        else:
            raise ValueError("bad pad type")
        return pad_dim
Example #4
def test_paddim_compat():
    red1 = PadDim.pad_compatibility_reduce([True, True, False, True], [True, True, False, True])
    assert red1 == [True, True, False, True]
    red1 = PadDim.pad_compatibility_reduce([True, True, False, True], [True, True, False, False])
    assert red1 == [True, True, False, False]
    dim1 = PadDim(1)
    dim2 = PadDim(1, 2, 1, 2)
    compat1 = dim1.pad_compatibility
    assert compat1 == [False, False, True, True]    
    compat2 = dim2.pad_compatibility
    assert compat2 == [False, False, False, True]    
    red2 = PadDim.pad_compatibility_reduce(compat1, compat2)
    assert red2 == [False, False, False, True]    
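The assertions above are consistent with PadDim.pad_compatibility_reduce being an element-wise AND over the boolean compatibility vectors (the library version also takes an error message, as seen in the generator examples further down). A standalone sketch of that reduction, not the library code:

def pad_compat_reduce_sketch(*vectors):
    return [all(flags) for flags in zip(*vectors)]

assert pad_compat_reduce_sketch([True, True, False, True],
                                [True, True, False, True]) == [True, True, False, True]
assert pad_compat_reduce_sketch([True, True, False, True],
                                [True, True, False, False]) == [True, True, False, False]
assert pad_compat_reduce_sketch([False, False, True, True],
                                [False, False, False, True]) == [False, False, False, True]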
Example #5
    def calc_pad_dim(cls, node, spatial_size):
        pads = cls.pad_start_with(node.attrs.get("pads", [0, 0] * spatial_size), [0, 0], 2)

        if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
            pad_dim = PadDim(*pads)
        elif node.attrs["auto_pad"] == "VALID":
            pad_dim = PadDim.valid()
        elif node.attrs["auto_pad"] == "SAME_UPPER":
            pad_dim = PadDim.same(same_type="balanced_left")
        elif node.attrs["auto_pad"] == "SAME_LOWER":
            pad_dim = PadDim.same(same_type="balanced_right")
        else:
            raise ValueError("bad pad type")
        return pad_dim
Example #6
def test_conf2d_depth():
    # TF Lite depthwise convolution
    weights = np.arange(9).reshape([3, 3])
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    filt = Conv2DFilterDim(3, 3, 2,
                           1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output1 = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output1,
                          [[[258, 294], [402, 438]], [[258, 294], [402, 438]]])
    output2 = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     allow_faster=False)
    assert np.array_equal(output1, output2)
Example #7
def test_conf2d_normal():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    details = {}
    output_ = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     details=details)
    # assert details['max_acc'] == 438.0 and details['min_acc'] == 258.0
    assert np.array_equal(output_, [[[258, 294], [402, 438]]])
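The expected values in test_conf2d_normal come from a plain valid cross-correlation of the 3x3 weights over the 4x4 input. A NumPy-only re-derivation (a sketch, independent of the nntool conv2d kernel):

import numpy as np

weights = np.arange(9).reshape(3, 3)
input_ = np.arange(16).reshape(4, 4)
out = np.array([[np.sum(input_[i:i + 3, j:j + 3] * weights) for j in range(2)]
                for i in range(2)])
assert np.array_equal(out, [[258, 294], [402, 438]])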
Example #8
def test_conf2d_q2(caplog):
    caplog.set_level(logging.INFO)
    weights_q = QType(16, 1, True)
    weights = weights_q.quantize(np.full([1, 1, 2, 2], 1.0))
    filt = Conv2DFilterDim(2, 2, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    in_q = QType(16, 0, True)
    calc_q = QType(weights_q.bits + in_q.bits, weights_q.q + in_q.q, True)
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[in_q],
                                    weights_q=weights_q,
                                    acc_q=calc_q,
                                    calc_q=calc_q)
    input_ = in_q.quantize(np.full([1, 2, 2], 1.0))
    in_dims = Dim.named(c=1, h=2, w=2).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params,
                     in_dims,
                     out_dims[0],
                     input_,
                     weights,
                     None,
                     qrec=qrec)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4.]]])
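test_conf2d_q2 only adds up if QType(bits, q, signed) is read as a plain Qx.q fixed-point format (quantize multiplies by 2**q and rounds). Under that assumption, a back-of-the-envelope check of the expected 4.0; the rescale step is an assumption about how the kernel maps the accumulator to out_qs, not the library code:

w = round(1.0 * 2 ** 1)      # weights_q: q=1 -> 2
x = round(1.0 * 2 ** 0)      # in_q: q=0 -> 1
acc = 4 * (w * x)            # 2x2 valid conv over an all-ones input: 4 taps, q = 1 + 0
out = acc >> 1               # rescale from calc_q (q=1) to out_q (q=0)
assert out / 2 ** 0 == 4.0   # dequantize with in_q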
Example #9
def add_mean(G,
             tensors,
             name,
             subgraph,
             op_name,
             op,
             load_tensors=False,
             dequantize=False):
    check(op.InputsLength() == 2,\
        "Very odd " + str(op.InputsAsNumpy()))
    mean_dims = get_tensor(G.model, tensors, subgraph, op, 1, dequantize=False)
    if len(mean_dims) != 2 or mean_dims[0] != 1 or mean_dims[1] != 2:
        LOG.warning(
            "MEAN operator seen but can't convert to global average pool")
        return add_unconverted(G, name, subgraph, op_name, op, load_tensors,
                               dequantize)
    else:
        LOG.info("MEAN operator converted to global average pool")

    inp = get_input_size(None, subgraph, op, 0, order=TF_LITE_IN_OUT_ORDER)
    check(inp['n'] == 1, "Multi batch not supported")

    return add_node(
        G,
        PoolingParameters(name,
                          filt=PoolFilterDim(inp['h'], inp['w']),
                          stride=StrideDim(1, 1),
                          padding=PadDim.valid(),
                          pool_type="average",
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']])))
Example #10
def actfusion_graph():
    G = NNGraph(name='actfusion_graph')
    ti1 = G.add_input(Dim.unnamed([10, 10, 2])).name
    ti2 = G.add_input(Dim.unnamed([10, 10, 2])).name
    c1filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c1filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n1 = Conv2DParameters("node1",
                          filt=c1filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n1)
    w1 = [[0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]
    w1 = [w1, w1, w1]
    w2 = [[0.75, 0.75], [0.75, 0.75], [0.75, 0.75]]
    w2 = [w2, w2, w2]
    n1.weights = np.array([w1, w2])
    n1a = ReluActivationParameters("node1a")
    G.add_node(n1a)
    c2filt = Conv2DFilterDim(3, 3, 2, in_c=2)
    c2filt.impose_order(['out_c', 'h', 'w', 'in_c'])
    n2 = Conv2DParameters("node2",
                          filt=c2filt,
                          stride=StrideDim(1, 1),
                          padding=PadDim(0),
                          in_dims_hint=SparseList([['h', 'w', 'c']]),
                          out_dims_hint=SparseList([['h', 'w', 'c']]))
    G.add_node(n2)
    w3 = [[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]
    w3 = [w3, w3, w3]
    n2.weights = np.array([w3, w3])
    n3 = MatrixAddParameters("node3")
    G.add_node(n3)
    n4 = ReluActivationParameters("node4")
    G.add_node(n4)
    to = G.add_output()
    G.add_edge(NNEdge(ti1, n1))
    G.add_edge(NNEdge(n1, n1a))
    G.add_edge(NNEdge(ti2, n2))
    G.add_edge(NNEdge(n1a, n3, to_idx=0))
    G.add_edge(NNEdge(n2, n3, to_idx=1))
    G.add_edge(NNEdge(n3, n4))
    G.add_edge(NNEdge(n4, to))
    G.add_dimensions()
    yield G
Example #11
def add_convolution(out_graph, routes, idx, l):
    activation = get_str(l, 'activation', default="logistic")
    node_name = "{}_{}".format(l['type'], idx)
    routes['in'][idx] = node_name
    padding = l.get("padding")
    pad = l.get("pad")
    size = get_int(l, 'size', 1)
    groups = get_int(l, 'groups', 1)
    filters_c = get_int(l, 'filters', 1)
    stride = get_int(l, 'stride', 1)
    batch_normalize = get_int(l, 'batch_normalize', 0)
    flipped = get_int(l, 'flipped', 0)
    custom = {'batch_normalize': batch_normalize == 1, 'flipped': flipped == 1}

    assert 'binary' not in l, "Binary convolutions are not implemented"
    assert 'xnor' not in l, "XNOR convolutions are not implemented"
    assert 'dot' not in l, "dot is not implemented"

    # padding calculation as per Darknet code
    if pad is not None:
        padding = int(size / 2)
    if padding is None:
        padding = 0

    if activation is None:
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operator(node_name,\
                Conv2DParameters(Conv2DFilterDim(size, size, filters_c),\
                    StrideDim(stride), PadDim(padding), groups=groups,\
                        custom=custom, has_bias=True))
    else:
        activation = DARKNET_ACTIVATION_TYPES[activation]
        routes['in'][idx], routes['out'][idx] =\
            out_graph.add_operators(
                node_name,
                [
                    Conv2DParameters(Conv2DFilterDim(size, size, filters_c),\
                        StrideDim(stride), PadDim(padding), groups=groups,\
                            custom=custom, has_bias=True),
                    ActivationParameters(activation)
                ]
            )

    return True
Example #12
def add_pad(G,
            name,
            subgraph,
            op_name,
            op,
            load_tensors=False,
            dequantize=False):
    check(op.InputsLength() == 2,\
        "Very odd " + str(op.InputsAsNumpy()))
    pad_dim = get_tensor(G.model, subgraph, op, 1, dequantize=False)
    assert np.all(pad_dim[3] == 0), "channel padding not supported"
    pad_dim = [int(pad_dim[i][j]) for i in range(1, 3) for j in range(2)]
    return add_node(G, PadParameters(name, PadDim(*pad_dim)))
Example #13
    def calc_shapes(node, spatial_size, input_size, kernel_shape):
        padding = expand_dim(node.attrs.get('pads', None), 4 - spatial_size * 2, 0)
        auto_pad = node.attrs.get('auto_pad', 'NOTSET')
        output_shape = expand_dim(node.attrs.get('output_shape', None), 2 - spatial_size, 1)
        output_padding = Dim2D(*expand_dim(node.attrs.get('output_padding', None), 2 - spatial_size, 0))
        dilations = DilationDim(*expand_dim(node.attrs.get('dilations', None), 2 - spatial_size, 1))
        strides = StrideDim(*expand_dim(node.attrs.get('strides', None), 2 - spatial_size, 1))
        if output_shape:
            total_padding = strides * (input_size - 1) + output_padding + ((kernel_shape - 1) * dilations + 1) - output_shape
            if auto_pad == 'SAME_UPPER':
                pad_start = total_padding // 2
                pad_end = total_padding - pad_start
            else:
                pad_end = total_padding // 2
                pad_start = total_padding - pad_end
            padding = PadDim(pad_start.h, pad_end.h, pad_start.w, pad_end.w)
        elif auto_pad == 'NOTSET':
            assert padding, 'pads not set and auto_pad is NOTSET'
            padding = PadDim(*padding)
        elif auto_pad == 'VALID':
            padding = PadDim.valid()
        return padding, dilations, strides, output_padding
Example #14
def test_max_pool_normal():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    input_ = np.arange(9).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
Example #15
    def __init__(self,
                 *args,
                 stride=None,
                 padding=None,
                 pad_type="zero",
                 **kwargs):
        if stride is None:
            stride = StrideDim(1, 1)
        if padding is None:
            padding = PadDim(0)

        super(FilterLikeParameters, self).__init__(*args, **kwargs)
        self.stride = stride
        self.padding = padding
        self.pad_type = pad_type
        self._ker_in_order = [['c', 'h', 'w']]
        self._ker_out_order = [['c', 'h', 'w']]
Example #16
def test_max_pool_q():
    filt = PoolFilterDim(2, 2)
    stride = StrideDim(1)
    pad = PadDim(0)
    params = PoolingParameters("test",
                               filt=filt,
                               stride=stride,
                               padding=pad,
                               pool_type="max")
    in_q = QType(16, 0, True)
    qrec = QuantizationRecord([in_q], [in_q])
    input_ = in_q.quantize(np.arange(9)).reshape([1, 3, 3])
    in_dims = Dim.named(c=1, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = max_pool(params, in_dims, out_dims[0], input_)
    output_ = in_q.dequantize(output_)
    assert np.array_equal(output_, [[[4, 5], [7, 8]]])
Example #17
def test_conf2d_depth_q():
    calc_q = QType(32, 9, True)
    biases_q = acc_q = out_q = QType(16, 4, True)
    weights_q = QType(16, 4, True)
    in_q = QType(16, 5, True)
    # TF Lite depthwise convolution
    biases = np.full([2], 0.5)
    qbiases = biases_q.quantize(biases)
    weights = np.full([3, 3], 0.5)
    weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
    qweights = weights_q.quantize(weights)
    filt = Conv2DFilterDim(3, 3, 2,
                           1).impose_order(["in_c", "h", "w", "out_c"])
    stride = StrideDim(1)
    pad = PadDim(0)
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              groups=1,
                              multiplier=2,
                              tf_depthwise=True,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    qrec = FilterQuantizationRecord(in_qs=[in_q],
                                    out_qs=[out_q],
                                    weights_q=weights_q,
                                    biases_q=biases_q,
                                    acc_q=acc_q,
                                    calc_q=calc_q)
    input_ = np.full([1, 4, 4], 2)
    qinput_ = in_q.quantize(input_)
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, biases)
    qoutput_ = conv2d(params,
                      in_dims,
                      out_dims[0],
                      qinput_,
                      qweights,
                      qbiases,
                      qrec=qrec)
    dqoutput_ = out_q.dequantize(qoutput_)
    assert np.array_equal(output_, dqoutput_)
Example #18
def test_paddim():
    dim1 = PadDim(1)
    assert not dim1.is_same
    assert dim1.h == 2 and dim1.w == 2
    assert dim1.l == 1 and dim1.r == 1 and dim1.t == 1 and dim1.b == 1
    assert dim1.numpy_pad_shape(Dim.named_ordered(w=10, h=10)) == [(1, 1), (1, 1)]
    stride_dim = StrideDim(1)
    filt_dim = Conv2DFilterDim(5, 5, 1, 1)
    in_dim = Dim.named_ordered(c=1, h=20, w=20)
    dim1 = PadDim.same()
    dim1.calculate_same(in_dim, filt_dim, stride_dim)
    assert dim1.shape == [2, 2, 2, 2]
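A quick check of the shape asserted above: with SAME padding at stride 1 the output stays 20x20, so the total padding for a 5x5 filter is 4, split 2/2 on each axis. This is a sketch of the assumed arithmetic, not PadDim.calculate_same itself:

in_size, filt, stride = 20, 5, 1
out_size = -(-in_size // stride)                              # ceil(in / stride) for SAME
total_pad = max((out_size - 1) * stride + filt - in_size, 0)
assert (total_pad // 2, total_pad - total_pad // 2) == (2, 2)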
Example #19
def test_conf2d_pad_dilate():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(2)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[266., 206.], [98., 66.]]])
Example #20
def test_conf2d_pad():
    weights = np.arange(9).reshape([1, 1, 3, 3])
    filt = Conv2DFilterDim(3, 3, 1, 1)
    stride = StrideDim(1)
    pad = PadDim.same()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(16).reshape([1, 4, 4])
    in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
    assert np.array_equal(output_, [[[73, 121, 154, 103], [171, 258, 294, 186],\
        [279, 402, 438, 270], [139, 187, 202, 113]]])
Example #21
    def replace_function(self, G: GraphView, subgraph: GraphView):
        filter_like_node, pad_node = None, None
        for node in subgraph.nodes():
            if isinstance(node, FilterLikeParameters):
                filter_like_node = node
            elif isinstance(node, PadParameters):
                pad_node = node
        assert filter_like_node and pad_node
        LOG.debug("adding padding from: %s to filter: %s", pad_node.name,
                  filter_like_node.name)
        assert filter_like_node.in_dims_hint and filter_like_node.in_dims_hint[
            0], "filter doesn't have a hint"
        in_hint = filter_like_node.in_dims_hint[0]
        hinted_pad = {
            in_hint[idx]: pad
            for idx, pad in enumerate(pad_node.padding) if sum(pad) > 0
        }
        key_set = set(hinted_pad.keys())
        key_set -= set(['h', 'w'])

        if len(key_set) > 0:
            LOG.error(
                "node %s has padding on axes %s and cannot be fused with filter %s",
                pad_node.name, key_set, filter_like_node.name)
            raise DontReplaceError()
        if any(pval != 0 for val in pad_node.pad_vals for pval in val):
            LOG.error(
                "node %s has non zero pad values and cannot be fused with filter %s",
                pad_node.name, filter_like_node.name)
            raise DontReplaceError()

        for key in ['h', 'w']:
            if key not in hinted_pad:
                hinted_pad[key] = (0, 0)

        filter_like_node.padding = PadDim(*(list(hinted_pad['h']) +
                                            list(hinted_pad['w'])))
        filter_like_node.pad_type = "zero"
        if G.quantization:
            G.quantization.remove_node(pad_node)
        return filter_like_node, None, None
Example #22
def test_conf2d_2_in_2_out_c():
    weights = np.arange(4).reshape([1, 2, 2])
    weights = np.append(weights, weights, axis=0)
    weights = np.append(weights, weights, axis=0)
    weights = weights.reshape([2, 2, 2, 2])
    filt = Conv2DFilterDim(2, 2, 2, 2)
    stride = StrideDim(1)
    pad = PadDim.valid()
    dilation = DilationDim(1)
    params = Conv2DParameters("test",
                              filt=filt,
                              stride=stride,
                              padding=pad,
                              dilation=dilation,
                              in_dims_hint=[['c', 'h', 'w']],
                              out_dims_hint=[['c', 'h', 'w']])
    input_ = np.arange(9).reshape([1, 3, 3])
    input_ = np.append(input_, input_, axis=0)
    in_dims = Dim.named(c=2, h=3, w=3).impose_order(['c', 'h', 'w'])
    out_dims = params.get_output_size([in_dims])
    output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
    assert np.array_equal(output_, [[[38., 50.], [74., 86.]],\
        [[38., 50.], [74., 86.]]])
Example #23
    def __init__(self, node_name, cname, conv_params, conv_q,
                 pool_params, pool_q, act_params, act_q, at_ver=3, gen_ctrl=None):
        if gen_ctrl is None:
            gen_ctrl = GenCtrl(None, cname=cname)
        else:
            gen_ctrl.cname = cname

        in_q = filter_q = out_q = bias_q = mul_biases_q = None
        in_dim = out_dim = None
        pad_compatibilities = []
        if conv_params is not None:
            at_conv_params = gen_conv_at_params(
                conv_params, conv_q, pad_compatibilities)
            in_dim = conv_params.in_dims[0]
            out_dim = conv_params.out_dims[0]
            filter_q = conv_q.in_qs[1]
            in_q = conv_q.in_qs[0]
            out_q = conv_q.out_qs[0]
            bias_q = conv_q.in_qs[2]
            if conv_params.has_mul_bias:
                mul_biases_q = conv_q.mul_biases_q
        else:
            at_conv_params = NO_CONV

        if pool_params is not None:
            at_pool_params = gen_pool_at_params(
                pool_params, pad_compatibilities)
            if in_dim is None:
                in_dim = pool_params.in_dims[0]
            out_dim = pool_params.out_dims[0]
            if in_q is None:
                in_q = pool_q.in_qs[0]
            out_q = pool_q.out_qs[0]
        else:
            at_pool_params = NO_POOL

        if act_params is not None:
            at_act_params = gen_active_at_params(act_params)
            if in_dim is None:
                in_dim = act_params.in_dims[0]
            if out_dim is None:
                out_dim = act_params.out_dims[0]
            if in_q is None:
                in_q = act_q.in_qs[0]
            out_q = act_q.out_qs[0]
            if at_ver < 3:
                if act_params.activation == "relu6" and out_q.q != 0:
                    gen_ctrl.ReluN = 6 << out_q.q
                    gen_ctrl.ReluNNoNorm = 1
            else:
                if act_params.activation == "relun":
                    gen_ctrl.ReluN = act_params.activation_params

        else:
            at_act_params = NO_ACTIVATION

        if pad_compatibilities:
            reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,
                                                        "convolution padding is not compatible with pool padding")
            if not reduction[2]:  # default is balanced pad left
                at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
                LOG.debug("%s: generating pad control block", node_name)
                gen_ctrl.PadType = at_pad_ctrl

        attrs = {
            'in_qtype': in_q,
            'out_qtype': out_q,
            'filter_qtype': filter_q,
            'bias_qtype': bias_q,
            'mul_biases_qtype': mul_biases_q,
            'relu_oper': at_act_params.ReLUOper
        }

        if at_pool_params.PoolOper != 'KOP_NONE':
            attrs.update({
                'pool_oper': at_pool_params.PoolOper,
                'pool_w': at_pool_params.Fpx,
                'pool_h': at_pool_params.Fpy,
                'pool_d_w': at_pool_params.Dpx,
                'pool_d_h': at_pool_params.Dpy,
                'pool_s_w': at_pool_params.Spx,
                'pool_s_h': at_pool_params.Spy,
                'pool_pad': at_pool_params.PoolPad
            })
        else:
            attrs.update({
                'pool_oper': 'KOP_NONE',
                'pool_w': 0,
                'pool_h': 0,
                'pool_d_w': 0,
                'pool_d_h': 0,
                'pool_s_w': 0,
                'pool_s_h': 0,
                'pool_pad': 0
            })

        if at_conv_params == NO_CONV:
            if in_q.dtype_bits != out_q.dtype_bits:
                raise NotImplementedError(
                    "only homogenious operations are supported at present")
            LOG.debug("%s: pool relu inq %s outq %s control block",
                      node_name, in_q, out_q)
            if at_pool_params.PoolOper == 'KOP_NONE' and (not in_dim.is_named or not in_dim.has_keys(['c', 'w', 'h'])):
                in_shape = in_dim.shape + ([1] * (3 - len(in_dim.shape)))
                in_c, in_h, in_w = in_shape[0], in_shape[1], in_shape[2]
            else:
                in_c, in_h, in_w = in_dim.c, in_dim.h, in_dim.w
            if out_dim.is_named and out_dim.has_key('c'):
                out_c = out_dim.c
            else:
                out_c = in_c
            attrs.update({
                'in_c': in_c,
                'in_h': in_h,
                'in_w': in_w,
                'out_c': out_c,
                'conv_oper': 'KOP_NONE'
            })
            self.template = 'CALL_TEMPLATE_POOL_RELU'
        else:
            # swap w and h when the input w and the filter w are both 1 so the generator sees a 1D conv
            if in_dim.w == 1 and at_conv_params.Fcx == 1:
                attrs.update({
                    'in_c': in_dim.c,
                    'in_h': 1,
                    'in_w': in_dim.h,
                    'out_c': out_dim.c,
                    'conv_oper': at_conv_params.ConvOper,
                    'conv_w': at_conv_params.Fcy,
                    'conv_h': 1,
                    'conv_d_w': at_conv_params.Dcy,
                    'conv_d_h': at_conv_params.Dcx,
                    'conv_s_w': at_conv_params.Scy,
                    'conv_s_h': at_conv_params.Scx,
                    'conv_pad': at_conv_params.ConvPad

                })
            else:
                attrs.update({
                    'in_c': in_dim.c,
                    'in_h': in_dim.h,
                    'in_w': in_dim.w,
                    'out_c': out_dim.c,
                    'conv_oper': at_conv_params.ConvOper,
                    'conv_w': at_conv_params.Fcx,
                    'conv_h': at_conv_params.Fcy,
                    'conv_d_w': at_conv_params.Dcx,
                    'conv_d_h': at_conv_params.Dcy,
                    'conv_s_w': at_conv_params.Scx,
                    'conv_s_h': at_conv_params.Scy,
                    'conv_pad': at_conv_params.ConvPad
                })
            if isinstance(at_conv_params, ConvATParam):
                if mul_biases_q is not None:
                    LOG.debug("%s: mulconv pool relu inq %s outq %s control block",
                              node_name, in_q, out_q)
                    self.template = 'CALL_TEMPLATE_MULBIAS_CONV_POOL_RELU'
                else:
                    LOG.debug("%s: conv pool relu inq %s outq %s control block",
                              node_name, in_q, out_q)
                    self.template = 'CALL_TEMPLATE_CONV_POOL_RELU'
            elif isinstance(at_conv_params, GroupedConvATParam):
                attrs.update({
                    'group_in': at_conv_params.GroupIn,
                    'group_out': at_conv_params.GroupOut
                })
                if mul_biases_q is not None:
                    LOG.debug("%s: grouped mulconv pool relu inq %s outq %s control block",
                              node_name, in_q, out_q)
                    self.template = 'CALL_TEMPLATE_GROUPED_MULBIAS_CONV_POOL_RELU'
                else:
                    LOG.debug("%s: grouped conv pool relu inq %s outq %s control block",
                              node_name, in_q, out_q)
                    self.template = 'CALL_TEMPLATE_GROUPED_CONV_POOL_RELU'
            else:
                raise ValueError('Internal error')

        # other attributes
        extra_attrs = {
            'cname': cname,
            'node_name': node_name
        }

        super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
Example #24
    def __init__(self,
                 node_name,
                 cname,
                 conv_params,
                 conv_q,
                 pool_params,
                 pool_q,
                 act_params,
                 act_q,
                 at_ver=3,
                 gen_ctrl=None,
                 force_relu=True):
        if gen_ctrl is None:
            self.gen_ctrl = gen_ctrl = GenCtrl(None, cname=cname)
        else:
            gen_ctrl.cname = cname
            self.gen_ctrl = gen_ctrl

        in_q = filter_q = out_q = bias_q = mul_biases_q = None
        in_dim = out_dim = None
        pad_compatibilities = []
        if conv_params is not None:
            at_conv_params = gen_conv_at_params(conv_params,
                                                pad_compatibilities)
            in_dim = conv_params.in_dims[0]
            out_dim = conv_params.out_dims[0]
            # Set ENABLEIM2COL on 1x1 filters by default
            if conv_params.filter.h == 1 and conv_params.filter.w == 1 and gen_ctrl.enableim2col is None:
                gen_ctrl.enableim2col = 1
            filter_q = conv_q.in_qs[1]
            in_q = conv_q.in_qs[0]
            out_q = conv_q.out_qs[0]
            bias_q = conv_q.in_qs[2]
            if conv_params.has_mul_bias:
                mul_biases_q = conv_q.mul_biases_q
        else:
            at_conv_params = NO_CONV

        if pool_params is not None:
            at_pool_params = gen_pool_at_params(pool_params,
                                                pad_compatibilities)
            if in_dim is None:
                in_dim = pool_params.in_dims[0]
            out_dim = pool_params.out_dims[0]
            if in_q is None:
                in_q = pool_q.in_qs[0]
            out_q = pool_q.out_qs[0]
        else:
            at_pool_params = NO_POOL

        if act_params is not None:
            at_act_params = gen_active_at_params(act_params,
                                                 force_relu=force_relu)
            if in_dim is None:
                in_dim = act_params.in_dims[0].expand_to_chw()
            if out_dim is None:
                out_dim = act_params.out_dims[0].expand_to_chw()
            if in_q is None:
                in_q = act_q.in_qs[0]
            out_q = act_q.out_qs[0]

        else:
            at_act_params = NO_ACTIVATION

        if pad_compatibilities:
            reduction = PadDim.pad_compatibility_reduce(
                *pad_compatibilities,
                "convolution padding is not compatible with pool padding")
            if not reduction[2]:  # default is balanced pad left
                at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
                LOG.debug("%s: generating pad control block", node_name)
                self.gen_ctrl.PadType = at_pad_ctrl
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_q = in_q
        self.bias_q = bias_q
        self.out_q = out_q
        self.filter_q = filter_q
        self.mul_biases_q = mul_biases_q
        self.at_act_params = at_act_params
        self.at_pool_params = at_pool_params
        self.at_conv_params = at_conv_params
        self.cname = cname
        self.node_name = node_name
        self.at_ver = at_ver
Example #25
def get_tf_padding(padding):
    if padding == Padding.Padding.SAME:
        return PadDim.same()
    if padding == Padding.Padding.VALID:
        return PadDim.valid()
    raise ValueError("Strange padding type")
Example #26
def split_pad(pad):
    pad_oth = pad // 2
    pad_full = pad - pad_oth
    return PadDim(pad_full, pad_oth, pad_full, pad_oth)
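split_pad puts the larger half first on each axis. A quick check of the split arithmetic in plain Python (the PadDim argument order is assumed to be top, bottom, left, right, as in the other examples):

for pad, expected in [(0, (0, 0)), (3, (2, 1)), (4, (2, 2))]:
    pad_oth = pad // 2
    pad_full = pad - pad_oth
    assert (pad_full, pad_oth) == expected   # e.g. pad=3 -> PadDim(2, 1, 2, 1)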
Example #27
    def __init__(self,
                 node_name,
                 cname,
                 conv_params,
                 conv_q,
                 pool_params,
                 pool_q,
                 act_params,
                 act_q,
                 at_ver=3,
                 gen_ctrl=None):
        self.ne16 = False
        if gen_ctrl is None:
            self.gen_ctrl = gen_ctrl = GenCtrl(None, cname=cname)
        else:
            gen_ctrl.cname = cname
            self.gen_ctrl = gen_ctrl

        in_q = filter_q = out_q = bias_q = mul_biases_q = None
        in_dim = out_dim = None
        pad_compatibilities = []
        if conv_params is not None:
            if conv_params.ker_in_order and conv_params.ker_in_order[0] == [
                    "h", "w", "c"
            ]:
                self.hwc = True
                self.gen_ctrl.hwc = 1
            at_conv_params = gen_conv_at_params(conv_params,
                                                pad_compatibilities)
            in_dim = conv_params.in_dims[0]
            out_dim = conv_params.out_dims[0]
            # Set ENABLEIM2COL on 1x1 filters by default
            if conv_params.filter.h == 1 and conv_params.filter.w == 1 and gen_ctrl.enableim2col is None:
                gen_ctrl.enableim2col = 1
            filter_q = conv_q.in_qs[1]
            in_q = conv_q.in_qs[0]
            out_q = conv_q.out_qs[0]
            bias_q = conv_q.in_qs[2]
            if conv_params.has_mul_bias:
                mul_biases_q = conv_q.mul_biases_q
            self.ne16 = conv_q.cache.get('ne16')
        else:
            at_conv_params = NO_CONV

        if pool_params is not None:
            if pool_params.ker_in_order and pool_params.ker_in_order[0] == [
                    "h", "w", "c"
            ]:
                self.hwc = True
                self.gen_ctrl.hwc = 1
            at_pool_params = gen_pool_at_params(pool_params,
                                                pad_compatibilities)
            if in_dim is None:
                in_dim = pool_params.in_dims[0]
            out_dim = pool_params.out_dims[0]
            if in_q is None:
                in_q = pool_q.in_qs[0]
            out_q = pool_q.out_qs[0]
        else:
            at_pool_params = NO_POOL

        if act_params is not None:
            if act_params.ker_in_order and act_params.ker_in_order[0] == [
                    "h", "w", "c"
            ]:
                self.hwc = True
                self.gen_ctrl.hwc = 1
            if in_q is None:
                in_q = act_q.in_qs[0]
            at_act_params = gen_active_at_params(
                act_params,
                force_relu=False,
                asymmetric=act_q.in_qs[0].zero_point != 0)
            if isinstance(act_params,
                          ReluActivationParameters) and act_params.upper_bound:
                self.gen_ctrl.relun = act_params.upper_bound
            if in_dim is None:
                in_dim = act_params.in_dims[0].expand_to_chw()
            if out_dim is None:
                out_dim = act_params.out_dims[0].expand_to_chw()
            out_q = act_q.out_qs[0]

        else:
            at_act_params = NO_ACTIVATION

        if pad_compatibilities:
            reduction = PadDim.pad_compatibility_reduce(
                *pad_compatibilities,
                "convolution padding is not compatible with pool padding")
            if not reduction[2]:  # default is balanced pad left
                at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
                LOG.debug("%s: generating pad control block", node_name)
                self.gen_ctrl.PadType = at_pad_ctrl
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_q = in_q
        self.bias_q = bias_q
        self.out_q = out_q
        self.filter_q = filter_q
        self.mul_biases_q = mul_biases_q
        self.at_act_params = at_act_params
        self.at_pool_params = at_pool_params
        self.at_conv_params = at_conv_params
        self.cname = cname
        self.node_name = node_name
        self.at_ver = at_ver
Example #28
    def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
        something_changed = False
        filt_nodes = [
            node for node in G.nodes()
            if isinstance(node, (Conv2DParameters, ConvFusionParameters))
        ]
        for filt_node in filt_nodes:
            pnode = filt_node
            if isinstance(filt_node, ConvFusionParameters):
                cnodes = filt_node.contained_nodes()
                filt_node = cnodes[0]
            if not isinstance(filt_node, Conv2DParameters):
                continue
            in_dim = filt_node.in_dims
            filt_dim = filt_node.filter
            if filt_dim.h <= in_dim[0].h and filt_dim.w <= in_dim[0].w:
                continue

            min_h = min(filt_dim.h, in_dim[0].h)
            min_w = min(filt_dim.w, in_dim[0].w)
            if min_h > 1 and min_w > 1:
                LOG.warning(
                    "Filter of %s [%dx%d] bigger than input [%dx%d] not optimal but will work on AT",
                    filt_node.name, filt_dim.h, filt_dim.w, in_dim[0].h,
                    in_dim[0].w)
                continue

            if min_h == 1:
                ker_h = 1
                ker_h_reduced = True
            else:
                ker_h_reduced = False
                ker_h = filt_dim.h
            if min_w == 1:
                ker_w = 1
                ker_w_reduced = True
            else:
                ker_w_reduced = False
                ker_w = filt_dim.w
            if ker_h == filt_dim.h and ker_w == filt_dim.w:
                continue
            if filt_node.padding:
                if ker_h_reduced:
                    top = bottom = 0
                else:
                    top = filt_node.padding.t
                    bottom = filt_node.padding.b
                if ker_w_reduced:
                    left = right = 0
                else:
                    left = filt_node.padding.l
                    right = filt_node.padding.r
                padding = PadDim(top, bottom, left, right)
            else:
                padding = PadDim(0)

            new_filt_dim = Conv2DFilterDim(ker_h,
                                           ker_w,
                                           filt_dim.out_c,
                                           in_c=filt_dim.in_c)
            LOG.warning("Converting filter of %s from [%dx%d] -> [%dx%d]",
                        filt_node.name, filt_dim.h, filt_dim.w, new_filt_dim.h,
                        new_filt_dim.w)
            filt_node.filter = new_filt_dim
            filt_node.padding = padding
            new_w_idxs = []
            for dim in filt_dim.order:
                if dim in ('out_c', 'in_c'):
                    new_w_idxs.append(slice(None))
                elif dim == 'h':
                    if new_filt_dim.h == 1:
                        new_w_idxs.append(
                            slice(filt_node.padding.t,
                                  filt_node.padding.t + 1))
                    else:
                        new_w_idxs.append(slice(0, new_filt_dim.h))
                elif dim == 'w':
                    if new_filt_dim.w == 1:
                        new_w_idxs.append(
                            slice(filt_node.padding.l,
                                  filt_node.padding.l + 1))
                    else:
                        new_w_idxs.append(slice(0, new_filt_dim.w))
            weights_node = G.indexed_in_edges(pnode.name)[1].from_node
            weights_node.value = weights_node.value[tuple(new_w_idxs)]
            weights_node.dims = Dim.unnamed(weights_node.value.shape)
            something_changed = True

        if set_identity:
            self.set_identity(G)

        return something_changed
Example #29
    def __init__(self, node_name, cname, pool_params, pool_q,
                 act_params, act_q, code_block=None, at_ver=3, gen_ctrl=None):
        if gen_ctrl is None:
            self.gen_ctrl = GenCtrl(None, cname=cname)
        else:
            gen_ctrl.cname = cname
            self.gen_ctrl = gen_ctrl

        in_q = out_q = None
        in_dim = out_dim = None
        pad_compatibilities = []

        if pool_params is not None:
            at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)
            if in_dim is None:
                in_dim = pool_params.in_dims[0]
            out_dim = pool_params.out_dims[0]
            if in_q is None:
                in_q = pool_q.in_qs[0]
            out_q = pool_q.out_qs[0]
        else:
            at_pool_params = NO_POOL

        if act_params is not None:
            at_act_params = gen_active_at_params(act_params)
            if in_dim is None:
                in_dim = act_params.in_dims[0]
            if out_dim is None:
                out_dim = act_params.out_dims[0]
            if in_q is None:
                in_q = act_q.in_qs[0]
            out_q = act_q.out_qs[0]
            if at_ver < 3:
                if act_params.activation == "relu6" and out_q.q != 0:
                    self.gen_ctrl.ReluN = 6 << out_q.q
                    self.gen_ctrl.ReluNNoNorm = 1
            else:
                if act_params.activation == "relun":
                    self.gen_ctrl.ReluN = act_params.activation_params
        else:
            at_act_params = NO_ACTIVATION

        if code_block is None:
            code_block = CodeBlock()

        if pad_compatibilities:
            reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,
                                                        "convolution padding is not compatible with pool padding")
            if not reduction[2]:  # default is balanced pad left
                at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
                self.gen_ctrl.PadType = at_pad_ctrl

        if in_q.bits != out_q.bits:
            raise NotImplementedError("only homogenious operations are supported at present")
        if at_pool_params == NO_POOL:
            raise NotImplementedError(
                "activation layer on its own should not be matched by this kernel")

        self.at_pool_params = at_pool_params
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.in_q = in_q
        self.out_q = out_q
        self.at_act_params = at_act_params
        self.cname = cname
        self.node_name = node_name
        self.at_ver = at_ver
Example #30
    def match(self, G: GraphView, set_identity: bool = True) -> bool:
        has_modified_graph = False
        for pad_params in [
                pad for pad in G.nodes() if isinstance(pad, PadParameters)
        ]:
            pad_in_edges = G.in_edges(pad_params.name)
            pad_out_edges = G.out_edges(pad_params.name)
            dont_delete = False
            for pad_out_edge in pad_out_edges:
                filter_like_node, is_1d = self.find_conv(
                    G, pad_out_edge.to_node)
                if not filter_like_node:
                    dont_delete = True
                    continue
                if not filter_like_node.in_dims_hint or not filter_like_node.in_dims_hint[
                        0]:
                    raise ValueError(
                        f"filter {filter_like_node.name} doesn't have a input hint"
                    )
                in_hint = filter_like_node.in_dims_hint[0]
                if is_1d:
                    if len(pad_params.padding) != 2:
                        LOG.warning(
                            "pad node %s is applied to 1d convolution but has length %s",
                            pad_params.name, len(pad_params.padding))
                        dont_delete = True
                        continue
                    expanded_padding = [
                        pad_params.padding[0], (0, 0), pad_params.padding[1]
                    ]
                else:
                    if len(pad_params.padding) != 3:
                        LOG.warning(
                            "pad node %s is applied to 2d convolution but has length %s",
                            pad_params.name, len(pad_params.padding))
                        dont_delete = True
                        continue
                    expanded_padding = pad_params.padding

                hinted_pad = {
                    in_hint[idx]: pad
                    for idx, pad in enumerate(expanded_padding) if sum(pad) > 0
                }
                key_set = set(hinted_pad.keys())
                key_set -= set(['h', 'w'])
                if len(key_set) > 0:
                    dont_delete = True
                    LOG.error(
                        "node %s has padding on axes %s and cannot be fused with filter %s",
                        pad_params.name, key_set, filter_like_node.name)
                    continue
                if any(pval != 0 for val in pad_params.pad_vals
                       for pval in val):
                    dont_delete = True
                    LOG.error(
                        "node %s has non zero pad values and cannot be fused with filter %s",
                        pad_params.name, filter_like_node.name)
                    continue

                LOG.info("adding padding from: %s to %s filter: %s",
                         pad_params.name, is_1d and "1D" or "2D",
                         filter_like_node.name)

                for key in ['h', 'w']:
                    if key not in hinted_pad:
                        hinted_pad[key] = (0, 0)

                filter_like_node.padding = PadDim(*(list(hinted_pad['h']) +
                                                    list(hinted_pad['w'])))
                filter_like_node.pad_type = "zero"
                has_modified_graph = True
                G.remove_edge(pad_out_edge)
                if is_1d:
                    reshape_node = pad_out_edge.to_node
                    reshape_node.old_shape = self.remove_padding(
                        reshape_node.old_shape, pad_params.padding)
                    reshape_node.shape = self.remove_padding(
                        reshape_node.shape, expanded_padding)
                for in_edge in pad_in_edges:
                    G.add_edge(
                        NNEdge(from_node=in_edge.from_node,
                               to_node=pad_out_edge.to_node,
                               from_idx=in_edge.from_idx,
                               to_idx=pad_out_edge.to_idx))

            if not dont_delete:
                G.remove(pad_params)
                if G.quantization:
                    G.quantization.remove_node(pad_params)

        if set_identity:
            self.set_identity(G)

        return has_modified_graph