Example #1
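# Note: these snippets assume the surrounding converter package provides
# `matcher`, `utils`, the TFGraph/NNEFGraph/CaffeGraph types and their
# TFOperation/NNEFOperation/CaffeOperation classes, plus NumPy imported
# as `np`.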
def transform_add_conv(g):
    # type: (TFGraph)->None
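    # Folds an elementwise tf.add that follows a convolution into the conv
    # itself as a third (bias) input, but only when the added tensor is
    # shaped like a valid bias for the conv's data format (see is_bias).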

    def is_bias(conv_op, bias_tensor):
        if _is_nhwc(conv_op):
            return bias_tensor.rank == 1
        else:
            return (bias_tensor.rank == conv_op.output.rank
                    and all(i == 1 or s == 1
                            for i, s in enumerate(bias_tensor.shape)))

    input, filter, bias, conv_output = matcher.tensors(4)
    conv = matcher.Operation(
        name=["_conv", "_planewise_conv", "_separable_conv", "_deconv",
              "_planewise_deconv"],
        inputs=(input, filter),
        outputs=conv_output)
    add = matcher.Operation(name="tf.add", inputs={conv_output, bias})

    matcher.replace(
        g, add, lambda m: TFOperation(graph=g,
                                      name=m[conv].name,
                                      inputs=(m[input], m[filter], m[bias]),
                                      attribs=m[conv].attribs,
                                      outputs=m[add].outputs),
        lambda m: is_bias(m[conv], m[bias]))
Example #2
def _create_lp_pools(g):
    # type: (NNEFGraph)->None
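    # Recognizes the chain abs → pow(p) → box → pow(q) and, when p and q are
    # nonzero scalars with q ≈ 1/p, collapses it into a single '_lp_pool' op
    # that carries p as an attribute alongside the box op's attributes.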

    input, abs_out, pow_out, box_out, output, p, q = matcher.tensors(7)

    _abs_op = matcher.Operation(name='abs', inputs=input, outputs=abs_out)
    _pow_op = matcher.Operation(name='pow',
                                inputs=(abs_out, p),
                                outputs=pow_out)
    box_op = matcher.Operation(name='box', inputs=pow_out, outputs=box_out)
    pow2_op = matcher.Operation(name='pow',
                                inputs=(box_out, q),
                                outputs=output)

    matcher.replace(
        g, pow2_op,
        lambda m: NNEFOperation(
            graph=g,
            name="_lp_pool",
            inputs=m[input],
            outputs=m[output],
            attribs=utils.dict_union(
                m[box_op].attribs,
                dict(p=float(m[p].get_numpy_array().item())))),
        lambda m: (m[p].rank == 0
                   and m[p].data is not None
                   and m[p].get_numpy_array().item() != 0
                   and m[q].rank == 0
                   and m[q].data is not None
                   and m[q].get_numpy_array().item() != 0
                   and np.allclose(1.0 / m[p].get_numpy_array().item(),
                                   m[q].get_numpy_array().item())))
Example #3
def _create_elus(g):
    # type: (NNEFGraph)->None
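    # Recognizes select(x > 0, x, alpha * (exp(x) - 1)) and rewrites it as a
    # single 'elu' op, storing the scalar alpha as the '_alpha' attribute.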

    (input, gt_out, exp_out, sub_out, mul_out,
     output, zero, one, alpha) = matcher.tensors(9)

    _gt = matcher.Operation(name='gt', inputs=(input, zero), outputs=gt_out)
    _exp = matcher.Operation(name='exp', inputs=input, outputs=exp_out)
    _sub = matcher.Operation(name='sub',
                             inputs=(exp_out, one),
                             outputs=sub_out)
    _mul = matcher.Operation(name='mul',
                             inputs=(sub_out, alpha),
                             outputs=mul_out)
    select = matcher.Operation(name='select',
                               inputs=(gt_out, input, mul_out),
                               outputs=output)

    matcher.replace(
        g, select,
        lambda m: NNEFOperation(graph=g,
                                name="elu",
                                inputs=m[input],
                                outputs=m[output],
                                attribs=dict(_alpha=float(m[alpha].data[0]))),
        lambda m: (m[zero].data == [0.0] and m[one].data == [1.0]
                   and m[alpha].rank == 0 and m[alpha].data is not None))
Example #4
def _create_thresholds(g):
    # type: (NNEFGraph)->None
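    # Recognizes select(x > t, 1.0, 0.0) with a scalar constant threshold t
    # and rewrites it as a single '_threshold' op.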

    input, threshold, gt_output, one, zero, output = matcher.tensors(6)
    _gt_op = matcher.Operation(name="gt",
                               inputs=(input, threshold),
                               outputs=gt_output)
    select_op = matcher.Operation(name="select",
                                  inputs=(gt_output, one, zero),
                                  outputs=output)

    def condition(m):
        return (m[output].shape == m[input].shape
                and m[threshold].data is not None and m[one].data is not None
                and m[zero].data is not None and m[threshold].rank == 0
                and np.all(m[one].get_numpy_array() == 1.0)
                and np.all(m[zero].get_numpy_array() == 0.0))

    def replacement(m):
        NNEFOperation(
            graph=g,
            name="_threshold",
            inputs=m[input],
            outputs=m[output],
            attribs=dict(threshold=float(m[threshold].get_numpy_array())))

    matcher.replace(g, select_op, replacement, condition)
Example #5
def _unite_powers(g):
    # type: (CaffeGraph)->None
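    # Merges chains of Caffe 'Power' layers that each set only one of
    # scale/shift/power (or an 'Exp' base) into a single Power/Exp layer:
    # first scale → shift → power chains, then the two-op combinations.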

    input, scaled, shifted, powered = matcher.tensors(4)

    scale_op = matcher.Operation(name=['Power'], inputs=input, outputs=scaled)
    shift_op = matcher.Operation(name=['Power'],
                                 inputs=scaled,
                                 outputs=shifted)
    power_op = matcher.Operation(name=['Power', 'Exp'],
                                 inputs=shifted,
                                 outputs=powered)

    def condition(m):
        return (list(m[scale_op].attribs.keys()) == ['scale']
                and list(m[shift_op].attribs.keys()) == ['shift']
                and list(m[power_op].attribs.keys()) in (['power'], ['base']))

    def replacement(m):
        CaffeOperation(graph=g,
                       name=m[power_op].name,
                       inputs=m[input],
                       outputs=m[powered],
                       attribs=utils.dict_union(m[scale_op].attribs,
                                                m[shift_op].attribs,
                                                m[power_op].attribs))

    matcher.replace(g, power_op, replacement, condition)

    input, output1, output2 = matcher.tensors(3)

    op1 = matcher.Operation(name='Power', inputs=input, outputs=output1)
    op2 = matcher.Operation(name=['Power', 'Exp'],
                            inputs=output1,
                            outputs=output2)

    def condition(m):
        return ((list(m[op2].attribs.keys()) in (['power'], ['base'])
                 and list(m[op1].attribs.keys()) in (['shift'], ['scale']))
                or (list(m[op2].attribs.keys()) == ['shift']
                    and list(m[op1].attribs.keys()) == ['scale']))

    def replacement(m):
        CaffeOperation(graph=g,
                       name=m[op2].name,
                       inputs=m[input],
                       outputs=m[output2],
                       attribs=utils.dict_union(m[op2].attribs,
                                                m[op1].attribs))

    matcher.replace(g, op2, replacement, condition)
Example #6
def transform_bts_conv_stb(g):
    # type: (TFGraph)->None
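    # Rewrites space_to_batch → conv/deconv → batch_to_space as a single
    # dilated convolution: the block shape becomes the dilation, and the
    # paddings (or crops, for deconv) determine SAME vs. VALID padding.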

    input, filter, stb_output, conv_output = matcher.tensors(4)
    stb = matcher.Operation(name=["tf.space_to_batch", "tf.space_to_batch_nd"],
                            inputs=input,
                            outputs=stb_output)
    conv = matcher.Operation(name=["_conv", "_deconv"],
                             inputs=(stb_output, filter),
                             outputs=conv_output)
    bts = matcher.Operation(name=["tf.batch_to_space", "tf.batch_to_space_nd"],
                            inputs=conv_output)

    def replacement(m):
        # type: (matcher.Match)->TFOperation
        block_shape = (m[stb].attribs["block_shape"]
                       if m[stb].name.endswith("_nd") else
                       [m[stb].attribs["block_size"]] *
                       len(m[stb].attribs["paddings"]))
        if m[conv].name == "_conv":
            padding = "SAME" if utils.recursive_any(
                m[stb].attribs["paddings"], lambda x: x > 0) else "VALID"

            return TFOperation(graph=g,
                               name=m[conv].name,
                               inputs=(m[input], m[filter]),
                               attribs=utils.updated_dict(m[conv].attribs,
                                                          dilation=block_shape,
                                                          padding=padding),
                               outputs=m[bts].outputs)
        else:
            padding = "SAME" if utils.recursive_any(
                m[bts].attribs["crops"], lambda x: x > 0) else "VALID"
            output_shape = _apply_block_shape(
                shape=m[conv].attribs["output_shape"],
                block_shape=block_shape,
                data_format=m[conv].attribs["data_format"],
                crops=m[bts].attribs["crops"])

            return TFOperation(graph=g,
                               name=m[conv].name,
                               inputs=(m[input], m[filter]),
                               attribs=utils.updated_dict(
                                   m[conv].attribs,
                                   dilation=block_shape,
                                   padding=padding,
                                   output_shape=output_shape),
                               outputs=m[bts].outputs)

    matcher.replace(g, bts, replacement)
Example #7
def transform_fuse_activations(tf_graph):
    # type: (TFGraph)->None
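    # Fuses a following tf.nn.relu, or a tf.clip_by_value(x, 0, 6) acting as
    # relu6, into the preceding op's 'fused_activation_function' attribute,
    # provided no activation has been fused there already.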

    fuse_to = [
        "tf.add",
        "tf.subtract",
        "tf.multiply",
        "tf.divide",
        "tf.nn.conv2d",
        "tf.nn.depthwise_conv2d",
        "tf.nn.max_pool",
        "tf.nn.avg_pool",
        # "tf.nn.conv2d_transpose", (not working yet)
        "tf.matmul",
        "tf.nn.l2_normalize",
        # "tf.concat" (not working yet)
    ]

    conv_output = matcher.Tensor()
    convlike = matcher.Operation(name=fuse_to, outputs=conv_output)
    activation = matcher.Operation(name="tf.nn.relu", inputs={0: conv_output})

    matcher.replace(
        tf_graph, activation, lambda m: TFOperation(
            graph=tf_graph,
            name=m[convlike].name,
            attribs=utils.dict_union(m[convlike].attribs,
                                     dict(fused_activation_function='RELU')),
            inputs=m[convlike].inputs,
            outputs=m[activation].outputs),
        lambda m: not m[convlike].attribs.get('fused_activation_function'))

    conv_output = matcher.Tensor()
    convlike = matcher.Operation(name=fuse_to, outputs=conv_output)
    activation = matcher.Operation(name="tf.clip_by_value",
                                   inputs={0: conv_output})

    matcher.replace(
        graph=tf_graph,
        pattern=activation,
        replacement=lambda m: TFOperation(
            graph=tf_graph,
            name=m[convlike].name,
            attribs=utils.dict_union(m[convlike].attribs,
                                     dict(fused_activation_function='RELU6')),
            inputs=m[convlike].inputs,
            outputs=m[activation].outputs),
        condition=lambda m: (
            m[activation].inputs[1].data == [0]
            and m[activation].inputs[2].data == [6]
            and not m[convlike].attribs.get('fused_activation_function')))
Example #8
def transform_fuse_add_to_matmul(tf_graph):
    # type: (TFGraph)->None
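    # Folds a tf.add that follows a two-input tf.matmul into the matmul as a
    # third (bias) input.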

    matmul_output, bias = matcher.tensors(2)
    matmul = matcher.Operation(name="tf.matmul", outputs=matmul_output)
    add = matcher.Operation(name="tf.add", inputs={matmul_output, bias})

    matcher.replace(tf_graph,
                    add,
                    lambda m: TFOperation(graph=tf_graph,
                                          name=m[matmul].name,
                                          attribs=m[matmul].attribs,
                                          inputs=tuple(m[matmul].inputs) + (m[bias],),
                                          outputs=m[add].outputs),
                    lambda m: len(m[matmul].inputs) == 2)
Example #9
def transform_fuse_bias_add_to_conv(tf_graph):
    # type: (TFGraph)->None
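    # Folds a tf.nn.bias_add that follows a two-input conv2d/depthwise_conv2d
    # into the convolution as a third (bias) input.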

    conv_output = matcher.Tensor()
    conv = matcher.Operation(name=["tf.nn.conv2d", "tf.nn.depthwise_conv2d"], outputs=conv_output)
    add = matcher.Operation(name="tf.nn.bias_add", inputs={0: conv_output})

    matcher.replace(tf_graph,
                    add,
                    lambda m: TFOperation(graph=tf_graph,
                                          name=m[conv].name,
                                          attribs=m[conv].attribs,
                                          inputs=tuple(m[conv].inputs) + (m[add].inputs[1],),
                                          outputs=m[add].outputs),
                    lambda m: len(m[conv].inputs) == 2)
Example #10
def transform_bias_add_conv(g):
    # type: (TFGraph)->None
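    # Folds a tf.nn.bias_add into a preceding convolution whose third (bias)
    # input slot is still empty (None).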

    conv_output = matcher.Tensor()
    conv = matcher.Operation(
        name=["_conv", "_planewise_conv", "_separable_conv", "_deconv",
              "_planewise_deconv"],
        inputs={2: None},
        outputs={0: conv_output})
    add = matcher.Operation(name="tf.nn.bias_add", inputs={0: conv_output})

    matcher.replace(
        g, add,
        lambda m: TFOperation(graph=g,
                              name=m[conv].name,
                              inputs=(m[conv].inputs[0], m[conv].inputs[1],
                                      m[add].inputs[1]),
                              attribs=m[conv].attribs,
                              outputs=m[add].outputs))
Example #11
def _merge_up_bias(g):
    # type: (CaffeGraph)->None
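    # Merges a 'Bias' layer into a preceding 'Scale' or 'InnerProduct' layer
    # by appending the bias tensor as an input and setting bias_term=True.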

    input, scale, scaled, bias, output = matcher.tensors(5)

    scale_op = matcher.Operation(name=['Scale', 'InnerProduct'],
                                 inputs=(input, scale),
                                 outputs=scaled)
    bias_op = matcher.Operation(name='Bias',
                                inputs=(scaled, bias),
                                outputs=output)

    matcher.replace(
        g, bias_op, lambda m: CaffeOperation(
            graph=g,
            name=m[scale_op].name,
            inputs=(m[input], m[scale], m[bias]),
            outputs=m[output],
            attribs=utils.updated_dict(m[scale_op].attribs, bias_term=True)))
Example #12
def transform_pad(g):
    # type: (TFGraph)->None
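    # Folds an explicit tf.pad into a following VALID-padded convolution or
    # pooling op by moving the paddings and border mode into its attributes.
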
    input, filter, pad_output = matcher.tensors(3)
    pad = matcher.Operation(name="tf.pad", inputs=input, outputs=pad_output)
    conv = matcher.Operation(
        name=["_conv", "_planewise_conv", "_separable_conv", "_max_pool",
              "_max_pool_with_index", "_avg_pool"],
        inputs=(pad_output, filter))
    matcher.replace(
        g, conv, lambda m: TFOperation(
            graph=g,
            name=m[conv].name,
            inputs=(m[input], m[filter]),
            attribs=utils.updated_dict(
                m[conv].attribs,
                padding=[tuple(p) for p in m[pad].attribs["paddings"]],
                _border=m[pad].attribs["mode"]),
            outputs=m[conv].outputs),
        lambda m: m[conv].attribs["padding"].upper() == 'VALID')
Example #13
def _merge_batch_norm_and_scale(g):
    # type: (CaffeGraph)->None
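    # Merges Caffe's BatchNorm followed by a per-channel Scale (axis=1,
    # num_axes=1) into a single combined 'BatchNorm+Scale' op that takes all
    # six parameter tensors as inputs.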

    input, mean, variance, scale_factor, offset, scale, normed, output = matcher.tensors(8)

    batch_norm_op = matcher.Operation(name='BatchNorm',
                                      inputs=(input, mean, variance, scale_factor),
                                      outputs=normed)
    scale_op = matcher.Operation(name='Scale',
                                 inputs=(normed, scale, offset),
                                 outputs=output,
                                 attribs=dict(axis=1, num_axes=1))

    matcher.replace(g, scale_op,
                    lambda m: CaffeOperation(
                        graph=g,
                        name='BatchNorm+Scale',
                        inputs=(m[input], m[mean], m[variance], m[scale_factor], m[offset], m[scale]),
                        outputs=m[output],
                        attribs=dict(eps=m[batch_norm_op].attribs['eps'])))