Example #1
def _create_elus(g):
    # type: (NNEFGraph)->None

    (input, gt_out, exp_out, sub_out, mul_out, output,
     zero, one, alpha) = matcher.tensors(9)

    _gt = matcher.Operation(name='gt', inputs=(input, zero), outputs=gt_out)
    _exp = matcher.Operation(name='exp', inputs=input, outputs=exp_out)
    _sub = matcher.Operation(name='sub',
                             inputs=(exp_out, one),
                             outputs=sub_out)
    _mul = matcher.Operation(name='mul',
                             inputs=(sub_out, alpha),
                             outputs=mul_out)
    select = matcher.Operation(name='select',
                               inputs=(gt_out, input, mul_out),
                               outputs=output)

    matcher.replace(
        g, select,
        lambda m: NNEFOperation(graph=g,
                                name="elu",
                                inputs=m[input],
                                outputs=m[output],
                                attribs=dict(_alpha=float(m[alpha].data[0]))),
        lambda m: (m[zero].data == [0.0] and m[one].data == [1.0]
                   and m[alpha].rank == 0 and m[alpha].data is not None))
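The pattern above encodes the pointwise identity select(x > 0, x, alpha * (exp(x) - 1)) == elu(x). A minimal NumPy check of that identity (function and variable names here are illustrative, not part of the converter):

import numpy as np

def elu_decomposed(x, alpha):
    # The five-op subgraph the matcher recognizes:
    # select(gt(x, zero), x, mul(sub(exp(x), one), alpha))
    return np.where(x > 0, x, (np.exp(x) - 1.0) * alpha)

def elu_fused(x, alpha):
    # What the single replacement 'elu' op computes
    return np.where(x > 0, x, alpha * np.expm1(x))

x = np.linspace(-3.0, 3.0, 13)
assert np.allclose(elu_decomposed(x, 0.5), elu_fused(x, 0.5))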
Example #2
def transform_cgf_stb(g):
    # type: (TFGraph)->None
    orig_input, output_grad, stb1_output, stb2_output = matcher.tensors(4)
    stb1 = matcher.Operation(
        name=["tf.space_to_batch", "tf.space_to_batch_nd"],
        inputs=orig_input,
        outputs=stb1_output)
    _stb2 = matcher.Operation(
        name=["tf.space_to_batch", "tf.space_to_batch_nd"],
        inputs=output_grad,
        outputs=stb2_output)
    cgf = matcher.Operation(name="_conv_grad_filter",
                            inputs=(stb1_output, stb2_output))
    pattern = matcher.SetParams(cgf, allow_multi_consumer_inside=True)

    def action(m):
        # type: (matcher.Match)->None
        block_shape = (m[stb1].attribs["block_shape"]
                       if m[stb1].name.endswith("_nd") else
                       [m[stb1].attribs["block_size"]] *
                       len(m[stb1].attribs["paddings"]))
        padding = "SAME" if utils.recursive_any(m[stb1].attribs["paddings"],
                                                lambda x: x > 0) else "VALID"

        TFOperation(graph=g,
                    name=m[cgf].name,
                    inputs=(m[orig_input], m[output_grad]),
                    attribs=utils.updated_dict(m[cgf].attribs,
                                               dilation=block_shape,
                                               padding=padding),
                    outputs=m[cgf].outputs)
        g.remove_operation(m[cgf], unlink=True)

    matcher.for_each(g, pattern, action)
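The block_shape expression in action covers both matched op names: tf.space_to_batch_nd carries an explicit block_shape list, while plain tf.space_to_batch stores a scalar block_size that applies to every padded dimension. A small illustration with made-up attribute values:

# Illustrative attribute dicts; real values come from the matched op m[stb1].
nd_attribs = {"block_shape": [2, 2], "paddings": [[1, 1], [1, 1]]}
plain_attribs = {"block_size": 2, "paddings": [[1, 1], [1, 1]]}

assert nd_attribs["block_shape"] == [2, 2]  # used as-is for the _nd variant
assert [plain_attribs["block_size"]] * len(plain_attribs["paddings"]) == [2, 2]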
Example #3
def _merge_pads(g):
    # type: (NNEFGraph)->None

    t = matcher.Tensor()
    pad = matcher.Operation(name=['box', 'pad'], outputs=t)
    sliding = matcher.Operation(name=['argmax_pool', 'max_pool', 'max_pool_with_index', 'avg_pool', 'conv'],
                                inputs={0: t})

    def condition(m):
        # type: (matcher.Match)->bool
        # Merge only a true 'pad', or a 'box' that is pure padding
        # (unit size, stride and dilation, and no normalization).
        if not (m[pad].name == 'pad' or
                (m[pad].name == 'box'
                 and all(s == 1 for s in m[pad].attribs.get('size', []))
                 and all(s == 1 for s in m[pad].attribs.get('stride', []))
                 and all(s == 1 for s in m[pad].attribs.get('dilation', []))
                 and not m[pad].attribs.get('normalize', False))):
            return False

        value = m[pad].attribs.get('_value', 0.0)

        if value not in [0.0, float('-inf')]:
            return False

        if value == float('-inf'):
            if m[sliding].name not in ['argmax_pool', 'max_pool', 'max_pool_with_index']:
                return False

        if m[pad].attribs.get('border', 'constant') != 'constant':
            return False

        if (m[sliding].attribs.get('border', 'constant') != 'constant'
                and any(p != 0 or q != 0 for p, q in m[sliding].attribs.get('padding', []))):
            return False

        if m[sliding].name in ['conv'] and any(p != 0 or q != 0 for p, q in m[pad].attribs.get('padding', [])[:2]):
            return False

        return True

    def action(m):
        # type: (matcher.Match)->None
        value = m[pad].attribs.get('_value', 0.0)
        pad_padding = m[pad].attribs.get('padding', [(0, 0)] * m[t].rank)
        sliding_padding = m[sliding].attribs.get('padding', [(0, 0)] * m[t].rank)

        if m[sliding].name in ['conv']:
            # conv 'padding' covers spatial dims only; drop batch and channel entries
            pad_padding = pad_padding[2:]

        assert len(pad_padding) == len(sliding_padding)

        m[sliding].attribs['padding'] = [(p + pp, q + qq) for (p, q), (pp, qq) in zip(pad_padding, sliding_padding)]
        m[sliding].attribs['border'] = 'ignore' if value == float('-inf') else 'constant'

        graph_utils.remove_passthrough(g, m[pad])

    matcher.for_each(graph=g, pattern=sliding, action=action, condition=condition)

    for op in g.operations:
        if op.name in ['box', 'pad'] and '_value' in op.attribs:
            raise utils.NNEFToolsException('Could not export {} with value={}'.format(op.name, op.attribs['_value']))
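The padding merge in action is a pairwise sum of (front, back) pairs, after the pad's batch and channel entries have been dropped for conv. With illustrative values:

# Hypothetical spatial paddings for one pad -> max_pool pair.
pad_padding = [(1, 1), (2, 2)]
sliding_padding = [(0, 0), (1, 0)]

merged = [(p + pp, q + qq)
          for (p, q), (pp, qq) in zip(pad_padding, sliding_padding)]
assert merged == [(1, 1), (3, 2)]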
Example #4
def _create_thresholds(g):
    # type: (NNEFGraph)->None

    input, threshold, gt_output, one, zero, output = matcher.tensors(6)
    _gt_op = matcher.Operation(name="gt",
                               inputs=(input, threshold),
                               outputs=gt_output)
    select_op = matcher.Operation(name="select",
                                  inputs=(gt_output, one, zero),
                                  outputs=output)

    def condition(m):
        return (m[output].shape == m[input].shape
                and m[threshold].data is not None and m[one].data is not None
                and m[zero].data is not None and m[threshold].rank == 0
                and np.all(m[one].get_numpy_array() == 1.0)
                and np.all(m[zero].get_numpy_array() == 0.0))

    def replacement(m):
        NNEFOperation(
            graph=g,
            name="_threshold",
            inputs=m[input],
            outputs=m[output],
            attribs=dict(threshold=float(m[threshold].get_numpy_array())))

    matcher.replace(g, select_op, replacement, condition)
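The matched subgraph is select(x > t, 1, 0) with a scalar t, i.e. a unit step at the threshold, which is what the _threshold replacement stands for. A quick NumPy check (names are illustrative):

import numpy as np

def threshold_decomposed(x, t):
    # select(gt(x, t), one, zero), as matched above; the comparison is strict
    return np.where(x > t, 1.0, 0.0)

x = np.array([-1.0, 0.2, 0.5, 2.0])
assert threshold_decomposed(x, 0.5).tolist() == [0.0, 0.0, 0.0, 1.0]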
Example #5
def _unite_powers(g):
    # type: (CaffeGraph)->None

    input, scaled, shifted, powered = matcher.tensors(4)

    scale_op = matcher.Operation(name=['Power'], inputs=input, outputs=scaled)
    shift_op = matcher.Operation(name=['Power'],
                                 inputs=scaled,
                                 outputs=shifted)
    power_op = matcher.Operation(name=['Power', 'Exp'],
                                 inputs=shifted,
                                 outputs=powered)

    def condition(m):
        return (list(m[scale_op].attribs.keys()) == ['scale']
                and list(m[shift_op].attribs.keys()) == ['shift']
                and list(m[power_op].attribs.keys()) in (['power'], ['base']))

    def replacement(m):
        CaffeOperation(graph=g,
                       name=m[power_op].name,
                       inputs=m[input],
                       outputs=m[powered],
                       attribs=utils.dict_union(m[scale_op].attribs,
                                                m[shift_op].attribs,
                                                m[power_op].attribs))

    matcher.replace(g, power_op, replacement, condition)

    input, output1, output2 = matcher.tensors(3)

    op1 = matcher.Operation(name='Power', inputs=input, outputs=output1)
    op2 = matcher.Operation(name=['Power', 'Exp'],
                            inputs=output1,
                            outputs=output2)

    def condition(m):
        return ((list(m[op2].attribs.keys()) in (['power'], ['base'])
                 and list(m[op1].attribs.keys()) in (['shift'], ['scale']))
                or (list(m[op2].attribs.keys()) == ['shift']
                    and list(m[op1].attribs.keys()) == ['scale']))

    def replacement(m):
        CaffeOperation(graph=g,
                       name=m[op2].name,
                       inputs=m[input],
                       outputs=m[output2],
                       attribs=utils.dict_union(m[op2].attribs,
                                                m[op1].attribs))

    matcher.replace(g, op2, replacement, condition)
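Both merges rest on Caffe's Power layer computing y = (shift + scale * x) ^ power, with each attribute defaulting to the identity, so chained layers that each set a single, distinct attribute compose into one layer. A numeric sketch of the three-op case (values are made up):

import numpy as np

x = np.array([0.5, 1.0, 2.0])
s, b, p = 3.0, 1.0, 2.0  # scale, shift, power

chained = ((s * x) + b) ** p  # Power(scale=s) -> Power(shift=b) -> Power(power=p)
united = (b + s * x) ** p     # Power(scale=s, shift=b, power=p)
assert np.allclose(chained, united)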
Example #6
def transform_bts_conv_stb(g):
    # type: (TFGraph)->None

    input, filter, stb_output, conv_output = matcher.tensors(4)
    stb = matcher.Operation(name=["tf.space_to_batch", "tf.space_to_batch_nd"],
                            inputs=input,
                            outputs=stb_output)
    conv = matcher.Operation(name=["_conv", "_deconv"],
                             inputs=(stb_output, filter),
                             outputs=conv_output)
    bts = matcher.Operation(name=["tf.batch_to_space", "tf.batch_to_space_nd"],
                            inputs=conv_output)

    def replacement(m):
        # type: (matcher.Match)->TFOperation
        block_shape = (m[stb].attribs["block_shape"]
                       if m[stb].name.endswith("_nd") else
                       [m[stb].attribs["block_size"]] *
                       len(m[stb].attribs["paddings"]))
        if m[conv].name == "_conv":
            padding = "SAME" if utils.recursive_any(
                m[stb].attribs["paddings"], lambda x: x > 0) else "VALID"

            return TFOperation(graph=g,
                               name=m[conv].name,
                               inputs=(m[input], m[filter]),
                               attribs=utils.updated_dict(m[conv].attribs,
                                                          dilation=block_shape,
                                                          padding=padding),
                               outputs=m[bts].outputs)
        else:
            padding = "SAME" if utils.recursive_any(
                m[bts].attribs["crops"], lambda x: x > 0) else "VALID"
            output_shape = _apply_block_shape(
                shape=m[conv].attribs["output_shape"],
                block_shape=block_shape,
                data_format=m[conv].attribs["data_format"],
                crops=m[bts].attribs["crops"])

            return TFOperation(graph=g,
                               name=m[conv].name,
                               inputs=(m[input], m[filter]),
                               attribs=utils.updated_dict(
                                   m[conv].attribs,
                                   dilation=block_shape,
                                   padding=padding,
                                   output_shape=output_shape),
                               outputs=m[bts].outputs)

    matcher.replace(g, bts, replacement)
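The space_to_batch -> conv -> batch_to_space sandwich is how TensorFlow implements dilated (atrous) convolution, which is why the replacement folds the block shape into the dilation attribute. A 1-D NumPy analogue of that equivalence (helper names are illustrative):

import numpy as np

def conv1d_valid(x, w, dilation=1):
    # Plain 1-D cross-correlation with optional dilation, VALID padding
    out_len = len(x) - (len(w) - 1) * dilation
    return np.array([sum(w[k] * x[i + k * dilation] for k in range(len(w)))
                     for i in range(out_len)])

x = np.arange(8.0)
w = np.array([1.0, -2.0, 0.5])

direct = conv1d_valid(x, w, dilation=2)  # dilated convolution, computed directly

# Same result via the trick: split x into its two phases (the 1-D analogue of
# space_to_batch with block 2), convolve each without dilation, interleave back.
phases = [conv1d_valid(x[p::2], w) for p in range(2)]
via_stb = np.stack(phases, axis=1).reshape(-1)
assert np.allclose(direct, via_stb)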
Example #7
def transform_fuse_activations(tf_graph):
    # type: (TFGraph)->None

    fuse_to = [
        "tf.add",
        "tf.subtract",
        "tf.multiply",
        "tf.divide",
        "tf.nn.conv2d",
        "tf.nn.depthwise_conv2d",
        "tf.nn.max_pool",
        "tf.nn.avg_pool",
        # "tf.nn.conv2d_transpose", (not working yet)
        "tf.matmul",
        "tf.nn.l2_normalize",
        # "tf.concat" (not working yet)
    ]

    conv_output = matcher.Tensor()
    convlike = matcher.Operation(name=fuse_to, outputs=conv_output)
    activation = matcher.Operation(name="tf.nn.relu", inputs={0: conv_output})

    matcher.replace(
        tf_graph, activation, lambda m: TFOperation(
            graph=tf_graph,
            name=m[convlike].name,
            attribs=utils.dict_union(m[convlike].attribs,
                                     dict(fused_activation_function='RELU')),
            inputs=m[convlike].inputs,
            outputs=m[activation].outputs),
        lambda m: not m[convlike].attribs.get('fused_activation_function'))

    conv_output = matcher.Tensor()
    convlike = matcher.Operation(name=fuse_to, outputs=conv_output)
    activation = matcher.Operation(name="tf.clip_by_value",
                                   inputs={0: conv_output})

    matcher.replace(
        graph=tf_graph,
        pattern=activation,
        replacement=lambda m: TFOperation(
            graph=tf_graph,
            name=m[convlike].name,
            attribs=utils.dict_union(m[convlike].attribs,
                                     dict(fused_activation_function='RELU6')),
            inputs=m[convlike].inputs,
            outputs=m[activation].outputs),
        condition=lambda m: (m[activation].inputs[1].data == [0]
                             and m[activation].inputs[2].data == [6]
                             and not m[convlike].attribs.get('fused_activation_function')))
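The second replace relies on clip_by_value(x, 0, 6) being exactly ReLU6, hence the RELU6 tag; the condition verifies that the two clip bounds are the constants 0 and 6. A one-line NumPy check (illustrative only):

import numpy as np

x = np.array([-2.0, 3.0, 8.0])
assert np.allclose(np.clip(x, 0.0, 6.0), np.minimum(np.maximum(x, 0.0), 6.0))  # relu6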
Example #8
def transform_fuse_add_to_matmul(tf_graph):
    # type: (TFGraph)->None

    matmul_output, bias = matcher.tensors(2)
    matmul = matcher.Operation(name="tf.matmul", outputs=matmul_output)
    add = matcher.Operation(name="tf.add", inputs={matmul_output, bias})

    matcher.replace(tf_graph,
                    add,
                    lambda m: TFOperation(graph=tf_graph,
                                          name=m[matmul].name,
                                          attribs=m[matmul].attribs,
                                          inputs=tuple(m[matmul].inputs) + (m[bias],),
                                          outputs=m[add].outputs),
                    lambda m: len(m[matmul].inputs) == 2)
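The fusion appends the bias as a third input to tf.matmul, on the assumption (made by this converter) that the three-input form applies the bias as a trailing add, as tf.nn.bias_add would. Numerically the fused op stands for x @ w + b:

import numpy as np

x = np.random.randn(4, 3)
w = np.random.randn(3, 5)
b = np.random.randn(5)

separate = np.matmul(x, w) + b   # tf.matmul followed by tf.add
assert separate.shape == (4, 5)  # the bias broadcasts across the batch rows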
Example #9
def transform_fuse_bias_add_to_conv(tf_graph):
    # type: (TFGraph)->None

    conv_output = matcher.Tensor()
    conv = matcher.Operation(name=["tf.nn.conv2d", "tf.nn.depthwise_conv2d"], outputs=conv_output)
    add = matcher.Operation(name="tf.nn.bias_add", inputs={0: conv_output})

    matcher.replace(tf_graph,
                    add,
                    lambda m: TFOperation(graph=tf_graph,
                                          name=m[conv].name,
                                          attribs=m[conv].attribs,
                                          inputs=tuple(m[conv].inputs) + (m[add].inputs[1],),
                                          outputs=m[add].outputs),
                    lambda m: len(m[conv].inputs) == 2)
Example #10
def transform_bias_add_conv(g):
    # type: (TFGraph)->None

    conv_output = matcher.Tensor()
    conv = matcher.Operation(name=[
        "_conv", "_planewise_conv", "_separable_conv", "_deconv",
        "_planewise_deconv"
    ],
                             inputs={2: None},
                             outputs={0: conv_output})
    add = matcher.Operation(name="tf.nn.bias_add", inputs={0: conv_output})

    matcher.replace(
        g, add, lambda m: TFOperation(graph=g,
                                      name=m[conv].name,
                                      inputs=(m[conv].inputs[0],
                                              m[conv].inputs[1],
                                              m[add].inputs[1]),
                                      attribs=m[conv].attribs,
                                      outputs=m[add].outputs))
Example #11
def _merge_up_bias(g):
    # type: (CaffeGraph)->None

    input, scale, scaled, bias, output = matcher.tensors(5)

    scale_op = matcher.Operation(name=['Scale', 'InnerProduct'],
                                 inputs=(input, scale),
                                 outputs=scaled)
    bias_op = matcher.Operation(name='Bias',
                                inputs=(scaled, bias),
                                outputs=output)

    matcher.replace(
        g, bias_op, lambda m: CaffeOperation(
            graph=g,
            name=m[scale_op].name,
            inputs=(m[input], m[scale], m[bias]),
            outputs=m[output],
            attribs=utils.updated_dict(m[scale_op].attribs, bias_term=True)))
Example #12
def transform_pad(g):
    # type: (TFGraph)->None
    input, filter, pad_output = matcher.tensors(3)
    pad = matcher.Operation(name="tf.pad", inputs=input, outputs=pad_output)
    conv = matcher.Operation(name=[
        "_conv", "_planewise_conv", "_separable_conv", "_max_pool",
        "_max_pool_with_index", "_avg_pool"
    ],
                             inputs=(pad_output, filter))
    matcher.replace(
        g, conv, lambda m: TFOperation(
            graph=g,
            name=m[conv].name,
            inputs=(m[input], m[filter]),
            attribs=utils.updated_dict(
                m[conv].attribs,
                padding=[tuple(p) for p in m[pad].attribs["paddings"]],
                _border=m[pad].attribs["mode"]),
            outputs=m[conv].outputs),
        lambda m: m[conv].attribs["padding"].upper() == 'VALID')
Example #13
def _merge_batch_norm_and_scale(g):
    # type: (CaffeGraph)->None

    input, mean, variance, scale_factor, offset, scale, normed, output = matcher.tensors(8)

    batch_norm_op = matcher.Operation(name='BatchNorm',
                                      inputs=(input, mean, variance, scale_factor),
                                      outputs=normed)
    scale_op = matcher.Operation(name='Scale',
                                 inputs=(normed, scale, offset),
                                 outputs=output,
                                 attribs=dict(axis=1, num_axes=1))

    matcher.replace(g, scale_op,
                    lambda m: CaffeOperation(
                        graph=g,
                        name='BatchNorm+Scale',
                        inputs=(m[input], m[mean], m[variance], m[scale_factor], m[offset], m[scale]),
                        outputs=m[output],
                        attribs=dict(eps=m[batch_norm_op].attribs['eps'])))
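For reference, the six inputs of the fused 'BatchNorm+Scale' op line up with Caffe's BatchNorm blobs (mean, variance, and the moving-average scale_factor that rescales them) plus the Scale layer's offset and scale inputs. A minimal NumPy sketch of the computation the fused op stands for, assuming standard Caffe semantics (all values illustrative):

import numpy as np

def per_channel(v):
    return np.asarray(v).reshape(1, -1, 1, 1)  # broadcast over NCHW channels

x = np.random.randn(2, 3, 5, 5)    # NCHW activations
mean = np.array([0.5, -1.0, 2.0])
variance = np.array([1.0, 0.25, 4.0])
scale_factor = 0.999               # Caffe divides the stored stats by this
gamma = np.array([1.5, 0.5, 2.0])  # Scale's 'scale' input
beta = np.array([0.0, 1.0, -1.0])  # Scale's 'offset' input
eps = 1e-5

f = 0.0 if scale_factor == 0 else 1.0 / scale_factor
normed = (x - per_channel(f * mean)) / np.sqrt(per_channel(f * variance) + eps)
out = per_channel(gamma) * normed + per_channel(beta)  # Scale, axis=1, num_axes=1
assert out.shape == x.shape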