Example #1
    def replace_pattern(graph: Graph, match: dict):
        # Nodes inside the match (other than 'add' and 'pow_d') that are also
        # consumed outside the matched subgraph; fusing would break them.
        consumers = [
            n for n in match if n not in ['add', 'pow_d']
            and not check_node_usages_out_of_match(match, n)
        ]
        if consumers:
            log.warning(
                'Power(add,pow) pattern was detected. Non-pattern consumers of nodes "{}" were found.'
                ' Won\'t replace'.format(', '.join(
                    [match[n].id for n in consumers])))
            return
        add = match['add']
        pow = match['pow']  # note: shadows the builtin pow() in this scope
        # Fold the Add's shift and the Pow's exponent into one AttributedPower.
        new_power = AttributedPower(
            graph, {
                'name': add.name + '/fused_power',
                'shift': add.shift,
                'power': pow.power
            }).create_node()

        # Rewire: feed the original input into the new node and route the Pow's
        # consumers to the new node's output.
        source = add.in_port(0).get_connection().get_source()
        add.in_port(0).disconnect()
        new_power.in_port(0).connect(source)
        pow.out_port(0).get_connection().set_source(new_power.out_port(0))

        log.debug(
            'Power nodes {} and {} were fused to single Power node {}'.format(
                add.name, pow.name, new_power.name))
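The rewiring above is sound if AttributedPower follows the Caffe-style Power formula (scale * x + shift) ** power, which is an assumption here: a scalar Add feeding a Pow then collapses into a single node. A minimal NumPy sketch of that equivalence, with illustrative names:

    import numpy as np

    # Illustrative helper; assumes AttributedPower computes (scale * x + shift) ** power.
    def attributed_power(x, power=1.0, scale=1.0, shift=0.0):
        return (scale * x + shift) ** power

    x = np.array([0.5, 1.0, 2.0])
    shift, power = 3.0, 2.0
    unfused = (x + shift) ** power                       # Add followed by Pow
    fused = attributed_power(x, power=power, shift=shift)  # one fused node
    assert np.allclose(fused, unfused)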
Example #2
 def extract(cls, node):
     # Read the tensor dtype from the TF protobuf attribute and record an
     # exponent of 2 (element-wise square), cast to that dtype.
     data_type = tf_dtype_extractor(node.pb.attr["T"].type)
     AttributedPower.update_node_stat(node, {
         'power': data_type(2),
         'data_type': data_type
     })
     return cls.enabled
Example #3
 def extract(cls, node: Node):
     # Read the optional 'scale' float attribute from the ONNX node (default 1.0).
     scale = onnx_attr(node,
                       'scale',
                       'f',
                       default=np.array(1.0),
                       dst_type=lambda x: np.array(x))
     AttributedPower.update_node_stat(node, {'scale': scale})
     return cls.enabled
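Both extractors only record attributes on the node; numerically they plug into the same assumed formula (scale * x + shift) ** power, so power = 2 is an element-wise square and a lone scale is a plain scaling. A small sketch (the formula and the attribute defaults are assumptions):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)

    # Example #2: the exponent is cast to the tensor's dtype, data_type(2).
    power = np.float32(2)
    assert np.allclose((1.0 * x + 0.0) ** power, x ** 2)

    # Example #3: only 'scale' is set; with assumed defaults power = 1, shift = 0
    # the node reduces to scale * x.
    scale = np.array(0.5)
    assert np.allclose((scale * x + 0.0) ** 1.0, 0.5 * x)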
Example #4
 def extract(cls, node: Node):
     pb = node.pb
     assert pb, 'Protobuf layer cannot be empty'
     # Carry Caffe's Power layer parameters over verbatim.
     param = pb.power_param
     attrs = {
         'output_spatial_shape': None,
         'power': param.power,
         'scale': param.scale,
         'shift': param.shift,
     }
     AttributedPower.update_node_stat(node, attrs)
     return cls.enabled
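Caffe's Power layer is the origin of this attribute triple: it computes y = (shift + scale * x) ** power, which is why all three parameters are copied over unchanged. A quick numeric sketch:

    import numpy as np

    def caffe_power(x, power, scale, shift):
        # Caffe Power layer semantics: y = (shift + scale * x) ** power
        return (shift + scale * x) ** power

    x = np.linspace(0.0, 1.0, 5)
    # Caffe's defaults (power = 1, scale = 1, shift = 0) leave the input unchanged.
    assert np.allclose(caffe_power(x, power=1.0, scale=1.0, shift=0.0), x)
    assert np.allclose(caffe_power(x, power=2.0, scale=2.0, shift=1.0), (2 * x + 1) ** 2)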
Example #5
    def replace_pattern(graph: Graph, match: dict):
        op = match['op']
        op_type = op.type

        # Don't rewrite nodes whose value propagation must stay intact.
        if op.has_and_set('stop_value_propagation'):
            return

        # The rewrite applies only when the op has exactly one scalar (size-1) input.
        const_port, tensor_port = get_value_in_port(op), get_tensor_in_port(op)
        if const_port is None or tensor_port is None:
            return
        value = const_port.data.get_value()
        assert value is not None
        if value.size != 1:
            return
        value = value.item(0)

        assert op_type in EltwisesWithScalarInputToPower.eltw_types
        # Map the scalar operand onto the matching AttributedPower attribute;
        # delete_node flags the identity cases (x + 0, x * 1, x ** 1).
        if op_type == 'Add':
            delete_node = value == 0
            AttributedPower.update_node_stat(op, {'shift': value})
        elif op_type == 'Multiply':
            delete_node = value == 1
            AttributedPower.update_node_stat(op, {'scale': value})
        elif op_type == 'Power':
            delete_node = value == 1
            AttributedPower.update_node_stat(op, {'power': value})
        op.type_infer = AttributedPower.type_infer

        # Drop the scalar input and make sure the remaining tensor feeds port 0.
        const_port.disconnect()
        if tensor_port.idx != 0:
            tensor_port.get_connection().set_destination(op.in_port(0))
        op.delete_input_port(1)
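The three branches reduce to one rule: the scalar operand becomes the matching AttributedPower attribute while the other two attributes keep their (assumed) defaults. A table-style sketch of that mapping, with illustrative names:

    import numpy as np

    x = np.array([2.0, 3.0])
    c = 4.0

    # op type -> (AttributedPower attribute, identity value that sets delete_node)
    mapping = {
        'Add': ('shift', 0),
        'Multiply': ('scale', 1),
        'Power': ('power', 1),
    }
    expected = {'Add': x + c, 'Multiply': x * c, 'Power': x ** c}

    for op_type, (attr, identity) in mapping.items():
        attrs = {'power': 1.0, 'scale': 1.0, 'shift': 0.0, attr: c}
        y = (attrs['scale'] * x + attrs['shift']) ** attrs['power']
        assert np.allclose(y, expected[op_type])
        assert c != identity  # otherwise the op is an identity and delete_node is set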
Example #6
 def extract(cls, node):
     # scale = 0 makes the output independent of the input (all zeros under
     # the default shift and power).
     AttributedPower.update_node_stat(node, {'scale': 0})
     return cls.enabled
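A one-line numeric check of that behaviour (formula and defaults assumed):

    import numpy as np

    x = np.array([5.0, -1.0, 7.0])
    # (scale * x + shift) ** power with scale = 0 and assumed defaults shift = 0, power = 1
    assert np.allclose((0.0 * x + 0.0) ** 1.0, np.zeros_like(x))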
Example #7
 def extract(cls, node):
     # A hard-coded exponent of -0.5 implements the reciprocal square root.
     AttributedPower.update_node_stat(node, {'power': -0.5})
     return cls.enabled
Example #8
 def extract(cls, node: Node):
     # Take the exponent straight from the traced module's attribute.
     attrs = {
         'power': node.module.exponent,
     }
     AttributedPower.update_node_stat(node, attrs)
     return cls.enabled
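Both of the last extractors set only the exponent: the hard-coded -0.5 reproduces Rsqrt, while Example #8 forwards whatever exponent the traced module carries. A quick check of the Rsqrt identity:

    import numpy as np

    x = np.array([1.0, 4.0, 9.0])
    # power = -0.5 under the assumed formula is exactly 1 / sqrt(x)
    assert np.allclose(x ** -0.5, 1.0 / np.sqrt(x))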