@classmethod
def extract(cls, node):
    # Power with exponent 2 (squaring), in the dtype taken from the TF attr 'T'
    data_type = tf_dtype_extractor(node.pb.attr["T"].type)
    AttributedPower.update_node_stat(node, {
        'power': data_type(2),
        'data_type': data_type,
    })
    return cls.enabled
@classmethod
def extract(cls, node: Node):
    # Read the ONNX 'scale' float attribute, defaulting to 1.0
    scale = onnx_attr(node, 'scale', 'f', default=np.array(1.0),
                      dst_type=lambda x: np.array(x))
    AttributedPower.update_node_stat(node, {'scale': scale})
    return cls.enabled
@classmethod
def extract(cls, node: Node):
    pb = node.pb
    assert pb, 'Protobuf layer cannot be empty'
    param = pb.power_param

    # Caffe Power layer: y = (shift + scale * x) ** power
    attrs = {
        'output_spatial_shape': None,
        'power': param.power,
        'scale': param.scale,
        'shift': param.shift,
    }

    AttributedPower.update_node_stat(node, attrs)
    return cls.enabled
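# Sanity check of the Caffe Power semantics noted above, y = (shift + scale * x) ** power,
# using plain numpy (values are arbitrary, chosen only for illustration):
import numpy as np

x = np.array([1.0, 2.0, 3.0])
power, scale, shift = 2.0, 0.5, 1.0
y = (shift + scale * x) ** power
assert np.allclose(y, [2.25, 4.0, 6.25])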
@staticmethod
def replace_pattern(graph: Graph, match: dict):
    op = match['op']
    op_type = op.type
    if op.has_and_set('stop_value_propagation'):
        return

    const_port, tensor_port = get_value_in_port(op), get_tensor_in_port(op)
    if const_port is None or tensor_port is None:
        return

    # Only a scalar constant input can be folded into a power/scale/shift attribute
    value = const_port.data.get_value()
    assert value is not None
    if value.size != 1:
        return
    value = value.item(0)

    assert op_type in EltwisesWithScalarInputToPower.eltw_types
    # Map the scalar eltwise onto the corresponding AttributedPower attribute;
    # delete_node marks the case where the operation is an identity
    if op_type == 'Add':
        delete_node = value == 0
        AttributedPower.update_node_stat(op, {'shift': value})
    elif op_type == 'Multiply':
        delete_node = value == 1
        AttributedPower.update_node_stat(op, {'scale': value})
    elif op_type == 'Power':
        delete_node = value == 1
        AttributedPower.update_node_stat(op, {'power': value})
    op.type_infer = AttributedPower.type_infer

    # Drop the constant input and make the tensor input the only (0th) port
    const_port.disconnect()
    if tensor_port.idx != 0:
        tensor_port.get_connection().set_destination(op.in_port(0))
    op.delete_input_port(1)
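# For reference, a framework-free sketch of the mapping performed by replace_pattern
# above. The names below (PowerAttrs, scalar_eltwise_to_power) are hypothetical,
# invented for illustration; they are not part of the Model Optimizer API. Assuming
# Caffe-style semantics y = (shift + scale * x) ** power, each scalar eltwise
# collapses into exactly one attribute:
from dataclasses import dataclass


@dataclass
class PowerAttrs:
    # Hypothetical stand-in for AttributedPower's attributes
    power: float = 1.0
    scale: float = 1.0
    shift: float = 0.0


def scalar_eltwise_to_power(op_type: str, value: float):
    """Return the equivalent power attributes and whether the op is an identity."""
    if op_type == 'Add':
        return PowerAttrs(shift=value), value == 0    # x + 0 is an identity
    if op_type == 'Multiply':
        return PowerAttrs(scale=value), value == 1    # x * 1 is an identity
    if op_type == 'Power':
        return PowerAttrs(power=value), value == 1    # x ** 1 is an identity
    raise ValueError('Unsupported eltwise type: {}'.format(op_type))


# Example: Multiply(x, 3.0) becomes scale=3.0 and is not removable,
# while Add(x, 0.0) would be an identity the pass could delete.
attrs, removable = scalar_eltwise_to_power('Multiply', 3.0)
assert attrs.scale == 3.0 and not removable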
@classmethod
def extract(cls, node):
    # scale = 0 zeroes out the input: y = (0 + 0 * x) ** 1 = 0
    AttributedPower.update_node_stat(node, {'scale': 0})
    return cls.enabled
@classmethod
def extract(cls, node):
    # power = -0.5 implements reciprocal square root: y = x ** -0.5
    AttributedPower.update_node_stat(node, {'power': -0.5})
    return cls.enabled
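# Quick numpy check that power = -0.5 reproduces the reciprocal square root,
# x ** -0.5 == 1 / sqrt(x) (values chosen only for illustration):
import numpy as np

x = np.array([1.0, 4.0, 9.0])
assert np.allclose(x ** -0.5, 1.0 / np.sqrt(x))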
@classmethod
def extract(cls, node: Node):
    # The exponent comes from the underlying module
    attrs = {
        'power': node.module.exponent,
    }
    AttributedPower.update_node_stat(node, attrs)
    return cls.enabled