Example #1
    def test_positive_matmul_infer(self, A_shape, B_shape, C_shape,
                                   transpose_a, transpose_b):
        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes,
                                       edges_with_attrs=self.edges,
                                       update_nodes_attributes=[
                                           ('A_d', {
                                               'shape': shape_array(A_shape)
                                           }),
                                           ('B_d', {
                                               'shape': shape_array(B_shape)
                                           }),
                                           ('mat_mul', {
                                               'transpose_a': transpose_a,
                                               'transpose_b': transpose_b
                                           }),
                                       ])
        node = Node(graph, 'mat_mul')
        MatMul.infer(node)

        msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={} " \
              "expected_shape={}, actual_shape={}"

        self.assertTrue(
            np.array_equal(graph.node['mat_mul_d']['shape'],
                           shape_array(C_shape)),
            msg.format(A_shape, B_shape, transpose_a, transpose_b, C_shape,
                       graph.node['mat_mul_d']['shape']))
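
For reference, the shape rule this test asserts can be reproduced without the Model Optimizer graph machinery. Below is a minimal, 2D-only sketch in plain Python; the function name is illustrative and not part of the mo API:

    def expected_matmul_shape(a_shape, b_shape, transpose_a=False, transpose_b=False):
        # Apply the transpose flags to the operand shapes first, then use the
        # standard matmul rule: (M, K) x (K, N) -> (M, N).
        a_shape = a_shape[::-1] if transpose_a else a_shape
        b_shape = b_shape[::-1] if transpose_b else b_shape
        assert a_shape[1] == b_shape[0], 'inner dimensions must match'
        return (a_shape[0], b_shape[1])

    assert expected_matmul_shape((2, 3), (2, 4), transpose_a=True) == (3, 4)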
Example #2
    def test_value_propagation(self, a_shape, a_value, b_shape, b_value,
                               transpose_a, transpose_b):
        graph = build_graph(
            nodes_attrs=graph_nodes_attrs,
            edges=graph_edges,
            update_attributes={
                'A': {'shape': int64_array(a_shape), 'value': a_value},
                'A_data': {'shape': int64_array(a_shape), 'value': a_value},
                'B': {'shape': int64_array(b_shape), 'value': b_value},
                'B_data': {'shape': int64_array(b_shape), 'value': b_value},
                'matmul': {'transpose_a': transpose_a, 'transpose_b': transpose_b},
                'matmul_data': {'value': None, 'shape': None},
            }
        )
        node = Node(graph, 'matmul')
        MatMul.infer(node)
        node_data = node.out_port(0).get_destination().data.get_value()

        # Build the reference value the same way the layer is defined:
        # optionally transpose each operand, then take the matrix product.
        a = a_value
        b = b_value
        if transpose_a:
            a = np.transpose(a)
        if transpose_b:
            b = np.transpose(b)
        ref_data = np.matmul(a, b)

        msg = "Value propagation for 'matmul' node is not correct."
        self.assertTrue(node_data.shape == ref_data.shape
                        and np.all(node_data == ref_data), msg)
Example #3
    def extract(cls, node):
        attr = node.pb.attr
        # TensorFlow's BatchMatMul carries adjoint flags (adj_x/adj_y); for
        # real-valued tensors an adjoint is a plain transpose, so they map
        # directly onto MatMul's transpose_a/transpose_b.
        attrs = {
            'transpose_a': int(attr['adj_x'].b),
            'transpose_b': int(attr['adj_y'].b),
        }
        MatMul.update_node_stat(node, attrs)
        return cls.enabled
Example #4
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        node = match['op']
        name = node.soft_get('name', node.id)

        # biases normalization
        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            bias_node = Add(graph, {'name': name + '/Bias_'}).create_node()
            node_name = node.name + '/WithoutBiases'
            bias_node_name = node.name
            rename_nodes([(node, node_name), (bias_node, bias_node_name)])
            node.out_port(0).get_connection().set_source(bias_node.out_port(0))
            node.in_port(2).get_connection().set_destination(
                bias_node.in_port(1))
            node.out_port(0).connect(bias_node.in_port(0))

        # weights normalization
        assert node.has_valid('out-size')
        out_size = node['out-size']
        reshape_dim = int64_array([-1, out_size])
        if node.has_and_set('transpose_weights'):
            reshape_dim = int64_array([out_size, -1])
        node.insert_op_on_input_port(
            in_port_idx=1,
            new_op_class=Reshape,
            new_op_attrs={'name': name + '/weights_reshape'},
            value=reshape_dim)
        if node.has_and_set('transpose_weights'):
            node.insert_op_on_input_port(
                in_port_idx=1,
                new_op_class=Transpose,
                new_op_attrs={'name': name + '/weights_transpose'},
                value=int64_array([1, 0]))

        # input normalization for 4D Caffe and MXNet FullyConnected
        if graph.graph['fw'] == 'caffe':
            node.insert_op_on_input_port(in_port_idx=0,
                                         new_op_class=Reshape,
                                         new_op_attrs={
                                             'name':
                                             name + '/flatten_fc_input',
                                             'special_zero': True
                                         },
                                         value=int64_array([0, -1]))

        if graph.graph['fw'] == 'mxnet':
            if node.flatten is not False:
                node.insert_op_on_input_port(in_port_idx=0,
                                             new_op_class=Reshape,
                                             new_op_attrs={
                                                 'name':
                                                 name + '/flatten_fc_input',
                                                 'special_zero': True
                                             },
                                             value=int64_array([0, -1]))

        MatMul.update_node_stat(node, {})
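
The net effect of this normalization can be summarized outside the graph machinery. A minimal sketch in plain NumPy, assuming the 4D-input case where the flattening reshape is inserted; names are illustrative, not mo API:

    import numpy as np

    def fully_connected_ref(x, w, b, out_size, transpose_weights=False):
        x2d = x.reshape(x.shape[0], -1)        # [0, -1] special-zero reshape keeps the batch dim
        if transpose_weights:
            w2d = w.reshape(out_size, -1).T    # [out_size, -1] reshape, then [1, 0] transpose
        else:
            w2d = w.reshape(-1, out_size)      # [-1, out_size] reshape
        return np.matmul(x2d, w2d) + b         # MatMul followed by the bias Add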
Example #5
    def extract(cls, node: Node):
        unsupported_attrs = []
        for attr_name in ['adjoint_a', 'adjoint_b', 'a_is_sparse', 'b_is_sparse']:
            if attr_name in node.pb.attr and node.pb.attr[attr_name].b:
                unsupported_attrs.append(attr_name)
        if len(unsupported_attrs) != 0:
            raise Error('MatMul operation {} uses unsupported attrs: {}'.format(node.id, unsupported_attrs))

        MatMul.update_node_stat(node,
                                {
                                    'transpose_a': node.pb.attr['transpose_a'].b,
                                    'transpose_b': node.pb.attr['transpose_b'].b,
                                })
        return cls.enabled
Example #6
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Gemm'):
            name = node.soft_get('name', node.id)
            node_output_port = node.out_port(0)
            if node.has_valid('alpha') and not math.isclose(node.alpha, 1):
                mul_alpha = create_op_with_const_inputs(
                    graph, Mul, {1: mo_array(node.alpha)}, {
                        'name': name + '/Alpha',
                        'can_be_scaleshift': False
                    })
                node_output_port.get_connection().insert_node(mul_alpha)
                node_output_port = mul_alpha.out_port(0)
                del node['alpha']

            if node.is_in_port_connected(2):
                # biases normalization
                bias_node = Add(graph, {
                    'name': name + '/Bias_',
                    'can_be_scaleshift': False
                }).create_node()
                without_biases_node_name = name + '/WithoutBiases'
                rename_nodes([(node, without_biases_node_name),
                              (bias_node, name)])
                node_output_port.get_connection().set_source(
                    bias_node.out_port(0))
                node.in_port(2).get_connection().set_destination(
                    bias_node.in_port(1))
                node_output_port.connect(bias_node.in_port(0))
                if node.has_valid('beta') and not math.isclose(node.beta, 1):
                    bias_node.insert_op_on_input_port(in_port_idx=1,
                                                      new_op_class=Mul,
                                                      value=mo_array(
                                                          node.beta),
                                                      new_op_attrs={
                                                          'name':
                                                          name + '/Beta',
                                                          'can_be_scaleshift':
                                                          False
                                                      })
                    del node['beta']

            MatMul.update_node_stat(
                node, {
                    'transpose_a': node.has_and_set('transpose_a'),
                    'transpose_b': node.has_and_set('transpose_b'),
                })
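
The pass above targets the standard Gemm semantics, Y = alpha * op(A) @ op(B) + beta * C, where op() optionally transposes its operand. A plain-NumPy reference for what the decomposed sub-graph (bare MatMul, alpha/beta Mul nodes, bias Add) computes; the function name is illustrative:

    import numpy as np

    def gemm_ref(a, b, c, alpha=1.0, beta=1.0, transpose_a=False, transpose_b=False):
        a = a.T if transpose_a else a
        b = b.T if transpose_b else b
        return alpha * np.matmul(a, b) + beta * c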
Example #7
    def replace_op(self, graph: Graph, node: Node):
        matmul = MatMul(graph, dict(name=node.name, transpose_b=True)).create_node([node.in_node(0), node.in_node(1)])

        # Bias
        if len(node.in_nodes()) > 2:
            matmul = Add(graph, dict(name=node.name + '/bias')).create_node([matmul, node.in_node(2)])

        return [matmul.id]
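
In plain NumPy terms, this replacement computes y = x @ w.T (the MatMul is created with transpose_b=True), followed by y + b when a third input is present. A small illustrative sketch, not mo API:

    import numpy as np

    def ref(x, w, b=None):
        y = np.matmul(x, w.T)              # transpose_b=True
        return y if b is None else y + b   # optional bias Add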
Example #8
    def extract(cls, node: Node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        transpose_a = attrs.bool('transpose_a', False)
        transpose_b = attrs.bool('transpose_b', False)
        forward_stype = attrs.str('forward_stype', None)

        if forward_stype is not None:
            log.error(
                "Node {} has a non-default value {} of attribute forward_stype. "
                "Model Optimizer conversion assumes the default value None".format(
                    node.soft_get('name', node.id), forward_stype),
                extra={'is_warning': True})

        MatMul.update_node_stat(node, {
            'transpose_a': transpose_a,
            'transpose_b': transpose_b
        })
        return cls.enabled
Example #9
    def replace_op(self, graph: Graph, node: Node):
        # Pull the module's trained parameters out as plain arrays, then embed
        # them in the graph as Const nodes feeding a MatMul + bias Add chain.
        weight = node.module.weight.detach().numpy()
        bias = node.module.bias.detach().numpy()

        weight = Const(graph, {'value': weight}).create_node()
        bias = Const(graph, {'value': bias}).create_node()
        matmul = MatMul(graph, dict(name=node.name)).create_node([node.in_node(0), weight])
        matmul = Add(graph, dict(name=node.name + '/bias')).create_node([matmul, bias])
        return [matmul.id]
Example #10
    def extract(cls, node):
        MatMul.update_node_stat(node)
        return cls.enabled