Example #1
def infer(node: Node):
    if node.has_valid('element_size'):
        # element_size should be set by Kaldi loader or MemoryOffsetAdjustment or SplitRecurrentMemoryOffset
        node.out_port(0).data.set_shape(node.element_size)
    else:
        # for TDNN blocks
        copy_shape_infer(node)
Example #2
def batch_norm_4_infer(node: Node):
    copy_shape_infer(node)
    mark_input_bins(node, ['weights', 'biases', 'mean', 'variance'])
    if node.has('fix_gamma') and node.fix_gamma:
        # set all elements of the first input (weights) to 1
        node.in_node(1).value = np.full_like(node.in_node(1).value,
                                             1,
                                             dtype=np.float32)
Example #3
def infer(node: Node):
    assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 1,\
        'LogSoftmax node with id {} has more than one port connected'.format(node.id)
    if node.axis < 0:
        node.axis = len(node.in_port(0).data.get_shape()) + node.axis
    assert 0 <= node.axis < len(node.in_port(0).data.get_shape()),\
        'LogSoftmax node with id {} has wrong axis attribute'.format(node.id)
    copy_shape_infer(node)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example #4
    def infer(node):
        if len(node.in_nodes()) == 2:
            gamma_vector = node.in_node(1)
            if np.all(gamma_vector.shape == [1]):
                node['channel_shared'] = 1
            else:
                node['channel_shared'] = 0
            node.in_node(1)['correct_data_type'] = True

        copy_shape_infer(node)
Example #5
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        assert node.eps is not None, 'MVN required attribute `eps` unspecified for node {}'.format(
            name)
        assert node.normalize_variance is not None, \
            'MVN required attribute `normalize_variance` unspecified for node {}'.format(name)

        if node.version == 'opset6':
            assert node.eps_mode is not None, 'MVN required attribute `eps_mode` unspecified for node {}'.format(
                name)
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'axis')

        copy_shape_infer(node)
Example #6
    def test_grn_infer(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'grn'),
                             ('grn', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None},
                             'node_1': {'shape': np.array([1, 3, 227, 227])},
                             'grn': {'bias': 1}
                             })

        grn_node = Node(graph, 'grn')
        copy_shape_infer(grn_node)
        exp_shape = np.array([1, 3, 227, 227])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #7
def extract(cls, node):
    attrs = {
        'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type),
        'shape': tf_tensor_shape(node.pb.attr["shape"].shape),
        'identity': True,
        'infer': lambda node: copy_shape_infer(node, value_infer=copy_value),
    }
    Op.update_node_stat(node, attrs)
    return cls.enabled
Example #8
def __init__(self, graph: Graph, attrs: dict):
    super().__init__(
        graph, {
            'op': self.op,
            'type': None,
            'version': None,
            'infer': lambda n: copy_shape_infer(n, copy_value),
            'type_infer': None,
            'in_ports_count': 1,
            'out_ports_count': 1,
        }, attrs)
Example #9
def infer(node: Node):
    if len(node.in_nodes()) > 0:
        # A memory node with an input should not have an output.
        # However, in order not to break the MO pipeline, we simply copy the input
        # shape to the output node, which will be removed later in the pipeline.
        copy_shape_infer(node)
        return
    elif node.has_valid('shape'):
        # For Memory nodes without inputs, inferring shapes is very difficult.
        # However, the shape is often known when attributes are extracted,
        # so the 'shape' attribute can be set during extraction.
        batch = 1
        for out_node in node.out_nodes().values():
            out_node.shape = shape_array([batch, *node.shape[:]])
        return
    else:
        raise Error(
            'Model Optimizer is unable to calculate output shape of Memory node {}. '
            + refer_to_faq_msg(88), node.id)
Example #10
def infer(node: Node):
    copy_shape_infer(node)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example #11
def roll_infer(node: Node):
    PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0',
                                          'axis')
    copy_shape_infer(node)
Example #12
def test_copy_shape_infer(self, single_output_infer_mock):
    single_output_infer_mock.return_value = 0
    node = FakeNode(np.array([1, 2]))
    copy_shape_infer(node)
    self.assertTrue(single_output_infer_mock.called)
Example #13
def infer(node: Node):
    mark_input_bins(node)
    copy_shape_infer(node)
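
For context, every example above delegates shape propagation to copy_shape_infer, and Example #12 suggests it is built on top of a single_output_infer helper (the test patches that name). The snippet below is only a minimal sketch of how such a pair of helpers could look, reconstructed from the call sites above; it assumes the Node port API used in the examples (in_port(0).data.get_shape(), out_port(0).data.set_shape(...)) and is not claimed to be the actual Model Optimizer implementation.

def single_output_infer(node, shape_infer, value_infer=None):
    # Set the shape of the single output using the supplied shape function.
    node.out_port(0).data.set_shape(shape_infer(node))
    # Optionally propagate the value as well (e.g. a copy_value callback,
    # as passed in Examples #7 and #8).
    if value_infer is not None:
        value_infer(node)


def copy_shape_infer(node, value_infer=None):
    # Copy the shape of input port 0 to output port 0, leaving value
    # propagation to the optional value_infer callback.
    single_output_infer(node, lambda n: n.in_port(0).data.get_shape(), value_infer)

Under this reading, ops whose output shape simply mirrors the input (Examples #10, #11, #13) call copy_shape_infer(node) alone, while extractors that also need the constant value propagated pass a value callback, as in Examples #7 and #8.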