Example No. 1
    def infer(node: Node):
        if node.has_and_set('extra_inputs'):
            assert len(node.in_nodes()) == 8
        else:
            assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) in [1, 2]

        hidden_shape = node.in_node(1).shape.copy()
        cell_shape = node.in_node(2).shape.copy()

        mark_input_bins(node, start_port=3)
        node.out_node(0).shape = hidden_shape
        if len(node.out_nodes()) == 2:
            node.out_node(1).shape = cell_shape

        hidden_size = hidden_shape[1]

        if node.has_valid('hidden_size'):
            if node.hidden_size != hidden_size:
                raise Error(
                    "Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}"
                    .format(node.in_node(1).shape, node.soft_get('name')))
        else:
            node['hidden_size'] = hidden_size

        assert cell_shape[1] == hidden_size

        input_shape = node.in_node(0).shape
        assert input_shape is not None
        assert hidden_shape[0] == cell_shape[0] == input_shape[
            0], 'States are not broadcastable by batch'
Example No. 2
 def test_infer_invalid1(self):
     graph = build_graph(nodes_attributes, edges1, inputs2)
     lookuptableinsert_node = Node(graph, 'lookuptableinsert_node')
     self.assertRaises(AssertionError, LookupTableInsert.infer,
                       lookuptableinsert_node)
def concat_convolutions(graph: Graph, start_node: Node, last_node: Node):
    """
    This function converts group of convolutions into one
    """

    # Check that the convolutions are concatenated in the same order
    conv_nodes = get_next_operation(start_node)
    assert len(conv_nodes) == len(last_node.in_nodes())
    gconv = conv_nodes[0]

    for id in range(len(conv_nodes)):
        conv = conv_nodes[id]
        if conv.out_node().id != last_node.in_node(id).id:
            return False
        # Check that all convolutions have same weights shapes
        if not np.array_equal(conv.in_node(1).shape, gconv.in_node(1).shape):
            log.debug(
                'Grouped convolutions fusion : convolutions have different weights shape'
            )
            return False

    # Check that split and concat dims are valid
    channel_dim = gconv.channel_dims[0]
    split_axis = start_node.in_port(1).data.get_value()
    if channel_dim != split_axis or channel_dim != last_node.axis:
        log.debug(
            'Grouped convolutions fusion : split or concat has weird axis!')
        return False

    # Check that all convolutions have the same parameters
    conv_attrs = ['pad', 'stride']
    for attr in conv_attrs:
        for id in range(len(conv_nodes)):
            conv = conv_nodes[id]
            if not np.array_equal(gconv[attr], conv[attr]):
                log.debug(
                    'Grouped convolutions fusion : attr {} doesn\'t match'.
                    format(attr))
                return False

    # Check that all convolutions have biases (if any exist)
    has_biases = False
    for id in range(len(conv_nodes)):
        conv = conv_nodes[id]
        if len(conv.in_nodes()) == 3:
            if not has_biases:
                has_biases = True
        elif has_biases:
            return False  # All convolutions must have biases

    # Check that all biases have same shape
    if has_biases:
        for id in range(len(conv_nodes)):
            conv = conv_nodes[id]
            if not np.array_equal(conv.in_node(2).shape, gconv.in_node(2).shape):
                log.debug(
                    'Grouped convolutions fusion : convolutions have different bias shapes {} and {}'
                    .format(conv.in_node(2).shape,
                            gconv.in_node(2).shape))
                return False

    graph.remove_edge(gconv.in_node(0).id, gconv.id)
    graph.remove_edge(gconv.id, gconv.out_node().id)

    input = start_node.in_node(0)
    output = last_node.out_node()

    # Removing edges from data nodes to Split and Concat
    graph.remove_edge(input.id, start_node.id)
    graph.remove_edge(last_node.id, output.id)

    # Add edges to grouped convolution
    graph.add_edges_from([(input.id, gconv.id, {
        'in': 0
    }), (gconv.id, output.id, {
        'out': 0
    })])

    # Concatenation of convolutions
    weights_node = gconv.in_node(1)
    bias_node = gconv.in_node(2) if has_biases else None

    weights_value = np.array(weights_node.value)
    bias_value = np.array(bias_node.value) if has_biases else None

    feature_dim = 3 if graph.graph['layout'] == 'NHWC' else 0

    for conv in conv_nodes[1:]:
        weights_value = np.concatenate((weights_value, conv.in_node(1).value),
                                       axis=feature_dim)
        if has_biases:
            bias_value = np.concatenate((bias_value, conv.in_node(2).value),
                                        axis=-1)  # Not validated

    weights_node.value = np.array(weights_value)
    weights_node.shape = np.array(weights_value.shape)

    if has_biases:
        bias_node.value = np.array(bias_value)
        bias_node.shape = np.array(bias_value.shape)

    log.debug('Start node : {} Last node : {}  Nodes inside : {}'.format(
        start_node.id, last_node.id, len(start_node.out_nodes())))
    log.debug('Output shape : {}'.format(weights_value.shape))

    gconv.group = len(conv_nodes)
    gconv.output = weights_node.shape[feature_dim]
    gconv.output_shape[feature_dim] = weights_node.shape[feature_dim]

    return True
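A minimal driver sketch for the function above, assuming the Split and Concat nodes of a candidate pattern have already been located; the node names are hypothetical and only illustrate how the boolean result could be used.

# Illustrative only: attempt the fusion for one known Split ... Concat pair.
split_node = Node(graph, 'split_1')      # hypothetical node name
concat_node = Node(graph, 'concat_1')    # hypothetical node name
if concat_convolutions(graph, split_node, concat_node):
    log.debug('Fused grouped convolutions between {} and {}'.format(split_node.id, concat_node.id))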
Example No. 4
 def add(self, node: Node):
     op = node.op if node.has_valid('op') else '<UNKNOWN OP>'
     name = node.name if node.has_valid('name') else '<UNKNOWN NAME>'
     self.unsupported[op].append(name)
    def test_component_map_loading_offset(self):
        test_map = "input-node name=input dim=16\n" + \
                   "component-node name=lda component=lda input=Offset(input, -3)\n" + \
                   "component-node name=tdnn1.affine component=tdnn1.affine input=Append(Offset(input, -1), Offset(lda, 1))\n" + \
                   "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine\n" + \
                   "\n"
        graph = Graph(name="test_graph_component_map_loading_offset")

        test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')),
                                         graph)

        ref_map = {
            b"lda": ["lda"],
            b"tdnn1.affine": ["tdnn1.affine"],
            b"tdnn1.relu": ["tdnn1.relu"]
        }
        self.assertEqual(test_top_map, ref_map)
        self.assertTrue("input" in graph.nodes())
        self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16])

        ref_graph = build_graph(
            {
                'input': {
                    'shape': np.array([1, 16]),
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'lda': {
                    'kind': 'op'
                },
                'tdnn1.affine': {
                    'kind': 'op'
                },
                'tdnn1.relu': {
                    'kind': 'op'
                },
                'append_input_lda': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'offset_in_input_3': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -3,
                    'pair_name': 'offset_out_input_3'
                },
                'offset_in_input_1': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -1,
                    'pair_name': 'offset_out_input_1'
                },
                'offset_in_lda_1': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -1,
                    'pair_name': 'offset_out_lda_1'
                },
            }, [
                ('input', 'offset_in_input_3', {
                    'out': 0
                }),
                ('offset_in_input_3', 'lda', {
                    'out': 0
                }),
                ('lda', 'offset_in_lda_1', {
                    'out': 0
                }),
                ('input', 'offset_in_input_1', {
                    'out': 1
                }),
                ('offset_in_lda_1', 'append_input_lda', {
                    'in': 1,
                    'out': 0
                }),
                ('offset_in_input_1', 'append_input_lda', {
                    'in': 0,
                    'out': 0
                }),
                ('append_input_lda', 'tdnn1.affine', {
                    'out': 0
                }),
                ('tdnn1.affine', 'tdnn1.relu', {
                    'out': 0
                }),
            ])

        (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu')
        self.assertTrue(flag, resp)
Example No. 6
 def _add_output_node(self, node_name: str, node_port: int, sub_graph_output_port: int):
     if sub_graph_output_port in self._output_nodes_map:
         raise Error('Output node for port "{}" has already been specified. '.format(sub_graph_output_port) +
                     refer_to_faq_msg(34))
     self._output_nodes_map[sub_graph_output_port] = (Node(self.graph, node_name), node_port)
def skip_nodes_by_condition(current_node: Node, condition: callable):
    while condition(current_node):
        current_node = current_node.in_node()
    return current_node
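A usage sketch for skip_nodes_by_condition, assuming `current_node` is some Node in the graph and we want to walk up through producer Reshape ops until a non-Reshape producer is reached; the condition is illustrative, not taken from the source.

# Hypothetical condition: skip Reshape producers while walking up the graph.
data_source = skip_nodes_by_condition(
    current_node, lambda node: node.has_valid('op') and node.op == 'Reshape')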
Example No. 8
def update_fully_connected_shapes(graph: nx.MultiDiGraph):
    nodes = nx.topological_sort(graph)
    while True:
        should_infer = False
        for n in nodes:
            node = Node(graph, n)
            if node.has(
                    'type') and node.type == 'FullyConnected' and node.in_node(
                        0).shape.size == 3:
                log.debug("node.in_node(0).shape = {}".format(
                    node.in_node(0).shape))
                log.debug("channel_dims = {}".format(node.channel_dims))
                assert (node.in_node(0).shape.size == 3
                        and node.channel_dims > 0)
                node.in_node(0).shape = np.delete(node.in_node(0).shape, 1)
                if node.out_node().shape.size == 3:
                    node.channel_dims = node.channel_dims - 1
                    log.debug(
                        "Initiated partial infer from update_fully_connected_shapes"
                    )
                    graph = partial_infer(graph, node.in_node(0).id)
                    should_infer = True
                    break
        if not should_infer:
            break
Example No. 9
def partial_infer(graph: nx.MultiDiGraph, start_node: str = None):

    cycle_nodes = get_nodes_with_attributes(graph, is_cyclic=True)
    cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes]
    ebunch_cyclic = list(
        graph.out_edges(nbunch=cycle_nodes, data=True, keys=True))
    ebunch_reconnected = exit_bound_edges(graph,
                                          sources=cycle_nodes,
                                          end_node_attrs={'op': 'Exit'})
    graph.remove_edges_from(ebunch_cyclic)
    graph.add_edges_from(ebunch_reconnected)

    try:
        nodes = list(nx.topological_sort(graph))
    except nx.NetworkXUnfeasible:
        raise Error('Graph contains a cycle. Can not proceed. ' +
                    refer_to_faq_msg(97))

    graph.remove_edges_from(ebunch_reconnected)
    graph.add_edges_from(ebunch_cyclic)

    # Mark all nodes as not inferred yet
    if start_node is not None:
        start_index = nodes.index(start_node)
        nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]),
                               name='is_partial_inferred',
                               values=False)
    else:
        nx.set_node_attributes(G=graph,
                               name='is_partial_inferred',
                               values=False)
    debug_logger = log.getLogger().isEnabledFor(log.DEBUG)

    nx.set_node_attributes(
        G=graph,
        name='executable',
        values={
            n: True
            for n in get_nodes_with_attributes(graph, kind='data')
        })

    for n in nodes:
        # Data Flow Infer
        try:
            node = Node(graph, n)
            node_name = node.soft_get('name')
            if node.has(
                    'is_partial_inferred') and not node.is_partial_inferred:
                if node.has('infer') and node.infer is not None:
                    log.debug('-' * 20)
                    log.debug('Partial infer for {}'.format(
                        node.soft_get('name')))
                    log.debug('Op: {}'.format(node.soft_get('op')))
                    node.infer(node)
                    out_nodes = node.out_nodes()

                    # propagate nchw_layout attributes to data nodes
                    if node.has('nchw_layout'):
                        for out_node in out_nodes.values():
                            out_node['nchw_layout'] = node.nchw_layout

                    # In debug print current node attributes, input shapes/values and output shape/values
                    if debug_logger:
                        log.debug('Inputs:')
                        log_debug_dict(node.in_nodes(), 'input')
                        log.debug('Outputs:')
                        log_debug_dict(node.out_nodes(), 'output')

                    # initialize the flag once so it is not reset for every output port
                    not_all_output_shapes = False
                    for out_port, out_node in out_nodes.items():
                        if not out_node.has_valid('shape'):
                            log.error(
                                'Shape is not defined for output {} of "{}".'.
                                format(out_port, node_name))
                            not_all_output_shapes = True
                        elif not is_fully_defined_shape(out_node.shape):
                            log.error((
                                'Shape {} is not fully defined for output {} of "{}". '
                                +
                                'Use --input_shape with positive integers to override model input shapes.'
                            ).format(out_node.shape, out_port, node_name))
                            not_all_output_shapes = True

                    if not_all_output_shapes:
                        raise Error(
                            'Not all output shapes were inferred or fully defined for node "{}". '
                            + refer_to_faq_msg(40), node_name)
                elif node.kind != 'data':
                    raise Error(
                        'There is no registered "infer" function for node "{}" with op = "{}". '
                        +
                        'Please implement this function in the extensions. ' +
                        refer_to_faq_msg(37), node_name, node.soft_get('op'))
                node.is_partial_inferred = True

        except Exception as err:
            log.error('Cannot infer shapes or values for node "{}".'.format(
                node.soft_get('name')))
            log.error(str(err))
            log.error('')
            log.error(
                'It can happen due to bug in custom shape infer function {}.'.
                format(node.soft_get('infer')))
            log.error(
                'Or because the node inputs have incorrect values/shapes.')
            log.error(
                'Or because input shapes are incorrect (embedded to the model or passed via --input_shape).'
            )
            debug_messages = '\n'.join([
                'Layer "' + node_name + '": ' + node_attrs['debug_message']
                for node_name, node_attrs in graph.nodes(data=True)
                if 'debug_message' in node_attrs
            ])
            if debug_messages != "":
                log.error('')
                log.error('Other possible failure reasons are listed below:')
                log.error(debug_messages)
            if not debug_logger:
                log.error(
                    'Run Model Optimizer with --log_level=DEBUG for more information.'
                )
            else:
                log.debug('Node "{}" attributes: {}'.format(
                    node.soft_get('name'), node.graph.node[node.id]))
            raise Error('Stopped shape/value propagation at "{}" node. '.
                        format(node.soft_get('name')) +
                        refer_to_faq_msg(38)) from err
        control_flow_infer(graph, n)

    not_fully_inferred = get_nodes_with_attributes(graph,
                                                   is_not_fully_inferred=True)
    for n in not_fully_inferred:
        node = Node(graph, n)
        if node.has('infer') and node.infer is not None:
            node.infer(node)

    # delete_not_executable(graph)
    return graph
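The pass assumes every op node exposes its shape propagation as a callable stored in the 'infer' attribute and invoked as node.infer(node), as seen above. A minimal sketch of such a callable, assuming a hypothetical single-input, single-output op:

# Hypothetical identity-like infer function: copy the input shape to the output data node.
def identity_infer(node):
    node.out_node(0).shape = node.in_node(0).shape.copy()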
Example No. 10
    def generate_sub_graph(self, graph: Graph, match: SubgraphMatch):
        reshape_classes_op = Reshape(graph, {'dim': np.array([0, -1])})
        reshape_classes_node = reshape_classes_op.create_node(
            [match.single_input_node(1)[0]], dict(name='do_reshape_classes'))

        priors_node = match.single_input_node(2)[0]

        placeholder = [
            Node(graph, node_id) for node_id in graph.nodes()
            if Node(graph, node_id).op == 'Placeholder'
        ][0]
        im_height = placeholder.shape[1]
        im_width = placeholder.shape[2]

        # scale prior boxes to the [0, 1] interval
        priors_scale_const_node = Const(
            graph, {
                'value':
                np.array(
                    [1 / im_width, 1 / im_height, 1 / im_width, 1 / im_height])
            }).create_node([])
        priors_scale_node = Eltwise(graph, {
            'name': 'scale_priors',
            'operation': 'mul'
        }).create_node([priors_node, priors_scale_const_node])

        # calculate prior boxes widths and heights
        split_node = SplitV(graph, {
            'axis': 2,
            'size_splits': [1, 1, 1, 1],
            'out_ports_count': 4
        }).create_node([priors_scale_node])
        priors_width_node = __class__._create_sub(graph, split_node, 2,
                                                  split_node, 0)
        priors_height_node = __class__._create_sub(graph, split_node, 3,
                                                   split_node, 1)

        # concat widths and heights into a single tensor and multiply with the box coordinate regression values
        concat_width_height_node = Concat(graph, {
            'name': 'concat_priors_width_height',
            'axis': -1,
            'in_ports_count': 4
        }).create_node([
            priors_width_node, priors_height_node, priors_width_node,
            priors_height_node
        ])
        applied_width_height_regressions_node = Eltwise(graph, {'name': 'final_regressions', 'operation': 'mul'}). \
            create_node([concat_width_height_node, match.single_input_node(0)[0]])

        # reshape to 2D tensor as Inference Engine Detection Output layer expects
        reshape_regression_op = Reshape(graph, {'dim': np.array([0, -1])})
        reshape_regression_node = reshape_regression_op.create_node(
            [applied_width_height_regressions_node],
            {'name': 'reshape_regression'})

        detection_output_op = DetectionOutput(
            graph, match.custom_replacement_desc.custom_attributes)
        detection_output_op.attrs['old_infer'] = detection_output_op.attrs[
            'infer']
        detection_output_op.attrs['infer'] = __class__.do_infer
        detection_output_node = detection_output_op.create_node(
            [reshape_regression_node, reshape_classes_node, priors_scale_node],
            dict(name=detection_output_op.attrs['type'],
                 clip=1,
                 normalized=1,
                 variance_encoded_in_target=0))

        return {'detection_output_node': detection_output_node}
Example No. 11
    def infer(node: Node):
        num_of_inputs = len(node.in_ports())
        opset = node.get_opset()
        max_num_of_inputs = 6 if opset == 'opset5' else 5
        input_msg_fmt = 'NonMaxSuppression node {} from {} must have from 2 to {} inputs'
        node_name = node.soft_get('name', node.id)
        inputs_msg = input_msg_fmt.format(node_name, opset, max_num_of_inputs)
        assert 2 <= num_of_inputs <= max_num_of_inputs, inputs_msg

        boxes_shape = node.in_port(0).data.get_shape()
        assert boxes_shape is not None, 'The shape of tensor with boxes is not defined'
        scores_shape = node.in_port(1).data.get_shape()
        assert scores_shape is not None, 'The shape of tensor with scores is not defined'
        assert len(boxes_shape
                   ) == 3, 'Length of tensors with boxes must be equal to 3'
        assert len(scores_shape
                   ) == 3, 'Length of tensors with scores must be equal to 3'

        # According to the specification of the operation NonMaxSuppression,
        # the input 'max_output_boxes_per_class' (port 2) is optional, with default value 0.
        if num_of_inputs >= 3:
            max_output_boxes_per_class = node.in_port(2).data.get_value()
        else:
            max_output_boxes_per_class = 0

        if not max_output_boxes_per_class:
            log.info(
                'Set default "max_output_boxes_per_class" for node {} to number of boxes'
                .format(node.name))
            max_output_boxes_per_class = boxes_shape[1]

        # convert the np.array value to a scalar to avoid issue with ragged numpy array generation in the shape
        # calculation formulas below
        if isinstance(max_output_boxes_per_class, np.ndarray):
            max_output_boxes_per_class = max_output_boxes_per_class.item()

        num_classes = scores_shape[1]
        num_input_boxes = boxes_shape[1]
        assert scores_shape[2] is dynamic_dimension or scores_shape[2] == num_input_boxes or scores_shape[2] is None \
               or num_input_boxes is None, 'Number of boxes mismatch for operation {}'.format(node_name)

        if node.get_opset() in ['opset4', 'opset5']:
            max_number_of_boxes = min(
                num_input_boxes,
                max_output_boxes_per_class) * boxes_shape[0] * num_classes
        else:
            max_number_of_boxes = min(
                num_input_boxes,
                boxes_shape[0] * max_output_boxes_per_class * num_classes)
        node.out_port(0).data.set_shape(shape_array([max_number_of_boxes, 3]))

        if opset == 'opset5':
            node.out_port(0).data.set_shape(
                shape_array([dynamic_dimension_value, 3]))
            num_of_outputs = len([
                port for port in node.out_ports().values()
                if not port.disconnected()
            ])
            if num_of_outputs >= 2 and node.has_port('out', 1):
                node.out_port(1).data.set_shape(
                    shape_array([dynamic_dimension_value, 3]))
            if num_of_outputs >= 3 and node.has_port('out', 2):
                node.out_port(2).data.set_shape(shape_array([1]))
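For intuition, the opset4/opset5 branch of the output-shape bound computed above reduces to simple arithmetic; the sizes below are made up.

# Hypothetical sizes: 2 batches, 100 boxes, 5 classes, at most 10 boxes kept per class.
num_batches, num_input_boxes, num_classes = 2, 100, 5
max_output_boxes_per_class = 10
max_number_of_boxes = min(num_input_boxes, max_output_boxes_per_class) * num_batches * num_classes
print(max_number_of_boxes)  # 100, so the selected-indices output is at most [100, 3]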
Example No. 12
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \
            "Tile should have 2 connected input port, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_in_ports)

        shape = node.in_port(0).data.get_shape()
        assert shape is not None, "Undefined input shape for Tile node '{}'.".format(
            name)
        tile_array = node.in_port(1).data.get_value()
        assert tile_array is not None, "Undefined `repeats` (1st port input value) of Tile node '{}'".format(
            name)

        # align ranks of the tile_array tensor and input shape node
        if shape.size < tile_array.size:
            shape = np.insert(shape, 0, [1] * (tile_array.size - shape.size))
        elif shape.size > tile_array.size:
            tile_array = np.insert(tile_array, 0,
                                   [1] * (shape.size - tile_array.size))

        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                np.tile(
                    node.in_port(0).data.get_value().reshape(shape),
                    tile_array))
        else:
            node.out_port(0).data.set_shape(shape * tile_array)

        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'shape')
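The rank alignment above can be checked with plain numpy; the shapes here are arbitrary examples, not taken from the source.

import numpy as np

shape = np.array([3, 4])            # input shape, rank 2
tile_array = np.array([2, 1, 5])    # repeats, rank 3
# pad the shorter of the two with leading ones, as in the infer function above
if shape.size < tile_array.size:
    shape = np.insert(shape, 0, [1] * (tile_array.size - shape.size))
elif shape.size > tile_array.size:
    tile_array = np.insert(tile_array, 0, [1] * (shape.size - tile_array.size))
print(shape * tile_array)           # [ 2  3 20] -> output shape of the Tile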
Example No. 13
def arg_ops_infer(node: Node):
    shape = node.in_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)
    assert shape is not None, "Input shape for the node {} is None".format(
        node_name)

    # there are two inputs in TensorFlow. The second input is the axis for ArgMax
    connected_in_ports = [
        port for port in node.in_ports().values() if not port.disconnected()
    ]
    if len(connected_in_ports) == 2:
        axis = node.in_port(1).data.get_value()
        if axis is None:
            log.debug('The second argument to {} is None'.format(
                node.soft_get('name', node.id)))
            return
        node.axis = axis
        # remove the unnecessary input
        node.in_port(1).disconnect()

    num_top_axes = shape.size
    if num_top_axes < 3:
        num_top_axes = 3

    out_shape = np.ones(num_top_axes, dtype=np.int64)

    if node.has_valid('axis'):
        axis = get_canonical_axis_index(shape, node.axis)
        node.axis = axis
        out_shape = int64_array(shape)
        out_shape[axis] = node.top_k
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
    else:
        out_shape[0] = shape[0]
        out_shape[2] = node.top_k
        if node.has_and_set('out_max_val'):
            out_shape[1] = 2

    node.out_port(0).data.set_shape(out_shape)
Example No. 14
 def _create_node(attrs: dict):
     pb = onnx.helper.make_node("Crop", ["X"], ["Y"], **attrs)
     graph = build_graph({'node_0': {'pb': pb}}, [])
     return Node(graph, 'node_0')
Example No. 15
 def test_infer_invalid2(self):
     graph = build_graph(nodes_attributes, edges1, inputs3)
     ctc_loss_node = Node(graph, 'ctcloss_node')
     self.assertRaises(AssertionError, CTCLoss.infer, ctc_loss_node)
Example No. 16
    def infer(node: Node):
        """
        Infers shape of convolution node as it is done in ONNX.
        It is very similar to one that Caffe does, but slightly different.
        We made a complete fork of this function because they are supposed to be
        supported differently by different people.
        Args:
            node: graph convolution node
        """
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        # bias_term cannot be deduced earlier for frameworks that represent
        # convolution weights/biases as regular inputs; so the number of inputs
        # is checked here and the correct value for bias_term is restored to keep
        # the rest of the code unchanged. It will be used after we merge
        # several infer functions for convolution in different FWs into a single one.
        if not node.has_valid('bias_term'):
            node['bias_term'] = len(node.in_nodes()) == 3

        weights_index = node.weights_index if node.has_valid(
            'weights_index') else 1

        # Reshape weights kernel to original shape
        # In the case of the Caffe or MXNet frameworks, weight values have no structured shape like OIHW,
        # so we have to reshape the weights to the normal shape.
        # For this case, the Convolution node should have the attribute reshape_kernel = True
        if node.has_valid('reshape_kernel') and node.reshape_kernel:
            if not (node.has_valid('output') and node.has_valid('channel_dims')
                    and node.has_valid('group')
                    and node.has_valid('kernel_spatial')):
                log.error(
                    'Cannot reshape kernel because not all required attrs are set for node {}'
                    .format(node.id))
                return
            # layout for Convolution weights is OIHW
            kernel_shape = np.array([
                node.output,
                input_shape[node.channel_dims].item() / node.group, *[
                    node.kernel_spatial[i]
                    for i in range(len(node.kernel_spatial))
                ]
            ],
                                    dtype=np.int64)
            if node.type == 'Deconvolution':  # layout for Deconvolution weights is IOHW
                kernel_shape[[0, 1]] = kernel_shape[[1, 0]]
                #node.input_feature_channel, node.output_feature_channel = node.output_feature_channel, node.input_feature_channel

            if np.prod(kernel_shape) != np.prod(
                    node.in_node(weights_index).value.shape):
                log.error(
                    "Size of weights {} does not match kernel shape: {}\n".
                    format(np.prod(node.in_node(weights_index).value.shape),
                           kernel_shape) +
                    "    Possible reason is wrong channel number in input shape\n"
                )
                raise Error("Cannot reshape weights to kernel shape")

            node.in_node(weights_index).shape = np.array(kernel_shape)
            node.in_node(weights_index).value = np.reshape(
                node.in_node(weights_index).value, kernel_shape)
            node.reshape_kernel = False

        # Pass weights shape to node attribute kernel_shape
        kernel_shape = node.in_node(weights_index).shape
        node['kernel_shape'] = kernel_shape
        # Calculate kernel_spatial_idx and spatial_dims if they are not specified
        # It is necessary for ONNX because a convolution can be 1D/2D/3D
        if not node.has_valid('kernel_spatial_idx'):
            node['kernel_spatial_idx'] = np.delete(
                [x for x in range(len(kernel_shape))],
                (node.input_feature_channel, node.output_feature_channel))

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                (node.channel_dims[0], node.batch_dims[0]))

        node['kernel_spatial'] = kernel_shape[node.kernel_spatial_idx]

        if not node.has_valid('output'):
            # restore the number of output feature maps from the second argument that is weights
            if node.type in [
                    'Convolution', 'Deconvolution', 'DeformableConvolution',
                    'BinaryConvolution'
            ]:
                node['output'] = kernel_shape[node.output_feature_channel]
            else:
                raise Error(
                    'Convolution infer function was called for a node {} with unsupported type {}',
                    node.soft_get('name'), node.type)

        # Set default values for dilation, strides and pads if not set
        if not node.has_valid('dilation'):
            node['dilation'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('stride'):
            node['stride'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('pad'):
            node['pad'] = np.array([[0, 0]] * len(input_shape), dtype=np.int64)
        node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('output_padding'):
            node['output_padding'] = np.full([len(input_shape)],
                                             0,
                                             dtype=np.int64)

        if node.has_valid('output_padding') and len(input_shape) > len(
                node['output_padding']):
            output_padding = np.zeros(len(input_shape), dtype=np.int64)
            for i in range(len(node['output_padding'])):
                output_padding[i] = node['output_padding'][i]
            node['output_padding'] = output_padding

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.stride[node.spatial_dims]

        kernel_extent = node.dilation[node.spatial_dims] * (
            node.kernel_spatial - 1) + 1
        # TensorFlow always has auto_pad attribute that can be either valid or same_upper
        # In ONNX auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower)
        # Caffe does not use the auto_pad attribute
        if node.has_valid(
                'auto_pad') and not node.has_valid('output_spatial_shape'):
            node['pad_spatial_shape'], node[
                'output_spatial_shape'] = tf_window_op_pad_infer(
                    input_spatial_shape, kernel_extent, stride_spatial_shape,
                    node.auto_pad, node.type == 'Deconvolution')

            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:
            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)
            if node.type in ('Convolution', 'BinaryConvolution'):
                float_spatial = Convolution.calc_convolution(
                    input_spatial_shape, stride_spatial_shape,
                    pad_spatial_shape, kernel_extent)
                node['output_spatial_shape'] = int64_array(float_spatial)
            elif node.type == 'Deconvolution':
                # In case of given output_spatial_shape we calculate pads spatial
                if node.has_valid('output_spatial_shape'):
                    if node.has_valid('get_pad'):
                        node['pad'] = node.get_pad(node, input_shape,
                                                   kernel_shape)
                    else:
                        log.debug(
                            'Can\'t calculate paddings due to missing lambda get_pad in {} node'
                            .format(node.id))
                        return
                else:
                    output_padding = node.output_padding[
                        node.spatial_dims] if node.has_valid(
                            'output_padding') else None
                    if output_padding is not None and any(output_padding):
                        pad_spatial_shape -= output_padding
                        for dim in range(len(pad_spatial_shape)):
                            node.pad_spatial_shape[dim][
                                1] -= pad_spatial_shape[dim]

                    float_spatial = Convolution.calc_deconvolution(
                        node, input_spatial_shape, pad_spatial_shape,
                        kernel_extent)
                    node['output_spatial_shape'] = int64_array(float_spatial)
            elif node.type == 'DeformableConvolution':
                # get the output spatial shape from the second input with offsets
                node['output_spatial_shape'] = int64_array(
                    [node.in_node(1).shape[2:4]])
            else:
                raise Error('Unsupported layer type "{}"'.format(node.type))

        # For cases when group attribute wasn't set in extractor we should specify get_group attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_group'):
            node['group'] = node.get_group(node)
        output_shape = np.full_like(input_shape, -1, dtype=np.int64)
        output_shape[node.batch_dims] = input_shape[node.batch_dims]  # pylint: disable=unsupported-assignment-operation
        output_shape[node.spatial_dims] = node.output_spatial_shape  # pylint: disable=unsupported-assignment-operation

        # For cases when output attribute wasn't set in extractor we should specify get_output_feature_dim attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_output_feature_dim'):
            node['output'] = node.get_output_feature_dim(node)
        output_shape[node.channel_dims] = node.output  # pylint: disable=unsupported-assignment-operation
        node['output_shape'] = output_shape

        for n in node.out_nodes():
            node.out_node(n).shape = output_shape

        mark_input_bins(
            node, start_port=1 if node.type != 'DeformableConvolution' else 2)
        assign_dims_to_weights(node.in_node(weights_index),
                               node.kernel_spatial_idx,
                               node.input_feature_channel,
                               node.output_feature_channel, len(kernel_shape))

        PermuteAttrs.create_permute_attrs(
            node,
            attrs=[
                ('pad', 'input:0'),
                ('stride', 'input:0'),
                ('dilation', 'input:0'),
                ('output_shape', 'input:0'),
                ('batch_dims', 'input:0'),
                ('channel_dims', 'input:0'),
                ('spatial_dims', 'input:0'),
                ('kernel_shape', 'input:{}'.format(weights_index)),
                ('kernel_spatial_idx', 'input:{}'.format(weights_index)),
                ('input_feature_channel', 'input:{}'.format(weights_index)),
                ('output_feature_channel', 'input:{}'.format(weights_index)),
            ])

        PermuteAttrs.set_permutation(
            node.in_node(weights_index), node, node.get_weights_permute
            if node.has_valid('get_weights_permute') else None)
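For the 'Convolution' branch above, the output spatial size follows the standard convolution formula; the self-contained numpy check below mirrors that formula with made-up sizes (it is not the actual Convolution.calc_convolution helper).

import numpy as np

input_spatial = np.array([224, 224])
kernel_spatial = np.array([3, 3])
stride_spatial = np.array([2, 2])
dilation = np.array([1, 1])
pads = np.array([[1, 1], [1, 1]])   # [begin, end] padding per spatial dim

kernel_extent = dilation * (kernel_spatial - 1) + 1
pad_total = np.add.reduce(pads, axis=1)
output_spatial = np.floor((input_spatial + pad_total - kernel_extent) / stride_spatial + 1).astype(np.int64)
print(output_spatial)               # [112 112]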
Example No. 17
 def _add_input_node(self, node_name: str, node_port: int, sub_graph_input_port: int):
     self._input_nodes_map.setdefault(sub_graph_input_port, []).append((Node(self.graph, node_name), node_port))
Example No. 18
 def test_tf_space_to_depth_infer_shape_error(self):
     graph = build_graph(nodes, edges)
     graph.graph['layout'] = 'NHWC'
     graph.node['in_data_node']['shape'] = np.array([1024, 576, 256])
     std_node = Node(graph, 'StD')
     self.assertRaises(Error, SpaceToDepth.infer, std_node)
Example No. 19
 def type_infer(node: Node):
     node.out_port(0).set_data_type(bool)
Example No. 20
 def test_tf_space_to_depth_infer_divisibility_error_2(self):
     graph = build_graph(nodes, edges)
     graph.graph['layout'] = 'NCHW'
     graph.node['in_data_node']['shape'] = np.array([1, 256, 1024, 577])
     std_node = Node(graph, 'StD')
     self.assertRaises(Error, SpaceToDepth.infer, std_node)
Example No. 21
 def find_port_id(node: Node, virtual_id: str, attr: str):
     attrs = node.edge({attr: virtual_id})[2]
     assert bool('in' in attrs) != bool('out' in attrs), attrs
     return attrs['in' if 'in' in attrs else 'out']
Example No. 22
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [
            port for port in node.in_ports().values()
            if not port.disconnected()
        ]
        num_inputs = len(connected_in_ports)
        assert node.has_valid(
            'equation'
        ), "Einsum node {} must contain `equation` attribute".format(node_name)
        equation = node.equation

        # parse the equation and extract input and output subscripts
        input_subscripts, output_subscript = Einsum.parse_equation(
            node_name, equation)

        # check that each operand has the corresponding input subscript
        assert len(input_subscripts) == num_inputs, "The number of input operands of Einsum node {} " \
                                                    "must match the number of input subscripts " \
                                                    "in `equation`".format(node_name)

        # check compatibility of dimension sizes with the same label and generate a dictionary of shapes for labels
        label_to_shape = {}
        for input_ind in range(num_inputs):
            input_shape = node.in_port(input_ind).data.get_shape()
            input_subscript = input_subscripts[input_ind]
            labels = Einsum.extract_subscript_labels(node_name,
                                                     input_subscript)
            num_dims = len(input_shape)
            num_labels = len(labels)
            num_broadcasted_dims = num_dims - num_labels + 1
            dim_ind = 0
            label_ind = 0
            while label_ind < num_labels and dim_ind < num_dims:
                label = labels[label_ind]
                if label == "...":
                    sub_shape = input_shape[dim_ind:dim_ind +
                                            num_broadcasted_dims]
                    if label in label_to_shape.keys():
                        common_shape = bi_directional_shape_broadcasting(
                            sub_shape, label_to_shape[label])
                        assert common_shape is not None, "The dimensions labeled by the ellipsis must be broadcastable " \
                                                         "for Einsum node {}".format(node_name)
                        label_to_shape[label] = common_shape
                    else:
                        label_to_shape[label] = sub_shape
                    dim_ind += num_broadcasted_dims
                else:
                    dim_size = input_shape[dim_ind]
                    sub_shape = int64_array([dim_size])
                    assert label not in label_to_shape.keys() or np.array_equal(label_to_shape[label], sub_shape), \
                        "Sizes of dimensions with the same label of Einsum node {} " \
                        "must be compatible".format(node_name)
                    label_to_shape[label] = sub_shape
                    dim_ind += 1
                label_ind += 1

        # generate output shape based on the output subscript
        output_shape = int64_array([])
        labels = Einsum.extract_subscript_labels(node_name, output_subscript)
        for label in labels:
            assert label in label_to_shape.keys(), "The label in the output subscript must appear" \
                                                   " in input subscripts in equation {} " \
                                                   "of Einsum node {}".format(equation, node_name)
            output_shape = np.concatenate(
                (output_shape, label_to_shape[label]))

        node.out_port(0).data.set_shape(output_shape)
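The label-to-shape bookkeeping above mirrors what numpy's own einsum does; for intuition, with arbitrary shapes:

import numpy as np

a = np.ones((2, 3, 5))
b = np.ones((2, 5, 4))
# equation 'bij,bjk->bik': label j (size 5) is reduced, the output collects b, i, k
out = np.einsum('bij,bjk->bik', a, b)
print(out.shape)   # (2, 3, 4)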
Example No. 23
 def infer(node: Node):
     shape = node.in_port(0).data.get_shape().copy()
     shape[1] = shape[1] / node.group
     node.out_port(0).data.set_shape(shape)
Example No. 24
    def infer(node: Node):
        shape = node.in_node().shape
        if shape is None:
            log.error("Undefined shape for the input tiles for the Tile operation '{}'.".format(node.node))
            return
        shape = np.copy(shape)

        if len(node.in_nodes()) == 2:
            tile_array = node.in_node(1).value
            if tile_array is None:
                log.error('Tile values are None for node "{}".'.format(node.name))
                return
            if len(shape) != len(tile_array):
                log.error('Shape mismatch for a node "{}": {} vs {}.'.format(node.name, shape.shape, tile_array.shape))
                return
            non_one_tile = np.argwhere(tile_array != 1)
            if len(non_one_tile) == 0:
                log.info(
                    'Redundant "Tile" operation "{}" with tile values for all dimensions equal to 1.'.format(node.name))
                node['axis'] = 0
                node['tiles'] = 1
            elif len(non_one_tile) == 1:
                node['axis'] = non_one_tile[0][0]
                node['tiles'] = tile_array[node['axis']]
            else:
                node['type'] = None
                log.warning("Tile operation with more than one dimension not equal to 1 is not supported.")
                # do not return here to allow infer shape and values for the constant propagation case
            node.graph.remove_edge(node.in_node(1).id, node.id)
        elif len(node.in_nodes()) == 1:  # case when tiled dimension and count are specified in node attributes
            if not node.has_valid('axis') or not node.has_valid('tiles'):
                log.error('Mandatory attributes "axis" or "tiles" are not specified for a Tile node "{}"'.
                          format(node.name))
                return
            tile_array = np.ones([len(shape)], dtype=np.int64)
            tile_array[node.axis] = node.tiles
        else:
            log.error('Unsupported number of input parameters to Tile node "{}"'.format(node.name))
            return

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
        node.out_node().shape = shape * tile_array
        if node.in_node(0).value is not None:
            node.out_node().value = np.tile(node.in_node(0).value, tile_array)
Example No. 25
def sub_graph_between_nodes(graph: Graph,
                            start_nodes: list,
                            end_nodes: list,
                            detect_extra_start_node: callable = None):
    """
    Finds nodes of the sub-graph between 'start_nodes' and 'end_nodes'. Input nodes for the sub-graph nodes are also
    added to the sub-graph. Constant inputs of the 'start_nodes' are also added to the sub-graph.
    :param graph: graph to operate on.
    :param start_nodes: list of nodes names that specifies start nodes.
    :param end_nodes: list of nodes names that specifies end nodes.
    :return: list of nodes of the identified sub-graph or None if the sub-graph cannot be extracted.
    """
    sub_graph_nodes = list()
    visited = set(start_nodes)
    d = deque(start_nodes)
    extra_start_nodes = []

    nx.set_node_attributes(G=graph, name='prev', values=None)
    while len(d) != 0:
        cur_node_name = d.popleft()
        sub_graph_nodes.append(cur_node_name)
        if cur_node_name not in end_nodes:  # do not add output nodes of the end_nodes
            for _, dst_node_name in graph.out_edges(cur_node_name):
                if dst_node_name not in visited:
                    d.append(dst_node_name)
                    visited.add(dst_node_name)
                    graph.node[dst_node_name]['prev'] = cur_node_name

        for src_node_name, _ in graph.in_edges(cur_node_name):
            # add input nodes for the non-start_nodes
            if cur_node_name not in start_nodes and src_node_name not in visited:
                if detect_extra_start_node is not None and detect_extra_start_node(
                        Node(graph, cur_node_name)):
                    extra_start_nodes.append(cur_node_name)
                else:
                    d.append(src_node_name)
                    graph.node[src_node_name]['prev'] = cur_node_name
                    visited.add(src_node_name)

    # use forward dfs to check that all end nodes are reachable from at least one of the start nodes
    forward_visited = set()
    for start_node in start_nodes:
        graph.dfs(start_node, forward_visited)
    for end_node in end_nodes:
        if end_node not in forward_visited:
            raise Error('End node "{}" is not reachable from start nodes: {}. '
                        .format(end_node, start_nodes) + refer_to_faq_msg(74))

    for node_name in sub_graph_nodes:
        # sub-graph should not contain network input (Parameter) nodes
        if graph.node[node_name].get('op', '') == 'Parameter':
            path = list()
            cur_node = node_name
            while cur_node and 'prev' in graph.node[cur_node]:
                path.append(str(cur_node))
                cur_node = graph.node[cur_node]['prev']
            log.debug("The path from input node is the following: {}".format(
                '\n'.join(path)))
            raise Error(
                'The matched sub-graph contains network input node "{}". '.
                format(node_name) + refer_to_faq_msg(75))
    if detect_extra_start_node is None:
        return sub_graph_nodes
    else:
        return sub_graph_nodes, extra_start_nodes
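A possible call to the function above, assuming the graph contains nodes with these (hypothetical) names:

# Collect every node lying between the split and the concat, including their input nodes.
nodes_between = sub_graph_between_nodes(graph, ['detector/split'], ['detector/concat'])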
    def infer(node: Node):
        # check a number of input/output edges
        assert len(node.in_nodes()) == 3
        assert len(node.out_nodes()) == 1

        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        segment_ids_shape = node.in_port(2).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        segment_ids_value = node.in_port(2).data.get_value()

        # check input shapes
        assert data_shape is not None, \
            "Shape for input data tensor to SparseSegmentMean must be defined"
        assert indices_shape is not None and indices_shape.size == 1, \
            "SparseSegmentMean supports only 1D indices tensor"
        assert segment_ids_shape is not None and segment_ids_shape.size == 1, \
            "SparseSegmentMean supports only 1D segment IDs tensor"
        assert segment_ids_shape == indices_shape, \
            "Indices and segment IDs tensors must have the same shape"

        # computes output shape
        output_shape = data_shape
        output_shape[0] = segment_ids_shape[0]
        node.out_port(0).data.set_shape(output_shape)

        # value inference is possible only when all inputs are constant
        if data_value is None or indices_value is None or segment_ids_value is None:
            return

        # check that values in segment_ids are sorted
        for i in range(1, len(segment_ids_value)):
            assert segment_ids_value[i-1] <= segment_ids_value[i], \
                "Values in segment IDs are not sorted"
        num_segments = int(segment_ids_value[-1]) + 1

        # check that indices are in a range [0, data_shape[0])
        assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \
            "Some value in indices tensor is out of range"

        # infer
        num_adds = np.zeros(num_segments, dtype=np.int64)
        output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float64)
        output_shape = output_value.shape
        for i in range(len(segment_ids_value)):
            segment_id = int(segment_ids_value[i])
            indice = int(indices_value[i])
            output_value[segment_id, :] += data_value[indice, :]
            num_adds[segment_id] += 1
        
        for segment_id in range(num_segments):
            if num_adds[segment_id] != 0:
                output_value[segment_id, :] /= num_adds[segment_id]
        node.out_port(0).data.set_shape(output_shape)
        node.out_port(0).data.set_value(output_value)
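The constant-folding branch above is just a segment mean; a plain numpy reproduction with toy values (chosen only for illustration):

import numpy as np

data = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
indices = np.array([0, 1, 3])
segment_ids = np.array([0, 0, 1])       # must be sorted, as asserted above

num_segments = int(segment_ids[-1]) + 1
output = np.zeros((num_segments,) + data.shape[1:])
counts = np.zeros(num_segments)
for idx, seg in zip(indices, segment_ids):
    output[seg] += data[idx]
    counts[seg] += 1
output /= np.maximum(counts, 1)[:, None]   # avoid division by zero for empty segments
print(output)   # [[2. 3.], [7. 8.]]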
Example No. 27
 def type_infer(node: Node):
     if node.has_valid('dst_type'):
         node.out_port(0).set_data_type(node.dst_type)
     else:
         node.out_port(0).set_data_type(data_type_str_to_np(node.graph.graph['cmd_params'].data_type))
def check_phase(node: Node):
    if node.has_valid('pb') and hasattr(node.pb, 'include'):
        for i in node.pb.include:
            if hasattr(i, 'phase'):
                return {'phase': i.phase}
    return {}
 def infer(node: Node):
     if node.axis < 0:
         node.axis = len(node.in_node().shape) + node.axis
     copy_shape_infer(node)
     PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example No. 30
 def is_node_casts_to_float_or_shapeof(node: Node):
     return (node.soft_get('type') == 'Convert' and node.soft_get('dst_type') == np.float32) or \
             node.soft_get('type') == 'ShapeOf'