Example 1
def batch_norm_4_infer(node: Node):
    copy_shape_infer(node)
    mark_input_bins(node, ['weights', 'biases', 'mean', 'variance'])
    if node.has('fix_gamma') and node.fix_gamma:
        # fix_gamma is set: fill the gamma ('weights') input at port 1 with ones
        node.in_node(1).value = np.full_like(node.in_node(1).value,
                                             1,
                                             dtype=np.float32)
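For reference, a minimal standalone sketch of what the np.full_like call produces; the gamma values below are made up for illustration:

import numpy as np

gamma = np.array([0.5, 2.0, 1.5], dtype=np.float32)
print(np.full_like(gamma, 1, dtype=np.float32))  # [1. 1. 1.] -- same shape, all ones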
Example 2
def partial_infer(graph: Graph, start_node: str = None):
    """
    Tries to execute constant parts of the graph and deduce as much as possible
    information following the data flow, e.g. calculate and propagate shapes and
    constant values. Partially or completely defined values are stored in data
    nodes (kind='data').
    """
    # We have to turn off strict mode because below we add and remove edges without attributes, which is prohibited in strict mode
    graph.strict_mode = False
    cycle_nodes = graph.get_nodes_with_attributes(is_cyclic=True)
    cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes]
    ebunch_cyclic = list(graph.out_edges(nbunch=cycle_nodes, data=True, keys=True))
    ebunch_reconnected = exit_bound_edges(graph, sources=cycle_nodes, end_node_attrs={'op': 'Exit'})
    graph.remove_edges_from(ebunch_cyclic)
    graph.add_edges_from(ebunch_reconnected)

    try:
        nodes = list(nx.topological_sort(graph))
    except nx.NetworkXUnfeasible:
        raise Error('Graph contains a cycle. Can not proceed. ' + refer_to_faq_msg(97))

    graph.remove_edges_from(ebunch_reconnected)
    graph.add_edges_from(ebunch_cyclic)
    graph.strict_mode = True

    # Mark all nodes as not inferred yet
    if start_node is not None:
        start_index = nodes.index(start_node)
        nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]), name='is_partial_inferred', values=False)
    else:
        nx.set_node_attributes(G=graph, name='is_partial_inferred', values=False)

    nx.set_node_attributes(G=graph, name='executable',
                           values={n: True for n in graph.get_nodes_with_attributes(kind='data')})

    # first we infer constant sub-graphs so the reverse infer could use constant values sub-graphs. For example,
    # convolution weights may be reshuffled by some operation in the graph and are not directly consumed by the conv
    # node
    infer_nodes(graph, nodes, True)

    # we may need to deduce shape for Parameter node(s) if it is not defined
    need_reverse_infer = False
    for parameter in graph.get_op_nodes(op='Parameter'):
        if parameter.soft_get('shape', None) is None:
            need_reverse_infer = True

    if need_reverse_infer:
        reverse_infer(graph, nodes)

    infer_nodes(graph, nodes, False)

    not_fully_inferred = graph.get_nodes_with_attributes(is_not_fully_inferred=True)
    for n in not_fully_inferred:
        node = Node(graph, n)
        if node.has('infer') and node.infer is not None:
            node.infer(node)

    return graph
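For reference, a minimal networkx-only sketch (independent of the mo Graph class) of the exception that nx.topological_sort raises on a cyclic graph, which the except clause above relies on:

import networkx as nx

g = nx.MultiDiGraph()
g.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])  # a -> b -> c -> a forms a cycle
try:
    order = list(nx.topological_sort(g))  # the generator raises while being consumed
except nx.NetworkXUnfeasible:
    print('Graph contains a cycle')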
Example 3
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(layout) == 4, \
            'Input tensor rank must be equal to 4 for node "{}"'.format(node_name)

        input_shape = node.in_port(0).data.get_shape()

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale'), \
                'Nodes with a single input must have "width_scale" and "height_scale" attributes'
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(
                shape_for_layout(layout,
                                 batch=input_shape[get_batch_dim(layout, 4)],
                                 features=input_shape[get_features_dim(
                                     layout, 4)],
                                 height=out_height,
                                 width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(
                node_name)
            eps = 1e-5  # small epsilon so results lying just below an integer round up to it instead of being truncated
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array(
                [dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int(
                        (input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
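A quick standalone illustration of why the epsilon above matters: binary floating point can place a result just below the intended integer, and a bare int() would then truncate away a whole unit:

print(0.6 / 0.2)              # 2.9999999999999996
print(int(0.6 / 0.2))         # 2 -- truncated down
print(int(0.6 / 0.2 + 1e-5))  # 3 -- the epsilon nudges it past the representation error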
Example 4
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        loc_shape = node.in_port(0).data.get_shape()
        conf_shape = node.in_port(1).data.get_shape()
        prior_boxes_shape = node.in_port(2).data.get_shape()

        if loc_shape is None or conf_shape is None or prior_boxes_shape is None:
            raise Error(
                'Shapes for the Detection Output node "{}" are not defined'.
                format(node_name))

        prior_size = 4
        if node.has('normalized') and not node.normalized:
            prior_size = 5

        if is_fully_defined(prior_boxes_shape[-1]) and prior_boxes_shape[-1] % prior_size != 0:
            raise Error(
                'The last dimension of the prior boxes "{}" is not divisible by the prior size {} '
                'for node "{}"'.format(prior_boxes_shape[-1], prior_size, node_name))

        num_priors = prior_boxes_shape[-1] // prior_size
        if not node.has_valid('keep_top_k') or node.keep_top_k == -1:
            node['keep_top_k'] = num_priors

        num_classes = conf_shape[-1] // num_priors
        num_loc_classes = num_classes
        if node.has_and_set('share_location') and node.share_location:
            num_loc_classes = 1

        if not compatible_dims(num_priors * num_loc_classes * 4,
                               loc_shape[-1]):
            raise Error(
                'Locations and prior boxes shapes mismatch: "{}" vs "{}" for node "{}"'
                ''.format(loc_shape, prior_boxes_shape, node_name))

        if not node.variance_encoded_in_target and not compatible_dims(
                prior_boxes_shape[-2], 2):
            raise Error(
                'The "-2" dimension of the prior boxes must be 2 but it is "{}" for node "{}".'
                ''.format(prior_boxes_shape[-2], node_name))

        if is_fully_defined(conf_shape[-1]) and is_fully_defined(
                num_priors) and conf_shape[-1] % num_priors != 0:
            raise Error(
                'Amount of confidences "{}" is not divisible by amount of priors "{}" for node "{}".'
                ''.format(conf_shape[-1], num_priors, node_name))

        node.out_port(0).data.set_shape(
            [1, 1, conf_shape[0] * node.keep_top_k, 7])

        # the line below is needed for the TF framework so that MO does not change the layout
        node.graph.node[node.out_node(0).id]['nchw_layout'] = True
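For context, the trailing 7 in the output shape is the conventional DetectionOutput row layout [image_id, label, conf, xmin, ymin, xmax, ymax]; a sketch with hypothetical values:

batch, keep_top_k = 2, 200                 # hypothetical conf_shape[0] and node.keep_top_k
out_shape = [1, 1, batch * keep_top_k, 7]  # -> [1, 1, 400, 7]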
Example 5
    def add_old_api_map_order_into_rt_info(op: Node):
        # rt info update
        assert op.has('rt_info'), \
            'Unable to preserve runtime information for node with name={}'.format(
                op.soft_get('name', op.id))

        old_api_map = OldAPIMapOrder(version=0)
        attr_name = old_api_map.get_name()
        if (attr_name, old_api_map.get_version()) not in op.rt_info.info:
            op.rt_info.info[(attr_name,
                             old_api_map.get_version())] = old_api_map
        return attr_name, old_api_map.get_version()
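A possible follow-up usage, relying only on what the function itself returns (the rt_info dictionary is keyed by (name, version) tuples, as the code above shows):

# hypothetical: store the entry, then read it back via the returned key
name, version = add_old_api_map_order_into_rt_info(op)
old_api_map = op.rt_info.info[(name, version)]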
Example 6
def remove_edges_for_nodes(graph, node_attrs: dict, edge_attrs: dict):
    """
    Removes every incoming edge whose attributes match 'edge_attrs' from each
    node whose attributes match 'node_attrs'.
    """
    from openvino.tools.mo.graph.graph import Node
    for node in graph.nodes():
        node = Node(graph, node)
        if all([
                node.has(attr) and node[attr] == node_attrs[attr]
                for attr in node_attrs
        ]):
            nodes_edges = node.in_nodes_edges()
            for port in nodes_edges:
                src_node, edge = nodes_edges[port]
                if all([
                        attr in edge and edge[attr] == edge_attrs[attr]
                        for attr in edge_attrs
                ]):
                    graph.remove_edge(src_node.id, node.id)
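A hypothetical call matching the attribute-dictionary convention the function expects; the 'op' and 'bin' attribute values below are illustrative, not taken from a real transformation:

# drop every incoming edge carrying {'bin': 'weights'} from nodes whose op is 'FakeConst'
remove_edges_for_nodes(graph,
                       node_attrs={'op': 'FakeConst'},
                       edge_attrs={'bin': 'weights'})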
Example 7
def tf_op_extractor(node: Node, lowered_keys_map: dict):
    # all required attributes for the 'TFCustomSubgraphCall' are set during their initialization
    if (node.has('op') and node.op == 'TFCustomSubgraphCall') or not node.has_valid('pb'):
        return True, node.graph.node[node.id]

    result = common_tf_fields(node)
    node.graph.node[node.id].update(result)
    supported = False
    op = result['op'].lower()
    if op in lowered_keys_map:
        op = lowered_keys_map[op]
        assert op in tf_op_extractors
        attrs = tf_op_extractors[op](node)
        if attrs:
            result.update(attrs)
            supported = True
    new_attrs = native_tf_node_extractor(node.pb)
    new_attrs.update(result)
    result = new_attrs
    return supported, result
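For context, lowered_keys_map is looked up with the lower-cased op type and yields the key under which the extractor was registered; a hypothetical shape of that mapping:

# illustrative only: lower-cased TF op type -> registered extractor key
lowered_keys_map = {'conv2d': 'Conv2D', 'biasadd': 'BiasAdd'}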
Example 8
    def infer(node: Node):
        # check that all required attributes are set
        assert node.has('sorted') and node.sorted in ['true', 'false'], \
            "Unique does not have valid sorted attribute"
        assert node.has('return_inverse') and node.return_inverse in ['true', 'false'], \
            "Unique does not have valid return_inverse attribute"
        assert node.has('return_counts') and node.return_counts in ['true', 'false'], \
            "Unique does not have valid return_counts attribute"

        # check a number of input and output nodes
        assert len(node.in_nodes()) == 1, "Unique must have one input"
        assert len(node.out_nodes()) <= 3, "Unique must have at most 3 outputs"

        # compute maximum number of outputs if no output port is pruned
        max_num_outputs = 1
        if node.return_inverse == 'true':
            max_num_outputs += 1
        if node.return_counts == 'true':
            max_num_outputs += 1

        # check a number of outputs
        assert len(node.out_nodes()) <= max_num_outputs, \
            "The number of outputs of the IR Unique layer must not exceed that in the framework graph"
        
        # check that the output with unique elements remains in a graph after pruning
        # since this is required output
        assert 0 in node.out_nodes(), \
            "The output with unique elements must remain in a graph"

        # check if outputs with indices and counts remain in a graph after pruning
        # and update attributes
        if len(node.out_nodes()) == 1:
            node.return_inverse = 'false'
            node.return_counts = 'false'
        if len(node.out_nodes()) == 2 and 1 in node.out_nodes() and \
                node.return_inverse == 'true' and node.return_counts == 'true':
            node.return_counts = 'false'
        if len(node.out_nodes()) == 2 and 2 in node.out_nodes() and \
                node.return_inverse == 'true' and node.return_counts == 'true':
            node.return_inverse = 'false'

        # check that input is 1-D tensor
        input_shape = node.in_node(0).shape
        assert input_shape is not None and input_shape.size == 1, \
            "Unique accepts only 1-D input"

        # determine a shape for each output
        for out_node_ind in node.out_nodes():
            assert out_node_ind < max_num_outputs, "Unique has at most three outputs"
            # all outputs have the same shape equal to the input shape
            node.out_node(out_node_ind).shape = input_shape

        input_value = node.in_node(0).value
        if input_value is None:
            return

        # check that input value is 1-D
        assert len(input_value.shape) == 1, \
            "Unique accepts only 1-D input"

        is_sorted = (node.sorted == 'true')
        return_inverse = (node.return_inverse == 'true')
        return_counts = (node.return_counts == 'true')

        # infer if the input is constant
        if is_sorted:
            unique_output = np.unique(input_value, return_inverse=return_inverse,
                                      return_counts=return_counts, return_index=False)
            if not return_inverse and not return_counts:
                unique_output = [unique_output]
        else:
            # np.unique can only return unique elements in sorted order
            # so this case should be handled separately
            sorted_uniques, sorted_index, sorted_inverse, sorted_counts = np.unique(
                input_value, return_index=True, return_inverse=True, return_counts=True)
            # compute uniques that are in the same order as they occur in the input,
            # indices of input values in uniques, counts for each unique element
            uniques = []
            counts = []
            old_ind_by_elem = dict(zip(sorted_uniques, range(len(sorted_index))))
            new_ind_by_elem = dict()
            new_ind = 0
            for ind in np.sort(sorted_index):
                uniques.append(input_value[ind])
                old_ind = old_ind_by_elem[input_value[ind]]
                counts.append(sorted_counts[old_ind])
                new_ind_by_elem[input_value[ind]] = new_ind
                new_ind += 1
            inverse = [new_ind_by_elem[input_value[ind]] for ind in range(len(input_value))]

            # pack unique_output
            unique_output = []
            unique_output.append(uniques)
            if return_inverse:
                unique_output.append(inverse)
            if return_counts:
                unique_output.append(counts)

        # write result to output nodes
        j = 0
        for out_node_ind in node.out_nodes():
            # dtype=float keeps float64 semantics (np.float is removed in modern NumPy)
            node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=float)
            node.out_node(out_node_ind).shape = int64_array(node.out_node(out_node_ind).value.shape)
            j += 1
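The unsorted branch above reorders np.unique results into first-occurrence order; a minimal self-contained sketch of the same idea using only the return_index output:

import numpy as np

x = np.array([3, 1, 3, 2, 1])
u, first_idx, cnt = np.unique(x, return_index=True, return_counts=True)
order = np.argsort(first_idx)  # argsort of first-occurrence indices restores input order
print(u[order])    # [3 1 2] -- u itself is sorted: [1 2 3]
print(cnt[order])  # [2 2 1]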
Example 9
def infer_nodes(graph: Graph,
                nodes: List[Node],
                constant_subgraph_only: bool = False):
    """
    Run "infer" function of the specified nodes.

    :param graph: graph with nodes
    :param nodes: list of node ids in the topological order
    :param constant_subgraph_only: flag which specifies whether only inference of constant sub-graphs should be done
    """
    debug_logger = log.getLogger().isEnabledFor(log.DEBUG)
    for n in nodes:
        # Data Flow Infer
        node = Node(graph, n)
        node_name = node.soft_get('name', node.id)
        try:
            if node.has('is_partial_inferred') and not node.is_partial_inferred:
                if node.has('infer') and node.infer is not None:
                    # we consider that operation will produce value if all inputs are constants or it is
                    # 'ShapeOf' operation
                    if constant_subgraph_only:
                        in_values = [
                            port.data.get_value()
                            for port in node.in_ports().values()
                        ]
                        if node.soft_get('op') == 'Parameter' or any(value is None for value in in_values) or \
                                (node.soft_get('op') == 'ShapeOf' and node.in_port(0).data.get_shape() is None):
                            # if here will be any new ShapeOf type operation, we should update condition above
                            continue

                    if debug_logger:
                        log.debug('-' * 20)
                        log.debug('Partial infer for {}'.format(
                            node.soft_get('name')))
                        log.debug('Op: {}'.format(node.soft_get('op')))
                        log.debug('Inputs:')
                        log_debug_dict(node.in_nodes(), 'input')

                    node.infer(node)
                    out_nodes = node.out_nodes()

                    # propagate nchw_layout attributes to data nodes
                    if node.has('nchw_layout'):
                        for out_node in out_nodes.values():
                            out_node['nchw_layout'] = node.nchw_layout

                    # In debug print current node attributes, input shapes/values and output shape/values
                    if debug_logger:
                        log.debug('Outputs:')
                        log_debug_dict(node.out_nodes(), 'output')

                    if not constant_subgraph_only:
                        not_all_output_shapes = False

                        for out_port, out_node in out_nodes.items():
                            # accumulate: the flag stays set once any output misses a shape
                            if not out_node.has_valid('shape'):
                                log.error(
                                    'Shape is not defined for output {} of "{}".'
                                    .format(out_port, node_name))
                                not_all_output_shapes = True

                        if not_all_output_shapes:
                            raise Error(
                                'Not all output shapes were inferred or fully defined for node "{}". '
                                + refer_to_faq_msg(40), node_name)
                elif node.kind != 'data':
                    raise Error(
                        'There is no registered "infer" function for node "{}" with op = "{}". ' +
                        'Please implement this function in the extensions. ' +
                        refer_to_faq_msg(37), node_name, node.soft_get('op'))
                node.is_partial_inferred = True
        except Exception as err:
            log.error('Cannot infer shapes or values for node "{}".'.format(
                node.soft_get('name')))
            log.error(str(err))
            log.error('')
            log.error(
                'This can happen due to a bug in the custom shape infer function {}.'.format(
                    node.soft_get('infer')))
            log.error(
                'Or because the node inputs have incorrect values/shapes.')
            log.error(
                'Or because input shapes are incorrect (embedded to the model or passed via --input_shape).'
            )
            debug_messages = '\n'.join([
                'Layer "' + name + '": ' + attrs['debug_message']
                for name, attrs in graph.nodes(data=True)
                if 'debug_message' in attrs
            ])
            if debug_messages != "":
                log.error('')
                log.error('Other possible failure reasons are listed below:')
                log.error(debug_messages)
            if not debug_logger:
                log.error(
                    'Run Model Optimizer with --log_level=DEBUG for more information.'
                )
            else:
                log.debug('Node "{}" attributes: {}'.format(
                    node.soft_get('name'), node.graph.node[node.id]))
            raise Error('Stopped shape/value propagation at "{}" node. '.
                        format(node.soft_get('name')) +
                        refer_to_faq_msg(38)) from err
        control_flow_infer(graph, n)