Example #1
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        target_shape = node.in_port(1).data.get_value()
        assert target_shape is not None, 'Output shape is not defined for node "{}"'.format(
            node_name)
        assert node.has_and_set(
            'mode'), 'Broadcasting mode is not defined for node "{}"'.format(
                node_name)

        PermuteInputs().set_input_permutation(node.in_node(1), node,
                                              'output:0', 'shape')

        if input_value is not None and not node.has_and_set('stop_value_propagation') and \
                is_fully_defined(target_shape):
            if node.mode == 'numpy':
                node.out_port(0).data.set_value(
                    uni_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_value(
                    bi_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                node.out_port(0).data.set_value(
                    explicit_broadcasting(input_value, target_shape,
                                          axes_mapping))
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
        else:
            if node.mode == 'numpy':
                node.out_port(0).data.set_shape(
                    uni_directional_shape_broadcasting(input_shape,
                                                       target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_shape(
                    bi_directional_shape_broadcasting(input_shape,
                                                      target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                new_shape, _ = explicit_shape_broadcasting(
                    input_shape, target_shape, axes_mapping)
                node.out_port(0).data.set_shape(new_shape)
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
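The three modes map closely onto numpy primitives: 'numpy' mode broadcasts the input one-directionally to exactly the target shape, 'bidirectional' broadcasts both shapes against each other, and 'explicit' places the input axes at the positions given by axes_mapping. A minimal sketch of the first two rules in plain numpy (np.broadcast_shapes requires numpy >= 1.20):

    import numpy as np

    x = np.zeros((3, 1))
    # 'numpy' (uni-directional) mode: the input is broadcast to exactly the target shape
    print(np.broadcast_to(x, (2, 3, 4)).shape)   # (2, 3, 4)
    # 'bidirectional' mode: both shapes are broadcast against each other
    print(np.broadcast_shapes((3, 1), (1, 4)))   # (3, 4)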
Example #2
    def is_node_match_for_optimization(self, node: Node):
        """
        Check that the node can be added to the sequence of nodes for the Transpose-Reshape optimization
        :param node: node to check
        :return: result of the check
        """
        # TODO change to 'op' and reshape-like
        return node.has_and_set('type') and node.type in ('Transpose', 'Reshape') and \
            not node.has_and_set(self.OPTIMIZED_NODE_FLAG)
Example #3
    def infer(node: Node):
        # order parameter calculation and checks
        in_ports = node.in_ports()
        connected_ports = [
            port for port in in_ports.values() if not port.disconnected()
        ]
        input_shape = node.in_port(0).data.get_shape()

        if node.has_and_set('reverse_order'):
            assert len(connected_ports) == 1 and 0 in in_ports, \
                'Cannot infer `{}` because both order and reverse_order are set'.format(node.soft_get('name'))
            order = np.arange(len(input_shape))[::-1]  # Reverse order
        else:
            # PermuteInputs is imported locally because it uses Transpose internally, which would cause a circular import
            from openvino.tools.mo.graph.perm_inputs import PermuteInputs
            assert len(connected_ports) == 2 and 0 in in_ports and 1 in in_ports, \
                "{} node `{}` should have 2 input ports, where 0-input is a data input and 1-input represents " \
                "Transpose `order`".format(node.op, node.id)
            order = node.in_port(1).data.get_value()
            assert order is not None, 'Cannot infer `{}` because order is None'.format(
                node.soft_get('name'))
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'order')

        # setting shape and value if applicable
        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                np.transpose(node.in_port(0).data.get_value(), axes=order))
        else:
            node.out_port(0).data.set_shape(input_shape[order])
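Stripped of the graph API, the inference rules above are plain numpy semantics: reverse_order means the permutation np.arange(rank)[::-1], and the output shape is simply the input shape indexed by the permutation. A standalone sketch:

    import numpy as np

    def transpose_shape_sketch(input_shape, order=None):
        if order is None:
            order = np.arange(len(input_shape))[::-1]   # reverse_order behaviour
        # the output shape is the input shape permuted by `order`
        return [int(d) for d in np.array(input_shape)[order]]

    print(transpose_shape_sketch((2, 3, 4)))             # [4, 3, 2]
    print(transpose_shape_sketch((2, 3, 4), [0, 2, 1]))  # [2, 4, 3]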
Example #4
    def infer(node: Node):
        if node.has_and_set('extra_inputs'):
            assert len(node.in_nodes()) == 8
        else:
            assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) in [1, 2]

        hidden_shape = node.in_node(1).shape.copy()
        cell_shape = node.in_node(2).shape.copy()

        mark_input_bins(node, start_port=3)
        node.out_node(0).shape = hidden_shape
        if len(node.out_nodes()) == 2:
            node.out_node(1).shape = cell_shape

        hidden_size = hidden_shape[1]

        if node.has_valid('hidden_size'):
            if node.hidden_size != hidden_size:
                raise Error(
                    "Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}"
                    .format(node.in_node(1).shape, node.soft_get('name')))
        else:
            node['hidden_size'] = hidden_size

        assert cell_shape[1] == hidden_size

        input_shape = node.in_node(0).shape
        assert input_shape is not None
        assert compatible_dims(hidden_shape[0], cell_shape[0]) and \
               compatible_dims(cell_shape[0], input_shape[0]), 'States are not broadcast-able by batch for node {}' \
                                                               ''.format(node.soft_get('name', node.id))
Example #5
def arg_ops_infer(node: Node):
    shape = node.in_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)
    assert shape is not None, "Input shape for the node {} is None".format(node_name)

    # there are two inputs in TensorFlow. The second input is the axis for ArgMax
    connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
    if len(connected_in_ports) == 2:
        axis = node.in_port(1).data.get_value()
        if axis is None:
            log.debug('The second argument to {} is None'.format(node_name))
            return
        node.axis = axis
        # remove the unnecessary input
        node.in_port(1).disconnect()

    num_top_axes = shape.size
    if num_top_axes < 3:
        num_top_axes = 3

    out_shape = np.ones(num_top_axes, dtype=np.int64)

    if node.has_valid('axis'):
        axis = get_canonical_axis_index(shape, node.axis)
        node.axis = axis
        out_shape = shape.copy()
        out_shape[axis] = node.top_k
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
    else:
        out_shape[0] = shape[0]
        out_shape[2] = node.top_k
        if node.has_and_set('out_max_val'):
            out_shape[1] = 2

    node.out_port(0).data.set_shape(out_shape)
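The two branches compute different layouts: with an axis, the output copies the input shape and replaces that dimension with top_k; without one, a Caffe-style shape of rank at least 3 is built. A self-contained sketch of the shape rule, assuming the canonical axis of a negative index is axis + rank:

    import numpy as np

    def arg_ops_out_shape(shape, top_k, axis=None, out_max_val=False):
        if axis is not None:
            axis = axis + len(shape) if axis < 0 else axis   # canonicalize a negative axis
            out_shape = np.array(shape, dtype=np.int64)
            out_shape[axis] = top_k
            return out_shape
        # no axis: Caffe-style output [N, 1 or 2, top_k, 1, ...] padded to rank >= 3
        out_shape = np.ones(max(len(shape), 3), dtype=np.int64)
        out_shape[0] = shape[0]
        out_shape[2] = top_k
        if out_max_val:
            out_shape[1] = 2
        return out_shape

    print(arg_ops_out_shape((1, 1000), top_k=5, axis=-1))   # [1 5]
    print(arg_ops_out_shape((8, 3, 224, 224), top_k=5))     # [8 1 5 1]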
Example #6
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        in_name = input_node.soft_get('name', input_node.id)
        features_dim_idx, has_layout = get_dim_from_layout(input_node, 'C')
        if features_dim_idx is None:
            if has_layout:
                log.warning('Layout for input {} doesn\'t have channel ("C") dimension to apply {} preprocessing. '
                            'Skipping this input.'.format(in_name, preprocessing_name))
            features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert compatible_dims(value.size, input_node.shape[features_dim_idx]) or value.size == 1, \
            "Incompatible layout, please specify correct layout for the node"

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = in_name + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

        if input_node.op == 'Parameter' and input_node.has_and_set('data_type'):
            dtype = input_node.data_type
            if np.issubdtype(dtype, np.floating):
                value = value.astype(dtype)

        if input_node.is_out_port_connected(0) and len(input_node.out_port(0).get_destinations()) == 1:
            # There are models with pattern Parameter(uint8) -> Convert(float).
            # Adding mean/scale leads to the following:
            # Parameter(uint8) -> Mean/Scale -> Convert(float) which is incorrect.
            # To fix this mean and scale preprocessing node is inserted after Convert(float) node.
            out_node = input_node.out_port(0).get_destination().node
            convert_type = out_node.soft_get('dst_type')
            if out_node.soft_get('type') == "Convert" and (convert_type in [np.float32, np.float16]):
                input_node = out_node
                if convert_type != value.dtype:
                    new_value = value.astype(convert_type)
                    const_node = preprocessing.in_port(1).get_connection().get_source().node
                    const_node['value'] = new_value

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0), "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
Example #7
    def shape_alignment(node: Node):
        """
        The MatMul specification allows the inputs to be aligned together before the matrix multiplication.
        The method raises an error if the input shapes are not valid at any step of the alignment process
        :return: aligned copies of both input shapes
        """
        node_name = node.soft_get('name', str(node.id))
        input_shapes = [node.in_port(i).data.get_shape() for i in range(2)]
        transpose_a = node.has_and_set('transpose_a')
        transpose_b = node.has_and_set('transpose_b')

        transformed_shapes = []
        for i, shape in enumerate(input_shapes):
            input_shape = shape.copy()
            # prerequisites check
            assert input_shape is not None, "MatMul has shape=`None` for {} input of `{}` node".format(
                i, node_name)
            assert input_shape.ndim == 1, "MatMul doesn't support scalar inputs. {} input of `{}` node has shape {}" \
                                          "".format(i, node_name, input_shape)
            assert input_shape.size >= 1, "MatMul doesn't support inputs with rank lower than 1. {} input of `{}` " \
                                          "node has shape {}".format(i, node_name, input_shape)
            rank = input_shape.size
            # shape alignment
            if rank != 1 and ((i == 0 and transpose_a) or
                              (i == 1 and transpose_b)):
                input_shape[-2], input_shape[-1] = input_shape[-1], input_shape[-2]
            if rank == 1:
                input_shape = shape_insert(input_shape, int(i == 1), 1)

            max_shape_length = max(input_shapes[0].size, input_shapes[1].size)
            input_shape = shape_insert(input_shape, 0, [1] *
                                       (max_shape_length - input_shape.size))
            transformed_shapes.append(input_shape)

        A_shape = shape_array(transformed_shapes[0])
        B_shape = shape_array(transformed_shapes[1])

        assert A_shape.size == B_shape.size, \
            "Shapes were not aligned by length for MatMul `{}`. Shapes: `{}`".format(node_name, transformed_shapes)

        # batch broadcasting
        batch_len = A_shape.size - 2
        for i in range(batch_len):
            if A_shape[i] != B_shape[i]:
                if A_shape[i] == 1:
                    A_shape[i] = B_shape[i]
                if B_shape[i] == 1:
                    B_shape[i] = A_shape[i]

        assert compatible_shapes(A_shape[:-2], B_shape[:-2]), \
            "MatMul input shapes are incorrect. BATCH_DIMs are not equal. Node: {}. Aligned shapes: {}" \
            "".format(node_name, transformed_shapes)

        return A_shape, B_shape
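The alignment steps mirror numpy matmul conventions: a rank-1 A becomes a row and a rank-1 B becomes a column, the transpose flags swap the two innermost dims, and the shorter shape is left-padded with 1s before the batch dims are broadcast. A self-contained sketch of those rules on plain Python lists:

    def align_matmul_shapes(a, b, transpose_a=False, transpose_b=False):
        a, b = list(a), list(b)
        # rank-1 promotion: A becomes a row [1, K], B becomes a column [K, 1]
        if len(a) == 1:
            a = [1] + a
        elif transpose_a:
            a[-2], a[-1] = a[-1], a[-2]
        if len(b) == 1:
            b = b + [1]
        elif transpose_b:
            b[-2], b[-1] = b[-1], b[-2]
        # left-pad the shorter shape with 1s so both shapes have the same rank
        rank = max(len(a), len(b))
        a = [1] * (rank - len(a)) + a
        b = [1] * (rank - len(b)) + b
        # broadcast the batch dimensions (all but the last two)
        for i in range(rank - 2):
            if a[i] == 1:
                a[i] = b[i]
            if b[i] == 1:
                b[i] = a[i]
            assert a[i] == b[i], 'batch dims are not broadcastable'
        return a, b

    print(align_matmul_shapes([5, 3, 4], [4, 6]))                  # ([5, 3, 4], [5, 4, 6])
    print(align_matmul_shapes([3, 4], [6, 4], transpose_b=True))   # ([3, 4], [4, 6])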
Example #8
    def normalize_outputs(node: Node):
        if node.out_port(0).disconnected():
            output = Result(
                node.graph, {
                    'name': node.name + '/Result_port_0/',
                    'keep_output_port':
                    node.has_and_set('remove_values_output')
                }).create_node()
            node.out_port(0).get_connection().set_destination(
                output.in_port(0))

        if node.out_port(1).disconnected():
            output = Result(
                node.graph, {
                    'name': node.name + '/Result_port_1/',
                    'keep_output_port':
                    node.has_and_set('remove_values_output')
                }).create_node()
            node.out_port(1).get_connection().set_destination(
                output.in_port(0))
Example #9
    def priorbox_clustered_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape
        num_ratios = len(node.width)

        if node.has_and_set('V10_infer'):
            assert node.in_node(0).value is not None
            node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4])
        else:
            res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
            node.out_port(0).data.set_shape([1, 2, res_prod])
Example #10
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        loc_shape = node.in_port(0).data.get_shape()
        conf_shape = node.in_port(1).data.get_shape()
        prior_boxes_shape = node.in_port(2).data.get_shape()

        if loc_shape is None or conf_shape is None or prior_boxes_shape is None:
            raise Error(
                'Shapes for the Detection Output node "{}" are not defined'.
                format(node_name))

        prior_size = 4
        if node.has('normalized') and not node.normalized:
            prior_size = 5

        if is_fully_defined(prior_boxes_shape[-1]) and prior_boxes_shape[-1] % prior_size != 0:
            raise Error(
                'Amount of confidences "{}" is not divisible by {} for node "{}"'
                ''.format(prior_boxes_shape[-1], prior_size, node_name))

        num_priors = prior_boxes_shape[-1] // prior_size
        if not node.has_valid('keep_top_k') or node.keep_top_k == -1:
            node['keep_top_k'] = num_priors

        num_classes = conf_shape[-1] // num_priors
        num_loc_classes = num_classes
        if node.has_and_set('share_location') and node.share_location:
            num_loc_classes = 1

        if not compatible_dims(num_priors * num_loc_classes * 4,
                               loc_shape[-1]):
            raise Error(
                'Locations and prior boxes shapes mismatch: "{}" vs "{}" for node "{}"'
                ''.format(loc_shape, prior_boxes_shape, node_name))

        if not node.variance_encoded_in_target and not compatible_dims(
                prior_boxes_shape[-2], 2):
            raise Error(
                'The "-2" dimension of the prior boxes must be 2 but it is "{}" for node "{}".'
                ''.format(prior_boxes_shape[-2], node_name))

        if is_fully_defined(conf_shape[-1]) and is_fully_defined(num_priors) and \
                conf_shape[-1] % num_priors != 0:
            raise Error(
                'Amount of confidences "{}" is not divisible by amount of priors "{}" for node "{}".'
                ''.format(conf_shape[-1], num_priors, node_name))

        node.out_port(0).data.set_shape(
            [1, 1, conf_shape[0] * node.keep_top_k, 7])

        # the line below is needed for the TF framework so the MO will not change the layout
        node.graph.node[node.out_node(0).id]['nchw_layout'] = True
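The shape arithmetic is easiest to follow with concrete numbers; the values below correspond to the classic SSD300/VOC configuration and are purely illustrative:

    prior_size = 4                             # 5 when the priors are not normalized
    priors_len = 34928                         # last dim of the prior boxes input
    conf_len = 183372                          # last dim of the confidences input
    num_priors = priors_len // prior_size      # 8732
    num_classes = conf_len // num_priors       # 21 (20 VOC classes + background)
    keep_top_k = 200
    print([1, 1, 1 * keep_top_k, 7])           # output shape for batch size 1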
Example #11
    def normalize_outputs(node: Node):
        if node.out_port(0).disconnected():
            output = Result(
                node.graph, {
                    'name': node.name + '/Result_port_0/',
                    'keep_output_port':
                    node.has_and_set('remove_values_output')
                }).create_node()
            node.out_port(0).get_connection().set_destination(
                output.in_port(0))

        # we check port existing to support MaxPool_1 with only 1 output port and MaxPool_8 with 2 output ports
        if node.has_port('out', 1) and node.out_port(1).disconnected():
            output = Result(
                node.graph, {
                    'name': node.name + '/Result_port_1/',
                    'keep_output_port':
                    node.has_and_set('remove_values_output')
                }).create_node()
            node.out_port(1).get_connection().set_destination(
                output.in_port(0))
Example #12
def determine_data_type(node: Node):
    """
    Tries to determine the data type of the node. The input node can be either a data node or an op node. If the data
    type of the node is unknown, the first parent of the node is checked recursively.
    :param node: node to determine data type.
    :return: data type of the node output in the numpy format.
    """
    if node.has_and_set('data_type'):
        return node.data_type
    if node.has_and_set('kind') and node.kind == 'op':
        if node.has_and_set('pb'):
            if 'dtype' in node.pb.attr:
                return tf_dtype_extractor(node.pb.attr['dtype'].type)
            if 'T' in node.pb.attr:
                return tf_dtype_extractor(node.pb.attr['T'].type)
    if node.has_and_set('kind') and node.kind == 'data':
        if 'value' in node and node.value is not None:
            return node.value.dtype
    if len(node.in_nodes()) != 0:  # try to guess data type from the first parent
        return determine_data_type(node.in_node(0))
    log.error('Failed to determine data type for node "{}"'.format(node.name))
    return None
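The recursion is a straightforward walk up the graph: answer locally when the node carries a dtype (an explicit attribute, a TF protobuf field, or a constant value), otherwise ask the first parent. A toy sketch of the same fallback chain over a plain dict-based graph (all names hypothetical):

    import numpy as np

    # hypothetical toy graph: a node may carry 'data_type', 'value' and a 'parent'
    toy_graph = {
        'const':  {'value': np.zeros(4, dtype=np.float16)},
        'relu':   {'parent': 'const'},
        'output': {'parent': 'relu'},
    }

    def determine_dtype(graph, name):
        node = graph[name]
        if 'data_type' in node:               # explicitly annotated
            return node['data_type']
        if node.get('value') is not None:     # deduce from a constant value
            return node['value'].dtype
        if 'parent' in node:                  # fall back to the first parent
            return determine_dtype(graph, node['parent'])
        return None

    print(determine_dtype(toy_graph, 'output'))   # float16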
Example #13
def multi_box_prior_infer_mxnet(node: Node):
    v10 = node.has_and_set('V10_infer')
    data_H, data_W = node.in_node(0).value if v10 else node.in_node(0).shape[2:]

    num_ratios = len(node.aspect_ratio)
    num_priors = len(node.min_size) + num_ratios - 1
    if v10:
        node.out_node(0).shape = shape_array(
            [2, data_H * data_W * num_priors * 4])
    else:
        node.out_node(0).shape = shape_array(
            [1, 2, data_H * data_W * num_priors * 4])
Example #14
    def is_nhwc_to_nchw_transpose_needed(node: Node):
        """
        The function checks whether it is necessary to insert a Transpose from NHWC to NCHW after the node.
        The transpose is needed when all of the following conditions are met:
         1. The node is marked with the 'reinterp_shape' attribute
         2. The node is *not* marked as generating output in the correct layout (which implicitly implies that the
            output port is 0)
         3. The output shape rank is not less than 4
         4. The node is not a part of a shape sub-graph (layout permutation is handled separately for such a sub-graph)
        :param node: node to check
        :return: result of the check
        """
        return node.has_and_set('reinterp_shape') and \
            not is_output_data_in_correct_layout(node, 0) and \
            len(node.out_port(0).data.get_shape()) >= 4 and \
            all([port.data.get_value() is None for port in node.out_ports().values() if not port.disconnected()])
Example #15
    def infer(node: Node):
        assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) == 1
        inputs = [node.in_node(i) for i in range(5)]
        x, input_low, input_high, output_low, output_high = inputs
        assert x.has_valid('shape')
        # TODO Check all inputs[1..4] shapes are broadcastable to inputs[0] shape
        assert all([broadcastable(inputs[i].shape, inputs[0].shape) for i in range(1, 5)]), \
            "Not all shapes from FakeQuantize inputs can be broadcasted to input[0] for node {}".format(
                node.soft_get('name'))
        node.out_node().shape = x.shape.copy()

        if all([node.in_node(i).has_valid('value') for i in range(5)]):
            x, input_low, input_high, output_low, output_high = \
                [np.array(np.broadcast_to(node.value, x.value.shape), dtype=np.float32) for node in inputs]

            assert node.has_valid('levels')
            assert isinstance(node.levels, int)

            underflow_mask = x <= input_low
            overflow_mask = x > input_high
            # pylint: disable=assignment-from-no-return
            middle_mask = np.logical_not(
                np.logical_or(underflow_mask, overflow_mask))

            def middle_part(x, input_low, input_high, output_low, output_high):
                return round_half_up((x - input_low) / (input_high - input_low) * (node.levels - 1)) / \
                    (node.levels - 1) * (output_high - output_low) + output_low

            output = np.zeros_like(x)
            # pylint: disable=unsupported-assignment-operation
            output[middle_mask] = middle_part(
                x[middle_mask],
                input_low[middle_mask],
                input_high[middle_mask],
                output_low[middle_mask],
                output_high[middle_mask],
            )

            # pylint: disable=unsupported-assignment-operation
            output[overflow_mask] = output_high[overflow_mask]
            # pylint: disable=unsupported-assignment-operation
            output[underflow_mask] = output_low[underflow_mask]

            if not node.has_and_set('stop_value_propagation'):
                node.out_node().value = output
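The middle_part closure is the standard FakeQuantize formula: in-range values are snapped to one of `levels` evenly spaced steps and rescaled to the output range, while out-of-range values saturate. A compact numpy reference of the same formula, using np.round (round-half-to-even) where the original uses a round-half-up helper:

    import numpy as np

    def fake_quantize(x, in_low, in_high, out_low, out_high, levels):
        x = np.asarray(x, dtype=np.float32)
        # snap in-range values to one of `levels` steps, then rescale to the output range
        q = np.round((x - in_low) / (in_high - in_low) * (levels - 1)) / (levels - 1)
        y = q * (out_high - out_low) + out_low
        # saturate out-of-range values to the output bounds
        return np.where(x <= in_low, out_low, np.where(x > in_high, out_high, y))

    print(fake_quantize([-1.0, 0.1, 0.5, 2.0], 0.0, 1.0, 0.0, 1.0, levels=256))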
Example #16
    def replace_op(self, graph: Graph, node: Node):
        if node.has_and_set('inputs_preprocessed'):
            log.debug('Node "{}" has already been preprocessed'.format(
                node.soft_get('name')))
            return []
        # reshape tensor with batch indices to 2d
        unsqueeze_node = create_op_node_with_second_input(
            graph, Unsqueeze, int64_array([1]),
            {'name': node.name + '/Unsqueeze'}, node.in_node(2))

        convert_node = Cast(
            graph, {
                'name':
                unsqueeze_node.name + '/ToFloat',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()

        convert_node.in_port(0).connect(unsqueeze_node.out_port(0))

        concat_op = Concat(
            graph, {
                'axis': 1,
                'name': node.name + '/concat_batch_indices_and_boxes',
                'in_ports_count': 2
            })
        concat_node = concat_op.create_node([convert_node, node.in_node(1)])

        # do not remove edge with crop_size because it is needed in the partial infer
        graph.remove_edge(node.in_node(1).id, node.id)

        # the input to CropAndResize contains box coordinates in the YXYX layout, but the IE ROIPooling layer expects
        # coordinates in the XYXY layout, so a convolution is added here to swap the coordinates
        swapped_box_coordinates_node = add_convolution_to_swap_xy_coordinates(
            graph, concat_node, 5)

        # reshape locations tensor to 2D so it could be passed to Eltwise which will be converted to ScaleShift
        reshape_2d_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([-1, 5]),
            dict(name=swapped_box_coordinates_node.id + '/reshape_2d_'),
            swapped_box_coordinates_node)
        graph.create_edge(reshape_2d_node, node, 0, 1)

        # do not replace any output edge
        return []
Example #17
def reverse_infer(graph: Graph, nodes: list):
    nodes = reversed(nodes)
    debug_logger = log.getLogger().isEnabledFor(log.DEBUG)
    for n in nodes:
        node = Node(graph, n)
        if node.has_and_set('reverse_infer'):
            log.debug("Executed reverse infer for node '{}'".format(
                node.soft_get('name', node.id)))
            node.reverse_infer(node)

            if debug_logger:
                log.debug('-' * 20)
                log.debug('Reverse infer for {}'.format(node.soft_get('name')))
                log.debug('Op: {}'.format(node.soft_get('op')))
                log.debug('Outputs:')
                log_debug_dict(node.out_nodes(), 'outputs')

                log.debug('Inputs:')
                log_debug_dict(node.in_nodes(), 'inputs')
Example #18
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        dst_type = node.soft_get('dst_type', None)

        assert dst_type is not None, \
            'Destination type of "Cast" operation should be extracted earlier, but it`s not for node: ' + node_name

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None
        node.out_port(0).data.set_shape(input_shape)

        value = node.in_port(0).data.get_value()
        if value is None or node.has_and_set('stop_value_propagation'):
            return

        if dst_type in [packed_U4, packed_I4]:  # custom types conversion
            Cast.custom_type_casting_and_packing(node, value, dst_type)
        else:
            node.out_port(0).data.set_value(
                Cast.helper_value_propagation(node_name, value, dst_type))
Example #19
    def priorbox_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape

        # calculate all different aspect_ratios (the first one is always 1)
        # in aspect_ratio 1/x values will be added for all except 1 if flip is True
        ar_seen = [1.0]
        ar_seen.extend(node.aspect_ratio.copy())
        if node.flip:
            for s in node.aspect_ratio:
                ar_seen.append(1.0 / s)

        ar_seen = np.unique(mo_array(ar_seen).round(decimals=6))

        num_ratios = 0
        if len(node.min_size) > 0:
            num_ratios = len(ar_seen) * len(node.min_size)

        if node.has_valid('fixed_size') and len(node.fixed_size) > 0:
            num_ratios = len(ar_seen) * len(node.fixed_size)

        if node.has_valid('density') and len(node.density) > 0:
            for d in node.density:
                if node.has_valid('fixed_ratio') and len(node.fixed_ratio) > 0:
                    num_ratios = num_ratios + len(
                        node.fixed_ratio) * (pow(d, 2) - 1)
                else:
                    num_ratios = num_ratios + len(ar_seen) * (pow(d, 2) - 1)

        num_ratios = num_ratios + len(node.max_size)

        if node.has_and_set('V10_infer'):
            assert node.in_node(0).value is not None
            node.out_port(0).data.set_shape(
                [2, np.prod(node.in_node(0).value) * num_ratios * 4])
        else:
            res_prod = data_shape[get_height_dim(layout, 4)] * \
                data_shape[get_width_dim(layout, 4)] * num_ratios * 4
            node.out_port(0).data.set_shape([1, 2, res_prod])
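The counting rule is: one box per (size, aspect ratio) pair, where flip adds the reciprocal of each ratio, duplicates are dropped, and each max_size contributes one extra box. A worked numeric check for a typical SSD-style layer (the attribute values are illustrative):

    import numpy as np

    def count_priors(min_size, max_size, aspect_ratio, flip=True):
        ar_seen = [1.0] + list(aspect_ratio)
        if flip:
            ar_seen += [1.0 / a for a in aspect_ratio]
        ar_seen = np.unique(np.round(ar_seen, decimals=6))
        return len(ar_seen) * len(min_size) + len(max_size)

    # ratios {2, 3} with flip give ar_seen = {1/3, 1/2, 1, 2, 3}: 5 * 1 + 1 = 6 priors
    print(count_priors(min_size=[30.0], max_size=[60.0], aspect_ratio=[2.0, 3.0]))   # 6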
Example #20
def reverse_infer(graph: Graph, nodes: list):
    nodes = reversed(nodes)
    debug_logger = log.getLogger().isEnabledFor(log.DEBUG)
    for n in nodes:
        node = Node(graph, n)
        if node.has_and_set('reverse_infer'):
            log.debug("Executed reverse infer for node '{}'".format(
                node.soft_get('name', node.id)))
            node.reverse_infer(node)

            if debug_logger:
                log.debug('-' * 20)
                log.debug('Reverse infer for {}'.format(node.soft_get('name')))
                log.debug('Op: {}'.format(node.soft_get('op')))
                log.debug('Outputs:')
                log_debug_dict(node.out_nodes(), 'outputs')

                log.debug('Inputs:')
                log_debug_dict(node.in_nodes(), 'inputs')

    parameters_with_no_shape = []
    for node in graph.get_op_nodes(op='Parameter'):
        if not node.has_valid('shape'):
            parameters_with_no_shape.append(node)

    if len(parameters_with_no_shape) == 0:
        return

    parameters_names = ', '.join("'{}'".format(node.soft_get('name', node.id))
                                 for node in parameters_with_no_shape)
    raise Error(
        "Model Optimizer is unable to deduce input shapes for the following Parameter nodes: {}. "
        "Please use cli options --input or --input_shape to set model input shape."
        .format(parameters_names))
Example #21
    def replace_op(self, graph: Graph, node: Node):
        node_name = node.soft_get('name', node.id)
        # check if we have dropout
        input_port = node.in_port(0)
        if node.has_and_set('use_dropout'):
            split_dropout = AttributedVariadicSplit(graph,
                                                    {'name': node_name + '/split_dropout',
                                                     'size_splits': int64_array([-1, 1, 1, 1]),
                                                     'axis': int64_array(1)}).create_node()
            input_port.get_connection().set_destination(split_dropout.in_port(0))
            input_port = split_dropout.out_port(0)
            i_drop_scale = split_dropout.out_port(1)
            f_drop_scale = split_dropout.out_port(2)
            o_drop_scale = split_dropout.out_port(3)

        # split input to (i_part, f_part, c_part, o_part, ct_1)
        split_node = create_op_with_const_inputs(graph, Split, {1: np.int64(1)},
                                                 {'name': node_name + '/split_lstm_input',
                                                  'num_splits': 5})
        input_port.get_connection().set_destination(split_node.in_port(0))

        i_part = split_node.out_port(0)
        f_part = split_node.out_port(1)
        c_part = split_node.out_port(2)
        o_part = split_node.out_port(3)
        ct_1 = split_node.out_port(4)

        # i_t = Sigmoid(i_part + w_ic*ct_1)
        i_scale_attrs = {'name': node_name + '/i_scaleshift',
                         'bias_term': False}
        i_scale = ScaleShiftOp(graph, i_scale_attrs).create_node()
        input_as_const(i_scale, i_scale_attrs, 1, 'weights', node.i_weights)
        ct_1.connect(i_scale.in_port(0))

        sum_i_c = Add(graph, {'name': node_name + '/sum_i_c_'}).create_node()
        i_part.connect(sum_i_c.in_port(0))
        i_scale.out_port(0).connect(sum_i_c.in_port(1))

        i_sigmoid = Sigmoid(graph, {'name': node_name + '/i_sigmoid'}).create_node()
        sum_i_c.out_port(0).connect(i_sigmoid.in_port(0))

        if node['use_dropout']:
            mul_dropout_i = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_i'}).create_node()
            mul_dropout_i.in_port(0).connect(i_sigmoid.out_port(0))
            mul_dropout_i.in_port(1).connect(i_drop_scale)
            i_sigmoid = mul_dropout_i

        # f_t = Sigmoid(f_part + w_fc*ct_1)
        f_scale_attrs = {'name': node_name + '/f_scaleshift',
                         'bias_term': False}
        f_scale = ScaleShiftOp(graph, f_scale_attrs).create_node()
        input_as_const(f_scale, f_scale_attrs, 1, 'weights', node.f_weights)
        ct_1.connect(f_scale.in_port(0))

        sum_f_c = Add(graph, {'name': node_name + '/sum_f_c_'}).create_node()
        f_part.connect(sum_f_c.in_port(0))
        f_scale.out_port(0).connect(sum_f_c.in_port(1))

        f_sigmoid = Sigmoid(graph, {'name': node_name + '/f_sigmoid'}).create_node()
        sum_f_c.out_port(0).connect(f_sigmoid.in_port(0))

        if node['use_dropout']:
            mul_dropout_f = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_f'}).create_node()
            mul_dropout_f.in_port(0).connect(f_sigmoid.out_port(0))
            mul_dropout_f.in_port(1).connect(f_drop_scale)
            f_sigmoid = mul_dropout_f

        # c_t = f_t*ct_1 + i_t * tanh(c_part)
        c_tanh = Tanh(graph, {'name': node_name + '/c_tanh'}).create_node()
        c_part.connect(c_tanh.in_port(0))

        prod_i_c_tanh = Mul(graph, {'name': node_name + '/prod_i_c_tanh_'}).create_node()
        i_sigmoid.out_port(0).connect(prod_i_c_tanh.in_port(0))
        c_tanh.out_port(0).connect(prod_i_c_tanh.in_port(1))

        prod_f_ct_1 = Mul(graph, {'name': node_name + '/prod_f_ct_1_'}).create_node()
        f_sigmoid.out_port(0).connect(prod_f_ct_1.in_port(0))
        ct_1.connect(prod_f_ct_1.in_port(1))

        sum_f_i = Add(graph, {'name': node_name + '/sum_f_i_'}).create_node()
        prod_f_ct_1.out_port(0).connect(sum_f_i.in_port(0))
        prod_i_c_tanh.out_port(0).connect(sum_f_i.in_port(1))

        #  o_t = Sigmoid(o_part + w_oc*c_t)
        o_scale_attrs = {'name': node_name + '/o_scaleshift',
                         'bias_term': False}
        o_scale = ScaleShiftOp(graph, o_scale_attrs).create_node()
        input_as_const(o_scale, o_scale_attrs, 1, 'weights', node.o_weights)
        sum_f_i.out_port(0).connect(o_scale.in_port(0))

        sum_o_c = Add(graph, {'name': node_name + '/sum_o_c_'}).create_node()
        o_part.connect(sum_o_c.in_port(0))
        o_scale.out_port(0).connect(sum_o_c.in_port(1))

        o_sigmoid = Sigmoid(graph, {'name': node_name + '/o_sigmoid'}).create_node()
        sum_o_c.out_port(0).connect(o_sigmoid.in_port(0))

        if node['use_dropout']:
            mul_dropout_o = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_o'}).create_node()
            mul_dropout_o.in_port(0).connect(o_sigmoid.out_port(0))
            mul_dropout_o.in_port(1).connect(o_drop_scale)
            o_sigmoid = mul_dropout_o

        # m_t = o_t * Tanh(c_t)
        c_t_tanh = Tanh(graph, {'name': node_name + '/c_t_tanh'}).create_node()
        sum_f_i.out_port(0).connect(c_t_tanh.in_port(0))

        prod_o_c_t_tanh = Mul(graph, {'name': node_name + '/prod_o_c_t_tanh_'}).create_node()
        o_sigmoid.out_port(0).connect(prod_o_c_t_tanh.in_port(0))
        c_t_tanh.out_port(0).connect(prod_o_c_t_tanh.in_port(1))

        # add concat to create 1 output
        concat = Concat(graph, {'name': node_name + '/concat_c_m'}).create_node()
        concat.add_sequence_of_ports('in', range(2))
        sum_f_i.out_port(0).connect(concat.in_port(0))
        prod_o_c_t_tanh.out_port(0).connect(concat.in_port(1))

        return [concat.id]
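The sub-graph assembled above is the standard peephole LSTM non-linearity, and the equations in the comments are easier to verify in plain numpy. A reference sketch of the same arithmetic, ignoring the optional dropout scaling (names are illustrative; inputs are [batch, hidden] arrays):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def lstm_nonlinearity_step(i_part, f_part, c_part, o_part, ct_1, w_ic, w_fc, w_oc):
        i_t = sigmoid(i_part + w_ic * ct_1)         # input gate with peephole from c_{t-1}
        f_t = sigmoid(f_part + w_fc * ct_1)         # forget gate with peephole from c_{t-1}
        c_t = f_t * ct_1 + i_t * np.tanh(c_part)    # new cell state
        o_t = sigmoid(o_part + w_oc * c_t)          # output gate with peephole from c_t
        m_t = o_t * np.tanh(c_t)                    # cell output
        return np.concatenate([c_t, m_t], axis=1)   # single concatenated output, as above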
Example #22
def eltwise_infer(node: Node, op=None, **kwargs):
    def broadcast_dims(dim1, dim2):
        if dim1 is not dynamic_dimension and dim2 is not dynamic_dimension:
            mind = min(dim1, dim2)
            maxd = max(dim1, dim2)
            if mind == 1:
                return maxd
            elif mind != maxd:
                raise Error('Input shapes mismatch for node {}: {}'.format(
                    node_name, shapes))
            return mind
        elif dim1 is dynamic_dimension and dim2 is dynamic_dimension:
            return dynamic_dimension_value
        elif dim1 is dynamic_dimension and dim2 is not dynamic_dimension:
            return broadcast_dims(dim2, dim1)
        else:  # dim1 is static, dim2 is dynamic
            if dim1 != 1:
                return dim1
            else:
                return dim2

    raw_inputs = [
        (inp, attr) for inp, attr in node.get_sorted_inputs()
        if 'control_flow_edge' not in attr or not attr['control_flow_edge']
    ]
    shapes = [node.graph.node[inp]['shape'] for inp, attr in raw_inputs]
    values = [node.graph.node[inp]['value'] for inp, attr in raw_inputs]
    node_name = node.soft_get('name', node.id)

    if any([s is None for s in shapes]):
        raise Error(
            'One of the input shapes for node "{}" is None'.format(node_name))

    max_dims = None
    for id, s in enumerate(shapes):
        if max_dims is None or len(s) > max_dims:
            max_dims = len(s)

    # Make all input shapes of the same size by adding 1's
    axis = node.axis if node.has_valid('axis') else None
    for id, item in enumerate(zip(shapes, values)):
        shape, value = item
        if len(shape) != max_dims and len(shape) > 0 and axis is not None:
            new_shape = shape

            # Extend shape with 1's
            for cnt in range(axis + len(shape), max_dims):
                new_shape = np.ma.append(new_shape, 1)

            shapes[id] = new_shape

            # Reshape value to correctly calculate output shape
            if values[id] is not None:
                values[id] = np.ma.reshape(values[id], new_shape)

    extended_shapes = [
        np.ma.concatenate((np.ma.ones(max_dims - len(s), dtype=np.int64), s))
        for s in shapes
    ]
    output_shape = extended_shapes[0]
    for si in range(1, len(extended_shapes)):
        for ei in range(max_dims):
            output_shape[ei] = broadcast_dims(output_shape[ei],
                                              extended_shapes[si][ei])

    node.out_port(0).data.set_shape(output_shape)

    if node.has_and_set('stop_value_propagation'):
        return

    if op is None or any([v is None for v in values]):
        return

    if len(values) <= 2:
        node.out_port(0).data.set_value(op(*values, **kwargs))
    else:
        node.out_port(0).data.set_value(values[0])
        for i in range(len(values) - 1):
            node.out_port(0).data.set_value(
                op(node.out_node().value, values[i + 1]))
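broadcast_dims extends numpy-style pairwise broadcasting with a dynamic-dimension sentinel: 1 broadcasts to anything, equal static dims pass through, and a dynamic dim only loses to a static dim other than 1. A standalone sketch using None as the dynamic marker:

    def broadcast_dims(d1, d2):
        # None marks a dynamic (unknown) dimension
        if d1 is not None and d2 is not None:
            if min(d1, d2) == 1:
                return max(d1, d2)    # 1 broadcasts to the other dim
            if d1 != d2:
                raise ValueError('shape mismatch: {} vs {}'.format(d1, d2))
            return d1
        if d1 is None and d2 is None:
            return None               # still unknown
        static = d1 if d1 is not None else d2
        # a static dim other than 1 pins the result; a static 1 stays dynamic
        return static if static != 1 else None

    assert broadcast_dims(1, 7) == 7
    assert broadcast_dims(5, 5) == 5
    assert broadcast_dims(None, 3) == 3
    assert broadcast_dims(None, 1) is None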
Example #23
    def split_path_to_simple_tracks(graph, path):
        # Split a complex path into simple linear tracks.
        # In a path, an If node is followed by a list with 2 sub-lists, one per branch. This function splits such a
        # path into 2 tracks: one for each sub-list, each with a linear structure.
        # The number of tracks obtained from a path is 2 * the number of If operations in the path.
        # A track is a list of pairs with 2 fields: the node on the current path and its corresponding graph
        # Example:
        # input path : [loop_1, loop_2, if_1, [[loop3_1, node_1], [node_2]]]
        # output track: [{'nodes': [loop_1, loop_2, if_1, loop3_1, node_1],
        #                 'graphs':[graph, loop_1.body, loop_2.body, if.then_graph, loop3_1.body]},
        #                {'nodes': [loop_1, loop_2, if_1, node_2],
        #                 'graphs':[graph, loop_1.body, loop_2.body, if.else_graph]}]

        # structure to save tracks
        # list with tracks, each track is list of pairs {'node', 'graph'}
        paths_nodes_graphs = list()
        paths_nodes_graphs.append([])
        # stack for sub-graphs that will be traversed in future
        future_graphs_stack = [graph]
        # index for track that we currently fill
        track_idx = 0
        # save lists that were started but not finished during processing
        lists_stack = [{'list': path, 'pos': -1}]
        while len(lists_stack) != 0:
            cur_list_pos = lists_stack.pop(-1)
            # current list to process
            cur_list = cur_list_pos['list']
            # index in current list/sub-list
            list_idx = cur_list_pos['pos'] + 1
            while list_idx < len(cur_list):
                el = cur_list[list_idx]
                if isinstance(el, (list, np.ndarray)):
                    lists_stack.append({'list': cur_list, 'pos': list_idx})
                    # if we have previous node non-list then current sublist is for If node
                    # and new tracks should be added for sub-graphs (the first subgraph will continue current track)
                    if list_idx != 0 and isinstance(cur_list[list_idx - 1], str):
                        for i in range(len(el) - 1):
                            # copy all nodes from existing track to new one
                            paths_nodes_graphs.append(paths_nodes_graphs[-1][:])
                    # new sublist started, so reset index
                    cur_list = el
                    list_idx = 0
                else:
                    assert isinstance(el, str)
                    cur_graph = future_graphs_stack.pop(-1)
                    step_node = Node(cur_graph, el)
                    paths_nodes_graphs[track_idx].append({'node': step_node, 'graph': cur_graph})

                    # if node is not last, check that next node will be on current track or not
                    if list_idx != len(cur_list) - 1:
                        # so detect if we are in sublist with branches for If
                        # then in stack sublist is not the first node of list
                        # and have previous node with If operation name
                        if len(lists_stack) != 0 and lists_stack[-1]['pos'] != 0 and \
                                isinstance(lists_stack[-1]['list'][lists_stack[-1]['pos']-1], str):
                            # switch to next track
                            if list_idx != len(cur_list) - 1:
                                track_idx += 1
                        else:
                            assert step_node.has_and_set('sub_graphs'), "Node without sub-graphs is not last in path"
                            # the first graph should be first in traverse
                            for sub_graphs_name in reversed(step_node['sub_graphs']):
                                future_graphs_stack.append(step_node[sub_graphs_name])
                    list_idx += 1

        return paths_nodes_graphs
Example #24
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_inputs = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_inputs) == 2 and all([i in connected_inputs for i in range(2)]), \
            "Reshape should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_inputs)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None

        new_shape = node.in_port(1).data.get_value()
        assert new_shape is not None, 'Dynamic Reshape second input is not supported. Node {}'.format(
            name)

        assert np.argwhere(new_shape == -1).size <= 1, \
            'Reshape second input should not have several `-1` values set. ' \
            'Node: {}, reshape second input value {}'.format(name, new_shape)

        num_of_input_elements = np.prod(input_shape)
        num_of_output_elements = 1
        for index, x in enumerate(new_shape):
            if x is dynamic_dimension:
                num_of_output_elements = dynamic_dimension_value
            elif x == 0 and node.has_and_set('special_zero'):
                if input_shape[index] is not dynamic_dimension:
                    num_of_output_elements *= input_shape[index]
            elif x != -1:
                num_of_output_elements *= x

        # input_shape = [dynamic, 5, 6], new_shape = [0, -1] => output_shape [dynamic, 30]
        # marker that no dynamic input dimensions or all of them are copied with "0" magic value
        all_dynamic_dimension_are_copied = True
        if not is_fully_defined(input_shape):
            for index, x in enumerate(input_shape):
                if x is dynamic_dimension:
                    if index >= len(new_shape) or new_shape[index] != 0:
                        all_dynamic_dimension_are_copied = False

        undefined_dim = dynamic_dimension
        if num_of_output_elements is not dynamic_dimension and all_dynamic_dimension_are_copied and \
                is_fully_defined(new_shape):
            undefined_dim = num_of_input_elements // num_of_output_elements
        output_shape = []
        for index, x in enumerate(new_shape):
            if x == 0 and node.has_and_set('special_zero'):
                output_shape.append(input_shape[index])
            elif x == -1:
                output_shape.append(undefined_dim)
            else:
                output_shape.append(x)

        # even if the new_shape contains some dynamic values we can calculate the actual value by deducing it from the
        # input shape if it is static: input_shape = [5, 3, 8], new_shape = [4, d] => output_shape = [4, 30]
        if is_fully_defined(input_shape) and not is_fully_defined(new_shape):
            dynamic_indices = np.argwhere(
                [item is dynamic_dimension for item in new_shape])
            num_of_output_elements = 1
            if dynamic_indices.size == 1:
                for index, x in enumerate(new_shape):
                    if x == 0 and node.has_and_set('special_zero'):
                        num_of_output_elements *= input_shape[index]
                    elif x is not dynamic_dimension and x != -1:
                        num_of_output_elements *= x
            assert num_of_input_elements % num_of_output_elements == 0, \
                'Incorrect number of output elements deduced for node {}: '.format(name)
            output_shape[dynamic_indices[0][0]] = num_of_input_elements // num_of_output_elements

        assert not is_fully_defined(input_shape) or not is_fully_defined(output_shape) or \
               np.prod(input_shape) == np.prod(output_shape), \
               "Number of elements in input {} and output {} of reshape node {} mismatch" \
               "".format(input_shape, output_shape, name)

        PermuteInputs().set_input_permutation(node.in_node(1), node,
                                              'output:0', 'shape')

        if node.in_port(0).data.get_value() is not None and is_fully_defined(
                output_shape):
            node.out_port(0).data.set_value(
                node.in_port(0).data.get_value().reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)
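For fully static shapes the special_zero and -1 rules reduce to simple arithmetic: 0 copies the dimension at the same position from the input, and the single -1 absorbs whatever element count remains. A minimal sketch of that static case:

    import numpy as np

    def resolve_reshape(input_shape, new_shape, special_zero=True):
        out = [input_shape[i] if (d == 0 and special_zero) else d
               for i, d in enumerate(new_shape)]
        if -1 in out:
            known = np.prod([d for d in out if d != -1], dtype=np.int64)
            total = np.prod(input_shape, dtype=np.int64)
            assert total % known == 0, 'element counts do not divide evenly'
            out[out.index(-1)] = int(total // known)
        return out

    print(resolve_reshape((2, 5, 6), (0, -1)))   # [2, 30]
    print(resolve_reshape((4, 3, 8), (-1, 6)))   # [16, 6]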
Example #25
    def pool_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                [node.batch_dims[0], node.channel_dims[0]])

        input_spatial_shape = input_shape[node.spatial_dims]

        # Setting default pad and stride attrs in case if None specified
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]
                                       for x in range(len(input_shape))])
        if not node.has_valid('pad_spatial_shape'):
            node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('stride'):
            node['stride'] = int64_array([1 for x in range(len(input_shape))])

        if node.has_and_set('global_pool'):
            node['window'] = np.zeros(len(input_shape), dtype=np.int64)
            node.window[node.spatial_dims] = input_spatial_shape

        if not node.has_valid('dilation'):
            node['dilation'] = np.ones(len(input_shape), dtype=np.float32)

        if not node.has_valid('axis'):
            node['axis'] = 0

        if not node.has_valid('index_element_type'):
            node['index_element_type'] = np.int64

        window_spatial_shape = node.window[node.spatial_dims]
        stride_spatial = node.stride[node.spatial_dims]
        dilation_spatial = node.dilation[node.spatial_dims]
        assert any(stride_spatial), 'Stride can not be zero in node {}'.format(
            node.id)

        if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
            node.pad_spatial_shape, node.output_spatial_shape = tf_window_op_pad_infer(
                input=input_spatial_shape,
                window=window_spatial_shape,
                stride=stride_spatial,
                auto_pad=node.auto_pad,
                dilation=dilation_spatial)
            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:

            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)

            rounding = np.floor
            if node.soft_get('pooling_convention') == 'full' or node.soft_get(
                    'rounding_type') == 'ceil':
                rounding = np.ceil

            padded_spatial_shape = input_spatial_shape + pad_spatial_shape - (
                (window_spatial_shape - 1) * dilation_spatial + 1)
            if np.any(padded_spatial_shape < 0):
                raise Error("Data after padding has dimension less than window size. "
                            "Possible reason of error is incorrectly specified model input shape(s).")

            output_spatial_shape = shape_array([
                dynamic_dimension_value
                for _ in range(len(padded_spatial_shape))
            ])
            for idx in range(len(padded_spatial_shape)):
                if padded_spatial_shape[idx] is not dynamic_dimension and \
                        stride_spatial[idx] is not dynamic_dimension:
                    output_spatial_shape[idx] = int(
                        rounding(padded_spatial_shape[idx] / stride_spatial[idx])) + 1

            original_pads = mo_array([i[1] for i in node.pad_spatial_shape])

            for i in range(len(input_spatial_shape)):
                if original_pads[i] and (output_spatial_shape[i] - 1) * stride_spatial[i] >= \
                        input_spatial_shape[i] + original_pads[i]:
                    output_spatial_shape[i] -= 1

            node['output_spatial_shape'] = output_spatial_shape

        output_shape = input_shape.copy()
        output_shape[node.spatial_dims] = node.output_spatial_shape
        node.out_port(0).data.set_shape(output_shape)

        if len(node.out_ports()) == 2 and not node.out_port(1).disconnected():
            node.out_port(1).data.set_shape(output_shape)

        if node.has_and_set('pool_method') and node['pool_method'] == 'max':
            node['remove_values_output'] = True

        # Add permute_attrs
        PermuteAttrs.create_permute_attrs(node,
                                          attrs=[('pad', 'input:0'),
                                                 ('stride', 'input:0'),
                                                 ('window', 'input:0'),
                                                 ('spatial_dims', 'input:0'),
                                                 ('dilation', 'input:0')])
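The explicit-padding branch boils down to the classic pooling formula out = rounding((in + pad_total - ((window - 1) * dilation + 1)) / stride) + 1, with floor rounding by default and ceil for the 'full' convention or 'ceil' rounding type. A numeric sketch of the single-dimension case:

    import math

    def pooled_dim(size, window, stride, pad_total=0, dilation=1, ceil_mode=False):
        effective_window = (window - 1) * dilation + 1
        padded = size + pad_total - effective_window
        assert padded >= 0, 'data after padding is smaller than the window'
        rounding = math.ceil if ceil_mode else math.floor
        return int(rounding(padded / stride)) + 1

    print(pooled_dim(224, window=3, stride=2, pad_total=2))    # 112
    print(pooled_dim(7, window=7, stride=1))                   # 1 (global pooling style)
    print(pooled_dim(5, window=2, stride=2, ceil_mode=True))   # 3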
Example #26
def partial_infer(graph: Graph, start_node: str = None):
    """
    Tries to execute constant parts of the graph and deduce as much as possible
    information following the data flow, e.g. calculate and propagate shapes and
    constant values. Partially or completely defined values are stored in data
    nodes (kind='data').
    """
    # We have to turn off strict mode because below we add and remove edges without attributes, which strict mode prohibits
    graph.strict_mode = False
    cycle_nodes = graph.get_nodes_with_attributes(is_cyclic=True)
    cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes]
    ebunch_cyclic = list(
        graph.out_edges(nbunch=cycle_nodes, data=True, keys=True))
    ebunch_reconnected = exit_bound_edges(graph,
                                          sources=cycle_nodes,
                                          end_node_attrs={'op': 'Exit'})
    graph.remove_edges_from(ebunch_cyclic)
    graph.add_edges_from(ebunch_reconnected)

    try:
        nodes = list(nx.topological_sort(graph))
    except:
        raise Error('Graph contains a cycle. Can not proceed. ' +
                    refer_to_faq_msg(97))

    graph.remove_edges_from(ebunch_reconnected)
    graph.add_edges_from(ebunch_cyclic)
    graph.strict_mode = True

    # Mark all nodes as not inferred yet
    if start_node is not None:
        start_index = nodes.index(start_node)
        nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]),
                               name='is_partial_inferred',
                               values=False)
    else:
        nx.set_node_attributes(G=graph,
                               name='is_partial_inferred',
                               values=False)

    nx.set_node_attributes(
        G=graph,
        name='executable',
        values={n: True
                for n in graph.get_nodes_with_attributes(kind='data')})

    # first we infer constant sub-graphs so the reverse infer could use constant values sub-graphs. For example,
    # convolution weights may be reshuffled by some operation in the graph and are not directly consumed by the conv
    # node
    infer_nodes(graph, nodes, True)

    # we may need to deduce shape for Parameter node(s) if it is not defined
    need_reverse_infer = False
    for parameter in graph.get_op_nodes(op='Parameter'):
        if parameter.soft_get('shape', None) is None:
            need_reverse_infer = True

    if need_reverse_infer:
        reverse_infer(graph, nodes)

    infer_nodes(graph, nodes, False)

    not_fully_inferred = graph.get_nodes_with_attributes(
        is_not_fully_inferred=True)
    for n in not_fully_inferred:
        node = Node(graph, n)
        if node.has_and_set('infer'):
            node.infer(node)

    return graph
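The driver is essentially: topologically sort the graph, then call each node's infer function once its parents are resolved. A toy sketch of that control flow with networkx, where each node carries a hypothetical 'infer' callable that maps parent shapes to an output shape:

    import networkx as nx

    g = nx.DiGraph()
    g.add_node('input', shape=(1, 3, 224, 224))
    g.add_node('pool', infer=lambda ins: (ins[0][0], ins[0][1], ins[0][2] // 2, ins[0][3] // 2))
    g.add_node('flatten', infer=lambda ins: (ins[0][0], ins[0][1] * ins[0][2] * ins[0][3]))
    g.add_edge('input', 'pool')
    g.add_edge('pool', 'flatten')

    # process nodes in topological order so every parent shape is known in time
    for n in nx.topological_sort(g):
        node = g.nodes[n]
        if 'infer' in node:
            parent_shapes = [g.nodes[p]['shape'] for p in g.predecessors(n)]
            node['shape'] = node['infer'](parent_shapes)

    print(g.nodes['flatten']['shape'])   # (1, 37632)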