def partial_infer(graph: Graph, start_node: str = None):
    """
    Tries to execute constant parts of the graph and deduce as much as possible
    information following the data flow, e.g. calculate and propagate shapes and
    constant values. Partially or completely defined values are stored in data
    nodes (kind='data').

    :param graph: graph to operate on
    :param start_node: optional node id to start inference from; nodes that are
                       topologically earlier keep their previous inference state
    :return: the same graph with inference attributes filled in
    :raises Error: if the graph contains a cycle that cannot be broken via
                   'Exit'-bound edges
    """
    # We have to turn off strict mode because below we add and remove edges
    # without attributes, which is prohibited in strict mode
    graph.strict_mode = False
    cycle_nodes = graph.get_nodes_with_attributes(is_cyclic=True)
    cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes]
    ebunch_cyclic = list(graph.out_edges(nbunch=cycle_nodes, data=True, keys=True))
    ebunch_reconnected = exit_bound_edges(graph, sources=cycle_nodes, end_node_attrs={'op': 'Exit'})
    # temporarily re-wire cyclic edges to the 'Exit'-bound ones so that a
    # topological order can be computed on an acyclic graph
    graph.remove_edges_from(ebunch_cyclic)
    graph.add_edges_from(ebunch_reconnected)

    try:
        nodes = list(nx.topological_sort(graph))
    except nx.NetworkXUnfeasible:
        # was a bare 'except:' before; catch only the "graph is not a DAG" failure
        raise Error('Graph contains a cycle. Can not proceed. ' + refer_to_faq_msg(97))

    # restore the original (cyclic) edges
    graph.remove_edges_from(ebunch_reconnected)
    graph.add_edges_from(ebunch_cyclic)
    graph.strict_mode = True

    # Mark all nodes as not inferred yet
    if start_node is not None:
        start_index = nodes.index(start_node)
        nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]), name='is_partial_inferred', values=False)
    else:
        nx.set_node_attributes(G=graph, name='is_partial_inferred', values=False)

    nx.set_node_attributes(G=graph, name='executable',
                           values={n: True for n in graph.get_nodes_with_attributes(kind='data')})

    # first we infer constant sub-graphs so the reverse infer could use constant values sub-graphs. For example,
    # convolution weights may be reshuffled by some operation in the graph and are not directly consumed by the conv
    # node
    infer_nodes(graph, nodes, True)

    # we may need to deduce shape for Parameter node(s) if it is not defined
    need_reverse_infer = False
    for parameter in graph.get_op_nodes(op='Parameter'):
        if parameter.soft_get('shape', None) is None:
            need_reverse_infer = True

    if need_reverse_infer:
        reverse_infer(graph, nodes)

    infer_nodes(graph, nodes, False)

    not_fully_inferred = graph.get_nodes_with_attributes(is_not_fully_inferred=True)
    for n in not_fully_inferred:
        node = Node(graph, n)
        # re-run node-level infer for nodes flagged as not fully inferred
        if node.has('infer') and node.infer is not None:
            node.infer(node)

    return graph
def override_placeholder_shapes(graph: Graph, user_shapes: dict, batch=None):
    """
    This function overrides shapes for nodes with 'op' param set to 'Parameter'
    with shapes defined by users (only for inputs without in/out port
    specified). And override batch if batch was specified and shape for input
    is not None.

    :param graph: graph to operate on
    :param user_shapes: dictionary, that represents user defined nodes and shapes
    :param batch: user defined integer value to override batch
    """
    if user_shapes is None:
        # DON'T MOVE UPPER!!! WE NEED TO SET BATCH FIRST
        # user did not specify neither shapes nor inputs, keep models values
        return

    for node_id in graph.get_nodes_with_attributes(kind='op', op='Parameter'):
        node_attrs = graph.node[node_id]
        requested = user_shapes.get(node_id, [])
        # we assume only one specified shape for one input: take the first
        # entry that has no explicit in/out port specification
        shape = next((entry['shape'] for entry in requested
                      if 'in' not in entry and 'out' not in entry), None)
        if shape is not None:
            node_attrs['shape'] = shape
        current_shape = node_attrs['shape']
        if batch is not None and current_shape is not None and len(current_shape) > 0:
            current_shape[0] = batch
def transform_graph(self, graph: Graph, replacement_descriptions):
    """
    Replace the existing Result nodes with RegionYolo + Result sub-graphs
    attached to the producers of the configured YOLO V3 entry points.

    :param graph: graph to operate on
    :param replacement_descriptions: dict from the transformation config file;
           must contain 'entry_points' (names of nodes that feed YOLO Region
           layers) and may contain 'masks' (presumably one mask per entry
           point -- TODO confirm against the config schema)
    :raises Error: if an entry point name is not present in the graph
    """
    # drop all existing Result nodes; new ones are created per entry point below
    graph.remove_nodes_from(graph.get_nodes_with_attributes(op='Result'))
    for i, input_node_name in enumerate(replacement_descriptions['entry_points']):
        if input_node_name not in graph.nodes():
            raise Error(
                'TensorFlow YOLO V3 conversion mechanism was enabled. '
                'Entry points "{}" were provided in the configuration file. '
                'Entry points are nodes that feed YOLO Region layers. '
                'Node with name {} doesn\'t exist in the graph. '
                'Refer to documentation about converting YOLO models for more information.'.format(
                    ', '.join(replacement_descriptions['entry_points']), input_node_name))
        # the RegionYolo is attached to the producer of the entry point node
        last_node = Node(graph, input_node_name).in_node(0)
        op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1, do_softmax=0, nchw_layout=True)
        op_params.update(replacement_descriptions)
        if 'masks' in op_params:
            # each RegionYolo node gets only its own mask; the full 'masks'
            # list must not leak into the node attributes
            op_params['mask'] = op_params['masks'][i]
            del op_params['masks']
        region_layer_node = RegionYoloOp(graph, op_params).create_node([last_node])
        # TODO: do we need change axis for further permutation
        region_layer_node.dim_attrs.remove('axis')
        Result(graph, {'name': region_layer_node.id + '/Result'}).create_node([region_layer_node])
def add_removed_converts(graph: Graph):
    """
    Re-insert Convert (Cast) operations after FP32 Const nodes whose output
    data nodes are marked with 'Insert_Convert_operation_after=True', and
    compress the Const values themselves to FP16 so the data types in the
    graph stay consistent.

    :param graph: graph to operate on
    """
    for data_node_name in graph.get_nodes_with_attributes(Insert_Convert_operation_after=True):
        data_node = Node(graph, data_node_name)
        # Get access to Const node connected to data node
        const_op = data_node.in_node(0)

        # best-effort: skip (with a debug message) nodes that do not match
        # the expected Const/FP32 pattern instead of failing the conversion
        if const_op.type != 'Const':
            logger.debug('Error when try to insert Convert operation after {} with {} type'.\
                format(const_op.soft_get('name'), const_op.soft_get('type')))
            continue

        if const_op.data_type != np.float32:
            logger.debug('Error when try to insert Convert operation after Const: {}'.\
                format(const_op.soft_get('name')))
            continue

        convert_op = Cast(graph, {'dst_type': np.float32,
                                  'name': const_op.name + '/restored_convert',
                                  'stop_value_propagation': True}).create_node()

        # Insert Convert operation after Const operation
        const_op.out_port(0).get_connection().insert_node(convert_op)
        # drop the cached value so it is recomputed by the runtime rather
        # than propagated at conversion time
        convert_op.out_node().value = None

        # Convert Const value to FP16 to make types in graph consistent
        const_op.value, _, _ = convert_blob(const_op.value, np.float16)
        const_op.infer(const_op)
def find_and_replace_pattern(self, graph: Graph):
    """
    For graphs in NHWC layout, wrap each node marked with 'reinterp_shape=True'
    with Transpose operations so the shape re-interpretation keeps the original
    framework semantics: a NC(D)HW->N(D)HWC Transpose is inserted before the
    node when its 4D-5D input is collapsed to ND, and a N(D)HWC->NC(D)HW
    Transpose is inserted after the node when its ND input is expanded to
    4D-5D output.
    """
    # we need to import these functions here to avoid circular dependent imports
    from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input

    if graph.graph['layout'] != 'NHWC':
        # we check it here because this transformation is called explicitly from the pipeline
        return

    # reshape from 4D-5D -> ND. Insert Transpose(NC(D)HW->N(D)HWC) before Reshape
    for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True):
        reinterp_shape_node = Node(graph, reinterp_shape_node_id)
        assert 0 in reinterp_shape_node.in_nodes(), 'Node {} does not have 0 input. \n{}'.format(
            reinterp_shape_node_id, graph.dump_graph_for_graphviz())
        input_shape = reinterp_shape_node.in_node(0).shape
        if self.is_nchw_to_nhwc_transpose_needed(reinterp_shape_node):
            permute_node = create_op_node_with_second_input(
                graph, Transpose, PermuteAttrs().get_nchw_to_nhwc_permutation(len(input_shape)).perm,
                {'name': reinterp_shape_node.in_port(0).get_source().node.name + '/Transpose'})
            reinterp_shape_node.in_port(0).get_connection().insert_node(permute_node)
            # infer the order Const immediately so the Transpose input is ready
            order_const = permute_node.in_port(1).get_source().node
            order_const.infer(order_const)
            # do not infer the Transpose node because it should have input data node in NCHW layout (but currently
            # it is NHWC because data node attributes has not been permuted yet) and produce output in NHWC layout
            # (which is true at this moment)
            permute_node['need_shape_inference'] = False
            # mark the Transpose output data node having correct layout so it's shape will not be permuted
            mark_output_as_in_correct_layout(permute_node, 0)

            # keep the reinterp_shape_node in NHWC layout
            for in_port_id, _ in reinterp_shape_node.in_ports().items():
                mark_input_as_in_correct_layout(reinterp_shape_node, in_port_id)

    # reshape from ND -> 4D-5D. Insert Transpose(N(D)HWC->NC(D)HW) after Reshape
    for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True):
        reinterp_shape_node = Node(graph, reinterp_shape_node_id)
        assert 0 in reinterp_shape_node.out_nodes(), 'Node {} does not have 0 output. \n{}'.format(
            reinterp_shape_node_id, graph.dump_graph_for_graphviz())
        output_shape = reinterp_shape_node.out_node(0).shape
        if self.is_nhwc_to_nchw_transpose_needed(reinterp_shape_node):
            permute_node = create_op_node_with_second_input(
                graph, Transpose, PermuteAttrs().get_nhwc_to_nchw_permutation(len(output_shape)).perm,
                {'name': reinterp_shape_node.id + '/Transpose'})
            reinterp_shape_node.out_port(0).get_connection().insert_node(permute_node)

            # the Reshape and Transpose operations should work in original (NHWC layout) so the Transpose
            # will convert it to the NCHW
            mark_input_as_in_correct_layout(permute_node, 0)
            mark_input_as_in_correct_layout(permute_node, 1)
            # do not set Transpose output data node 'correct_data_layout' attribute so the data node shape will be
            # permuted

            # keep the reinterp_shape_node in NHWC layout
            mark_output_as_in_correct_layout(reinterp_shape_node, 0)
            # port 0 was already handled above; mark the remaining input ports
            for in_port_id in reinterp_shape_node.in_ports().keys():
                if in_port_id:
                    mark_input_as_in_correct_layout(reinterp_shape_node, in_port_id)

            # do not re-infer the Transpose node because it output data node should be in NHWC layout to make the
            # rest of the graph consistent
            permute_node['need_shape_inference'] = False
def replace_sub_graph(self, graph: Graph, match: dict):
    """
    Replace an SSD post-processing sub-graph (box NMS plus the three feeding
    Concat nodes) with a single DetectionOutput operation.

    :param graph: graph to operate on
    :param match: sub-graph match dict; must contain the NMS node under the
           'box_nms' key (node is expected to carry 'topk' and
           'overlap_thresh' attributes)
    """
    box_nms = match['box_nms']
    top_k = box_nms.topk
    nms_threshold = box_nms.overlap_thresh

    ssd_concats = {}
    concat_names = ['ssd_concat1', 'ssd_concat0', 'ssd_concat2']

    # locate the three Concat nodes feeding the post-processing sub-graph.
    # NOTE: the inner loop previously re-used the name 'match', shadowing the
    # 'match' parameter; renamed to keep the argument intact
    for i, concat_match in enumerate(self.concats_pattern):
        for matches in find_pattern_matches(graph, concat_match['nodes'], concat_match['edges'], None, None):
            for matched_node_id in matches:
                if graph.has_node(matched_node_id):
                    n = Node(graph, matched_node_id)
                    if n.op == 'Concat':
                        ssd_concats.update({concat_names[i]: n})
                        break

    assert concat_names[0] in ssd_concats
    assert concat_names[1] in ssd_concats
    assert concat_names[2] in ssd_concats

    # the old Result nodes are dropped; a single new Result is created below
    graph.remove_nodes_from(graph.get_nodes_with_attributes(op='Result'))

    detection_output_node = DetectionOutput(graph, dict(
        name=graph.unique_id() + '/DetectionOutput_',
        top_k=top_k, keep_top_k=top_k, nms_threshold=nms_threshold,
        background_label_id=0, clip=0, decrease_label_id=1,
        code_type="caffe.PriorBoxParameter.CENTER_SIZE", confidence_threshold=0.01,
        share_location=1, variance_encoded_in_target=0, normalized=1)).create_node()

    reshape_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]),
                                                    dict(name=graph.unique_id() + '/DetectionOutput_'))

    # route the confidences (presumably the SoftMax consumer of ssd_concat0 --
    # TODO confirm) through the [0, -1] Reshape into DetectionOutput port 1
    ssd_softmax_node = ssd_concats['ssd_concat0'].out_node().out_node()
    ssd_softmax_node.out_port(0).disconnect()
    ssd_softmax_node.out_port(0).connect(reshape_node.in_port(0))
    reshape_node.out_port(0).connect(detection_output_node.in_port(1))

    ssd_concats['ssd_concat2'].axis = 2
    self.reshape_priorboxes(ssd_concats['ssd_concat2'])

    # box locations -> port 0, prior boxes -> port 2
    ssd_concats['ssd_concat1'].out_port(0).get_connection().set_destination(detection_output_node.in_port(0))
    ssd_concats['ssd_concat2'].out_port(0).get_connection().set_destination(detection_output_node.in_port(2))

    Result(graph, {'name': detection_output_node.id + '/Result'}).create_node([detection_output_node])
def get_node_by_fullname(graph: Graph, name: str) -> Node:
    """
    Look up an op node by its 'fullname' attribute.

    :param graph: graph to search in
    :param name: value of the 'fullname' attribute to match
    :return: single-element list wrapping the first matching Node, or None
             when nothing matches
    """
    found = graph.get_nodes_with_attributes(kind='op', fullname=name)
    if not found:
        return None
    return [Node(graph, found[0])]