def infer(node: Node):
    in_shape = node.in_node().shape
    if in_shape.size != 4:
        raise Error('TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                    'Current input shape is \'{}\''.format(in_shape))

    layout = node.graph.graph['layout']
    N = in_shape[get_batch_dim(layout, 4)]
    H = in_shape[get_height_dim(layout, 4)]
    W = in_shape[get_width_dim(layout, 4)]
    C = in_shape[get_features_dim(layout, 4)]

    block_size = node['block_size']
    if (H is not dynamic_dimension and H % block_size) or (W is not dynamic_dimension and W % block_size):
        raise Error('Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                    'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                    'block_size = {}'.format(in_shape, H, W, block_size))

    out_shape = shape_for_layout(layout,
                                 batch=N,
                                 features=C * (block_size ** 2),
                                 height=H // block_size,
                                 width=W // block_size)

    node.out_port(0).data.set_shape(out_shape)
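
# A minimal standalone sketch of the SpaceToDepth shape arithmetic above, assuming
# static NHWC dimensions. 'space_to_depth_out_shape_nhwc' is a hypothetical helper
# for illustration, not part of the Model Optimizer API: each spatial dimension
# shrinks by 'block_size' while the feature dimension grows by block_size ** 2.
def space_to_depth_out_shape_nhwc(shape, block_size):
    n, h, w, c = shape
    assert h % block_size == 0 and w % block_size == 0
    return [n, h // block_size, w // block_size, c * block_size ** 2]

assert space_to_depth_out_shape_nhwc([1, 8, 8, 3], 2) == [1, 4, 4, 12]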
def regionyolo_infer(node: Node):
    input_shape = node.in_port(0).data.get_shape()
    axis = get_canonical_axis_index(input_shape, node.axis)
    end_axis = get_canonical_axis_index(input_shape, node.end_axis)
    node.axis = axis
    node.end_axis = end_axis
    if node.do_softmax:
        dims_to_flatten = input_shape[axis: end_axis + 1]
        if is_fully_defined(dims_to_flatten):
            flat_dim = np.ma.prod(dims_to_flatten)
        else:
            flat_dim = dynamic_dimension_value
        node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
    else:
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                         batch=input_shape[get_batch_dim(layout, 4)],
                                                         features=(node.classes + node.coords + 1) * len(node.mask),
                                                         height=input_shape[get_height_dim(layout, 4)],
                                                         width=input_shape[get_width_dim(layout, 4)]))
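
# An illustrative sketch of the do_softmax branch above with hypothetical YOLOv2-style
# numbers (standalone numpy, not the operation itself): the dimensions from 'axis'
# through 'end_axis' inclusive are collapsed into a single flat dimension.
import numpy as np

input_shape = [1, 125, 13, 13]   # NCHW; 125 = (20 classes + 4 coords + 1) * 5 anchors
axis, end_axis = 1, 3
flat_dim = int(np.prod(input_shape[axis:end_axis + 1]))
out_shape = [*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]]
assert out_shape == [1, 125 * 13 * 13]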
def infer(node: Node):
    in_shape = node.in_port(0).data.get_shape()
    if in_shape.size != 4:
        raise Error('TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                    'Current input shape is \'{}\''.format(in_shape))

    layout = node.graph.graph['layout']

    N = in_shape[get_batch_dim(layout, 4)]
    H = in_shape[get_height_dim(layout, 4)]
    W = in_shape[get_width_dim(layout, 4)]
    C = in_shape[get_features_dim(layout, 4)]

    block_size = node['block_size']
    if C is not dynamic_dimension and C % (block_size ** 2):
        raise Error('Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                    'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                    'block_size = {}'.format(in_shape, C, block_size))

    out_shape = shape_for_layout(layout,
                                 batch=N,
                                 features=C // (block_size * block_size),
                                 height=H * block_size,
                                 width=W * block_size)

    if is_fully_defined(in_shape) and is_fully_defined(out_shape) and np.prod(in_shape) != np.prod(out_shape):
        raise Error('Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                    ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
    node.out_port(0).data.set_shape(out_shape)
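
# A standalone sketch of the DepthToSpace shape arithmetic, the inverse of the
# SpaceToDepth example above ('depth_to_space_out_shape_nhwc' is a hypothetical
# helper for illustration): features shrink by block_size ** 2 while each spatial
# dimension grows by block_size, so the element count is preserved.
import numpy as np

def depth_to_space_out_shape_nhwc(shape, block_size):
    n, h, w, c = shape
    assert c % (block_size ** 2) == 0
    return [n, h * block_size, w * block_size, c // (block_size ** 2)]

in_shape = [1, 4, 4, 12]
out_shape = depth_to_space_out_shape_nhwc(in_shape, 2)
assert out_shape == [1, 8, 8, 3] and np.prod(in_shape) == np.prod(out_shape)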
def infer(node: Node):
    assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \
        'Wrong input nodes number for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id))
    input_shape = node.in_port(0).data.get_shape()
    name = node.soft_get('name', node.id)
    assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(name)
    assert len(input_shape) == 4, 'ExtractImagePatches operation supports only 4D tensors'

    layout = node.graph.graph['layout']
    N = input_shape[get_batch_dim(layout, 4)]
    C = input_shape[get_features_dim(layout, 4)]

    size_spatial = shape_array(node.sizes)[node.spatial_dims]

    input_spatial_shape = input_shape[node.spatial_dims]
    stride_spatial_shape = node.strides[node.spatial_dims]

    size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1

    pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(input_spatial_shape,
                                                                     size_extent,
                                                                     stride_spatial_shape,
                                                                     node.auto_pad,
                                                                     False)

    out_shape = shape_for_layout(layout,
                                 batch=N,
                                 features=C * np.prod(size_spatial),
                                 height=output_spatial_shape[0],
                                 width=output_spatial_shape[1])

    node.out_port(0).data.set_shape(out_shape)
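
# An illustrative sketch of the window math above for the 'VALID' padding case only
# (a simplified stand-in for tf_window_op_pad_infer, which also handles 'SAME'):
# the dilated window extent is rate * (size - 1) + 1, and the output spatial
# dimension is floor((input - extent) / stride) + 1.
def valid_window_out_dim(input_dim, size, stride, rate):
    extent = rate * (size - 1) + 1
    return (input_dim - extent) // stride + 1

assert valid_window_out_dim(10, 3, 2, 1) == 4   # 10-wide input, 3x3 patches, stride 2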
def roipooling_infer(node: Node):
    """
    Sets the shape of the output node according to the input blobs and node parameters:
    batch is taken from the second input blob, channels from the first one, height and
    width from the 'pooled_h'/'pooled_w' attributes.
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return
    if len(node.in_nodes()) == 4:  # TensorFlow case of CropAndResize operation
        crop_size = node.in_node(3).value
        if crop_size is None:
            log.error('The ROIPooling size is not known for node {}'.format(node.soft_get('name')))
            return
        if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2:
            log.error('The ROIPooling size should have 2 elements for node {}'.format(node.soft_get('name')))
            return
        node.pooled_h = crop_size[0]
        node.pooled_w = crop_size[1]
        node.graph.remove_edge(node.in_node(3).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                     batch=shapes[1][get_batch_dim(layout, 4)],
                                                     features=shapes[0][get_features_dim(layout, 4)],
                                                     height=node.pooled_h,
                                                     width=node.pooled_w))
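
# An illustrative sketch with hypothetical values: in the 4-input CropAndResize case
# the result combines the ROI count from the boxes input, the channel count from the
# image input, and the spatial size from the crop_size constant.
image_shape = [1, 256, 38, 38]   # input 0, NCHW feature map
boxes_shape = [100, 5]           # input 1, one row per ROI
crop_size = [7, 7]               # input 3 constant -> pooled_h, pooled_w
out_shape = [boxes_shape[0], image_shape[1], crop_size[0], crop_size[1]]
assert out_shape == [100, 256, 7, 7]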
def upsample_infer(node: Node):
    node_name = node.soft_get('name', node.id)
    layout = node.graph.graph['layout']
    assert len(layout) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(node_name)

    input_shape = node.in_port(0).data.get_shape()

    if len(node.in_nodes()) == 1:
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]
        assert node.has('width_scale') and node.has('height_scale')
        if in_height is not dynamic_dimension:
            out_height = math.floor(in_height * node.height_scale)
        else:
            out_height = dynamic_dimension
        if in_width is not dynamic_dimension:
            out_width = math.floor(in_width * node.width_scale)
        else:
            out_width = dynamic_dimension
        node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                         batch=input_shape[get_batch_dim(layout, 4)],
                                                         features=input_shape[get_features_dim(layout, 4)],
                                                         height=out_height,
                                                         width=out_width))
    else:
        scales = node.in_port(1).data.get_value()
        assert scales is not None, 'The input with scales for node "{}" is not constant'.format(node_name)
        eps = 1e-5  # makes values that are very close to an integer round to it instead of down
        # generic output shape calculation to support 5D input shape case
        output_shape = shape_array([dynamic_dimension for _ in range(len(input_shape))])
        for idx in range(len(output_shape)):
            if input_shape[idx] is not dynamic_dimension:
                output_shape[idx] = int((input_shape[idx] + eps) * scales[idx])
            else:
                output_shape[idx] = dynamic_dimension_value
        node.out_port(0).data.set_shape(output_shape)
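
# A standalone sketch of the scales-input branch above, assuming static dimensions:
# each output dimension is int((input_dim + eps) * scale), where the small eps keeps
# near-exact products like 63.9999999 from truncating down to 63.
eps = 1e-5
input_shape = [1, 3, 32, 32]
scales = [1.0, 1.0, 2.0, 2.0]
output_shape = [int((d + eps) * s) for d, s in zip(input_shape, scales)]
assert output_shape == [1, 3, 64, 64]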
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op = match['pad_op']
    input_data = pad_op.in_node(0)

    if pad_op.mode != 'constant':
        log.info('The pad node "{}" with pad mode "{}" cannot be fused.'.format(pad_op.soft_get('name'),
                                                                                pad_op.mode))
        return

    if op.type == 'Pooling' and op.pool_method == 'max':
        return

    if pad_op.mode == 'constant':
        fill_value = pad_op.in_port(3).data.get_value()
        if fill_value is None or fill_value != 0.0:
            log.info('The pad node "{}" with non-zero fill value cannot be fused.'.format(pad_op.soft_get('name')))
            return

    input_tensor_dims = len(match['pad_output'].shape)
    for in_port in [1, 2]:
        pads = pad_op.in_port(in_port).data.get_value()
        if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \
                pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0:
            log.info('The pad node "{}" with padding over feature/batch dimension cannot be fused.'.format(
                pad_op.soft_get('name')))
            return

    op.pad += np.concatenate([pad_op.in_port(1).data.get_value().reshape([-1, 1]),
                              pad_op.in_port(2).data.get_value().reshape([-1, 1])], axis=1)
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)
    edge_attrs = graph.get_edge_data(match['pad_output'].id, match['op'].id)[0]
    graph.remove_edge(match['pad_output'].id, match['op'].id)
    graph.add_edge(input_data.id, match['op'].id, **{'in': 0, **edge_attrs})
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op: Node = match['pad_op']

    # to keep reshape-ability if Pad receives pads_begin/pads_end from shape subgraph
    if pad_op.in_port(1).get_source().node.soft_get('can_be_fused') is False:
        return

    if pad_op.mode != 'constant':
        log.info('The pad node "{}" with pad mode "{}" cannot be fused.'.format(pad_op.soft_get('name'),
                                                                                pad_op.mode))
        return

    if op.type == 'Pooling' and op.pool_method == 'max':
        return

    if pad_op.mode == 'constant':
        fill_value = pad_op.in_port(3).data.get_value()
        if fill_value is None or fill_value != 0.0:
            log.info('The pad node "{}" with non-zero fill value cannot be fused.'.format(pad_op.soft_get('name')))
            return

    input_tensor_dims = len(match['pad_output'].shape)
    for in_port in [1, 2]:
        pads = pad_op.in_port(in_port).data.get_value()
        if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \
                pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0:
            log.info('The pad node "{}" with padding over feature/batch dimension cannot be fused.'.format(
                pad_op.soft_get('name')))
            return

    op.pad += np.concatenate([pad_op.in_port(1).data.get_value().reshape([-1, 1]),
                              pad_op.in_port(2).data.get_value().reshape([-1, 1])], axis=1)
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)
    match['op'].in_port(0).disconnect()
    pad_op.in_port(0).get_connection().add_destination(match['op'].in_port(0))
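
# An illustrative sketch of the pad-accumulation step shared by both pad_op_transform
# variants above (standalone numpy with hypothetical values): the Pad's begin/end
# vectors are stacked column-wise and added to the op's 'pad' attribute, which holds
# one [begin, end] pair per dimension.
import numpy as np

op_pad = np.zeros((4, 2), dtype=np.int64)    # 4D op with no padding of its own yet
pads_begin = np.array([0, 0, 1, 1])          # Pad input 1 (NCHW: pad H and W by 1)
pads_end = np.array([0, 0, 2, 2])            # Pad input 2
op_pad += np.concatenate([pads_begin.reshape([-1, 1]),
                          pads_end.reshape([-1, 1])], axis=1)
assert op_pad[2].tolist() == [1, 2] and op_pad[0].tolist() == [0, 0]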
def psroipooling_infer(node: Node):
    """
    Sets the shape of the output node according to the input blobs and node parameters:
    batch is taken from the second input blob, channels from the 'output_dim' attribute,
    height and width from the 'group_size' attribute.
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    assert node.has_valid('group_size')
    assert node.group_size == int(node.group_size)
    node['group_size'] = int(node['group_size'])

    node.out_node().shape = shape_for_layout(layout,
                                             batch=shapes[1][get_batch_dim(layout, 4)],
                                             features=node.output_dim,
                                             height=node.group_size,
                                             width=node.group_size)
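
# An illustrative sketch with hypothetical numbers for a detection head: the
# PSROIPooling output takes its batch from the ROIs input, 'output_dim' channels,
# and a group_size x group_size spatial grid.
rois_shape = [300, 5, 1, 1]          # second input: 300 ROIs
output_dim, group_size = 21, 7
out_shape = [rois_shape[0], output_dim, group_size, group_size]
assert out_shape == [300, 21, 7, 7]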
def test_get_batch_dim_NDHWC(self):
    self.assertEqual(get_batch_dim('NHWC', 5), 0)
def test_get_batch_dim_NCDHW(self):
    self.assertEqual(get_batch_dim('NCHW', 5), 0)
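
# An illustrative note on the assumed semantics exercised by the two tests above:
# get_batch_dim takes the 4D layout template ('NHWC' or 'NCHW') plus the actual rank,
# so rank 5 stands for the NDHWC/NCDHW extension of that template. In both layouts
# the batch stays at index 0, which this hypothetical stand-in mimics:
def batch_dim_sketch(layout, rank):
    # batch is the leading dimension for both channel-first and channel-last layouts
    return 0 if layout in ('NHWC', 'NCHW') else None

assert batch_dim_sketch('NHWC', 5) == 0 == batch_dim_sketch('NCHW', 5)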
def interp_infer(node: Node):
    layout = node.graph.graph['layout']
    assert len(layout) == 4
    if len(node.in_nodes()) == 2:
        src_shape = node.in_node(0).shape
        dst_shape = node.in_node(1).shape

        # in Caffe there can be 2 inputs too, but the shape should be got from the shape of the second input
        if node.parse_2nd_input == 'shape':
            dst_shape = [dst_shape[get_height_dim(layout, 4)], dst_shape[get_width_dim(layout, 4)]]
        else:
            # it is the TF case
            dst_shape = node.in_node(1).value

        if src_shape is None or dst_shape is None or len(src_shape) != 4 or len(dst_shape) != 2:
            log.error(
                'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                'src/dst shapes: src_shape = {}, dst_shape = {}'.format(node.name, node.op, src_shape, dst_shape))
            node.type = None  # prevent translation to a valid IE layer
            return
        in_height = src_shape[get_height_dim(layout, 4)]
        in_width = src_shape[get_width_dim(layout, 4)]
        out_height = dst_shape[0]
        out_width = dst_shape[1]

        node.factor = factor_update(
            node.factor,
            [float(out_height) / in_height, float(out_width) / in_width],
            [in_height, in_width],
            [out_height, out_width],
            node.soft_get('name'))

        if node.factor is None:
            node['width'] = out_width
            node['height'] = out_height

        node.out_node().shape = shape_for_layout(layout,
                                                 batch=src_shape[get_batch_dim(layout, 4)],
                                                 features=src_shape[get_features_dim(layout, 4)],
                                                 height=out_height,
                                                 width=out_width)
        node.graph.remove_edge(node.in_node(1).id, node.id)
    else:
        outn = node.out_node(0)

        in_shape = node.in_node(0)
        num_ = in_shape.shape[get_batch_dim(layout, 4)]
        channels_ = in_shape.shape[get_features_dim(layout, 4)]
        height_in_ = in_shape.shape[get_height_dim(layout, 4)]
        width_in_ = in_shape.shape[get_width_dim(layout, 4)]

        height_out_ = height_in_ + node.pad_beg + node.pad_end
        width_out_ = width_in_ + node.pad_beg + node.pad_end

        if node.shrink_factor != 1 and node.zoom_factor == 1:
            shrink_factor = node.shrink_factor
            if shrink_factor < 1:
                log.error('Shrink factor should be positive in node {}'.format(node.id))
                return None
            height_out_ = (height_out_ - 1) // shrink_factor + 1
            width_out_ = (width_out_ - 1) // shrink_factor + 1
        elif node.shrink_factor == 1 and node.zoom_factor != 1:
            zoom_factor = node.zoom_factor
            if zoom_factor < 1:
                log.error('Zoom factor should be positive in node {}'.format(node.id))
                return None

            node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                    'layer shape inference function in the file (openvino/tools/mo/ops/interp.op ' \
                                    'at the line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
            # Reshape methods can be different in some cases
            # The commented-out section represents the reshape used in deeplab-caffe
            # Uncomment the following lines if your model was trained with deeplab-caffe
            # or uses the same reshape method
            # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
            # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            # Comment out the following lines if you use the reshape method from the previous section
            height_out_ = height_out_ * zoom_factor
            width_out_ = width_out_ * zoom_factor
        elif node.width != 0 and node.height != 0:
            height_out_ = node.height
            width_out_ = node.width
        elif node.shrink_factor != 1 and node.zoom_factor != 1:
            shrink_factor = node.shrink_factor
            zoom_factor = node.zoom_factor
            if shrink_factor < 1:
                log.error('Shrink factor should be positive in node {}'.format(node.id))
                return None
            if zoom_factor < 1:
                log.error('Zoom factor should be positive in node {}'.format(node.id))
                return None
            height_out_ = (height_out_ - 1) // shrink_factor + 1
            width_out_ = (width_out_ - 1) // shrink_factor + 1
            height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
            width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

        outn.shape = shape_for_layout(layout,
                                      batch=num_,
                                      features=channels_,
                                      height=height_out_,
                                      width=width_out_)
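
# An illustrative sketch of the combined shrink+zoom branch above (standalone integer
# math on a hypothetical static dimension): shrinking first maps d -> (d - 1) // shrink + 1,
# then zooming maps d -> d + (d - 1) * (zoom - 1).
def shrink_then_zoom(dim, shrink_factor, zoom_factor):
    dim = (dim - 1) // shrink_factor + 1
    return dim + (dim - 1) * (zoom_factor - 1)

assert shrink_then_zoom(65, 2, 2) == 65   # 65 -> 33 -> 65, a deeplab-style round trip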