def infer(node: Node):
    in_shape = node.in_port(0).data.get_shape()
    if in_shape.size != 4:
        raise Error('TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                    'Current input shape is \'{}\''.format(in_shape))

    layout = node.graph.graph['layout']

    N = in_shape[get_batch_dim(layout, 4)]
    H = in_shape[get_height_dim(layout, 4)]
    W = in_shape[get_width_dim(layout, 4)]
    C = in_shape[get_features_dim(layout, 4)]

    block_size = node['block_size']
    if C is not dynamic_dimension and C % (block_size ** 2):
        raise Error('Feature dimension of input tensor of DepthToSpace operation has to be divisible by the square '
                    'of the DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                    'block_size = {}'.format(in_shape, C, block_size))

    out_shape = shape_for_layout(layout,
                                 batch=N,
                                 features=C // (block_size * block_size),
                                 height=H * block_size,
                                 width=W * block_size)

    if is_fully_defined(in_shape) and is_fully_defined(out_shape) and np.prod(in_shape) != np.prod(out_shape):
        raise Error('Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                    ''.format(in_shape, out_shape, node.soft_get('name', node.id)))

    node.out_port(0).data.set_shape(out_shape)
def infer(node: Node):
    in_shape = node.in_node().shape
    if in_shape.size != 4:
        raise Error('TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                    'Current input shape is \'{}\''.format(in_shape))

    layout = node.graph.graph['layout']

    N = in_shape[get_batch_dim(layout, 4)]
    H = in_shape[get_height_dim(layout, 4)]
    W = in_shape[get_width_dim(layout, 4)]
    C = in_shape[get_features_dim(layout, 4)]

    block_size = node['block_size']
    if (H is not dynamic_dimension and H % block_size) or (W is not dynamic_dimension and W % block_size):
        raise Error('Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                    'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                    'block_size = {}'.format(in_shape, H, W, block_size))

    out_shape = shape_for_layout(layout,
                                 batch=N,
                                 features=C * (block_size ** 2),
                                 height=H // block_size,
                                 width=W // block_size)

    node.out_port(0).data.set_shape(out_shape)
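# A minimal, framework-free sketch of the shape arithmetic that the two infer
# functions above implement, assuming a static NHWC input. The helper is
# hypothetical (it is not part of the MO API); it only illustrates how
# DepthToSpace and SpaceToDepth trade the feature dimension against the two
# spatial dimensions while preserving the total element count.
def _depth_space_shapes_sketch(n, h, w, c, block_size):
    assert c % (block_size ** 2) == 0, 'C must be divisible by block_size^2'
    depth_to_space = (n, h * block_size, w * block_size, c // (block_size ** 2))
    space_to_depth = (n, h // block_size, w // block_size, c * (block_size ** 2))
    return depth_to_space, space_to_depth

# Example: [1, 4, 6, 16] with block_size=2 maps to [1, 8, 12, 4] for
# DepthToSpace and to [1, 2, 3, 64] for SpaceToDepth; both keep 384 elements.
assert _depth_space_shapes_sketch(1, 4, 6, 16, 2) == ((1, 8, 12, 4), (1, 2, 3, 64))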
def regionyolo_infer(node: Node):
    input_shape = node.in_port(0).data.get_shape()
    axis = get_canonical_axis_index(input_shape, node.axis)
    end_axis = get_canonical_axis_index(input_shape, node.end_axis)
    node.axis = axis
    node.end_axis = end_axis
    if node.do_softmax:
        dims_to_flatten = input_shape[axis: end_axis + 1]
        if is_fully_defined(dims_to_flatten):
            flat_dim = np.ma.prod(dims_to_flatten)
        else:
            flat_dim = dynamic_dimension_value
        node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
    else:
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                         batch=input_shape[get_batch_dim(layout, 4)],
                                                         features=(node.classes + node.coords + 1) * len(node.mask),
                                                         height=input_shape[get_height_dim(layout, 4)],
                                                         width=input_shape[get_width_dim(layout, 4)]))
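# A small sketch of the do_softmax branch above, under the assumption that all
# flattened dimensions are static: dimensions [axis, end_axis] are collapsed
# into a single flat dimension while the rest of the shape is kept.
import numpy as np

def _regionyolo_softmax_shape_sketch(input_shape, axis, end_axis):
    flat_dim = int(np.prod(input_shape[axis: end_axis + 1]))
    return [*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]]

# Example: NCHW shape [1, 255, 13, 13] with axis=1, end_axis=3 flattens to
# [1, 43095], since 255 * 13 * 13 = 43095.
assert _regionyolo_softmax_shape_sketch([1, 255, 13, 13], 1, 3) == [1, 43095]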
def priorbox_clustered_infer(node: Node):
    layout = node.graph.graph['layout']
    data_shape = node.in_node(0).shape
    num_ratios = len(node.width)

    if node.has_and_set('V10_infer'):
        assert node.in_node(0).value is not None
        node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4])
    else:
        res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
        node.out_port(0).data.set_shape([1, 2, res_prod])
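# Hypothetical worked example of the non-V10 branch above: for a clustered
# prior box, the number of priors per spatial cell is simply the number of
# provided widths, and each prior contributes 4 box coordinates.
h, w, num_widths = 19, 19, 9  # illustrative feature-map size and cluster count
assert [1, 2, h * w * num_widths * 4] == [1, 2, 12996]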
def upsample_infer(node: Node):
    node_name = node.soft_get('name', node.id)
    layout = node.graph.graph['layout']
    assert len(layout) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(node_name)

    input_shape = node.in_port(0).data.get_shape()

    if len(node.in_nodes()) == 1:
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]
        # node.has() returns a bool, so assert its truthiness directly
        assert node.has('width_scale') and node.has('height_scale')
        if in_height is not dynamic_dimension:
            out_height = math.floor(in_height * node.height_scale)
        else:
            out_height = dynamic_dimension
        if in_width is not dynamic_dimension:
            out_width = math.floor(in_width * node.width_scale)
        else:
            out_width = dynamic_dimension
        node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                         batch=input_shape[get_batch_dim(layout, 4)],
                                                         features=input_shape[get_features_dim(layout, 4)],
                                                         height=out_height,
                                                         width=out_width))
    else:
        scales = node.in_port(1).data.get_value()
        assert scales is not None, 'The input with scales for node "{}" is not constant'.format(node_name)
        eps = 1e-5  # makes values that land a hair below an integer round to it instead of truncating down
        # generic output shape calculation to support the 5D input shape case
        output_shape = shape_array([dynamic_dimension for _ in range(len(input_shape))])
        for idx in range(len(output_shape)):
            if input_shape[idx] is not dynamic_dimension:
                output_shape[idx] = int((input_shape[idx] + eps) * scales[idx])
            else:
                output_shape[idx] = dynamic_dimension_value
        node.out_port(0).data.set_shape(output_shape)
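# The eps term above guards against float scales whose product lands a hair
# below an integer. A minimal demonstration (the values are illustrative, not
# taken from a real model): 100 * 0.29 evaluates to 28.999999999999996 in
# IEEE-754 doubles, so a plain int() truncation would yield 28 instead of 29.
eps = 1e-5
in_dim, scale = 100, 0.29
assert int(in_dim * scale) == 28          # truncates the almost-29 product
assert int((in_dim + eps) * scale) == 29  # eps nudges it over the boundary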
def priorbox_infer(node: Node):
    layout = node.graph.graph['layout']
    data_shape = node.in_node(0).shape

    # collect all distinct aspect ratios (the first one is always 1);
    # if flip is True, 1/x is added for every ratio x except 1
    ar_seen = [1.0]
    ar_seen.extend(node.aspect_ratio.copy())
    if node.flip:
        for s in node.aspect_ratio:
            ar_seen.append(1.0 / s)

    ar_seen = np.unique(mo_array(ar_seen).round(decimals=6))

    num_ratios = 0
    if len(node.min_size) > 0:
        num_ratios = len(ar_seen) * len(node.min_size)

    if node.has_valid('fixed_size') and len(node.fixed_size) > 0:
        num_ratios = len(ar_seen) * len(node.fixed_size)

    if node.has_valid('density') and len(node.density) > 0:
        for d in node.density:
            if node.has_valid('fixed_ratio') and len(node.fixed_ratio) > 0:
                num_ratios = num_ratios + len(node.fixed_ratio) * (pow(d, 2) - 1)
            else:
                num_ratios = num_ratios + len(ar_seen) * (pow(d, 2) - 1)

    num_ratios = num_ratios + len(node.max_size)

    if node.has_and_set('V10_infer'):
        assert node.in_node(0).value is not None
        node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4])
    else:
        res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
        node.out_port(0).data.set_shape([1, 2, res_prod])
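# Worked example of the prior-count arithmetic above (values are illustrative).
# With aspect_ratio=[2.0], flip=True, one min_size and one max_size, the set of
# distinct ratios is {1.0, 2.0, 0.5}, so num_ratios = 3 * 1 + 1 = 4 priors per
# spatial cell, and a 19x19 feature map yields output shape [1, 2, 19 * 19 * 4 * 4].
ar_seen_example = sorted({1.0, 2.0, 1.0 / 2.0})        # ratios after flip
num_ratios_example = len(ar_seen_example) * 1 + 1      # len(min_size) == len(max_size) == 1
assert num_ratios_example == 4
assert [1, 2, 19 * 19 * num_ratios_example * 4] == [1, 2, 5776]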
def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
    log.debug('UpsampleToResample is triggered')
    upsample = match['upsample']
    upsample_name = upsample.soft_get('name', upsample.id)
    input_shape = upsample.in_port(0).data.get_shape()
    input_shape_rank = len(input_shape)
    if input_shape_rank not in [4, 5]:
        log.warning('The input shape is not 4D or 5D for op {}'.format(upsample_name))
        return

    depth_scale = None
    layout = graph.graph['layout']

    if len(upsample.in_nodes()) == 2:
        if upsample.in_node(1).value is None:
            return
        scales = upsample.in_node(1).value
        assert len(scales) in (4, 5), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(
            len(scales), upsample_name)
        if not (math.isclose(scales[0], 1, rel_tol=1e-5) and math.isclose(scales[1], 1, rel_tol=1e-5)):
            return
        height_scale = scales[get_height_dim(layout, input_shape_rank)]
        width_scale = scales[get_width_dim(layout, input_shape_rank)]
        if len(scales) == 5:
            depth_scale = scales[get_depth_dim(layout, input_shape_rank)]
    else:
        height_scale = upsample['height_scale']
        width_scale = upsample['width_scale']

    if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
        upsample.in_port(1).disconnect()

    shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()

    if input_shape_rank == 4:
        begin_value = int64_array([get_height_dim(layout, input_shape_rank)])
        factor_value = float32_array([height_scale, width_scale])
    else:
        begin_value = int64_array([get_depth_dim(layout, input_shape_rank)])
        factor_value = float32_array([depth_scale, height_scale, width_scale])

    ss = create_op_with_const_inputs(graph, StridedSlice,
                                     {1: begin_value,
                                      2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),
                                      3: int64_array([1])},
                                     {'name': upsample_name + '/ss_0_port',
                                      'begin_mask': int64_array([1]),
                                      'end_mask': int64_array([1]),
                                      'new_axis_mask': int64_array([0]),
                                      'shrink_axis_mask': int64_array([0]),
                                      'ellipsis_mask': int64_array([0])})

    mul = create_op_node_with_second_input(graph, Mul, factor_value, {'name': upsample_name + '/factor_mul'})

    source = upsample.in_port(0).get_connection().get_source()
    source.connect(shape.in_port(0))
    shape.out_port(0).connect(ss.in_port(0))
    ss.out_port(0).connect(mul.in_port(0))

    # create the Interpolate operation
    if input_shape_rank == 4:
        axes = int64_array([get_height_dim(layout, input_shape_rank),
                            get_width_dim(layout, input_shape_rank)])
    else:
        axes = int64_array([get_depth_dim(layout, input_shape_rank),
                            get_height_dim(layout, input_shape_rank),
                            get_width_dim(layout, input_shape_rank)])

    axes_node = Const(graph, {'name': upsample_name + '/axis', 'value': axes}).create_node()

    interpolate = Interpolate(graph, {'mode': upsample.attrs()['mode'], 'antialias': 0,
                                      'pads_begin': int64_array([0]), 'pads_end': int64_array([0]),
                                      'coordinate_transformation_mode': 'half_pixel',
                                      'nearest_mode': 'round_prefer_floor', 'cube_coeff': -0.75,
                                      'shape_calculation_mode': 'scales',
                                      'version': 'opset4', 'in_ports_count': 4}).create_node()

    upsample.add_input_port(1, skip_if_exist=True)
    assert upsample.in_port(1).disconnected()
    mul.out_port(0).connect(interpolate.in_port(1))
    axes_node.out_port(0).connect(interpolate.in_port(3))

    scales_node = Const(graph, {'name': upsample_name + '/scales',
                                'value': factor_value}).create_node()
    scales_node.out_port(0).connect(interpolate.in_port(2))

    upsample.in_port(0).get_connection().set_destination(interpolate.in_port(0))
    upsample.out_port(0).get_connection().set_source(interpolate.out_port(0))

    rename_nodes([(upsample, upsample_name + '/delete'), (interpolate, upsample_name)])

    convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
    convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()

    mul.in_port(0).get_connection().insert_node(convert_to_float)
    mul.out_port(0).get_connection().insert_node(convert_to_int)
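# Data-flow sketch of the subgraph built by the transform above (not the
# transform itself): ShapeOf -> StridedSlice(spatial dims) -> Cast(float) ->
# Mul(scales) -> Cast(int) feeds Interpolate's sizes input, while the Const
# 'scales' node feeds its scales input. In plain NumPy, the computed sizes
# look like this (shapes and scales are illustrative):
import numpy as np

input_shape = np.array([1, 3, 120, 160], dtype=np.int64)  # NCHW, illustrative
factor = np.array([2.0, 2.0], dtype=np.float32)           # height/width scales
spatial = input_shape[2:4].astype(np.float32)             # what the StridedSlice selects
sizes = (spatial * factor).astype(np.int64)                # the Mul -> Cast(int) chain
assert sizes.tolist() == [240, 320]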
def test_get_width_dim_NDHWC(self):
    self.assertEqual(get_width_dim('NHWC', 5), 3)

def test_get_width_dim_NCDHW(self):
    self.assertEqual(get_width_dim('NCHW', 5), 4)

def test_get_width_dim_NHWC(self):
    self.assertEqual(get_width_dim('NHWC', 4), 2)
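# The tests above pin down the layout convention: in 'NHWC'-family layouts the
# width axis sits two places from the end (NDHWC -> index 3, NHWC -> index 2),
# while in 'NCHW'-family layouts it is the last axis (NCDHW -> index 4). A
# minimal sketch consistent with these tests (the real helper lives in the MO
# layout utilities and may differ in details such as input validation):
def _get_width_dim_sketch(layout: str, shape_len: int) -> int:
    return shape_len - 1 if layout == 'NCHW' else shape_len - 2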
def replace_resize(graph: Graph, resize: Node):
    log.debug("Converting of ONNX Resize-11 to Interpolate-4 is triggered for node {}.".format(
        resize.soft_get('name', resize.id)))

    input_shape = resize.in_port(0).data.get_shape()
    input_rank = len(input_shape)
    resize_name = resize.soft_get('name', resize.id)
    if input_rank not in {4, 5}:
        log.warning('The input shape is not 4D or 5D for op with name {}'.format(resize_name))
        return

    assert (resize.is_in_port_connected(0) and (resize.is_in_port_connected(2) or resize.is_in_port_connected(3))), \
        "Scales or sizes inputs must be connected to Node {} with op {}.".format(resize_name, resize.op)

    assert resize.soft_get('coordinate_transformation_mode') != 'tf_crop_and_resize', \
        'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(resize.op, resize_name)

    layout = graph.graph['layout']

    if input_rank == 4:
        begin_dim = get_height_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1
    else:
        begin_dim = get_depth_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1

    sizes_ss = create_op_with_const_inputs(graph, StridedSlice,
                                           {1: int64_array([begin_dim]),
                                            2: int64_array([end_dim]),
                                            3: int64_array([1])},
                                           {'name': resize_name + '/StridedSlice_sizes',
                                            'begin_mask': int64_array([1]),
                                            'end_mask': int64_array([1]),
                                            'new_axis_mask': int64_array([0]),
                                            'shrink_axis_mask': int64_array([0]),
                                            'ellipsis_mask': int64_array([0])})
    scales_ss = create_op_with_const_inputs(graph, StridedSlice,
                                            {1: int64_array([begin_dim]),
                                             2: int64_array([end_dim]),
                                             3: int64_array([1])},
                                            {'name': resize_name + '/StridedSlice_scales',
                                             'begin_mask': int64_array([1]),
                                             'end_mask': int64_array([1]),
                                             'new_axis_mask': int64_array([0]),
                                             'shrink_axis_mask': int64_array([0]),
                                             'ellipsis_mask': int64_array([0])})
    axes_node = Const(graph, {'name': resize_name + '/axis',
                              'value': int64_array(np.arange(begin_dim, end_dim))}).create_node()

    shape_calculation_mode = 'sizes' if resize.is_in_port_connected(3) else 'scales'

    interpolate_node = Interpolate(graph, {'version': 'opset4',
                                           'mode': convert_mode(resize.mode),
                                           'coordinate_transformation_mode': resize.coordinate_transformation_mode,
                                           'cube_coeff': resize.cube_coeff,
                                           'nearest_mode': resize.nearest_mode,
                                           'pads_begin': int64_array([0]),
                                           'pads_end': int64_array([0]),
                                           'antialias': 0,
                                           'shape_calculation_mode': shape_calculation_mode,
                                           'in_ports_count': 4}).create_node()

    axes_node.out_port(0).connect(interpolate_node.in_port(3))
    shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node()

    add_node = create_op_with_const_inputs(graph, Add, {1: float_array([1.0e-5])},
                                           {'name': resize_name + '/Add'})

    dst_dtype = np.float32  # even if data_type=FP16, use float32 for shape values

    if not resize.is_in_port_connected(3):
        cast_shape_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node()
        mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node()
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        cast_shape_to_float.out_port(0).connect(mul_node.in_port(0))
        cast_add_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node()
        floor_node = Floor(graph, {'name': resize_name + '/Floor'}).create_node()
        mul_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(floor_node.in_port(0))
        floor_node.out_port(0).connect(cast_add_result_to_int.in_port(0))
        cast_add_result_to_int.out_port(0).connect(sizes_ss.in_port(0))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_scales = resize.in_port(2).get_connection()
        connection_of_scales.set_destination(scales_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_scales.get_source().connect(mul_node.in_port(1))
    else:
        cast_shape_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node()
        cast_sizes_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node()
        div_node = Div(graph, {'name': resize_name + '/Div'}).create_node()
        cast_sizes_to_float.out_port(0).connect(div_node.in_port(0))
        cast_shape_to_float.out_port(0).connect(div_node.in_port(1))
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        div_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(scales_ss.in_port(0))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_sizes = resize.in_port(3).get_connection()
        connection_of_sizes.set_destination(sizes_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_sizes.get_source().connect(cast_sizes_to_float.in_port(0))

    rename_nodes([(resize, resize_name + '/delete'), (interpolate_node, resize_name)])
    resize.out_port(0).get_connection().set_source(interpolate_node.out_port(0))
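# NumPy sketch of the two shape_calculation_mode branches wired above (the
# tensor values are illustrative): when only scales are given, sizes are
# derived as floor(shape * scales + 1e-5); when sizes are given, scales are
# derived as sizes / shape + 1e-5.
import numpy as np

spatial_shape = np.array([32, 48], dtype=np.float32)
scales = np.array([2.0, 2.0], dtype=np.float32)
sizes_from_scales = np.floor(spatial_shape * scales + 1.0e-5).astype(np.int64)
assert sizes_from_scales.tolist() == [64, 96]

sizes = np.array([64, 96], dtype=np.float32)
scales_from_sizes = sizes / spatial_shape + 1.0e-5
assert np.allclose(scales_from_sizes, [2.0, 2.0], atol=1e-4)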
def interp_infer(node: Node):
    layout = node.graph.graph['layout']
    assert len(layout) == 4
    if len(node.in_nodes()) == 2:
        src_shape = node.in_node(0).shape
        dst_shape = node.in_node(1).shape

        # in Caffe there can also be 2 inputs, but the destination shape should be taken
        # from the shape of the second input
        if node.parse_2nd_input == 'shape':
            dst_shape = [dst_shape[get_height_dim(layout, 4)], dst_shape[get_width_dim(layout, 4)]]
        else:
            # it is the TF case
            dst_shape = node.in_node(1).value

        if src_shape is None or dst_shape is None or len(src_shape) != 4 or len(dst_shape) != 2:
            log.error(
                'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                'src/dst shapes: src_shape = {}, dst_shape = {}'.format(node.name, node.op, src_shape, dst_shape))
            node.type = None  # prevent translation to a valid IE layer
            return

        in_height = src_shape[get_height_dim(layout, 4)]
        in_width = src_shape[get_width_dim(layout, 4)]
        out_height = dst_shape[0]
        out_width = dst_shape[1]

        node.factor = factor_update(
            node.factor,
            [float(out_height) / in_height, float(out_width) / in_width],
            [in_height, in_width],
            [out_height, out_width],
            node.soft_get('name'))

        if node.factor is None:
            node['width'] = out_width
            node['height'] = out_height

        node.out_node().shape = shape_for_layout(layout,
                                                 batch=src_shape[get_batch_dim(layout, 4)],
                                                 features=src_shape[get_features_dim(layout, 4)],
                                                 height=out_height,
                                                 width=out_width)
        node.graph.remove_edge(node.in_node(1).id, node.id)
    else:
        outn = node.out_node(0)
        in_shape = node.in_node(0)
        num_ = in_shape.shape[get_batch_dim(layout, 4)]
        channels_ = in_shape.shape[get_features_dim(layout, 4)]
        height_in_ = in_shape.shape[get_height_dim(layout, 4)]
        width_in_ = in_shape.shape[get_width_dim(layout, 4)]

        height_out_ = height_in_ + node.pad_beg + node.pad_end
        width_out_ = width_in_ + node.pad_beg + node.pad_end

        if node.shrink_factor != 1 and node.zoom_factor == 1:
            shrink_factor = node.shrink_factor
            if shrink_factor < 1:
                log.error('Shrink factor should be positive in node {}'.format(node.id))
                return None
            height_out_ = (height_out_ - 1) / shrink_factor + 1
            width_out_ = (width_out_ - 1) / shrink_factor + 1
        elif node.shrink_factor == 1 and node.zoom_factor != 1:
            zoom_factor = node.zoom_factor
            if zoom_factor < 1:
                log.error('Zoom factor should be positive in node {}'.format(node.id))
                return None

            node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                    'layer shape inference function in the file (openvino/tools/mo/ops/interp.op ' \
                                    'at the line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
            # Reshape methods can differ in some cases.
            # The commented-out section below represents the reshape used in deeplab-caffe.
            # Uncomment the following lines if your model was trained with deeplab-caffe
            # or uses the same reshape method:
            # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
            # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            # Comment out the following lines if you use the reshape method from the previous section
            height_out_ = height_out_ * zoom_factor
            width_out_ = width_out_ * zoom_factor
        elif node.width != 0 and node.height != 0:
            height_out_ = node.height
            width_out_ = node.width
        elif node.shrink_factor != 1 and node.zoom_factor != 1:
            shrink_factor = node.shrink_factor
            zoom_factor = node.zoom_factor
            if shrink_factor < 1:
                log.error('Shrink factor should be positive in node {}'.format(node.id))
                return None
            if zoom_factor < 1:
                log.error('Zoom factor should be positive in node {}'.format(node.id))
                return None
            height_out_ = (height_out_ - 1) / shrink_factor + 1
            width_out_ = (width_out_ - 1) / shrink_factor + 1
            height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
            width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

        outn.shape = shape_for_layout(layout,
                                      batch=num_,
                                      features=channels_,
                                      height=height_out_,
                                      width=width_out_)
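# Worked example of the Caffe Interp size arithmetic above (illustrative
# values, integer division used for clarity): with shrink_factor=2 and
# zoom_factor=3, an 11-pixel dimension first shrinks to (11 - 1) / 2 + 1 = 6,
# then zooms to 6 + (6 - 1) * (3 - 1) = 16.
height_in_example = 11
shrink_factor_example, zoom_factor_example = 2, 3
height_out_example = (height_in_example - 1) // shrink_factor_example + 1
height_out_example = height_out_example + (height_out_example - 1) * (zoom_factor_example - 1)
assert height_out_example == 16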