def extract(cls, node):
    """Extract attributes for an ONNX DequantizeLinear node.

    The 'axis' attribute introduced in opset 13 is not handled here, so a
    warning is emitted for such nodes because inference may be incorrect.
    """
    opset = get_onnx_opset_version(node)
    if opset >= 13:
        log.warning(
            'Ignoring "axis" attribute for DequantizeLinear-{} node, inference might be incorrect.'
            .format(opset))
    DequantizeLinear.update_node_stat(node)
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX QuantizeLinear node.

    From opset 13 on the operation carries an 'axis' attribute
    (default 1); earlier opsets have no attributes to extract.
    """
    attrs = {}
    if get_onnx_opset_version(node) >= 13:
        attrs['axis'] = onnx_attr(node, 'axis', 'i', default=1)
    QuantizeLinear.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node: Node):
    """Extract attributes for an ONNX Resize node.

    Resize-11 and newer carries explicit coordinate-transformation
    settings and maps to ONNXResize11Op; older opsets only provide the
    interpolation mode and map to ONNXResize10.
    """
    def str_attr(name, default):
        # String attributes arrive as bytes and must be decoded.
        return onnx_attr(node, name, 's', default=default).decode()

    opset = get_onnx_opset_version(node)
    if opset is not None and opset >= 11:
        resize_attrs = {
            'mode': str_attr('mode', b'nearest'),
            'coordinate_transformation_mode': str_attr('coordinate_transformation_mode', b'half_pixel'),
            'nearest_mode': str_attr('nearest_mode', b'round_prefer_floor'),
            # MO-side attribute name differs from the ONNX one.
            'cube_coeff': onnx_attr(node, 'cubic_coeff_a', 'f', default=-0.75),
        }
        ONNXResize11Op.update_node_stat(node, resize_attrs)
    else:
        ONNXResize10.update_node_stat(node, {'mode': str_attr('mode', b'nearest')})
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Pad node.

    Pad-11 and newer takes pads/value as inputs and maps to ONNXPad;
    older opsets keep them as attributes and map to AttributedPad.
    """
    mode = onnx_attr(node, 'mode', 's', default='constant', dst_type=lambda x: x.decode())
    if get_onnx_opset_version(node) >= 11:
        ONNXPad.update_node_stat(node, {'mode': mode})
        return cls.enabled

    flat_pads = onnx_attr(node, 'pads', 'ints', dst_type=lambda x: np.array(x, dtype=np.int64))
    fill_value = onnx_attr(node, 'value', 'f', default=0.)
    assert flat_pads is not None
    # ONNX stores pads flat (all begins, then all ends); MO expects a
    # Dx2 layout where D is the number of dimensions, hence the
    # reshape + transpose.
    pads = np.transpose(flat_pads.reshape([2, -1]))
    AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': fill_value})
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Upsample node.

    Opset 9+ maps directly to ONNXResize10 (scales arrive as an input);
    older opsets read scale attributes, validate them, and map to
    UpsampleOp.

    Raises Error for an unsupported mode, non-4D scales, scaling of the
    batch/feature dimensions, or missing scale information.
    """
    mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
    opset = get_onnx_opset_version(node)
    if opset is not None and opset >= 9:
        ONNXResize10.update_node_stat(node, {'mode': mode})
        return cls.enabled

    supported_modes = ['nearest', 'linear']
    if mode not in supported_modes:
        raise Error(
            'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
            node.name, mode, supported_modes)

    scales = onnx_attr(node, 'scales', 'floats',
                       dst_type=lambda x: np.array(x, dtype=np.float32))
    width_scale = onnx_attr(node, 'width_scale', 'f')
    height_scale = onnx_attr(node, 'height_scale', 'f')

    if scales is not None:
        # Only NCHW-shaped 4D scales with unit batch/feature factors
        # are supported; spatial factors are taken from positions 2, 3.
        if scales.shape != (4,):
            raise Error(
                'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.',
                node.name)
        if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
            raise Error(
                'Upsampling of batch and feature dimensions is not supported for node {}.',
                node.name)
        height_scale = scales[2]
        width_scale = scales[3]

    # With two inputs the scales come from the second input instead.
    if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2:
        raise Error(
            'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.',
            width_scale, height_scale, node.name)

    UpsampleOp.update_node_stat(
        node,
        {'mode': mode, 'height_scale': height_scale, 'width_scale': width_scale})
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Clip node.

    Clip-11 and newer takes min/max as optional inputs and maps to
    Clamp; older opsets keep them as attributes (defaulting to the
    float32 range) and map to AttributedClamp.
    """
    if get_onnx_opset_version(node) >= 11:
        Clamp.update_node_stat(node)
    else:
        clamp_attrs = {
            'min': onnx_attr(node, 'min', 'f', np.finfo(np.float32).min),
            'max': onnx_attr(node, 'max', 'f', np.finfo(np.float32).max),
        }
        AttributedClamp.update_node_stat(node, clamp_attrs)
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Slice node.

    Before opset 10 the slice boundaries are node attributes and the op
    maps to AttributedSlice; from opset 10 on they come in as inputs and
    the op maps to Slice.

    Raises Error when a pre-10 node lacks the required 'starts'/'ends'
    attributes.
    """
    if get_onnx_opset_version(node) < 10:
        starts = int64_array(onnx_attr(node, 'starts', 'ints', default=[]))
        ends = int64_array(onnx_attr(node, 'ends', 'ints', default=[]))
        axes = int64_array(onnx_attr(node, 'axes', 'ints', default=[]))
        if len(starts) == 0 or len(ends) == 0:
            raise Error("starts or/and ends are not specified for the node {}".format(node.name))
        if len(axes) == 0:
            # 'axes' defaults to [0, 1, ..., len(starts) - 1]. Use
            # np.int64 to match int64_array above: the bare np.int alias
            # was deprecated in NumPy 1.20 and removed in 1.24, so it
            # raises AttributeError on modern NumPy.
            axes = np.arange(len(starts), dtype=np.int64)
        attrs = {'axes': axes, 'starts': starts, 'ends': ends}
        AttributedSlice.update_node_stat(node, attrs)
    else:  # onnx_opset_version >= 10
        Slice.update_node_stat(node)
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Clip node.

    Opset 11+ takes min/max as optional inputs and maps to Clamp; if
    such a node still carries min/max attributes (a sign it was built
    against an older opset), an error-level warning is logged. Older
    opsets read the attributes (defaulting to the float32 range) and
    map to AttributedClamp.
    """
    if get_onnx_opset_version(node) >= 11:
        has_legacy_attrs = onnx_node_has_attr(node, 'min') or onnx_node_has_attr(node, 'max')
        if has_legacy_attrs:
            log.error(
                "ONNX Clip-11 operation '{}' shouldn't have attributes 'min' and 'max', this may mean that "
                "this operation created with older opset version.".format(
                    node.soft_get('name', node.id)),
                extra={'is_warning': True})
        Clamp.update_node_stat(node)
    else:
        clamp_attrs = {
            'min': onnx_attr(node, 'min', 'f', np.finfo(np.float32).min),
            'max': onnx_attr(node, 'max', 'f', np.finfo(np.float32).max),
        }
        AttributedClamp.update_node_stat(node, clamp_attrs)
    return cls.enabled
def extract(cls, node):
    """Extract attributes for an ONNX Pad node.

    New-style Pad (opset 11+, pads as an input) maps to ONNXPad.
    Old-style Pad keeps pads/value as attributes and maps to
    AttributedPad. PyTorch 1.3 may emit an old-style Pad even when
    targeting opset 11, so the presence of the 'pads' attribute also
    selects the legacy path.
    """
    mode = onnx_attr(node, 'mode', 's', default='constant', dst_type=lambda x: x.decode())
    flat_pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
    if get_onnx_opset_version(node) >= 11 and flat_pads is None:
        ONNXPad.update_node_stat(node, {'mode': mode})
    else:
        fill_value = onnx_attr(node, 'value', 'f', default=0.)
        assert flat_pads is not None, 'pads is required attribute for Pad operation'
        # ONNX stores pads flat (all begins, then all ends); MO expects
        # a Dx2 layout, hence the reshape + transpose.
        pads = np.transpose(flat_pads.reshape([2, -1]))
        AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': fill_value})
    return cls.enabled