def onnx_constant_ext(node):
    pb_value = onnx_attr(node, 'value', 't')
    value = numpy_helper.to_array(pb_value)
    result = {
        'data_type': value.dtype,
        'shape': np.array(value.shape),
        'value': value,
        'infer': tf_const_infer
    }
    return result
def extract(cls, node):
    value = onnx_attr(node, 'value', 'f', default=float(0.0))
    input_as_shape = onnx_attr(node, 'input_as_shape', 'i')
    extra_shape = onnx_attr(node, 'extra_shape', 'ints')
    shape = onnx_attr(node, 'shape', 'ints')
    dtype = onnx_attr(node, 'dtype', 'i', 1)
    assert input_as_shape
    assert extra_shape is None
    assert shape is None
    assert dtype == 1

    attrs = {
        'fill_value': value,
        'input_as_shape': input_as_shape,
    }

    ConstantFill.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    axis = onnx_attr(node, 'axis', 'i', default=0, dst_type=np.int64)

    size_splits = onnx_attr(node, 'split', 'ints', default=None, dst_type=int64_array)
    if size_splits is None:
        AttributedSplit.update_node_stat(node, {
            'axis': axis,
            'num_splits': onnx_get_num_outputs(node),
        })
    else:
        AttributedVariadicSplit.update_node_stat(node, {
            'axis': axis,
            'size_splits': size_splits,
        })
    return cls.enabled
def extract(cls, node: Node):
    shape = onnx_attr(node, 'shape', 'ints', default=None, dst_type=int64_array)
    out_type = get_onnx_datatype_as_numpy(onnx_attr(node, 'dtype', 'i', default=1))
    seed = onnx_attr(node, 'seed', 'f', default=0.0)
    min_val = onnx_attr(node, 'low', 'f', default=0.0)
    max_val = onnx_attr(node, 'high', 'f', default=1.0)
    AttributedRandomUniform.update_node_stat(node, {
        'shape': shape,
        'output_type': out_type,
        'seed': seed,
        'min_val': out_type(min_val),
        'max_val': out_type(max_val),
    })
    return cls.enabled
def extract(cls, node):
    # Extract the 'pads' attribute.
    # If pads is not specified, it is set to the default (1) in the infer function.
    pads = onnx_attr(node, 'pads', 'ints', default=None,
                     dst_type=lambda x: np.array(x, dtype=np.int64))
    assert pads is None or len(pads) % 2 == 0
    final_pad = None
    if pads is not None:
        # ONNX stores pads as [x1_begin, x2_begin, ..., x1_end, x2_end, ...];
        # regroup them into per-dimension (begin, end) pairs.
        pads = pads.reshape([2, -1])
        pads = np.transpose(pads)
        final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)

    # Extract the 'dilations' attribute.
    # If dilations is not specified, it is set to the default (1) in the infer function.
    dilations = onnx_attr(node, 'dilations', 'ints', default=None,
                          dst_type=lambda x: np.array(x, dtype=np.int64))
    final_dilations = np.array([1, 1, *dilations], dtype=np.int64) if dilations is not None else None

    # Extract the 'strides' attribute.
    # If strides is not specified, it is set to the default (1) in the infer function.
    strides = onnx_attr(node, 'strides', 'ints', default=None,
                        dst_type=lambda x: np.array(x, dtype=np.int64))
    final_strides = np.array([1, 1, *strides], dtype=np.int64) if strides is not None else None

    kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
    auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
    group = onnx_attr(node, 'group', 'i', default=1,
                      dst_type=lambda x: np.array(x, dtype=np.int64))
    deformable_groups = onnx_attr(node, 'deformable_groups', 'i', default=1)

    attrs = {
        'op': __class__.op,
        'auto_pad': auto_pad,
        'bias_addable': False,
        'bias_term': False,
        'pad': final_pad,
        'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
        'dilation': final_dilations,
        'output_spatial_shape': None,
        'output_shape': None,
        'stride': final_strides,
        'group': group,
        'deformable_group': deformable_groups,
        'output': None,
        'weights_index': 2,
        'kernel_spatial': np.array(kernel_shape, dtype=np.int64) if kernel_shape is not None else None,
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'kernel_spatial_idx': None,  # Will be calculated in the infer function (np.array([2, 3]))
        'spatial_dims': None,  # Will be calculated in the infer function
        'channel_dims': np.array([1], dtype=np.int64),
        'batch_dims': np.array([0], dtype=np.int64),
        'layout': 'NCHW'
    }

    # update the attributes of the node
    DeformableConvolution.update_node_stat(node, attrs)
    return cls.enabled
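# A minimal NumPy sketch of the pads regrouping above, using the hypothetical
# value pads = [1, 2, 3, 4] for a 2D kernel. ONNX stores pads as
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...]; the extractor turns them into
# per-dimension (begin, end) pairs and prepends zero padding for N and C.
import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)   # [x1_begin, x2_begin, x1_end, x2_end]
pads = np.transpose(pads.reshape([2, -1]))      # -> [[1, 3], [2, 4]]
final_pad = np.array([[0, 0], [0, 0], *pads])   # -> [[0, 0], [0, 0], [1, 3], [2, 4]]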
def onnx_reshape_ext(node):
    '''
    Extract the ONNX Reshape op across its different versions. Supports both the
    latest Reshape and Reshape-1. The former takes 2 inputs; Reshape-1 has one
    input, with the shape encoded in an attribute.
    '''
    dim = onnx_attr(node, 'shape', 'ints', None)
    if dim is not None:
        dim = np.array(dim, dtype=np.int64)
        Reshape.update_node_stat(node, {'dim': dim})
    else:
        Reshape.update_node_stat(node)
    return node.graph.node[node.id]
def extract(node):
    scale = onnx_attr(node, 'scale', 'f', default=np.array(1.0), dst_type=lambda x: np.array(x))
    node['scale'] = scale
    node['bias'] = np.array(0)
    node['op'] = 'ImageScaler'
    return __class__.enabled
def extract(node):
    # If the 'perm' attribute is undefined, the ONNX Transpose operation reverses the dimension order
    order = onnx_attr(node, 'perm', 'ints', default=None)

    attrs = {
        'order': np.array(order, dtype=np.int64) if order is not None else None,
        'reverse_order': order is None
    }

    # update the attributes of the node
    Permute.update_node_stat(node, attrs)
    return __class__.enabled
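# When 'perm' is absent, 'reverse_order' is set so the resulting Permute reverses
# the dimension order, matching NumPy's default transpose behavior. A minimal sketch:
import numpy as np

x = np.zeros((2, 3, 4))
assert np.transpose(x).shape == (4, 3, 2)  # no axes given -> dimension order reversed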
def extract(cls, node):
    # update the attributes of the node
    node_name = node.soft_get('name', node.id)
    block_size = onnx_attr(node, 'blocksize', 'i', default=None)
    assert block_size is not None, \
        'DepthToSpace should have "blocksize" attribute specified for node {}'.format(node_name)
    onnx_mode = onnx_attr(node, 'mode', 's', default=b'DCR').decode()
    assert onnx_mode in ['DCR', 'CRD'], \
        'Unrecognized mode provided for DepthToSpace node {}'.format(node_name)
    if onnx_mode == 'DCR':
        mode = 'blocks_first'
    else:
        mode = 'depth_first'

    DepthToSpaceOp.update_node_stat(node, {'block_size': block_size, 'mode': mode})
    return cls.enabled
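# A sketch of what the two modes compute, following the NumPy reference from the
# ONNX DepthToSpace specification: DCR ('blocks_first') takes the block dims from
# the front of the channel axis, CRD ('depth_first') from the back.
import numpy as np

def depth_to_space(x: np.ndarray, b: int, mode: str = 'DCR') -> np.ndarray:
    n, c, h, w = x.shape
    if mode == 'DCR':
        t = x.reshape(n, b, b, c // (b * b), h, w).transpose(0, 3, 4, 1, 5, 2)
    else:  # 'CRD'
        t = x.reshape(n, c // (b * b), b, b, h, w).transpose(0, 1, 4, 2, 5, 3)
    return t.reshape(n, c // (b * b), h * b, w * b)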
def extract(cls, node):
    mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
    scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32))
    width_scale = onnx_attr(node, 'width_scale', 'f')
    height_scale = onnx_attr(node, 'height_scale', 'f')

    supported_modes = ['nearest', 'linear']
    if mode not in supported_modes:
        raise Error(
            'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
            node.name, mode, supported_modes
        )

    if scales is not None:
        if scales.shape != (4,):
            raise Error(
                'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.',
                node.name
            )
        # For NCHW input the batch and feature scales (scales[0], scales[1]) must be 1
        if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
            raise Error(
                'Upsampling of batch and feature dimensions is not supported for node {}.',
                node.name
            )
        height_scale = scales[2]
        width_scale = scales[3]

    if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2:
        raise Error(
            'One/both of width_scale = {} and height_scale = {} is not defined for Upsample node {}.',
            width_scale, height_scale, node.name
        )

    UpsampleOp.update_node_stat(node, {'mode': mode, 'height_scale': height_scale,
                                       'width_scale': width_scale})
    return cls.enabled
def extract(cls, node):
    attrs = dict(
        output_size=onnx_attr(node, 'output_size', 'i', 7),
        sampling_ratio=onnx_attr(node, 'sampling_ratio', 'i', 2),
        distribute_rois_between_levels=onnx_attr(node, 'distribute_rois_between_levels', 'i', 1),
        preserve_rois_order=onnx_attr(node, 'preserve_rois_order', 'i', 1),
        num_classes=onnx_attr(node, 'num_classes', 'i', 81),
        post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000),
        score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05),
        pyramid_scales=np.array(onnx_attr(node, 'pyramid_scales', 'ints', [4, 8, 16, 32, 64]),
                                dtype=np.int64),
    )
    ExperimentalDetectronROIFeatureExtractor.update_node_stat(node, attrs)
    return cls.enabled
def extract(node):
    activation_alpha = onnx_attr(node, 'activation_alpha', 'floats', default=None,
                                 dst_type=lambda x: np.array(x, dtype=np.float32))
    activation_beta = onnx_attr(node, 'activation_beta', 'floats', default=None,
                                dst_type=lambda x: np.array(x, dtype=np.float32))
    activations = onnx_attr(node, 'activations', 'strings', default=None,
                            dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
    clip = onnx_attr(node, 'clip', 'f', default=None)

    attrs = {
        'batch_dim': 1,
        'sequence_dim': 0,
        'blobs_wrb': True,
        'has_num_directions': True,
        'num_layers': 1,
        'format': 'onnx',
        'multilayers': False,
        'gate_order': [0],

        # ONNX attrs
        'activation_alpha': activation_alpha,
        'activation_beta': activation_beta,
        'activations': activations,
        'clip': clip,
        'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),
        'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
    }

    RNN.update_node_stat(node, attrs)
    return __class__.enabled
def extract(cls, node):
    # some Dropout flavors don't have the is_test attribute; when it is missing, interpret it as 1
    is_test = onnx_attr(node, 'is_test', 'i', 1)
    if len(node.out_nodes()) > 1:
        raise Error('Dropout node {} has more than one consumer. Unsupported.', node.name)
    if not is_test:
        raise Error('Dropout node {} has is_test: 0. This means training mode which is not supported.', node.name)
    Identity.update_node_stat(node)
    return cls.enabled
def extract(cls, node):
    axes = onnx_attr(node, 'axes', 'ints',
                     default=int64_array([0, 2, 3]),
                     dst_type=lambda x: np.array(x, dtype=np.int64))

    attrs = {
        'eps': 1e-9,
        'normalize_variance': 1,
        'axes': axes,
        'eps_mode': 'outside_sqrt',
    }

    MVNOnnx.update_node_stat(node, attrs)
    return cls.enabled
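# For reference, 'eps_mode': 'outside_sqrt' adds eps after the square root is
# taken. A minimal NumPy sketch of the normalization implied by these attributes
# (an illustration based on the attribute names, not the production kernel):
import numpy as np

def mvn_outside_sqrt(x: np.ndarray, axes: tuple, eps: float = 1e-9) -> np.ndarray:
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / (np.sqrt(var) + eps)  # eps outside the sqrt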
def onnx_reshape_ext(node):
    '''
    Extract the ONNX Reshape op across its different versions. Supports both the
    latest Reshape and Reshape-1. The former takes 2 inputs; Reshape-1 has one
    input, with the shape encoded in an attribute.
    '''
    dim = onnx_attr(node, 'shape', 'ints', None)
    if dim is not None:
        dim = np.array(dim, dtype=np.int64)
    return {
        'type': 'Reshape',
        'dim': dim,
        'infer': lambda node: single_output_infer(
            node, tf_reshape_shape_infer,
            lambda node: np.reshape(node.in_node().value, node.out_node().shape)
            if node.in_node().value is not None else None)
    }
def dropout_ext(node):
    # some Dropout flavors don't have the is_test attribute; when it is missing, interpret it as 1
    is_test = onnx_attr(node, 'is_test', 'i', 1)
    if len(node.out_nodes()) > 1:
        raise Error('Dropout node {} has more than one consumer. Unsupported.', node.name)
    if not is_test:
        raise Error('Dropout node {} has is_test: 0. This means training mode which is not supported.', node.name)
    return {
        # redefine op to automatically remove the node in the next transformations
        'op': 'Identity',
    }
def extract(cls, node):
    attrs = dict(
        class_agnostic_box_regression=onnx_attr(node, 'class_agnostic_box_regression', 'i', 0),
        max_detections_per_image=onnx_attr(node, 'max_detections_per_image', 'i', 100),
        nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.5),
        num_classes=onnx_attr(node, 'num_classes', 'i', 81),
        post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000),
        score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05),
        max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)),
        deltas_weights=np.array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.]),
                                dtype=np.float32),
    )
    ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs)
    return cls.enabled
def replace_sub_graph(graph: Graph, match: dict):
    max_pool_input = match['input']
    max_pool = match['max_pool0']
    unpool = match['unpool']
    unpool_input = unpool.in_port(0).get_source().node

    max_pool.out_port(1).disconnect()

    # Inputs: [max_pool_input, max_pool_output, unpool_input]
    res = MaxPoolGrad(graph, dict(name=unpool.name + '/fused')).create_node(
        [max_pool_input, max_pool, unpool_input])
    unpool.out_port(0).get_connection().set_source(res.out_port(0))

    output_size = onnx_attr(unpool, 'output_size', 'ints', default=None)
    if output_size:
        MaxPoolGrad.update_node_stat(res, attrs={'output_size': output_size})
def extract(cls, node):
    name = node.soft_get('name', node.id)

    axes = onnx_attr(node, 'axes', 'ints',
                     default=np.array([0, 2, 3], dtype=np.int64),
                     dst_type=lambda x: np.array(x, dtype=np.int64))
    axes = Const(node.graph, {'value': axes, 'name': name + '/Axes'}).create_node()
    node.add_input_port(1, skip_if_exist=True)
    node.in_port(1).connect(axes.out_port(0))

    attrs = {
        'eps': 1e-9,
        'normalize_variance': 1,
        'eps_mode': 'outside_sqrt'
    }

    MVN.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    attrs = {
        'alpha': onnx_attr(node, 'alpha', 'f', 1),
        'beta': onnx_attr(node, 'beta', 'f', 1),
        'transpose_a': onnx_attr(node, 'transA', 'i', 0),
        'transpose_b': onnx_attr(node, 'transB', 'i', 0),
        'broadcast_c': onnx_attr(node, 'broadcast', 'i', 1),
        # TODO: there is no axis in onnx operators.md
        'axis': np.array(onnx_attr(node, 'axis', 'i', default=0), dtype=np.int64)
    }
    GemmONNX.update_node_stat(node, attrs)
    return cls.enabled
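# These attributes map onto the ONNX Gemm definition Y = alpha * A' @ B' + beta * C,
# where A' and B' are optionally transposed. A minimal NumPy sketch:
import numpy as np

def gemm(a, b, c, alpha=1.0, beta=1.0, trans_a=0, trans_b=0):
    a = a.T if trans_a else a
    b = b.T if trans_b else b
    return alpha * (a @ b) + beta * c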
def extract(cls, node):
    direction = onnx_attr(node, 'direction', 's', b'forward').decode().lower()

    activation_alpha = onnx_attr(node, 'activation_alpha', 'floats', default=None,
                                 dst_type=lambda x: np.array(x, dtype=np.float32))
    activation_beta = onnx_attr(node, 'activation_beta', 'floats', default=None,
                                dst_type=lambda x: np.array(x, dtype=np.float32))
    activations = onnx_attr(node, 'activations', 'strings',
                            default=['tanh', 'tanh'] if direction == 'bidirectional' else ['tanh'],
                            dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x))))
    clip = onnx_attr(node, 'clip', 'f', default=None)

    # PyTorch exports ONNX bidirectional RNN models with only one activation, so duplicate it
    if direction == 'bidirectional' and len(activations) == 1:
        activations.append(activations[0])

    attrs = {
        'batch_dim': 1,
        'sequence_dim': 0,
        'blobs_wrb': True,
        'has_num_directions': True,
        'num_layers': 1,
        'format': 'onnx',
        'multilayers': False,
        'gate_order': [0],

        # ONNX attrs
        'activation_alpha': activation_alpha,
        'activation_beta': activation_beta,
        'activations': activations,
        'clip': clip,
        'direction': direction,
        'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),
    }

    RNN.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    axis = np.array(onnx_attr(node, 'axes', 'ints', default=[]), dtype=np.int64)
    ExpandDims.update_node_stat(node, {'expand_axis': axis})
    return cls.enabled
def extract(cls, node: Node):
    axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
    keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
    ReduceProd.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
    return cls.enabled
def extract(node):
    epsilon = onnx_attr(node, 'epsilon', 'f', default=float(1e-5))
    InstanceNormalization.update_node_stat(node, {'epsilon': epsilon})
    return __class__.enabled
def extract(cls, node):
    attrs = dict(max_rois=onnx_attr(node, 'max_rois', 'i', 1000))
    ExperimentalDetectronTopKROIs.update_node_stat(node, attrs)
    return cls.enabled
def extract(node):
    alpha = onnx_attr(node, 'alpha', 'f', default=1.0)
    Elu.update_node_stat(node, {'alpha': alpha})
    return EluFrontExtractor.enabled
def extract(cls, node):
    encoding_map = {0: 'corner', 1: 'center'}
    center_point_box = onnx_attr(node, 'center_point_box', 'i', default=0)
    NonMaxSuppression.update_node_stat(node, {'sort_result_descending': 0,
                                              'box_encoding': encoding_map[center_point_box]})
    return cls.enabled
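# The mapping mirrors the ONNX NonMaxSuppression attribute: center_point_box=0
# means boxes come as [y1, x1, y2, x2] corners, center_point_box=1 as
# [x_center, y_center, width, height]. A sketch of converting center to corner
# format (a hypothetical helper, not part of the extractor):
import numpy as np

def center_to_corner(boxes: np.ndarray) -> np.ndarray:
    xc, yc, w, h = np.moveaxis(boxes, -1, 0)
    return np.stack([yc - h / 2, xc - w / 2, yc + h / 2, xc + w / 2], axis=-1)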
def extract(node):
    variance = onnx_attr(node, 'variance', 'floats', default=[],
                         dst_type=lambda x: np.array(x, dtype=np.float32))
    if len(variance) == 0:
        variance = [0.1]

    update_attrs = {
        'width': onnx_attr(node, 'width', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
        'height': onnx_attr(node, 'height', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32)),
        'flip': onnx_attr(node, 'flip', 'i', default=0),
        'clip': onnx_attr(node, 'clip', 'i', default=0),
        'variance': list(variance),
        'img_size': onnx_attr(node, 'img_size', 'i', default=0),
        'img_h': onnx_attr(node, 'img_h', 'i', default=0),
        'img_w': onnx_attr(node, 'img_w', 'i', default=0),
        'step': onnx_attr(node, 'step', 'f', default=0.0),
        'step_h': onnx_attr(node, 'step_h', 'f', default=0.0),
        'step_w': onnx_attr(node, 'step_w', 'f', default=0.0),
        'offset': onnx_attr(node, 'offset', 'f', default=0.0),
    }

    # update the attributes of the node
    Op.get_op_class_by_name(__class__.op).update_node_stat(node, update_attrs)
    return __class__.enabled
def extract(cls, node):
    to = onnx_attr(node, 'to', 'i', default=None)
    Cast.update_node_stat(node, {'dst_type': get_onnx_datatype_as_numpy(to)})
    return cls.enabled
def extract(cls, node):
    axis = onnx_attr(node, 'axis', 'i', default=1)
    LogSoftmaxONNX.update_node_stat(node, {'axis': axis})
    return cls.enabled