def backend_attrs(self):
    """Serialization attributes: three boolean flags, each rendered as the
    string 'true'/'false' via bool_to_str."""
    flag_names = ['preprocess_collapse_repeated', 'ctc_merge_repeated', 'unique']
    return [(name, lambda node, name=name: bool_to_str(node, name))
            for name in flag_names]
def backend_attrs(self):
    """Opset-dependent serialization attributes: the 'extension' opset emits
    only the with_right_bound flag; every other opset additionally emits
    output_type as a destination type string."""
    bound_flag = ('with_right_bound', lambda node: bool_to_str(node, 'with_right_bound'))
    if self.get_opset() == "extension":
        return [bound_flag]
    return [
        bound_flag,
        ('output_type', lambda node: np_data_type_to_destination_type(node.output_type)),
    ]
def backend_attrs(self):
    """Opset-dependent serialization attributes.

    opset3/4/5 add 'output_type' on top of the opset1 set; any other opset
    value raises Error.
    """
    version = self.get_opset()
    sort_flag = ('sort_result_descending',
                 lambda node: bool_to_str(node, 'sort_result_descending'))
    if version == 'opset1':
        return [sort_flag, 'box_encoding']
    if version in ('opset3', 'opset4', 'opset5'):
        return [sort_flag,
                'box_encoding',
                ('output_type',
                 lambda node: np_data_type_to_destination_type(node.output_type))]
    raise Error('Unsupported operation opset version "{}"'.format(version))
def backend_attrs(self):
    """Serialization attributes: boolean flags as 'true'/'false' strings,
    scalar attributes as-is, and list-valued attributes via attr_getter."""
    def as_bool(attr):
        # flag rendered through bool_to_str
        return attr, lambda node: bool_to_str(node, attr)

    def as_list(attr):
        # list-valued attribute fetched through attr_getter
        return attr, lambda node: attr_getter(node, attr)

    return [
        as_bool('flip'),
        as_bool('clip'),
        'step',
        'offset',
        as_bool('scale_all_sizes'),
        as_list('min_size'),
        as_list('max_size'),
        as_list('aspect_ratio'),
        as_list('variance'),
        as_list('density'),
        as_list('fixed_size'),
        as_list('fixed_ratio'),
    ]
def backend_attrs(self):
    """Serialization attributes for opset6 only: the two dtype attributes are
    converted to destination type strings, merge_repeated to 'true'/'false'.
    Any other opset raises Error."""
    version = self.get_opset()
    if version != 'opset6':
        raise Error('Unknown opset version "{}"'.format(version))
    return [
        ('classes_index_type',
         lambda node: np_data_type_to_destination_type(node.classes_index_type)),
        ('sequence_length_type',
         lambda node: np_data_type_to_destination_type(node.sequence_length_type)),
        ('merge_repeated', lambda node: bool_to_str(node, 'merge_repeated')),
    ]
def backend_attrs(self):
    """Serialization attributes: scalars emitted as-is, 'ratio'/'scale' lists
    via attr_getter, and the three NMS-related flags as 'true'/'false'."""
    def as_list(attr):
        return attr, lambda node: attr_getter(node, attr)

    def as_bool(attr):
        return attr, lambda node: bool_to_str(node, attr)

    return [
        'feat_stride',
        'base_size',
        'min_size',
        as_list('ratio'),
        as_list('scale'),
        'pre_nms_topn',
        'post_nms_topn',
        'nms_thresh',
        'framework',
        'box_coordinate_scale',
        'box_size_scale',
        as_bool('normalize'),
        as_bool('clip_after_nms'),
        as_bool('clip_before_nms'),
    ]
def backend_attrs(self):
    """MVN serialization attributes per opset: opset2 uses across_channels,
    opset6 uses eps_mode; both share eps and normalize_variance. Any other
    opset raises Error."""
    version = self.get_opset()
    norm_flag = ('normalize_variance',
                 lambda node: bool_to_str(node, 'normalize_variance'))
    if version == 'opset2':
        return ['eps',
                ('across_channels', lambda node: bool_to_str(node, 'across_channels')),
                norm_flag]
    if version == 'opset6':
        return ['eps', 'eps_mode', norm_flag]
    raise Error('Unsupported MVN opset version "{}"'.format(version))
def backend_attrs(self):
    """Serialization attributes: 'activations' joins the node's activation
    names with commas (None stays None); linear_before_reset is rendered as
    'true'/'false'."""
    return [
        'hidden_size',  # number of the elements in hidden cell size
        ('activations',
         lambda node: None if node.activations is None else ','.join(node.activations)),
        'activation_alpha',
        'activation_beta',
        'clip',
        ('linear_before_reset', lambda node: bool_to_str(node, 'linear_before_reset')),
    ]
def __init__(self, graph: Graph, attrs: dict):
    """Set up the per-opset backend attribute tables and the default node
    properties, then delegate to the base op constructor.

    :param graph: graph the operation node belongs to
    :param attrs: caller-supplied attributes that override the defaults
    """
    # Backend (IR) attribute lists differ between opset1 and opset4.
    opset1_attrs = [
        ('axes', lambda node: ','.join(map(str, node.axes))),
        ('antialias', lambda node: bool_to_str(node, 'antialias')),
        ('align_corners', lambda node: bool_to_str(node, 'align_corners')),
        'mode',
        'pads_begin',
        'pads_end',
    ]
    opset4_attrs = [
        'mode',
        'nearest_mode',
        'cube_coeff',
        'coordinate_transformation_mode',
        'shape_calculation_mode',
        ('antialias', lambda node: bool_to_str(node, 'antialias')),
        ('pads_begin', lambda node: pad_attribute_to_str(node, 'pads_begin')),
        ('pads_end', lambda node: pad_attribute_to_str(node, 'pads_end')),
    ]
    self.attributes_for_opsets = {
        'opset1': opset1_attrs,
        'opset4': opset4_attrs,
    }

    defaults = {
        'op': self.op,
        'type': self.op,
        'version': 'opset1',
        'axes': None,
        'mode': None,
        'align_corners': 0,
        'antialias': 0,
        'pads_begin': 0,
        'pads_end': 0,
        'infer': self.infer,
        # values coming through input port 1 are forced to int64
        'force_precision_in_ports': {1: 'int64'},
        'in_ports_count': 2,
        'out_ports_count': 1,
    }
    super().__init__(graph, defaults, attrs)
def backend_attrs(self):
    """Serialization attributes: scalars emitted as-is, do_softmax rendered as
    'true'/'false', anchors/mask lists fetched through attr_getter."""
    scalar_attrs = ['coords', 'classes', 'num', 'axis', 'end_axis']
    return scalar_attrs + [
        ('do_softmax', lambda node: bool_to_str(node, 'do_softmax')),
        ('anchors', lambda node: attr_getter(node, 'anchors')),
        ('mask', lambda node: attr_getter(node, 'mask')),
    ]
def test_bool_to_str(self):
    """bool_to_str must render truthy values (True, 1) as 'true' and falsy
    ones (False, 0) as 'false'."""
    graph = build_graph(nodes_attributes,
                        [('input', 'pool_1'), ('pool_1', 'output'), ('output', 'op_output')],
                        {'pool_1': {'bool_attr': None}})
    node = Node(graph, 'pool_1')
    for value, expected in [(True, 'true'), (False, 'false'), (1, 'true'), (0, 'false')]:
        node.bool_attr = value
        self.assertEqual(expected, bool_to_str(node, 'bool_attr'))
def backend_attrs(self):
    """Pooling serialization attributes: spatial stride/kernel/pads are joined
    into comma-separated strings; exclude-pad is 'true'/'false'; auto_pad
    falls back to 'explicit' when the node has no valid value."""
    def spatial_csv(attr):
        # comma-join the spatial slice of a per-dimension attribute
        return lambda node: ','.join(map(str, node[attr][node.spatial_dims]))

    def pad_csv(side):
        # comma-join the begin (0) or end (1) pads over spatial dims
        return lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, side)))

    return [
        ('strides', spatial_csv('stride')),
        ('kernel', spatial_csv('window')),
        ('pads_begin', pad_csv(0)),
        ('pads_end', pad_csv(1)),
        ('exclude-pad', lambda node: bool_to_str(node, 'exclude_pad')),
        'rounding_type',
        ('auto_pad', lambda node: node.auto_pad if node.has_valid('auto_pad') else 'explicit'),
    ]
def backend_attrs(self):
    """Serialization attributes: clip as 'true'/'false', scalars as-is, and
    variance/width/height lists via attr_getter."""
    def as_list(attr):
        return attr, lambda node: attr_getter(node, attr)

    return [
        ('clip', lambda node: bool_to_str(node, 'clip')),
        'img_h',
        'img_w',
        'step',
        'step_h',
        'step_w',
        'offset',
        as_list('variance'),
        as_list('width'),
        as_list('height'),
    ]
def backend_attrs(self):
    """Serialization attributes. Unlike the other boolean flags, 'flip' and
    'clip' are serialized as ints rather than 'true'/'false' strings to keep
    forward compatibility with IE 2021.2."""
    def as_list(attr):
        return attr, lambda node: attr_getter(node, attr)

    return [
        ('flip', lambda node: int(node.flip)),
        ('clip', lambda node: int(node.clip)),
        'step',
        'offset',
        ('scale_all_sizes', lambda node: bool_to_str(node, 'scale_all_sizes')),
        as_list('min_size'),
        as_list('max_size'),
        as_list('aspect_ratio'),
        as_list('variance'),
        as_list('density'),
        as_list('fixed_size'),
        as_list('fixed_ratio'),
    ]
def supported_attrs(self):
    """Supported attributes: scalars emitted as-is, boolean flags rendered as
    'true'/'false' strings through bool_to_str."""
    def as_bool(attr):
        return attr, lambda node: bool_to_str(node, attr)

    return [
        'background_label_id',
        as_bool('clip_after_nms'),
        as_bool('clip_before_nms'),
        'code_type',
        'confidence_threshold',
        as_bool('decrease_label_id'),
        'input_height',
        'input_width',
        'keep_top_k',
        'nms_threshold',
        as_bool('normalized'),
        'num_classes',
        as_bool('share_location'),
        'top_k',
        as_bool('variance_encoded_in_target'),
        'objectness_score',
    ]
def supported_attrs(self):
    """Single supported attribute 'keep_dims', serialized as 'true'/'false'."""
    attr = 'keep_dims'
    return [(attr, lambda node: bool_to_str(node, attr))]
def backend_attrs(self):
    """'eps' is emitted as-is; the two boolean flags are rendered as
    'true'/'false' strings."""
    flags = ['across_channels', 'normalize_variance']
    return ['eps'] + [(flag, lambda node, flag=flag: bool_to_str(node, flag))
                      for flag in flags]
def supported_attrs(self):
    """Single supported flag 'ctc_merge_repeated', serialized as
    'true'/'false'."""
    flag = 'ctc_merge_repeated'
    return [(flag, lambda node: bool_to_str(node, flag))]
def supported_attrs(self):
    """Two supported flags, 'exclusive' and 'reverse', each serialized as
    'true'/'false'."""
    return [(flag, lambda node, flag=flag: bool_to_str(node, flag))
            for flag in ('exclusive', 'reverse')]
def supported_attrs(self):
    """Single supported flag 'special_zero', serialized as 'true'/'false'."""
    flag = 'special_zero'
    return [(flag, lambda node: bool_to_str(node, flag))]
def supported_attrs(self):
    """Transpose flags for the two inputs, each serialized as
    'true'/'false'."""
    return [(flag, lambda node, flag=flag: bool_to_str(node, flag))
            for flag in ('transpose_a', 'transpose_b')]