import warnings

import torch
from torch.onnx.symbolic_opset9 import _cast_Long, select, squeeze


def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width,
              sampling_ratio, aligned):
    # rois[:, 0] holds the batch index; columns 1-4 are the box coordinates.
    batch_indices = _cast_Long(
        g, squeeze(
            g, select(
                g, rois, 1,
                g.op('Constant', value_t=torch.tensor([0], dtype=torch.long))),
            1), False)
    rois = select(
        g, rois, 1,
        g.op('Constant', value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
    if aligned:
        warnings.warn(
            'ONNX export of ROIAlign with aligned=True does not match PyTorch '
            'when using malformed boxes; ONNX forces ROIs to be 1x1 or larger.')
        # aligned=True shifts the box coordinates by half a pixel in the input
        # coordinate frame, since the ONNX RoiAlign op has no such flag.
        scale = torch.tensor(0.5 / spatial_scale).to(dtype=torch.float)
        rois = g.op('Sub', rois, scale)
    return g.op('RoiAlign', input, rois, batch_indices,
                spatial_scale_f=spatial_scale,
                output_height_i=pooled_height,
                output_width_i=pooled_width,
                sampling_ratio_i=sampling_ratio)
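# --- Example: wiring a symbolic like the one above into torch.onnx.export.
# A minimal sketch, assuming the backing custom op is registered under the
# name 'torchvision::roi_align' (as torchvision does); the helper name and
# opset number are illustrative.
import torch.onnx


def register_roi_align_symbolic(opset_version=11):
    torch.onnx.register_custom_op_symbolic('torchvision::roi_align',
                                           roi_align, opset_version)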
import torch
from torch.onnx.symbolic_opset9 import _cast_Long, select, squeeze


def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width,
              sampling_ratio, aligned):
    # This variant has no way to express aligned=True, so refuse the export.
    if aligned:
        raise RuntimeError(
            'Unsupported: ONNX export of roi_align with aligned')
    batch_indices = _cast_Long(
        g, squeeze(
            g, select(
                g, rois, 1,
                g.op('Constant', value_t=torch.tensor([0], dtype=torch.long))),
            1), False)
    rois = select(
        g, rois, 1,
        g.op('Constant', value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
    return g.op('RoiAlign', input, rois, batch_indices,
                spatial_scale_f=spatial_scale,
                output_height_i=pooled_height,
                output_width_i=pooled_width,
                sampling_ratio_i=sampling_ratio)
import warnings
from sys import maxsize

import torch
import torch.onnx.symbolic_helper as sym_help


def embedding_bag(g, embedding_matrix, indices, offsets, scale_grad_by_freq,
                  mode, sparse, per_sample_weights, include_last_offset,
                  padding_idx):
    if scale_grad_by_freq and sym_help._training_mode:
        return sym_help._onnx_unsupported(
            'embedding_bag with scale_grad_by_freq for training mode')
    if padding_idx is not None and padding_idx >= 0:
        raise RuntimeError('embedding_bag with padding_idx')
    from torch.onnx.symbolic_opset9 import select
    warnings.warn(
        'Export of embedding_bag with dynamic input/offsets shape is not '
        'supported in opset 10. Please use opset 11 or higher to export model '
        'for dynamic input shape.')
    offsets_dim_0 = sym_help._get_tensor_dim_size(offsets, 0)
    if offsets_dim_0 is not None:
        if include_last_offset:
            offset_len = offsets_dim_0 - 1
            offsets_extended = offsets
        else:
            offset_len = offsets_dim_0
            # Append sys.maxsize so the last bag's slice runs to the end.
            offsets_extended = [
                offsets, g.op('Constant', value_t=torch.tensor([maxsize]))]
            offsets_extended = g.op('Concat', *offsets_extended, axis_i=0)
        list_ = []
        for i in range(offset_len):
            start_ = sym_help._unsqueeze_helper(
                g, select(g, offsets_extended, torch.tensor(0),
                          torch.tensor(i)), [0])
            end_ = sym_help._unsqueeze_helper(
                g, select(g, offsets_extended, torch.tensor(0),
                          torch.tensor(i + 1)), [0])
            axes_ = g.op('Constant', value_t=torch.tensor([0]))
            indices_row = g.op('Slice', indices, start_, end_, axes_)
            embeddings = g.op('Gather', embedding_matrix, indices_row)
            if not sym_help._is_none(per_sample_weights):
                per_sample_weights_row = g.op('Slice', per_sample_weights,
                                              start_, end_, axes_)
                per_sample_weights_row = sym_help._unsqueeze_helper(
                    g, per_sample_weights_row, [1])
                embeddings = g.op('Mul', embeddings, per_sample_weights_row)
            # mode: 0 = sum, 1 = mean, 2 = max.
            if mode == 0:
                embeddings = sym_help._reducesum_helper(
                    g, embeddings, axes_i=[0], keepdims_i=0)
            elif mode == 1:
                embeddings = g.op('ReduceMean', embeddings, axes_i=[0],
                                  keepdims_i=0)
            else:
                embeddings = g.op('ReduceMax', embeddings, axes_i=[0],
                                  keepdims_i=0)
            embeddings = sym_help._unsqueeze_helper(g, embeddings, [0])
            list_.append(embeddings)
        output = g.op('Concat', *list_, axis_i=0)
        # aten::embedding_bag returns a tuple of 4 elements:
        # output, offset2bag, bag_size, max_indices. The last three outputs
        # are not used in torch.nn.EmbeddingBag or
        # torch.nn.functional.embedding_bag.
        return output, None, None, None
    else:
        return sym_help._onnx_unsupported(
            'embedding_bag with unknown shape of offsets for opset 10 is not '
            'supported. Please use opset 11 or higher.')
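# For reference, a minimal eager-mode example of the semantics the loop above
# unrolls (numbers illustrative, not from the source): each bag is the slice
# indices[offsets[i]:offsets[i + 1]], reduced according to `mode`.
import torch
import torch.nn.functional as F

weight = torch.randn(10, 3)              # embedding matrix
indices = torch.tensor([1, 2, 4, 5, 4])
offsets = torch.tensor([0, 2])           # bags: indices[0:2] and indices[2:5]
out = F.embedding_bag(indices, weight, offsets, mode='sum')
# out[0] == weight[1] + weight[2]
# out[1] == weight[4] + weight[5] + weight[4]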
import os
import sys

import torch


def symbolic(g, bboxes, scores, iou_threshold, offset):
    from ..onnx import is_custom_op_loaded
    has_custom_op = is_custom_op_loaded()
    # The TensorRT nms plugin is aligned with the original nms in ONNX Runtime,
    # so for the TensorRT backend fall through to standard NonMaxSuppression.
    is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
    if has_custom_op and (not is_trt_backend):
        return g.op(
            'mmcv::NonMaxSuppression',
            bboxes,
            scores,
            iou_threshold_f=float(iou_threshold),
            offset_i=int(offset))
    else:
        from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
        boxes = unsqueeze(g, bboxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op(
            'Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long))
        iou_threshold = g.op(
            'Constant', value_t=torch.tensor([iou_threshold],
                                             dtype=torch.float))
        nms_out = g.op('NonMaxSuppression', boxes, scores,
                       max_output_per_class, iou_threshold)
        # NonMaxSuppression returns [num_selected, 3] rows of
        # (batch_index, class_index, box_index); keep the box indices.
        return squeeze(
            g,
            select(
                g, nms_out, 1,
                g.op('Constant', value_t=torch.tensor([2],
                                                      dtype=torch.long))), 1)
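# Usage note for the backend switch above: the TensorRT-aligned path is
# selected purely through the environment, e.g.
#   ONNX_BACKEND=MMCVTensorRT python export_model.py ...
# (the script name is illustrative; only the variable name and value come from
# the check above).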
import sys

import torch


def symbolic(g, bboxes, scores, iou_threshold, offset):
    from ..onnx import is_custom_op_loaded
    has_custom_op = is_custom_op_loaded()
    if has_custom_op:
        return g.op(
            'mmcv::NonMaxSuppression',
            bboxes,
            scores,
            iou_threshold_f=float(iou_threshold),
            offset_i=int(offset))
    else:
        from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
        boxes = unsqueeze(g, bboxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op(
            'Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long))
        iou_threshold = g.op(
            'Constant', value_t=torch.tensor([iou_threshold],
                                             dtype=torch.float))
        nms_out = g.op('NonMaxSuppression', boxes, scores,
                       max_output_per_class, iou_threshold)
        return squeeze(
            g,
            select(
                g, nms_out, 1,
                g.op('Constant', value_t=torch.tensor([2],
                                                      dtype=torch.long))), 1)
import sys

import torch
from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze  # assumed module context


def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
    boxes = unsqueeze(g, boxes, 0)
    scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
    max_output_per_class = g.op(
        'Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long))
    iou_threshold = g.op(
        'Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
    nms_out = g.op('NonMaxSuppression', boxes, scores, max_output_per_class,
                   iou_threshold)
    # Keep only the box indices (column 2) of the (batch, class, box) rows.
    return squeeze(
        g,
        select(
            g, nms_out, 1,
            g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))), 1)
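# Sketch: how a symbolic like the one above is typically hooked up, via the
# `symbolic` static method of a custom autograd.Function so it fires during
# torch.onnx.export. The class name and forward body are illustrative, not
# from the source.
import torch


class _MultiLabelNMS(torch.autograd.Function):

    @staticmethod
    def forward(ctx, boxes, scores, iou_threshold):
        # Eager path: any NMS implementation returning the kept box indices.
        from torchvision.ops import nms
        return nms(boxes, scores, iou_threshold)

    symbolic = staticmethod(symbolic_multi_label_nms)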
import torch
from torch.onnx.symbolic_opset9 import _cast_Long, select, squeeze


def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width,
              sampling_ratio):
    batch_indices = _cast_Long(
        g, squeeze(
            g, select(
                g, rois, 1,
                g.op('Constant', value_t=torch.tensor([0], dtype=torch.long))),
            1), False)
    rois = select(
        g, rois, 1,
        g.op('Constant', value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
    return g.op('RoiAlign', input, rois, batch_indices,
                spatial_scale_f=spatial_scale,
                output_height_i=pooled_height,
                output_width_i=pooled_width,
                sampling_ratio_i=sampling_ratio)
import sys

import torch


def symbolic(g, bboxes, scores, iou_threshold, offset):
    from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
    boxes = unsqueeze(g, bboxes, 0)
    scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
    max_output_per_class = g.op(
        'Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long))
    iou_threshold = g.op(
        'Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
    nms_out = g.op('NonMaxSuppression', boxes, scores, max_output_per_class,
                   iou_threshold)
    return squeeze(
        g,
        select(
            g, nms_out, 1,
            g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))), 1)
import torch
from torch.onnx.symbolic_opset9 import index_select, scatter, select, view  # assumed module context


def symbolic_nmsfilt(g, boxes, scores, iou_threshold, score_threshold,
                     max_output_boxes):
    # A non-positive max_output_boxes means "return all"; use a large cap.
    if max_output_boxes <= 0:
        max_output_boxes = 10000
    shape = g.op('Shape', scores)  # original shape
    boxes = view(g, boxes, (1, -1, 4))
    max_output_per_class = g.op(
        'Constant', value_t=torch.tensor([max_output_boxes],
                                         dtype=torch.long))
    iou_threshold = g.op(
        'Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
    score_threshold = g.op(
        'Constant', value_t=torch.tensor([score_threshold],
                                         dtype=torch.float))
    # center_point_box == 1 selects the (center_x, center_y, width, height)
    # box format.
    nms_out = g.op('NonMaxSuppression', boxes, view(g, scores, (1, 1, -1)),
                   max_output_per_class, iou_threshold, score_threshold,
                   center_point_box_i=1)
    idx = view(
        g,
        select(
            g, nms_out, 1,
            g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))),
        (-1, ))
    scores = view(g, scores, (-1, ))
    flat_shape = g.op('Shape', scores)
    src = index_select(g, scores, 0, idx)
    src = view(g, src, (-1, ))
    # Zero-filled tensor of the flattened shape, then scatter the surviving
    # scores back at their original positions.
    filt = g.op('ConstantOfShape', flat_shape)
    filt = scatter(g, filt, 0, idx, src)
    return view(g, filt, shape)
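# Eager-mode illustration of the ConstantOfShape + Scatter combination above:
# start from zeros and write the surviving scores back at their kept indices,
# so suppressed boxes end up with score 0 (values illustrative).
import torch

scores = torch.tensor([0.9, 0.1, 0.8, 0.7])
kept = torch.tensor([0, 2])              # indices NMS kept
filt = torch.zeros_like(scores)
filt.scatter_(0, kept, scores[kept])     # -> tensor([0.9, 0.0, 0.8, 0.0])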
import torch
from torch.onnx.symbolic_opset9 import select, view  # assumed module context


def symbolic_nms(g, boxes, scores, iou_threshold, max_output_boxes):
    # A non-positive max_output_boxes means "return all"; use a large cap.
    if max_output_boxes <= 0:
        max_output_boxes = 10000
    boxes = view(g, boxes, (1, -1, 4))
    max_output_per_class = g.op(
        'Constant', value_t=torch.tensor([max_output_boxes],
                                         dtype=torch.long))
    iou_threshold = g.op(
        'Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
    # center_point_box == 1 selects the (center_x, center_y, width, height)
    # box format.
    nms_out = g.op('NonMaxSuppression', boxes, view(g, scores, (1, 1, -1)),
                   max_output_per_class, iou_threshold, center_point_box_i=1)
    idx = select(
        g, nms_out, 1,
        g.op('Constant', value_t=torch.tensor([2], dtype=torch.long)))
    return view(g, idx, (-1, ))
import torch


def _size_helper(g, self, dim):
    # Gather the size of dimension `dim` out of the full shape vector.
    full_shape = g.op('Shape', self)
    from torch.onnx.symbolic_opset9 import select
    return select(g, full_shape, g.op('Constant', value_t=torch.tensor([0])),
                  dim)
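# Eager equivalent of the helper above (illustrative): index the shape vector
# with a runtime `dim` tensor instead of a Python int.
import torch

x = torch.zeros(2, 5, 7)
dim = torch.tensor(1)
size_at_dim = torch.tensor(x.shape)[dim]  # tensor(5)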
from sys import maxsize

import torch
import torch.onnx.symbolic_helper as sym_help


def embedding_bag(g, embedding_matrix, indices, offsets, scale_grad_by_freq,
                  mode, sparse, per_sample_weights, include_last_offset):
    if scale_grad_by_freq and sym_help._training_mode:
        return sym_help._onnx_unsupported(
            'embedding_bag with scale_grad_by_freq for training mode')
    from torch.onnx.symbolic_opset9 import size, div, select

    # Check if the initial indices were 2D. In functional.py:
    # offsets is set to torch.arange(0, indices.numel(), indices.size(1)),
    # then indices is reshaped to 1D: indices.reshape(-1).
    if (len(list(indices.node().inputs())) > 0
            and next(indices.node().inputs()).type().sizes() is not None
            and len(next(indices.node().inputs()).type().sizes()) == 2):
        # Assert include_last_offset is False
        assert not include_last_offset
        embeddings = g.op("Gather", embedding_matrix, indices)
        dim_0 = size(g, offsets,
                     g.op("Constant", value_t=torch.LongTensor([0])))
        dim_1 = div(
            g,
            size(g, indices, g.op("Constant", value_t=torch.LongTensor([0]))),
            dim_0)
        dim_2 = g.op("Constant", value_t=torch.LongTensor([-1]))
        shape = [dim_0, dim_1, dim_2]
        shape = g.op("Concat", *shape, axis_i=0)
        if not sym_help._is_none(per_sample_weights):
            per_sample_weights = g.op("Unsqueeze", per_sample_weights,
                                      axes_i=[1])
            embeddings = g.op("Mul", embeddings, per_sample_weights)
        embeddings = g.op("Reshape", embeddings, shape)
        # mode: 0 = sum, 1 = mean, 2 = max; reduce across each bag (dim 1).
        if mode == 0:
            embeddings = g.op("ReduceSum", embeddings, axes_i=[1],
                              keepdims_i=0)
        elif mode == 1:
            embeddings = g.op("ReduceMean", embeddings, axes_i=[1],
                              keepdims_i=0)
        else:
            embeddings = g.op("ReduceMax", embeddings, axes_i=[1],
                              keepdims_i=0)
        # aten::embedding_bag returns a tuple of 4 elements:
        # output, offset2bag, bag_size, max_indices. The last three outputs
        # are not used in torch.nn.EmbeddingBag or
        # torch.nn.functional.embedding_bag.
        return embeddings, None, None, None
    elif offsets.type().sizes() is not None:
        if include_last_offset:
            offset_len = offsets.type().sizes()[0] - 1
            offsets_extended = offsets
        else:
            offset_len = offsets.type().sizes()[0]
            # Append sys.maxsize so the last bag's slice runs to the end.
            offsets_extended = [
                offsets, g.op("Constant", value_t=torch.tensor([maxsize]))]
            offsets_extended = g.op("Concat", *offsets_extended, axis_i=0)
        list_ = []
        for i in range(offset_len):
            start_ = g.op("Unsqueeze",
                          select(g, offsets_extended, torch.tensor(0),
                                 torch.tensor(i)), axes_i=[0])
            end_ = g.op("Unsqueeze",
                        select(g, offsets_extended, torch.tensor(0),
                               torch.tensor(i + 1)), axes_i=[0])
            axes_ = g.op("Constant", value_t=torch.tensor([0]))
            indices_row = g.op("Slice", indices, start_, end_, axes_)
            embeddings = g.op("Gather", embedding_matrix, indices_row)
            if not sym_help._is_none(per_sample_weights):
                per_sample_weights_row = g.op("Slice", per_sample_weights,
                                              start_, end_, axes_)
                per_sample_weights_row = g.op("Unsqueeze",
                                              per_sample_weights_row,
                                              axes_i=[1])
                embeddings = g.op("Mul", embeddings, per_sample_weights_row)
            if mode == 0:
                embeddings = g.op("ReduceSum", embeddings, axes_i=[0],
                                  keepdims_i=0)
            elif mode == 1:
                embeddings = g.op("ReduceMean", embeddings, axes_i=[0],
                                  keepdims_i=0)
            else:
                embeddings = g.op("ReduceMax", embeddings, axes_i=[0],
                                  keepdims_i=0)
            embeddings = g.op("Unsqueeze", embeddings, axes_i=[0])
            list_.append(embeddings)
        output = g.op("Concat", *list_, axis_i=0)
        # aten::embedding_bag returns a tuple of 4 elements:
        # output, offset2bag, bag_size, max_indices. The last three outputs
        # are not used in torch.nn.EmbeddingBag or
        # torch.nn.functional.embedding_bag.
        return output, None, None, None
    else:
        return sym_help._onnx_unsupported(
            'embedding_bag with unknown shape of indices')
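# Eager sketch of the 2D-indices fast path above: when indices started out as
# (num_bags, bag_size), a gather followed by a reduce over dim 1 is enough
# (numbers illustrative).
import torch

weight = torch.randn(10, 3)
indices_2d = torch.tensor([[1, 2], [4, 5]])  # 2 bags of 2 indices each
gathered = weight[indices_2d]                # shape (2, 2, 3)
bag_sum = gathered.sum(dim=1)                # mode == 0 ("sum"), shape (2, 3)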