def nms_core_symbolic(g, dets, iou_thr, score_thr, max_num):
    """ONNX symbolic for NMS over a single-class detections tensor.

    Builds a ``NonMaxSuppression`` node followed by a full-size TopK so
    that the surviving box indices are returned ordered by descending
    score.

    Args:
        g: ONNX graph-building context.
        dets: graph value of shape (N, 5); the first four columns are box
            coordinates and the fifth is the score (assumed layout —
            TODO confirm against caller).
        iou_thr (float): IoU suppression threshold, must be in [0, 1].
        score_thr (float): minimum score for a candidate box.
        max_num (int): maximum number of boxes to keep; must be > 0.

    Returns:
        Graph value holding the kept box indices, highest score first.
    """
    # Fix: dropped the unused `unsqueeze` import together with the dead
    # commented-out call that was its only reference.
    from torch.onnx.symbolic_opset9 import reshape, squeeze
    from torch.onnx.symbolic_opset10 import _slice

    assert 0 <= iou_thr <= 1

    # NonMaxSuppression expects boxes as (batch, N, 4) and scores as
    # (batch, classes, N); here there is exactly one batch and one class.
    multi_bboxes = _slice(g, dets, axes=[1], starts=[0], ends=[4])
    multi_bboxes = reshape(g, multi_bboxes, [1, -1, 4])
    multi_scores = _slice(g, dets, axes=[1], starts=[4], ends=[5])
    multi_scores = reshape(g, multi_scores, [1, 1, -1])

    assert max_num > 0
    indices = g.op(
        'NonMaxSuppression', multi_bboxes, multi_scores,
        g.op('Constant', value_t=torch.LongTensor([max_num])),
        g.op('Constant', value_t=torch.FloatTensor([iou_thr])),
        g.op('Constant', value_t=torch.FloatTensor([score_thr])))
    # NonMaxSuppression yields (num_kept, 3) rows of
    # [batch_idx, class_idx, box_idx]; only the box index is needed here.
    indices = squeeze(g, _slice(g, indices, axes=[1], starts=[2], ends=[3]), 1)

    # Sort indices by score: gather the kept scores, then TopK over all
    # of them so the permutation orders indices by descending score.
    scores = reshape(g, multi_scores, [
        -1,
    ])
    kept_scores = g.op('Gather', scores, indices, axis_i=0)
    elements_num = sym_help._size_helper(
        g, kept_scores, dim=g.op('Constant', value_t=torch.LongTensor([0])))
    _, order = sym_help._topk_helper(g, kept_scores, elements_num, dim=0)
    indices = g.op('Gather', indices, order, axis_i=0)
    return indices
def symbolic(g, features, rois, out_size, spatial_scale, sample_num=0, aligned=True):
    """Export RoIAlign as an ONNX ``RoiAlign`` node.

    The first column of ``rois`` carries the per-box batch index; the
    remaining four columns are the box coordinates. When ``aligned`` is
    true, every box is shifted by ``-0.5 / spatial_scale`` before export
    (ONNX ``RoiAlign`` has no aligned mode of its own, so the half-pixel
    offset is baked into the coordinates).
    """
    idx_column = _slice(g, rois, axes=[1], starts=[0], ends=[1])
    roi_batch_ids = reshape(
        g,
        g.op('Cast', idx_column, to_i=sym_help.cast_pytorch_to_onnx['Long']),
        [-1])
    boxes = _slice(g, rois, axes=[1], starts=[1], ends=[5])

    if aligned:
        scale = sym_help._maybe_get_scalar(spatial_scale)
        half_pixel = g.op(
            "Constant",
            value_t=torch.tensor(0.5 / scale, dtype=torch.float32))
        boxes = sub(g, boxes, half_pixel)

    out_h, out_w = _pair(out_size)
    return g.op(
        'RoiAlign', features, boxes, roi_batch_ids,
        output_height_i=out_h,
        output_width_i=out_w,
        sampling_ratio_i=sample_num,
        spatial_scale_f=spatial_scale)
def symbolic(g, features, rois, out_size, spatial_scale, sample_num=0):
    """Export RoIAlign as an ONNX ``RoiAlign`` node (non-aligned variant).

    ``rois`` packs the batch index in column 0 and the box coordinates in
    columns 1..4; they are split apart because ONNX ``RoiAlign`` takes
    boxes and batch indices as separate inputs.
    """
    idx_column = _slice(g, rois, axes=[1], starts=[0], ends=[1])
    roi_batch_ids = reshape(
        g,
        g.op('Cast', idx_column, to_i=sym_help.cast_pytorch_to_onnx['Long']),
        [-1])
    boxes = _slice(g, rois, axes=[1], starts=[1], ends=[5])

    out_h, out_w = _pair(out_size)
    return g.op(
        'RoiAlign', features, boxes, roi_batch_ids,
        output_height_i=out_h,
        output_width_i=out_w,
        sampling_ratio_i=sample_num,
        spatial_scale_f=spatial_scale)
def _slice_helper(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
    """Forward a slice request to the opset-10 ``_slice`` symbolic.

    TODO(ruobing): add support for opset < 10.
    """
    from torch.onnx.symbolic_opset10 import _slice as _slice10
    return _slice10(g, input, axes, starts, ends, steps, dynamic_slice)
def get_size(x, dim):
    """Return dimension ``dim`` of ``x`` as a Long scalar graph value.

    NOTE(review): relies on ``g``, ``_slice`` and ``cast`` from the
    enclosing scope — this appears to be a module-level copy of the
    nested helper inside ``multiclass_nms_core_symbolic``; confirm which
    one callers actually use.
    """
    full_shape = g.op('Shape', x)
    size_val = _slice(g, full_shape, axes=[0], starts=[dim], ends=[dim + 1])
    return cast(size_val, 'Long')
def multiclass_nms_core_symbolic(g, multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1):
    """ONNX symbolic for batched multi-class NMS.

    Emits a ``NonMaxSuppression`` node plus the index arithmetic needed to
    gather the surviving boxes, scores and class labels, and finally a
    TopK to keep at most ``max_num`` highest-scoring results.

    Args:
        g: ONNX graph-building context.
        multi_bboxes: graph value with per-class boxes; reshaped below to
            (batch, num_priors, 4) — exact incoming layout assumed, TODO
            confirm against caller.
        multi_scores: graph value with per-class scores; transposed and
            reshaped below to (batch, classes, num_priors).
        score_thr (float): minimum score for a candidate box.
        nms_cfg (dict): must contain ``type == 'nms'`` and ``iou_thr``.
        max_num (int): maximum number of detections to keep; must be > 0
            despite the -1 default (asserted below).

    Returns:
        Graph value of shape (num_kept, 6): 4 box coordinates, the score
        and the class label concatenated along dim 1 (single-tensor output
        is a workaround, see comment near the Concat below).
    """
    from torch.onnx.symbolic_opset9 import reshape, squeeze
    from torch.onnx.symbolic_opset10 import _slice

    # Small graph-op helpers closed over `g`.
    def cast(x, dtype):
        # Cast graph value `x` to the ONNX dtype named by the PyTorch key.
        return g.op('Cast', x, to_i=sym_help.cast_pytorch_to_onnx[dtype])

    def get_size(x, dim):
        # Dynamic size of dimension `dim` of `x`, as a Long graph value.
        shape = g.op('Shape', x)
        dim = _slice(g, shape, axes=[0], starts=[dim], ends=[dim + 1])
        return cast(dim, 'Long')

    # Only plain 'nms' with an explicit IoU threshold is supported.
    nms_op_type = nms_cfg.get('type', 'nms')
    assert nms_op_type == 'nms'
    assert 'iou_thr' in nms_cfg
    iou_threshold = nms_cfg['iou_thr']
    assert 0 <= iou_threshold <= 1

    # Transpose and reshape input tensors to fit ONNX NonMaxSuppression:
    # boxes -> (batch, num_priors, 4), scores -> (batch, classes, num_priors).
    # (0 in a Reshape target keeps the corresponding input dimension.)
    multi_bboxes = reshape(g, multi_bboxes, [0, -1, 4])
    multi_bboxes = g.op('Transpose', multi_bboxes, perm_i=[1, 0, 2])
    batches_num = get_size(multi_bboxes, 0)
    spatial_num = get_size(multi_bboxes, 1)
    multi_scores = g.op('Transpose', multi_scores, perm_i=[1, 0])
    scores_shape = g.op('Concat',
                        batches_num,
                        g.op('Constant', value_t=torch.LongTensor([-1])),
                        spatial_num,
                        axis_i=0)
    multi_scores = reshape(g, multi_scores, scores_shape)
    classes_num = get_size(multi_scores, 1)

    assert max_num > 0
    # NonMaxSuppression returns (num_kept, 3) rows of
    # [batch_idx, class_idx, box_idx].
    indices = g.op(
        'NonMaxSuppression', multi_bboxes, multi_scores,
        g.op('Constant', value_t=torch.LongTensor([max_num])),
        g.op('Constant', value_t=torch.FloatTensor([iou_threshold])),
        g.op('Constant', value_t=torch.FloatTensor([score_thr])))

    # Flatten bboxes and scores so kept entries can be fetched with a
    # single Gather each.
    multi_bboxes_flat = reshape(g, multi_bboxes, [-1, 4])
    multi_scores_flat = reshape(g, multi_scores, [
        -1,
    ])

    # Flatten indices: split the (num_kept, 3) index triples into columns.
    batch_indices = _slice(g, indices, axes=[1], starts=[0], ends=[1])
    class_indices = _slice(g, indices, axes=[1], starts=[1], ends=[2])
    box_indices = _slice(g, indices, axes=[1], starts=[2], ends=[3])

    # Long-typed arithmetic helpers for composing flat offsets.
    def add(*args, dtype='Long'):
        x = g.op('Add', args[0], args[1])
        if dtype is not None:
            x = cast(x, dtype)
        return x

    def mul(*args, dtype='Long'):
        x = g.op('Mul', args[0], args[1])
        if dtype is not None:
            x = cast(x, dtype)
        return x

    # Row-major offsets into the flattened boxes/scores tensors.
    flat_box_indices = add(mul(batch_indices, spatial_num), box_indices)
    flat_score_indices = add(
        mul(add(mul(batch_indices, classes_num), class_indices), spatial_num),
        box_indices)

    # Select bboxes.
    out_bboxes = reshape(
        g, g.op('Gather', multi_bboxes_flat, flat_box_indices, axis_i=0),
        [-1, 4])
    out_scores = reshape(
        g, g.op('Gather', multi_scores_flat, flat_score_indices, axis_i=0),
        [-1, 1])
    # Having either batch size or number of classes here equal to one is
    # the limitation of implementation (batch and class indices are simply
    # summed to form the label column).
    class_indices = reshape(
        g, cast(add(class_indices, batch_indices), 'Float'), [-1, 1])

    # Combine bboxes, scores and labels into a single tensor.
    # This a workaround for a PyTorch bug (feature?),
    # limiting ONNX operations to output only single tensor.
    out_combined_bboxes = g.op(
        'Concat', out_bboxes, out_scores, class_indices, axis_i=1)

    # Get the top scored bboxes only: k = min(max_num, num_kept).
    elements_num = sym_help._size_helper(
        g, out_scores, dim=g.op('Constant', value_t=torch.LongTensor([0])))
    max_num = g.op('Constant', value_t=torch.LongTensor([max_num]))
    if sym_help._export_onnx_opset_version < 12:
        # 'Min' on int64 inputs requires opset >= 12; emulate it with
        # Concat + ReduceMin on older opsets.
        kn = g.op('Concat', max_num, elements_num, axis_i=0)
        kn = g.op('ReduceMin', kn, keepdims_i=0)
    else:
        kn = g.op('Min', max_num, elements_num)
    _, top_indices = sym_help._topk_helper(g, out_scores, kn, dim=0)
    # top_indices = squeeze(g, top_indices, dim=1)
    top_indices = reshape(g, top_indices, [
        -1,
    ])
    out_combined_bboxes = g.op(
        'Gather', out_combined_bboxes, top_indices, axis_i=0)
    return out_combined_bboxes