Example 1
import torch
from torch.onnx import symbolic_helper as sym_help


def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight,
                                     reduction):
    from torch.onnx.symbolic_opset9 import sigmoid, log, sub, neg, mul, add
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = sigmoid(g, input)
    log_sig_x = log(g, sig_x)
    sub_1_x = sub(g, p, sig_x)
    sub_1_y = sub(g, p, target)
    log_1_x = log(g, sub_1_x)
    if pos_weight is None or sym_help._is_none(pos_weight):
        output = neg(
            g, add(g, mul(g, target, log_sig_x), mul(g, sub_1_y, log_1_x)))
    else:
        output = neg(
            g,
            add(g, mul(g, mul(g, target, log_sig_x), pos_weight),
                mul(g, sub_1_y, log_1_x)))

    if weight is not None and not sym_help._is_none(weight):
        output = mul(g, weight, output)

    reduction = sym_help._maybe_get_const(reduction, 'i')
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output)
    elif reduction == 2:
        return g.op("ReduceSum", output)
    else:
        return sym_help._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum"
        )
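This symbolic decomposes BCE-with-logits into primitive ONNX ops: -[pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))], optionally scaled by weight and then reduced. As a minimal sketch of how it gets exercised (file name and shapes are illustrative), exporting a module that calls the functional form routes through a symbolic like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BCEModel(nn.Module):
    def forward(self, logits, target):
        # reduction='mean' corresponds to the reduction == 1 branch above
        return F.binary_cross_entropy_with_logits(logits, target,
                                                  reduction='mean')

logits = torch.randn(4, 3)
target = torch.empty(4, 3).random_(2)
torch.onnx.export(BCEModel(), (logits, target), 'bce_with_logits.onnx',
                  input_names=['logits', 'target'], opset_version=12)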
Example 2
import torch


def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
             pool_mode, aligned):
    from torch.onnx.symbolic_opset9 import sub, squeeze
    from torch.onnx.symbolic_helper import _slice_helper
    from torch.onnx import TensorProtoDataType
    # batch_indices = rois[:, 0].long()
    batch_indices = _slice_helper(g, rois, axes=[1], starts=[0], ends=[1])
    batch_indices = squeeze(g, batch_indices, 1)
    batch_indices = g.op('Cast',
                         batch_indices,
                         to_i=TensorProtoDataType.INT64)
    # rois = rois[:, 1:]
    rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
    if aligned:
        # rois -= 0.5 / spatial_scale
        aligned_offset = g.op('Constant',
                              value_t=torch.tensor([0.5 / spatial_scale],
                                                   dtype=torch.float32))
        rois = sub(g, rois, aligned_offset)
    # RoiAlign with (K, 4) boxes and separate batch indices
    return g.op('RoiAlign',
                input,
                rois,
                batch_indices,
                output_height_i=output_size[0],
                output_width_i=output_size[1],
                spatial_scale_f=spatial_scale,
                sampling_ratio_i=max(0, sampling_ratio),
                mode_s=pool_mode)
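For reference, the slicing at the top of the symbolic mirrors this eager-mode computation: the rois arrive as a (K, 5) tensor whose first column is the batch index, while the ONNX RoiAlign op takes (K, 4) boxes plus a separate int64 batch_indices input. A small illustration (the values are made up):

import torch

rois = torch.tensor([[0., 10., 10., 50., 50.],
                     [1., 20., 20., 60., 60.]])
batch_indices = rois[:, 0].long()  # tensor([0, 1]) -> the 'Cast' to INT64
boxes = rois[:, 1:]                # (K, 4) x1, y1, x2, y2 boxes
print(batch_indices, boxes.shape)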
Example 3
import torch
from torch.nn.modules.utils import _pair
from torch.onnx import symbolic_helper as sym_help
# Assumed aliases: the snippet uses `_slice` and `reshape`/`sub` as short
# names; the originating module presumably imports them like this.
from torch.onnx.symbolic_helper import _slice_helper as _slice
from torch.onnx.symbolic_opset9 import reshape, sub


def symbolic(g,
             features,
             rois,
             out_size,
             spatial_scale,
             sample_num=0,
             aligned=True):
    batch_indices = reshape(
        g,
        g.op('Cast',
             _slice(g, rois, axes=[1], starts=[0], ends=[1]),
             to_i=sym_help.cast_pytorch_to_onnx['Long']), [-1])
    bboxes = _slice(g, rois, axes=[1], starts=[1], ends=[5])
    if aligned:
        scale = sym_help._maybe_get_scalar(spatial_scale)
        offset = g.op("Constant",
                      value_t=torch.tensor(0.5 / scale,
                                           dtype=torch.float32))
        bboxes = sub(g, bboxes, offset)
    out_h, out_w = _pair(out_size)
    return g.op('RoiAlign',
                features,
                bboxes,
                batch_indices,
                output_height_i=out_h,
                output_width_i=out_w,
                sampling_ratio_i=sample_num,
                spatial_scale_f=spatial_scale)
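The aligned branch subtracts 0.5 / spatial_scale in input coordinates because the ONNX RoiAlign op has no half-pixel coordinate mode in these opsets; since RoiAlign multiplies the boxes by spatial_scale internally, the subtraction amounts to a half-pixel shift on the feature map. A worked check, assuming spatial_scale = 0.25:

spatial_scale = 0.25                  # feature map is 1/4 input resolution
offset = 0.5 / spatial_scale          # 2.0 pixels in input coordinates
x1 = 10.0
print((x1 - offset) * spatial_scale)  # 2.0 on the feature map
print(x1 * spatial_scale - 0.5)       # same result: a half-pixel shift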
Example 4
import torch
from torch.onnx import symbolic_helper
from torch.onnx import symbolic_opset9 as opset9


def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight,
                                     reduction):
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = opset9.sigmoid(g, input)
    log_sig_x = opset9.log(g, sig_x)
    sub_1_x = opset9.sub(g, p, sig_x)
    sub_1_y = opset9.sub(g, p, target)
    log_1_x = opset9.log(g, sub_1_x)
    if pos_weight is None or symbolic_helper._is_none(pos_weight):
        output = opset9.neg(
            g,
            opset9.add(g, opset9.mul(g, target, log_sig_x),
                       opset9.mul(g, sub_1_y, log_1_x)),
        )
    else:
        output = opset9.neg(
            g,
            opset9.add(
                g,
                opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight),
                opset9.mul(g, sub_1_y, log_1_x),
            ),
        )

    if weight is not None and not symbolic_helper._is_none(weight):
        output = opset9.mul(g, weight, output)

    reduction = symbolic_helper._maybe_get_const(reduction, "i")
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output, keepdims_i=0)
    elif reduction == 2:
        return g.op("ReduceSum", output, keepdims_i=0)
    else:
        return symbolic_helper._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum",
            input,
        )
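Unlike Example 1, this version passes keepdims_i=0 to ReduceMean/ReduceSum, so the reduced output is a scalar, matching PyTorch's reduced loss. A NumPy sketch of the graph it builds (no weight or pos_weight), useful for sanity-checking the decomposition:

import numpy as np

def bce_with_logits(x, y):
    p = 1.0 / (1.0 + np.exp(-x))                 # Sigmoid
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))

x = np.array([0.3, -1.2, 2.0])
y = np.array([1.0, 0.0, 1.0])
print(bce_with_logits(x, y).mean())              # reduction == 1 (mean)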
Example 5
import torch


def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
             pool_mode, aligned):
    has_custom_op = False
    try:
        import os.path as osp

        from mmcv.ops import get_onnxruntime_op_path
        ort_op_path = get_onnxruntime_op_path()
        has_custom_op = osp.exists(ort_op_path)
    except ImportError:
        pass
    if has_custom_op:
        # Prefer mmcv's custom ONNX Runtime op when its library is present.
        return g.op(
            'mmcv::MMCVRoiAlign',
            input,
            rois,
            aligned_height_i=output_size[0],
            aligned_width_i=output_size[1],
            spatial_scale_f=spatial_scale,
            sampling_ratio_i=max(0, sampling_ratio),
            pool_mode_s=pool_mode,
            aligned_i=aligned)

    # Fallback: express RoiAlign with standard ONNX ops.
    from torch.onnx.symbolic_opset9 import sub, squeeze
    from torch.onnx.symbolic_helper import _slice_helper
    from torch.onnx import TensorProtoDataType
    # batch_indices = rois[:, 0].long()
    batch_indices = _slice_helper(g, rois, axes=[1], starts=[0], ends=[1])
    batch_indices = squeeze(g, batch_indices, 1)
    batch_indices = g.op(
        'Cast', batch_indices, to_i=TensorProtoDataType.INT64)
    # rois = rois[:, 1:]
    rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
    if aligned:
        # rois -= 0.5 / spatial_scale
        aligned_offset = g.op(
            'Constant',
            value_t=torch.tensor([0.5 / spatial_scale],
                                 dtype=torch.float32))
        rois = sub(g, rois, aligned_offset)
    # RoiAlign with (K, 4) boxes and separate batch indices
    return g.op(
        'RoiAlign',
        input,
        rois,
        batch_indices,
        output_height_i=output_size[0],
        output_width_i=output_size[1],
        spatial_scale_f=spatial_scale,
        sampling_ratio_i=max(0, sampling_ratio),
        mode_s=pool_mode)
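When the custom-op branch is taken, the exported graph contains mmcv::MMCVRoiAlign, which plain ONNX Runtime does not know about; the op library must be registered on the session before loading the model. A minimal sketch, assuming mmcv with its onnxruntime ops is installed (the model path is illustrative):

import onnxruntime as ort
from mmcv.ops import get_onnxruntime_op_path

so = ort.SessionOptions()
# Make ONNX Runtime aware of mmcv's custom ops before loading the model.
so.register_custom_ops_library(get_onnxruntime_op_path())
sess = ort.InferenceSession('roi_align.onnx', so)  # illustrative path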
Example 6
from torch.onnx import symbolic_helper as sym_help


def _max_pool(tuple_fn, ndims, return_indices):
    # Minimal sketch of the enclosing factory (in the original source,
    # torch/onnx/symbolic_opset10.py, it also receives a name and applies
    # argument parsing); it supplies the closure variables `tuple_fn`,
    # `ndims` and `return_indices` used below.
    def symbolic_fn(g, input, kernel_size, stride, padding, dilation,
                    ceil_mode):
        if not stride:
            stride = kernel_size
        kwargs = {
            "kernel_shape_i": tuple_fn(kernel_size),
            "pads_i": tuple_fn(padding) * 2,
            "strides_i": tuple_fn(stride),
            "ceil_mode_i": ceil_mode,
        }
        if set(tuple_fn(dilation)) != {1}:
            kwargs["dilations_i"] = tuple_fn(dilation)
        # Easy but hacky way to get flattened index values, used to
        # convert the indices back to non-flattened form.
        # In ONNX the indices are computed as a flattened 1-D tensor,
        # so the values in indices are in [0, N x C x D1 x ... x Dn).
        # To convert the indices to the format used by PyTorch, we first
        # run a max pool with kernel size and stride of 1 on the same
        # input. This yields a tensor of indices in which every element
        # holds its own flattened index. Using this tensor as a reference,
        # we extract the first index of each (N, C) plane and subtract it
        # from every index in that plane, so that each index becomes
        # relative to the plane it belongs to.
        # For more information:
        # https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407
        if return_indices:
            r, indices = g.op("MaxPool", input, outputs=2, **kwargs)
            _, flattened_indices = g.op(
                "MaxPool",
                input,
                outputs=2,
                kernel_shape_i=[1 for _ in range(ndims)],
                strides_i=[1 for _ in range(ndims)],
            )
            # convert indices to have non-flattened indices values
            from torch.onnx.symbolic_opset9 import sub

            s = sym_help._slice_helper(
                g,
                flattened_indices,
                axes=[2 + i for i in range(ndims)],
                starts=tuple_fn(0),
                ends=tuple_fn(1),
            )
            indices = sub(g, indices, s)
            return r, indices
        else:
            r = g.op("MaxPool", input, outputs=1, **kwargs)
            return r

    return symbolic_fn
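An eager-mode sketch of the index conversion the comments describe: PyTorch max-pool indices restart at 0 for every (N, C) plane, while ONNX MaxPool indices are flattened over the whole N x C x D1 x ... x Dn tensor. The kernel-1, stride-1 pool gives every element its own flattened index, and slicing out the first index of each plane yields the per-plane offset that the symbolic subtracts:

import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).reshape(1, 2, 8)
_, idx = F.max_pool1d(x, kernel_size=2, return_indices=True)
print(idx)  # tensor([[[1, 3, 5, 7], [1, 3, 5, 7]]]) -- per-plane indices
# ONNX-style flattened indices for channel 1 would be 9, 11, 13, 15;
# subtracting that plane's first flattened index (8) recovers 1, 3, 5, 7.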