Example #1
# ONNX symbolic for clamp_max: cast the bound to the input's dtype, then emit
# Clip for a scalar bound or Min for a tensor bound.
def clamp_max(g, self, max):
    dtype = self.type().scalarType()
    max = g.op("Cast", max, to_i=sym_help.cast_pytorch_to_onnx[dtype])
    if sym_help._get_tensor_rank(max) == 0:
        min = unused(g)
        return op_with_optional_float_cast(g, "Clip", self, min, max, opset_before=12)
    else:
        return op_with_optional_float_cast(g, "Min", self, max, opset_before=12)
Example #2
# ONNX symbolic for clamp_min: cast the bound to the input's dtype, then emit
# Clip for a scalar bound or Max for a tensor bound.
def clamp_min(g, self, min):
    dtype = self.type().scalarType()
    min = g.op("Cast", min, to_i=symbolic_helper.cast_pytorch_to_onnx[dtype])
    if symbolic_helper._get_tensor_rank(min) == 0:
        max = opset9.unused(g)
        return opset9.op_with_optional_float_cast(
            g, "Clip", self, min, max, opset_before=12
        )
    else:
        return opset9.op_with_optional_float_cast(g, "Max", self, min, opset_before=12)
Example #3
# ONNX symbolic for clamp: cast any non-None bound to the input's dtype, emit a
# single Clip when both bounds are scalars, otherwise compose clamp_min and clamp_max.
def clamp(g, self, min, max):
    dtype = self.type().scalarType()

    def _cast_if_not_none(tensor, dtype):
        if tensor is not None and not symbolic_helper._is_none(tensor):
            return g.op(
                "Cast", tensor, to_i=symbolic_helper.cast_pytorch_to_onnx[dtype]
            )
        else:
            return tensor

    if dtype is not None:
        min = _cast_if_not_none(min, dtype)
        max = _cast_if_not_none(max, dtype)

    if symbolic_helper._is_none(min):
        return clamp_max(g, self, max)
    elif symbolic_helper._is_none(max):
        return clamp_min(g, self, min)
    else:
        if (
            symbolic_helper._get_tensor_rank(min) == 0
            and symbolic_helper._get_tensor_rank(max) == 0
        ):
            return opset9.op_with_optional_float_cast(
                g, "Clip", self, min, max, opset_before=12
            )
        else:
            return clamp_max(g, clamp_min(g, self, min), max)
Example #4
# Symbolic for average pooling (tuple_fn and name are free variables supplied by
# the enclosing pooling helper): when count_include_pad is set, emit an explicit
# Pad node and zero out the padding, then emit the AveragePool node itself.
def symbolic_fn(
    g,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override=None,
):
    if not stride:
        stride = kernel_size
    padding = symbolic_helper._avgpool_helper(
        tuple_fn, padding, kernel_size, stride, divisor_override, name
    )
    if count_include_pad:
        input = opset9.op_with_optional_float_cast(
            g,
            "Pad",
            input,
            pads_i=((0,) * 2 + padding) * 2,
            mode_s="constant",
            value_f=0.0,
            opset_before=11,
        )
        padding = (0,) * len(padding)
    output = g.op(
        "AveragePool",
        input,
        kernel_shape_i=tuple_fn(kernel_size),
        strides_i=tuple_fn(stride),
        pads_i=padding * 2,
        ceil_mode_i=ceil_mode,
    )
    return output
Example #5
# ONNX symbolic for hardtanh: build Constant nodes for the bounds in the
# input's dtype (defaulting to float) and emit a Clip.
def hardtanh(g, self, min_val, max_val):
    dtype = self.type().scalarType()
    if dtype is None:
        dtype = ScalarType.FLOAT
    else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(min_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(max_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return op_with_optional_float_cast(g, "Clip", self, min_val, max_val, opset_before=12)
Example #6
# ONNX symbolic for relu6: Relu followed by a clamp to the constant range
# [0, 6] in the input's dtype (defaulting to float).
def relu6(g, input):
    relu = op_with_optional_float_cast(g, "Relu", input, opset_before=14)
    dtype = input.type().scalarType()
    if dtype is None:
        dtype = ScalarType.FLOAT
    else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(6, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return clamp(g, relu, min_val, max_val)
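These symbolic functions run automatically during torch.onnx.export whenever the traced graph contains the corresponding ATen ops. A small usage sketch (the file name is arbitrary, and the exact ONNX nodes produced depend on the torch version and opset):

import torch
import torch.nn.functional as F

class Tiny(torch.nn.Module):
    def forward(self, x):
        # avg_pool2d, clamp, hardtanh and relu6 hit the symbolic functions shown above
        x = F.avg_pool2d(x, kernel_size=2, count_include_pad=True)
        x = torch.clamp(x, min=0.0, max=0.5)
        x = F.hardtanh(x, min_val=-1.0, max_val=1.0)
        return F.relu6(x)

# exporting at opset 11 exercises the opset_before casting paths
torch.onnx.export(Tiny(), torch.randn(1, 3, 8, 8), "tiny.onnx", opset_version=11)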