Example #1
def _floor_divide(g, self, other):
    if sym_help._is_fp(self) or sym_help._is_fp(other):
        out = sym_opset9.true_divide(g, self, other)
        return g.op('Floor', out)
    else:
        raise RuntimeError(
            'Integer floor division requires ONNX opset 9 or greater')
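This opset 8 symbolic has no integer fallback, so it only handles floating-point inputs by emitting a true division followed by a Floor node (sym_help and sym_opset9 are the module-level aliases for torch.onnx.symbolic_helper and torch.onnx.symbolic_opset9 in the source file). A minimal eager-mode sketch of what that Floor(Div(...)) subgraph computes:

import torch

a = torch.tensor([7.0, -7.0])
b = torch.tensor([2.0, 2.0])
# Floor rounds toward negative infinity, not toward zero
print(torch.floor(a / b))  # tensor([ 3., -4.])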
Example #2
def __lshift_(g, self, other):
    # make sure to cast other to self's type
    # (when self is long, make sure that other is not float)
    if other.type().scalarType() != self.type().scalarType():
        other = g.op(
            "Cast",
            other,
            to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])

    if self.type().scalarType() == 'Byte':
        return g.op('BitShift', self, other, direction_s="LEFT")

    two = g.op('Constant', value_t=torch.tensor(2, dtype=torch.float32))
    # exponent (same type as self) has to be float or double in onnx::Pow
    if not sym_help._is_fp(self):
        other = g.op("Cast",
                     other,
                     to_i=sym_help.cast_pytorch_to_onnx['Float'])
    two_pow = g.op('Pow', two, other)
    two_pow = g.op(
        'Cast',
        two_pow,
        to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
    lshift = g.op('Mul', self, two_pow)
    return lshift
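ONNX BitShift is only defined for unsigned integer types, hence the Byte-only fast path; every other type rewrites the shift as a multiplication, x << n == x * 2**n. A quick eager-mode check of that identity, assuming a recent PyTorch:

import torch

x = torch.tensor([1, 3, -2], dtype=torch.int64)
n = torch.tensor([4, 2, 1], dtype=torch.int64)
# Left shift is multiplication by a power of two
assert torch.equal(x << n, x * torch.pow(torch.tensor(2), n))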
Example #3
def __rshift_(g, self, other):
    # make sure to cast other to self's type
    # (when self is long, make sure that other is not float)
    if other.type().scalarType() != self.type().scalarType():
        other = g.op(
            "Cast",
            other,
            to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])

    if self.type().scalarType() == "Byte":
        return g.op("BitShift", self, other, direction_s="RIGHT")

    two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
    # exponent (same type as self) has to be float or double in onnx::Pow
    if not sym_help._is_fp(self):
        other = g.op("Cast",
                     other,
                     to_i=sym_help.cast_pytorch_to_onnx["Float"])
    two_pow = g.op("Pow", two, other)
    two_pow = g.op(
        "Cast",
        two_pow,
        to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
    rshift = g.op("Div", self, two_pow)
    return rshift
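The right-shift analogue divides by a power of two instead. For non-negative inputs this matches the shift exactly; for negative inputs the rounding of an integer onnx::Div is backend-dependent and may diverge from PyTorch's arithmetic shift. A small eager-mode check of the non-negative case:

import torch

x = torch.tensor([16, 12, 7], dtype=torch.int64)
n = torch.tensor([4, 2, 1], dtype=torch.int64)
# Right shift of a non-negative value is division by a power of two
assert torch.equal(x >> n, x // torch.pow(torch.tensor(2), n))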
Example #4
def nan_to_num(g, input, nan, posinf, neginf):
    # isnan/lt/gt/logical_and come from opset 9; isinf used below is
    # defined in this opset 10 module
    from torch.onnx.symbolic_opset9 import isnan, lt, gt, logical_and

    # Cannot create an int type tensor with inf/nan values, so we simply
    # return the original tensor
    if not sym_help._is_fp(input):
        return input
    input_dtype = sym_help.pytorch_name_to_type[input.type().scalarType()]
    if nan is None:
        nan = 0.0
    nan_cond = isnan(g, input)
    nan_result = g.op("Where", nan_cond,
                      g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)), input)

    # For None values of posinf, neginf we use the greatest/lowest finite
    # value representable by input’s dtype.
    finfo = torch.finfo(input_dtype)
    if posinf is None:
        posinf = finfo.max
    posinf_cond = logical_and(g, isinf(g, nan_result),
                              gt(g, nan_result, g.op("Constant", value_t=torch.LongTensor([0]))))
    nan_posinf_result = g.op("Where", posinf_cond,
                             g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)), nan_result)

    if neginf is None:
        neginf = finfo.min
    neginf_cond = logical_and(g, isinf(g, nan_posinf_result),
                              lt(g, nan_posinf_result, g.op("Constant", value_t=torch.LongTensor([0]))))
    return g.op("Where", neginf_cond,
                g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)), nan_posinf_result)
Example #5
def _floor_divide(g, self, other):
    if sym_help._is_fp(self) or sym_help._is_fp(other):
        out = torch.onnx.symbolic_opset9.true_divide(g, self, other)
        return g.op('Floor', out)
    else:
        # Integer division does truncation rounding
        div = g.op('Div', self, other)
        # Division is negative if the operand signs differ: (self < 0) != (other < 0)
        zero = g.op('Constant', value_t=torch.tensor(0, dtype=torch.int64))
        negative = g.op('Xor', g.op('Less', self, zero),
                        g.op('Less', other, zero))

        # For negative numbers with self % other != 0, subtract 1 to round down instead of up
        mod = g.op('Mod', self, other, fmod_i=0)
        fixup_mask = g.op('And', negative, g.op('Not',
                                                g.op('Equal', mod, zero)))

        one = g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64))
        fixup = g.op('Sub', div, one)
        return g.op('Where', fixup_mask, fixup, div)
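The integer branch is easy to check in eager mode: truncating division, minus one wherever the quotient is negative and a remainder exists, reproduces floor division. A small sanity sketch, assuming torch.div with rounding_mode (PyTorch 1.8+):

import torch

a = torch.tensor([7, -7, 7, -7], dtype=torch.int64)
b = torch.tensor([2, 2, -2, -2], dtype=torch.int64)
div = torch.div(a, b, rounding_mode='trunc')
negative = (a < 0) ^ (b < 0)           # operand signs differ
fixup = negative & (a - div * b != 0)  # negative and inexact
assert torch.equal(div - fixup.to(torch.int64),
                   torch.div(a, b, rounding_mode='floor'))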
Example #7
def nan_to_num(g, input, nan, posinf, neginf):
    # Cannot create an int type tensor with inf/nan values, so we simply
    # return the original tensor
    if not symbolic_helper._is_fp(input):
        return input
    input_dtype = _type_utils.JitScalarType.from_name(
        input.type().scalarType()).dtype()
    if nan is None:
        nan = 0.0
    nan_cond = opset9.isnan(g, input)
    nan_result = g.op(
        "Where",
        nan_cond,
        g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)),
        input,
    )

    # For None values of posinf, neginf we use the greatest/lowest finite
    # value representable by input’s dtype.
    finfo = torch.finfo(input_dtype)
    if posinf is None:
        posinf = finfo.max
    posinf_cond = opset9.logical_and(
        g,
        isinf(g, nan_result),
        opset9.gt(g, nan_result, g.op("Constant",
                                      value_t=torch.LongTensor([0]))),
    )
    nan_posinf_result = g.op(
        "Where",
        posinf_cond,
        g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)),
        nan_result,
    )

    if neginf is None:
        neginf = finfo.min
    neginf_cond = opset9.logical_and(
        g,
        isinf(g, nan_posinf_result),
        opset9.lt(g, nan_posinf_result,
                  g.op("Constant", value_t=torch.LongTensor([0]))),
    )
    return g.op(
        "Where",
        neginf_cond,
        g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)),
        nan_posinf_result,
    )
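A minimal export sketch that exercises this symbolic (the module, file name, and opset choice are illustrative; nan_to_num lowers through onnx::IsInf, which requires opset 10 or higher):

import torch

class Clean(torch.nn.Module):
    def forward(self, x):
        return torch.nan_to_num(x, nan=0.0)

x = torch.tensor([1.0, float("nan"), float("inf"), float("-inf")])
torch.onnx.export(Clean(), (x,), "nan_to_num.onnx", opset_version=13)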
Example #8
def remainder(g, input, other):
    if sym_help._is_fp(input) or sym_help._is_fp(other):
        from torch.onnx.symbolic_opset9 import remainder as _remainder_9
        return _remainder_9(g, input, other)
    return g.op("Mod", input, other, fmod_i=0)
Example #9
def remainder(g, input, other):
    if symbolic_helper._is_fp(input) or symbolic_helper._is_fp(other):
        return opset9.remainder(g, input, other)
    return g.op("Mod", input, other, fmod_i=0)