Example #1
    'aten::zeros_like': ConstantOfShape(
        Shape('self')
    ),  # the default constant is 0, so there's no need to specify the attribute
    'aten::sum.dim_IntList': ReduceSum('self', 'dim', keepdims='keepdim'),
    'aten::threshold_backward': ReluGrad('grad_output', 'self'),
    'aten::fmod.Scalar': Mod('self', 'other', fmod=1),
    'aten::fmod.Tensor': Mod('self', 'other', fmod=1),  # fmod=1 selects C-style (fmod) remainder; see the sketch after this dict
    'aten::softshrink': Shrink('self', bias='lambd',
                               lambd='lambd'),  # yes, bias is set to 'lambd'
    'aten::hardshrink': Shrink('self', bias=0, lambd='lambd'),
    'aten::gelu': Gelu('self'),
    'aten::max': ReduceMax('self', keepdims=1),
    'aten::min': ReduceMin('self', keepdims=1),
    'aten::_cat': Concat('tensors', 'dim'),
    'aten::fill_.Scalar': ConstantOfShape('self', value='value'),
    'aten::ne.Scalar': MakeTorchFallback(),
    'aten::ne.Scalar_out': MakeTorchFallback(),
    'aten::ne.Tensor_out': MakeTorchFallback(),
    'aten::eq.Tensor': MakeTorchFallback(),
    'aten::eq.Tensor_out': MakeTorchFallback(),
    'aten::bitwise_and.Tensor_out': MakeTorchFallback(),
    'aten::masked_select': MakeTorchFallback(),
    'aten::_local_scalar_dense': MakeTorchFallback(),
    'aten::gt.Scalar_out': MakeTorchFallback(),
}
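
# A minimal illustration (not part of the original file) of why the aten::fmod
# entries above set fmod=1: ONNX Mod with fmod=1 follows C-style fmod semantics
# (the result takes the sign of the dividend), which is what torch.fmod computes,
# while the default fmod=0 behaves like Python's modulo.
import numpy as np  # used for the illustration only

assert np.fmod(-7, 3) == -1  # C-style remainder, matches torch.fmod
assert np.mod(-7, 3) == 2    # Python-style modulo, what fmod=0 would compute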

# The signature of gelu_backward was changed in commit 983ba5e585485ed61a0c0012ef6944f5685e3d97 (PR 61439).
# This version check keeps the mapping compatible with both older and newer torch releases.
if version.parse(torch.__version__) < version.parse(TORCH_API_CHANGE_VERSION):
    hand_implemented['aten::gelu_backward'] = GeluGrad('grad', 'self')
else:
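    # The excerpt is truncated here; presumably (assumption, not visible in the
    # excerpt) the newer-torch branch registers the op with the argument name
    # introduced by PR 61439:
    hand_implemented['aten::gelu_backward'] = GeluGrad('grad_output', 'self')
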
Example #2
    "aten::mm": MatMul("self", "mat2"),
    "aten::zeros_like": ConstantOfShape(
        Shape("self")
    ),  # the default constant is 0, so there's no need to specify the attribute
    "aten::sum.dim_IntList": ReduceSum("self", "dim", keepdims="keepdim"),
    "aten::threshold_backward": ReluGrad("grad_output", "self"),
    "aten::fmod.Scalar": Mod("self", "other", fmod=1),
    "aten::fmod.Tensor": Mod("self", "other", fmod=1),
    "aten::softshrink": Shrink("self", bias="lambd", lambd="lambd"),  # yes, bias is set to 'lambd'
    "aten::hardshrink": Shrink("self", bias=0, lambd="lambd"),
    "aten::gelu": Gelu("self"),
    "aten::max": ReduceMax("self", keepdims=1),
    "aten::min": ReduceMin("self", keepdims=1),
    "aten::_cat": Concat("tensors", "dim"),
    "aten::fill_.Scalar": ConstantOfShape("self", value="value"),
    "aten::ne.Scalar": MakeTorchFallback(),
    "aten::ne.Scalar_out": MakeTorchFallback(),
    "aten::ne.Tensor_out": MakeTorchFallback(),
    "aten::eq.Tensor": MakeTorchFallback(),
    "aten::eq.Tensor_out": MakeTorchFallback(),
    "aten::bitwise_and.Tensor_out": MakeTorchFallback(),
    "aten::masked_select": MakeTorchFallback(),
    "aten::_local_scalar_dense": MakeTorchFallback(),
    "aten::gt.Scalar_out": MakeTorchFallback(),
    "aten::equal": MakeTorchFallback(),
}
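
# A minimal sketch (not part of the original file) of why bias is set to 'lambd'
# for softshrink and to 0 for hardshrink: ONNX Shrink computes
#   y = x - bias if x > lambd;  y = x + bias if x < -lambd;  y = 0 otherwise,
# which reduces to softshrink when bias == lambd and to hardshrink when bias == 0.
import torch
import torch.nn.functional as F

def onnx_shrink(x, bias, lambd):
    return torch.where(x > lambd, x - bias,
                       torch.where(x < -lambd, x + bias, torch.zeros_like(x)))

x = torch.tensor([-2.0, -0.3, 0.0, 0.4, 3.0])
assert torch.allclose(onnx_shrink(x, bias=0.5, lambd=0.5), F.softshrink(x, 0.5))
assert torch.allclose(onnx_shrink(x, bias=0.0, lambd=0.5), F.hardshrink(x, 0.5))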

# The signature of gelu_backward was changed in commit 983ba5e585485ed61a0c0012ef6944f5685e3d97 (PR 61439).
# This version check keeps the mapping compatible with both older and newer torch releases.
if version.parse(torch.__version__) < version.parse(TORCH_API_CHANGE_VERSION):
    hand_implemented["aten::gelu_backward"] = GeluGrad("grad", "self")
Example #3
 "aten::ne.Tensor_out":
 Cast(Not(Equal("self", "other")),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::eq.Tensor_out":
 Cast(Equal("self", "other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::eq.Scalar_out":
 Cast(Equal("self", "other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::bitwise_and.Tensor_out":
 And("self",
     "other"),  # This generates a fallback for all but Bool, as expected.
 "aten::masked_select":
 GatherND("self", Transpose(NonZero(Expand("mask", Shape("self"))))),
 "aten::_local_scalar_dense":
 MakeTorchFallback(),  # This function extracts a scalar value from
 #   a tensor with exactly one value; there's no need to try to do this on an ORT device.
 #   See CPU impl at pytorch/blob/master/aten/src/ATen/native/Scalar.cpp
 "aten::lt.Scalar_out":
 Cast(Less(A="self", B="other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::lt.Tensor_out":
 Cast(Less(A="self", B="other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::gt.Scalar_out":
 Cast(Greater(A="self", B="other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::gt.Tensor_out":
 Cast(Greater(A="self", B="other"),
      to="GetONNXTensorProtoDataType(out.scalar_type())"),
 "aten::equal":