Example No. 1
import theano.scalar as scalar
import theano.tensor as T

def hard_sigmoid(x):
    # Compute in the float dtype chosen by "upgrade_to_float" for x's dtype.
    out_dtype = scalar.upgrade_to_float(scalar.Scalar(dtype=x.dtype))[0].dtype
    slope = T.constant(0.2, dtype=out_dtype)
    shift = T.constant(0.5, dtype=out_dtype)
    # Piecewise-linear approximation of the sigmoid: clip(0.2 * x + 0.5, 0, 1).
    x = (x * slope) + shift
    x = T.clip(x, 0, 1)
    return x
Example No. 2
import theano.scalar as scalar
import theano.tensor as T

def PRelu(x):
    out_dtype = scalar.upgrade_to_float(
        scalar.Scalar(dtype=x.dtype))[0].dtype
    a = T.constant(0.625, dtype=out_dtype)
    b = T.constant(0.375, dtype=out_dtype)
    # y equals x for x >= 0 and 0.25 * x for x < 0, then clipped to [0, 1].
    y = x * a + abs(x) * b
    r = T.clip(y, 0, 1)
    return r
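Written out, the two constants make the piecewise behaviour explicit: since abs(x) equals x for x >= 0 and -x otherwise, x * 0.625 + abs(x) * 0.375 is x on the positive side and 0.25 * x on the negative side, before the clip. A minimal usage sketch, assuming PRelu is applied to a symbolic vector (the input name and test values are illustrative only):

import numpy as np
import theano
import theano.tensor as T

# Hypothetical usage: build a symbolic vector, apply PRelu, compile, evaluate.
x = T.vector("x")
f = theano.function([x], PRelu(x))
print(f(np.array([-2.0, -0.5, 0.5, 2.0], dtype=theano.config.floatX)))
# Before the clip: [-0.5, -0.125, 0.5, 2.0]; after clipping to [0, 1]: [0., 0., 0.5, 1.]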
Example No. 3
import theano.scalar as scalar
import theano.tensor as T

def Relu(x):
    out_dtype = scalar.upgrade_to_float(
        scalar.Scalar(dtype=x.dtype))[0].dtype
    a = T.constant(0.5, dtype=out_dtype)
    # (x + |x|) / 2 equals max(x, 0); the clip then caps the output at 1.
    y = (x + abs(x)) * a
    r = T.clip(y, 0, 1)
    return r
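The arithmetic relies on the identity (x + abs(x)) * 0.5 == max(x, 0), so after the clip the function returns min(max(x, 0), 1), i.e. a ReLU capped at 1. A quick NumPy check of that identity (plain NumPy, not part of the Theano graph; the sample points are arbitrary):

import numpy as np

xs = np.array([-3.0, -0.2, 0.0, 0.4, 2.5])
# (x + |x|) / 2 is max(x, 0); clipping then caps the result at 1.
assert np.allclose((xs + np.abs(xs)) * 0.5, np.maximum(xs, 0.0))
print(np.clip((xs + np.abs(xs)) * 0.5, 0.0, 1.0))  # [0.  0.  0.  0.4 1. ]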
Example No. 4
from theano import scalar, tensor

def hard_sigmoid(x):
    """An approximation of sigmoid.

    More approximate and faster than ultra_fast_sigmoid.

    Approx in 3 parts: 0, scaled linear, 1.

    Removing the slope and shift does not make it faster.

    """
    # Use the same dtype as determined by "upgrade_to_float",
    # and perform computation in that dtype.
    out_dtype = scalar.upgrade_to_float(scalar.Scalar(dtype=x.dtype))[0].dtype
    slope = tensor.constant(0.2, dtype=out_dtype)
    shift = tensor.constant(0.5, dtype=out_dtype)
    x = (x * slope) + shift
    x = tensor.clip(x, 0, 1)
    return x
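As a rough sketch of how this three-part approximation behaves, the graph can be compiled with theano.function and compared against the exact sigmoid; the vector input and sample points below are assumptions for illustration:

import numpy as np
import theano
from theano import tensor

x = tensor.vector("x")
f_hard = theano.function([x], hard_sigmoid(x))
f_exact = theano.function([x], tensor.nnet.sigmoid(x))
pts = np.array([-4.0, -1.0, 0.0, 1.0, 4.0], dtype=theano.config.floatX)
print(f_hard(pts))   # approx. [0.    0.3   0.5   0.7   1.   ]
print(f_exact(pts))  # approx. [0.018 0.269 0.5   0.731 0.982]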