Example #1
def log_ggd(x, p, mu, alpha):
    """Log-density of a generalized Gaussian distribution (GGD).

    Assumes `log`, `lgamma`, `pow` and `abs` are the `tf.math` ops and
    `settings` is GPflow's settings module.
    """
    # Log normalizing constant: log p - ((p + 1) / p) log 2 - log Gamma(1 / p)
    cp = tf.cast(
        log(p) -
        ((p + 1) / p) * tf.cast(log(2.0), settings.float_type) -
        lgamma(1 / p), settings.float_type)
    # log N(x; mu, alpha, p) = cp - log(alpha) - |x - mu|^p / (2 * alpha^p)
    res = tf.cast(
        cp - log(alpha) - pow(abs(x - mu), p) / (2 * pow(alpha, p)),
        settings.float_type)
    return res
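For reference, this is the log-density of a generalized normal distribution, so it can be sanity-checked against SciPy. A minimal sketch, assuming SciPy's gennorm parameterization, under which the snippet's alpha corresponds to scale = alpha * 2**(1/p):

from scipy.stats import gennorm

# Spot-check: at p = 2 the GGD reduces to a Gaussian with std alpha.
x, p, mu, alpha = 0.3, 2.0, 0.0, 1.5
print(gennorm.logpdf(x, beta=p, loc=mu, scale=alpha * 2 ** (1 / p)))
# Should match log_ggd(x, p, mu, alpha) up to floating-point error.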
Example #2
def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    # Phase Stretch Transform. Assumes tfm = tf.math, sig = tf.signal,
    # fftshift = tf.signal.fftshift, ops = tensorflow.python.framework.ops,
    # and a cart2pol helper defined elsewhere.
    # Inverting Threshold_min to simplify the optimization process, so we can
    # clip all variables between 0 and 1.
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    [X1, Y1] = (tf.meshgrid(x, y))
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    [THETA, RHO] = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    tmp6 = (LPF**2.0) / tfm.log(2.0)
    tmp5 = tfm.sqrt(tmp6)
    tmp4 = (tfm.divide(RHO, tmp5))
    tmp3 = -tfm.pow(tmp4, 2)
    tmp2 = tfm.exp(tmp3)
    expo = fftshift(tmp2)
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
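A minimal driver for the snippet above, a sketch assuming the helper names it relies on (tfm, sig, ops, fftshift, cart2pol) resolve as below; the parameter values are purely illustrative:

import tensorflow as tf
from tensorflow import math as tfm
from tensorflow import signal as sig
from tensorflow.python.framework import ops
from tensorflow.signal import fftshift

def cart2pol(x, y):
    # Cartesian -> polar coordinates, in the (angle, radius) order PST expects.
    return tfm.atan2(y, x), tfm.sqrt(x ** 2 + y ** 2)

image = tf.random.uniform((128, 128))
edges = PST(image, LPF=0.21, Phase_strength=0.48, Warp_strength=12.14,
            Threshold_min=-1.0, Threshold_max=0.0019)  # edges: scaled phase map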
Example #3
def adaptive_wing_loss(labels, output):
    # Adaptive Wing loss for heatmap regression (Wang et al., ICCV 2019).
    # The bare names below (abs, greater, log, pow, reduce_mean, reduce_sum)
    # are assumed to be the tf.math ops.
    alpha = 2.1
    omega = 14
    epsilon = 1
    theta = 0.5
    with tf.name_scope('adaptive_wing_loss'):
        x = output - labels
        theta_over_epsilon_tensor = tf.fill(tf.shape(labels), theta / epsilon)
        A = omega * (1 / (1 + pow(theta_over_epsilon_tensor, alpha - labels))) \
            * (alpha - labels) \
            * pow(theta_over_epsilon_tensor, alpha - labels - 1) * (1 / epsilon)
        C = theta * A - omega * log(1 + pow(theta_over_epsilon_tensor, alpha - labels))
        absolute_x = abs(x)
        # Log branch where |x| < theta, linear branch elsewhere.
        losses = tf.where(greater(theta, absolute_x),
                          omega * log(1 + pow(absolute_x / epsilon, alpha - labels)),
                          A * absolute_x - C)
        loss = reduce_mean(reduce_sum(losses, axis=[1, 2]), axis=0)
        return loss
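A quick smoke test, assuming (batch, height, width) heatmaps to match the reduce_sum over axes [1, 2], and importing the tf.math ops under the bare names the snippet uses:

import tensorflow as tf
from tensorflow.math import abs, greater, log, pow, reduce_mean, reduce_sum

labels = tf.random.uniform((8, 64, 64))  # ground-truth heatmaps in [0, 1]
output = tf.random.uniform((8, 64, 64))  # predicted heatmaps
print(adaptive_wing_loss(labels, output))  # scalar loss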
Example #4
def train_bound(t):
    """Trains the model to equalize values and spatial derivatives at boundaries x=5 
    and x=-5 to enforce periodic boundary condition

    Args:
        t : A tf.Tensor of shape (batch_size,).
    """

    x1 = 5 * tf.ones(t.shape)
    x2 = -5 * tf.ones(t.shape)
    with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
        tape.watch(PINN.trainable_weights)
        with tf.GradientTape(persistent=True,
                             watch_accessed_variables=False) as grtape1:
            grtape1.watch([t, x1, x2])
            # Automatic differentiation of complex functions is awkward in
            # TensorFlow, so we differentiate the real and imaginary parts
            # separately.
            h_real_1 = tfm.real(PINN(tf.stack([t, x1], -1)))
            h_imag_1 = tfm.imag(PINN(tf.stack([t, x1], -1)))
            h_real_2 = tfm.real(PINN(tf.stack([t, x2], -1)))
            h_imag_2 = tfm.imag(PINN(tf.stack([t, x2], -1)))
        #First order derivatives
        h_x1_real = grtape1.gradient(h_real_1, x1)
        h_x1_imag = grtape1.gradient(h_imag_1, x1)
        h_x2_real = grtape1.gradient(h_real_2, x2)
        h_x2_imag = grtape1.gradient(h_imag_2, x2)
        # h_real_1, h_imag_1, etc. have shape (batch_size, 2)
        del grtape1
        h1 = tf.complex(h_real_1, h_imag_1)
        h1_x = tf.complex(h_x1_real, h_x1_imag)
        h2 = tf.complex(h_real_2, h_imag_2)
        h2_x = tf.complex(h_x2_real, h_x2_imag)
        MSE = tfm.reduce_mean(
            tfm.pow(tfm.abs(h1 - h2), 2) + tfm.pow(tfm.abs(h1_x - h2_x), 2))
    grads = tape.gradient(MSE, PINN.trainable_weights)
    sgd_opt.apply_gradients(zip(grads, PINN.trainable_weights))
    return MSE
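A minimal harness, assuming PINN and sgd_opt are the module-level globals the snippet uses: a Keras model mapping stacked (t, x) pairs to a complex value, and a stock optimizer. The architecture here is purely illustrative:

import tensorflow as tf
from tensorflow import math as tfm

PINN = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="tanh"),
    tf.keras.layers.Dense(2),
    # Pack the two real outputs into one complex value h = u + i*v.
    tf.keras.layers.Lambda(lambda z: tf.complex(z[..., 0], z[..., 1])),
])
PINN.build(input_shape=(None, 2))  # build weights before tape.watch sees them
sgd_opt = tf.keras.optimizers.SGD(learning_rate=1e-3)

mse = train_bound(tf.linspace(0.0, 1.0, 64))  # one boundary-equalization step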
Example #5
def sample_batch(n_samples, max_dim, max_power, dtype):
    """ Calculate a sum of distance samples for various dimensions and norms

    Generates a batch of *n_samples* of pairs of points, and uses those points
    to build a table of 1 to *max_dim* rows and 1 to *max_power* columns,
    where each entry is the sum across all samples of the distance metric
    with the given power for pairs of points in the given dimensional
    hypercube.
    """
    # create two lists of random points and their vector difference,
    # where the first dimension is the sample number and the second the
    # coordinates for the sample
    zero = tf.zeros((), dtype=dtype)  # used to pass in dtype to Uniform
    x1s = tfd.Uniform(low=zero).sample((n_samples, max_dim))
    x2s = tfd.Uniform(low=zero).sample((n_samples, max_dim))
    vector_difference = x2s - x1s

    # add a third dimension to the tensor, which holds the vector coordinate
    # raised to ascending powers (e.g. x, x^2, x^3, ...)
    sum_terms = tf.abs(
        tfm.cumprod(tf.tile(
            tf.reshape(vector_difference, (n_samples, max_dim, 1)),
            (1, 1, max_power)),
                    axis=2))

    # generate cumulative sums along the coordinate (second) dimension
    # and raise the sum to the 1/n power (where n is the third dimension)
    # to generate a new tensor where the first dimension is the samples,
    # the second the dimension of the hypercube, and the third the power
    # of the norm.
    norm = tfm.pow(
        tfm.cumsum(sum_terms, axis=1),
        tf.reshape(1 / tf.cast(tf.range(1, max_power + 1), dtype=dtype),
                   (1, 1, max_power)))

    # return the sum of the norms
    return tf.reduce_sum(norm, axis=0)
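A small run of the snippet, assuming the aliases it relies on (tfm = tf.math, tfd = TensorFlow Probability's distributions module):

import tensorflow as tf
from tensorflow import math as tfm
import tensorflow_probability as tfp

tfd = tfp.distributions

# Rows are hypercube dimensions 1..3, columns are norm powers 1..4;
# each entry sums the distance metric over all 10_000 sampled pairs.
table = sample_batch(n_samples=10_000, max_dim=3, max_power=4, dtype=tf.float32)
print(table)  # shape (3, 4)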
Example #6
def gelu_(X):
    # Tanh approximation of GELU; `math` is assumed to be tf.math, and
    # 0.7978845608028654 is sqrt(2 / pi).
    return 0.5 * X * (1.0 + math.tanh(0.7978845608028654 *
                                      (X + 0.044715 * math.pow(X, 3))))
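For comparison, the exact GELU is 0.5 * x * (1 + erf(x / sqrt(2))). A quick check of the approximation, again assuming math is tf.math:

import tensorflow as tf
from tensorflow import math

x = tf.linspace(-3.0, 3.0, 61)
exact = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
print(tf.reduce_max(tf.abs(gelu_(x) - exact)))  # small, on the order of 1e-3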
Example #7
def conditional_variance(self, F):
    # Conditional variance of the GGD likelihood, broadcast to the shape of F:
    # 4 * alpha^2 * Gamma(3 / p) / Gamma(1 / p). `exp`, `lgamma` and `pow` are
    # assumed to be the tf.math ops.
    alpha = self.alpha
    p = self.p
    tmp = 4 * pow(alpha, 2) * exp(lgamma(3 / p) - lgamma(1 / p))
    return tf.fill(tf.shape(F), tf.squeeze(tmp))
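A quick NumPy/SciPy sanity check of the constant: for p = 2 the Gamma ratio Gamma(3/2) / Gamma(1/2) equals 1/2, so the fill value reduces to 2 * alpha**2:

import numpy as np
from scipy.special import gammaln

alpha, p = 1.5, 2.0
print(4 * alpha ** 2 * np.exp(gammaln(3 / p) - gammaln(1 / p)))  # 4.5 == 2 * alpha**2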