Example 1
    def compute_py(self, x):
        '''
        compute probability for each class
        INPUTS:
        x - input
        OUTPUTS:
        py - histogram of probabilities for each class
        '''
        # Assumes module-level aliases: tfm = tensorflow.math, tfl = tensorflow.linalg

        hidden1_pre = tfm.add(tfl.matmul(x, self.weights['W_x_to_h1']),
                              self.weights['b_x_to_h1'])
        hidden_post = self.nonlinearity(hidden1_pre)

        num_layers_middle = np.shape(self.N_h)[0] - 1

        ni = 1  # keeps the output-layer weight keys defined when there are no middle layers
        for i in range(num_layers_middle):
            ni = i + 2

            hidden_pre = tfm.add(
                tfl.matmul(hidden_post,
                           self.weights['W_h{}_to_h{}'.format(ni - 1, ni)]),
                self.weights['b_h{}_to_h{}'.format(ni - 1, ni)])
            hidden_post = self.nonlinearity(hidden_pre)

        p_un = tfm.add(
            tfl.matmul(hidden_post, self.weights['W_h{}_to_py'.format(ni)]),
            self.weights['b_h{}_to_py'.format(ni)])
        p_un = tf.nn.sigmoid(p_un) + 1e-6
        py = tfm.divide(
            p_un,
            tf.tile(tf.expand_dims(tfm.reduce_sum(p_un, axis=1), axis=1),
                    [1, self.n_y]))

        return py
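The final division renormalizes the strictly positive sigmoid outputs so that each row of py sums to 1. A minimal standalone sketch of the same step, using broadcasting with keepdims in place of the expand_dims/tile pattern:

import tensorflow as tf

p_un = tf.nn.sigmoid(tf.random.normal([4, 3])) + 1e-6  # strictly positive pseudo-probabilities
py = p_un / tf.reduce_sum(p_un, axis=1, keepdims=True)  # each row now sums to 1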
Example 2
    def conv_block(self,
                   input_layer,
                   n_filters,
                   length=2,
                   pool=True,
                   stride=1):

        layer = input_layer
        for i in range(length):
            layer = Conv2D(n_filters, (3, 3), strides=stride,
                           padding='same')(layer)
            layer = BatchNormalization()(layer)
            layer = ReLU()(layer)

        parallel = Conv2D(n_filters, (1, 1),
                          strides=stride**length,
                          padding='same')(input_layer)
        parallel = BatchNormalization()(parallel)
        parallel = ReLU()(parallel)

        output = Add()([layer, parallel])

        # Scale the merged branches by 0.5, i.e. average the two paths.
        output = Lambda(lambda x: divide(x, 2.0))(output)

        if pool:
            output = MaxPooling2D(pool_size=(3, 3), strides=2)(output)

        return output
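The Lambda layer halves the summed branches, so the block averages its main convolutional path with the 1x1 shortcut rather than adding them outright. A minimal sketch of that merge in isolation (shapes and filter counts are illustrative assumptions):

import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Add, Lambda

inp = Input(shape=(32, 32, 3))
main = Conv2D(8, (3, 3), padding='same')(inp)      # main path
shortcut = Conv2D(8, (1, 1), padding='same')(inp)  # parallel 1x1 path
out = Lambda(lambda t: tf.math.divide(t, 2.0))(Add()([main, shortcut]))  # average the two branches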
Example 3
def augmentation(img, msk):

    # Import TensorFlow math ops, used below for the degrees-to-radians conversion.
    # (Also assumes: import math; import tensorflow as tf; import tensorflow_addons as tfa)
    import tensorflow.math as Math

    # Create some random floats, which will be used in augmentation steps.
    tilt = tf.random.uniform(shape=[], minval=-30, maxval=30, dtype=tf.float32)
    dx = tf.random.uniform(shape=[], minval=-5, maxval=5, dtype=tf.float32)
    dy = tf.random.uniform(shape=[], minval=-5, maxval=5, dtype=tf.float32)

    # Use TensorFlow-style conditionals to flip the image and mask together.
    img = tf.cond(tilt > 0, lambda: tf.image.flip_left_right(img),
                  lambda: tf.image.flip_up_down(img))
    msk = tf.cond(tilt > 0, lambda: tf.image.flip_left_right(msk),
                  lambda: tf.image.flip_up_down(msk))

    # Rotate the image and mask by `tilt` degrees, converted to radians below.
    toRads = Math.multiply(Math.divide(tilt, 180), tf.constant(math.pi))

    img = tfa.image.rotate(img, toRads)
    msk = tfa.image.rotate(msk, toRads)

    # Affine transformation
    img = tfa.image.translate(img, [dx, dy], 'BILINEAR')
    msk = tfa.image.translate(msk, [dx, dy], 'BILINEAR')

    # Return the augmented image/mask pair as a tuple.
    finalTuple = (img, msk)

    return finalTuple
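Because every operation in augmentation is a TensorFlow op, the function can be mapped directly over a tf.data pipeline. A minimal sketch, where the dataset shapes are assumptions:

import tensorflow as tf

images = tf.zeros([8, 128, 128, 3])
masks = tf.zeros([8, 128, 128, 1])
ds = tf.data.Dataset.from_tensor_slices((images, masks)).map(augmentation)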
Example 4
    def call(self, inputs):
        mean = reduce_mean(inputs, axis=0)
        std = reduce_std(inputs, axis=0) + 1e-6

        # Accumulate running sums of the batch statistics on class attributes.
        InputBatchNormalization.temp += 1
        InputBatchNormalization.mean += mean
        InputBatchNormalization.std += std

        # Standardize, then drop the leading axis (tf.Tensor has no .squeeze() method).
        inputs = divide(subtract(inputs, mean), std)

        return tf.squeeze(inputs, axis=0)
Example 5
    def compute_py(self, xl):
        '''
        compute probability for each class
        INPUTS:
        xl - input image
        OUTPUTS:
        py - histogram of probabilities for each class
        '''

        x, _ = NN_utils.reshape_and_extract(xl, self.sz_im)

        hidden_post = layers.tf_conv_layer(x, self.weights['W_x_to_h1'],
                                           self.weights['b_x_to_h1'],
                                           self.St[0], self.nonlinearity)
        num_layers_1 = np.shape(self.N_h1)[0] - 1

        ni = 1  # keeps `ni` defined even when there are no extra conv layers
        for i in range(num_layers_1):
            ni = i + 2

            hidden_post = layers.tf_conv_layer(
                hidden_post, self.weights['W_h{}_to_h{}'.format(ni - 1, ni)],
                self.weights['b_h{}_to_h{}'.format(ni - 1, ni)],
                self.St[ni - 1], self.nonlinearity)

        hidden_post = NN_utils.flatten(hidden_post)

        num_layers_F = np.shape(self.NF_h)[0]

        for i in range(num_layers_F):
            ni = ni + 1

            hidden_pre = tfm.add(
                tfl.matmul(hidden_post,
                           self.weights['W_h{}_to_h{}'.format(ni - 1, ni)]),
                self.weights['b_h{}_to_h{}'.format(ni - 1, ni)])
            hidden_post = self.nonlinearity(hidden_pre)

        p_un = tfm.add(
            tfl.matmul(hidden_post, self.weights['W_h{}_to_py'.format(ni)]),
            self.weights['b_h{}_to_py'.format(ni)])
        p_un = tf.nn.sigmoid(p_un) + 1e-6
        py = tfm.divide(
            p_un,
            tf.tile(tf.expand_dims(tfm.reduce_sum(p_un, axis=1), axis=1),
                    [1, self.n_y]))

        return py
Example 6
def tf_ssim3(x, y, is_normalized=True):
    # Compute SSIM separately per colour channel (split along axis=2).
    [x1, x2, x3] = tf.split(x, 3, axis=2)
    [y1, y2, y3] = tf.split(y, 3, axis=2)

    s1 = tf_ssim(x1, y1, is_normalized)
    s2 = tf_ssim(x2, y2, is_normalized)
    s3 = tf_ssim(x3, y3, is_normalized)

    three = tf.constant(3.0)
    result = divide(multiOperation(add, s1, s2, s3), three)

    return result
Example 7
def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    # Invert Threshold_min to simplify the optimization process, so all
    # variables can be clipped between 0 and 1.
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    [X1, Y1] = (tf.meshgrid(x, y))
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    [THETA, RHO] = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    # Build a Gaussian low-pass kernel in the frequency domain.
    tmp6 = (LPF**2.0) / tfm.log(2.0)
    tmp5 = tfm.sqrt(tmp6)
    tmp4 = (tfm.divide(RHO, tmp5))
    tmp3 = -tfm.pow(tmp4, 2)
    tmp2 = tfm.exp(tmp3)
    expo = fftshift(tmp2)
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
Example 8
def compute_angle_tensor(pts1, pts2):
    """ Compute the angle between each pair of points in pts1 and pts2
        with respect to the origin.
        Input:
          pts1: batch_size x 3 tensor
          pts2: batch_size x 3 tensor
    """
    b = tf.constant([0.,0.,0.])
    angle_diff = []
    for pt1, pt2 in zip(pts1, pts2):
        ba = tf.subtract(pt1, b)
        bc = tf.subtract(pt2, b)
        cosine_angle = tm.divide(tf.tensordot(ba, bc, 1), tm.multiply(tf.norm(ba), tf.norm(bc)))
        angle = tm.acos(cosine_angle)
        angle_diff.append(tf.cast(angle, tf.float32))
    return tf.stack(angle_diff, axis=0)
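As a quick sanity check (assuming the module's tm alias for tensorflow.math is in scope), two orthogonal unit vectors should give an angle of pi/2:

import tensorflow as tf

pts1 = tf.constant([[1., 0., 0.]])
pts2 = tf.constant([[0., 1., 0.]])
print(compute_angle_tensor(pts1, pts2))  # approx. [1.5708], i.e. pi/2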
Example 9
def gaussian_log_likelihood(x, mu_x, log_sig_sq_x, SMALL_CONSTANT=1e-5):
    '''
    Element-wise Gaussian log likelihood
    INPUTS:
        x - points
        mu_x - means of Gaussians
        log_sig_sq_x - log variance of Gaussian
    OPTIONAL INPUTS:
        SMALL_CONSTANT - small constant to avoid taking the log of 0 or dividing by 0
    OUTPUTS:
        log_lik - element-wise log likelihood
    '''

    # -E_q(z|x) log(p(x|z))
    normalising_factor = -0.5 * tfm.log(
        SMALL_CONSTANT + tfm.exp(log_sig_sq_x)) - 0.5 * np.log(2.0 * np.pi)
    square_diff_between_mu_and_x = tfm.square(mu_x - x)
    inside_exp = -0.5 * tfm.divide(square_diff_between_mu_and_x,
                                   SMALL_CONSTANT + tfm.exp(log_sig_sq_x))
    log_lik = normalising_factor + inside_exp

    return log_lik
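As a sanity check, with zero mean and unit variance (log_sig_sq_x = 0) the expression reduces to the standard normal log-density -0.5 * log(2 * pi) - 0.5 * x**2, up to the 1e-5 stabiliser:

import tensorflow as tf

ll = gaussian_log_likelihood(tf.constant([0.5]), tf.constant([0.0]), tf.constant([0.0]))
# Expected: -0.5 * log(2 * pi) - 0.125, approx. -1.044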
Example 10
    def call(self, x):
        # Assumes: import math; import numpy as np; import tensorflow.math as tfm;
        # from tensorflow.keras import backend as K

        X = []
        for i in range(self.n_activations):
            self.i = i
            frame = x[:, :, i:i + 1]

            sigma1 = K.variable(
                np.zeros((self.batch, frame.shape[1], frame.shape[2])))
            for j in range(self.v_order):
                j_fact = math.factorial(j)
                p = tfm.divide(K.pow(frame, j), j_fact)
                sigma1 = tfm.add(sigma1, tfm.multiply(self.v[i][j], p))

            sigma2 = K.variable(
                np.zeros((self.batch, frame.shape[1], frame.shape[2])))
            for j in range(1, self.w_order + 1):
                self.k = j
                sigma2 = tfm.add(sigma2, K.map_fn(self.basis2, frame))

            X.append(tfm.add(sigma1, sigma2))

        output = K.concatenate(X, axis=2)
        return output
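In formula terms, for each activation channel the first inner loop accumulates a factorially scaled power series, while the second adds w_order learned basis terms via self.basis2:

sigma1 = sum over j in [0, v_order) of v[i][j] * frame**j / factorial(j)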
Example 11
def log(x, base=None):
    if base is None:
        return tf.math.log(x)
    else:
        return divide(tf.math.log(x), tf.math.log(base))
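When a base is given, this applies the change-of-base identity log_b(x) = log(x) / log(b). For example (assuming divide is the module's alias for tf.math.divide):

import tensorflow as tf

print(log(tf.constant(8.0), base=2.0))  # 3.0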
Example 12
def f2():
    val1 = tfm.divide(
        tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(k, k_1)), 2)
    val2 = tfm.multiply(tfm.subtract(k, k_1), tfm.subtract(x, k))
    val = tfm.add(val1, val2)
    return val
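Expanding the products, f2 computes (k - k_1)**2 / 2 + (k - k_1) * (x - k), which factors to (k - k_1) * (x - (k + k_1) / 2).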
Example 13
def f1():
    return tfm.add(
        tfm.subtract(tfm.divide(tfm.multiply(x, x), 2),
                     tfm.multiply(k_1, x)),
        tfm.divide(tfm.multiply(k_1, k_1), 2))
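Likewise, f1 computes x**2 / 2 - k_1 * x + k_1**2 / 2, which is just (x - k_1)**2 / 2.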
Example 14
def DSSIM(y_true, y_pred):
    # Structural dissimilarity: maps SSIM in [-1, 1] onto a loss in [0, 1].
    return tfmath.divide(
        tfmath.subtract(1.0, tfimage.ssim(y_true, y_pred, max_val=1.0)), 2.0)
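A usage sketch, assuming the module aliases tfmath and tfimage for tensorflow.math and tensorflow.image (the image shapes are illustrative; tf.image.ssim needs images at least as large as its default 11x11 filter):

import tensorflow as tf

y_true = tf.random.uniform([2, 64, 64, 3])
y_pred = tf.random.uniform([2, 64, 64, 3])
print(DSSIM(y_true, y_pred))  # per-image loss values in [0, 1]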
Example 15
def tf_ssim(x, y, is_normalized=False):
    """
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = np.power(k1 * L, 2)
    c2 = np.power(k2 * L, 2)
    c3 = c2 / 2
    """
    k1 = 0.01
    k2 = 0.03
    L = 1.0 if is_normalized else 255.0
    c1 = tf_pow(multiply(k1, L), 2.0)
    c2 = tf_pow(multiply(k2, L), 2.0)
    c3 = divide(c2, 2.0)

    """
    ux = x.mean()
    uy = y.mean()
    """
    ux = tf_mean(x)
    uy = tf_mean(y)
    """
    std_x = x.std()
    std_y = y.std()
    """
    std_x = tf_std(x)
    std_y = tf_std(y)
    """
    xy = (x - ux) * (y - uy)
    std_xy = xy.mean()
    """
    xy = multiply(subtract(x, ux), subtract(y, uy))
    std_xy = tf_mean(xy)
    """
    l_xy = (2 * ux * uy + c1) / (np.power(ux, 2) + np.power(uy, 2) + c1)
    """
    l_son = add(multiOperation(multiply, 2.0, ux, uy), c1)
    l_mom = multiOperation(add, tf_pow(ux, 2.0), tf_pow(uy, 2.0), c1)
    l_xy = divide(l_son, l_mom)
    """
    c_xy = (2 * std_x * std_y + c2) / (np.power(std_x, 2) + np.power(std_y, 2) + c2)
    """
    c_son = add(multiOperation(multiply, 2.0, std_x, std_y), c2)
    c_mom = multiOperation(add, tf_pow(std_x, 2.0), tf_pow(std_y, 2.0), c2)
    c_xy = divide(c_son, c_mom)
    """
    s_xy = (std_xy + c3) / (std_x * std_y + c3)
    """
    s_son = add(std_xy, c3)
    s_mom = add(multiply(std_x, std_y), c3)
    s_xy = divide(s_son, s_mom)

    one = tf.constant(1.0)
    _ssim = multiOperation(multiply, l_xy, c_xy, s_xy)
    # Clamp to 1.0 to guard against numerical overshoot.
    _result = tf.cond(greater(_ssim, one), lambda: one, lambda: _ssim)

    return _result
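For reference, with c3 = c2 / 2 the three factors l_xy * c_xy * s_xy collapse to the standard single-fraction form of SSIM:

ssim = ((2 * ux * uy + c1) * (2 * std_xy + c2)) / ((ux**2 + uy**2 + c1) * (std_x**2 + std_y**2 + c2))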