Code Example #1
File: PST_func.py Project: Adigorla/PCNN
# Assumed imports/aliases, inferred from the calls below (the full PST_func.py is not shown):
# tfm -> tf.math, sig -> tf.signal, fftshift -> tf.signal.fftshift.
# cart2pol is a helper defined elsewhere in the project.
import tensorflow as tf
from tensorflow import math as tfm
from tensorflow import signal as sig
from tensorflow.python.framework import ops

fftshift = sig.fftshift


def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    # Inverting Threshold_min to simplify the optimization process, so we can clip all variables between 0 and 1
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    [X1, Y1] = (tf.meshgrid(x, y))
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    [THETA, RHO] = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    tmp6 = (LPF**2.0) / tfm.log(2.0)
    tmp5 = tfm.sqrt(tmp6)
    tmp4 = (tfm.divide(RHO, tmp5))
    tmp3 = -tfm.pow(tmp4, 2)
    tmp2 = tfm.exp(tmp3)
    expo = fftshift(tmp2)
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
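
A hedged usage sketch for PST follows, assuming it runs in the same module as the snippet above. The cart2pol below is a plausible stand-in for the helper the project defines elsewhere (MATLAB-style Cartesian-to-polar conversion returning angle and radius), and the parameter values are illustrative, not the project's defaults.

import tensorflow as tf

# Minimal stand-in for the project's cart2pol helper (assumed MATLAB convention: THETA, RHO).
def cart2pol(x, y):
    return tf.math.atan2(y, x), tf.math.sqrt(x ** 2 + y ** 2)

I = tf.random.uniform((256, 256))                        # toy input image
features = PST(I, LPF=0.21, Phase_strength=0.48, Warp_strength=12.14,
               Threshold_min=-1.0, Threshold_max=0.0048)
print(features.shape)                                    # (256, 256) phase map, rescaled so its maximum is 3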
Code Example #2
def tf_unmasked_op(x, idx=0):
    # Centered, orthonormal 2-D FFT of channel `idx` of x, returned with a trailing singleton channel axis.
    scaling_norm = tf.dtypes.cast(
        tf.math.sqrt(tf.cast(tf.math.reduce_prod(tf.shape(x)[1:3]), tf.float32)),
        x.dtype)
    return tf.expand_dims(_temptf_ifft_shift(
        fft2d(_temptf_fft_shift(x[..., idx]))),
                          axis=-1) / scaling_norm
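
The shift helpers and the bare fft2d used above are not part of this snippet. A hedged guess at minimal stand-ins, assuming they simply wrap tf.signal over the two image axes (the project's real implementations may differ):

import tensorflow as tf
from tensorflow import signal

fft2d = signal.fft2d   # assumed: the bare fft2d above is tf.signal.fft2d

def _temptf_fft_shift(x):
    # assumed: shift the zero-frequency component to the centre of the last two axes
    return signal.fftshift(x, axes=(-2, -1))

def _temptf_ifft_shift(x):
    # assumed: undo the shift above
    return signal.ifftshift(x, axes=(-2, -1))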
Code Example #3
    def poisson_solve(self, func, ps, eps, symm, reflect):
        N = len(func)

        if reflect != 0:
            N = N * 2
            func = tf.concat([func, tf.image.flip_left_right(func)], axis=1)
            func = tf.concat([func, tf.image.flip_up_down(func)], axis=0)

        wx = 2 * np.pi * tf.range(0, N, 1, dtype=tf.float32) / N
        fx = 1 / (2 * np.pi * ps) * (wx - np.pi * (1 - N % 2 / N))
        [Fx, Fy] = tf.meshgrid(fx, fx)
        func_ft = signal.fftshift(signal.fft2d(tf.cast(func, tf.complex64)))

        Psi_ft = func_ft / tf.cast(
            (-4 * np.pi**2 * (Fx**2 + Fy**2 + eps)), tf.complex64)
        if (symm):
            # Psi_xy = np.fft.irfft2(signal.ifftshift(Psi_ft)[:,0:N//2+1])
            Psi_xy = signal.ifft2d(signal.ifftshift(Psi_ft))
        else:
            Psi_xy = signal.ifft2d(signal.ifftshift(Psi_ft))

        if reflect != 0:
            N = N // 2
            Psi_xy = Psi_xy[:N, :N]
        # print("Psi_ft: ", Psi_ft.shape, "Psi_xy: ", Psi_xy.shape)
        return Psi_xy
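
poisson_solve inverts the Laplacian in the frequency domain: the shifted 2-D FFT of the source term is divided by -4*pi^2*(Fx^2 + Fy^2 + eps) and transformed back. A standalone sketch of that core step, with hypothetical grid values (it is not a call into the class above, which is not shown in full):

import numpy as np
import tensorflow as tf
from tensorflow import signal

N, ps, eps = 128, 1.0, 1e-9                        # hypothetical grid size, pixel size, regularizer
wx = 2 * np.pi * tf.range(0, N, 1, dtype=tf.float32) / N
fx = 1 / (2 * np.pi * ps) * (wx - np.pi * (1 - N % 2 / N))
Fx, Fy = tf.meshgrid(fx, fx)

f = tf.random.normal((N, N))                       # toy source term
f_ft = signal.fftshift(signal.fft2d(tf.cast(f, tf.complex64)))
psi_ft = f_ft / tf.cast(-4 * np.pi ** 2 * (Fx ** 2 + Fy ** 2 + eps), tf.complex64)
psi = tf.math.real(signal.ifft2d(signal.ifftshift(psi_ft)))   # approximate solution of laplacian(psi) = f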
Code Example #4
File: fft.py Project: sunc13/CMR-DL-challenge
 def call(self, image, *args):
     dtype = tf.math.real(image).dtype
     axes = [tf.rank(image) - 2,
             tf.rank(image) - 1]  # axes have to be positive...
     scale = tf.math.sqrt(
         tf.cast(tf.math.reduce_prod(tf.shape(image)[-2:]), dtype))
     return dlmri_tutorial.complex_scale(
         fftshift(fft2d(ifftshift(image, axes=axes)), axes=axes), 1 / scale)
Code Example #5
File: GS_TF.py Project: gracehzhang/OpenCM
    def propagate(self, Ein, lambd, Z, ps):  #, varargin):

        (m, n) = Ein.shape
        M = m
        N = n

        gpu_num = 0  # check gpu

        mask = 1

        # Initialize variables into CPU or GPU
        if (gpu_num == 0):
            if self.Eout is None:
                self.Eout = tf.Variable(1j * tf.zeros(
                    (m, n, len(Z)), dtype=tf.complex64))
            aveborder = tf.reduce_mean(
                tf.concat((Ein[0, :], Ein[m - 1, :], tf.transpose(
                    Ein[:, 0]), tf.transpose(Ein[:, n - 1])),
                          axis=0))
            #np.mean(cat(2,Ein(1,:),Ein(m,:),Ein(:,1)',Ein(:,n)'));
            H = tf.zeros((M, N, len(Z)))

        else:
            # reset(gpuDevice(1));
            raise NotImplementedError
            # lambd = gpuArray(lambd);
            # Z = gpuArray(Z);
            # ps = gpuArray(ps);
            # Eout = gpuArray.zeros(m,n,length(Z));
            # aveborder=gpuArray(mean(cat(2,Ein(1,:),Ein(m,:),Ein(:,1)',Ein(:,n)')));
            # if nargout>1
            #     H = gpuArray.zeros(M,N,length(Z));

        # Spatial Sampling
        [x, y] = tf.meshgrid(tf.range(-N / 2, (N / 2 - 1) + 1),
                             tf.range(-M / 2, (M / 2 - 1) + 1))

        fx = (x / (ps * M))  #frequency space width [1/m]
        fy = (y / (ps * N))  #frequency space height [1/m]
        fx2fy2 = fx**2 + fy**2

        # Padding value
        Ein_pad = Ein
        # Ein_pad = tf.ones((M,N), dtype = tf.complex64) * aveborder #pad by average border value to avoid sharp jumps
        # Ein_pad[(M-m)//2:(M+m)//2,(N-n)//2:(N+n)//2] = Ein # what is this?
        # Ein_pad = tf.pad(Ein, ((), ()), aveborder)

        # FFT of E0
        E0fft = signal.fftshift(signal.fft2d(Ein_pad))
        for z in range(len(Z)):
            H = tf.exp(-1j * np.pi * tf.cast(
                lambd * Z[z] * fx2fy2, tf.complex64))  #Fast Transfer Function
            Eout_pad = signal.ifft2d(signal.ifftshift(E0fft * H * mask))

            self.Eout[:, :, z].assign(Eout_pad[(M - m) // 2:(M + m) // 2,
                                               (N - n) // 2:(N + n) // 2])
            # Eout[:,:,z]=Eout_pad[(M-m)//2:(M+m)//2,(N-n)//2:(N+n)//2]

        # Gather variables from GPU if necessary
        if (gpu_num > 0):
            raise NotImplementedError
            # Eout=gather(Eout);
            # if nargout > 1:
            #     H=gather(H);

        return self.Eout  # H not returned?
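
The heart of propagate() is the angular-spectrum step: multiply the shifted spectrum of the field by the transfer function H = exp(-i*pi*lambda*z*(fx^2 + fy^2)) and transform back. A standalone sketch of just that step, with hypothetical sampling values rather than the project's:

import numpy as np
import tensorflow as tf
from tensorflow import signal

M = N = 256                                  # hypothetical grid size
ps, lambd, z = 4e-6, 532e-9, 1e-3            # pixel size [m], wavelength [m], distance [m]
x, y = tf.meshgrid(tf.range(-N / 2, N / 2), tf.range(-M / 2, M / 2))
fx2fy2 = (x / (ps * M)) ** 2 + (y / (ps * N)) ** 2
H = tf.exp(-1j * np.pi * tf.cast(lambd * z * fx2fy2, tf.complex64))   # transfer function
E0 = tf.cast(tf.random.uniform((M, N)), tf.complex64)                 # toy input field
Ez = signal.ifft2d(signal.ifftshift(signal.fftshift(signal.fft2d(E0)) * H))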
Code Example #6
 def F(x):
     return signal.ifftshift(signal.fft2d(signal.fftshift(x)))
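
F is the centered 2-D FFT (shift, transform, unshift). The matching centered inverse, not part of the original snippet but its natural companion, would be:

from tensorflow import signal

def iF(x):
    # Centered inverse 2-D FFT; iF(F(x)) recovers x up to floating-point error.
    return signal.ifftshift(signal.ifft2d(signal.fftshift(x)))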
Code Example #7
File: fft.py Project: sunc13/CMR-DL-challenge
 def call(self, image, *args):
     dtype = tf.math.real(image).dtype
     scale = tf.math.sqrt(
         tf.cast(tf.math.reduce_prod(tf.shape(image)[-2:]), dtype))
     return dlmri_tutorial.complex_scale(fft2d(image), 1 / scale)
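
Both fft.py snippets (Examples #4 and #7) divide by sqrt(H*W) so that the transform preserves signal energy. A small standalone check of that scaling, using plain tf.signal instead of the project's dlmri_tutorial.complex_scale helper:

import tensorflow as tf

image = tf.complex(tf.random.normal((8, 64, 64)), tf.random.normal((8, 64, 64)))
scale = tf.cast(
    tf.math.sqrt(tf.cast(tf.math.reduce_prod(tf.shape(image)[-2:]), tf.float32)),
    image.dtype)
kspace = tf.signal.fft2d(image) / scale                      # orthonormally scaled 2-D FFT
energy = lambda t: tf.math.reduce_sum(tf.math.abs(t) ** 2)
print(energy(image).numpy(), energy(kspace).numpy())         # the two energies should match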