def Shrinkwrap(self, sigma, thresh):
     dist = tf.contrib.distributions.MultivariateNormalDiag(
         [0., 0., 0.], [sigma, sigma, sigma])
     kernelFFT = tf.fft3d(
         tf.cast(tf.reshape(dist.prob(self._domain), self._probSize),
                 tf.complex64))
     blurred = tf.abs(
         tf.ifft3d(
             tf.multiply(
                 tf.fft3d(tf.cast(tf.abs(self._cImage), tf.complex64)),
                 kernelFFT)))
     blurred = tf.concat((blurred[(self._probSize[0] // 2):, :, :],
                          blurred[:(self._probSize[0] // 2), :, :]),
                         axis=0)
     blurred = tf.concat((blurred[:, (self._probSize[1] // 2):, :],
                          blurred[:, :(self._probSize[1] // 2), :]),
                         axis=1)
     blurred = tf.concat((blurred[:, :, (self._probSize[2] // 2):],
                          blurred[:, :, :(self._probSize[2] // 2)]),
                         axis=2)
     self._support = tf.cast(blurred > thresh * tf.reduce_max(blurred),
                             tf.complex64)
     self._support_comp = tf.cast(
         blurred <= thresh * tf.reduce_max(blurred), tf.complex64)
     return
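For orientation, the shrinkwrap update above (blur the current magnitude with a Gaussian via an FFT product, then threshold at a fraction of the blurred maximum) can be sketched in plain NumPy/SciPy. The helper below is an illustrative stand-in, not part of the original class.

import numpy as np
from scipy.ndimage import gaussian_filter

def shrinkwrap_support(c_image, sigma, thresh):
    # Blur the magnitude of the current estimate (spatial-domain here; the graph
    # above does the same convolution via an FFT product) and threshold it.
    blurred = gaussian_filter(np.abs(c_image), sigma)
    support = blurred > thresh * blurred.max()
    return support, ~support  # support and its complement, as boolean masks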
Example #2
def get_highPass(img, mode='gradient', paras={}):
    if mode == 'gradient':
        imgY, imgX = tf.image.image_gradients(img)
        return tf.sqrt(tf.square(imgX) + tf.square(imgY), name='xHighPass')
    elif mode == 'fft':
        # n, h, w, c = tf.shape(img)
        n = 5
        h = 320
        w = 320
        c = 1
        # freq_thershold = paras.get('freq_thershold', int(h/3))
        # # freq_thershold = int(h/3)
        freq_thershold = 8
        # img = tf.reshape(img[:,:,:,0], [5, 320,320,1])
        img_fft = tf.fft3d(tf.cast(img, tf.complex64))

        # left = freq_thershold;   right = w - freq_thershold
        # up = freq_thershold;     down = h - freq_thershold
        mask = np.zeros((h, w, c))
        mask[freq_thershold:h - freq_thershold, :, :] = 1
        mask[:, freq_thershold:w - freq_thershold, :] = 1
        # maskHP = np.zeros((h, w))
        # maskHP[freqThreshold_HP:h-freqThreshold_HP, :] = 1
        # maskHP[:, freqThreshold_HP:w-freqThreshold_HP] = 1
        # mask = np.ones([h, w, c])
        # mask[up:down, left:right, :] = 0
        # plt.imsave('./highPass_mask.png', mask, cmap='gray')
        # plt.imsave('./highPass_masked.png', mask, cmap='gray')
        return tf.real(tf.ifft3d(tf.multiply(img_fft, mask)))
        # return tf.ifft(tf.multiply(img_fft, mask))
        # return tf.real(tf.ifft(tf.fft(tf.cast(img, tf.complex64))))
    elif mode == 'fft_Gaussian':
        mask = 1 - get_Gaussian_mask(0.007, 0.001, 320, 320)
        img_fft = tf.fft3d(tf.cast(img, tf.complex64))
        return tf.real(tf.ifft3d(tf.multiply(img_fft, mask)))
Example #3
    def defineShrinkwrap(self, varDict, gpu):

        # Array coordinates
        x, y, z = np.meshgrid(list(range(varDict['cImage'].shape[0])),
                              list(range(varDict['cImage'].shape[1])),
                              list(range(varDict['cImage'].shape[2])))
        y = y.max() - y

        x = x - x.mean()
        y = y - y.mean()
        z = z - z.mean()

        with tf.device('/gpu:%d' % gpu):
            # these are Tensorflow variables
            self._x = tf.constant(fftshift(x), dtype=tf.float32, name='x')
            self._y = tf.constant(fftshift(y), dtype=tf.float32, name='y')
            self._z = tf.constant(fftshift(z), dtype=tf.float32, name='z')
            self._blurred = tf.Variable(np.zeros(varDict['support'].shape),
                                        dtype=tf.complex64,
                                        name='blurred')
            self._dist = tf.Variable(tf.zeros(self._x.shape, dtype=tf.float32),
                                     name='dist')

            # These are shrinkwrap-specific symbolic ops
            with tf.name_scope('Shrinkwrap'):
                self._getNewDist = tf.assign(
                    self._dist,
                    tf.exp(self._neg * (self._x * self._x + self._y * self._y +
                                        self._z * self._z) /
                           (self._sigma * self._sigma)),
                    name='getNewDist')
                #                self._copyDistToRollBuffer = tf.assign( self._rollBuffer, tf.cast( self._dist, dtype=tf.complex64 ), name='CopyDistToRollBuffer' )
                #                self._retrieveDistFromRollBuffer = tf.assign( self._blurred, self._rollBuffer, name='retrieveDistFromRollBuffer' )
                #                self._copyImageToRollBuffer = tf.assign( self._rollBuffer, tf.cast( tf.abs( self._cImage ), dtype=tf.complex64 ), name='copyImgToRollBuffer' )
                #                self._convolveRollBufferWithBlur = tf.assign( self._rollBuffer, self._rollBuffer*self._blurred, name='convolve' )
                #                self._retrieveBlurred = tf.assign( self._blurred, self._rollBuffer, name='retrieveBlurred' )
                self._blurShape = tf.assign(
                    self._blurred,
                    tf.ifft3d(
                        tf.fft3d( tf.cast( self._dist, dtype=tf.complex64 ) ) *\
                        tf.fft3d( tf.cast( tf.abs( self._cImage ), dtype=tf.complex64 ) )
                    ),
                    name='blurShape'
                )
                self._updateSupport = tf.assign(
                    self._support,
                    tf.cast(
                        tf.abs(self._blurred) >
                        self._thresh * tf.reduce_max(tf.abs(self._blurred)),
                        tf.complex64),
                    name='updateSup')
                self._updateSupComp = tf.assign(
                    self._support_comp,
                    tf.cast(
                        tf.abs(self._blurred) <=
                        self._thresh * tf.reduce_max(tf.abs(self._blurred)),
                        tf.complex64),
                    name='updateSupComp')
 def __ERKernel__(self, mycount, myimage, myimage_fft_mod):
     myimage = tf.ifft3d(
         tf.multiply(
             self._modulus,
             tf.exp(
                 tf.complex(tf.zeros(myimage.shape),
                            tf.angle(tf.fft3d(myimage))))))
     myimage = tf.multiply(myimage, self._support)
     myimage_fft_mod = tf.cast(tf.abs(tf.fft3d(myimage)),
                               dtype=tf.complex64)
     mycount -= 1
     return mycount, myimage, myimage_fft_mod
Example #5
def fft3d(input, gamma=0.1):
    input = apodize3d(input, napodize=5)
    temp = K.permute_dimensions(input, (0, 4, 1, 2, 3))
    fft = tf.fft3d(tf.complex(temp, tf.zeros_like(temp)))
    absfft = tf.pow(tf.abs(fft) + 1e-8, gamma)
    output = K.permute_dimensions(absfft, (0, 2, 3, 4, 1))
    return output
Example #6
def visualize_FC_neuron():
    # create natural image power spectrum
    nimg_spectrum = np.fromfunction(lambda i, j, k: 1 / (
        (i + 1)**2 + (j + 1)**2) + (k + 1**2), (227, 277, 3),
                                    dtype=np.complex64)
    nimg_spectrum = tf.reshape(nimg_spectrum, (227, 277, 3))

    fc8_neuron = fc8[0, 107]
    optimizer = tf.train.GradientDescentOptimizer(0.1)

    x = tf.fft3d(tf.cast(input_var[0], dtype=tf.complex64))
    # print(x.get_shape(), nimg_spectrum.get_shape())
    regularization = tf.norm(nimg_spectrum - x)
    train = optimizer.minimize(-fc8_neuron + regularization)
    sess = tf.Session()
    sess.run(init)
    max = 0
    image = None

    for step in range(STEPS_NUM):
        sess.run(train)
        neuron = sess.run(fc8_neuron)

        # when the neuron's value is maximal, save the input image
        if neuron > max:
            image = sess.run(input_var)
            max = neuron
        print("step: ", step, " neuron: ", neuron, "  regularization: ",
              sess.run(regularization))

    if image is None:
        print('you have a problem')
    else:
        scipy.misc.imsave("fc_visualization.jpg", image[0])
    def initialize_space_space_domain(self):
        if self.use_spatial_patching:
            recon_shape = self.recon_shape_full
        else:
            recon_shape = self.recon_shape

        if self.use_deep_image_prior:
            with tf.variable_scope('deep_image_prior'):
                self.deep_image_prior()
        else:
            if self.DT_recon_r_initialize is not None:
                self.DT_recon_r = tf.Variable(self.DT_recon_r_initialize,
                                              dtype=tf.float32,
                                              name='recon_real')
            else:
                self.DT_recon_r = tf.get_variable(
                    shape=recon_shape,
                    dtype=tf.float32,
                    initializer=tf.random_uniform_initializer(0, 1e-7),
                    name='recon_real')
            self.DT_recon_i = tf.get_variable(
                shape=recon_shape,
                dtype=tf.float32,
                initializer=tf.random_uniform_initializer(0, 1e-7),
                name='recon_imag')
        self.DT_recon = tf.complex(self.DT_recon_r, self.DT_recon_i)
        self.k_space = self.tf_ifftshift3(
            tf.fft3d(self.tf_fftshift3(self.DT_recon)))
Example #8
def my_ft3d(tensor, scaling=1.):
    """
    fftshift(fft(ifftshift(a)))
    
    Applies shifts to work with arrays whose "zero" is in the middle 
    instead of the first element.
    
    Uses standard normalization of fft unlike dip_image.
    """
    return fftshift(tf.fft3d(ifftshift(tensor))) / scaling
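For comparison, a NumPy sketch of the same centred transform (assuming nothing beyond numpy.fft):

import numpy as np

def my_ft3d_np(arr, scaling=1.):
    # fftshift(fft(ifftshift(a))): a 3-D FFT for arrays whose "zero" sits in the middle.
    return np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(arr))) / scaling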
Example #9
 def fft(self, x):
     rank = len(x.shape) - 2
     assert rank >= 1
     if rank == 1:
         return tf.stack([tf.fft(c) for c in tf.unstack(x, axis=-1)], axis=-1)
     elif rank == 2:
         return tf.stack([tf.fft2d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
     elif rank == 3:
         return tf.stack([tf.fft3d(c) for c in tf.unstack(x, axis=-1)], axis=-1)
     else:
         raise NotImplementedError('n-dimensional FFT not implemented.')
 def __SFKernel__(self, mycount, myimage, myimage_fft_mod):
     myimage = 2. * (self._support * myimage) - myimage
     myimage = tf.ifft3d(
         tf.multiply(
             self._modulus,
             tf.exp(
                 tf.complex(tf.zeros(myimage.shape),
                            tf.angle(tf.fft3d(myimage))))))
     myimage = 2. * (self._support * myimage) - myimage
     mycount -= 1
     return mycount, myimage, myimage_fft_mod
 def __HIOKernel__(self, mycount, myimage, myimage_fft_mod):
     origImage = tf.identity(myimage)
     myimage = tf.ifft3d(
         tf.multiply(
             self._modulus,
             tf.exp(
                 tf.complex(tf.zeros(myimage.shape),
                            tf.angle(tf.fft3d(myimage))))))
     myimage = tf.multiply(self._support, myimage) + tf.multiply(
         self._support_comp, origImage - self._beta * myimage)
     mycount -= 1
     return mycount, myimage, myimage_fft_mod
def conv3d_fft_tf(vol, otf):
    input = tf.complex(vol, tf.zeros(vol.shape, dtype=tf.float32))
    input = tf.cast(input, dtype=tf.complex64)
    otf = tf.cast(otf, dtype=tf.complex64)
    vol_fft = tf.fft3d(input)
    vol_fftshift = hp.fftshift3d(vol_fft)

    vol_fftshift = tf.multiply(vol_fftshift, otf)

    vol_fftshift = hp.ifftshift3d(vol_fftshift)
    vol_fft = tf.ifft3d(vol_fftshift)
    return abssqr_tf(vol_fft)
Example #13
def fft3d_b01c(x_b01c):
    #x_shape=tf.to_int64(tf.shape(x_b01c))
    x = int(x_b01c.get_shape()[1])
    y = int(x_b01c.get_shape()[2])
    z = int(x_b01c.get_shape()[3])
    # fft3d for b01c-type volumes. fft3d operates only on the innermost 3 dims, so
    # we transpose to put the spatial dims in the final 3 positions.
    x_bc01 = tf.transpose(x_b01c, (0, 4, 1, 2, 3))

    fft_x_bc01 = tf.fft3d(x_bc01)
    fft_x_b01c = tf.transpose(fft_x_bc01, (0, 2, 3, 4, 1))
    return (fft_x_b01c) / (x * y * z)
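A possible inverse counterpart (a sketch assuming the same b01c layout, the 1/(x*y*z) scaling used above, and an `import tensorflow as tf` context):

def ifft3d_b01c(fft_x_b01c):
    x = int(fft_x_b01c.get_shape()[1])
    y = int(fft_x_b01c.get_shape()[2])
    z = int(fft_x_b01c.get_shape()[3])
    # Undo the forward scaling, move the spatial dims to the innermost positions,
    # apply the inverse FFT, and transpose back to b01c.
    fft_x_bc01 = tf.transpose(fft_x_b01c * (x * y * z), (0, 4, 1, 2, 3))
    x_bc01 = tf.ifft3d(fft_x_bc01)
    return tf.transpose(x_bc01, (0, 2, 3, 4, 1))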
Example #14
    def defineER(self, gpu):  # symbolic ops for error reduction

        with tf.device('/gpu:%d' % gpu):
            with tf.name_scope('DiffractionPattern'):
                self._getIntermediateFFT = tf.assign(self._intermedFFT,
                                                     tf.fft3d(self._cImage),
                                                     name='intermedFFT')
                self._getIntermediateInt = tf.assign(
                    self._intermedInt,
                    tf.square(
                        tf.cast(tf.abs(self._intermedFFT),
                                dtype=tf.complex64)),
                    name='intermedInt')
Example #15
def _ir2tf(imp_resp, shape, sess, dim=None, is_real=True):
    """Compute the transfer function of an impulse response (IR).
    This function makes the necessary correct zero-padding, zero
    convention, correct fft2, etc... to compute the transfer function
    of IR. To use with unitary Fourier transform for the signal (ufftn
    or equivalent).

    Args:
        imp_resp (ndarray/tensor): The impulse response.
        shape (tuple): A tuple of integers corresponding to the target shape of 
            the transfer function.
        sess (InteractiveSession): Tensorflow session.
        dim (int): The last axis along which to compute the transform. All
            axes by default.
        is_real (boolean): If True (default), imp_resp is supposed real and the
            Hermitian property is used with rfftn Fourier transform.

    Return:
        tensor: The transfer function of shape ``shape``.
    """
    if not dim:
        if tf.contrib.framework.is_tensor(imp_resp):
            dim = len(imp_resp.shape)
        else:
            dim = imp_resp.ndim
    irpadded = tf.Variable(tf.zeros(shape))
    init_op = tf.variables_initializer([irpadded])
    sess.run(init_op)
    if tf.contrib.framework.is_tensor(imp_resp):
        imp_shape = tuple(tf.shape(imp_resp).eval())
    else:
        imp_shape = imp_resp.shape
    op = tf.assign(irpadded[tuple([slice(0, s) for s in imp_shape])], imp_resp)
    sess.run(op)
    for axis, axis_size in enumerate(imp_shape):
        if axis >= len(imp_resp.shape) - dim:
            irpadded = tf.manip.roll(
                irpadded,
                shift=-tf.cast(tf.math.floor(axis_size / 2), tf.int32),
                axis=axis)
    if dim == 1:
        return tf.spectral.rfft(irpadded) if is_real else tf.fft(
            tf.cast(irpadded, tf.complex64))
    elif dim == 2:
        return tf.spectral.rfft2d(irpadded) if is_real else tf.fft2d(
            tf.cast(irpadded, tf.complex64))
    elif dim == 3:
        return tf.spectral.rfft3d(irpadded) if is_real else tf.fft3d(
            tf.cast(irpadded, tf.complex64))
    else:
        raise ValueError('Bad dimension, dim can only be 1, 2 and 3')
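A possible TF1-style usage sketch for `_ir2tf`, with an illustrative 3x3 Laplacian as the impulse response and an arbitrary 64x64 target grid:

import numpy as np
import tensorflow as tf

sess = tf.Session()
laplacian = np.array([[0., 1., 0.],
                      [1., -4., 1.],
                      [0., 1., 0.]], dtype=np.float32)  # illustrative impulse response
transfer = _ir2tf(laplacian, (64, 64), sess, dim=2, is_real=True)
print(sess.run(transfer).shape)  # (64, 33): rfft2d keeps only the non-redundant half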
Example #16
def conv3d_fft_tf(vol, otf):
    ''' convolve given volume with OTF
        Requirement/Assumption:
            volume AND OTF are not shifted
    '''
    # input = tf.complex(vol, tf.zeros(vol.shape, dtype=tf.float32))
    input = tf.cast(vol, dtype=tf.complex64)
    otf = tf.cast(otf, dtype=tf.complex64)
    vol_fft = tf.fft3d(input)
    # vol_fftshift = hp.fftshift3d(vol_fft)
    vol_fftshift = tf.multiply(vol_fft, otf)
    # vol_fftshift = hp.ifftshift3d(vol_fftshift)
    vol_fft = tf.ifft3d(vol_fftshift)
    return tf.real(vol_fft)
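A hedged usage sketch: build an (unshifted) OTF from a small point-spread function with NumPy and convolve a random volume with the function above. The PSF values and sizes are illustrative only.

import numpy as np
import tensorflow as tf

vol_np = np.random.rand(32, 32, 32).astype(np.float32)
psf_np = np.zeros((32, 32, 32), dtype=np.float32)
psf_np[:3, :3, :3] = 1. / 27.                 # small box blur with its origin at index 0 (unshifted)
otf_np = np.fft.fftn(psf_np)                  # unshifted OTF, per the docstring's assumption

vol = tf.constant(vol_np)
otf = tf.constant(otf_np.astype(np.complex64))
blurred = conv3d_fft_tf(vol, otf)             # circular convolution via tf.fft3d / tf.ifft3d
with tf.Session() as sess:
    print(sess.run(blurred).shape)            # (32, 32, 32)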
Example #17
def get_lowPass(img, mode='average', paras={}):
    if mode == 'average':
        size = paras.get('size', 7)
        lowPassFilter_C3 = tf.constant(1 / size**2,
                                       shape=[size, size, 3, 3],
                                       name='lowPass_filter_C1')
        return tf.nn.conv2d(img,
                            lowPassFilter_C3,
                            strides=[1, 1, 1, 1],
                            padding='SAME',
                            name='xlowPass')
    elif mode == 'fft':
        # n, h, w, c = img.shape
        n = 5
        h = 320
        w = 320
        c = 1
        # img = tf.reshape(img[:,:,:,0], [5, 320,320,1])
        # freq_thershold = paras.get('freq_thershold', int(h/3))
        freq_thershold = 8
        img_fft = tf.fft3d(tf.cast(img, tf.complex64))

        # left = freq_thershold;   right = w - freq_thershold
        # up = freq_thershold;     down = h - freq_thershold
        # mask = np.ones([h, w, c])
        # mask = np.zeros([h, w, c])
        # mask[up:down, left:right, :] = 1
        mask = np.ones((h, w, c))
        mask[freq_thershold:h - freq_thershold, :, :] = 0
        mask[:, freq_thershold:w - freq_thershold, :] = 0
        return tf.real(tf.ifft3d(tf.multiply(img_fft, mask)))
        # return tf.ifft(tf.multiply(img_fft, mask))
        # return tf.real(tf.ifft(tf.fft(tf.cast(img, tf.complex64))))
    elif mode == 'fft_Gaussian':
        mask = get_Gaussian_mask(0.007, 0.001, 320, 320)
        img_fft = tf.fft3d(tf.cast(img, tf.complex64))
        return tf.real(tf.ifft3d(tf.multiply(img_fft, mask)))
Example #18
def fftconvolve3d(x, y, padding):
    # FIXME SAME will not work correctly
    # FIXME specifically designed for normxcorr (need to work more to make it general)
    # Read shapes
    x_shape = np.array(tuple(x.get_shape().as_list()), dtype=np.int32)
    y_shape = np.array(tuple(y.get_shape().as_list()), dtype=np.int32)
    # Construct paddings and pad
    x_shape[1:4] = x_shape[1:4] - 1
    y_pad = [[0, 0], [0, x_shape[1]], [0, x_shape[2]], [0, x_shape[3]]]
    y_shape[1:4] = y_shape[1:4] - 1
    x_pad = [[0, 0], [0, y_shape[1]], [0, y_shape[2]], [0, y_shape[3]]]

    x = tf.pad(x, x_pad)
    y = tf.pad(y, y_pad)

    y = tf.cast(y, tf.complex64, name='complex_Y')
    x = tf.cast(x, tf.complex64, name='complex_X')

    convftt = tf.real(
        tf.ifft3d(tf.multiply(tf.fft3d(x), tf.fft3d(y), name='fft_mult')))

    print(convftt.get_shape())
    #Slice correctly based on requirements
    if padding == 'VALID':
        begin = [0, y_shape[1], y_shape[2], y_shape[3]]
        size = [
            x_shape[0], x_shape[1] - y_shape[1], x_shape[2] - y_shape[1], 1
        ]

    if padding == 'SAME':
        begin = [0, y_shape[1] // 2 - 1, y_shape[2] // 2 - 1, y_shape[3] - 1]
        size = x_shape  #[-1, x_shape[0], x_shape[1]]

    z = tf.slice(convftt, begin, size)
    z = tf.squeeze(z)
    return z
Example #19
def gpu_gaussian_random_field(size=32, scale=1, length=48 * 28):
    shape = (size, size, length)
    amplitude = tf.constant(form_spectral_matrix(shape), dtype=tf.float32)
    complex_amplitude = tf.complex(amplitude, tf.zeros(shape,
                                                       dtype=tf.float32))
    random_noise = tf.random_normal(shape=shape,
                                    stddev=scale,
                                    dtype=tf.float32)
    zeros = tf.zeros(shape, dtype=tf.float32)
    complex_noise = tf.complex(random_noise, zeros)
    noise_spectrum = tf.fft3d(complex_noise)
    convolved = tf.multiply(complex_amplitude, noise_spectrum)
    simulation = tf.ifft3d(convolved)
    with tf.Session() as sess:
        result = sess.run(simulation)
        return result
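The same spectral-synthesis idea in plain NumPy, for reference. `form_spectral_matrix` above is assumed to return the desired amplitude spectrum; here it is replaced by an illustrative power-law filter, so this is only a sketch.

import numpy as np

def gaussian_random_field_np(shape=(32, 32, 64), scale=1., beta=2.):
    # Illustrative power-law amplitude |k|^(-beta/2), standing in for form_spectral_matrix.
    k = np.meshgrid(*[np.fft.fftfreq(n) for n in shape], indexing='ij')
    k2 = sum(ki**2 for ki in k)
    k2[0, 0, 0] = 1.                              # avoid division by zero at the DC bin
    amplitude = k2 ** (-beta / 4.)
    noise = np.random.normal(scale=scale, size=shape)
    # Colour white noise by filtering it in Fourier space, as in the graph above.
    return np.fft.ifftn(amplitude * np.fft.fftn(noise))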
Example #20
def upsample_FT(inputs, upsam_size, scope, data_format='channels_last'):

    if data_format == 'channels_first' or data_format == 'NCDHW':
        raise RuntimeError("This has not been tested for channels_first")
    sh = np.array(inputs.shape.as_list())
    f_ny_old = sh // 2  # Nyquist frequency of the original tensor

    with tf.name_scope(scope):
        t_cmplx = tf.complex(inputs, tf.zeros(inputs.shape))
        t_cmplx_ft = tf.fft3d(t_cmplx)
        t_cmplx_ft_pad = tf.manip.roll(t_cmplx_ft, f_ny_old, axis=(0, 1, 2))
        t_cmplx_ft_pad = tf.pad(
            t_cmplx_ft_pad, ((0, (upsam_size[0] - 1) * t_cmplx_ft.shape[0]),
                             (0, (upsam_size[1] - 1) * t_cmplx_ft.shape[1]),
                             (0, (upsam_size[2] - 1) * t_cmplx_ft.shape[2])),
            'constant')
        t_cmplx_ft_pad = tf.manip.roll(t_cmplx_ft_pad,
                                       -f_ny_old,
                                       axis=(0, 1, 2))
        t_upsam = tf.real(tf.ifft3d(t_cmplx_ft_pad))
    # the test found a significant imag part though --> bc of hard edge?
    return t_upsam
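A 1-D NumPy sketch of the same Fourier zero-padding upsampling (assuming an even-length input; the roll/pad/roll in the graph above is equivalent to padding the centred spectrum):

import numpy as np

def upsample_ft_1d(x, factor=2):
    # Centre the spectrum, insert zeros around the Nyquist frequency, transform back.
    n = x.size
    X = np.fft.fftshift(np.fft.fft(x))
    X_pad = np.pad(X, (n * (factor - 1)) // 2, mode='constant')
    return factor * np.real(np.fft.ifft(np.fft.ifftshift(X_pad)))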
Example #21
def tensor_t_compress(A, k, n, m):
    D = tf.fft3d(A)

    # n = np.shape(A)[2]
    Uy = tf.Variable(tf.zeros([k, m, n]))
    for i in range(n):
        Ux, Sx, Vx = np.linalg.svd(D[:, :, i])
        Uy[:, :, i].assign(processU(Ux, k))
        # Sy[:,:,i] = processS(Sx,k)
        # Vy[:,:,i] = processV(Vx,k)

    # U = ifft(Uy,[],3);
    # S = ifft(Sy,[],3);
    # V = ifft(Vy,[],3);
    (n1, n2, n3) = tf.shape(Ux)
    (m1, m2, m3) = tf.shape(A)

    # if n2 != m1 and n3 != m3:
    #     error('Inner tensor dimensions must agree.')

    C = tf.Variable(tf.zeros([n1, m2, n3]))

    # first frontal slice
    halfn3 = int(np.round(n3 / 2.0))
    for i in range(halfn3):
        C[:, :, i].assign(np.transpose(Ux[:, :, i]) * D[:, :, i])
        C[:, :, n3 + 2 - i].assign(np.conjugate(C[:, :, i]))
    C[:, :, 1].assign(np.dot(np.transpose(Ux[:, :, 1]), A[:, :, 1]))

    if np.mod(n3, 2) == 0:
        i = halfn3 + 1
        C[:, :, i].assign(np.transpose(Uy[:, :, i]) * D[:, :, i])

    C = tf.spectral.ifft3d(C)
    return C
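Because the routine above mixes TensorFlow tensors with NumPy calls, here is a self-contained NumPy sketch of the t-SVD-style compression it appears to aim for (FFT along the tube dimension, truncated SVD per frontal slice, inverse FFT). This is an interpretation, not the original function.

import numpy as np

def t_svd_compress(A, k):
    # Truncate each frontal slice of fft(A, axis=2) to rank k, then transform back.
    D = np.fft.fft(A, axis=2)
    C = np.empty_like(D)
    for i in range(D.shape[2]):
        U, s, Vh = np.linalg.svd(D[:, :, i], full_matrices=False)
        C[:, :, i] = (U[:, :k] * s[:k]) @ Vh[:k, :]   # rank-k reconstruction of the slice
    return np.real(np.fft.ifft(C, axis=2))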
    def computemodel(self):
        ''' Perform multiple scattering here.
        1.) Multiple scattering is performed by slice-wise propagation of the E-field through the sample
        2.) Each field has to be backprojected to the BFP
        3.) The last step is to create a focus stack and sum over all angles

        This is done for all illumination angles (coming from the illumination NA)
        simultaneously.'''

        print("Buildup Q-PHASE Model ")
        ###### make sure that the first dimension is the batch size; in this case it is the illumination number
        # @beniroquai It's common to have the batch dimension first, not last!
        # the following loop propagates the field sequentially to all different Z-planes

        ## propagate the field through the entire object for all angles simultaneously
        A_prop = np.transpose(
            self.A_input,
            [3, 0, 1, 2])  # ??????? what the heck is happening with this transpose?!

        myprop = np.exp(1j * self.dphi) * (self.dphi > 0)
        # excludes the near field components in each step
        myprop = tf_helper.repmat4d(myprop, self.Nc)
        myprop = np.transpose(
            myprop, [3, 0, 1, 2])  # ??????? what the heck is happening with this transpose?!

        RefrEffect = 1j * self.dz * self.k0 * self.RefrCos
        # Precalculate the oblique effect on OPD to speed it up
        RefrEffect = np.transpose(RefrEffect, [3, 0, 1, 2])

        # for now orientate the dimensions as (alpha_illu, x, y, z) - because tensorflow takes the first dimension as batch size
        with tf.name_scope('Variable_assignment'):
            self.TF_A_input = tf.constant(A_prop, dtype=tf.complex64)
            self.TF_RefrEffect = tf.reshape(
                tf.constant(RefrEffect, dtype=tf.complex64), [self.Nc, 1, 1])
            self.TF_myprop = tf.squeeze(tf.constant(myprop,
                                                    dtype=tf.complex64))
            self.TF_Po = tf.cast(tf.constant(self.Po), tf.complex64)
            self.TF_Zernikes = tf.constant(self.myzernikes, dtype=tf.float32)

            if (self.is_optimization_psf):
                self.TF_zernikefactors = tf.Variable(self.zernikefactors,
                                                     dtype=tf.float32,
                                                     name='var_zernikes')
            else:
                self.TF_zernikefactors = tf.constant(self.zernikefactors,
                                                     dtype=tf.float32,
                                                     name='const_zernikes')

        # TODO: Introduce the averaged RI along Z - MWeigert

        self.TF_A_prop = tf.squeeze(self.TF_A_input)
        self.U_z_list = []

        # Initialize memory
        self.allInt = 0
        self.allSumAmp = 0
        self.TF_allSumAmp = tf.zeros([self.mysize[0], self.Nx, self.Ny],
                                     dtype=tf.complex64)

        self.tf_iterator = tf.Variable(1)
        # simulate multiple scattering through object
        with tf.name_scope('Fwd_Propagate'):
            for pz in range(0, self.mysize[0]):
                self.tf_iterator += self.tf_iterator
                #self.TF_A_prop = tf.Print(self.TF_A_prop, [self.tf_iterator], 'Propagation step: ')
                with tf.name_scope('Refract'):
                    TF_f_phase = tf.cast(self.TF_obj_phase_do[pz, :, :],
                                         tf.complex64)
                    self.TF_f = tf.exp(1j * self.TF_RefrEffect * TF_f_phase)
                    self.TF_A_prop = self.TF_A_prop * self.TF_f  # refraction step

                with tf.name_scope('Propagate'):
                    self.TF_A_prop = tf_helper.my_ift2d(
                        tf_helper.my_ft2d(self.TF_A_prop) *
                        self.TF_myprop)  # diffraction step

        # Bring the slice back to focus - does this make any sense?!
        print('----------> Bringing field back to focus')
        self.TF_A_prop = tf_helper.my_ift2d(
            tf_helper.my_ft2d(self.TF_A_prop) *
            (-self.Nz / 2 * self.TF_myprop))  # diffraction step

        # in a final step limit this to the detection NA:
        self.TF_Po_aberr = tf.exp(1j * tf.cast(
            tf.reduce_sum(self.TF_zernikefactors * self.TF_Zernikes, axis=2),
            tf.complex64)) * self.TF_Po
        self.TF_A_prop = tf_helper.my_ift2d(
            tf_helper.my_ft2d(self.TF_A_prop) * self.TF_Po_aberr)

        self.TF_myAllSlicePropagator = tf.constant(self.myAllSlicePropagator,
                                                   dtype=tf.complex64)
        self.kzcoord = np.reshape(self.kz[self.Ic > 0], [1, 1, 1, self.Nc])

        # create Z-Stack by backpropagating Information in BFP to Z-Position
        # self.mid3D = ([np.int(np.ceil(self.A_input.shape[0] / 2) + 1), np.int(np.ceil(self.A_input.shape[1] / 2) + 1), np.int(np.ceil(self.mysize[0] / 2) + 1)])
        self.mid3D = ([
            np.int(self.mysize[0] // 2),
            np.int(self.A_input.shape[0] // 2),
            np.int(self.A_input.shape[1] // 2)
        ])

        with tf.name_scope('Back_Propagate'):
            for pillu in range(0, self.Nc):
                with tf.name_scope('Back_Propagate_Step'):
                    with tf.name_scope('Adjust'):
                        #    fprintf('BackpropaAngle no: #d\n',pillu);
                        OneAmp = tf.expand_dims(self.TF_A_prop[pillu, :, :], 0)

                        # Fancy backpropagation assuming what would be measured if the sample was moved under oblique illumination:
                        # The trick is: First use conceptually the normal way
                        # and then apply the XYZ shift using the Fourier shift theorem (corresponds to physically shifting the object volume, scattered field stays the same):
                        self.TF_AdjustKXY = tf.squeeze(
                            tf.conj(self.TF_A_input[pillu, :, :, ])
                        )  # tf.transpose(tf.conj(TF_A_input[pillu, :,:,]), [2, 1, 0]) # Maybe a bit of a dirty hack, but we first need to shift the zero coordinate to the center
                        self.TF_AdjustKZ = tf.transpose(
                            tf.constant(np.exp(
                                2 * np.pi * 1j * self.dz *
                                np.reshape(np.arange(0, self.mysize[0], 1),
                                           [1, 1, self.mysize[0]]) *
                                self.kzcoord[:, :, :, pillu]),
                                        dtype=tf.complex64), [2, 1, 0])
                        self.TF_allAmp = tf_helper.my_ift2d(
                            tf_helper.my_ft2d(OneAmp) *
                            self.TF_myAllSlicePropagator
                        ) * self.TF_AdjustKZ * self.TF_AdjustKXY  # * (TF_AdjustKZ);  # 2x bfxfun.  Propagates a single amplitude pattern back to the whole stack
                        self.TF_allAmp = self.TF_allAmp * tf.exp(
                            1j * tf.cast(
                                tf.angle(self.TF_allAmp[
                                    self.mid3D[0], self.mid3D[1],
                                    self.mid3D[2]]), tf.complex64)
                        )  # Global Phases need to be adjusted at this step!  Use the zero frequency

                    if (0):
                        with tf.name_scope('Propagate'):
                            self.TF_allAmp_3dft = tf.fft3d(
                                tf.expand_dims(self.TF_allAmp, axis=0))
                            self.TF_allAmp = self.TF_allAmp * tf.exp(
                                -1j * tf.cast(
                                    tf.angle(self.TF_allAmp_3dft[
                                        self.mid3D[2], self.mid3D[1],
                                        self.mid3D[0]]), tf.complex64))
                            # Global Phases need to be adjusted at this step!  Use the zero frequency
                    #print('Global phase: '+str(tf.exp(1j*tf.cast(tf.angle(self.TF_allAmp[self.mid3D[0],self.mid3D[1],self.mid3D[2]]), tf.complex64).eval()))

                    with tf.name_scope(
                            'Sum_Amps'
                    ):  # Normalize amplitude by condenser intensity
                        self.TF_allSumAmp = self.TF_allSumAmp + self.TF_allAmp  #/ self.intensityweights[pillu];  # Superpose the Amplitudes
                    # print('Current illumination angle # is: ' + str(pillu))

        # Normalize the image such that the values do not depend on the fineness of
        # the source grid.
        self.TF_allSumAmp = self.TF_allSumAmp / self.Nc  #/tf.cast(tf.reduce_max(tf.abs(self.TF_allSumAmp)), tf.complex64)

        # Following is the normalization according to Martin's book. It ensures
        # that a transparent specimen is imaged with unit intensity.
        # normfactor=abs(Po).^2.*abs(Ic); We do not use it, because it leads to
        # divide by zero for a dark-field system. Instead, through normalizations
        # performed above, we ensure that the image of a point under matched
        # illumination is unity. The brightness of all the other configurations is
        # relative to this benchmark.
        #

        # negate padding
        if self.is_padding:
            self.TF_allSumAmp = self.TF_allSumAmp[:, self.Nx // 2 -
                                                  self.Nx // 4:self.Nx // 2 +
                                                  self.Nx // 4, self.Ny // 2 -
                                                  self.Ny // 4:self.Ny // 2 +
                                                  self.Ny // 4]

        return self.TF_allSumAmp
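The slice-wise forward model described in the docstring (refract at each slice, then propagate with the angular-spectrum kernel) reduces to a short beam-propagation loop. A minimal NumPy sketch with assumed array shapes: `field` is a 2-D complex field, `obj_phase` is (Nz, Nx, Ny), `propagator_k` is the Fourier-domain slice propagator, and `refr_effect` is the scalar refraction prefactor.

import numpy as np

def bpm_forward(field, obj_phase, propagator_k, refr_effect):
    # Slice-wise multiple scattering: refraction step, then diffraction step per slice.
    for pz in range(obj_phase.shape[0]):
        field = field * np.exp(1j * refr_effect * obj_phase[pz])    # refraction
        field = np.fft.ifft2(np.fft.fft2(field) * propagator_k)     # propagation to the next slice
    return field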
Example #23
 def test_FFT3D(self):
     # only defined for gpu
     if DEVICE == GPU:
         t = tf.fft3d(self.random(3, 4, 5, complex=True))
         self.check(t)
Example #24
                    np.cos(2 * math.pi * LL)))

Lk = tf.placeholder("complex64", shape=(N, N, N))
Vk = tf.placeholder("complex64", shape=(N, N, N))
nkplace = tf.placeholder("complex64", shape=(N, N, N))
nrplace = tf.placeholder("complex64", shape=(N, N, N))
nk = tf.Variable(nkplace)
nr = tf.Variable(nrplace)
''' Control time step'''
dt = 0.0001
dtr = tf.constant(dt, dtype=tf.complex64)

Tr = tf.constant(Tr1, dtype=tf.complex64)
'''Update rule '''
SS = tf.log(nr / (1 - nr))
Sk = tf.fft3d(SS)
dnkdt = Lk * (Vk * nk / T0 + Tr * (Sk))
nk1 = nk + dnkdt * dtr
nr_ = tf.ifft3d(nk1)

step1 = tf.group(
    nk.assign(nk1),
    nr.assign(nr_),
)
'''Calculate order parameter '''


def make_kernel(a):
    """Transform a 3D array into a convolution kernel"""
    a = np.asarray(a)
    a = a.reshape(list(a.shape) + [1, 1])
Example #25
def phase_corr(ashape, bshape, filter_shape=None):
    """Construct a TensorFlow op to compute phase correlation.

    Parameters
    ----------
    ashape : tuple of ints
        Shape of input array.
    bshape : tuple of ints
        Shape of input array.
    filter_shape : tuple
        Shape of filter array. Optional. If not given, the window filter is
        not applied.

    Returns
    -------
    phase_corr : tf.Operation
        The op to be run to compute phase correlation. When running the op,
        values for the following placeholders must be fed: `input/a_ph:0`,
        `input/b_ph:0` and, if `filter_shape` was given, `input/filter_ph:0`.
    """
    my_filter_t = None

    with tf.name_scope('input'):
        aph = tf.placeholder(dtype=tf.uint16, shape=ashape, name='a_ph')
        bph = tf.placeholder(dtype=tf.uint16, shape=bshape, name='b_ph')

        if filter_shape is not None:
            my_filter_t = tf.placeholder(dtype=tf.float32,
                                         shape=filter_shape,
                                         name='filter_ph')

        at = tf.to_float(aph)
        bt = tf.to_float(bph)

    with tf.name_scope('subtract_mean'):
        at -= tf.reduce_mean(at)
        bt -= tf.reduce_mean(bt)

    if filter_shape is not None:
        with tf.name_scope('window_filter'):
            at = at * my_filter_t
            bt = bt * my_filter_t

    with tf.name_scope('FFT'):
        ac = tf.cast(at, tf.complex64, name='to_complex')
        bc = tf.cast(bt, tf.complex64, name='to_complex')

        aft = tf.fft3d(ac)
        bft = tf.fft3d(bc)

    with tf.name_scope('cross_power_spectrum'):
        prod = aft * tf.conj(bft)
        prodnorm = tf.abs(prod, name='norm')
        ratio = prod / tf.cast(prodnorm, tf.complex64, name='to_complex')

    with tf.name_scope('phase_correlation'):
        ifft = tf.ifft3d(ratio)
        phase_corr = tf.square(tf.real(ifft)) + tf.square(tf.imag(ifft))
        phase_corr = tf.sqrt(phase_corr)

    return phase_corr
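A hedged usage sketch for `phase_corr`: feed two equally shaped stacks through the named placeholders (no window filter here) and read the relative shift off the correlation peak. The array sizes are illustrative.

import numpy as np
import tensorflow as tf

a = np.random.randint(0, 2**12, size=(16, 64, 64)).astype(np.uint16)
b = np.roll(a, shift=(2, 5, -3), axis=(0, 1, 2))      # shifted copy of a

pc_op = phase_corr(a.shape, b.shape)                  # filter_shape omitted, so no filter_ph feed
with tf.Session() as sess:
    pc = sess.run(pc_op, feed_dict={'input/a_ph:0': a, 'input/b_ph:0': b})
peak = np.unravel_index(np.argmax(pc), pc.shape)      # peak index encodes the relative shift
print(peak)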
Example #26
def _ftconvolve(tensor1, tensor2):
    return tf.real(tf.ifft3d(tf.fft3d(tensor1) * tf.fft3d(tensor2)))
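The one-liner above is the circular-convolution theorem. A quick NumPy illustration of the same identity (a sketch, not part of the original code):

import numpy as np

a = np.random.rand(8, 8, 8)
b = np.random.rand(8, 8, 8)
# Periodic (circular) 3-D convolution of a and b via pointwise multiplication in Fourier space.
conv = np.real(np.fft.ifftn(np.fft.fftn(a) * np.fft.fftn(b)))
print(conv.shape)  # (8, 8, 8)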
Example #27
    def inference(self, use_bias=False):
        self.config['ksize'] = 7
        self.config['stride'] = 1
        self.config['use_bias'] = True

        with tf.variable_scope('sen1_1_1'):   # sen1: 1,2
            sen1_x_1 = self.in_sen1[..., :2]
            sen1_abs_1 = self.in_sen1[..., :2]
            sen1_1_complex = tf.complex(tf.expand_dims(sen1_abs_1[..., 0], axis=-1), tf.expand_dims(sen1_abs_1[..., 1], axis=-1))
            sen1_abs_1 = tf.abs(sen1_1_complex)
            sen1_angle_1 = tf.cast(tf.angle(sen1_1_complex), dtype=dtype)
            sen1_fft_1 = tf.fft3d(sen1_1_complex)
            sen1_fft_abs_1 = tf.abs(sen1_fft_1)
            sen1_fft_angle_1 = tf.cast(tf.angle(sen1_fft_1), dtype=dtype)
            sen1_x_1 = tf.concat([sen1_x_1, sen1_abs_1, sen1_angle_1, sen1_fft_abs_1, sen1_fft_angle_1], axis=-1)
            sen1_x_1 = batch_normal(sen1_x_1, training=self.is_training)
            with tf.variable_scope('block2'):
                sen1_x_1 = self.cnns_stack_block_adj(sen1_x_1)  # 16 * 16 * (256+4)=260
        with tf.variable_scope('sen1_1_2'):   # sen1: 3,4
            sen1_x_2 = self.in_sen1[..., 2:4]
            sen1_abs_2 = self.in_sen1[..., 2:4]
            sen1_2_complex = tf.complex(tf.expand_dims(sen1_abs_2[..., 0], axis=-1), tf.expand_dims(sen1_abs_2[..., 1], axis=-1))
            sen1_abs_2 = tf.abs(sen1_2_complex)
            sen1_angle_2 = tf.cast(tf.angle(sen1_2_complex), dtype=dtype)
            sen1_fft_2 = tf.fft3d(sen1_2_complex)
            sen1_fft_abs_2 = tf.abs(sen1_fft_2)
            sen1_fft_angle_2 = tf.cast(tf.angle(sen1_fft_2), dtype=dtype)
            sen1_x_2 = tf.concat([sen1_x_2, sen1_abs_2, sen1_angle_2, sen1_fft_abs_2, sen1_fft_angle_2], axis=-1)
            sen1_x_2 = batch_normal(sen1_x_2, training=self.is_training)
            with tf.variable_scope('block2'):
                sen1_x_2 = self.cnns_stack_block_adj(sen1_x_2)  # 16 * 16 * (256+4)=260
        with tf.variable_scope('sen1_1_3'):   # sen1: 5,6
            sen1_x_3 = self.in_sen1[..., 4:6]
            sen1_abs_3 = self.in_sen1[..., 4:6]
            sen1_3_complex = tf.complex(tf.expand_dims(sen1_abs_3[..., 0], axis=-1), tf.expand_dims(sen1_abs_3[..., 1], axis=-1))
            sen1_abs_3 = tf.abs(sen1_3_complex)
            sen1_angle_3 = tf.cast(tf.angle(sen1_3_complex), dtype=dtype)
            sen1_fft_3 = tf.fft3d(sen1_3_complex)
            sen1_fft_abs_3 = tf.abs(sen1_fft_3)
            sen1_fft_angle_3 = tf.cast(tf.angle(sen1_fft_3), dtype=dtype)
            sen1_x_3 = tf.concat([sen1_x_3, sen1_abs_3, sen1_angle_3, sen1_fft_abs_3, sen1_fft_angle_3], axis=-1)
            sen1_x_3 = batch_normal(sen1_x_3, training=self.is_training)
            with tf.variable_scope('block2'):
                sen1_x_3 = self.cnns_stack_block_adj(sen1_x_3)  # 16 * 16 * (256+2)=258
        with tf.variable_scope('sen1_1_4'):    # sen1: 7,8
            sen1_x_4 = self.in_sen1[...,6:]
            sen1_abs_4 = self.in_sen1[..., 6:]
            sen1_4_complex = tf.complex(tf.expand_dims(sen1_abs_4[..., 0], axis=-1), tf.expand_dims(sen1_abs_4[..., 1], axis=-1))
            sen1_abs_4 = tf.abs(sen1_4_complex)
            sen1_angle_4 = tf.cast(tf.angle(sen1_4_complex), dtype=dtype)
            sen1_fft_4 = tf.fft3d(sen1_4_complex)
            sen1_fft_abs_4 = tf.abs(sen1_fft_4)
            sen1_fft_angle_4 = tf.cast(tf.angle(sen1_fft_4), dtype=dtype)
            sen1_x_4 = tf.concat([sen1_x_4, sen1_abs_4, sen1_angle_4, sen1_fft_abs_4, sen1_fft_angle_4], axis=-1)
            sen1_x_4 = batch_normal(sen1_x_4, training=self.is_training)
            with tf.variable_scope('block2'):
                sen1_x_4 = self.cnns_stack_block_adj(sen1_x_4)  # 16 * 16 * (256+2)=258

        with tf.variable_scope('sen2_1_1'):   # sen2: 1,2,3
            sen2_x_1 = self.in_sen2[..., :3]
            sen2_x_1 = batch_normal(sen2_x_1, training=self.is_training)
            with tf.variable_scope('block2'):
                sen2_x_1 = self.cnns_stack_block_adj(sen2_x_1)  # 16 * 16 * 64
        with tf.variable_scope('sen2_1_2'):   # sen2: 4,5,6
            sen2_x_2 = self.in_sen2[..., 3:6]
            sen2_x_2 = batch_normal(sen2_x_2, training=self.is_training)
            with tf.variable_scope('block2'):
                sen2_x_2 = self.cnns_stack_block_adj(sen2_x_2)  # 16 * 16 * 64
        with tf.variable_scope('sen2_1_3'):   # sen2: 7,8
            sen2_x_3 = self.in_sen2[..., 6:8]
            sen2_x_3 = batch_normal(sen2_x_3, training=self.is_training)
            with tf.variable_scope('block2'):
                sen2_x_3 = self.cnns_stack_block_adj(sen2_x_3)  # 16 * 16 * 64
        with tf.variable_scope('sen2_1_4'):   # sen2: 9,10
            sen2_x_4 = self.in_sen2[..., 8:]
            sen2_x_4 = batch_normal(sen2_x_4, training=self.is_training)
            with tf.variable_scope('block2'):
                sen2_x_4 = self.cnns_stack_block_adj(sen2_x_4)  # 16 * 16 * 64

        sen1_x_1 = tf.concat([sen1_x_1, sen1_x_2, sen1_x_3, sen1_x_4], axis=-1)  # 16 * 16 * (260+258+258+259)=1035
        sen2_x_1 = tf.concat([sen2_x_1, sen2_x_2, sen2_x_3, sen2_x_4], axis=-1)  # 16 * 16 * 266

        with tf.variable_scope('sen1_2'):
            with tf.variable_scope('block1'):
                sen1_x = self.cnns_stack_block_adj_noscale(sen1_x_1)
            with tf.variable_scope('block2'):
                sen1_x = self.cnns_stack_block_adj(sen1_x)  # 8 * 8 * (512+1035)=1547
            with tf.variable_scope('block3'):
                sen1_x = self.cnns_stack_block_adj_noscale(sen1_x)
        with tf.variable_scope('sen2_2'):
            with tf.variable_scope('block1'):
                sen2_x = self.cnns_stack_block_adj_noscale(sen2_x_1)
            with tf.variable_scope('block2'):
                sen2_x = self.cnns_stack_block_adj(sen2_x)  # 8 * 8 * (512+266)=778
            with tf.variable_scope('block3'):
                sen2_x = self.cnns_stack_block_adj_noscale(sen2_x)

        x = tf.concat([sen1_x, sen2_x], axis=-1)  # 8 * 8 * (1547+778)=2325
        x_aux_2 = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool_aux2")  # 1 * 1 * 3349
        dims = x_aux_2.get_shape()[-1]
        x_aux_2 = tf.reshape(x_aux_2, shape=(-1, dims))
        with tf.variable_scope('fc1_aux2'):
            self.config['fc_units_out'] = 526
            x_aux_2 = self.fc(x_aux_2)
            x_aux_2 = relu(x_aux_2)
        with tf.variable_scope('fc2_aux2'):
            self.config['fc_units_out'] = 1024
            x_aux_2 = self.fc(x_aux_2)
            x_aux_2 = relu(x_aux_2)
        x_aux_2 = tf.nn.dropout(x_aux_2, self.keep_drop)
        with tf.variable_scope('fc_proj_aux2'):
            self.config['fc_units_out'] = self.config['label_class']
            x_aux_2 = self.fc(x_aux_2)
        self.logit_aux2 = x_aux_2

        with tf.variable_scope('hub_1'):
            with tf.variable_scope('block1'):
                x = self.cnns_stack_block_adj_noscale(x)
            with tf.variable_scope('block2'):
                x = self.cnns_stack_block_adj(x)   # 4 * 4 * (1024+2325)=3349
            with tf.variable_scope('block3'):
                x = self.cnns_stack_block_adj_noscale(x)
        x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")  # 1 * 1 * 3349
        dims = x.get_shape()[-1]
        x = tf.reshape(x, shape=(-1, dims))
        with tf.variable_scope('fc1'):
            self.config['fc_units_out'] = 1024
            x = self.fc(x)
            x = relu(x)
        x = tf.nn.dropout(x, self.keep_drop)
        with tf.variable_scope('fc_proj_hub'):
            self.config['fc_units_out'] = self.config['label_class']
            x = self.fc(x)
        self.logit = x
        self.output = softmax(x)
Example #28
    def defineBaseVariables(self, varDict, gpu):
        # Tensorflow variables specified here.
        with tf.device('/gpu:%d' % gpu):
            self._modulus = tf.constant(varDict['modulus'],
                                        dtype=tf.complex64,
                                        name='mod_measured')
            self._support = tf.Variable(varDict['support'],
                                        dtype=tf.complex64,
                                        name='sup')
            self._support_comp = tf.Variable(1. - varDict['support'],
                                             dtype=tf.complex64,
                                             name='Support_comp')
            self._cImage = tf.Variable(varDict['cImage'],
                                       dtype=tf.complex64,
                                       name='Image')
            self._buffImage = tf.Variable(varDict['cImage'],
                                          dtype=tf.complex64,
                                          name='buffImage')
            self._beta = tf.constant(varDict['beta'],
                                     dtype=tf.complex64,
                                     name='beta')
            self._probSize = self._cImage.shape
            self._thresh = tf.placeholder(dtype=tf.float32, name='thresh')
            self._sigma = tf.placeholder(dtype=tf.float32, name='sigma')
            self._neg = tf.constant(-0.5, dtype=tf.float32)
            self._intermedFFT = tf.Variable(tf.zeros(self._cImage.shape,
                                                     dtype=tf.complex64),
                                            name='intermedFFT')
            self._intermedInt = tf.Variable(tf.zeros(self._cImage.shape,
                                                     dtype=tf.complex64),
                                            name='intermedInt')

            with tf.name_scope('Support'):
                self._supproject = tf.assign(self._cImage,
                                             self._cImage * self._support,
                                             name='supProject')
            # These are defined only if high-energy phasing is required.
            if 'bin_left' in varDict.keys():
                bL = varDict['bin_left']
                sh = bL.shape
                self._binL = tf.constant(bL.reshape(sh[0], sh[1], 1).repeat(
                    varDict['modulus'].shape[-1], axis=2),
                                         dtype=tf.complex64,
                                         name='binL')
                self._binR = tf.constant(bL.T.reshape(sh[1], sh[0], 1).repeat(
                    varDict['modulus'].shape[-1], axis=2),
                                         dtype=tf.complex64,
                                         name='binR')
                self._scale = tf.constant(varDict['scale'],
                                          dtype=tf.complex64,
                                          name='scale')
                self._binned = tf.Variable(tf.zeros(self._modulus.shape,
                                                    dtype=tf.complex64),
                                           name='binned')
                self._expanded = tf.Variable(tf.zeros(self._support.shape,
                                                      dtype=tf.complex64),
                                             name='expanded')
                self._scaled = tf.Variable(tf.zeros(self._modulus.shape,
                                                    dtype=tf.complex64),
                                           name='scaled')

                with tf.name_scope('highEnergy'):
                    self._binThis = tf.assign(
                        self._binned,
                        tf.transpose(
                            tf.matmul(
                                tf.matmul(
                                    tf.transpose(self._binL, [2, 0, 1]),
                                    tf.transpose(
                                        tf.cast(tf.square(
                                            tf.abs(self._intermedFFT)),
                                                dtype=tf.complex64),
                                        [2, 0, 1])),
                                tf.transpose(self._binR, [2, 0, 1])),
                            [1, 2, 0]),
                        name='Binning')
                    self._scaleThis = tf.assign(self._scaled,
                                                tf.divide(
                                                    self._modulus,
                                                    tf.sqrt(self._binned)),
                                                name='Scaling')
                    self._expandThis = tf.assign(
                        self._expanded,
                        tf.transpose(
                            tf.matmul(
                                tf.matmul(
                                    tf.transpose(self._binR, [2, 0, 1]),
                                    tf.transpose(self._scaled, [2, 0, 1])),
                                tf.transpose(self._binL, [2, 0, 1])),
                            [1, 2, 0]),
                        name='Expansion')
                    self._HEImgUpdate = tf.assign(
                        self._cImage,
                        tf.multiply(
                            self._support,
                            tf.ifft3d(self._scale * tf.multiply(
                                self._expanded, self._intermedFFT))),
                        name='HEImgUpdate')
                    self._HEImgCorrect = tf.assign(
                        self._cImage,
                        self._cImage + tf.multiply(
                            self._support_comp,
                            self._buffImage - self._beta * self._cImage),
                        name='HEImgCorrect')

            else:  # regular phasing
                with tf.name_scope('ER'):
                    self._modproject = tf.assign(
                        self._cImage,
                        tf.ifft3d(
                            tf.divide(self._modulus, tf.sqrt(
                                self._intermedInt)) * tf.fft3d(self._cImage)),
                        name='modProject')
        return
    def initializeGaussianPCF(self,
                              vardict,
                              array_shape,
                              gpu,
                              learning_rate=1.e-1):

        x, y, z = np.meshgrid(list(range(array_shape[0])),
                              list(range(array_shape[1])),
                              list(range(array_shape[2])))
        y = y.max() - y

        x = (fftshift(x - x.mean())).reshape(1, -1)
        y = (fftshift(y - y.mean())).reshape(1, -1)
        z = (fftshift(z - z.mean())).reshape(1, -1)
        pts = np.concatenate((x, y, z), axis=0)

        if 'initial_guess' not in vardict.keys():
            l1p, l2p, l3p, psip, thetap, phip = 2., 2., 2., 0., 0., 0.
        else:
            l1p, l2p, l3p, psip, thetap, phip = tuple(vardict['initial_guess'])

        with tf.device('/gpu:%d' % gpu):
            with tf.name_scope('GaussianPCF'):
                self._roll = [n // 2 for n in self._probSize
                              ]  # defined in GPUModule_Base

                with tf.name_scope('Constants'):
                    self._coherentEstimate = tf.Variable(
                        tf.zeros(self._cImage.shape, dtype=tf.float32),
                        name='coherentEstimate')
                    self._intensity = tf.constant(vardict['modulus']**2,
                                                  dtype=tf.float32,
                                                  name='Measurement')
                    self._q = tf.constant(pts,
                                          dtype=tf.float32,
                                          name='domainPoints')
                    self._v0 = tf.constant(np.array([1., 0.,
                                                     0.]).reshape(-1, 1),
                                           dtype=tf.float32)
                    self._v1 = tf.constant(np.array([0., 1.,
                                                     0.]).reshape(-1, 1),
                                           dtype=tf.float32)
                    self._v2 = tf.constant(np.array([0., 0.,
                                                     1.]).reshape(-1, 1),
                                           dtype=tf.float32)
                    self._nskew0 = tf.constant(np.array([[0., 0., 0.],
                                                         [0., 0., -1.],
                                                         [0., 1., 0.]]),
                                               dtype=tf.float32)
                    self._nskew1 = tf.constant(np.array([[0., 0., 1.],
                                                         [0., 0., 0.],
                                                         [-1., 0., 0.]]),
                                               dtype=tf.float32)
                    self._nskew2 = tf.constant(np.array([[0., -1., 0.],
                                                         [1., 0., 0.],
                                                         [0., 0., 0.]]),
                                               dtype=tf.float32)
                    self._one = tf.constant(1., dtype=tf.float32)
                    self._neg = tf.constant(-0.5, dtype=tf.float32)
                    self._twopi = tf.constant((2 * np.pi)**(3. / 2.),
                                              dtype=tf.float32)
                    self._I = tf.eye(3)

                with tf.name_scope('Parameters'):
                    self._l1 = tf.Variable(l1p,
                                           dtype=tf.float32,
                                           name='Lambda1')  #
                    self._l2 = tf.Variable(
                        l2p, dtype=tf.float32, name='Lambda2'
                    )  # Sqrt of eigenvalues of covariance matrix
                    self._l3 = tf.Variable(l3p,
                                           dtype=tf.float32,
                                           name='Lambda3')  #
                    self._psi = tf.Variable(
                        psip, dtype=tf.float32,
                        name='Psi')  # Rotation angle of eigenbasis
                    self._theta = tf.Variable(
                        thetap, dtype=tf.float32,
                        name='Theta')  # Polar angle of rotation axis
                    self._phi = tf.Variable(
                        phip, dtype=tf.float32,
                        name='Phi')  # Azimuth angle of rotation axis

                with tf.name_scope('Auxiliary'):
                    self._FreqSupportMask = tf.placeholder(
                        dtype=tf.float32, shape=self._intensity.shape)
                    self._mD = tf.diag([self._l1, self._l2, self._l3])
                    self._n0 = tf.sin(self._theta) * tf.cos(self._phi)
                    self._n1 = tf.sin(self._theta) * tf.sin(self._phi)
                    self._n2 = tf.cos(self._theta)
                    self._n = self._n0 * self._v0 + self._n1 * self._v1 + self._n2 * self._v2
                    self._nskew = self._n0 * self._nskew0 + self._n1 * self._nskew1 + self._n2 * self._nskew2
                    self._R = tf.cos( self._psi )*self._I +\
                        tf.sin( self._psi )*self._nskew +\
                        ( self._one - tf.cos( self._psi ) )*tf.matmul( self._n, tf.transpose( self._n ) )
                    self._C = tf.matmul(
                        self._R,
                        tf.matmul(tf.matmul(self._mD, self._mD),
                                  tf.transpose(self._R)))

                with tf.name_scope('Blurring'):
                    self._pkfft = tf.Variable(np.zeros(self._probSize),
                                              dtype=tf.complex64,
                                              name='pkfft')

                    self._blurKernel = tf.reshape(
                        tf.exp(self._neg * tf.reduce_sum(
                            self._q * tf.matmul(self._C, self._q), axis=0)),
                        shape=self._coherentEstimate.shape) * (
                            self._l1 * self._l2 * self._l3) / self._twopi

                    self._tf_intens_f = tf.fft3d(
                        tf.cast(self._coherentEstimate, dtype=tf.complex64))
                    self._tf_blur_f = tf.fft3d(
                        tf.cast(self._blurKernel, dtype=tf.complex64))
                    self._tf_prod_f = self._tf_intens_f * self._tf_blur_f
                    self._imgBlurred = tf.abs(tf.ifft3d(self._tf_prod_f))

                with tf.name_scope('Optimizer'):
                    self._var_list = [
                        self._l1, self._l2, self._l3, self._psi, self._theta,
                        self._phi
                    ]

                    self._poissonNLL = tf.reduce_mean(
                        self._FreqSupportMask *
                        (self._imgBlurred -
                         self._intensity * tf.log(self._imgBlurred)))
                    self._poissonOptimizer = tf.train.AdagradOptimizer(
                        learning_rate=vardict['pcc_learning_rate'],
                        name='poissonOptimize')
                    self._trainPoisson = self._poissonOptimizer.minimize(
                        self._poissonNLL, var_list=self._var_list)
                    self._currentGradients = [
                        n[0] for n in self._poissonOptimizer.compute_gradients(
                            self._poissonNLL, var_list=self._var_list)
                    ]

#                    self._gaussNLL = tf.reduce_mean(
#                        self._FreqSupportMask * ( tf.sqrt( self._imgBlurred ) - tf.sqrt( self._intensity ) )**2
#                    )
#                    self._gaussOptimizer = tf.train.AdagradOptimizer( learning_rate=vardict[ 'pcc_learning_rate' ], name='gaussOptimize' )
#                    self._trainGauss = self._gaussOptimizer.minimize( self._gaussNLL, var_list=self._var_list )
#                    self._currentGradients = [
#                        n[0] for n in self._gaussOptimizer.compute_gradients( self._gaussNLL, var_list=self._var_list )
#                    ]

                with tf.name_scope('Preparation'):
                    self._getCoherentEstimate = tf.assign(
                        self._coherentEstimate,
                        tf.cast(self._intermedInt, dtype=tf.float32),
                        name='getCoherentEstimate')
                    self._getPCFKernelFFT = tf.assign(
                        self._pkfft,
                        tf.fft3d(tf.cast(self._blurKernel,
                                         dtype=tf.complex64)),
                        name='getPCFKernelFFT')

                with tf.name_scope('Convolution'):
                    self._convolveWithCoherentEstimate = tf.assign(
                        self._intermedInt,
                        tf.cast(tf.abs(
                            tf.ifft3d(self._pkfft *
                                      tf.fft3d(self._intermedInt))),
                                dtype=tf.complex64),
                        name='convolveWithCoherentEstimate')

        self._progress = []
Example #30
 def fft2D(self, V):
     ''' Gather view slices in the FFT domain and return them. '''
     VF = tf.fft3d(V)
     # 2D slices at planes locations
     # symmetry x nviews x xdim x ydim x 3dcoord
     return tf.gather_nd(VF, self._planes)