def fresnel_propagate(wavefront_real, wavefront_imag, energy_ev, psize_cm,
                      dist_cm):
    lmbda_nm = 1240. / energy_ev
    lmbda_cm = 0.000124 / energy_ev
    psize_nm = psize_cm * 1e7
    dist_nm = dist_cm * 1e7

    wavefront = tf.complex(wavefront_real, wavefront_imag)
    wave_shape = wavefront.get_shape().as_list()

    if dist_cm == 'inf':  # far-field propagation: plain Fourier transform
        wavefront = fftshift(tf.fft2d(wavefront))
    else:
        n = np.mean(wave_shape)
        z_crit_cm = (psize_cm * n)**2 / (lmbda_cm * n)
        # algorithm = 'TF' if dist_cm < z_crit_cm else 'IR'
        algorithm = 'TF'
        if algorithm == 'TF':
            h = get_kernel(dist_nm, lmbda_nm, [psize_nm, psize_nm], wave_shape)
            h = tf.convert_to_tensor(h, dtype=tf.complex64)
            wavefront = tf.ifft2d(ifftshift(fftshift(tf.fft2d(wavefront)) * h))
        else:
            h = get_kernel_ir(dist_nm, lmbda_nm, [psize_nm, psize_nm],
                              wave_shape)
            h = tf.convert_to_tensor(h, dtype=tf.complex64)
            wavefront = ifftshift(tf.ifft2d(fftshift(tf.fft2d(wavefront)) * h))

    return wavefront
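For context, the commented-out switch above chooses between the transfer-function ('TF') and impulse-response ('IR') propagators via the critical distance z_crit = (N*dx)^2 / (lambda*N) = N*dx^2/lambda. A minimal NumPy sketch of that criterion, with illustrative values only (not taken from the original code):

import numpy as np

# Sampling criterion for Fresnel propagation: below z_crit the
# transfer-function (TF) kernel is well sampled; beyond it the
# impulse-response (IR) kernel is usually preferred.
n = 512                           # grid size in pixels (assumed)
psize_cm = 1e-5                   # 0.1 um pixel size (assumed)
energy_ev = 5000.
lmbda_cm = 0.000124 / energy_ev   # same conversion as above
dist_cm = 10.
z_crit_cm = (psize_cm * n) ** 2 / (lmbda_cm * n)
algorithm = 'TF' if dist_cm < z_crit_cm else 'IR'
print(z_crit_cm, algorithm)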
Example #2
        def At_func(input_img, kernel, pinhole):
            input_img = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(input_img)
            kernel = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(kernel)
            pinhole = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(pinhole)

            conj_input_img = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(input_img)
            conj_input_img = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.ifft2d(fftshift2d_tf(x))))(conj_input_img)
            plane_field_ft = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(conj_input_img)

            plane_field_ft = tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, 1))(plane_field_ft)
            plane_field_ft = tf.keras.layers.Lambda(lambda x: tf.tile(x, (1, Nz, 1, 1)))(plane_field_ft)

            conj_kernel = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(kernel)
            vol_field_ft = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([conj_kernel, plane_field_ft])

            ft_pinhole = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.fft2d(fftshift2d_tf(x))))(pinhole)
            point_field = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([conj_kernel, ft_pinhole])

            point_field_ft = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.ifft2d(fftshift2d_tf(x))))(point_field)

            volume_field = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.fft2d(fftshift2d_tf(tf.math.conj(x)))))(vol_field_ft)

            conj_point_field_ft = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(point_field_ft)
            conj_volume_field = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(volume_field)

            field3d = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([conj_point_field_ft, conj_volume_field])

            return field3d
Example #3
def fft_cost(true, pred, conf, fft_weights=None):

    #loop over the color channels:
    cost = 0.
    true_fft_abssum = 0
    pred_fft_abssum = 0
    for i in range(3):

        slice_true = tf.slice(true, [0, 0, 0, i], [-1, -1, -1, 1])
        slice_pred = tf.slice(pred, [0, 0, 0, i], [-1, -1, -1, 1])

        slice_true = tf.squeeze(
            tf.complex(slice_true, tf.zeros_like(slice_true)))
        slice_pred = tf.squeeze(
            tf.complex(slice_pred, tf.zeros_like(slice_pred)))

        true_fft = tf.fft2d(slice_true)
        pred_fft = tf.fft2d(slice_pred)

        if 'fft_emph_highfreq' in conf:
            abs_diff = tf.multiply(tf.abs(true_fft - pred_fft), fft_weights)
            cost += tf.reduce_sum(tf.square(abs_diff)) / tf.to_float(
                tf.size(pred_fft))
        else:
            cost += tf.reduce_sum(
                tf.square(tf.abs(true_fft - pred_fft))) / tf.to_float(
                    tf.size(pred_fft))

        true_fft_abssum += tf.abs(true_fft)
        pred_fft_abssum += tf.abs(pred_fft)

    return cost, true_fft_abssum, pred_fft_abssum
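The fft_weights tensor used by the 'fft_emph_highfreq' branch is not constructed in this snippet. One plausible construction (an assumption, not the original authors' scheme) weights each frequency bin by its distance from DC:

import numpy as np
import tensorflow as tf

# Hypothetical high-frequency emphasis weights for fft_cost: the weight grows
# with distance from the zero-frequency bin, which tf.fft2d places at [0, 0].
h, w = 64, 64
fy = np.minimum(np.arange(h), h - np.arange(h))[:, None]
fx = np.minimum(np.arange(w), w - np.arange(w))[None, :]
radius = np.sqrt(fy ** 2 + fx ** 2)
fft_weights = tf.constant(radius / radius.max(), dtype=tf.float32)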
Example #4
        def A_func(input_img, kernel, pinhole, type):
            realimg = input_img
            input_img = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(input_img)
            kernel = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(kernel)
            pinhole = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.complex64))(pinhole)

            ft_pinhole = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.fft2d(fftshift2d_tf(x))))(pinhole)

            conj_kernel = tf.keras.layers.Lambda(lambda x: tf.math.conj(x))(kernel)
            point_field = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([conj_kernel, ft_pinhole])
            plane_wave = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.ifft2d(fftshift2d_tf(x))))(point_field)

            vol_field = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([input_img, plane_wave])
            vol_field_ft = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.fft2d(fftshift2d_tf(x))))(vol_field)

            plane_field_ft = tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([vol_field_ft, kernel])
            plane_field_ft = tf.keras.layers.Lambda(lambda x: tf.reduce_sum(x, axis=-3))(plane_field_ft)

            field2d = tf.keras.layers.Lambda(lambda x: ifftshift2d_tf(tf.ifft2d(fftshift2d_tf(x))))(plane_field_ft)

            if type == 0:
                return tf.keras.layers.Lambda(lambda x: 2 * tf.math.real(x))(field2d)
            else:
                holo = Add()([tf.keras.layers.Lambda(lambda x: 2 * tf.math.real(x))(field2d),
                              tf.keras.layers.Lambda(lambda x: tf.abs(x)**2)(field2d)])
                return tf.keras.layers.Lambda(lambda x: x / (1e-10 + tf.math.reduce_max(x)))(holo)
Example #5
        def stage(x, v, I, kernel, ifftkernel):
            squarefftres = SquareFFTfunc(x, kernel)
            fftres = FFTfunc(x, kernel)
            diff = tf.keras.layers.Lambda(
                lambda x: tf.math.subtract(x[0], x[1]))([squarefftres, I])
            diff = scalelayer(diff)
            diff = tf.keras.layers.Lambda(
                lambda x: tf.math.multiply(x[0], x[1]))([diff, fftres])
            diff = tf.keras.layers.Lambda(
                lambda x: tf.tile(x, (1, 128, 1, 1)))(diff)

            fft2data = tf.keras.layers.Lambda(lambda x: tf.fft2d(x))(diff)
            kernel = tf.keras.layers.Lambda(lambda x: tf.fft2d(x))(ifftkernel)
            temp_mul = tf.keras.layers.Lambda(
                lambda x: tf.math.multiply(x[0], x[1]))([fft2data, kernel])
            temp_mul = tf.keras.layers.Lambda(lambda x: tf.ifft2d(x))(temp_mul)

            diff_xv = tf.keras.layers.Lambda(
                lambda x: tf.math.subtract(x[0], x[1]))([v, x])
            diff_xv = scalelayer(diff_xv)

            x_next = tf.keras.layers.Lambda(lambda x: tf.math.add(x[0], x[1]))(
                [x, diff_xv])
            x_next = tf.keras.layers.Lambda(lambda x: tf.math.add(x[0], x[1]))(
                [x_next, temp_mul])
            v_next = denoiseblock(x_next)

            return x_next, v_next
Example #6
def est_kernel(blurs, deblurs, nstd=2, ksz=27):
    assert ksz % 2 == 1
    hksz = (ksz - 1) // 2
    if nstd == 0:
        nstd = 1e-6

    blurs = tf.cast(tf.transpose(blurs, [0, 3, 1, 2]), tf.complex64)
    deblurs = tf.cast(tf.transpose(deblurs, [0, 3, 1, 2]), tf.complex64)

    fft_blurs = tf.fft2d(blurs)
    fft_deblurs = tf.fft2d(deblurs)

    numerator = fft_deblurs * tf.conj(fft_blurs)
    denominator = tf.abs(fft_blurs)**2 + (nstd / 255.)**2.
    out = tf.real(tf.ifft2d(numerator / denominator))  # 2-D inverse to match tf.fft2d above
    out = tf.transpose(out, [0, 2, 3, 1])

    out1 = tf.concat([out[:, -hksz:, -hksz:], out[:, :hksz + 1, -hksz:]],
                     axis=1)
    out2 = tf.concat([out[:, -hksz:, :hksz + 1], out[:, :hksz + 1, :hksz + 1]],
                     axis=1)
    kernels = tf.concat([out1, out2], axis=2)
    kernels = kernels / tf.reduce_mean(kernels, axis=[1, 2], keep_dims=True)

    return kernels
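est_kernel is a regularized (Wiener-style) division in the frequency domain: it recovers the kernel k with deblur ~ k circularly convolved with blur as ifft2(fft2(deblur) * conj(fft2(blur)) / (|fft2(blur)|^2 + (nstd/255)^2)), then crops and renormalizes it. A single-channel NumPy check of that identity:

import numpy as np

# If b = ifft2(fft2(a) * fft2(k)) (circular convolution), then
# fft2(b) * conj(fft2(a)) / (|fft2(a)|^2 + eps) ~= fft2(k).
rng = np.random.RandomState(0)
a = rng.rand(32, 32)
k = np.zeros((32, 32))
k[0, 0], k[0, 1] = 0.6, 0.4
b = np.real(np.fft.ifft2(np.fft.fft2(a) * np.fft.fft2(k)))
est = np.fft.fft2(b) * np.conj(np.fft.fft2(a))
est /= np.abs(np.fft.fft2(a)) ** 2 + 1e-12
print(np.allclose(np.real(np.fft.ifft2(est)), k, atol=1e-6))  # expect True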
Example #7
    def get_loss(self, gt, pred, type='mse'):
        '''
        Supported types:
        'mse', 'inverse_mse', 'fft_mse', 'l1_loss'
        'perceptual', 'texture'
        'adv', 'cycle_adv'
        '''
        if type == 'mse': # See SRCNN. MSE is very simple loss function.
            gt = self._preprocess(gt)
            pred = self._preprocess(pred)
            return self._mse(gt, pred)
        elif type == 'inverse_mse':
            # gt is the input_lr image!!!
            gt = self._preprocess(gt)
            pred = self._preprocess(pred)
            pred = tf.image.resize_bilinear(pred, size=[tf.shape(gt)[1], tf.shape(gt)[2]])
            return self._mse(gt, pred)
        elif type == 'fft_mse':
            # check whether both gt and pred need preprocessing
            gt = self._preprocess(gt)
            pred = self._preprocess(pred)
            
            ### fft then mse
            gt = tf.cast(gt, tf.complex64)
            pred = tf.cast(pred, tf.complex64)

            gt = tf.fft2d(gt)
            pred = tf.fft2d(pred)

            return self._mse(gt, pred)
        elif type == 'l1_loss':
            gt = self._preprocess(gt)
            pred = self._preprocess(pred)

            return self._l1_loss(gt, pred)
        elif type == 'perceptual': # See Enhancenet.
            if not self.vgg_used:
                self.build_vgg_19(gt, pred)
            
            pl_pool5 = self._perceptual_loss()

            return pl_pool5
        elif type == 'texture': # See Enhancenet, Style transfer papers.
            if not self.vgg_used:
                self.build_vgg_19(gt, pred)
            
            tl_conv1 = self._texture_loss(self.vgg_19['conv1_1'])
            tl_conv2 = self._texture_loss(self.vgg_19['conv2_1'])
            tl_conv3 = self._texture_loss(self.vgg_19['conv3_1'])

            return tl_conv1, tl_conv2, tl_conv3
        elif type == 'adv':
            gt_logits, pred_logits = self.build_discriminator(gt, pred)
            
            adv_gen_loss, adv_disc_loss = self._adv_loss(gt_logits, pred_logits)

            return adv_gen_loss, adv_disc_loss
        else:
            print('%s is not implemented.' % (type))
Example #8
def spectral_loss_batch(feature1, ref):
    f1_trans = tf.transpose(feature1, (0, 3, 1, 2))
    f2_trans = tf.transpose(ref, (0, 3, 1, 2))

    m1 = tf.abs(tf.fft2d(tf.cast(f1_trans, tf.complex64)))
    m2 = tf.abs(tf.fft2d(tf.cast(f2_trans, tf.complex64)))

    loss = tf.reduce_mean(tf.square(m1 - m2), axis=(1, 2, 3))
    return loss
Example #9
def cross_domain_mse(y_true, y_pred):
    y_loss = tf.losses.mean_squared_error(y_true, y_pred)
    f_rep = tf.fft2d(tf.cast(y_true, 'complex64'))
    f_true = tf.concat([tf.real(f_rep), tf.imag(f_rep)], axis=-1)
    f_rep = tf.fft2d(tf.cast(y_pred, 'complex64'))
    f_pred = tf.concat([tf.real(f_rep), tf.imag(f_rep)], axis=-1)
    f_loss = tf.reduce_mean(
        tf.square(tf.math.tanh(f_true) - tf.math.tanh(f_pred)))

    return 1.0 * y_loss + 0.0 * f_loss  # frequency-domain term is currently weighted to zero
Example #10
    def call(self, inputs, mask=None):
        padded_inputs, adjustments, observations, blur_kernels, lambdas = inputs

        imagesize = tf.shape(padded_inputs)[1:3]
        kernelsize = tf.shape(blur_kernels)[1:3]
        padding = tf.floor_div(kernelsize, 2)

        mask_int = tf.ones(
            (imagesize[0] - 2 * padding[0], imagesize[1] - 2 * padding[1]),
            dtype=tf.float32)
        mask_int = tf.pad(mask_int,
                          [[padding[0], padding[0]], [padding[1], padding[1]]],
                          mode='CONSTANT')
        mask_int = tf.expand_dims(mask_int, 0)

        filters = tf.matmul(self.B, self.filter_weights)
        filters = tf.reshape(
            filters,
            [self.filter_size[0], self.filter_size[1], 1, self.nb_filters])

        filter_otfs = psf2otf(filters, imagesize)
        otf_term = tf.reduce_sum(tf.square(tf.abs(filter_otfs)), axis=1)

        k = tf.expand_dims(tf.transpose(blur_kernels, [1, 2, 0]), -1)
        k_otf = psf2otf(k, imagesize)[:, 0, :, :]

        if self.stage > 1:
            # boundary adjustment
            Kx_fft = tf.fft2d(tf.cast(padded_inputs[:, :, :, 0],
                                      tf.complex64)) * k_otf
            Kx = tf.to_float(tf.ifft2d(Kx_fft))
            Kx_outer = (1.0 - mask_int) * Kx
            y_inner = mask_int * observations[:, :, :, 0]
            y_adjusted = y_inner + Kx_outer
            dataterm_fft = tf.fft2d(tf.cast(y_adjusted,
                                            tf.complex64)) * tf.conj(k_otf)
        else:
            # standard data term
            observations_fft = tf.fft2d(
                tf.cast(observations[:, :, :, 0], tf.complex64))
            dataterm_fft = observations_fft * tf.conj(k_otf)

        lambdas = tf.expand_dims(lambdas, -1)

        adjustment_fft = tf.fft2d(
            tf.cast(adjustments[:, :, :, 0], tf.complex64))
        numerator_fft = tf.cast(lambdas,
                                tf.complex64) * dataterm_fft + adjustment_fft

        KtK = tf.square(tf.abs(k_otf))
        denominator_fft = lambdas * KtK + otf_term
        denominator_fft = tf.cast(denominator_fft, tf.complex64)

        frac_fft = numerator_fft / denominator_fft
        return tf.expand_dims(tf.to_float(tf.ifft2d(frac_fft)), -1)
Example #11
def test_fft2d_of_tensorflow():
    #a = np.mgrid[:5, :5][0]
    #print(a)
    #np_fft_a = np.fft.fft2(a)
    #print(np_fft_a)
    #array([[ 50.0 +0.j        ,   0.0 +0.j        ,   0.0 +0.j        ,
    #0.0 +0.j        ,   0.0 +0.j        ],
    #[-12.5+17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
    #0.0 +0.j        ,   0.0 +0.j        ],
    #[-12.5 +4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
    #0.0 +0.j        ,   0.0 +0.j        ],
    #[-12.5 -4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
    #0.0 +0.j        ,   0.0 +0.j        ],
    #[-12.5-17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
    #0.0 +0.j        ,   0.0 +0.j        ]])
    # check that tf.fft2d and np.fft.fft2 produce the same result

    size = 2
    testimage = np.random.rand(size, size)
    testimage = testimage + 0j
    print(type(testimage))

    ft_testimage = np.fft.fft2(testimage)
    print("Test avec 2D element")
    print("Numpy fft")
    print(ft_testimage)
    np_result = np.sum(ft_testimage)
    print(np_result)

    sess = tf.Session()
    with sess.as_default():
        tf_ft_testimage = tf.fft2d(testimage)
        tf_result = np.sum(tf_ft_testimage.eval())
        print("Tensorflow fft")
        print(tf_ft_testimage.eval())
        print(tf_result)

        tensor3D = tf.constant(np.expand_dims(testimage, axis=2),
                               dtype=tf.complex64)
        print("Tensorflow fft with expand dim")
        print('Dims tensor', tensor3D.shape)
        #tensor3D = tf.constant(y)
        tf_ft_testimage = tf.fft2d(tensor3D)
        tf_result = np.sum(tf_ft_testimage.eval())
        print(tf_ft_testimage.eval())
        print(tf_result)

        tensor3D = tf.transpose(tensor3D, [2, 0, 1])
        print("Tensorflow fft with expand dim transpose")
        print('Dims tensor', tensor3D.shape)
        #tensor3D = tf.constant(y)
        tf_ft_testimage = tf.fft2d(tensor3D)
        tf_result = np.sum(tf_ft_testimage.eval())
        print(tf_ft_testimage.eval())
        print(tf_result)
Example #12
def gen_PSFs(h, OOFphase, wvls, idx, N_R, N_G, N_B):
    n = 1.5  # diffractive index

    with tf.variable_scope("Red"):
        OOFphase_R = OOFphase[:, :, :, 0]
        phase_R = tf.add(2 * np.pi / wvls[0] * (n - 1) * h, OOFphase_R)
        Pupil_R = tf.pad(
            tf.multiply(tf.complex(idx, 0.0), tf.exp(tf.complex(0.0,
                                                                phase_R))),
            [[0, 0], [(N_R - N_B) // 2,
                      (N_R - N_B) // 2], [(N_R - N_B) // 2, (N_R - N_B) // 2]],
            name='Pupil_R')
        Norm_R = tf.cast(N_R * N_R * np.sum(idx**2), tf.float32)
        PSF_R = tf.divide(tf.square(tf.abs(fft2dshift(tf.fft2d(Pupil_R)))),
                          Norm_R,
                          name='PSF_R')

    with tf.variable_scope("Green"):
        OOFphase_G = OOFphase[:, :, :, 1]
        phase_G = tf.add(2 * np.pi / wvls[1] * (n - 1) * h, OOFphase_G)
        Pupil_G = tf.pad(
            tf.multiply(tf.complex(idx, 0.0), tf.exp(tf.complex(0.0,
                                                                phase_G))),
            [[0, 0], [(N_G - N_B) // 2,
                      (N_G - N_B) // 2], [(N_G - N_B) // 2, (N_G - N_B) // 2]],
            name='Pupil_G')
        Norm_G = tf.cast(N_G * N_G * np.sum(idx**2), tf.float32)
        PSF_G = tf.divide(tf.square(tf.abs(fft2dshift(tf.fft2d(Pupil_G)))),
                          Norm_G,
                          name='PSF_G')

    with tf.variable_scope("Blue"):
        OOFphase_B = OOFphase[:, :, :, 2]
        phase_B = tf.add(2 * np.pi / wvls[2] * (n - 1) * h, OOFphase_B)
        Pupil_B = tf.multiply(tf.complex(idx, 0.0),
                              tf.exp(tf.complex(0.0, phase_B)),
                              name='Pupil_B')
        Norm_B = tf.cast(N_B * N_B * np.sum(idx**2), tf.float32)
        PSF_B = tf.divide(tf.square(tf.abs(fft2dshift(tf.fft2d(Pupil_B)))),
                          Norm_B,
                          name='PSF_B')

    N_crop_R = int(
        (N_R - N_B) / 2)  # number of pixels to crop on each side for R
    N_crop_G = int(
        (N_G - N_B) / 2)  # number of pixels to crop on each side for G

    PSFs = tf.stack([
        PSF_R[:, N_crop_R:-N_crop_R, N_crop_R:-N_crop_R],
        PSF_G[:, N_crop_G:-N_crop_G, N_crop_G:-N_crop_G], PSF_B
    ],
                    axis=3)
    return PSFs
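Each channel above computes PSF = |F(pupil)|^2 / (N^2 * sum(idx^2)); with that normalization the PSF integrates to one by Parseval's theorem. A single-wavelength NumPy sketch (np.fft.fftshift stands in for the fft2dshift helper assumed by the snippet):

import numpy as np

# Toy pupil -> PSF computation mirroring the per-channel blocks above.
N = 64
yy, xx = np.mgrid[-N // 2:N // 2, -N // 2:N // 2]
idx = (xx ** 2 + yy ** 2 < (N // 4) ** 2).astype(np.float64)  # circular aperture
pupil = idx * np.exp(1j * np.zeros((N, N)))                   # in focus: zero phase
psf = np.abs(np.fft.fftshift(np.fft.fft2(pupil))) ** 2
psf /= N * N * np.sum(idx ** 2)                               # same Norm as above
print(psf.sum())  # ~1.0, by Parseval's theorem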
Example #13
def fftconvolve2d(x, y, padding="VALID"):
    #return convolve2d(x,y)
    """
    x and y must be real 2-d tensors.

    mode must be "SAME" or "VALID".
    Input is x=[batch, width, height] and kernel is [batch, width, height]

    need to add custom striding
    """
    # Read shapes
    x_shape = tf.shape(
        x)  #np.array(tuple(x.get_shape().as_list()), dtype=np.int32)
    y_shape = tf.shape(
        y)  #np.array(tuple(y.get_shape().as_list()), dtype=np.int32)

    # Check if they are 2D; add one artificial batch layer
    # Do the same for the kernel separately

    # Construct paddings and pad

    y_pad = [[0, 0], [0, x_shape[1] - 1], [0, x_shape[2] - 1]]
    x_pad = [[0, 0], [0, y_shape[1] - 1], [0, y_shape[2] - 1]]

    x = tf.pad(x, x_pad)
    y = tf.pad(y, y_pad)

    # Go to FFT domain
    y = tf.cast(y, tf.complex64, name='complex_Y')
    x = tf.cast(x, tf.complex64, name='complex_X')

    y_fft = tf.fft2d(y, name='fft_Y')
    x_fft = tf.fft2d(x, name='fft_X')

    # Do elementwise multiplication
    convftt = tf.multiply(x_fft, y_fft, name='fft_mult')

    # Come back
    z = tf.ifft2d(convftt, name='ifft_z')
    z = tf.real(z)

    #Slice correctly based on requirements
    if padding == 'VALID':
        begin = [0, y_shape[1] - 1, y_shape[2] - 1]
        size = [x_shape[0], x_shape[1] - y_shape[1] + 1,
                x_shape[2] - y_shape[2] + 1]  # full conv is x+y-1 long; VALID keeps x-y+1

    if padding == 'SAME':
        begin = [0, (y_shape[1] - 1) // 2 - 1, (y_shape[2] - 1) // 2 - 1]
        size = x_shape  #[-1, x_shape[0], x_shape[1]]

    z = tf.slice(z, begin, size)
    return z
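The zero-padding above is what turns the FFT's circular convolution into linear convolution: both signals are padded to length x + y - 1, so the wrap-around never overlaps the valid samples. A quick 1-D NumPy check of that identity:

import numpy as np

# FFT convolution with zero-padding equals direct linear convolution.
x = np.random.rand(16)
y = np.random.rand(5)
n = x.size + y.size - 1
fft_conv = np.real(np.fft.ifft(np.fft.fft(x, n) * np.fft.fft(y, n)))
print(np.allclose(fft_conv, np.convolve(x, y)))  # expect True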
Example #14
def inverse_filter(blurred, estimate, psf, gamma=None, init_gamma=2.):
    """Inverse filtering in the frequency domain.

    Args:
        blurred: image with shape (batch_size, height, width, num_img_channels)
        estimate: image with shape (batch_size, height, width, num_img_channels)
        psf: filters with shape (kernel_height, kernel_width, num_img_channels, num_filters)
        gamma: Optional. Scalar that determines regularization (higher --> more regularization, output is closer to
               "estimate", lower --> less regularization, output is closer to straight inverse filtered-result). If
               not passed, a trainable variable will be created.
        init_gamma: Optional. Scalar whose square is used as the initial value of gamma.
    """
    img_shape = blurred.shape.as_list()

    if gamma is None:  # Gamma (the regularization parameter) is also a trainable parameter.
        gamma_initializer = tf.constant_initializer(init_gamma)
        gamma = tf.get_variable(name="gamma",
                                shape=(),
                                dtype=tf.float32,
                                trainable=True,
                                initializer=gamma_initializer)
        gamma = tf.square(gamma)  # Enforces positivity of gamma.
        tf.summary.scalar('gamma', gamma)

    a_tensor_transp = tf.transpose(blurred, [0, 3, 1, 2])
    estimate_transp = tf.transpose(estimate, [0, 3, 1, 2])

    # Everything has shape (batch_size, num_channels, height, width)
    img_fft = tf.fft2d(tf.complex(a_tensor_transp, 0.))
    # otf = my_psf2otf(psf, output_size=img_shape[1:3])
    otf = psf2otf(psf, output_size=img_shape[1:3])
    otf = tf.transpose(otf, [2, 3, 0, 1])

    adj_conv = img_fft * tf.conj(otf)

    # This is a slight modification to standard inverse filtering - gamma not only regularizes the inverse filtering,
    # but also trades off between the regularized inverse filter and the unfiltered estimate_transp.
    numerator = adj_conv + tf.fft2d(tf.complex(gamma * estimate_transp, 0.))

    kernel_mags = tf.square(tf.abs(otf))  # Magnitudes of the blur kernel.

    denominator = tf.complex(kernel_mags + gamma, 0.0)
    filtered = tf.div(numerator, denominator)
    cplx_result = tf.ifft2d(filtered)
    real_result = tf.real(cplx_result)  # Discard complex parts.
    real_result = tf.maximum(1e-5,real_result)

    # Get back to (batch_size, num_channels, height, width)
    result = tf.transpose(real_result, [0, 2, 3, 1])
    return result
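In the frequency domain, inverse_filter computes X = (conj(K) * B + gamma * E) / (|K|^2 + gamma): as gamma goes to 0 it approaches plain inverse filtering B / K, and as gamma grows it falls back to the estimate E, exactly the trade-off described in the comment above. A per-frequency-bin NumPy sketch of that behavior:

import numpy as np

# One frequency bin of the regularized inverse filter above:
# X = (conj(K) * B + gamma * E) / (|K|^2 + gamma).
def inv_filter_bin(K, B, E, gamma):
    return (np.conj(K) * B + gamma * E) / (np.abs(K) ** 2 + gamma)

K, B, E = 0.5 + 0.1j, 2.0 + 0.3j, 3.8 + 0.0j  # illustrative values
print(inv_filter_bin(K, B, E, 1e-6))   # ~ B / K: straight inverse filtering
print(inv_filter_bin(K, B, E, 1e6))    # ~ E: falls back to the estimate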
Example #15
def setup_inputs(x, mask, batch_size):

    channel = x.shape[-1].value // 2
    mask = np.tile(mask, (channel, 1, 1))
    mask_tf = tf.cast(tf.constant(mask), tf.float32)
    mask_tf_c = tf.cast(mask_tf, tf.complex64)
    x_complex = real2complex(x)
    x_complex = tf.cast(x_complex, tf.complex64)
    x_complex = tf.transpose(x_complex, [2, 0, 1])
    kx = tf.fft2d(x_complex)
    kx_mask = kx * mask_tf_c
    x_u = tf.ifft2d(kx_mask)
    x_u = tf.transpose(x_u, [1, 2, 0])
    kx_mask = tf.transpose(kx_mask, [1, 2, 0])

    x_u_cat = complex2real(x_u)
    x_cat = tf.cast(x, tf.float32)
    mask_tf_c = tf.transpose(mask_tf_c, [1, 2, 0])

    features, labels, kx_mask, masks = tf.train.shuffle_batch(
        [x_u_cat, x_cat, kx_mask, mask_tf_c],
        batch_size=batch_size,
        num_threads=64,
        capacity=50,
        min_after_dequeue=10)

    return features, labels, kx_mask, masks
Example #16
def get_kernel_ir(dist_nm, lmbda_nm, voxel_nm, grid_shape):
    """
    Get Fresnel propagation kernel for IR algorithm.

    Parameters:
    -----------
    simulator : :class:`acquisition.Simulator`
        The Simulator object.
    dist : float
        Propagation distance in cm.
    """
    size_nm = np.array(voxel_nm) * np.array(grid_shape)
    k = 2 * PI / lmbda_nm
    ymin, xmin = np.array(size_nm)[:2] / -2.
    dy, dx = voxel_nm[0:2]
    x = np.arange(xmin, xmin + size_nm[1], dx)
    y = np.arange(ymin, ymin + size_nm[0], dy)
    x, y = np.meshgrid(x, y)
    try:
        h = np.exp(1j * k * dist_nm) / (1j * lmbda_nm * dist_nm) * np.exp(
            1j * k / (2 * dist_nm) * (x**2 + y**2))
        H = np_fftshift(fft2(h)) * voxel_nm[0] * voxel_nm[1]
        dxchange.write_tiff(x,
                            '2d_512/monitor_output/x',
                            dtype='float32',
                            overwrite=True)
    except:
        h = tf.exp(1j * k * dist_nm) / (1j * lmbda_nm * dist_nm) * tf.exp(
            1j * k / (2 * dist_nm) * (x**2 + y**2))
        # h = tf.convert_to_tensor(h, dtype='complex64')
        H = fftshift(tf.fft2d(h)) * voxel_nm[0] * voxel_nm[1]

    return H
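The companion get_kernel used by fresnel_propagate at the top of this page is not shown in this listing. For orientation, a hedged NumPy sketch of a standard Fresnel transfer-function kernel, H(u, v) = exp(ikz) * exp(-i*pi*lambda*z*(u^2 + v^2)), on the same centered (fftshifted) grid layout; this is the textbook TF formulation, not necessarily the repository's exact implementation:

import numpy as np

def get_kernel_sketch(dist_nm, lmbda_nm, voxel_nm, grid_shape):
    # Fresnel transfer-function kernel on a centered frequency grid,
    # matching how fresnel_propagate multiplies it against fftshift(fft2(w)).
    u = np.fft.fftshift(np.fft.fftfreq(grid_shape[1], d=voxel_nm[1]))
    v = np.fft.fftshift(np.fft.fftfreq(grid_shape[0], d=voxel_nm[0]))
    u, v = np.meshgrid(u, v)
    k = 2 * np.pi / lmbda_nm
    return np.exp(1j * k * dist_nm) * np.exp(
        -1j * np.pi * lmbda_nm * dist_nm * (u ** 2 + v ** 2))

H = get_kernel_sketch(1e7, 0.248, [100., 100.], [512, 512])  # 1 cm, 5 keV, 100 nm
print(H.shape, H.dtype)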
Example #17
    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Example #18
 def random_spatial_to_spectral(self, channels, filters, height, width):
     # Create a truncated random image, then compute the FFT of that image and return its values.
     # Used to initialize spectrally parameterized filters;
     # an alternative is to initialize directly in the spectral domain.
     w = tf.truncated_normal([channels, filters, height, width], mean=0, stddev=0.01)
     fft = tf.fft2d(tf.complex(w, 0.0 * w), name='spectral_initializer')
     return fft.eval(session=self.sess)
Example #19
def setup_inputs_one_sources(sess,
                             filenames_input,
                             filenames_output,
                             image_size=None,
                             axis_undersample=1,
                             capacity_factor=3):

    if image_size is None:
        image_size = FLAGS.sample_size

    # image_size must be resolved before the mask is built
    DEFAULT_MASK, _ = getMask([image_size, image_size],
                              axis_undersample=axis_undersample)
    DEFAULT_MASK_TF = tf.cast(tf.constant(DEFAULT_MASK), tf.float32)
    DEFAULT_MASK_TF_c = tf.cast(DEFAULT_MASK_TF, tf.complex64)

    # Read each JPEG file
    reader_input = tf.WholeFileReader()
    filename_queue_input = tf.train.string_input_producer(filenames_input)
    key, value_input = reader_input.read(filename_queue_input)
    channels = 3
    image_input = tf.image.decode_jpeg(value_input,
                                       channels=channels,
                                       name="input_image")
    image_input.set_shape([None, None, channels])

    # cast
    image_input = tf.cast(image_input, tf.float32) / 255.0

    # take the last channel for both output and input
    image_output = image_input[:, :, -1]
    image_input = image_input[:, :, -1]

    # undersample here
    kspace_input = tf.fft2d(tf.cast(image_input, tf.complex64))
    kspace_zpad = kspace_input * DEFAULT_MASK_TF_c
    image_zpad = tf.ifft2d(kspace_zpad)
    image_zpad_real = tf.real(image_zpad)
    image_zpad_real = tf.reshape(image_zpad_real, [image_size, image_size, 1])
    # image_zpad_real.set_shape([image_size, image_size, 1])
    image_zpad_imag = tf.imag(image_zpad)
    image_zpad_imag = tf.reshape(image_zpad_imag, [image_size, image_size, 1])
    # image_zpad_imag.set_shape([image_size, image_size, 1])
    image_zpad_concat = tf.concat(axis=2,
                                  values=[image_zpad_real, image_zpad_imag])

    # The feature is simply a Kx downscaled version
    feature = tf.reshape(image_zpad_concat, [image_size, image_size, 2])
    label = tf.reshape(image_output, [image_size, image_size, 1])

    # Using asynchronous queues
    features, labels = tf.train.batch([feature, label],
                                      batch_size=FLAGS.batch_size,
                                      num_threads=4,
                                      capacity=capacity_factor *
                                      FLAGS.batch_size,
                                      name='labels_and_features')

    tf.train.start_queue_runners(sess=sess)

    return features, labels
Example #20
def fft_test(N=size):
    s = dL * dL / (N * N)
    if False:
        img_raw = tf.io.read_file("E:/ONNet/data/MNIST/test_2.jpg")
        img_raw = tf.image.decode_jpeg(img_raw)
    else:  # tf.io and skimage.io behave differently here, oddly enough
        img_raw = io.imread("E:/ONNet/data/MNIST/test_2.jpg")
        #print(img_raw)
    img_tensor = tf.squeeze(img_raw)
    with tf.Session() as sess:
        img_tensor = img_tensor.eval()
        print(img_tensor.shape, img_tensor.dtype)
        #print(img_tensor)

    u0 = tf.cast(img_tensor, dtype=tf.complex64)
    print(u0.shape, H_f.shape)
    u1 = tf.fft2d(u0)
    with tf.Session() as sess:
        print(u0.eval())
        print(u1.eval())
    u1 = H_f * u1
    u2 = tf.ifft2d(u1)
    with tf.Session() as sess:
        print(u1.eval())
        print(u2.eval())
Example #21
def buildModel_fft(input_dim):
    # This network is used to pre-train the optical flow.
    input_ = Input(shape=(input_dim))
    # =========================================================================
    act_ = net_base(input_, nb_filter=64)
    # =========================================================================
    density_pred =  Convolution2D(1, 1, 1, bias = False, activation='linear',\
                                  init='orthogonal',name='pred',border_mode='same')(act_)

    imageMean = tf.reduce_mean(density_pred)
    node4 = tf.reshape(density_pred, [1, 1, 128, 128])
    fftstack = tf.fft2d(tf.complex(node4, tf.zeros((1, 1, 128, 128))))
    out = (tf.cast(tf.abs(tf.ifft2d(fftstack * tf.conj(fftstack))),
                   dtype=tf.float32) / imageMean**2 / (128 * 128)) - 1

    def count(out):
        sigma = 4.0
        rough_sig = sigma * 2.3588 * 0.8493218
        return (128 * 128) / (
            (tf.reduce_max(out) - tf.reduce_min(out)) * np.pi * (rough_sig**2))

    #lam.build((1,1))
    y_true_cast = K.placeholder(shape=(1, 1, 128, 128), dtype='float32')
    #K.set_value(y_true_cast,out)

    # =========================================================================
    model = Model(input=input_, output=density_pred)
    opt = SGD(lr=1e-2, momentum=0.9, nesterov=True)
    model.compile(optimizer=opt, loss='mse')
    return model
Example #22
    def __call__(self, x):
        """
        The forward process of an MRI scan is supposedly well understood.

        Args:
            x (tf.tensor): The input image
                shape is [None, width, height, channels],
                dtype is tf.float32

        Returns:
            y (tf.tensor): the sampled k-space measurements,
                shape is [None, n_measurements],
                dtype is tf.float32
        """
        if x.dtype != tf.complex64:
            x = tf.complex(x, tf.zeros_like(x))

        y = tf.fft2d(x)
        y = tf.concat([tf.real(y) + tf.imag(y)], axis=1)  # cheating?
        # y = tf.sqrt(tf.imag(y)**2 + tf.real(y)**2)

        # TODO not sure this works as intended
        # generate a random mask. aka the samples from y that we choose
        # mask = tf.random_uniform(tf.shape(y), minval=0, maxval=self.n, dtype=tf.int32)
        # mask = 1-tf.cast(tf.greater(mask, tf.ones_like(mask)), tf.float32)
        # self.mask = tf.complex(mask, mask)
        # y *= self.mask

        y = tf.layers.flatten(y)
        y = tf.gather(y, self.idx, axis=1)

        # also add some noise
        y += tf.random_normal(tf.shape(y))*self.stddev

        return y
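The canonical forward model this class approximates is y = M * F(x) + noise (undersampled k-space measurements plus noise); here the mask is replaced by a fixed tf.gather over flattened indices, as the commented-out block notes. A minimal NumPy sketch of the canonical model, with an assumed random 30% sampling mask:

import numpy as np

# Canonical undersampled-MRI forward model: y = M * (F(x) + noise).
rng = np.random.RandomState(0)
x = rng.rand(64, 64)
mask = rng.rand(64, 64) < 0.3                   # keep ~30% of k-space (assumed)
noise = (rng.randn(64, 64) + 1j * rng.randn(64, 64)) * 0.01
y = mask * (np.fft.fft2(x) + noise)
print(y.shape, np.count_nonzero(mask))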
Example #23
def fivelim(inp, features):  # CNN for spatial domain
    scale = tf.complex(tf.sqrt(256.0 * 170.0), 0.0)
    inp = r2c(inp)
    inp = tf.squeeze(inp, axis=1)
    inp = tf.squeeze(inp, axis=-1)
    inp = tf.signal.fftshift(tf.ifft2d(tf.signal.ifftshift(inp,
                                                           axes=(-2, -1))),
                             axes=(-2, -1)) * scale
    inp = tf.expand_dims(inp, axis=-1)
    inp = c2r(inp)
    with tf.name_scope('Unetim'):
        x = convLayer(inp, (3, 3, 2, features), 11)
        x = convLayer(x, (3, 3, features, features), 12)
        x = convLayer(x, (3, 3, features, features), 13)
        x = convLayer(x, (3, 3, features, features), 14)
        x = convLayer(x, (3, 3, features, 2), 15)
        x = inp + x
    x = r2c(x)
    x = tf.squeeze(x, axis=-1)
    x = tf.signal.fftshift(tf.fft2d(tf.signal.ifftshift(x, axes=(-2, -1))),
                           axes=(-2, -1)) / scale
    x = tf.expand_dims(x, axis=-1)
    x = tf.expand_dims(x, axis=1)
    x = c2r(x)
    return x
Example #24
def fft2c(im, name="fft2c", do_orthonorm=True):
    """Centered FFT2 on second and third dimensions."""
    with tf.name_scope(name):
        im_out = im
        dims = tf.shape(im_out)
        if do_orthonorm:
            fftscale = tf.sqrt(tf.cast(dims[1] * dims[2], dtype=tf.float32))
        else:
            fftscale = 1.0
        fftscale = tf.cast(fftscale, dtype=tf.complex64)

        # permute FFT dimensions to be the last (faster!)
        tpdims = list(range(len(im_out.get_shape().as_list())))
        tpdims[-1], tpdims[1] = tpdims[1], tpdims[-1]
        tpdims[-2], tpdims[2] = tpdims[2], tpdims[-2]

        im_out = tf.transpose(im_out, tpdims)
        im_out = fftshift(im_out, axis=-1)
        im_out = fftshift(im_out, axis=-2)

        # with tf.device('/gpu:0'):
        im_out = tf.fft2d(im_out) / fftscale

        im_out = fftshift(im_out, axis=-1)
        im_out = fftshift(im_out, axis=-2)
        im_out = tf.transpose(im_out, tpdims)

    return im_out
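With do_orthonorm=True the 1/sqrt(N1*N2) scaling makes the transform unitary; up to the shift convention it matches np.fft.fft2 with norm='ortho'. A quick NumPy statement of the energy-preservation property:

import numpy as np

# Orthonormal centered FFT2, the NumPy analogue of fft2c above.
im = np.random.rand(8, 8) + 1j * np.random.rand(8, 8)
centered = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(im), norm='ortho'))
print(np.allclose(np.sum(np.abs(im) ** 2),
                  np.sum(np.abs(centered) ** 2)))  # expect True: unitary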
Example #25
def psf2otf(input_filter, output_size):
    """Convert 4D tensorflow filter into its FFT.
    """
    # pad out to output_size with zeros
    # circularly shift so center pixel is at 0,0
    fh, fw, _, _ = input_filter.shape.as_list()

    if output_size[0] != fh:
        pad = (output_size[0] - fh) / 2

        if (output_size[0] - fh) % 2 != 0:
            pad_top = pad_left = int(np.ceil(pad))
            pad_bottom = pad_right = int(np.floor(pad))
        else:
            pad_top = pad_left = int(pad) + 1
            pad_bottom = pad_right = int(pad) - 1

        padded = tf.pad(
            input_filter,
            [[pad_top, pad_bottom], [pad_left, pad_right], [0, 0], [0, 0]],
            "CONSTANT")
    else:
        padded = input_filter

    padded = tf.transpose(padded, [2, 0, 1, 3])
    padded = ifftshift2d_tf(padded)
    padded = tf.transpose(padded, [1, 2, 0, 3])

    ## Take FFT
    tmp = tf.transpose(padded, [2, 3, 0, 1])
    tmp = tf.fft2d(tf.complex(tmp, 0.))
    return tf.transpose(tmp, [2, 3, 0, 1])
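The circular shift before the FFT is what makes the OTF usable for convolution: with the PSF's center pixel moved to (0, 0), pointwise multiplication by the OTF equals circular convolution with the centered PSF. A NumPy sketch of that property (scipy.ndimage is used here only for the reference convolution):

import numpy as np
from scipy.ndimage import convolve

# psf2otf property: ifft2(fft2(img) * OTF) == img circularly convolved with psf.
img = np.random.rand(16, 16)
psf = np.zeros((16, 16))
psf[7:10, 7:10] = 1.0 / 9.0                  # 3x3 box centered at (8, 8)
otf = np.fft.fft2(np.fft.ifftshift(psf))     # shift center pixel to (0, 0)
via_otf = np.real(np.fft.ifft2(np.fft.fft2(img) * otf))
via_conv = convolve(img, psf[7:10, 7:10], mode='wrap')
print(np.allclose(via_otf, via_conv))  # expect True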
Example #26
def tf_fft2(image_in, dimmensions):
    """
        2D fourer transform matrix along dimmensions

        image_in: n-dimmensional complex tensor
        dimmensions: the dimmensions to do 2D FFt
    """
    assert len(dimmensions) == 2

    # image_shifted = np.array(image_in)
    for _i in dimmensions:
        assert int(image_in.shape[_i]) % 2 == 0
        dim_shift = int(int(image_in.shape[_i]) / 2)
        image_in = tf.manip.roll(image_in, shift=dim_shift, axis=_i)

    # function is only made for the inner two dimensions to be Fourier transformed
    # assert image_in.shape[0] == 1
    assert image_in.shape[3] == 1

    image_in = tf.transpose(image_in, perm=[0, 3, 1, 2])
    image_in = tf.fft2d(image_in)
    image_in = tf.transpose(image_in, perm=[0, 2, 3, 1])

    for _i in dimmensions:
        dim_shift = int(int(image_in.shape[_i]) / 2)
        image_in = tf.manip.roll(image_in, shift=dim_shift, axis=_i)

    return image_in
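The tf.manip.roll by half the axis length is exactly np.fft.fftshift along that axis when the length is even, which is why the function asserts even dimensions. In NumPy:

import numpy as np

# For even N, rolling by N // 2 equals fftshift (and ifftshift) on that axis.
a = np.arange(8)
print(np.array_equal(np.roll(a, 4), np.fft.fftshift(a)))  # expect True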
Example #27
    def run(self, hr_img, lr_img):
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())
        print('run: ->', hr_img.shape)
        # shape = np.zeros(hr_img.shape)
        # err_ = []
        # print(shape)
        for er in range(self.epoch):
            # image = tf.reshape(image,[image.shape[0],image.shape[1]])
            _, x = self.sess.run([self.train_op, self.loss],
                                 feed_dict={
                                     self.images: lr_img,
                                     self.label: hr_img
                                 })

            print(x)

        result = self.pred.eval({self.images: lr_img})
        result = result * 255 / (1e3 * 1e-5)
        imshow_spectrum(self.sess.run(tf.fft2d(result)))
        # plt_imshow(result)
        # result = np.clip(result, 0.0, 255.0).astype(np.uint8)
        result = np.abs(result).astype(np.uint8)
        imshow(result)
        plt_imshow(result)
        lr = self.sess.run([self.images],
                           feed_dict={
                               self.images: lr_img,
                               self.label: hr_img
                           })
        print(result + (np.asarray(lr) * 255 / (1e3 * 1e-5)))
        plt_imshow(result + (np.asarray(np.squeeze(lr)) * 255 / (1e3 * 1e-5)))

        return result
Example #28
    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Example #29
    def _propagate_tf_fft(self, input):
        '''
        propagation for tensor input
        '''
        input = self._pad_input(input)

        Ax = self.xx_new[0]
        Ay = self.yy_new[0]

        # create recipocal grid space
        k_xlist_pos = 2 * np.pi * np.linspace(
            -0.5 * self.num_pix_x_new / (2 * Ax), 0.5 * self.num_pix_x_new /
            (2 * Ax), self.num_pix_x_new)
        k_ylist_pos = 2 * np.pi * np.linspace(
            -0.5 * self.num_pix_y_new / (2 * Ay), 0.5 * self.num_pix_y_new /
            (2 * Ay), self.num_pix_y_new)

        k_x, k_y = np.meshgrid(k_xlist_pos, k_ylist_pos)

        k = 2 * np.pi / self.lmb

        k_z = np.sqrt(k**2 - k_x**2 - k_y**2 + 0j)

        # propagator kernel
        H_freq = np.exp(1.0j * k_z * self.distance)

        # convolution
        self.out = tf.ifft2d(tf.fft2d(input) * np.fft.ifftshift(H_freq))

        self.out = self.out[self.pad_x:self.num_pix_x_og + self.pad_x,
                            self.pad_y:self.num_pix_y_og + self.pad_y]
        self.out = tf.reshape(self.out,
                              [self.num_pix_x_og * self.num_pix_y_og])

        return self.out
Example #30
def retrieve_phase_far_field(src_fname,
                             save_path,
                             output_fname=None,
                             pad_length=256,
                             n_epoch=100,
                             learning_rate=0.001):

    # raw data is assumed to be centered at zero frequency
    prj_np = dxchange.read_tiff(src_fname)
    if output_fname is None:
        output_fname = os.path.basename(
            os.path.splitext(src_fname)[0]) + '_recon'

    # take modulus and inverse shift
    prj_np = ifftshift(np.sqrt(prj_np))

    obj_init = np.random.normal(50, 10, list(prj_np.shape) + [2])

    obj = tf.Variable(obj_init, dtype=tf.float32, name='obj')
    prj = tf.constant(prj_np, name='prj')

    obj_real = tf.cast(obj[:, :, 0], dtype=tf.complex64)
    obj_imag = tf.cast(obj[:, :, 1], dtype=tf.complex64)

    # obj_pad = tf.pad(obj, [[pad_length, pad_length], [pad_length, pad_length], [0, 0]], mode='SYMMETRIC')
    det = tf.fft2d(obj_real + 1j * obj_imag, name='detector_plane')

    loss = tf.reduce_mean(tf.squared_difference(tf.abs(det), prj, name='loss'))

    sess = tf.Session()

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    optimizer = optimizer.minimize(loss)

    sess.run(tf.global_variables_initializer())

    for i_epoch in range(n_epoch):
        t0 = time.time()
        _, current_loss = sess.run([optimizer, loss])
        print('Iteration {}: loss = {}, Δt = {} s.'.format(
            i_epoch, current_loss,
            time.time() - t0))

    det_final = sess.run(det)
    obj_final = sess.run(obj)
    res = np.linalg.norm(obj_final, 2, axis=2)
    dxchange.write_tiff(res,
                        os.path.join(save_path, output_fname),
                        dtype='float32',
                        overwrite=True)
    dxchange.write_tiff(fftshift(np.angle(det_final)),
                        os.path.join(save_path, 'detector_phase'),
                        dtype='float32',
                        overwrite=True)
    dxchange.write_tiff(fftshift(np.abs(det_final)**2),
                        os.path.join(save_path, 'detector_mag'),
                        dtype='float32',
                        overwrite=True)

    return
Example #31
 def FFTfunc(input_img, kernel):
     fft2data = tf.keras.layers.Lambda(lambda x: tf.fft2d(x))(input_img)
     temp_mul = tf.keras.layers.Lambda(
         lambda x: tf.math.multiply(x[0], x[1]))([fft2data, kernel])
     temp_mul = tf.keras.layers.Lambda(lambda x: tf.ifft2d(x))(temp_mul)
     return tf.keras.layers.Lambda(lambda x: tf.reduce_sum(x, axis=-3))(
         temp_mul)
Example #32
def get_angle(page):
  img = tf.cast(page.image, tf.float32)
  square = get_square(img)
  f = tf.abs(tf.fft2d(tf.cast(square, tf.complex64))[:MAX_SIZE//2, :])
  x_arr = (
      tf.cast(tf.concat([tf.range(MAX_SIZE // 2),
                         tf.range(1, MAX_SIZE // 2 + 1)[::-1]], 0),
              tf.float32))[None, :]
  y_arr = tf.cast(tf.range(MAX_SIZE // 2), tf.float32)[:, None]
  f = tf.where(x_arr * x_arr + y_arr * y_arr < 32 * 32, tf.zeros_like(f), f)
  m = tf.argmax(tf.reshape(f, [-1]), axis=0)
  x = tf.cast((m + MAX_SIZE // 4) % (MAX_SIZE // 2) - (MAX_SIZE // 4), tf.float32)
  y = tf.cast(tf.floordiv(m, MAX_SIZE // 2), tf.float32)
  return(tf.cond(
      y > 0, lambda: tf.atan(x / y), lambda: tf.constant(np.nan, tf.float32)),
      square)
Example #33
    def __init__(self, shape, vShape = [13, 13], dt = 0.01, y = 0.25):
        """
        The shape of the image to segment must be specified at construction.

        The size of the kernel V can be made larger or smaller, but just remember that if it's 13x13, that's 169 parameters to fit, and that'll grow quadratically!

        Arguments:
        shape -- 2 element iterable, for instance [256, 127]. Size of the image to segment; this argument is required. 3D simulations are not supported.

        vShape -- 2 element iterable, for instance [7, 7]. Size of convolution kernel that'll be fit to make the segmentation facet-y (default : [13, 13])
        
        dt -- Timestep of the Cahn-Hilliard solver. In terms of the equations on https://en.wikipedia.org/wiki/Cahn%E2%80%93Hilliard_equation, it's actually the timestep times the diffusion coefficient. Because we don't care about actual physical units here, we just merge them together (default : 0.01)

        y -- Gamma parameter of the Cahn-Hilliard equation (https://en.wikipedia.org/wiki/Cahn%E2%80%93Hilliard_equation) (default : 0.25)

        """
        cahnhilliard.CahnHilliard.__init__(self, shape = shape, dt = dt, y = y)

        if len(shape) != 2:
            raise Exception("shape must be length 2")

        if len(vShape) != 2:
            raise Exception("vShape must be length 2")

        self.shape = shape
        self.vShape = vShape

        # Set up the network for figuring out V
        self.toFit = tf.Variable(tf.truncated_normal([1, self.shape[0], self.shape[1], 1], 0.1))
        self.V = tf.Variable(tf.truncated_normal([vShape[0], vShape[1], 1, 1], 0.1))
        self.b = tf.Variable(tf.constant(0.01, shape = [1]))
        self.xV = tf.nn.conv2d(tf.reshape(self.ix, [1, self.shape[0], self.shape[1], 1]), self.V, [1, 1, 1, 1], padding = 'SAME') + self.b
        
        self.error = tf.nn.l2_loss(self.toFit - self.xV)  # alternative: tf.reduce_mean(tf.abs(self.toFit - self.xV))

        self.train_step = tf.train.AdamOptimizer(1e-2).minimize(self.error, var_list = [self.V, self.b])

        # Modify the parent update functions to use the V
        self.fftV = tf.fft2d(tf.complex(tf.reshape(self.xV, self.shape), self.zeros))

        self.saver = tf.train.Saver({ "V" : self.V, "b" : self.b })

        self.is_fit = False
Example #34
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-3], x_shape[-2]  # x is [..., h, w, 2] with real/imag stacked last

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out
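The h * w factor compensates for the 1/(h*w) normalization built into the inverse FFT, so the inverse branches return the unnormalized adjoint of tf.fft2d rather than its true inverse. The NumPy identity behind this:

import numpy as np

# ifft2 divides by h * w; multiplying it back yields the adjoint of fft2.
x = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
h, w = x.shape
print(np.allclose(np.fft.ifft2(x) * h * w,
                  np.conj(np.fft.fft2(np.conj(x)))))  # expect True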
Example #35
 def _tfFFT2D(self, x, use_gpu=False):
   with self.test_session(use_gpu=use_gpu):
     return tf.fft2d(x).eval()
Example #36
 def test_FFT2D(self):
     # only defined for gpu
     if DEVICE == GPU:
         t = tf.fft2d(self.random(3, 4, complex=True))
         self.check(t)