Example #1
        def body(i, decon):
            # Richardson-Lucy Iteration - logic taken largely from a combination of
            # the scikit-image (real domain) and DeconvolutionLab2 implementations (complex domain)
            conv1 = conv(decon, kern_fft)

            # High-pass filter to avoid division by very small numbers (see DeconvolutionLab2)
            blur1 = tf.where(conv1 < self.epsilon, tf.zeros_like(datat), datat / conv1, name='blur1')

            conv2 = conv(blur1, kern_fft_conj)

            # Positivity constraint on result for iteration
            decon = tf.maximum(decon * conv2, 0.)

            # If given an "observer", pass the current image restoration and iteration counter to it
            if self.observer_fn is not None:
                decon, i = tf_observer([decon, i], self.observer_fn)

            return i + 1, decon
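
The `body` function above is only the loop body of the Richardson-Lucy iteration; in the surrounding class it is driven by `tf.while_loop` together with a stopping condition. A minimal sketch of such a driver is shown below, assuming a fixed iteration count; the names `run_iterations`, `n_iter` and `decon0` (the initial estimate) are illustrative and not taken from the snippet.

import tensorflow as tf

def run_iterations(body, decon0, n_iter):
    # Stop once the counter reaches the requested number of iterations.
    cond = lambda i, decon: i < n_iter
    # tf.while_loop threads (i, decon) through body() until cond is False.
    _, result = tf.while_loop(cond, body, [tf.constant(0), decon0])
    return result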
Example #2
        def body(i, decon):
            # Richardson-Lucy Iteration - logic taken largely from a combination of
            # the scikit-image (real domain) and DeconvolutionLab2 implementations (complex domain)
            conv1 = conv(decon, kern_fft)

            # High-pass filter to avoid division by very small numbers (see DeconvolutionLab2)
            blur1 = tf.where(conv1 < self.epsilon, tf.zeros_like(datat), datat / conv1, name='blur1')

            conv2 = conv(blur1, kern_fft_conj)

            # Positivity constraint on result for iteration
            decon = tf.maximum(decon * conv2, 0.)

            # If given an "observer", pass the current image restoration and iteration counter to it
            if self.observer_fn is not None:
                # Remove any cropping that may have been added as this is usually not desirable in observers
                decon_crop = unpad_around_center(decon, tf.shape(datah))
                _, i, decon = tf_observer([decon_crop, i, decon], self.observer_fn)

            return i + 1, decon
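
Both examples hand intermediate state to an optional observer through `tf_observer`, which appears to pass the listed tensor values on to `self.observer_fn`. A sketch of what such a callable might look like for Example #2 is shown below; the argument order follows the packing `[decon_crop, i, decon]` above, but the exact signature expected by the real `tf_observer` wrapper is an assumption.

import numpy as np

def print_progress(decon_crop, i, decon):
    # Hypothetical observer: log simple statistics of the cropped restoration
    # at each iteration, e.g. to monitor convergence.
    print('iteration %d: mean=%.6g max=%.6g' % (i, np.mean(decon_crop), np.max(decon_crop)))

An observer like this would presumably be supplied as `observer_fn` when the deconvolver object is constructed, since the snippets read it from `self.observer_fn`.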
Example #3
        def body(
            i,
            decon,
        ):
            '''# Richardson-Lucy Iteration - logic taken largely from a combination of
            # the scikit-image (real domain) and DeconvolutionLab2 implementations (complex domain)
            # conv1 is the current model blurred with the PSF
            conv1 = conv(decon, kern_fft)

            # High-pass filter to avoid division by very small numbers (see DeconvolutionLab2)
            blur1 = tf.where(conv1 < self.epsilon, tf.zeros_like(datat), datat / conv1, name='blur1')

            # conv2 is the blurred model convolved with the flipped PSF
            conv2 = conv(blur1, kern_fft_conj)

            # Positivity constraint on result for iteration
            decon = tf.maximum(decon * conv2, 0.)
            '''

            # Gold algorithm (ratio method): simpler than RL, doesn't use the flipped OTF
            # conv1 is the current model blurred with the PSF
            conv1 = conv(decon, kern_fft)

            # High-pass filter to avoid division by very small numbers (see DeconvolutionLab2)?
            # We don't do that here; instead we add a delta parameter to the numerator and
            # denominator of the division that forms the ratio, as per Stephan Ludwig et al. 2019.
            # Normalise the blurred model and the data each time because the numbers get big and
            # we risk overflow when multiplying in the next step.
            conv1norm = conv1 / (tf.math.reduce_sum(conv1))
            datatNorm = datat / (tf.math.reduce_sum(datat))
            # This value seems to work well for images that are normalised to a sum of 1
            deltaParam = 1e-4
            ratio = (datatNorm + deltaParam) / (conv1norm + deltaParam)
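            # Gold (ratio) update written out, with conv() denoting the FFT-based convolution above:
            #   decon_{k+1} = decon_k * (data + delta) / (conv(decon_k, PSF) + delta)
            # i.e. unlike Richardson-Lucy there is no second correlation with the flipped PSF.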
            #blur1 = tf.where(conv1 < self.epsilon, tf.zeros_like(datat), datat / conv1, name='blur1')
            #ratioNorm = ratio / (tf.math.reduce_sum(ratio))
            #deconNorm = decon / (tf.math.reduce_sum(decon))
            # decon becomes the current model multiplied by the ratio of (normalised) data to blurred model
            # Positivity constraint on result for iteration
            decon = tf.maximum(decon * ratio, 0.)
            # Smooth the intermediate result image with a Gaussian of sigma 1 every 5th iteration
            # to control the noise buildup that the Gold method is susceptible to.
            # Use tf.nn.conv3d to convolve a Gaussian kernel with the image; gaussKernel is built
            # with the gaussian_kernel function defined above.
            # Note: i is a tensor inside the loop body, so the every-5th-iteration test must be
            # a tf.cond rather than a Python if, and the smoothed result must be assigned back.
            decon = tf.cond(
                tf.equal(i % 5, 0),
                lambda: tf.nn.conv3d(decon,
                                     filter=gaussKernel,
                                     strides=[1, 1, 1, 1, 1],
                                     padding="SAME"),
                lambda: decon)
            # normalise the result so the sum of the data is 1
            decon = decon / (tf.math.reduce_sum(decon))

            # TODO - Smoothing every 5 iterations with a Gaussian or Wiener filter.
            # TODO - Rescale back to the input data's summed intensity - probably need to adjust deltaParam too.

            # If given an "observer", pass the current image restoration and iteration counter to it
            if self.observer_fn is not None:
                # Remove any cropping that may have been added as this is usually not desirable in observers
                decon_crop = unpad_around_center(decon, tf.shape(datah))
                # normalise the result so the sum of the data is 1
                decon_crop = decon_crop / (tf.math.reduce_sum(decon_crop))
                # we can use these captured observed tensors to evaluate eg convergence
                # in eg. the observer function used.
                _, i, decon, conv1 = tf_observer([decon_crop, i, decon, conv1],
                                                 self.observer_fn)

            return i + 1, decon
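
The smoothing step in Example #3 relies on a `gaussian_kernel` helper that is described as "defined above" but not shown in the snippet. A minimal sketch of what such a helper could look like is given below, assuming a separable 3-D Gaussian reshaped to the [depth, height, width, in_channels, out_channels] layout that tf.nn.conv3d expects; the real helper's name, signature and defaults may differ.

import tensorflow as tf

def gaussian_kernel_3d(size=5, sigma=1.0, dtype=tf.float32):
    # 1-D Gaussian profile centred on zero, normalised to sum to 1.
    x = tf.cast(tf.range(-(size // 2), size // 2 + 1), dtype)
    g = tf.exp(-(x ** 2) / (2.0 * sigma ** 2))
    g = g / tf.reduce_sum(g)
    # Outer product of the three 1-D profiles gives a separable 3-D kernel.
    kernel = tf.einsum('i,j,k->ijk', g, g, g)
    # Reshape to the 5-D filter layout required by tf.nn.conv3d.
    return tf.reshape(kernel, [size, size, size, 1, 1])

Note that tf.nn.conv3d also requires a 5-D input of shape [batch, depth, height, width, channels], so a plain 3-D volume such as decon would need to be expanded with tf.expand_dims before the smoothing convolution and squeezed afterwards.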