def schulzSnyder(correlation, kernel, prior=np.float32(0), iterations=10,
                 measure=True, clip=True, verbose=True):
    # agnostic code: xp is numpy or cupy depending on the input array module
    # NOTE: kernel is accepted for API symmetry with the other routines but
    # is not used by this variant.
    xp = cp.get_array_module(correlation)
    epsilon = 1e-7

    # starting guess with a flat image
    if prior.any() == 0:
        signal_decorr = xp.full(correlation.shape, 0.5) \
            + 0.01 * xp.random.rand(*correlation.shape)
    else:
        signal_decorr = prior

    R_0 = signal_decorr.sum()
    signal_decorr = signal_decorr / R_0

    # to measure the distance between the guess correlated and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        if verbose and (i % 100) == 0:
            print('Iteration ' + str(i))

        relative_corr = my_correlation(signal_decorr, signal_decorr)

        if measure:
            error[i] = xp.linalg.norm(correlation - relative_corr)

        # relative_corr = 0.5 * (correlation + axisflip(correlation)) / relative_corr
        relative_corr = correlation / relative_corr

        # avoid errors due to division by zero or inf
        relative_corr[xp.isinf(relative_corr)] = epsilon
        relative_corr = xp.nan_to_num(relative_corr)

        # multiplicative update
        # signal_decorr *= my_correlation(axisflip(signal_decorr), relative_corr) / R_0
        signal_decorr *= my_correlation(relative_corr, signal_decorr) / R_0
        # signal_decorr *= (my_correlation(relative_corr, signal_decorr)
        #                   + my_convolution(signal_decorr, relative_corr)) / R_0

    if clip:
        signal_decorr[signal_decorr > +1] = +1
        signal_decorr[signal_decorr < -1] = -1

    return signal_decorr, error
def anchorUpdate_MAP(signal, kernel, prior=np.float32(0), iterations=10,
                     measure=True, clip=True, verbose=False):
    # NOTE: the default prior must be an array-like with .any(); a plain
    # Python 0 here would crash the check below.
    xp = cp.get_array_module(signal)

    # normalization
    signal = signal / signal.sum()
    kernel = kernel / kernel.sum()
    epsilon = 1e-7

    # starting guess with a flat image
    if prior.any() == 0:
        signal_deconv = xp.full(signal.shape, 0.5) \
            + 0.01 * xp.random.rand(*signal.shape)
    else:
        signal_deconv = prior

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        if verbose:
            print('Iteration ' + str(i))

        # kernel update rule
        kernel_update = my_convolution(signal_deconv, kernel)
        kernel_update = my_correlation(kernel_update, kernel)
        kernel_update = kernel_update / kernel_update.sum()
        kernel_mirror = axisflip(kernel_update)

        relative_blur = my_convolution(signal_deconv, kernel_update)

        if measure:
            error[i] = xp.linalg.norm(signal - relative_blur)

        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # MAP multiplicative update (exponential form)
        signal_deconv *= xp.exp(my_convolution(relative_blur - 1, kernel_mirror))

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    return signal_deconv, error
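
# Usage sketch (hypothetical, added for illustration; not a test from the
# original code). It builds a toy object and a Gaussian PSF with plain numpy,
# forms the autocorrelation of the blurred object, and inverts it with the
# MAP update above. Centering conventions follow whatever this module's
# my_convolution/my_correlation implement; sizes and counts are arbitrary.
def _demo_anchorUpdate_MAP():
    yy, xx = np.mgrid[-32:32, -32:32]
    kernel = np.exp(-(xx**2 + yy**2) / (2 * 3.0**2)).astype(np.float32)
    kernel /= kernel.sum()
    obj = np.zeros((64, 64), dtype=np.float32)
    obj[20:30, 25:40] = 1.0
    blurred = my_convolution(obj, kernel)
    measured = my_correlation(blurred, blurred)
    recon, err = anchorUpdate_MAP(measured, kernel, iterations=200,
                                  measure=True, verbose=False)
    return recon, err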
# now I start processing x: first threshold, then pad
threshold_indices = x < threshold
x[threshold_indices] = threshold
x = x - threshold
x_pad = np.pad(x, ((0, 0), (0, 0), (0, 0), (0, 256 - x.shape[3])), 'constant')

# rotate all the projections so that we can overlap the autocorrelations
x_pad[0, :, :, :] = np.rot90(x_pad[0, :, :, :], 0, axes=(1, 2))
x_pad[1, :, :, :] = np.rot90(x_pad[1, :, :, :], -1, axes=(1, 2))
x_pad[2, :, :, :] = np.rot90(x_pad[2, :, :, :], -2, axes=(1, 2))
x_pad[3, :, :, :] = np.rot90(x_pad[3, :, :, :], -3, axes=(1, 2))
x_pad[4, :, :, :] = np.rot90(x_pad[4, :, :, :], -4, axes=(1, 2))

# cast to float32 to calculate the correlations
x_pad = np.float32(x_pad)
correlation0 = pf.my_correlation(x_pad[0, :, :, :], x_pad[0, :, :, :])
correlation1 = pf.my_correlation(x_pad[1, :, :, :], x_pad[1, :, :, :])
correlation2 = pf.my_correlation(x_pad[2, :, :, :], x_pad[2, :, :, :])
correlation3 = pf.my_correlation(x_pad[3, :, :, :], x_pad[3, :, :, :])
correlation4 = pf.my_correlation(x_pad[4, :, :, :], x_pad[4, :, :, :])

# KEY POINT: correlation averaging
correlation_average = (correlation0 + correlation1
                       + correlation2 + correlation3) / 4
# geometric-mean alternative:
# correlation_average = (correlation0 * correlation1 * correlation2 * correlation3) ** (1 / 4)
# plt.plot([correlation0[256, 128, 128], correlation1[256, 128, 128],
#           correlation4[256, 128, 128], correlation3[256, 128, 128]])

correlation_maximum = np.max(
    [correlation0, correlation1, correlation2, correlation3], axis=0)

# modulus calculated based on the autocorrelation average
# satellite = test.copy()

# normalization
satellite = satellite / satellite.mean()
psf_long /= psf_long.sum()
psf_round /= psf_round.sum()

image1 = satellite.copy()
image2 = satellite.copy()

plt.subplot(121), plt.imshow(image1)
plt.subplot(122), plt.imshow(image2)

# %% test my convolutions
autoconv = pf.my_convolution(image1, image2[::-1, ::-1])
autocorr = pf.my_correlation(image1, image2)
autoconv /= autoconv.max()
autocorr /= autocorr.max()
print(np.array_equal(autoconv, autocorr))

plt.subplot(131), plt.imshow(autoconv)
plt.subplot(132), plt.imshow(autocorr)
plt.subplot(133), plt.imshow(np.abs(autoconv - autocorr))

# test scipy convolutions
autoconv = signal.convolve(image1, image2[::-1, ::-1], mode='same', method='fft')
autocorr = signal.correlate(image1, image2, mode='same', method='fft')
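
# NOTE: bitwise equality between FFT-based convolution and correlation
# results almost never holds, because round-off accumulates differently in
# the two code paths even though conv(a, flip(b)) == corr(a, b) exactly in
# real arithmetic. A tolerance-based comparison is the meaningful check
# (tolerances are loose to cover float32 inputs):
print(np.allclose(autoconv, autocorr, rtol=1e-4, atol=1e-6))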
def anchorUpdateSK(signal, kernel, signal_deconv=np.float32(0), iterations=10,
                   measure=True, clip=False, verbose=True):
    # for code agnosticity between Numpy/Cupy
    xp = cp.get_array_module(signal)
    xps = cupyx.scipy.get_array_module(signal)

    # for performance evaluation
    start_time = time.time()

    if iterations < 100:
        breakcheck = iterations
    else:
        breakcheck = 100

    # normalization
    signal /= signal.sum()
    epsilon = 1e-7

    # starting guess with a flat image
    if signal_deconv.any() == 0:
        # xp.random.seed(0)
        signal_deconv = xp.full(signal.shape, 0.5) \
            + 0.01 * xp.random.rand(*signal.shape)
        # signal_deconv = signal.copy()

    # normalization
    signal_deconv = signal_deconv / signal_deconv.sum()

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        # I use this property to make computation faster.
        # NOTE: sigma is not a parameter; it is read from the enclosing
        # (module) scope and must be defined before calling this function.
        kernel_update = xps.ndimage.gaussian_filter(signal_deconv, sigma)
        # kernel_update = xps.ndimage.fourier_gaussian(signal_deconv, sigma)
        kernel_mirror = kernel_update

        relative_blur = my_correlation(signal_deconv, kernel_update)

        # compute the measured distance metric if given
        if measure:
            # error[i] = xp.linalg.norm(signal/signal.sum() - relative_blur/relative_blur.sum())
            error[i] = snrIntensity_db(
                signal / signal.sum(),
                xp.abs(signal / signal.sum() - relative_blur / relative_blur.sum()))
            # stop early when the SNR starts degrading
            if (error[i] < error[i - breakcheck]) and i > breakcheck:
                break

        if verbose and (i % 100) == 0 and not measure:
            print('Iteration ' + str(i))
        elif verbose and (i % 100) == 0 and measure:
            print('Iteration ' + str(i) + ' - noise level: ' + str(error[i]))

        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update, for the full model
        signal_deconv *= 0.5 * (my_convolution(relative_blur, kernel_mirror)
                                + my_correlation(axisflip(relative_blur), kernel_mirror))
        # signal_deconv *= (my_convolution(relative_blur, kernel_mirror)
        #                   + my_correlation(relative_blur, kernel_mirror))

        # multiplicative update, for the Anchor Update approximation
        # signal_deconv *= my_convolution(kernel_mirror, relative_blur)

        # multiplicative update, remaining term. This gives wrong reconstructions
        # signal_deconv *= my_correlation(axisflip(relative_blur), kernel_mirror)

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error
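
# Usage sketch (hypothetical, for illustration only). anchorUpdateSK reads
# `sigma` from module scope, so it must be assigned before the call; the toy
# object and iteration count below are arbitrary.
def _demo_anchorUpdateSK():
    global sigma
    sigma = 2.0
    obj = np.zeros((64, 64), dtype=np.float32)
    obj[20:30, 25:40] = 1.0
    measured = my_correlation(obj, obj)
    # the kernel argument is unused by this variant (the Gaussian is built
    # from sigma internally), so a placeholder is passed
    recon, err = anchorUpdateSK(measured, np.float32(0), iterations=300,
                                measure=True, verbose=False)
    return recon, err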
def rotatecrosStack(origStack, anglelist1, anglelist2, rotateaxes1=(0, 1),
                    rotateaxes2=(1, 2), reference=0, follow=True, gpu=True):
    # create the new stack
    rotatedStack = np.zeros_like(origStack)
    referenceVolume = origStack[reference, :, :, :]
    rotateAngle1 = np.zeros(origStack.shape[0])
    rotateAngle2 = np.zeros(origStack.shape[0])
    psi_previous = 0
    phi_previous = 0

    # perform the rotation view by view
    for i in range(origStack.shape[0]):
        # for performance evaluation
        start_time = time.time()

        if i != reference:
            xcorr_store = 0
            # brute-force search over the two angle lists
            for phi in anglelist1:
                for psi in anglelist2:
                    print('Rotating view ' + str(i) + ' with phi = ' + str(phi)
                          + ' and psi = ' + str(psi))
                    if gpu:
                        tempStack = cp.asarray(origStack[i, :, :, :])
                        referenceVolume = cp.asarray(referenceVolume)
                    else:
                        tempStack = origStack[i, :, :, :]

                    tempVolume = pv.tiltVolume(tempStack, phi + phi_previous,
                                               psi + psi_previous,
                                               rotateaxes1, rotateaxes2)
                    xcorr = pf.my_correlation(referenceVolume, tempVolume)
                    xcorr_max = xcorr.max()
                    if gpu:
                        xcorr_max = xcorr_max.get()

                    if xcorr_max > xcorr_store:
                        print('Update angle!')
                        xcorr_store = xcorr_max
                        phi_store = phi + phi_previous
                        psi_store = psi + psi_previous

            # keep following the previous best angles through the stack
            if follow:
                phi_previous = phi_store
                psi_previous = psi_store

            print('Best angle found: phi = ' + str(phi_store)
                  + ' psi = ' + str(psi_store))

            # apply the best rotation found for this view
            if gpu:
                tempStack = cp.asarray(origStack[i, :, :, :])
                rotatedStack[i, :, :, :] = pv.tiltVolume(
                    tempStack, phi_store, psi_store,
                    rotateaxes1, rotateaxes2).get()
            else:
                tempStack = origStack[i, :, :, :]
                rotatedStack[i, :, :, :] = pv.tiltVolume(
                    tempStack, phi_store, psi_store,
                    rotateaxes1, rotateaxes2)

            rotateAngle1[i] = phi_store
            rotateAngle2[i] = psi_store

            print("\n\n Angles checked. Performance:")
            print("--- %s seconds ----" % (time.time() - start_time))
        else:
            rotatedStack[i, :, :, :] = origStack[i, :, :, :].copy()
            rotateAngle1[i] = 0.
            rotateAngle2[i] = 0.

    return rotatedStack, rotateAngle1, rotateAngle2
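
# Usage sketch (hypothetical). Aligns each view of a 4-D stack (view, z, y, x)
# against view 0 by brute-force search over two tilt angles in 5-degree
# steps; the input stack is a stand-in for real data and the angle range is
# illustrative only.
def _demo_rotatecrosStack(stack):
    angles = np.arange(-10., 11., 5.)
    aligned, phi_found, psi_found = rotatecrosStack(
        stack, angles, angles, rotateaxes1=(0, 1), rotateaxes2=(1, 2),
        reference=0, follow=True, gpu=False)
    return aligned, phi_found, psi_found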
def anchorUpdateZ(signal, kernel, signal_deconv=np.float32(0), kerneltype='B',
                  iterations=10, measure=True, clip=False, verbose=True):
    """
    Reconstruction of signal_deconv from its auto-correlation signal, via a
    Richardson-Lucy-like multiplicative procedure. At the same time, the
    kernel psf is deconvolved from the reconstruction so that the iteration
    converges as corr(conv(signal_deconv, kernel), conv(signal_deconv, kernel)) -> signal.

    Parameters
    ----------
    signal : ndarray, either numpy or cupy.
        The auto-correlation to be inverted.
    kernel : ndarray, either numpy or cupy.
        Point spread function that blurred the signal. It must satisfy
        signal.shape == kernel.shape.
    signal_deconv : ndarray, either numpy or cupy, or 0, optional
        The de-autocorrelated signal deconvolved with kernel at the i-th
        iteration. It must satisfy signal.shape == signal_deconv.shape.
        The default is np.float32(0).
    kerneltype : string, optional
        Type of kernel update used for the computation, chosen between
        blurring the autocorrelation directly ('A'), blurring the signal
        that is then autocorrelated ('B'), and a window applied in the
        Fourier domain ('C'). The default is 'B'.
    iterations : int, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, computes the euclidean distance between signal and the
        auto-correlation of signal_deconv. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. Useless for the moment.
        The default is False.
    verbose : boolean, optional
        Print the current step value. The default is True.

    Returns
    -------
    signal_deconv : ndarray, either numpy or cupy.
        The de-autocorrelated signal deconvolved with kernel at the i-th
        iteration.
    error : vector.
        Euclidean distance between signal and the auto-correlation of
        signal_deconv. The last implementation returns the SNR instead of
        the euclidean distance.
""" # for code agnosticity between Numpy/Cupy xp = pyb.get_array_module(signal) # for performance evaluation start_time = time.time() if iterations < 100: breakcheck = iterations else: breakcheck = 100 # normalization signal /= signal.sum() kernel /= kernel.sum() epsilon = 1e-7 # starting guess with a flat image if signal_deconv.any() == 0: # xp.random.seed(0) signal_deconv = xp.full(signal.shape, 0.5) + 0.01 * xp.random.rand(*signal.shape) # signal_deconv = signal.copy() else: signal_deconv = signal_deconv #+ 0.1*prior.max()*xp.random.rand(*signal.shape) # normalization signal_deconv = signal_deconv / signal_deconv.sum() # to measure the distance between the guess convolved and the signal error = None if measure == True: error = xp.zeros(iterations) for i in range(iterations): # I use this property to make computation faster K = my_convolution(signal_deconv, my_correlation(kernel, kernel)) relative_blur = my_correlation(K, signal_deconv) # compute the measured distance metric if given if measure == True: #error[i] = xp.linalg.norm(signal/signal.sum()-relative_blur/relative_blur.sum()) error[i] = snrIntensity_db( signal / signal.sum(), xp.abs(signal / signal.sum() - relative_blur / relative_blur.sum())) if (error[i] < error[i - breakcheck]) and i > breakcheck: break if verbose == True and (i % 100) == 0 and measure == False: print('Iteration ' + str(i)) elif verbose == True and (i % 100) == 0 and measure == True: print('Iteration ' + str(i) + ' - noise level: ' + str(error[i])) relative_blur = signal / relative_blur # avoid errors due to division by zero or inf relative_blur[xp.isinf(relative_blur)] = epsilon relative_blur = xp.nan_to_num(relative_blur) # multiplicative update, for the full model # signal_deconv *= 0.5 * (my_convolution(relative_blur, kernel_mirror) + my_correlation(axisflip(relative_blur), kernel_mirror)) # signal_deconv *= (my_convolution(kernel_mirror,relative_blur) + my_correlation(relative_blur, kernel_mirror)) # multiplicative update, for the Anchor Update approximation signal_deconv *= my_correlation((relative_blur), (K)) # signal_deconv *= (my_correlation(relative_blur, K) + my_convolution(relative_blur, K)) # multiplicative update, remaining term. This gives wrong reconstructions # signal_deconv *= my_correlation(axisflip(relative_blur), kernel_mirror) if clip: signal_deconv[signal_deconv > +1] = +1 signal_deconv[signal_deconv < -1] = -1 print("\n\n Algorithm finished. Performance:") print("--- %s seconds ----" % (time.time() - start_time)) print("--- %s sec/step ---" % ((time.time() - start_time) / iterations)) return signal_deconv, error #,kernel_update
def invert_autoconvolution(magnitude, prior=None, mask=None, measure=True,
                           steps=200, mode='deautocorrelation', verbose=True):
    # agnostic code: xp is either numpy or cupy, depending on the magnitude array module
    xp = pyb.get_array_module(magnitude)

    # object support constraint
    if mask is None:
        mask = xp.ones(magnitude.shape)

    # assert magnitude.shape == mask.shape, 'mask and magnitude should have same shape'
    assert steps > 0, 'steps should be a positive number'
    assert mode == 'deautoconvolution' or mode == 'deautocorrelation',\
        'mode should be \'deautoconvolution\' or \'deautocorrelation\''

    # random phase if prior is None, otherwise start with the prior Fourier
    if prior is None:
        x_hat = 1 + 0.01 * xp.random.rand(*magnitude.shape)
    else:
        x_hat = prior

    if measure:
        ratio = xp.zeros(steps)
    else:
        ratio = None

    x_hat = x_hat * mask
    y_mes = 0.5 * (magnitude + magnitude[::-1, ::-1])

    # normalization for energy preservation
    y0 = (xp.sum(x_hat))**2
    # y0 = xp.sum(x_hat)
    x_hat = xp.divide(x_hat, xp.sqrt(y0))

    # loop for the minimization; I guess there can be an analogue for the autocorrelation
    if mode == "deautoconvolution":
        for i in range(0, steps):
            y = my_convolution(x_hat, x_hat)
            # u_hat = y_mes / y, with zero divided by zero equal to zero
            u_hat = xp.divide(y_mes, y, out=xp.zeros_like(y_mes), where=y != 0)
            if measure:
                ratio[i] = u_hat.mean()
            r_hat = 1 / xp.sqrt(y0) * my_convolution(u_hat, x_hat)
            x_hat = x_hat * r_hat
    # not ready yet
    elif mode == "deautocorrelation":
        for i in range(0, steps):
            y = my_correlation(x_hat, x_hat)
            # ratio[i] = xp.linalg.norm(y_mes - y)
            u_hat = xp.divide(y_mes, y, out=xp.zeros_like(y_mes), where=y != 0)
            if measure:
                ratio[i] = u_hat.mean()
            r_hat = (0.5 / xp.sqrt(y0)) * (my_correlation(x_hat, u_hat)
                                           + my_convolution(x_hat, u_hat))
            # r_hat = (0.5 / y0) * (my_correlation(x_hat[::-1, ::-1], u_hat)
            #                       + my_convolution(x_hat, u_hat))
            x_hat = x_hat * r_hat

    return (x_hat, ratio)
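
# Usage sketch (hypothetical): recover a non-negative toy object from its
# autoconvolution. The measurement is symmetrized internally, so the raw
# autoconvolution can be passed directly as `magnitude`.
def _demo_invert_autoconvolution():
    obj = np.zeros((64, 64), dtype=np.float32)
    obj[20:30, 25:40] = 1.0
    measured = my_convolution(obj, obj)
    estimate, ratio = invert_autoconvolution(measured, steps=200,
                                             mode='deautoconvolution')
    return estimate, ratio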
def schulzSnyder(correlation, prior=np.float32(0), iterations=10,
                 measure=True, clip=False, verbose=True):
    """
    De-autocorrelation protocol implemented following Schulz-Snyder. It needs
    to be checked to assess the working procedure.

    Parameters
    ----------
    correlation : ndarray, either numpy or cupy.
        The auto-correlation to be inverted.
    prior : ndarray, optional
        Starting guess for the reconstruction. The default is np.float32(0),
        which means a flat image plus a small random perturbation is used.
    iterations : int, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, computes the distance between the measured correlation and
        the auto-correlation of the current estimate. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. The default is False.
    verbose : boolean, optional
        Print the current step value. The default is True.

    Returns
    -------
    signal_decorr : ndarray, either numpy or cupy.
        The de-autocorrelated signal at the last iteration.
    error : vector.
        SNR (in dB) between the measured correlation and the
        auto-correlation of the estimate, per iteration.
    """
    xp = pyb.get_array_module(correlation)

    # for performance evaluation
    start_time = time.time()
    epsilon = 1e-7

    if iterations < 10:
        breakcheck = iterations
    else:
        breakcheck = 10

    # starting guess with a flat image
    if prior.any() == 0:
        signal_decorr = xp.full(correlation.shape, 0.5) \
            + 0.01 * xp.random.rand(*correlation.shape)
    else:
        signal_decorr = prior.copy()

    R_0 = signal_decorr.sum()
    signal_decorr = signal_decorr / R_0
    relative_corr = xp.zeros_like(signal_decorr)

    # to measure the distance between the guess correlated and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        relative_corr = my_correlation(signal_decorr, signal_decorr)

        if measure:
            # error[i] = xp.linalg.norm(correlation/correlation.sum() - relative_corr/relative_corr.sum())
            error[i] = snrIntensity_db(
                correlation / correlation.sum(),
                xp.abs(correlation / correlation.sum()
                       - relative_corr / relative_corr.sum()))
            # stop early when the SNR starts degrading
            if (error[i] < error[i - breakcheck]) and i > breakcheck:
                break

        if verbose and (i % 100) == 0 and not measure:
            print('Iteration ' + str(i))
        elif verbose and (i % 100) == 0 and measure:
            print('Iteration ' + str(i) + ' - noise level: ' + str(error[i]))

        # relative_corr = 0.5 * (correlation + axisflip(correlation)) / relative_corr
        relative_corr = correlation / relative_corr

        # avoid errors due to division by zero or inf
        relative_corr[xp.isinf(relative_corr)] = epsilon
        relative_corr = xp.nan_to_num(relative_corr)

        # multiplicative update
        signal_decorr *= (my_correlation(relative_corr, signal_decorr)
                          + my_convolution(relative_corr, signal_decorr)) / R_0
        # alternative updates explored during development:
        # signal_decorr *= my_correlation(axisflip(signal_decorr), relative_corr) / R_0
        # signal_decorr *= my_correlation(relative_corr, signal_decorr) / R_0
        # signal_decorr *= (my_correlation(relative_corr, signal_decorr)
        #                   + my_correlation(relative_corr, axisflip(signal_decorr))) / R_0

    if clip:
        signal_decorr[signal_decorr > +1] = +1
        signal_decorr[signal_decorr < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_decorr, error
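
# Usage sketch (hypothetical): invert the autocorrelation of a toy object
# with the Schulz-Snyder iteration. The reconstruction is only defined up to
# translation and axis flip, which is intrinsic to de-autocorrelation.
def _demo_schulzSnyder():
    obj = np.zeros((64, 64), dtype=np.float32)
    obj[20:30, 25:40] = 1.0
    measured = my_correlation(obj, obj)
    estimate, err = schulzSnyder(measured, iterations=1000, measure=True,
                                 verbose=False)
    return estimate, err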
def anchorUpdate_H(signal, kernel, signal_deconv=np.float32(0), iterations=10,
                   measure=True, clip=False, verbose=True):
    xp = cp.get_array_module(signal)
    start_time = time.time()

    # normalization
    signal = signal / signal.sum()
    # kernel = kernel / kernel.sum()
    # compute the norm of the fourier transform of the kernel
    # kernel = xp.fft.rfftn(kernel)
    epsilon = 1e-7

    # starting guess with a flat image
    if signal_deconv.any() == 0:
        signal_deconv = xp.full(signal.shape, 0.5) \
            + 0.01 * xp.random.rand(*signal.shape)

    signal_deconv = signal_deconv / signal_deconv.sum()

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        if verbose and (i % 100) == 0:
            print('Iteration ' + str(i))

        # I use this property to make computation faster
        # kernel_update = my_correlation_withfft(signal_deconv, kernel)
        kernel_update = my_correlation(signal_deconv, kernel)
        # kernel_update = kernel_update / kernel_update.sum()
        kernel_mirror = axisflip(kernel_update)

        relative_blur = my_convolution(signal_deconv, kernel_update)

        if measure:
            error[i] = xp.linalg.norm(signal / signal.max()
                                      - relative_blur / relative_blur.max())

        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update
        signal_deconv *= my_convolution(relative_blur, kernel_mirror)

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error
def anchorUpdate(signal, kernel, signal_deconv=np.float32(0), iterations=10,
                 measure=True, clip=False, verbose=True):
    """
    Reconstruction of signal_deconv from its auto-correlation signal, via a
    Richardson-Lucy-like multiplicative procedure. At the same time, the
    kernel psf is deconvolved from the reconstruction so that the iteration
    converges as corr(conv(signal_deconv, kernel), conv(signal_deconv, kernel)) -> signal.

    Parameters
    ----------
    signal : ndarray, either numpy or cupy.
        The auto-correlation to be inverted.
    kernel : ndarray, either numpy or cupy.
        Point spread function that blurred the signal. It must satisfy
        signal.shape == kernel.shape.
    signal_deconv : ndarray, either numpy or cupy, or 0, optional
        The de-autocorrelated signal deconvolved with kernel at the i-th
        iteration. It must satisfy signal.shape == signal_deconv.shape.
        The default is np.float32(0).
    iterations : int, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, computes the euclidean distance between signal and the
        auto-correlation of signal_deconv. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. Useless for the moment.
        The default is False.
    verbose : boolean, optional
        Print the current step value. The default is True.

    Returns
    -------
    signal_deconv : ndarray, either numpy or cupy.
        The de-autocorrelated signal deconvolved with kernel at the i-th
        iteration.
    error : vector.
        Euclidean distance between signal and the auto-correlation of
        signal_deconv.
    """
    xp = cp.get_array_module(signal)
    start_time = time.time()

    # normalization
    signal = signal / signal.sum()
    kernel = kernel / kernel.sum()
    epsilon = 1e-7

    # starting guess with a flat image
    if signal_deconv.any() == 0:
        signal_deconv = xp.full(signal.shape, 0.5) \
            + 0.01 * xp.random.rand(*signal.shape)

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure:
        error = xp.zeros(iterations)

    for i in range(iterations):
        if verbose and (i % 100) == 0:
            print('Iteration ' + str(i))

        # kernel update rule
        kernel_update = my_convolution(signal_deconv, kernel)
        kernel_update = my_correlation(kernel_update, kernel)
        kernel_update = kernel_update / kernel_update.sum()
        kernel_mirror = axisflip(kernel_update)

        relative_blur = my_convolution(signal_deconv, kernel_update)

        if measure:
            error[i] = xp.linalg.norm(signal - relative_blur)

        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update
        signal_deconv *= my_convolution(relative_blur, kernel_mirror)

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error
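
# Usage sketch (hypothetical): the full pipeline the docstring describes.
# A toy object is blurred by a Gaussian PSF built inline, its autocorrelation
# is taken as the measurement, and anchorUpdate inverts it while deconvolving
# the PSF. Sizes and iteration counts are illustrative only.
def _demo_anchorUpdate():
    yy, xx = np.mgrid[-32:32, -32:32]
    kernel = np.exp(-(xx**2 + yy**2) / (2 * 3.0**2)).astype(np.float32)
    kernel /= kernel.sum()
    obj = np.zeros((64, 64), dtype=np.float32)
    obj[20:30, 25:40] = 1.0
    blurred = my_convolution(obj, kernel)
    measured = my_correlation(blurred, blurred)
    recon, err = anchorUpdate(measured, kernel, iterations=500,
                              measure=True, verbose=False)
    return recon, err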