def my_findshiftND(function1, function2):
    xp = pyb.get_array_module(function1)

    xcorr = my_correlation(function1, function2)
    maxvalue = xcorr.max()

    if cupy_enabled and xp != np:
        maxposition = xp.unravel_index(xp.argmax(xcorr).get(), xcorr.shape)
    else:
        maxposition = xp.unravel_index(xp.argmax(xcorr), xcorr.shape)

    print('Max position is ' + str(maxposition))

    center = tuple(int(x / 2) for x in xcorr.shape)
    shift = np.asarray(center) - np.asarray(maxposition)

    # if xp == cp:
    #     shift = tuple(shift.get())
    #     # maxvalue = maxvalue.get()
    # else:
    shift = tuple(shift)

    print('The shift between images is ' + str(shift))

    return shift, maxvalue
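# Illustrative usage sketch for my_findshiftND (an assumption, not part of the
# original module): two NumPy images related by a circular shift; the returned
# shift is the offset that my_alignND would apply to register them.
def _example_findshift():
    import numpy as np
    reference = np.random.rand(32, 32)
    shifted = np.roll(reference, shift=(4, -7), axis=(0, 1))
    # the recovered shift should undo the applied one, up to the sign
    # convention of the correlation peak
    shift, peak = my_findshiftND(reference, shifted)
    return shift, peak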
def threshold_maskND(function, fraction=0):
    # default value is positive thresholding
    xp = pyb.get_array_module(function)

    value = xp.max(function) * fraction
    mask = (function > value)

    return mask
def maxAPosteriori_smallKernel(image, psf, iterations=10, clip=True, verbose=False):
    print('This procedure may be very slow! -> Use it with small psf!!!')

    xp = pyb.get_array_module(image)
    xps = cupyx.scipy.get_array_module(image)

    # xp.float (an alias of the builtin float) was removed from NumPy,
    # so cast explicitly to float64
    image = image.astype(xp.float64)
    psf = psf.astype(xp.float64)
    im_deconv = xp.full(image.shape, 0.5)
    psf_flip = axisflip(psf)

    for i in range(iterations):
        if verbose == True:
            print('Iteration ' + str(i))

        relative_blur = xps.ndimage.convolve(im_deconv, psf)
        relative_blur = image / relative_blur - 1
        im_deconv *= xp.exp(xps.ndimage.convolve(relative_blur, psf_flip))

    if clip:
        im_deconv[im_deconv > 1] = 1
        im_deconv[im_deconv < -1] = -1

    return im_deconv
def weighted_average(x, axis=0, mode='poisson'):
    xp = pyb.get_array_module(x)

    if mode == 'poisson':
        w = xp.abs(x)
        w = xp.sqrt(w)

    # w_sum = xp.sum(w, axis=axis)
    mean = xp.sum((w * x), axis=axis)
    # mean = mean / w_sum

    return mean
def _convolve(
    in1,
    in2,
    use_convolve,
    swapped_inputs,
    mode,
):

    xp = pyb.get_array_module(in1)

    val = _valfrommode(mode)

    # Promote inputs
    promType = xp.promote_types(in1.dtype, in2.dtype)
    in1 = in1.astype(promType)
    in2 = in2.astype(promType)

    # Create empty array to hold number of output dimensions
    # (np.int was removed from NumPy, use the builtin int)
    out_dimens = np.empty(in1.ndim, int)
    if val == VALID:
        for i in range(in1.ndim):
            out_dimens[i] = (max(in1.shape[i], in2.shape[i]) -
                             min(in1.shape[i], in2.shape[i]) + 1)
            if out_dimens[i] < 0:
                raise Exception(
                    "no part of the output is valid, use option 1 (same) or "
                    "2 (full) for third argument")
    elif val == SAME:
        for i in range(in1.ndim):
            if not swapped_inputs:
                out_dimens[i] = in1.shape[i]  # Per scipy docs
            else:
                out_dimens[i] = min(in1.shape[i], in2.shape[i])
    elif val == FULL:
        for i in range(in1.ndim):
            out_dimens[i] = in1.shape[i] + in2.shape[i] - 1
    else:
        raise Exception("mode must be 0 (valid), 1 (same), or 2 (full)")

    # Create empty array out on GPU
    out = xp.empty(out_dimens.tolist(), in1.dtype)

    out = _convolve_gpu(
        in1,
        out,
        in2,
        val,
        use_convolve,
        swapped_inputs,
    )

    return out
def my_correlationCentered(function1, function2):
    xp = pyb.get_array_module(function1)

    temp = xp.conj(xp.fft.rfftn(function1))
    temp = temp * xp.fft.rfftn(function2)
    temp = xp.fft.irfftn(temp, s=function1.shape)
    temp = xp.fft.fftshift(temp)

    # trying to remove residual shifts
    temp = autocorrelation2fouriermod(temp)
    temp = fouriermod2autocorrelation(temp)

    return temp
def sparsity_maskND(function, fraction=0):
    # default value is positive thresholding
    xp = pyb.get_array_module(function)

    k = int(np.size(function) * fraction)
    print(k)

    linear = xp.reshape(function, -1)

    # find k max values within function
    idx = xp.argpartition(linear, -k)[-k:]

    # masking out all the min-k values
    mask = (function >= xp.min(linear[idx]))

    return mask
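# Illustrative usage sketch for sparsity_maskND (an assumption, not part of the
# original module): keep roughly the brightest 5% of pixels of an image.
def _example_sparsity_mask():
    import numpy as np
    img = np.random.rand(64, 64)
    mask = sparsity_maskND(img, fraction=0.05)
    return mask.sum()  # expected to be about 0.05 * img.size pixels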
def my_convcorr(function1, function2):
    xp = pyb.get_array_module(function1)

    temp = xp.fft.rfftn(function2)
    temp = xp.conj((xp.fft.rfftn(function1)) * temp) * temp

    # temp = xp.square(xp.abs(xp.fft.rfftn(function2)))
    # temp = xp.conj(xp.fft.rfftn(function1)) * temp

    # temp = xp.fft.rfftn(function2)
    # temp = temp.real**2 + temp.imag**2
    # temp = xp.conj(xp.fft.rfftn(function1)) * temp
    # return xp.fft.fftshift((xp.fft.irfftn(temp)))

    return xp.fft.irfftn(temp, s=function1.shape)
def my_gaussBlurInv(function, alpha):
    xp = pyb.get_array_module(function)

    direction = function.ndim
    normalization = 1.  # energy is preserved

    # the Gaussian filter is separable, so loop through the dimensions and
    # apply a 1D Fourier-domain filter along each axis of the function
    for i in range(direction):
        size = function.shape[i]
        x = xp.arange(0, size, dtype=xp.float32)
        gaussian_1d = xp.fft.fftshift(
            xp.exp(-0.5 * ((x - size / 2)**2.0) / (alpha**2)) / normalization)

        reshape = np.ones_like(function.shape)
        reshape[i] = gaussian_1d.shape[0]
        temp = gaussian_1d.reshape(reshape)

        function = xp.fft.ifft(xp.fft.fft(function, axis=i) * temp, axis=i)

    return xp.fft.fftshift(xp.real(function))
def wiener_deconvolution(signal, kernel, snr):
    xp = pyb.get_array_module(signal)

    # build a tuple of pad widths so that the kernel is zero-padded to the
    # same shape as the signal (xp.pad keeps the code numpy/cupy agnostic)
    difference = tuple((0, x - y) for x, y in zip(signal.shape, kernel.shape))
    kernel = xp.pad(kernel, difference, mode='constant')

    # Wiener deconvolution starts here, snr is the signal-to-noise ratio
    H = xp.fft.fftn(kernel)
    # G = ( xp.conj(H) / (H*xp.conj(H) + snr**2) )
    # deconvolved = xp.fft.fftshift( xp.real( xp.fft.ifft(xp.fft.fftn(signal) * G) ) )
    deconvolved = xp.fft.fftshift(
        xp.real(
            xp.fft.ifftn((xp.fft.fftn(signal) * xp.conj(H)) /
                         (H * xp.conj(H) + snr**2))))

    return deconvolved
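# Illustrative usage sketch for wiener_deconvolution (an assumption, not part of
# the original module): blur an image with my_convolution and recover it with a
# guessed SNR value for the Wiener filter denominator.
def _example_wiener():
    import numpy as np
    truth = np.zeros((64, 64))
    truth[20:30, 25:40] = 1.0
    psf = np.zeros((64, 64))
    psf[30:34, 30:34] = 1.0 / 16  # small box blur, same shape as the signal
    blurred = my_convolution(truth, psf)
    recovered = wiener_deconvolution(blurred, psf, snr=0.1)
    return recovered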
def my_alignND(function1, function2, mode='normal'):
    """
    Shift function2, keeping function1 as a reference, based on the
    cross-correlation between the two.

    Parameters
    ----------
    function1 : ndimage
        reference ndimage for the shift.
    function2 : ndimage
        ndimage to be shifted.
    mode : string, optional
        'normal' performs full cross-correlation registration, 'fast' projects
        function1 and function2 along each axis and performs the
        cross-correlation check on the projections, 'flip' also checks whether
        the image needs to be flipped to achieve a higher correlation.
        The default is 'normal'.

    Returns
    -------
    function2 : ndimage
        the input function2 registered onto function1.

    """
    xp = pyb.get_array_module(function1)

    if mode == 'fast':
        shift = my_findshiftND_fast(function1, function2)
    elif mode == 'flip':
        (shift, maxvalue) = my_findshiftND(function1, function2)
        (shift_flip, maxvalue_flip) = my_findshiftND(function1,
                                                     xp.flip(function2))
        if maxvalue_flip > maxvalue:
            shift = shift_flip
            function2 = xp.flip(function2)
            print('We did flip it!')
    else:
        (shift, _) = my_findshiftND(function1, function2)

    for i in range(0, len(shift)):
        function2 = xp.roll(function2, shift[i], axis=i)

    return function2
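# Illustrative usage sketch for my_alignND (an assumption, not part of the
# original module): register a circularly shifted copy back onto its reference.
def _example_align():
    import numpy as np
    reference = np.random.rand(32, 32)
    moved = np.roll(reference, shift=(5, -3), axis=(0, 1))
    registered = my_alignND(reference, moved, mode='normal')
    # in this noiseless case the output should closely match the reference,
    # up to the module's shift convention
    return registered, reference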
def _convolve_gpu(
    inp,
    out,
    ker,
    mode,
    use_convolve,
    swapped_inputs,
):

    xp = pyb.get_array_module(inp)

    d_inp = xp.array(inp)
    d_kernel = xp.array(ker)

    threadsperblock, blockspergrid = _get_tpb_bpg()

    if use_convolve:
        k_type = "convolve"
        _populate_kernel_cache(out.dtype, k_type)
        kernel = _get_backend_kernel(
            out.dtype,
            blockspergrid,
            threadsperblock,
            k_type,
        )
    else:
        k_type = "correlate"
        _populate_kernel_cache(out.dtype, k_type)
        kernel = _get_backend_kernel(
            out.dtype,
            blockspergrid,
            threadsperblock,
            k_type,
        )

    kernel(d_inp, d_kernel, mode, swapped_inputs, out)

    _print_atts(kernel)

    return out
def _numeric_arrays(arrays, kinds="buifc"):
    """
    See if a list of arrays are all numeric.

    Parameters
    ----------
    ndarrays : array or list of arrays
        arrays to check if numeric.
    numeric_kinds : string-like
        The dtypes of the arrays to be checked. If the dtype.kind of
        the ndarrays are not in this string the function returns False and
        otherwise returns True.
    """
    xp = pyb.get_array_module(arrays)

    if type(arrays) == xp.ndarray:
        return arrays.dtype.kind in kinds
    for array_ in arrays:
        if array_.dtype.kind not in kinds:
            return False
    return True
def _convolve2d_gpu(
    inp,
    out,
    ker,
    mode,
    boundary,
    use_convolve,
    fillvalue,
):

    xp = pyb.get_array_module(inp)

    if (boundary != PAD) and (boundary != REFLECT) and (boundary != CIRCULAR):
        raise Exception("Invalid boundary flag")

    S = np.zeros(2, dtype=int)

    # If kernel is square and odd
    if ker.shape[0] == ker.shape[1]:  # square
        if ker.shape[0] % 2 == 1:  # odd
            pick = 1
            S[0] = (ker.shape[0] - 1) // 2
            if mode == 2:  # full
                P1 = P2 = P3 = P4 = S[0] * 2
            else:  # same/valid
                P1 = P2 = P3 = P4 = S[0]
        else:  # even
            pick = 2
            S[0] = ker.shape[0] // 2
            if mode == 2:  # full
                P1 = P2 = P3 = P4 = S[0] * 2 - 1
            else:  # same/valid
                if use_convolve:
                    P1 = P2 = P3 = P4 = S[0]
                else:
                    P1 = P3 = S[0] - 1
                    P2 = P4 = S[0]
    else:  # Non-square
        pick = 3
        S[0] = ker.shape[0]
        S[1] = ker.shape[1]
        if mode == 2:  # full
            P1 = S[0] - 1
            P2 = S[0] - 1
            P3 = S[1] - 1
            P4 = S[1] - 1
        else:  # same/valid
            if use_convolve:
                P1 = S[0] // 2
                P2 = S[0] // 2 if (S[0] % 2) else S[0] // 2 - 1
                P3 = S[1] // 2
                P4 = S[1] // 2 if (S[1] % 2) else S[1] // 2 - 1
            else:
                P1 = S[0] // 2 if (S[0] % 2) else S[0] // 2 - 1
                P2 = S[0] // 2
                P3 = S[1] // 2 if (S[1] % 2) else S[1] // 2 - 1
                P4 = S[1] // 2

    if mode == 1:  # SAME
        pad = ((P1, P2), (P3, P4))
        if boundary == REFLECT:
            inp = xp.pad(inp, pad, "symmetric")
        if boundary == CIRCULAR:
            inp = xp.pad(inp, pad, "wrap")
        if boundary == PAD:
            inp = xp.pad(inp, pad, "constant", constant_values=(fillvalue))

    if mode == 2:  # FULL
        pad = ((P1, P2), (P3, P4))
        if boundary == REFLECT:
            inp = xp.pad(inp, pad, "symmetric")
        if boundary == CIRCULAR:
            inp = xp.pad(inp, pad, "wrap")
        if boundary == PAD:
            inp = xp.pad(inp, pad, "constant", constant_values=(fillvalue))

    paddedW = inp.shape[1]
    paddedH = inp.shape[0]

    outW = out.shape[1]
    outH = out.shape[0]

    d_inp = xp.array(inp)
    d_kernel = xp.array(ker)

    threadsperblock = (16, 16)
    blockspergrid = (
        _iDivUp(out.shape[1], threadsperblock[0]),
        _iDivUp(out.shape[0], threadsperblock[1]),
    )

    if use_convolve:
        k_type = "convolve2D"
        _populate_kernel_cache(out.dtype, k_type)
        kernel = _get_backend_kernel(
            out.dtype,
            blockspergrid,
            threadsperblock,
            k_type,
        )
    else:
        k_type = "correlate2D"
        _populate_kernel_cache(out.dtype, k_type)
        kernel = _get_backend_kernel(
            out.dtype,
            blockspergrid,
            threadsperblock,
            k_type,
        )

    kernel(d_inp, paddedW, paddedH, d_kernel, S[0], S[1], out, outW, outH,
           pick)

    _print_atts(kernel)

    return out
def my_convcorr_sqfft(function1, function2):
    xp = pyb.get_array_module(function1)

    temp = xp.conj(xp.fft.rfftn(function1)) * function2

    return xp.fft.irfftn(temp, s=function1.shape)
def maxAPosteriori(signal,
                   kernel,
                   prior=np.float32(0),
                   iterations=10,
                   measure=True,
                   clip=True,
                   verbose=False):
    """
    Deconvolution using the Maximum a Posteriori algorithm. The implementation
    is identical to the Richardson-Lucy algorithm but with a different
    multiplicative rule for the update.

    Parameters
    ----------
    signal : ndarray, either numpy or cupy.
        The signal to be deblurred.
    kernel : ndarray, either numpy or cupy.
        Point spread function that blurred the signal. It must be
        signal.shape == kernel.shape.
    prior : ndarray, either numpy or cupy, optional
        The prior information to start the reconstruction.
        The default is np.float32(0).
    iterations : integer, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, computes the euclidean distance between signal and
        signal_deconv convolved with kernel. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. The default is True.
    verbose : boolean, optional
        Print the current step value. The default is False.

    Returns
    -------
    signal_deconv : ndarray, either numpy or cupy.
        The deconvolved signal with respect to the given kernel at the ith
        iteration.
    error : one dimensional ndarray.
        Euclidean distance between signal and signal_deconv convolved with
        kernel.

    """
    xp = pyb.get_array_module(signal)

    start_time = time.time()
    epsilon = 1e-7

    # starting guess with a flat image
    if prior.any() == 0:
        signal_deconv = xp.full(signal.shape,
                                0.5) + 0.01 * xp.random.rand(*signal.shape)
    else:
        signal_deconv = prior  # + 0.1*prior.max()*xp.random.rand(*signal.shape)

    kernel_mirror = axisflip(kernel)

    error = None
    if measure == True:
        error = xp.zeros(iterations)

    for i in range(iterations):
        if verbose == True and (i % 100) == 0:
            print('Iteration ' + str(i))

        relative_blur = my_convolution(signal_deconv, kernel)
        if measure == True:
            error[i] = xp.linalg.norm(signal / signal.sum() -
                                      relative_blur / relative_blur.sum())
        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update given by the MAP
        signal_deconv *= xp.exp(
            my_convolution(relative_blur - 1, kernel_mirror))

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error
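# Illustrative usage sketch for maxAPosteriori (an assumption, not part of the
# original module): deblur a synthetic image with a known PSF of the same shape.
def _example_map_deconvolution():
    import numpy as np
    truth = np.zeros((32, 32))
    truth[12:20, 10:22] = 1.0
    psf = np.zeros((32, 32))
    psf[14:18, 14:18] = 1.0 / 16  # same shape as the signal, as required
    blurred = my_convolution(truth, psf)
    estimate, error = maxAPosteriori(blurred, psf, iterations=20, verbose=False)
    return estimate, error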
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200):
    """
    Perform total-variation denoising on n-dimensional images.

    This function was imported directly from skimage.restoration; the np calls
    were replaced with xp for code agnosticity, so it also runs on the GPU.

    Parameters
    ----------
    image : ndarray
        n-D input data to be denoised.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Relative difference of the value of the cost function that determines
        the stop criterion. The algorithm stops when:

            (E_(n-1) - E_n) < eps * E_0

    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.

    Returns
    -------
    out : ndarray
        Denoised array of floats.

    Notes
    -----
    Rudin, Osher and Fatemi algorithm.
    """
    xp = pyb.get_array_module(image)

    ndim = image.ndim
    p = xp.zeros((image.ndim, ) + image.shape, dtype=image.dtype)
    g = xp.zeros_like(p)
    d = xp.zeros_like(image)
    i = 0
    while i < n_iter_max:
        if i > 0:
            # d will be the (negative) divergence of p
            d = -p.sum(0)
            slices_d = [
                slice(None),
            ] * ndim
            slices_p = [
                slice(None),
            ] * (ndim + 1)
            for ax in range(ndim):
                slices_d[ax] = slice(1, None)
                slices_p[ax + 1] = slice(0, -1)
                slices_p[0] = ax
                d[tuple(slices_d)] += p[tuple(slices_p)]
                slices_d[ax] = slice(None)
                slices_p[ax + 1] = slice(None)
            out = image + d
        else:
            out = image
        E = (d**2).sum()

        # g stores the gradients of out along each axis
        # e.g. g[0] is the first order finite difference along axis 0
        slices_g = [
            slice(None),
        ] * (ndim + 1)
        for ax in range(ndim):
            slices_g[ax + 1] = slice(0, -1)
            slices_g[0] = ax
            g[tuple(slices_g)] = xp.diff(out, axis=ax)
            slices_g[ax + 1] = slice(None)

        norm = xp.sqrt((g**2).sum(axis=0))[xp.newaxis, ...]
        E += weight * norm.sum()
        tau = 1. / (2. * ndim)
        norm *= tau / weight
        norm += 1.
        p -= tau * g
        p /= norm
        E /= float(image.size)
        if i == 0:
            E_init = E
            E_previous = E
        else:
            if xp.abs(E_previous - E) < eps * E_init:
                break
            else:
                E_previous = E
        i += 1

    return out
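# Illustrative usage sketch for _denoise_tv_chambolle_nd (an assumption, not
# part of the original module): denoise a noisy 2D image with a moderate weight.
def _example_tv_denoise():
    import numpy as np
    rng = np.random.default_rng(0)
    clean = np.zeros((64, 64))
    clean[16:48, 16:48] = 1.0
    noisy = clean + 0.2 * rng.standard_normal(clean.shape)
    smoothed = _denoise_tv_chambolle_nd(noisy, weight=0.2, n_iter_max=100)
    return smoothed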
def schulzSnyder(correlation,
                 prior=np.float32(0),
                 iterations=10,
                 measure=True,
                 clip=False,
                 verbose=True):
    """
    De-autocorrelation protocol following Schulz-Snyder. It still needs to be
    checked to assess the working procedure.

    Parameters
    ----------
    correlation : ndarray, either numpy or cupy.
        The auto-correlation to be inverted.
    prior : ndarray, either numpy or cupy, optional
        The prior information to start the reconstruction.
        The default is np.float32(0).
    iterations : int, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, measures the distance between the given correlation and the
        auto-correlation of the current estimate. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. The default is False.
    verbose : boolean, optional
        Print the current step value. The default is True.

    Returns
    -------
    signal_decorr : ndarray, either numpy or cupy.
        The de-autocorrelated signal at the ith iteration.
    error : one dimensional ndarray.
        Measured distance between the given correlation and the
        auto-correlation of signal_decorr.

    """
    xp = pyb.get_array_module(correlation)

    # for performance evaluation
    start_time = time.time()
    epsilon = 1e-7

    if iterations < 10:
        breakcheck = iterations
    else:
        breakcheck = 10

    # starting guess with a flat image
    if prior.any() == 0:
        signal_decorr = xp.full(
            correlation.shape, 0.5) + 0.01 * xp.random.rand(*correlation.shape)
    else:
        signal_decorr = prior.copy()  # + 0.1*prior.max()*xp.random.rand(*signal.shape)

    R_0 = signal_decorr.sum()
    signal_decorr = signal_decorr / R_0
    relative_corr = xp.zeros_like(signal_decorr)

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure == True:
        error = xp.zeros(iterations)

    for i in range(iterations):
        relative_corr = my_correlation(signal_decorr, signal_decorr)

        if measure == True:
            # error[i] = xp.linalg.norm(correlation/correlation.sum()-relative_corr/relative_corr.sum())
            error[i] = snrIntensity_db(
                correlation / correlation.sum(),
                xp.abs(correlation / correlation.sum() -
                       relative_corr / relative_corr.sum()))
            if (error[i] < error[i - breakcheck]) and i > breakcheck:
                break

        if verbose == True and (i % 100) == 0 and measure == False:
            print('Iteration ' + str(i))
        elif verbose == True and (i % 100) == 0 and measure == True:
            print('Iteration ' + str(i) + ' - noise level: ' + str(error[i]))

        # relative_corr = 0.5*(correlation + axisflip(correlation)) / relative_corr
        relative_corr = (correlation) / relative_corr

        # avoid errors due to division by zero or inf
        relative_corr[xp.isinf(relative_corr)] = epsilon
        relative_corr = xp.nan_to_num(relative_corr)

        # multiplicative update
        # signal_decorr *= my_correlation(axisflip(signal_decorr), (relative_corr)) / R_0
        # signal_decorr *= my_correlation((relative_corr), (signal_decorr)) / R_0
        # signal_decorr *= (my_correlation(relative_corr, signal_decorr) + my_correlation(relative_corr, axisflip(signal_decorr))) / R_0
        signal_decorr *= (my_correlation(relative_corr, signal_decorr) +
                          my_convolution(relative_corr, signal_decorr)) / R_0

    if clip:
        signal_decorr[signal_decorr > +1] = +1
        signal_decorr[signal_decorr < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_decorr, error
def snrIntensity_db(signal, noise, kind='mean'):
    xp = pyb.get_array_module(signal)

    if kind == 'mean':
        return 20 * xp.log10(xp.mean(signal) / xp.mean(noise))
    if kind == 'peak':
        return 20 * xp.log10(xp.max(signal) / xp.mean(noise))
def anchorUpdateZ(signal,
                  kernel,
                  signal_deconv=np.float32(0),
                  kerneltype='B',
                  iterations=10,
                  measure=True,
                  clip=False,
                  verbose=True):
    """
    Reconstruction of signal_deconv from its auto-correlation signal, via a
    Richardson-Lucy-like multiplicative procedure. At the same time, the
    kernel psf is deconvolved from the reconstruction so that the iteration
    converges towards
    corr(conv(signal_deconv, kernel), conv(signal_deconv, kernel)) -> signal.

    Parameters
    ----------
    signal : ndarray, either numpy or cupy.
        The auto-correlation to be inverted.
    kernel : ndarray, either numpy or cupy.
        Point spread function that blurred the signal. It must be
        signal.shape == kernel.shape.
    signal_deconv : ndarray, either numpy or cupy, or 0.
        It must be signal.shape == signal_deconv.shape. The de-autocorrelated
        signal deconvolved with kernel at the ith iteration. The default is
        np.float32(0).
    kerneltype : string.
        Type of kernel update used for the computation, choosing from blurring
        directly the autocorrelation 'A', blurring the signal that is then
        autocorrelated 'B', and the window applied in the Fourier domain 'C'.
        The default is 'B'.
    iterations : int, optional
        Number of iterations to be done. The default is 10.
    measure : boolean, optional
        If True, computes the euclidean distance between signal and the
        auto-correlation of signal_deconv. The default is True.
    clip : boolean, optional
        Clip the results within the range -1 to 1. Useless for the moment.
        The default is False.
    verbose : boolean, optional
        Print the current step value. The default is True.

    Returns
    -------
    signal_deconv : ndarray, either numpy or cupy.
        The de-autocorrelated signal deconvolved with kernel at the ith
        iteration.
    error : vector.
        Euclidean distance between signal and the auto-correlation of
        signal_deconv. The last implementation returns the SNR instead of the
        euclidean distance.

    """
    # for code agnosticity between Numpy/Cupy
    xp = pyb.get_array_module(signal)

    # for performance evaluation
    start_time = time.time()

    if iterations < 100:
        breakcheck = iterations
    else:
        breakcheck = 100

    # normalization
    signal /= signal.sum()
    kernel /= kernel.sum()
    epsilon = 1e-7

    # starting guess with a flat image
    if signal_deconv.any() == 0:
        # xp.random.seed(0)
        signal_deconv = xp.full(signal.shape,
                                0.5) + 0.01 * xp.random.rand(*signal.shape)
        # signal_deconv = signal.copy()
    else:
        signal_deconv = signal_deconv  # + 0.1*prior.max()*xp.random.rand(*signal.shape)

    # normalization
    signal_deconv = signal_deconv / signal_deconv.sum()

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure == True:
        error = xp.zeros(iterations)

    for i in range(iterations):
        # I use this property to make computation faster
        K = my_convolution(signal_deconv, my_correlation(kernel, kernel))
        relative_blur = my_correlation(K, signal_deconv)

        # compute the measured distance metric if given
        if measure == True:
            # error[i] = xp.linalg.norm(signal/signal.sum()-relative_blur/relative_blur.sum())
            error[i] = snrIntensity_db(
                signal / signal.sum(),
                xp.abs(signal / signal.sum() -
                       relative_blur / relative_blur.sum()))
            if (error[i] < error[i - breakcheck]) and i > breakcheck:
                break

        if verbose == True and (i % 100) == 0 and measure == False:
            print('Iteration ' + str(i))
        elif verbose == True and (i % 100) == 0 and measure == True:
            print('Iteration ' + str(i) + ' - noise level: ' + str(error[i]))

        relative_blur = signal / relative_blur

        # avoid errors due to division by zero or inf
        relative_blur[xp.isinf(relative_blur)] = epsilon
        relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update, for the full model
        # signal_deconv *= 0.5 * (my_convolution(relative_blur, kernel_mirror) + my_correlation(axisflip(relative_blur), kernel_mirror))
        # signal_deconv *= (my_convolution(kernel_mirror, relative_blur) + my_correlation(relative_blur, kernel_mirror))

        # multiplicative update, for the Anchor Update approximation
        signal_deconv *= my_correlation((relative_blur), (K))
        # signal_deconv *= (my_correlation(relative_blur, K) + my_convolution(relative_blur, K))

        # multiplicative update, remaining term. This gives wrong reconstructions
        # signal_deconv *= my_correlation(axisflip(relative_blur), kernel_mirror)

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error  # , kernel_update
def invert_autoconvolution(magnitude,
                           prior=None,
                           mask=None,
                           measure=True,
                           steps=200,
                           mode='deautocorrelation',
                           verbose=True):
    # agnostic code, xp is either numpy or cupy depending on the magnitude array module
    xp = pyb.get_array_module(magnitude)

    # object support constraint
    if mask is None:
        mask = xp.ones(magnitude.shape)

    # assert magnitude.shape == mask.shape, 'mask and magnitude should have same shape'
    assert steps > 0, 'steps should be a positive number'
    assert mode == 'deautoconvolution' or mode == 'deautocorrelation',\
        'mode should be \'deautoconvolution\' or \'deautocorrelation\''

    # random phase if prior is None, otherwise start with the prior Fourier
    if prior is None:
        x_hat = 1 + 0.01 * xp.random.rand(*magnitude.shape)
    else:
        x_hat = prior

    if measure == True:
        ratio = xp.zeros(steps)
    else:
        ratio = None

    x_hat = x_hat * mask
    y_mes = 0.5 * (magnitude + magnitude[::-1, ::-1])

    # normalization for energy preservation
    y0 = (xp.sum(x_hat))**2
    # y0 = (xp.sum(x_hat))
    x_hat = xp.divide(x_hat, xp.sqrt(y0))

    # monitoring the convergence of the solution
    # convergence = xp.zeros(steps)

    # loop for the minimization, I guess there can be an analogue for the autocorrelation
    if mode == "deautoconvolution":
        for i in range(0, steps):
            y = my_convolution(x_hat, x_hat)
            # u_hat = y_mes / y
            # zero divided by zero is equal to zero
            u_hat = xp.divide(y_mes, y, out=xp.zeros_like(y_mes), where=y != 0)
            if measure == True:
                ratio[i] = u_hat.mean()
            # convergence[i] = xp.mean(u_hat)
            r_hat = 1 / xp.sqrt(y0) * my_convolution(u_hat, x_hat)
            x_hat = x_hat * r_hat

    # not ready yet
    elif mode == "deautocorrelation":
        for i in range(0, steps):
            y = my_correlation(x_hat, x_hat)
            # if measure==True:
            #     ratio[i] = xp.linalg.norm(y_mes - y)
            u_hat = xp.divide(y_mes, y, out=xp.zeros_like(y_mes), where=y != 0)
            if measure == True:
                ratio[i] = u_hat.mean()

            r_hat = (0.5 / xp.sqrt(y0)) * (my_correlation(x_hat, u_hat) +
                                           (my_convolution(x_hat, u_hat)))
            # r_hat = (0.5/(y0)) * ( my_correlation(x_hat[::-1,::-1], u_hat) + my_convolution(x_hat, u_hat) )
            x_hat = x_hat * r_hat

            # r_hat = (1/xp.sqrt(y0)) * my_correlation(x_hat[::-1,::-1], u_hat)
            # x_hat = x_hat * r_hat

    return (x_hat, ratio)
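# Illustrative usage sketch for invert_autoconvolution (an assumption, not part
# of the original module): recover a non-negative object from its
# auto-convolution using the multiplicative scheme above.
def _example_invert_autoconvolution():
    import numpy as np
    obj = np.zeros((32, 32))
    obj[8:20, 12:24] = 1.0
    measured = my_convolution(obj, obj)  # the measured auto-convolution
    estimate, ratio = invert_autoconvolution(measured, steps=100,
                                             mode='deautoconvolution')
    return estimate, ratio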
def fouriermod2autocorrelation(x):
    xp = pyb.get_array_module(x)

    return xp.fft.fftshift(xp.fft.irfftn(x**2))
def anchorUpdateSK(signal,
                   kernel,
                   signal_deconv=np.float32(0),
                   iterations=10,
                   measure=True,
                   clip=False,
                   verbose=True):
    # for code agnosticity between Numpy/Cupy
    xp = pyb.get_array_module(signal)
    xps = cupyx.scipy.get_array_module(signal)

    # for performance evaluation
    start_time = time.time()

    if iterations < 100:
        breakcheck = iterations
    else:
        breakcheck = 100

    # normalization
    signal /= signal.sum()
    epsilon = 1e-7

    # starting guess with a flat image
    if signal_deconv.any() == 0:
        # xp.random.seed(0)
        signal_deconv = xp.full(signal.shape,
                                0.5) + 0.01 * xp.random.rand(*signal.shape)
        # signal_deconv = signal.copy()
    else:
        signal_deconv = signal_deconv  # + 0.1*prior.max()*xp.random.rand(*signal.shape)

    # normalization
    signal_deconv = signal_deconv / signal_deconv.sum()

    # to measure the distance between the guess convolved and the signal
    error = None
    if measure == True:
        error = xp.zeros(iterations)

    for i in range(iterations):
        # I use this property to make computation faster
        kernel_update = pyconv.correlate(signal_deconv, kernel,
                                         mode='same', method='fft')
        kernel_mirror = axisflip(kernel_update)

        relative_blur = pyconv.correlate(signal_deconv, kernel_update,
                                         mode='same', method='fft')

        # compute the measured distance metric if given
        if measure == True:
            # error[i] = xp.linalg.norm(signal/signal.sum()-relative_blur/relative_blur.sum())
            error[i] = snrIntensity_db(
                signal / signal.sum(),
                xp.abs(signal / signal.sum() -
                       relative_blur / relative_blur.sum()))
            if (error[i] < error[i - breakcheck]) and i > breakcheck:
                break

        if verbose == True and (i % 100) == 0 and measure == False:
            print('Iteration ' + str(i))
        elif verbose == True and (i % 100) == 0 and measure == True:
            print('Iteration ' + str(i) + ' - noise level: ' + str(error[i]))

        relative_blur = signal / relative_blur

        # # avoid errors due to division by zero or inf
        # relative_blur[xp.isinf(relative_blur)] = epsilon
        # relative_blur = xp.nan_to_num(relative_blur)

        # multiplicative update, for the full model
        # signal_deconv *= 0.5 * (pyconv.convolve(relative_blur, kernel_mirror, mode='same') + pyconv.correlate((relative_blur), kernel_mirror, mode='same'))
        # signal_deconv *= (my_convolution(relative_blur, kernel_mirror) + my_correlation(relative_blur, kernel_mirror))

        # multiplicative update, for the Anchor Update approximation
        signal_deconv *= pyconv.correlate(relative_blur, kernel_mirror,
                                          mode='same', method='fft')

        # multiplicative update, remaining term. This gives wrong reconstructions
        # signal_deconv *= pyconv.correlate((relative_blur), kernel_mirror, mode='same')

    if clip:
        signal_deconv[signal_deconv > +1] = +1
        signal_deconv[signal_deconv < -1] = -1

    print("\n\n Algorithm finished. Performance:")
    print("--- %s seconds ----" % (time.time() - start_time))
    print("--- %s sec/step ---" % ((time.time() - start_time) / iterations))

    return signal_deconv, error  # , kernel_update
def autocorrelation2fouriermod(x):
    xp = pyb.get_array_module(x)

    return xp.sqrt(xp.abs(xp.fft.rfftn(x)))
def my_convolution(function1, function2):
    xp = pyb.get_array_module(function1)

    # return xp.fft.fftshift(xp.fft.irfftn(xp.fft.rfftn(function1) * xp.fft.rfftn(function2), s=function1.shape))
    return xp.fft.ifftshift(
        xp.fft.irfftn(xp.fft.rfftn(function1) * xp.fft.rfftn(function2),
                      s=function1.shape))
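# Illustrative usage sketch for my_convolution (an assumption, not part of the
# original module): circular FFT convolution of an image with a PSF of the same
# shape; with an impulse centred in the array the output should reproduce the
# input, up to the module's shift convention.
def _example_convolution():
    import numpy as np
    img = np.random.rand(32, 32)
    delta = np.zeros((32, 32))
    delta[16, 16] = 1.0  # centred impulse
    blurred = my_convolution(img, delta)
    return blurred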
def choose_conv_method(in1, in2, mode="full", measure=False):
    """
    Find the fastest convolution/correlation method.

    This primarily exists to be called during the ``method='auto'`` option in
    `convolve` and `correlate`, but can also be used when performing many
    convolutions of the same input shapes and dtypes, determining
    which method to use for all of them, either to avoid the overhead of the
    'auto' option or to use accurate real-world measurements.

    Parameters
    ----------
    in1 : array_like
        The first argument passed into the convolution function.
    in2 : array_like
        The second argument passed into the convolution function.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    measure : bool, optional
        If True, run and time the convolution of `in1` and `in2` with both
        methods and return the fastest. If False (default), predict
        the fastest method using precomputed values.

    Returns
    -------
    method : str
        A string indicating which convolution method is fastest, either
        'direct' or 'fft'
    times : dict, optional
        A dictionary containing the times (in seconds) needed for each method.
        This value is only returned if ``measure=True``.

    See Also
    --------
    convolve
    correlate

    Examples
    --------
    Estimate the fastest method for a given input:

    >>> import cusignal
    >>> import cupy as cp
    >>> a = cp.random.randn(1000)
    >>> b = cp.random.randn(1000000)
    >>> method = cusignal.choose_conv_method(a, b, mode='same')
    >>> method
    'fft'

    This can then be applied to other arrays of the same dtype and shape:

    >>> c = cp.random.randn(1000)
    >>> d = cp.random.randn(1000000)
    >>> # `method` works with correlate and convolve
    >>> corr1 = cusignal.correlate(a, b, mode='same', method=method)
    >>> corr2 = cusignal.correlate(c, d, mode='same', method=method)
    >>> conv1 = cusignal.convolve(a, b, mode='same', method=method)
    >>> conv2 = cusignal.convolve(c, d, mode='same', method=method)
    """
    xp = pyb.get_array_module(in1)

    volume = xp.asarray(in1)
    kernel = xp.asarray(in2)

    if measure:
        times = {}
        for method in ("fft", "direct"):
            times[method] = _timeit_fast(
                lambda: convolve(volume, kernel, mode=mode, method=method))

        chosen_method = "fft" if times["fft"] < times["direct"] else "direct"
        return chosen_method, times

    # fftconvolve doesn't support complex256
    fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
    if hasattr(cp, fftconv_unsup):
        if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
            return "direct"

    # for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
    if any([_numeric_arrays([x], kinds="ui") for x in [volume, kernel]]):
        max_value = int(xp.abs(volume).max()) * int(xp.abs(kernel).max())
        max_value *= int(min(volume.size, kernel.size))
        if max_value > 2**xp.finfo("float").nmant - 1:
            return "direct"

    if _numeric_arrays([volume, kernel], kinds="b"):
        return "direct"

    if _numeric_arrays([volume, kernel]):
        if _fftconv_faster(volume, kernel, mode):
            return "fft"

    return "direct"
def _convolve2d(in1, in2, use_convolve, mode, boundary, fillvalue):

    xp = pyb.get_array_module(in1)

    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)

    # Promote inputs
    promType = xp.promote_types(in1.dtype, in2.dtype)
    in1 = in1.astype(promType)
    in2 = in2.astype(promType)

    if (bval != PAD) and (bval != REFLECT) and (bval != CIRCULAR):
        raise Exception("Incorrect boundary value.")

    if (bval == PAD) and (fillvalue is not None):
        fill = np.array(fillvalue, in1.dtype)
        if fill is None:
            raise Exception("fill must not be None.")
        if fill.size != 1:
            if fill.size == 0:
                raise Exception("`fillvalue` cannot be an empty array.")
            raise Exception(
                "`fillvalue` must be scalar or an array with one element")
    else:
        fill = np.zeros(1, in1.dtype)
        if fill is None:
            raise Exception("Unable to create fill array")

    # Create empty array to hold number of output dimensions
    # (np.int was removed from NumPy, use the builtin int)
    out_dimens = np.empty(in1.ndim, int)
    if val == VALID:
        for i in range(in1.ndim):
            out_dimens[i] = in1.shape[i] - in2.shape[i] + 1
            if out_dimens[i] < 0:
                raise Exception(
                    "no part of the output is valid, use option 1 (same) or "
                    "2 (full) for third argument")
    elif val == SAME:
        for i in range(in1.ndim):
            out_dimens[i] = in1.shape[i]
    elif val == FULL:
        for i in range(in1.ndim):
            out_dimens[i] = in1.shape[i] + in2.shape[i] - 1
    else:
        raise Exception("mode must be 0 (valid), 1 (same), or 2 (full)")

    # Create empty array out on GPU
    out = xp.empty(out_dimens.tolist(), in1.dtype)

    out = _convolve2d_gpu(
        in1,
        out,
        in2,
        val,
        bval,
        use_convolve,
        fill,
    )

    return out
def my_correlation(function1, function2):
    xp = pyb.get_array_module(function1)

    return xp.fft.ifftshift(
        xp.fft.irfftn(xp.conj(xp.fft.rfftn(function1)) *
                      xp.fft.rfftn(function2),
                      s=function1.shape))
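# Illustrative usage sketch for my_correlation (an assumption, not part of the
# original module): the auto-correlation of a real image should peak at the
# array centre under the shift convention used above.
def _example_correlation():
    import numpy as np
    img = np.random.rand(32, 32)
    autocorr = my_correlation(img, img)
    peak = np.unravel_index(np.argmax(autocorr), autocorr.shape)
    return peak  # expected near (16, 16) for a 32x32 input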
def my_correlation_withfft(function1, function2):
    xp = pyb.get_array_module(function1)

    temp = xp.conj(xp.fft.rfftn(function1)) * function2

    return xp.fft.fftshift(xp.fft.irfftn(temp, s=function1.shape))
def convolve(
    in1,
    in2,
    mode="full",
    method="auto",
):
    """
    Convolve two N-dimensional arrays.

    Convolve `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the convolution.

        ``direct``
           The convolution is determined directly from sums, the definition of
           convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by calling
           `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    choose_conv_method : chooses the fastest appropriate convolution method
    fftconvolve

    Notes
    -----
    By default, `convolve` and `correlate` use ``method='auto'``, which calls
    `choose_conv_method` to choose the fastest method using pre-computed
    values (`choose_conv_method` can also measure real-world timing with a
    keyword argument). Because `fftconvolve` relies on floating point numbers,
    there are certain constraints that may force `method=direct` (more detail
    in `choose_conv_method` docstring).

    Examples
    --------
    Smooth a square pulse using a Hann window:

    >>> import cusignal
    >>> import cupy as cp
    >>> sig = cp.repeat(cp.asarray([0., 1., 0.]), 100)
    >>> win = cusignal.hann(50)
    >>> filtered = cusignal.convolve(sig, win, mode='same') / cp.sum(win)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(cp.asnumpy(sig))
    >>> ax_orig.set_title('Original pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_win.plot(cp.asnumpy(win))
    >>> ax_win.set_title('Filter impulse response')
    >>> ax_win.margins(0, 0.1)
    >>> ax_filt.plot(cp.asnumpy(filtered))
    >>> ax_filt.set_title('Filtered signal')
    >>> ax_filt.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()
    """
    xp = pyb.get_array_module(in1)

    volume = xp.asarray(in1)
    kernel = xp.asarray(in2)

    if volume.ndim == kernel.ndim == 0:
        return volume * kernel
    elif volume.ndim != kernel.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")

    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
        # Convolution is commutative
        # order doesn't have any effect on output
        volume, kernel = kernel, volume

    if method == "auto":
        method = choose_conv_method(volume, kernel, mode=mode)

    if method == "fft":
        out = fftconvolve(volume, kernel, mode=mode)
        result_type = xp.result_type(volume, kernel)
        if result_type.kind in {"u", "i"}:
            out = xp.around(out)
        return out.astype(result_type)
    elif method == "direct":
        if volume.ndim > 1:
            raise ValueError("Direct method is only implemented for 1D")

        swapped_inputs = (mode != "valid") and (kernel.size > volume.size)

        if swapped_inputs:
            volume, kernel = kernel, volume

        return _convolution_cuda._convolve(volume, kernel, True,
                                           swapped_inputs, mode)
    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")