def _process_psd_for_nf(sigma_psd: np.ndarray, psd_k: Union[np.ndarray, None],
                        profile: BM3DProfile) -> np.ndarray:
    """
    Process PSD so that Nf-size PSD is usable.
    :param sigma_psd: the PSD
    :param psd_k: a previously generated kernel to convolve the PSD with, or None if not used
    :param profile: the profile used
    :return: processed PSD
    """
    if profile.nf == 0:
        return sigma_psd

    # Reduce PSD size to start with
    max_ratio = 16
    sigma_psd_copy = np.copy(sigma_psd)
    single_kernel = np.ones((3, 3, 1)) / 9
    orig_ratio = np.max(sigma_psd.shape) / profile.nf
    ratio = orig_ratio
    while ratio > max_ratio:
        mid_corr = correlate(sigma_psd_copy, single_kernel, mode='wrap')
        sigma_psd_copy = mid_corr[1::3, 1::3]
        ratio = np.max(sigma_psd_copy.shape) / profile.nf

    # Scale PSD because the binary expects it to be scaled by size
    sigma_psd_copy *= (ratio / orig_ratio) ** 2

    if psd_k is not None:
        sigma_psd_copy = correlate(sigma_psd_copy, psd_k, mode='wrap')

    return sigma_psd_copy
def compute(self, fn, fns, k=[0.01, 0.03]):
    c1 = (k[0] * 255) ** 2
    c2 = (k[1] * 255) ** 2
    win = self.gaussian(11, 1.5)

    im1 = scipy.misc.imread(fn, 1)
    mu1 = filters.correlate(im1, win)
    mu1_sq = mu1 * mu1
    s1sq = filters.correlate(im1 * im1, win) - mu1_sq

    for f in fns:
        im2 = scipy.misc.imread(f, 1)
        if im1.shape != im2.shape:
            print("{}: Incorrect image. All images "
                  "should be of equal size".format(f))
            continue
        mu2 = filters.correlate(im2, win)
        mu2_sq = mu2 * mu2
        mu1_mu2 = mu1 * mu2
        s2sq = filters.correlate(im2 * im2, win) - mu2_sq
        s12 = filters.correlate(im1 * im2, win) - mu1_mu2
        ssims = ((2 * mu1_mu2 + c1) * (2 * s12 + c2)) / \
                ((mu1_sq + mu2_sq + c1) * (s1sq + s2sq + c2))
        print("{:24} {:.4f}".format(os.path.basename(f), ssims.mean()))
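# The SSIM code above relies on a helper self.gaussian(11, 1.5) that is not shown.
# A minimal sketch of what such a window helper is assumed to return (an 11x11
# Gaussian normalized to unit sum); the name gaussian_window is hypothetical.
import numpy as np

def gaussian_window(size=11, sigma=1.5):
    # Symmetric coordinates centred on the window midpoint.
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    win = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    # Normalize so the weights sum to 1, as the local SSIM means require.
    return win / win.sum()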
def getGradient(I, signed=False):
    """
    Given an image I, calculate the magnitude and orientation of the gradient.
    Parameter:
    - I: grayscale image I
    - signed: determine whether to return the signed or unsigned orientation
    Return:
    - mag: the magnitude of the gradient
    - ori: the orientation of the gradient
    """
    # Define filters
    fx = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
    fy = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])

    # Calculate correlation
    dx = correlate(I, fx)
    dy = correlate(I, fy)

    # Calculate magnitude and orientation
    # (np.sqrt(dx*dx, dy*dy) would treat the second argument as an output array,
    #  so the two squared components are summed explicitly here)
    mag = np.sqrt(dx * dx + dy * dy)
    ori = np.arctan2(dx, dy)

    # unsigned
    if not signed:
        pi = np.pi
        ori[ori < -pi / 2] = ori[ori < -pi / 2] + pi
        ori[ori > pi / 2] = ori[ori > pi / 2] - pi

    return mag, ori
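# A quick shape check for getGradient, assuming numpy and scipy.ndimage's
# correlate are imported as in the snippet above; the ramp image is only
# illustrative.
import numpy as np

I = np.tile(np.arange(10, dtype=float), (10, 1))  # hypothetical horizontal ramp
mag, ori = getGradient(I)
# Both outputs keep the input shape; unsigned orientations are folded into [-pi/2, pi/2].
assert mag.shape == I.shape and ori.shape == I.shape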
def pyrdown_impl(image):
    """
    Prefilters an image with a gaussian kernel and then downsamples the result
    by a factor of 2.

    The following 1D convolution kernel should be used in both the x and y
    directions.
    K = 1/16 [ 1 4 6 4 1 ]

    Functions such as cv2.GaussianBlur and scipy.ndimage.filters.gaussian_filter
    are prohibited.  You must implement the separable kernel.  However, you may
    use functions such as cv2.filter2D or scipy.ndimage.filters.correlate to do
    the actual correlation / convolution.

    Filtering should mirror the input image across the border.
    For scipy this is mode = mirror.
    For cv2 this is mode = BORDER_REFLECT_101.

    Downsampling should take the even-numbered coordinates with coordinates
    starting at 0.

    Input:
        image -- height x width [x channels] image of type float32.
    Output:
        down -- ceil(height/2) x ceil(width/2) [x channels] image of type float32.
    """
    K = 1.0 / 16.0 * np.array([[1, 4, 6, 4, 1]])
    size = image.shape
    width = size[1]
    height = size[0]
    down = np.zeros_like(image)
    if len(size) == 3:
        # more than 1 channel
        channel = size[2]
        for i in range(channel):
            down[:, :, i] = correlate(image[:, :, i], K, mode='mirror')
        K = K.transpose()
        for i in range(channel):
            down[:, :, i] = correlate(down[:, :, i], K, mode='mirror')
        w = [i for i in range(0, width, 2)]
        h = [i for i in range(0, height, 2)]
        down = down[h, :, :]
        down = down[:, w, :]
    else:
        down = correlate(image, K, mode='mirror')
        K = K.transpose()
        down = correlate(down, K, mode='mirror')
        w = [i for i in range(0, width, 2)]
        h = [i for i in range(0, height, 2)]
        down = down[h, :]
        down = down[:, w]
    return down
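# A small usage sketch for pyrdown_impl under the contract stated in its
# docstring (odd sizes round up); the test image is hypothetical.
import numpy as np

img = np.random.rand(7, 9, 3).astype(np.float32)
down = pyrdown_impl(img)
# Even-numbered rows/columns are kept, so a 7x9 image becomes 4x5.
assert down.shape == (4, 5, 3)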
def __filter2(B, X, shape='nearest'):
    B2 = np.rot90(np.rot90(B))
    if len(X.shape) < 3:
        return correlate(X, B2, mode=shape)
    else:
        channels = X.shape[2]
        f = [
            correlate(X[:, :, c], B2, mode=shape)
            for c in range(channels)
        ]
        return np.array(f)
def backward_cpu(self, x, error, w, mu1, mu2, sigma, num_dau_units_ignore=0,
                 unit_testing=True, single_dim_kernel=False,
                 aggr_forbid_positive=False):
    # we get back-propagated error by rotating offsets i.e. we just use negatives of offsets
    backprop_error = self.forward_cpu(error,
                                      np.swapaxes(w, 1, 3),
                                      np.swapaxes(-1 * mu1, 1, 3),
                                      np.swapaxes(-1 * mu2, 1, 3),
                                      sigma,
                                      do_error_backprop=True,
                                      single_dim_kernel=single_dim_kernel,
                                      aggr_forbid_positive=aggr_forbid_positive)

    N = x.shape[0]
    F = x.shape[1]

    sigma_val = sigma[0]

    filter, deriv_w, deriv_mu1, deriv_mu2, _, _ = self._get_filters(
        sigma_val,
        single_dim_kernel=single_dim_kernel,
        aggr_forbid_positive=aggr_forbid_positive)

    # next we need to get gradients wrt w,mu1,mu2
    if True:
        x_w_blur = np.zeros(x.shape, dtype=np.float32)
        # pre-blur the X
        for n in range(N):
            for f in range(F):
                x_w_blur[n, f, :, :] = correlate(x[n, f, :, :], weights=deriv_w, mode='constant')

        # then offset and sum element-wise
        w_grad = self._offset_and_dot(x_w_blur, error, mu1, mu2,
                                      num_dau_units_ignore=num_dau_units_ignore,
                                      ignore_edge_gradients=unit_testing)

    if True:
        x_mu1_blur = np.zeros(x.shape, dtype=np.float32)
        # pre-blur the X
        for n in range(N):
            for f in range(F):
                x_mu1_blur[n, f, :, :] = correlate(x[n, f, :, :], weights=deriv_mu1, mode='constant')

        # then offset and sum element-wise
        mu1_grad = self._offset_and_dot(x_mu1_blur, error, mu1, mu2,
                                        num_dau_units_ignore=num_dau_units_ignore,
                                        ignore_edge_gradients=unit_testing)

    if True:
        x_mu2_blur = np.zeros(x.shape, dtype=np.float32)
        # pre-blur the X
        for n in range(N):
            for f in range(F):
                x_mu2_blur[n, f, :, :] = correlate(x[n, f, :, :], weights=deriv_mu2, mode='constant')

        # then offset and sum element-wise
        mu2_grad = self._offset_and_dot(x_mu2_blur, error, mu1, mu2,
                                        num_dau_units_ignore=num_dau_units_ignore,
                                        ignore_edge_gradients=unit_testing)

    # add multiplication with weight for mean gradients
    mu1_grad = np.multiply(mu1_grad, w)
    mu2_grad = np.multiply(mu2_grad, w)

    return (backprop_error, w_grad, mu1_grad, mu2_grad)
def mvd_map_tikhonov(initImg, imgList, psfList, iterNum, gamma, weights='even', sigma=None):
    viewNum = len(imgList)
    assert len(psfList) == viewNum

    if isinstance(weights, (list, tuple)):
        assert len(weights) == viewNum
        c2 = [wt ** 2 for wt in weights]  # square each per-view weight
    elif weights == 'even':
        c2 = [1.0] * viewNum

    ## initialization ##
    print 'initializing...'

    # pre-blur inputs
    if sigma is not None:
        imgList = [gaussian_filter(img, sigma) for img in imgList]
        psfList = [gaussian_filter(psf, sigma) for psf in psfList]

    # check and process init. guess
    if iterNum == 0:
        return initImg
    initImg[initImg < 0.0] = 0.0
    x = np.sqrt(initImg)

    # initializing u, v, w
    u = np.zeros(psfList[0].shape)
    v = np.zeros(imgList[0].shape)
    w = 0.0
    for idx in xrange(viewNum):
        u = u + c2[idx] * correlate(psfList[idx], psfList[idx])
        v = v + c2[idx] * correlate(imgList[idx], psfList[idx])
        w = w + c2[idx] * inner_product(imgList[idx])

    # initializing previous gradient power
    p_rp = 1.0

    # initializing init. search direction
    d = np.zeros(imgList[0].shape)

    ## start iteration ##
    print 'start iteration...'
    for k in xrange(iterNum):
        # temp. t_xx
        t_xx = np.square(x)
        # temp. t
        t = convolve(t_xx, u) + gamma * t_xx - v
        # gradient r
        r = 4 * x * t
        # search direction
        p_r = inner_product(r)
        d = (p_r / p_rp) * d - r
        # step size
        alpha = mapgg_step_size(x, d, u, v, w, gamma, t, t_xx)
        # save previous power of r
        p_rp = p_r
        # update
        update = alpha * d
        x = x + update
        print 'iter #%d, residue: %f.' %\
            (k + 1, phi_eval(t_xx, t, v, w) / w)

    ## return result ##
    return np.square(x)
def cor(img):
    #sum1=np.zeros((img.shape[0],28,28))
    img = img.detach().numpy()
    img2 = img[:, :, 5:22, 5:22]  #K=17
    #img2=img[:,:,8:19,8:19]  #K=2k+1=11
    #img2=img[:,:,8:19,8:19]
    #img2=img[:,:,8:19,8:19]
    print(img.shape)
    print(img2.shape)
    #sum1=np.zeros((img.shape[0],28,28))
    #img=img.detach().numpy()
    #img2=img2.detach().numpy()
    img3 = np.transpose(img, (0, 2, 3, 1))
    img4 = np.transpose(img2, (0, 2, 3, 1))
    print("image is")
    plt.imshow(img3[1, :, :, :], interpolation='nearest')
    plt.show()
    print("template is")
    plt.imshow(img4[1, :, :, :], interpolation='nearest')
    plt.show()
    sum = np.zeros((img.shape[0], 28, 28))
    cor1 = np.zeros((img.shape[0], 3, 28, 28))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        #for j in range (img.shape[1]):
        cor1[i, 0, :, :] = C.correlate(img[i, 0, :, :], img2[i, 0, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 1, :, :] = C.correlate(img[i, 1, :, :], img2[i, 1, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 2, :, :] = C.correlate(img[i, 2, :, :], img2[i, 2, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        #cor1[i,3,:,:]=C.correlate(img[i,0,:,:],img2[i,1,:,:], output=None, mode='constant', cval=0.0, origin=0)
        #cor1[i,4,:,:]=C.correlate(img[i,0,:,:],img2[i,2,:,:], output=None, mode='constant', cval=0.0, origin=0)
        #cor1[i,5,:,:]=C.correlate(img[i,1,:,:],img2[i,2,:,:], output=None, mode='constant', cval=0.0, origin=0)
        sum[i, :, :] = (cor1[i, 0, :, :] + cor1[i, 1, :, :] +
                        cor1[i, 2, :, :])  #+cor1[i,3,:,:]+cor1[i,4,:,:]+cor1[i,5,:,:]
    plt.imshow(sum[1, :, :], interpolation='nearest')
    plt.show()
    sum = np.reshape(sum, (img.shape[0], 1, 28, 28))
    #print(sum.shape,"sim shape")
    sum = torch.from_numpy(sum)
    return sum
def imblur(Y, sig=5, siz=11, nDimBlur=None, kernel=None):
    """Spatial filtering with a Gaussian or user defined kernel

    The parameters are specified in GreedyROI
    """
    from scipy.ndimage.filters import correlate

    X = np.zeros(np.shape(Y))
    if kernel is None:
        if nDimBlur is None:
            nDimBlur = Y.ndim - 1
        else:
            nDimBlur = np.min((Y.ndim, nDimBlur))

        if np.isscalar(sig):
            sig = sig * np.ones(nDimBlur)

        if np.isscalar(siz):
            siz = siz * np.ones(nDimBlur)

        # xx = np.arange(-np.floor(siz[0] / 2), np.floor(siz[0] / 2) + 1)
        # yy = np.arange(-np.floor(siz[1] / 2), np.floor(siz[1] / 2) + 1)
        # hx = np.exp(-xx**2 / (2 * sig[0]**2))
        # hx /= np.sqrt(np.sum(hx**2))
        # hy = np.exp(-yy**2 / (2 * sig[1]**2))
        # hy /= np.sqrt(np.sum(hy**2))
        # temp = correlate(Y, hx[:, np.newaxis, np.newaxis], mode='constant')
        # X = correlate(temp, hy[np.newaxis, :, np.newaxis], mode='constant')

        # the for loop helps with memory
        # for t in range(np.shape(Y)[-1]):
        #     temp = correlate(Y[:,:,t],hx[:,np.newaxis])#,mode='constant', cval=0.0)
        #     X[:,:,t] = correlate(temp,hy[np.newaxis,:])#,mode='constant', cval=0.0)

        X = Y.copy()
        for i in range(nDimBlur):
            h = np.exp(-np.arange(-np.floor(siz[i] / 2),
                                  np.floor(siz[i] / 2) + 1)**2 / (2 * sig[i]**2))
            h /= np.sqrt(h.dot(h))
            shape = [1] * len(Y.shape)
            shape[i] = -1
            X = correlate(X, h.reshape(shape), mode='constant')

    else:
        X = correlate(Y, kernel[..., np.newaxis], mode='constant')
        # for t in range(np.shape(Y)[-1]):
        #     X[:,:,t] = correlate(Y[:,:,t],kernel,mode='constant', cval=0.0)

    return X
def image_hog(image):
    nwin_x = 3  # set here the number of HOG windows per bound box
    nwin_y = 3
    B = 9  # set here the number of histogram bins
    L, C = image.shape  # L num of lines ; C num of columns
    H = np.zeros((nwin_x * nwin_y, B))  # result vector
    m = np.sqrt(L / 2)

    # convert to float, retain uint8 range
    Im = img_as_float(image) * 255.

    step_x = int(np.floor(C / (nwin_x + 1)))
    step_y = int(np.floor(L / (nwin_y + 1)))

    # correlate image with orthogonal gradient masks
    hx = [[-1, 0, 1]]
    hy = np.rot90(hx)
    grad_xr = correlate(Im, hx, mode='constant')
    grad_yu = correlate(Im, hy, mode='constant')

    # compute orientation vectors
    angles = np.arctan2(grad_yu, grad_xr)
    magnit = np.sqrt(grad_yu**2 + grad_xr**2)

    # compute histogram
    cont = 0
    for n in range(nwin_y):
        for m in range(nwin_x):
            # subset angles and magnitudes
            angles2 = angles[n*step_y:(n+2)*step_y, m*step_x:(m+2)*step_x]
            magnit2 = magnit[n*step_y:(n+2)*step_y, m*step_x:(m+2)*step_x]
            v_angles = angles2.reshape((-1, 1))
            v_magnit = magnit2.reshape((-1, 1))
            K = v_angles.shape[0]
            # assembling the histogram with 9 bins (range of 20 degrees per bin)
            H2 = np.zeros((B))
            for b, ang_lim in zip(range(B),
                                  np.linspace(0 - np.pi + 2*np.pi/B, np.pi, B)):
                # for angles in this angle bin, accumulate magnitude
                H2[b] = H2[b] + np.sum(v_magnit[v_angles < ang_lim])
                # exclude this angle bin from further iterations
                v_angles[v_angles < ang_lim] = 999
            # normalize
            H2 /= (norm(H2) + 0.01)
            H[cont, :] = H2
            cont += 1

    # flatten results into column vector of size B * nwin_x * nwin_y
    return H.reshape((-1, 1))
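# A quick shape check for image_hog, assuming the module-level names it relies on
# (img_as_float, correlate, norm) are imported; the test image is hypothetical.
import numpy as np

img = (np.random.rand(64, 64) * 255).astype(np.uint8)
h = image_hog(img)
# 9 bins per window and 3x3 windows give an 81-element column vector.
assert h.shape == (81, 1)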
def pool_corr(im):
    return np.array([pool(correlate(im, rot)) for rot in rots])


# In[ ]:

plots(pool_corr(eights[0]))
def test_convolve2d(self):
    np.random.seed(1234)
    img = np.random.randn(1, 5, 5, 1)
    kernel = np.random.randn(3, 3, 1, 1)
    output = bn.convolve2d(img, kernel)
    self.assertTrue(
        np.allclose(
            output.value[0, ..., 0],
            correlate(img[0, ..., 0], kernel[..., 0, 0])[1:-1, 1:-1]
        )
    )

    p = bn.Parameter(kernel)
    output = bn.convolve2d(img, p, 2, 1)
    loss = bn.sum(bn.square(output))
    loss.backward()
    grad_backprop = p.grad
    grad_numerical = np.zeros_like(grad_backprop)
    eps = 1e-8
    for i, j in itertools.product(range(3), repeat=2):
        e = np.zeros_like(kernel)
        e[i, j] += eps
        loss_p = bn.sum(bn.square(bn.convolve2d(img, kernel + e, 2, 1))).value
        loss_m = bn.sum(bn.square(bn.convolve2d(img, kernel - e, 2, 1))).value
        grad_numerical[i, j] = (loss_p - loss_m) / (2 * eps)
    self.assertTrue(np.allclose(grad_backprop, grad_numerical))
def makemask(wkernel, gassslice):
    #gassfile = '/Users/susanclark/Documents/gass_10.zea.fits'
    #gassdata = pyfits.getdata(gassfile, 0)
    #gassslice = gassdata[45, :, :]

    datay, datax = np.shape(gassslice)
    mnvals = np.indices((datax, datay))
    pixcrd = np.zeros((datax * datay, 2), np.float_)
    pixcrd[:, 0] = mnvals[:, :][0].reshape(datax * datay)
    pixcrd[:, 1] = mnvals[:, :][1].reshape(datax * datay)

    w = wcs.WCS(naxis=2)
    #TODO READ FROM FITS HEADER FILE!
    w.wcs.crpix = [1.125000000E3, 1.125000000E3]
    w.wcs.cdelt = np.array([-8.00000000E-2, 8.00000000E-2])
    w.wcs.crval = [0.00000000E0, -9.00000000E1]
    w.wcs.ctype = ['RA---ZEA', 'DEC--ZEA']

    worldc = w.wcs_pix2world(pixcrd, 1)
    worldcra = worldc[:, 0].reshape(datax, datay)
    worldcdec = worldc[:, 1].reshape(datax, datay)

    gm = np.zeros(gassslice.shape)
    #gm[worldcdec < 0] = 1

    gmconv = filters.correlate(gm, weights=wkernel)
    gg = copy.copy(gmconv)

    gg[gmconv < np.max(gmconv)] = 0
    gg[gmconv == np.max(gmconv)] = 1

    return gg
def _generate_output(self):
    img = self.input.astype('float32')  # always convert to float for downstream processing
    orig_shape = img.shape
    if len(orig_shape) < 3:
        img = img[np.newaxis, ...]
        self.size = self.size + (self.size[-1], )
        self.sigma = self.sigma + (self.sigma[-1], )
        self.sigma2 = self.sigma2 + (self.sigma2[-1], )
    if self.size:
        fdog = filterKernel(ftype='DoG', size=self.size, sigma=self.sigma,
                            sigma2=self.sigma2)
        fdog = fdog.astype('float32')
        img = correlate(img, fdog)
        img[img < 0] = 0
    img.shape = orig_shape
    return img
def gTV(x, N, strtag, kern, dirWeight, dirs=None, nmins=0,
        dirInfo=[None, None, None, None], a=10):
    if nmins:
        M = dirInfo[0]
        dIM = dirInfo[1]
        Ause = dirInfo[2]
        inds = dirInfo[3]
    else:
        M = None
        dIM = None
        Ause = None
        inds = None

    if len(x.shape) == 2:
        N = np.hstack([1, N])

    x0 = x.reshape(N)
    grad = np.zeros(np.hstack([N[0], len(strtag), N[1:]]), dtype=float)
    Nkern = np.hstack([1, kern.shape[-2:]])

    TV_data = TV(x0, N, strtag, kern, dirWeight, dirs, nmins, dirInfo)

    for i in xrange(len(strtag)):
        if strtag[i] == 'spatial':
            kernHld = np.flipud(np.fliplr(kern[i])).reshape(Nkern)
            grad[:, i, :, :] = correlate(np.tanh(a * TV_data[i]), kernHld, mode='wrap')

    return grad
def cor(img):
    img = img.detach().numpy()
    #print("image shape", img.shape)
    mid = int(img.shape[2] / 2)
    k = 2  #(K=2k+1) patch size
    #print(img.shape,"img.shape")
    img2 = img[:, :, mid - k:mid + k, mid - k:mid + k]
    #print("template shape", img2.shape)
    #img2=img[:,:,8:19,8:19]  #K=2k+1=11
    #img2=img[:,:,8:19,8:19]
    #img2=img[:,:,8:19,8:19]
    #print(img.shape)
    #print(img2.shape)
    #sum1=np.zeros((img.shape[0],28,28))
    #img=img.detach().numpy()
    #img2=img2.detach().numpy()
    #img3=np.transpose(img,(0,2, 3, 1))
    #img4=np.transpose(img2,(0,2, 3, 1))
    #print("image is")
    #plt.imshow(img3[1,:,:,:], interpolation='nearest')
    #plt.show()
    #print("template is")
    #plt.imshow(img4[1,:,:,:], interpolation='nearest')
    #plt.show()
    cor1 = np.zeros((img.shape[0], img.shape[1], img.shape[2], img.shape[3]))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        for j in range(img.shape[1]):
            cor1[i, j, :, :] = C.correlate(img[i, j, :, :], img2[i, j, :, :],
                                           output=None, mode='constant', cval=0.0, origin=0)
    #print(sum.shape,"sim shape")
    cor1 = torch.from_numpy(cor1)
    return cor1
def forward_cpu(self, x, w, mu1, mu2, sigma, num_dau_units_ignore=0):
    N = x.shape[0]
    S = x.shape[1]

    sigma_val = sigma[0]

    x_blur = np.zeros(x.shape, dtype=np.float32)

    filter, _, _, _, _ = self._get_filters(sigma_val)

    # pre-blur the X
    for n in range(N):
        for s in range(S):
            x_blur[n, s, :, :] = correlate(x[n, s, :, :], weights=filter,
                                           mode='constant')

    # then offset and sum element-wise
    y = self._offset_and_sum(x_blur, w, mu1, mu2,
                             num_dau_units_ignore=num_dau_units_ignore)

    return y
def correlateEigenPsf(self, bandnum, img):
    from scipy.ndimage.filters import correlate
    eigenpsfs = self.getEigenPsfs(bandnum)
    eigenterms = self.getEigenPolynomials(bandnum)
    H, W = img.shape
    corr = np.zeros((H, W))
    xx, yy = np.arange(W).astype(float), np.arange(H).astype(float)
    for epsf, (XO, YO, C) in zip(eigenpsfs, eigenterms):
        k = reduce(
            np.add,
            [np.outer(yy**yo, xx**xo) * c for xo, yo, c in zip(XO, YO, C)])
        assert (k.shape == img.shape)
        # Trim symmetric zero-padding off the epsf.
        # This will fail spectacularly given an all-zero eigen-component.
        while True:
            H, W = epsf.shape
            if (np.all(epsf[:, 0] == 0) and np.all(epsf[:, -1] == 0)
                    and np.all(epsf[0, :] == 0) and np.all(epsf[-1, :] == 0)):
                # Trim!
                epsf = epsf[1:-1, 1:-1]
            else:
                break
        corr += k * correlate(img, epsf)
    return corr
def image_spotfinder(I, SNR=10):
    """Finds spots whose Signal-to-Noise Ratio exceeds SNR (default = 10);
    returns S_mask"""
    from scipy.ndimage.filters import correlate, maximum_filter, median_filter
    RO_var = 2.3**2  # Readout variance for detector
    #SNR = 10  # Signal-to-noise ratio threshold to be identified as a spot
    w, h = shape(I)
    footprint0 = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    footprint0 = array(footprint0)
    N0 = sum(footprint0)  #9
    footprint1 = [[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1],
                  [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]
    footprint1 = array(footprint1)
    N1 = sum(footprint1)  #16
    seed([1])
    I += 0.1 * (random_sample((w, h)) - 0.5)
    I_max = maximum_filter(I, footprint=footprint0)
    I_median = median_filter(I, footprint=footprint1)
    I_int = correlate(I, footprint0)
    IC = (I_int - N0 * I_median)  # Integrated counts, Background-subtracted
    S_mask = (I == I_max) & \
             (IC/sqrt(I_int+(N0**2/N1)*I_median+RO_var*N0*(1+N0/N1)) > SNR)
    S_mask = array(S_mask, int16)
    return S_mask
def cor(img):
    #sum1=np.zeros((img.shape[0],28,28))
    img = img.detach().numpy()
    mid = int(img.shape[2] / 2)
    k = 2  #(K=2k+1) patch size
    #print(img.shape,"img.shape")
    img2 = img[:, :, mid - k:mid + k, mid - k:mid + k]  #5:22] #K=17
    #print(img2.shape,"img2.shape")
    #img2=img[:,:,8:19,8:19]  #K=2k+1=11
    #img2=img[:,:,8:19,8:19]
    #img2=img[:,:,8:19,8:19]
    #sum=np.zeros((img.shape[0],img.shape[2],img.shape[3]))
    #print(sum.shape,"sum.shape")
    cor1 = np.zeros((img.shape[0], img.shape[1], img.shape[2], img.shape[3]))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        for j in range(img.shape[1]):
            cor1[i, j, :, :] = C.correlate(img[i, j, :, :], img2[i, j, :, :],
                                           output=None, mode='constant', cval=0.0, origin=0)
            #sum[i,:,:]=sum[i,:,:]+cor1[i,j,:,:]
        #cor1[i,1,:,:]=C.correlate(img[i,1,:,:],img2[i,1,:,:], output=None, mode='constant', cval=0.0, origin=0)
        #cor1[i,2,:,:]=C.correlate(img[i,2,:,:],img2[i,2,:,:], output=None, mode='constant', cval=0.0, origin=0)
        #sum[i,:,:]=cor1[i,0,:,:]+cor1[i,1,:,:]+cor1[i,2,:,:]#+cor1[i,3,:,:]+cor1[i,4,:,:]+cor1[i,5,:,:]
    #sum=np.reshape(sum,(img.shape[0],img.shape[1],img.shape[2],img.shape[3]))
    #print(sum.shape,"sim shape")
    #sum=torch.from_numpy(sum)
    cor1 = torch.from_numpy(cor1)
    return cor1
def cor(img):
    img = img.detach().numpy()
    #print("image shape", img.shape)
    img2 = img[:, :, 5:22, 5:22]  #K=17
    #print("template shape", img2.shape)
    #img2=img[:,:,8:19,8:19]  #K=2k+1=11
    #img2=img[:,:,8:19,8:19]
    #img2=img[:,:,8:19,8:19]
    #print(img.shape)
    #print(img2.shape)
    #sum1=np.zeros((img.shape[0],28,28))
    #img=img.detach().numpy()
    #img2=img2.detach().numpy()
    #img3=np.transpose(img,(0,2, 3, 1))
    #img4=np.transpose(img2,(0,2, 3, 1))
    #print("image is")
    #plt.imshow(img3[1,:,:,:], interpolation='nearest')
    #plt.show()
    #print("template is")
    #plt.imshow(img4[1,:,:,:], interpolation='nearest')
    #plt.show()
    cor1 = np.zeros((img.shape[0], 1, 28, 28))
    for i in range(img.shape[0]):
        cor1[i, :, :, :] = C.correlate(img[i, :, :, :], img2[i, :, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
    cor1 = np.reshape(cor1, (img.shape[0], 1, 28, 28))
    #print(sum.shape,"sim shape")
    cor1 = torch.from_numpy(cor1)
    return cor1
def cor(img):
    #sum1=np.zeros((img.shape[0],28,28))
    img = img.detach().numpy()
    print(img.shape)
    cor1 = np.zeros((img.shape))  #[0],img.shape[1],28,28))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        for j in range(img.shape[1]):
            cor1[i, j, :, :] = C.correlate(img[i, j, :, :], img[i, j, :, :],
                                           output=None, mode='constant', cval=0.0, origin=0)
    #img3=np.reshape(img,(img.shape[0],img.shape[2],img.shape[3]))
    #img4=np.reshape(img,(img.shape[0],img.shape[2],img.shape[3]))
    #cor3=np.reshape(cor1,(cor1.shape[0],img.shape[2],img.shape[3]))
    # =============================================================================
    # print("image is")
    # plt.imshow(img[1,1,:,:],cmap='gray', interpolation='nearest')
    # plt.show()
    # print("template is")
    # plt.imshow(img[1,1,:,:],cmap='gray', interpolation='nearest')
    # plt.show()
    # print("correlation is")
    # plt.imshow(cor1[1,1,:,:], cmap='gray', interpolation='nearest')
    # plt.show()
    #
    # =============================================================================
    cor1 = torch.from_numpy(cor1)
    return cor1
def filterDoG(img, filterDoGParameter=None, size=None, sigma=None, sigma2=None,
              save=None, verbose=None, subStack=None, out=sys.stdout, **parameter):
    """Difference of Gaussians (DoG) filter step

    Arguments:
        img (array): image data
        filterDoGParameter (dict):
            ========= ==================== ================================================================
            Name      Type                 Description
            ========= ==================== ================================================================
            *size*    (tuple or None)      size for the DoG filter
                                           if None, do not correct for any background
            *sigma*   (tuple or None)      std of outer Gaussian, if None automatically determined from size
            *sigma2*  (tuple or None)      std of inner Gaussian, if None automatically determined from size
            *save*    (str or None)        file name to save result of this operation
                                           if None don't save to file
            *verbose* (bool or int)        print progress information
            ========= ==================== ================================================================
        subStack (dict or None): sub-stack information
        out (object): object to write progress info to

    Returns:
        array: DoG filtered image
    """
    timer = Timer()

    dogSize = getParameter(filterDoGParameter, "size", size)
    dogSigma = getParameter(filterDoGParameter, "sigma", sigma)
    dogSigma2 = getParameter(filterDoGParameter, "sigma2", sigma2)
    dogSave = getParameter(filterDoGParameter, "save", save)
    verbose = getParameter(filterDoGParameter, "verbose", verbose)

    if verbose:
        writeParameter(out=out, head='DoG:', size=dogSize, sigma=dogSigma,
                       sigma2=dogSigma2, save=dogSave)

    # DoG filter
    img = img.astype('float32')  # always convert to float for downstream processing
    if not dogSize is None:
        fdog = filterKernel(ftype='DoG', size=dogSize, sigma=dogSigma, sigma2=dogSigma2)
        fdog = fdog.astype('float32')
        #img = scipy.signal.correlate(img, fdog);
        img = correlate(img, fdog)
        #img = convolve(img, fdog, mode = 'same');
        img[img < 0] = 0

    if verbose > 1:
        plotTiling(img)

    if not dogSave is None:
        writeSubStack(dogSave, img, subStack=subStack)

    if verbose:
        out.write(timer.elapsedTime(head='DoG') + '\n')

    return img
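# A minimal, self-contained sketch of the kind of DoG kernel filterKernel(ftype='DoG')
# is assumed to build: the difference of two unit-sum Gaussians (narrow centre minus
# wide surround), which responds to blob-like structures.  The exact normalization
# used by the library may differ, and dog_kernel_sketch is a hypothetical name.
import numpy as np

def dog_kernel_sketch(size=7, sigma_narrow=1.0, sigma_wide=2.0):
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    g_narrow = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma_narrow ** 2))
    g_wide = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma_wide ** 2))
    # Subtracting the wide surround from the narrow centre gives a blob detector.
    return g_narrow / g_narrow.sum() - g_wide / g_wide.sum()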
def pyrup_impl(image):
    """
    Upsamples an image by a factor of 2 and then uses a gaussian kernel as a
    reconstruction filter.

    The following 1D convolution kernel should be used in both the x and y
    directions.
    K = 1/8 [ 1 4 6 4 1 ]
    Note: 1/8 is not a mistake.  The additional factor of 4 (applying this 1D
    kernel twice) scales the solution according to the 2x2 upsampling factor.

    Filtering should mirror the input image across the border.
    For scipy this is mode = mirror.
    For cv2 this is mode = BORDER_REFLECT_101.

    Upsampling should produce samples at even-numbered coordinates with
    coordinates starting at 0.

    Input:
        image -- height x width [x channels] image of type float32.
    Output:
        up -- 2 height x 2 width [x channels] image of type float32.
    """
    #raise NotImplementedError()
    K = 1.0 / 8.0 * np.array([[1, 4, 6, 4, 1]])
    size = image.shape
    width = size[1]
    height = size[0]
    if len(size) == 3:
        channel = size[2]
        up = np.zeros((height * 2, width * 2, channel))
        up[1::2, 1::2, :] = image
        for i in range(channel):
            up[:, :, i] = correlate(up[:, :, i], K, mode='mirror')
        K = K.transpose()
        for i in range(channel):
            up[:, :, i] = correlate(up[:, :, i], K, mode='mirror')
    else:
        up = np.zeros((height * 2, width * 2))
        up[1::2, 1::2] = image
        up = correlate(up, K, mode='mirror')
        K = K.transpose()
        up = correlate(up, K, mode='mirror')
    return up
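# A small usage sketch for pyrup_impl: the output is exactly twice the input size
# in each spatial dimension.  The input array is hypothetical.
import numpy as np

small = np.random.rand(4, 5).astype(np.float32)
up = pyrup_impl(small)
assert up.shape == (8, 10)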
def TV(im, N, strtag, kern, dirWeight=1, dirs=None, nmins=0,
       dirInfo=[None, None, None, None]):
    res = np.zeros(np.hstack([len(strtag), im.shape]))
    inds = dirInfo[3]

    Nkern = np.hstack([1, kern.shape[-2:]])

    for i in xrange(len(strtag)):
        if strtag[i] == 'spatial':
            res[i] = correlate(im, kern[i].reshape(Nkern), mode='wrap')
        elif strtag[i] == 'diff':
            res[i] = dirWeight * d.least_Squares_Fitting(im, N, strtag, dirs, inds, dirInfo[0]).real

    return res.reshape(np.hstack([len(strtag), N]))
def test_deconvolve2d_forward(self):
    img = np.random.randn(1, 3, 3, 1).astype(np.float32)
    kernel = np.random.randn(3, 3, 1, 1).astype(np.float32)
    output = nn.deconvolve2d(img, kernel, (1, 1), (0, 0))
    self.assertTrue(
        np.allclose(
            output.value[0, 1:-1, 1:-1, 0],
            correlate(img[0, :, :, 0], kernel[::-1, ::-1, 0, 0],
                      mode="constant")))
def test_convolve2d_forward(self):
    img = np.random.randn(1, 5, 5, 1)
    kernel = np.random.randn(3, 3, 1, 1)
    output = nn.convolve2d(img, kernel)
    self.assertTrue(
        np.allclose(
            output.value[0, ..., 0],
            correlate(img[0, ..., 0], kernel[..., 0, 0])[1:-1, 1:-1]))
    self.assertEqual(nn.config.dtype, np.float32)
    self.assertEqual(output.value.dtype, nn.config.dtype)
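# A self-contained check (not part of the test suite above) of the identity these
# tests rely on: for an odd-sized kernel, correlating with the flipped kernel is
# the same as scipy.ndimage convolution.
import numpy as np
from scipy.ndimage import correlate, convolve

a = np.random.randn(5, 5)
k = np.random.randn(3, 3)
assert np.allclose(correlate(a, k[::-1, ::-1]), convolve(a, k))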
def cor(img):
    sum1 = np.zeros((img.shape[0], 28, 28))  #60K 28 28 3
    cor1 = np.zeros((img.shape[0], 28, 28, 6))
    #print(sum1.shape,"sum1")#,cor1.shape)
    for i in range(img.shape[0]):
        cor1[i, :, :, 0] = C.correlate(img[i, :, :, 0], img[i, :, :, 0],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, :, :, 1] = C.correlate(img[i, :, :, 1], img[i, :, :, 1],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, :, :, 2] = C.correlate(img[i, :, :, 2], img[i, :, :, 2],
                                       output=None, mode='constant', cval=0.0, origin=0)
        #print("done")
        #print(cor1[i,:,:,2].shape, "cor[i,:,:,2] shape")
        cor1[i, :, :, 3] = np.zeros((img[i, :, :, 0].shape))  #C.correlate(img[i,:,:,0],img[i,:,:,1], output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, :, :, 4] = np.zeros((img[i, :, :, 0].shape))  #C.correlate(img[i,:,:,0],img[i,:,:,2], output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, :, :, 5] = np.zeros((img[i, :, :, 0].shape))  #C.correlate(img[i,:,:,2],img[i,:,:,1], output=None, mode='constant', cval=0.0, origin=0)
        sum1[i, :, :] = (cor1[i, :, :, 0] + cor1[i, :, :, 1] + cor1[i, :, :, 2] +
                         cor1[i, :, :, 3] + cor1[i, :, :, 4] + cor1[i, :, :, 5])
    #print("sum1 shape,",sum1.shape)
    #sum1=np.transpose(sum1,(0,3, 1, 2))
    sum1 = sum1.reshape(img.shape[0], 1, 28, 28)
    sum11 = torch.from_numpy(sum1)
    return sum11
def cor(img):
    #sum1=np.zeros((img.shape[0],28,28))
    img = img.detach().numpy()
    cor1 = np.zeros((img.shape))  #[0],img.shape[1],28,28))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        for j in range(img.shape[1]):
            cor1[i, j, :, :] = C.correlate(img[i, j, :, :], img[i, j, :, :],
                                           output=None, mode='constant', cval=0.0, origin=0)
    cor1 = torch.from_numpy(cor1)
    return cor1
def cor(img):
    sum1 = np.zeros((img.shape[0], 28, 28))
    cor1 = np.zeros((img.shape[0], 6, 28, 28))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        cor1[i, 0, :, :] = np.zeros((cor1[i, 0, :, :].shape))  #C.correlate(img[i,0,:,:],img[i,0,:,:], output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 1, :, :] = np.zeros((cor1[i, 0, :, :].shape))  #C.correlate(img[i,1,:,:],img[i,1,:,:], output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 2, :, :] = np.zeros((cor1[i, 0, :, :].shape))  #C.correlate(img[i,2,:,:],img[i,2,:,:], output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 3, :, :] = C.correlate(img[i, 0, :, :], img[i, 1, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 4, :, :] = C.correlate(img[i, 0, :, :], img[i, 2, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        cor1[i, 5, :, :] = C.correlate(img[i, 1, :, :], img[i, 1, :, :],
                                       output=None, mode='constant', cval=0.0, origin=0)
        sum1[i, :, :] = (cor1[i, 0, :, :] + cor1[i, 1, :, :] + cor1[i, 2, :, :] +
                         cor1[i, 3, :, :] + cor1[i, 4, :, :] + cor1[i, 5, :, :])
    #print(img[:,0,:,:].shape, "image shape")
    #chu=C.correlate(img[0,:,:],img[0,:,:], output=None, mode='constant', cval=0.0, origin=0)
    #print(chu.shape,"chu shape")
    sum1 = sum1.reshape(img.shape[0], 1, 28, 28)  #,1)
    sum1 = torch.from_numpy(sum1)
    return sum1
def _generate_ds_images(img, reduction_factor, mov, hfilter, noise_amp):
    num_imgs = len(mov)
    imgs = []
    img = tc.im2double(img)

    def maxabs(x):
        # Return the maximum absolute value in x
        return math.fabs(max(x.min(), x.max(), key=abs))

    vx_max = maxabs(mov[:, 0])
    vy_max = maxabs(mov[:, 1])
    img_width = img.shape[0]
    img_height = img.shape[1]

    for i in range(0, num_imgs):
        translated_img = gt.translate_and_crop(img, mov[i, 0], mov[i, 1])
        translated_img = translated_img[:img_width - vx_max, :img_height - vy_max]
        if len(translated_img.shape) == 3:
            blurred_img = np.zeros(
                (translated_img.shape[0], translated_img.shape[1],
                 translated_img.shape[2]),
                dtype=np.float32)
            temp = correlate(translated_img[:, :, 0], hfilter)
            blurred_img[:, :, 0] = correlate(translated_img[:, :, 0], hfilter)
            blurred_img[:, :, 1] = correlate(translated_img[:, :, 1], hfilter)
            blurred_img[:, :, 2] = correlate(translated_img[:, :, 2], hfilter)
            downsampled_img = gt.downsampling(blurred_img, reduction_factor)
            noisy_img = downsampled_img + noise_amp * np.random.randn(
                downsampled_img.shape[0], downsampled_img.shape[1],
                downsampled_img.shape[2])
        else:
            blurred_img = correlate(translated_img, hfilter)
            downsampled_img = gt.downsampling(blurred_img, reduction_factor)
            noisy_img = downsampled_img + noise_amp * np.random.randn(
                downsampled_img.shape[0], downsampled_img.shape[1])

        imgs.append({
            'hr': translated_img,
            'ds': downsampled_img,
            'lr': noisy_img
        })

    return imgs
def normalized_correlation(image, kernel):
    # Normalize
    image_norm = _normalize(image, 1.5, -1.5)
    correlated = correlate(image_norm, kernel)

    # max_indices = _get_max_indices(correlated, 6)
    # ks = [int((kernel.shape[0]-1)/2), int((kernel.shape[1]-1)/2)]
    # for x, y in max_indices:
    #     correlated[x-ks[0]:x+ks[0], y-ks[1]:y+ks[1]] = 0
    #     correlated[x, y] = 255

    correlated = correlated.astype(np.uint8)
    return correlated
def dog_filter(source, shape, sigma=None, sigma2=None):
    if not shape is None:
        fdog = fk.filter_kernel(ftype='dog', shape=shape, sigma=sigma, sigma2=sigma2)
        fdog = fdog.astype('float32')
        filtered = ndf.correlate(source, fdog)
        filtered[filtered < 0] = 0
        return filtered
    else:
        return source
def correlations(Arr, neighbors=8):
    """Computes the correlation image for the input dataset"""
    Arr = Arr.astype('float32')
    Arr -= np.mean(Arr, axis=0)
    Arr_std = np.std(Arr, axis=0)
    Arr_std[Arr_std == 0] = np.inf
    Arr /= Arr_std

    sz = np.ones((3, 3), dtype='float32')
    sz[1, 1] = 0
    if neighbors == 4:
        sz = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])

    Arr_corr = correlate(Arr, sz[np.newaxis, :], mode='constant')
    MASK = correlate(
        np.ones(Arr.shape[1:], dtype='float32'), sz, mode='constant')
    Corr = np.mean(Arr_corr * Arr, axis=0) / MASK
    return Corr
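# A usage sketch for correlations(): Arr is assumed to be a (time, y, x) movie,
# and the result is a 2-D map of the mean correlation of each pixel with its
# 8 (or 4) neighbours.  The random movie is hypothetical.
import numpy as np

movie = np.random.randn(100, 32, 32).astype('float32')
cmap = correlations(movie, neighbors=8)
assert cmap.shape == (32, 32)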
def cor(img, img2):
    #sum1=np.zeros((img.shape[0],28,28))
    img = img.cpu().numpy()
    img2 = img2.cpu().numpy()
    cor = np.zeros((img.shape))  #[0],img.shape[1],28,28))
    for i in range(img.shape[0]):
        #print("image shape",img.shape,img.shape[0])
        for j in range(img.shape[1]):
            cor[i, j, :, :] = C.correlate(img[i, j, :, :], img2[i, j, :, :],
                                          output=None, mode='constant', cval=0.0, origin=0)
    cor = torch.from_numpy(cor).float().cuda()
    return cor
def umask(data, inkernel):
    outdata = filters.correlate(data, weights=inkernel)

    #Our convolution has scaled outdata by sum(kernel), so we will divide out these weights.
    kernweight = np.sum(inkernel, axis=0)
    kernweight = np.sum(kernweight, axis=0)
    subtr_data = data - outdata / kernweight

    #Convert to binary data
    bindata = copy.copy(subtr_data)
    bindata[subtr_data > 0] = 1
    bindata[subtr_data <= 0] = 0

    return bindata
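# A usage sketch for umask, assuming the module imports it relies on (numpy as np,
# scipy.ndimage.filters and copy) are present: with a box kernel, pixels brighter
# than their local mean map to 1 and the rest to 0.
import numpy as np

kern = np.ones((5, 5))
data = np.random.rand(20, 20)
mask = umask(data, kern)
assert set(np.unique(mask)).issubset({0.0, 1.0})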
def show_conv(self, img, tag, ax, f):
    oimg = correlate(img, f, mode="constant")  # FIXME: debatably not 'right' on borders ...
    #print tag, oimg
    ax.set_title(tag)
    ax.imshow(oimg, cmap="gray", interpolation="nearest")
def detection(array, psf, bkg_sigma=5, mode='lpeaks', matched_filter=False, mask=True, snr_thresh=5, plot=True, debug=False, full_output=False, verbose=True, save_plot=None, plot_title=None, angscale=False, pxscale=0.01): """ Finds blobs in a 2d array. The algorithm is designed for automatically finding planets in post-processed high contrast final frames. Blob can be defined as a region of an image in which some properties are constant or vary within a prescribed range of values. See <Notes> below to read about the algorithm details. Parameters ---------- array : array_like, 2d Input frame. psf : array_like Input psf, normalized with ``vip_hci.phot.normalize_psf``. bkg_sigma : int or float, optional The number standard deviations above the clipped median for setting the background level. mode : {'lpeaks','log','dog'}, optional Sets with algorithm to use. Each algorithm yields different results. matched_filter : bool, optional Whether to correlate with the psf of not. mask : bool, optional Whether to mask the central region (circular aperture of 2*fwhm radius). snr_thresh : float, optional SNR threshold for deciding whether the blob is a detection or not. plot : bool, optional If True plots the frame showing the detected blobs on top. debug : bool, optional Whether to print and plot additional/intermediate results. full_output : bool, optional Whether to output just the coordinates of blobs that fulfill the SNR constraint or a table with all the blobs and the peak pixels and SNR. verbose : bool, optional Whether to print to stdout information about found blobs. save_plot: string If provided, the plot is saved to the path. plot_title : str, optional Title of the plot. angscale: bool, optional If True the plot axes are converted to angular scale. pxscale : float, optional Pixel scale in arcseconds/px. Default 0.01 for Keck/NIRC2. Returns ------- yy, xx : array_like Two vectors with the y and x coordinates of the centers of the sources (potential planets). If full_output is True then a table with all the candidates that passed the 2d Gaussian fit constrains and their S/N is returned. Notes ----- The FWHM of the PSF is measured directly on the provided array. If the parameter matched_filter is True then the PSF is used to run a matched filter (correlation) which is equivalent to a convolution filter. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. The background level or threshold is found with sigma clipped statistics (5 sigma over the median) on the image/correlated image. Then 5 different strategies can be used to detect the blobs (potential planets): Local maxima + 2d Gaussian fit. The local peaks above the background on the (correlated) frame are detected. A maximum filter is used for finding local maxima. This operation dilates the original image and merges neighboring local maxima closer than the size of the dilation. Locations where the original image is equal to the dilated image are returned as local maxima. The minimum separation between the peaks is 1*FWHM. A 2d Gaussian fit is done on each of the maxima constraining the position on the subimage and the sigma of the fit. Finally the blobs are filtered based on its SNR. Laplacian of Gaussian + 2d Gaussian fit. It computes the Laplacian of Gaussian images with successively increasing standard deviation and stacks them up in a cube. Blobs are local maximas in this cube. LOG assumes that the blobs are again assumed to be bright on dark. 
A 2d Gaussian fit is done on each of the candidates constraining the position on the subimage and the sigma of the fit. Finally the blobs are filtered based on its SNR. Difference of Gaussians. This is a faster approximation of LoG approach. In this case the image is blurred with increasing standard deviations and the difference between two successively blurred images are stacked up in a cube. DOG assumes that the blobs are again assumed to be bright on dark. A 2d Gaussian fit is done on each of the candidates constraining the position on the subimage and the sigma of the fit. Finally the blobs are filtered based on its SNR. """ def check_blobs(array_padded, coords_temp, fwhm, debug): y_temp = coords_temp[:,0] x_temp = coords_temp[:,1] coords = [] # Fitting a 2d gaussian to each local maxima position for y, x in zip(y_temp, x_temp): subsi = 2 * int(np.ceil(fwhm)) if subsi %2 == 0: subsi += 1 subim, suby, subx = get_square(array_padded, subsi, y+pad, x+pad, position=True, force=True) cy, cx = frame_center(subim) gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx, y_mean=cy, theta=0, x_stddev=fwhm*gaussian_fwhm_to_sigma, y_stddev=fwhm*gaussian_fwhm_to_sigma) sy, sx = np.indices(subim.shape) fitter = fitting.LevMarLSQFitter() fit = fitter(gauss, sx, sy, subim) # checking that the amplitude is positive > 0 # checking whether the x and y centroids of the 2d gaussian fit # coincide with the center of the subimage (within 2px error) # checking whether the mean of the fwhm in y and x of the fit # are close to the FWHM_PSF with a margin of 3px fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)]) if fit.amplitude.value > 0 \ and np.allclose(fit.y_mean.value, cy, atol=2) \ and np.allclose(fit.x_mean.value, cx, atol=2) \ and np.allclose(mean_fwhm_fit, fwhm, atol=3): coords.append((suby + fit.y_mean.value, subx + fit.x_mean.value)) if debug: print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x)) print('fit peak = {:.3f}'.format(fit.amplitude.value)) msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}' print(msg.format(fwhm_y, fwhm_x)) print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit)) pp_subplots(subim, colorb=True, axis=False, dpi=60) return coords def print_coords(coords): print('Blobs found:', len(coords)) print(' ycen xcen') print('------ ------') for i in range(len(coords[:, 0])): print('{:.3f} \t {:.3f}'.format(coords[i,0], coords[i,1])) def print_abort(): if verbose: print(sep) print('No potential sources found') print(sep) # -------------------------------------------------------------------------- if array.ndim != 2: raise TypeError('Input array is not a frame or 2d array') if psf.ndim != 2 and psf.shape[0] < array.shape[0]: raise TypeError('Input psf is not a 2d array or has wrong size') # Getting the FWHM from the PSF array cenpsf = frame_center(psf) outdf = fit_2dgaussian(psf, cent=(cenpsf), debug=debug, full_output=True) fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y'] fwhm = np.mean([fwhm_x, fwhm_y]) if verbose: print('FWHM = {:.2f} pxs\n'.format(fwhm)) if debug: print('FWHM_y', fwhm_y) print('FWHM_x', fwhm_x) # Masking the center, 2*lambda/D is the expected IWA if mask: array = mask_circle(array, radius=fwhm) # Matched filter if matched_filter: frame_det = correlate(array, psf) else: frame_det = array # Estimation of background level _, median, stddev = sigma_clipped_stats(frame_det, sigma=5, iters=None) bkg_level = median + (stddev * bkg_sigma) if debug: print('Sigma 
clipped median = {:.3f}'.format(median)) print('Sigma clipped stddev = {:.3f}'.format(stddev)) print('Background threshold = {:.3f}'.format(bkg_level)) print() if mode == 'lpeaks' or mode == 'log' or mode == 'dog': # Padding the image with zeros to avoid errors at the edges pad = 10 array_padded = np.lib.pad(array, pad, 'constant', constant_values=0) if debug and plot and matched_filter: print('Input frame after matched filtering:') pp_subplots(frame_det, rows=2, colorb=True) if mode == 'lpeaks': # Finding local peaks (can be done in the correlated frame) coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level, min_distance=int(np.ceil(fwhm)), num_peaks=20) coords = check_blobs(array_padded, coords_temp, fwhm, debug) coords = np.array(coords) if verbose and coords.shape[0] > 0: print_coords(coords) elif mode == 'log': sigma = fwhm*gaussian_fwhm_to_sigma coords = feature.blob_log(frame_det.astype('float'), threshold=bkg_level, min_sigma=sigma-.5, max_sigma=sigma+.5) if len(coords) == 0: print_abort() return 0, 0 coords = coords[:,:2] coords = check_blobs(array_padded, coords, fwhm, debug) coords = np.array(coords) if coords.shape[0] > 0 and verbose: print_coords(coords) elif mode == 'dog': sigma = fwhm*gaussian_fwhm_to_sigma coords = feature.blob_dog(frame_det.astype('float'), threshold=bkg_level, min_sigma=sigma-.5, max_sigma=sigma+.5) if len(coords) == 0: print_abort() return 0, 0 coords = coords[:, :2] coords = check_blobs(array_padded, coords, fwhm, debug) coords = np.array(coords) if coords.shape[0] > 0 and verbose: print_coords(coords) else: msg = 'Wrong mode. Available modes: lpeaks, log, dog.' raise TypeError(msg) if coords.shape[0] == 0: print_abort() return 0, 0 yy = coords[:, 0] xx = coords[:, 1] yy_final = [] xx_final = [] yy_out = [] xx_out = [] snr_list = [] xx -= pad yy -= pad # Checking S/N for potential sources for i in range(yy.shape[0]): y = yy[i] x = xx[i] if verbose: print(sep) print('X,Y = ({:.1f},{:.1f})'.format(x,y)) snr = snr_ss(array, (x,y), fwhm, False, verbose=False) snr_list.append(snr) if snr >= snr_thresh: if verbose: _ = frame_quick_report(array, fwhm, (x,y), verbose=verbose) yy_final.append(y) xx_final.append(x) else: yy_out.append(y) xx_out.append(x) if verbose: print('S/N constraint NOT fulfilled (S/N = {:.3f})'.format(snr)) if debug: _ = frame_quick_report(array, fwhm, (x,y), verbose=verbose) if debug or full_output: table = Table([yy.tolist(), xx.tolist(), snr_list], names=('y', 'x', 'px_snr')) table.sort('px_snr') yy_final = np.array(yy_final) xx_final = np.array(xx_final) yy_out = np.array(yy_out) xx_out = np.array(xx_out) if plot: coords = list(zip(xx_out.tolist() + xx_final.tolist(), yy_out.tolist() + yy_final.tolist())) circlealpha = [0.3] * len(xx_out) circlealpha += [1] * len(xx_final) pp_subplots(array, circle=coords, circlealpha=circlealpha, circlelabel=True, circlerad=fwhm, save=save_plot, dpi=120, angscale=angscale, pxscale=pxscale, title=plot_title) if debug: print(table) if full_output: return table else: return yy_final, xx_final
def get_corr(dots, part):
    corr = correlate(dots, part, output=np.float64)
    return corr
def filter(self, array, *args, **kwargs):
    array[np.isnan(array)] = 0.0
    shape = array.shape
    if len(shape) == 3:
        array = np.abs(array)
        span = np.sum(array ** 2, axis=0)
        array = array[np.newaxis, ...]
    elif len(shape) == 4:
        span = np.trace(array, axis1=0, axis2=1)
    else:
        array = np.abs(array)
        span = array ** 2
        array = array[np.newaxis, np.newaxis, ...]
    lshape = array.shape[0:2]

    # ---------------------------------------------
    # INIT & SPAN
    # ---------------------------------------------
    sig2 = 1.0 / self.looks
    sfak = 1.0 + sig2

    # nrx = array.shape[-1]
    #
    # lshape = array.shape[0:-2]
    # if len(lshape) == 2:
    #     # span = np.abs(np.trace(array,axis1=0,axis2=1))
    #     span = np.abs(array[0, 0, ...] + array[1, 1, ...] + array[2, 2, ...])
    # else:
    #     logging.error("Data not in matrix form")

    # ---------------------------------------------
    # TURNING BOX
    # ---------------------------------------------
    cbox = np.zeros((9, self.win, self.win), dtype='float32')
    chbox = np.zeros((self.win, self.win), dtype='float32')
    chbox[0:self.win // 2 + 1, :] = 1
    cvbox = np.zeros((self.win, self.win), dtype='float32')
    for k in range(self.win):
        cvbox[k, 0:k + 1] = 1

    cbox[0, ...] = np.rot90(chbox, 3)
    cbox[1, ...] = np.rot90(cvbox, 1)
    cbox[2, ...] = np.rot90(chbox, 2)
    cbox[3, ...] = np.rot90(cvbox, 0)
    cbox[4, ...] = np.rot90(chbox, 1)
    cbox[5, ...] = np.rot90(cvbox, 3)
    cbox[6, ...] = np.rot90(chbox, 0)
    cbox[7, ...] = np.rot90(cvbox, 2)
    for k in range(self.win // 2 + 1):
        for l in range(self.win // 2 - k, self.win // 2 + k + 1):
            cbox[8, k:self.win - k, l] = 1

    for k in range(9):
        cbox[k, ...] /= np.sum(cbox[k, ...])

    ampf1 = np.empty((9,) + span.shape)
    ampf2 = np.empty((9,) + span.shape)
    for k in range(9):
        ampf1[k, ...] = filters.correlate(span ** 2, cbox[k, ...])
        ampf2[k, ...] = filters.correlate(span, cbox[k, ...]) ** 2

    # ---------------------------------------------
    # GRADIENT ESTIMATION
    # ---------------------------------------------
    np.seterr(divide='ignore', invalid='ignore')

    if self.method == 'original':
        xs = [+2, +2, 0, -2, -2, -2, 0, +2]
        ys = [0, +2, +2, +2, 0, -2, -2, -2]
        samp = filters.uniform_filter(span, self.win // 2)
        grad = np.empty((8,) + span.shape)
        for k in range(8):
            grad[k, ...] = np.abs(np.roll(np.roll(samp, ys[k], axis=0), xs[k], axis=1) / samp - 1.0)
        magni = np.amax(grad, axis=0)
        direc = np.argmax(grad, axis=0)
        direc[magni < self.threshold] = 8
    elif self.method == 'cov':
        grad = np.empty((8,) + span.shape)
        for k in range(8):
            grad[k, ...] = np.abs((ampf1[k, ...] - ampf2[k, ...]) / ampf2[k, ...])
        direc = np.argmin(grad, axis=0)
    else:
        logging.error("Unknown method!")

    np.seterr(divide='warn', invalid='warn')

    # ---------------------------------------------
    # FILTERING
    # ---------------------------------------------
    out = np.empty_like(array)
    dbox = np.zeros((1, 1) + (self.win, self.win))
    for l in range(9):
        grad = ampf1[l, ...]
        mamp = ampf2[l, ...]
        dbox[0, 0, ...] = cbox[l, ...]

        vary = (grad - mamp).clip(1e-10)
        varx = ((vary - mamp * sig2) / sfak).clip(0)
        kfac = varx / vary
        if np.iscomplexobj(array):
            mamp = filters.correlate(array.real, dbox) + 1j * filters.convolve(array.imag, dbox)
        else:
            mamp = filters.correlate(array, dbox)
        idx = np.argwhere(direc == l)
        out[:, :, idx[:, 0], idx[:, 1]] = (mamp + (array - mamp) * kfac)[:, :, idx[:, 0], idx[:, 1]]

    return out
def S1C1(self, Y_data_f):
    """
    Y_data_f must be a 2D data
    """
    c1_list = []
    height, width = Y_data_f.shape
    rot_num = len(self.gabor_thetas)
    s1 = NM.empty((self.band_num, self.scale_num_in_band, rot_num, height, width),
                  dtype=NM.float64)
    c1 = NM.empty((self.band_num, rot_num, height, width), dtype=NM.float64)

    ### Compute the Normalize factor
    Y_data_f_2 = NM.power(Y_data_f, 2)

    ### Compute S1
    for idx_band in xrange(self.band_num):
        for idx_scale in xrange(self.scale_num_in_band):
            idx = idx_band * self.scale_num_in_band + idx_scale
            filter_size = self.filter_sizes[idx]
            gabor_sigma = self.gabor_sigmas[idx]
            gabor_lambda = self.gabor_lambdas[idx]
            ### TODO -- avoid divide 0
            factor = scipy_f.convolve(Y_data_f_2, NM.ones((filter_size, filter_size)),
                                      mode="constant")
            factor = NM.power(factor, 0.5)
            for idx_r in xrange(rot_num):
                theta = self.gabor_thetas[idx_r]
                gabor_filter = self.GetGaborFilter(filter_size, theta, gabor_sigma,
                                                   gabor_lambda, self.gabor_gamma)
                temp = NM.fabs(scipy_f.correlate(Y_data_f, gabor_filter, mode='constant'))
                self.RemoveBorder(temp, filter_size)
                NM.divide(temp, factor, temp)
                s1[idx_band, idx_scale, idx_r, :, :] = temp
                del temp

    ### Compute C1
    ### pool over scales within band
    for idx_band in xrange(self.band_num):
        for idx_r in xrange(rot_num):
            T = s1[idx_band, 0, idx_r, :, :]
            for idx_scale in xrange(1, self.scale_num_in_band):
                T = NM.maximum(s1[idx_band, idx_scale, idx_r, :, :], T)
            c1[idx_band, idx_r] = T

    ### pool over local neighborhood
    for idx_band in xrange(self.band_num):
        grid_size = self.pool_grids[idx_band]
        gap = grid_size / 2
        grid_size = grid_size * 2 - 1
        for idx_r in xrange(rot_num):
            t = c1[idx_band, idx_r]
            c1[idx_band, idx_r] = scipy_morp.grey_dilation(t, size=grid_size, mode='constant')
            t = c1[idx_band, idx_r, 0::gap, 0::gap]
            c1_list.append(t)

    del s1
    del c1

    ### subSample
    return c1_list
def filterLinear(img, filterLinearParameter=None, ftype=None, size=None, sigma=None,
                 sigma2=None, save=None, subStack=None, verbose=False,
                 out=sys.stdout, **parameter):
    """Applies a linear filter to the image

    Arguments:
        img (array): image data
        filterLinearParameter (dict):
            ========= ==================== ================================================================
            Name      Type                 Description
            ========= ==================== ================================================================
            *ftype*   (str or None)        the type of the filter, see :ref:`FilterTypes`
                                           if None do not perform any filtering
            *size*    (tuple or None)      size for the filter
                                           if None, do not perform filtering
            *sigma*   (tuple or None)      std of outer Gaussian, if None automatically determined from size
            *sigma2*  (tuple or None)      std of inner Gaussian, if None automatically determined from size
            *save*    (str or None)        file name to save result of this operation
                                           if None don't save to file
            *verbose* (bool or int)        print progress information
            ========= ==================== ================================================================
        subStack (dict or None): sub-stack information
        verbose (bool): print progress info
        out (object): object to write progress info to

    Returns:
        array: filtered image

    Note:
        Converts image to float32 type if filter is active!
    """
    timer = Timer()

    ftype = getParameter(filterLinearParameter, "ftype", ftype)
    size = getParameter(filterLinearParameter, "size", size)
    sigma = getParameter(filterLinearParameter, "sigma", sigma)
    sigma2 = getParameter(filterLinearParameter, "sigma2", sigma2)
    save = getParameter(filterLinearParameter, "save", save)
    verbose = getParameter(filterLinearParameter, "verbose", verbose)

    if verbose:
        writeParameter(out=out, head='Linear Filter:', ftype=ftype, size=size,
                       sigma=sigma, sigma2=sigma2, save=save)

    if ftype is None:
        return img

    # DoG filter
    img = img.astype('float32')  # always convert to float for downstream processing
    if not size is None:
        fil = filterKernel(ftype=ftype, size=size, sigma=sigma, sigma2=sigma2)
        fil = fil.astype('float32')
        #img = scipy.signal.correlate(img, fdog);
        img = correlate(img, fil)
        #img = convolve(img, fdog, mode = 'same');
        img[img < 0] = 0

    if verbose > 1:
        plotTiling(img)

    if not save is None:
        writeSubStack(save, img, subStack=subStack)

    if verbose:
        out.write(timer.elapsedTime(head='Linear Filter') + '\n')

    return img
# detmap1.max()/detsig1, detmap2.max()/detsig2, detmap.max()/detsig

alphas = np.linspace(0, 1, 101)
codetsn = np.zeros(len(alphas), np.float32)

psfimg1 = 1./(2.*np.pi*psfsig1**2) * np.exp(-0.5 * ((xx-cx)**2 + (yy-cy)**2) / psfsig1**2)
psfimg2 = 1./(2.*np.pi*psfsig2**2) * np.exp(-0.5 * ((xx-cx)**2 + (yy-cy)**2) / psfsig2**2)
norm1 = np.sqrt(np.sum(psfimg1**2))
norm2 = np.sqrt(np.sum(psfimg2**2))

for ii, alpha in enumerate(alphas):
    beta = 1. - alpha
    coadd = alpha * image1 + beta * image2
    cosig = np.sqrt((alpha * sig1)**2 + (beta * sig2)**2)
    copsf = alpha * psfimg1 + beta * psfimg2
    conorm = np.sqrt(np.sum(copsf**2))
    codet = correlate(coadd, copsf) / conorm**2
    codetsig = cosig / conorm
    codetsn[ii] = codet.max() / codetsig

plt.clf()
plt.axhline(detmap.max()/detsig, color='k', linestyle='--', label='Detection map')
plt.plot(alphas, codetsn, 'b-', label='Detect on coadd')
plt.axhline(detmap1.max()/detsig1, color='r', linestyle=':',
            label='Single-image detection maps')
plt.axhline(detmap2.max()/detsig2, color='r', linestyle=':')
plt.legend(loc=(0.02, 0.75))
plt.xlim(0, 1)
plt.xlabel('Coadd weight')
plt.ylabel('Detection S/N')
plt.savefig('dont-coadd.pdf')
def detection(array, fwhm=4, psf=None, mode='lpeaks', bkg_sigma=5,
              matched_filter=False, mask=True, snr_thresh=5, nproc=1,
              plot=True, debug=False, full_output=False, verbose=True,
              **kwargs):
    """ Finds blobs in a 2d array. The algorithm is designed for automatically
    finding planets in post-processed high contrast final frames. A blob can be
    defined as a region of an image in which some properties are constant or
    vary within a prescribed range of values. See ``Notes`` below to read about
    the algorithm details.

    Parameters
    ----------
    array : numpy ndarray, 2d
        Input frame.
    fwhm : None or int, optional
        Size of the FWHM in pixels. If None and a ``psf`` is provided, then the
        FWHM is measured on the PSF image.
    psf : numpy ndarray
        Input PSF template. It must be normalized with the
        ``vip_hci.metrics.normalize_psf`` function.
    mode : {'lpeaks', 'log', 'dog', 'snrmap', 'snrmapf'}, optional
        Sets which algorithm to use. Each algorithm yields different results.
        See notes for the details of each method.
    bkg_sigma : int or float, optional
        The number of standard deviations above the clipped median for setting
        the background level. Used when ``mode`` is either 'lpeaks', 'dog' or
        'log'.
    matched_filter : bool, optional
        Whether to correlate the frame with the psf or not. Used when ``mode``
        is either 'lpeaks', 'dog' or 'log'.
    mask : bool, optional
        If True the central region (circular aperture of 2*FWHM radius) of the
        image will be masked out.
    snr_thresh : float, optional
        S/N threshold for deciding whether the blob is a detection or not. Used
        to threshold the S/N map when ``mode`` is set to 'snrmap' or 'snrmapf'.
    nproc : None or int, optional
        The number of processes for running the ``snrmap`` function.
    plot : bool, optional
        If True plots the frame showing the detected blobs on top.
    debug : bool, optional
        Whether to print and plot additional/intermediate results.
    full_output : bool, optional
        Whether to output just the coordinates of blobs that fulfill the SNR
        constraint or a table with all the blobs and the peak pixels and SNR.
    verbose : bool, optional
        Whether to print to stdout information about found blobs.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    yy, xx : numpy ndarray
        Two vectors with the y and x coordinates of the centers of the sources
        (potential planets). If full_output is True then a table with all the
        candidates that passed the 2d Gaussian fit constraints and their S/N is
        returned.

    Notes
    -----
    When ``mode`` is either 'lpeaks', 'dog' or 'log', the detection might
    happen in the input frame or in a match-filtered version of it (by setting
    ``matched_filter`` to True and providing a PSF template, to run a
    correlation filter). Filtering the image will smooth the noise and maximize
    detectability of objects with a shape similar to the kernel. When ``mode``
    is either 'snrmap' or 'snrmapf', the detection is done on an S/N map
    directly.

    When ``mode`` is set to:

    'lpeaks' (Local maxima): The local peaks above the background (computed
    using sigma clipped statistics) on the (correlated) frame are detected. A
    maximum filter is used for finding local maxima. This operation dilates the
    original image and merges neighboring local maxima closer than the size of
    the dilation. Locations where the original image is equal to the dilated
    image are returned as local maxima. The minimum separation between the
    peaks is 1*FWHM.

    'log' (Laplacian of Gaussian): It computes the Laplacian of Gaussian images
    with successively increasing standard deviation and stacks them up in a
    cube. Blobs are local maxima in this cube. LoG assumes that the blobs are
    bright on a dark background.

    'dog' (Difference of Gaussians): This is a faster approximation of the
    Laplacian of Gaussian approach. In this case the image is blurred with
    increasing standard deviations and the differences between two successively
    blurred images are stacked up in a cube. DoG assumes that the blobs are
    bright on a dark background.

    'snrmap' or 'snrmapf': A threshold is applied to the S/N map, computed with
    the ``snrmap`` function (``snrmapf`` calls ``snrmap`` with ``approximated``
    set to True). The threshold is given by ``snr_thresh`` and local maxima are
    found as in the case of 'lpeaks'.

    Finally, a 2d Gaussian fit is done on each of the potential blobs,
    constraining the position on a cropped sub-image and the sigma of the fit
    (to match the input FWHM). Then the blobs are filtered based on their S/N
    value, according to ``snr_thresh``.

    """
    def check_blobs(array, coords_temp, fwhm, debug):
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []
        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subsi = 3 * int(np.ceil(fwhm))
            if subsi % 2 == 0:
                subsi += 1

            if mode in ('lpeaks', 'log', 'dog'):
                scy = y + pad
                scx = x + pad
            elif mode in ('snrmap', 'snrmapf'):
                scy = y
                scx = x
            subim, suby, subx = get_square(array, subsi, scy, scx,
                                           position=True, force=True,
                                           verbose=False)
            cy, cx = frame_center(subim)
            gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx,
                                      y_mean=cy, theta=0,
                                      x_stddev=fwhm * gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm * gaussian_fwhm_to_sigma)

            sy, sx = np.indices(subim.shape)
            fitter = fitting.LevMarLSQFitter()
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive (> 0)
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit
            # are close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            condyf = np.allclose(fit.y_mean.value, cy, atol=2)
            condxf = np.allclose(fit.x_mean.value, cx, atol=2)
            condmf = np.allclose(mean_fwhm_fit, fwhm, atol=3)
            if fit.amplitude.value > 0 and condxf and condyf and condmf:
                coords.append((suby + fit.y_mean.value,
                               subx + fit.x_mean.value))

                if debug:
                    print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
                    print('fit peak = {:.3f}'.format(fit.amplitude.value))
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print(msg.format(fwhm_y, fwhm_x))
                    print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
                    if plot:
                        plot_frames(subim, colorbar=True, axis=False, dpi=60)
        return coords

    def print_coords(coords):
        print('Blobs found:', len(coords))
        print(' ycen   xcen')
        print('------ ------')
        for j in range(len(coords[:, 0])):
            print('{:.3f} \t {:.3f}'.format(coords[j, 0], coords[j, 1]))

    def print_abort():
        if verbose:
            print(sep)
            print('No potential sources found')
            print(sep)

    # --------------------------------------------------------------------------
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array')
    if psf is not None:
        if psf.ndim != 2 and psf.shape[0] < array.shape[0]:
            raise TypeError('Input psf is not a 2d array or has wrong size')
    else:
        if matched_filter:
            raise ValueError('`psf` must be provided when `matched_filter` is '
                             'True')

    if fwhm is None:
        if psf is not None:
            # Getting the FWHM from the PSF array
            cenpsf = frame_center(psf)
            outdf = fit_2dgaussian(psf, cent=(cenpsf), debug=debug,
                                   full_output=True)
            fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y']
            fwhm = np.mean([fwhm_x, fwhm_y])
            if verbose:
                print('FWHM = {:.2f} pxs\n'.format(fwhm))
            if debug:
                print('FWHM_y', fwhm_y)
                print('FWHM_x', fwhm_x)
        else:
            raise ValueError('`fwhm` or `psf` must be provided')

    # Masking the center, 2*lambda/D is the expected IWA
    if mask:
        array = mask_circle(array, radius=fwhm)

    # Generating a detection map: match-filtered frame or S/N map
    # For 'lpeaks', 'dog', 'log' it is possible to skip this step
    if mode in ('lpeaks', 'log', 'dog'):
        if matched_filter:
            frame_det = correlate(array, psf)
        else:
            frame_det = array

        if debug and plot and matched_filter:
            print('Match-filtered frame:')
            plot_frames(frame_det, colorbar=True)

        # Estimation of background level
        _, median, stddev = sigma_clipped_stats(frame_det, sigma=5,
                                                maxiters=None)
        bkg_level = median + (stddev * bkg_sigma)
        if debug:
            print('Sigma clipped median = {:.3f}'.format(median))
            print('Sigma clipped stddev = {:.3f}'.format(stddev))
            print('Background threshold = {:.3f}'.format(bkg_level), '\n')

    elif mode in ('snrmap', 'snrmapf'):
        if mode == 'snrmap':
            approx = False
        elif mode == 'snrmapf':
            approx = True
        frame_det = snrmap(array, fwhm=fwhm, approximated=approx, plot=False,
                           nproc=nproc, verbose=verbose)
        if debug and plot:
            print('Signal-to-noise ratio map:')
            plot_frames(frame_det, colorbar=True)

    if mode in ('lpeaks', 'log', 'dog'):
        # Padding the image with zeros to avoid errors at the edges
        pad = 10
        array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)

    if mode in ('lpeaks', 'snrmap', 'snrmapf'):
        if mode == 'lpeaks':
            threshold = bkg_level
        else:
            threshold = snr_thresh

        coords_temp = peak_local_max(frame_det, threshold_abs=threshold,
                                     min_distance=int(np.ceil(fwhm)),
                                     num_peaks=20)
        if mode == 'lpeaks':
            coords = check_blobs(array_padded, coords_temp, fwhm, debug)
        else:
            coords = check_blobs(array, coords_temp, fwhm, debug)
        coords = np.array(coords)
        if verbose and coords.shape[0] > 0:
            print_coords(coords)

    elif mode == 'log':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_log(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5, max_sigma=sigma + .5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    elif mode == 'dog':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_dog(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5, max_sigma=sigma + .5)
        if len(coords) == 0:
            print_abort()
            return 0, 0
        coords = coords[:, :2]
        coords = check_blobs(array_padded, coords, fwhm, debug)
        coords = np.array(coords)
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    else:
        raise ValueError('`mode` not recognized')

    if coords.shape[0] == 0:
        print_abort()
        return 0, 0

    yy = coords[:, 0]
    xx = coords[:, 1]
    yy_final = []
    xx_final = []
    yy_out = []
    xx_out = []
    snr_list = []
    if mode in ('lpeaks', 'log', 'dog'):
        xx -= pad
        yy -= pad

    # Checking S/N for potential sources
    for i in range(yy.shape[0]):
        y = yy[i]
        x = xx[i]
        if verbose:
            print('')
            print(sep)
            print('X,Y = ({:.1f},{:.1f})'.format(x, y))
        snr_value = snr(array, (x, y), fwhm, False, verbose=False)
        snr_list.append(snr_value)

        if snr_value >= snr_thresh:
            if verbose:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)
            yy_final.append(y)
            xx_final.append(x)
        else:
            yy_out.append(y)
            xx_out.append(x)
            if verbose:
                msg = 'S/N constraint NOT fulfilled (S/N = {:.3f})'
                print(msg.format(snr_value))
            if debug:
                _ = frame_report(array, fwhm, (x, y), verbose=verbose)

    if verbose:
        print(sep)

    if debug or full_output:
        table_full = pn.DataFrame({'y': yy.tolist(),
                                   'x': xx.tolist(),
                                   'px_snr': snr_list})
        table_full.sort_values('px_snr')

    yy_final = np.array(yy_final)
    xx_final = np.array(xx_final)
    yy_out = np.array(yy_out)
    xx_out = np.array(xx_out)
    table = pn.DataFrame({'y': yy_final.tolist(), 'x': xx_final.tolist()})

    if plot:
        coords = tuple(zip(xx_out.tolist() + xx_final.tolist(),
                           yy_out.tolist() + yy_final.tolist()))
        circlealpha = [0.3] * len(xx_out)
        circlealpha += [1] * len(xx_final)
        plot_frames(array, dpi=120, circle=coords, circle_alpha=circlealpha,
                    circle_label=True, circle_radius=fwhm, **kwargs)

    if debug:
        print(table_full)

    if full_output:
        return table_full
    else:
        return table
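A minimal, self-contained sketch of the 'lpeaks' path described in the docstring, using only standard libraries; the real function above additionally pads the frame, refits each peak with a 2d Gaussian and applies an S/N cut:

import numpy as np
from scipy.ndimage import correlate
from skimage.feature import peak_local_max
from astropy.stats import sigma_clipped_stats

def lpeaks_sketch(frame, psf, fwhm=4, bkg_sigma=5):
    # Matched filter: correlate the frame with the PSF template.
    frame_det = correlate(frame, psf)
    # Background threshold from sigma-clipped statistics of the filtered frame.
    _, med, std = sigma_clipped_stats(frame_det, sigma=5)
    bkg_level = med + bkg_sigma * std
    # Local maxima above the background, separated by at least one FWHM.
    return peak_local_max(frame_det, threshold_abs=bkg_level,
                          min_distance=int(np.ceil(fwhm)), num_peaks=20)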
def detection(array, psf, bkg_sigma=3, mode='lpeaks', matched_filter=True,
              mask=True, snr_thresh=5, plot=True, debug=False,
              full_output=False, verbose=True):
    """ Finds blobs in a 2d array. The algorithm is designed for automatically
    finding planets in post-processed high contrast final frames. A blob can be
    defined as a region of an image in which some properties are constant or
    vary within a prescribed range of values. See <Notes> below to read about
    the algorithm details.

    Parameters
    ----------
    array : array_like, 2d
        Input frame.
    psf : array_like
        Input psf.
    bkg_sigma : float, optional
        The number of standard deviations above the clipped median for setting
        the background level.
    mode : {'lpeaks','irafsf','daofind','log','dog'}, optional
        Sets which algorithm to use. Each algorithm yields different results.
    matched_filter : {True, False}, bool optional
        Whether to correlate with the psf or not.
    mask : {True, False}, optional
        Whether to mask the central region (circular aperture of 2*fwhm
        radius).
    snr_thresh : float, optional
        SNR threshold for deciding whether the blob is a detection or not.
    plot : {True, False}, bool optional
        If True plots the frame showing the detected blobs on top.
    debug : {False, True}, bool optional
        Whether to print and plot additional/intermediate results.
    full_output : {False, True}, bool optional
        Whether to output just the coordinates of blobs that fulfill the SNR
        constraint or a table with all the blobs and the peak pixels and SNR.
    verbose : {True, False}, bool optional
        Whether to print to stdout information about found blobs.

    Returns
    -------
    yy, xx : array_like
        Two vectors with the y and x coordinates of the centers of the sources
        (putative planets). If full_output is True then a table with all the
        candidates that passed the 2d Gaussian fit constraints and their SNR is
        returned, as well as the count of companions with SNR>5 (those with the
        highest probability of being true detections).

    Notes
    -----
    The PSF is used to run a matched filter (correlation) which is equivalent
    to a convolution filter. Filtering the image will smooth the noise and
    maximize detectability of objects with a shape similar to the kernel.
    The background level or threshold is found with sigma clipped statistics
    (5 sigma over the median) on the image. Then 5 different strategies can be
    used to detect the blobs (planets):

    Local maxima + 2d Gaussian fit. The local peaks above the background on the
    (correlated) frame are detected. A maximum filter is used for finding local
    maxima. This operation dilates the original image and merges neighboring
    local maxima closer than the size of the dilation. Locations where the
    original image is equal to the dilated image are returned as local maxima.
    The minimum separation between the peaks is 1*FWHM. A 2d Gaussian fit is
    done on each of the maxima constraining the position on the subimage and
    the sigma of the fit. Finally an SNR criterion can be applied.

    Laplacian of Gaussian. It computes the Laplacian of Gaussian images with
    successively increasing standard deviation and stacks them up in a cube.
    Blobs are local maxima in this cube. Detecting larger blobs is especially
    slower because of larger kernel sizes during convolution. Only bright blobs
    on dark backgrounds are detected. This is the most accurate and slowest
    approach.

    Difference of Gaussians. This is a faster approximation of the LoG
    approach. In this case the image is blurred with increasing standard
    deviations and the differences between two successively blurred images are
    stacked up in a cube. This method suffers from the same disadvantage as the
    LoG approach for detecting larger blobs. Blobs are again assumed to be
    bright on dark.

    Irafsf. The starfind algorithm (IRAF software) searches images for local
    density maxima that have a peak amplitude greater than threshold above the
    local background and have a PSF full-width half-maximum similar to the
    input fwhm. The objects' centroid, roundness (ellipticity), and sharpness
    are calculated using image moments.

    Daofind. Searches images for local density maxima that have a peak
    amplitude greater than threshold (approximately; threshold is applied to a
    convolved image) and have a size and shape similar to the defined 2D
    Gaussian kernel. The Gaussian kernel is defined by the fwhm, ratio, theta,
    and sigma_radius input parameters. Daofind finds the object centroid by
    fitting the marginal x and y 1D distributions of the Gaussian kernel to the
    marginal x and y distributions of the input (unconvolved) data image.

    """
    def print_coords(coords):
        print 'Blobs found:', len(coords)
        print ' ycen   xcen'
        print '------ ------'
        for i in range(len(coords[:, 0])):
            print ' ', coords[i, 0], '\t', coords[i, 1]

    if not array.ndim == 2:
        raise TypeError('Input array is not a frame or 2d array')
    if not psf.ndim == 2 and psf.shape[0] < array.shape[0]:
        raise TypeError('Input psf is not a 2d array or has wrong size')

    # Getting the FWHM with a 2d gaussian fit on the PSF
    gauss = Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=3.5,
                       y_stddev=3.5, theta=0)
    fitter = LevMarLSQFitter()                 # Levenberg-Marquardt algorithm
    psf_subimage = get_square(psf, 9, frame_center(psf)[0],
                              frame_center(psf)[1])
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage)
    fwhm = np.mean([fit.y_stddev.value * gaussian_sigma_to_fwhm,
                    fit.x_stddev.value * gaussian_sigma_to_fwhm])
    if verbose:
        print 'FWHM =', fwhm
        print
    if debug:
        print 'FWHM_y ', fit.y_stddev.value * gaussian_sigma_to_fwhm
        print 'FWHM_x ', fit.x_stddev.value * gaussian_sigma_to_fwhm
        print

    # Masking the center, 2*lambda/D is the expected IWA
    if mask:
        array = mask_circle(array, radius=2 * fwhm)

    # Matched filter
    if matched_filter:
        frame_det = correlate(array, psf)
    else:
        frame_det = array

    # Estimation of background level
    _, median, stddev = sigma_clipped_stats(frame_det, sigma=5, iters=None)
    bkg_level = median + (stddev * bkg_sigma)
    if debug:
        print 'Sigma clipped median = {:.3f}'.format(median)
        print 'Sigma clipped stddev = {:.3f}'.format(stddev)
        print 'Background threshold = {:.3f}'.format(bkg_level)
        print

    round = 0.3                                # roundness constraint

    # Padding the image with zeros to avoid errors at the edges
    pad = 10
    array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)

    if debug and plot and matched_filter:
        print 'Input frame after matched filtering'
        pp_subplots(frame_det, size=6, rows=2, colorb=True)

    if mode == 'lpeaks':
        # Finding local peaks (can be done in the correlated frame)
        coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level,
                                     min_distance=fwhm, num_peaks=20)
        y_temp = coords_temp[:, 0]
        x_temp = coords_temp[:, 1]
        coords = []

        # Fitting a 2d gaussian to each local maxima position
        for y, x in zip(y_temp, x_temp):
            subim, suby, subx = get_square(array_padded,
                                           2 * int(np.ceil(fwhm)),
                                           y + pad, x + pad, position=True)
            cy, cx = frame_center(subim)

            gauss = Gaussian2D(amplitude=subim.max(), x_mean=cx, y_mean=cy,
                               x_stddev=fwhm * gaussian_fwhm_to_sigma,
                               y_stddev=fwhm * gaussian_fwhm_to_sigma, theta=0)
            sy, sx = np.indices(subim.shape)
            fit = fitter(gauss, sx, sy, subim)

            # checking that the amplitude is positive (> 0)
            # checking whether the x and y centroids of the 2d gaussian fit
            # coincide with the center of the subimage (within 2px error)
            # checking whether the mean of the fwhm in y and x of the fit are
            # close to the FWHM_PSF with a margin of 3px
            fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
            fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
            mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
            if fit.amplitude.value > 0 \
                    and np.allclose(fit.y_mean.value, cy, atol=2) \
                    and np.allclose(fit.x_mean.value, cx, atol=2) \
                    and np.allclose(mean_fwhm_fit, fwhm, atol=3):
                coords.append((suby + fit.y_mean.value,
                               subx + fit.x_mean.value))

                if debug:
                    print 'Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x)
                    print 'fit peak = {:.3f}'.format(fit.amplitude.value)
                    #print fit
                    msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
                    print msg.format(fwhm_y, fwhm_x)
                    print 'mean fit fwhm = {:.3f}'.format(mean_fwhm_fit)
                    pp_subplots(subim, colorb=True)

        coords = np.array(coords)
        if verbose and coords.shape[0] > 0:
            print_coords(coords)

    elif mode == 'daofind':
        tab = findstars.daofind(frame_det, fwhm=fwhm, threshold=bkg_level,
                                roundlo=-round, roundhi=round)
        coords = np.transpose((np.array(tab['ycentroid']),
                               np.array(tab['xcentroid'])))
        if verbose:
            print 'Blobs found:', len(coords)
            print tab['ycentroid', 'xcentroid', 'roundness1', 'roundness2',
                      'flux']

    elif mode == 'irafsf':
        tab = findstars.irafstarfind(frame_det, fwhm=fwhm,
                                     threshold=bkg_level,
                                     roundlo=0, roundhi=round)
        coords = np.transpose((np.array(tab['ycentroid']),
                               np.array(tab['xcentroid'])))
        if verbose:
            print 'Blobs found:', len(coords)
            print tab['ycentroid', 'xcentroid', 'fwhm', 'flux', 'roundness']

    elif mode == 'log':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_log(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5, max_sigma=sigma + .5)
        coords = coords[:, :2]
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    elif mode == 'dog':
        sigma = fwhm * gaussian_fwhm_to_sigma
        coords = feature.blob_dog(frame_det.astype('float'),
                                  threshold=bkg_level,
                                  min_sigma=sigma - .5, max_sigma=sigma + .5)
        coords = coords[:, :2]
        if coords.shape[0] > 0 and verbose:
            print_coords(coords)

    else:
        msg = 'Wrong mode. Available modes: lpeaks, daofind, irafsf, log, dog.'
        raise TypeError(msg)

    if coords.shape[0] == 0:
        if verbose:
            print '_________________________________________'
            print 'No potential sources found'
            print '_________________________________________'
        return 0, 0

    yy = coords[:, 0]
    xx = coords[:, 1]
    yy_final = []
    xx_final = []
    yy_out = []
    xx_out = []
    snr_list = []
    px_list = []
    if mode == 'lpeaks':
        xx -= pad
        yy -= pad

    # Checking SNR for potential sources
    for i in xrange(yy.shape[0]):
        y = yy[i]
        x = xx[i]
        if verbose:
            print '_________________________________________'
            print 'Y,X = ({:.1f},{:.1f}) -----------------------'.format(y, x)
        subim = get_square(array, size=15, y=y, x=x)
        snr = snr_ss(array, y, x, fwhm, False, verbose=False)
        snr_list.append(snr)
        px_list.append(array[y, x])
        if snr >= snr_thresh and array[y, x] > 0:
            if plot:
                pp_subplots(subim, size=2)
            if verbose:
                _ = frame_quick_report(array, fwhm, y=y, x=x, verbose=verbose)
            yy_final.append(y)
            xx_final.append(x)
        else:
            yy_out.append(y)
            xx_out.append(x)
            if verbose:
                print 'SNR constraint NOT fulfilled'
            if debug:
                if plot:
                    pp_subplots(subim, size=2)
                _ = frame_quick_report(array, fwhm, y=y, x=x, verbose=verbose)
            else:
                if verbose:
                    print 'SNR = {:.3f}'.format(snr)

    if debug or full_output:
        table = Table([yy.tolist(), xx.tolist(), px_list, snr_list],
                      names=('y', 'x', 'px_val', 'px_snr'))
        table.sort('px_snr')

    yy_final = np.array(yy_final)
    xx_final = np.array(xx_final)
    yy_out = np.array(yy_out)
    xx_out = np.array(xx_out)

    if plot:
        print
        print '_________________________________________'
        print 'Input frame showing all the detected blobs / potential sources'
        print 'In red circles those that did not pass the SNR and 2dGauss fit constraints'
        print 'In cyan circles those that passed the constraints'
        fig, ax = plt.subplots(figsize=(8, 8))
        im = ax.imshow(array, origin='lower', interpolation='nearest',
                       cmap='gray')
        colorbar_ax = fig.add_axes([0.92, 0.12, 0.03, 0.78])
        fig.colorbar(im, cax=colorbar_ax)
        ax.grid('off')

        for i in xrange(yy_out.shape[0]):
            y = yy_out[i]
            x = xx_out[i]
            circ = plt.Circle((x, y), radius=2 * fwhm, color='red',
                              fill=False, linewidth=2)
            ax.text(x, y + 5 * fwhm, (int(y), int(x)), fontsize=10,
                    color='red', family='monospace', ha='center', va='top',
                    weight='bold')
            ax.add_patch(circ)
        for i in xrange(yy_final.shape[0]):
            y = yy_final[i]
            x = xx_final[i]
            circ = plt.Circle((x, y), radius=2 * fwhm, color='cyan',
                              fill=False, linewidth=2)
            ax.text(x, y + 5 * fwhm, (int(y), int(x)), fontsize=10,
                    color='cyan', weight='heavy', family='monospace',
                    ha='center', va='top')
            ax.add_patch(circ)
        plt.show()

    if debug:
        print table

    if full_output:
        return table, yy_final.shape[0]
    else:
        return yy_final, xx_final
def imblur(Y, sig=5, siz=11, nDimBlur=None, kernel=None):
    """Spatial filtering with a Gaussian or user defined kernel

    The parameters are specified in GreedyROI2d
    """
    from scipy.ndimage.filters import correlate
    # from scipy.signal import correlate

    X = np.zeros(np.shape(Y))

    if kernel is None:
        if nDimBlur is None:
            nDimBlur = Y.ndim - 1
        else:
            nDimBlur = np.min((Y.ndim, nDimBlur))

        if np.isscalar(sig):
            sig = sig * np.ones(nDimBlur)
        if np.isscalar(siz):
            siz = siz * np.ones(nDimBlur)

        xx = np.arange(-np.floor(siz[0] / 2), np.floor(siz[0] / 2) + 1)
        yy = np.arange(-np.floor(siz[1] / 2), np.floor(siz[1] / 2) + 1)
        hx = np.exp(-xx**2 / (2 * sig[0]**2))
        hx /= np.sqrt(np.sum(hx**2))
        hy = np.exp(-yy**2 / (2 * sig[1]**2))
        hy /= np.sqrt(np.sum(hy**2))

        for t in range(np.shape(Y)[-1]):
            temp = correlate(Y[:, :, t], hx[:, np.newaxis], mode='wrap')
            X[:, :, t] = correlate(temp, hy[np.newaxis, :], mode='wrap')
    else:
        for t in range(np.shape(Y)[-1]):
            X[:, :, t] = correlate(Y[:, :, t], kernel, mode='wrap')

    ## uncomment the following for general n-dim filtering
    # xx = []
    # hx = []
    # for i in range(nDimBlur):
    #     vec = np.arange(-np.floor(siz[i]/2), np.floor(siz[i]/2)+1)
    #     xx.append(vec)
    #     fil = np.exp(-xx[i]**2/(2*sig[0]**2))
    #     hx.append(fil/np.sqrt(np.sum(fil**2)))
    #
    # X = np.zeros(np.shape(Y))
    # siz = tuple([1]*nDimBlur)
    # sizY = np.shape(Y)
    # for t in range(sizY[-1]):
    #     temp = Y[...,t]
    #     for i in range(nDimBlur):
    #         I = [0]*nDimBlur
    #         I[i] = range(sizY[i])
    #         siz[i] = sizY[i]
    #         H = np.zeros(siz)
    #         H[tuple(I)] = hx[i]
    #         temp = correlate(temp, H, mode='wrap')
    #
    #     X[...,t] = temp
    return X
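A quick usage sketch for imblur on a hypothetical small movie of shape (height, width, frames); each frame is smoothed with the separable Gaussian defined above, using periodic ('wrap') boundaries.

import numpy as np

Y_demo = np.random.rand(32, 32, 5)         # hypothetical movie: 5 frames of 32x32 pixels
Y_smooth = imblur(Y_demo, sig=2, siz=9)    # same shape, each frame Gaussian-blurred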
def stroke_length(self) -> float:
    """Length of the estimated skeleton."""
    skel = self.skeleton.astype(float)
    conv = filters.correlate(skel, _SKEL_LEN_MASK, mode='constant')
    up_length = np.einsum('ij,ij->', conv, skel)  # type: float
    return up_length / self.scale
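The actual _SKEL_LEN_MASK is not shown here; the sketch below uses a hypothetical 3x3 mask to illustrate the idea: correlating the binary skeleton with per-neighbour half-lengths and summing the result over skeleton pixels counts each link between adjacent skeleton pixels once, approximating the skeleton's Euclidean length in pixels.

import numpy as np
from scipy.ndimage import filters

# Hypothetical mask: half of each neighbour's Euclidean distance, since every
# link between two skeleton pixels is visited from both of its ends.
_DEMO_LEN_MASK = 0.5 * np.array([[np.sqrt(2), 1, np.sqrt(2)],
                                 [1,          0, 1         ],
                                 [np.sqrt(2), 1, np.sqrt(2)]])

skel_demo = np.zeros((5, 5))
skel_demo[2, 1:4] = 1                            # a straight 3-pixel stroke
conv = filters.correlate(skel_demo, _DEMO_LEN_MASK, mode='constant')
length = np.einsum('ij,ij->', conv, skel_demo)   # ~2.0 pixels for this stroke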
    kw = 2
    red = (255, 0, 0)
    draw = ImageDraw.Draw(img)
    for y, x in locs:
        x2 = x
        # flip the y-axis
        y2 = h - y
        bbox = (x2 - kw, y2 - kw, x2 + kw, y2 + kw)
        draw.ellipse(bbox, outline=red)
    return img

test = np.zeros((96, 160))
test[32:64, 32:64] = x
test[32:64, 96:128] = main_component
#figure().suptitle('test image')
#imshow(test)

from scipy.ndimage import filters

kwidth = 8
o = filters.correlate(test, x)

figure().suptitle('square pooling')
t = filters.correlate(o**2, np.ones((kwidth, kwidth)) / (kwidth * kwidth))
img = DrawLocations(t, np.argwhere(t == t.max()))
imshow(img)

figure().suptitle('max pooling')
t = filters.maximum_filter(o, (kwidth, kwidth))
img = DrawLocations(t, [np.argwhere(t == t.max())[0]])
imshow(img)