def xyshift(path):
    path1 = path[24:70]
    path1 = path1[0:29]
    f1 = h5py.File(path1[0], 'r')
    f2 = f1["MAPS"]["XRF_roi"][...]
    ## element to use for cross-correlation horizontally
    element1 = 15
    ## element to use for cross-correlation vertically
    element2 = 18
    ## element for output
    element3 = 15
    matrix1 = np.zeros([len(path1), 24, 38, 151])
    matrix2 = np.zeros([len(path1), 24, 38, 151])
    matrix3 = np.zeros([len(path1), 24, 38, 151])
    theta = np.zeros(len(path1))
    for i in np.arange(len(path1)):
        fp = h5py.File(path1[i], 'r')["MAPS"]
        f = fp["XRF_roi"][...]
        matrix2[i, :, :, :] = f
        matrix3[i, :, :, :] = f
        matrix1[i, :, :, :] = f
        temp = fp["extra_pvs_as_csv"][99].decode()  # h5py yields bytes under Python 3
        theta[i] = temp[temp.rfind(",") + 2:]
    matrix2[:, element3, :, :] = np.roll(matrix2[:, element3, :, :], 40, axis=2)
    matrix3[:, element3, :, :] = np.roll(matrix1[:, element3, :, :], 40, axis=2)
    shift = np.zeros([2, len(path1)], dtype=int)
    for i in np.arange(len(path1) - 1):
        cor2d = correlate2d(matrix1[i, element1, :, :], matrix1[i + 1, element1, :, :],
                            fillvalue=np.average(matrix1[i, element1, :, :]))
        b1 = np.where(cor2d == cor2d.max())
        cor2d = correlate2d(matrix1[i, element2, :, :], matrix1[i + 1, element2, :, :],
                            fillvalue=np.average(matrix1[i, element2, :, :]))
        b2 = np.where(cor2d == cor2d.max())
        x1 = b1[1][0]
        y1 = b2[0][0]
        x = 151
        y = 38
        for j in np.arange(len(path1) - 1 - i):
            matrix2[i + j + 1, element3, :, :] = np.roll(matrix2[i + j + 1, element3, :, :], x1 - x + 1, axis=1)
            matrix2[i + j + 1, element3, :, :] = np.roll(matrix2[i + j + 1, element3, :, :], y1 - y + 1, axis=0)
        print(y1, x1)
        shift[0, i + 1] = y1
        shift[1, i + 1] = x1
    ## for i in np.arange(len(path1)):
    ##     j = Image.fromarray(matrix2[i, element2, 8:30, :].astype(np.float32))
    ##     if i >= 10:
    ##         j.save("/Users/youngpyohong/Documents/Work/2014-2/hong/hong/projections/shifted" + str(i) + ".tiff")
    ##     else:
    ##         j.save("/Users/youngpyohong/Documents/Work/2014-2/hong/hong/projections/shifted0" + str(i) + ".tiff")
    return shift, matrix2[:, element3, 8:30, :], theta
def xcor(data, element1, element2):
    xmatrix = data[element1, :, :, :]
    ymatrix = data[element2, :, :, :]
    finalmatrix = np.zeros(data.shape, dtype=np.float32)
    shift = np.zeros([2, data.shape[1]], dtype=int)
    for i in np.arange(len(data[0, :, 0, 0]) - 1):
        cor2dx = correlate2d(xmatrix[i, :, :], xmatrix[i + 1, :, :],
                             fillvalue=np.average(xmatrix[i, :, :]))
        position1 = np.where(cor2dx == cor2dx.max())
        cor2dy = correlate2d(ymatrix[i, :, :], ymatrix[i + 1, :, :],
                             fillvalue=np.average(ymatrix[i, :, :]))
        position2 = np.where(cor2dy == cor2dy.max())
        x1 = position1[1][0]
        y1 = position2[0][0]
        shift[0, i + 1] = y1
        shift[1, i + 1] = x1
        x = len(xmatrix[i, 0, :])
        y = len(ymatrix[i, :, 0])
        for j in np.arange(len(data[0, :, 0, 0]) - 1 - i):
            finalmatrix[i + j + 1, :, :, :] = np.roll(data[i + j + 1, :, :, :], x1 - x + 1, axis=2)
            # roll the already-shifted frame, not the raw data, so both axis shifts apply
            finalmatrix[i + j + 1, :, :, :] = np.roll(finalmatrix[i + j + 1, :, :, :], y1 - y + 1, axis=1)
        print("Xcor", i)
    return finalmatrix, shift
def gradients(image):
    deriv_filter = np.array([[-1., 0., 1.], [-2., 0, 2.], [-1., 0., 1.]])
    # derivative in x direction
    deriv_x = correlate2d(image, deriv_filter, mode="same")
    # derivative in y direction
    deriv_y = correlate2d(image, deriv_filter.T, mode="same")
    gradients = np.sqrt(deriv_x**2 + deriv_y**2)
    return gradients
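# A small usage sketch (illustrative, not part of the original source): a
# vertical step edge should produce its largest gradient magnitudes on the
# columns flanking the step.
import numpy as np
from scipy.signal import correlate2d

step_edge = np.zeros((5, 5))
step_edge[:, 3:] = 1.0        # vertical step edge
print(gradients(step_edge))   # peak responses near columns 2-3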
def matchIms():
    bmp = Image.open('C:/Copy/workspace/TyperSharkAI/Images/Play4.png').convert('L')
    shark = Image.open('basic_template.gif')
    shark.load()
    #shark.show()
    bmp = np.array(bmp)
    shark = np.array(shark)
    #Image.fromarray(shark).show()
    Image.fromarray(bmp).show()
    #bmp.show()
    print(bmp.shape)
    print(shark.shape)
    return signal.correlate2d(shark, bmp)  # return the correlation map rather than discarding it
def get_pssm_scores(encoded_sequences, pssm):
    encoded_sequences = np.squeeze(encoded_sequences, axis=1)
    num_samples, num_bases, seq_length = np.shape(encoded_sequences)
    scores = np.ones((num_samples, num_bases, seq_length))
    for base_indx in range(num_bases):
        base_pssm = pssm[base_indx].reshape(1, len(pssm[0]))
        fwd_scores = correlate2d(
            encoded_sequences[:, base_indx, :], base_pssm, mode='same')
        rc_base_pssm = pssm[-(base_indx + 1), ::-1].reshape(1, len(pssm[0]))
        rc_scores = correlate2d(
            encoded_sequences[:, base_indx, :], rc_base_pssm, mode='same')
        scores[:, base_indx, :] = np.maximum(fwd_scores, rc_scores)
    return scores.sum(axis=1)
def correlationMatrix(img1, img2):
    matrix = signal.correlate2d(img1, img2, mode="full", boundary="fill", fillvalue=3)
    return matrix
def template_correlate(template, image):
    '''Returns the top-left point of the bounding box (x, y)'''
    corr = correlate2d(image, template, mode='same')
    # np.unravel_index returns the (row, col) of the correlation peak
    x, y = np.unravel_index(np.argmax(corr), image.shape)
    return x, y
def imf(f, kernel, τ, max_iters):
    """
    Find the next Intrinsic Mode Function of an image

    Arguments:
        f (np.ndarray): A 2-d image matrix, values have mean of zero
        kernel (np.ndarray): A 2-d cross-correlation kernel
        τ (float): The termination error threshold
        max_iters (int): The largest number of permissible iterations

    Returns an np.ndarray and the final termination error value
    """
    err = []
    try:
        from tqdm.autonotebook import tqdm
        rng = tqdm(range(max_iters), leave=False)
        use_tqdm = True
    except ImportError:
        rng = range(max_iters)
        use_tqdm = False
    for i in rng:
        mva = correlate2d(f, kernel, mode='same', boundary='symm')
        last = f
        f = f - mva
        err.append(np.linalg.norm(f - last, 2) / np.linalg.norm(last, 2))
        if err[-1] < τ or err[-1] > min(err):
            if use_tqdm:
                rng.reset()
                rng.close()
            break
    return f, err
def corr_NVs_no_subset(baseline_image, new_image):
    """
    Tracks drift by correlating new and old images, and returns shift in pixels
    :param baseline_image: original image
    :param new_image: new (drifted) image. Should be same size as baseline_image in pixels
    :return: shift from baseline image to new image in pixels
    """
    # subtracts mean to sharpen each image and sharpen correlation
    baseline_image_sub = baseline_image - baseline_image.mean()
    new_image_sub = new_image - new_image.mean()

    # takes center part of baseline image (integer division keeps indices valid under Python 3)
    x_len = len(baseline_image_sub[0])
    y_len = len(baseline_image_sub)
    old_image = baseline_image_sub[(x_len // 4):(x_len * 3 // 4), (y_len // 4):(y_len * 3 // 4)]

    # correlate with new image. mode='valid' ignores all correlation points where an image is out
    # of bounds. if baseline and new image are NxN, returns a (N/2)x(N/2) correlation
    corr = signal.correlate2d(new_image_sub, baseline_image_sub)
    y, x = np.unravel_index(np.argmax(corr), corr.shape)

    # finds shift by subtracting center of initial coordinates, x_shift = x + (x_len/4) - (x_len/2)
    x_shift = x - x_len
    y_shift = y - y_len

    #return (x_shift, y_shift) #, corr, old_image --- test outputs
    return (x_shift, y_shift, corr, old_image)  # --- test outputs
def subtract_background_v1(img, fgbg, kernel_size, threshold):
    """Perform background subtraction using OpenCV and outlier rejection.

    To perform noise/outlier rejection, we zero out regions that don't
    contain many pixels after background subtraction.

    Parameters
    ----------
    img: ndarray
        grayscale frame to process
    fgbg
        OpenCV background subtraction object
    kernel_size: int
        size of the kernel used for denoising
    threshold: int
        denoising threshold
    """
    fg_mask = fgbg.apply(img)
    fg_mask = np.clip(fg_mask, 0, 1)
    corr = signal.correlate2d(fg_mask, np.ones((kernel_size, kernel_size)),
                              mode='same', boundary='fill', fillvalue=0)
    fg_mask *= (corr > threshold)  # remove noise
    fg = img * fg_mask
    return fg, fg_mask
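# Sketch of the outlier-rejection step above in isolation (the 3x3 kernel and
# threshold here are illustrative stand-ins for kernel_size and threshold):
# the correlation counts foreground pixels in each neighborhood, so isolated
# specks fall below the threshold while solid blobs survive.
import numpy as np
from scipy.signal import correlate2d

mask = np.zeros((7, 7), dtype=int)
mask[1, 1] = 1                     # isolated speck
mask[4:7, 4:7] = 1                 # solid 3x3 blob
counts = correlate2d(mask, np.ones((3, 3)), mode='same', boundary='fill', fillvalue=0)
print(mask * (counts > 3))         # the speck is removed, the blob remains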
def chroma_cross_correlate_full(chroma1_par, chroma2_par):
    length1 = chroma1_par.size // 12  # integer division: these are row counts
    length2 = chroma2_par.size // 12
    if length1 > length2:
        chroma1 = chroma1_par.reshape(length1, 12)
        chroma2 = chroma2_par.reshape(length2, 12)
    else:
        chroma2 = chroma1_par.reshape(length1, 12)
        chroma1 = chroma2_par.reshape(length2, 12)
    corr = correlate2d(chroma1, chroma2, mode='full')
    transposed_chroma = corr.transpose()
    #print("length1: " + str(length1))
    #print("length2: " + str(length2))
    #transposed_chroma = transposed_chroma / (min(length1, length2))
    index = np.where(transposed_chroma == np.amax(transposed_chroma))
    index = int(index[0])
    #print("index: " + str(index))
    # undo the transpose before extracting the row at the peak
    transposed_chroma = transposed_chroma.transpose()
    transposed_chroma = np.transpose(transposed_chroma)
    mean_line = transposed_chroma[index]
    sos = butter(1, 0.1, 'high', analog=False, output='sos')
    mean_line = sosfilt(sos, mean_line)
    #print(np.max(mean_line))
    return np.max(mean_line)
def correlation_retrieval(data, K=None, ARGS=False):
    f_data = np.fft.fft2(data)
    kx = np.fft.fftfreq(f_data.shape[0])
    ky = np.fft.fftfreq(f_data.shape[1])
    # get k-vector, if not given
    if K is None:
        K = get_wave(f_data)
    # get the frequency data information
    Kfreq = kx[K[0]], ky[K[1]]
    # make an X-Y grid
    pi = np.pi
    nx, ny = np.shape(data)
    Y, X = np.meshgrid(np.arange(ny), np.arange(nx),
                       sparse=False, indexing='xy')
    print(Kfreq)
    # produce a correlation function
    phplot.imageshow(wave_function(X, Y, Kfreq))
    zsum = np.zeros(data.shape)
    Nphi = 1  # np.linspace requires an integer sample count
    a_phi = np.linspace(0, 2 * np.pi, Nphi)
    for phi in a_phi:
        zsum += correlate2d(data, wave_function(X, Y, K, phi), mode='same')
    phplot.imageshow(zsum)
    return data
def step(self):
    """Progress forest fire by one time step."""
    m, n = self.array.shape
    p, f = self.p, self.f
    a = self.array
    new_trees = np.random.choice([1, 0], size=(m, n), p=[p, 1 - p])
    new_fires = np.random.choice([1, 0], size=(m, n), p=[f, 1 - f])
    c = correlate2d(a, self.kernel, mode='same')
    #print('a:\n', a)
    #print('c:\n', c)
    #print('new_trees:\n', new_trees)
    #print('new_fires:\n', new_fires)

    # Trees on fire
    self.array[(c < 25) & (c >= 10) & (a == 1)] = 5
    # Trees burned down
    self.array[c >= 25] = 0
    # Add new trees
    self.array[(a == 0) & (new_trees == 1)] = 1
    # Add new fires
    self.array[(a == 1) & (new_fires == 1)] = 5
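# self.kernel is not shown in this snippet; a plausible choice consistent with
# the thresholds above is a center weight of 5 and neighbor weights of 1, so a
# burning cell (value 5) scores 25+ and a tree next to fire scores 10+. This
# standalone check uses that assumed kernel, not the original class's.
import numpy as np
from scipy.signal import correlate2d

state = np.array([[1, 1, 0],
                  [1, 5, 0],
                  [0, 0, 1]])      # 0 empty, 1 tree, 5 burning
kernel = np.array([[1, 1, 1],
                   [1, 5, 1],
                   [1, 1, 1]])
c = correlate2d(state, kernel, mode='same')
print((c < 25) & (c >= 10) & (state == 1))   # trees touching the fire ignite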
def align(thar, cs_lines, manual=False, plot=False):
    # Align using window like in IDL REDUCE
    if manual:
        _, ax = plt.subplots()
        ap = AlignmentPlot(ax, thar, cs_lines)
        ap.connect()
        plt.show()
        offset = ap.offset
    else:
        # make image from cs_lines
        min_order = np.min(cs_lines.order)
        img = np.zeros_like(thar)
        for line in cs_lines:
            img[line.order, line.xfirst:line.xlast] = line.height * signal.gaussian(
                line.xlast - line.xfirst, line.width)
        img = np.ma.masked_array(img, mask=img == 0)

        # Cross correlate with thar image
        correlation = signal.correlate2d(thar, img, mode="same")
        offset_order, offset_x = np.unravel_index(np.argmax(correlation), correlation.shape)
        offset_order = 2 * offset_order - thar.shape[0] + min_order + 1
        offset_x = offset_x - thar.shape[1] / 2
        offset = int(offset_order), int(offset_x)

    if plot:
        _, ax = plt.subplots()
        AlignmentPlot(ax, thar, cs_lines, offset=offset)
        plt.show()
    return offset
def estimateNoise(model):
    # noise = estimateNoise(model)
    # This function computes the noise of a sequence from the structure
    # 'model' computed previously.
    # R.M.Luque and Ezequiel Lopez-Rubio -- February 2011

    # The mean of the scene is used as the original frame
    MuImage = np.array(np.squeeze(shiftdim(model.Mu, 2, 1)), dtype=np.float64, order='F')

    # The smoothing approach is applied
    SmoothFrame = np.zeros(MuImage.shape, order='F')
    for idx in range(MuImage.shape[2]):
        SmoothFrame[:, :, idx] = ssig.correlate2d(MuImage[:, :, idx], SmoothingFilter, mode='same')

    # The difference between the two images is obtained
    dif = np.square(MuImage - SmoothFrame)

    # A 0.01-winsorized mean is applied instead of the standard mean because
    # the first measure is more robust and certain extreme values are removed
    dif2 = dif.reshape((dif.shape[0] * dif.shape[1], model.Dimension), order='F')
    dif3 = np.sort(dif2, axis=0)
    idx = int(np.round(np.max(dif3.shape) * 0.99))
    for NdxDim in range(model.Dimension):
        dif3[idx:, NdxDim] = dif3[idx - 2, NdxDim]
    noise = np.mean(dif3, axis=0)
    return noise
def test_xcorr2(signal):
    """Check that the correlation matrix matches the signal and compare dense vs sparse results"""
    # Get max coordinates of 2D normal in signal
    exp_row, exp_col = np.where(signal.todense() == np.max(signal.todense()))
    # Get max coordinates of correlation scores
    corr_mat_sparse = cud.xcorr2(signal, gauss_kernel, threshold=1e-4).todense()
    corr_mat_dense = cud.xcorr2(signal.todense(), gauss_kernel, threshold=1e-4)
    obs_row, obs_col = np.where(corr_mat_dense == np.max(corr_mat_dense))
    # Use scipy result as base truth to compare chromosight results
    corr_mat_scipy = np.zeros(signal.shape)
    kh, kw = (np.array(gauss_kernel.shape) - 1) // 2
    corr_mat_scipy[kh:-kh, kw:-kw] = sig.correlate2d(signal.todense(), gauss_kernel, "valid")
    # Apply threshold to scipy result for comparison with xcorr2
    corr_mat_scipy[corr_mat_scipy < 1e-4] = 0
    # Check if best correlation is at the mode of the normal distribution
    # NOTE: There are sometimes two maximum values side by side in signal, hence
    # the isin check rather than equality
    assert np.all(np.isin(obs_row, exp_row))
    assert np.all(np.isin(obs_col, exp_col))
    assert np.allclose(
        corr_mat_dense,
        corr_mat_sparse,
        atol=np.mean(corr_mat_dense[corr_mat_dense != 0] / 10),
    )
    assert np.allclose(corr_mat_dense, corr_mat_scipy)
def max_shift(window_a, window_b):
    """A function to determine the k and l shifts that maximize
    cross-correlation between two input interrogation windows.

    Parameters
    ----------
    window_a : array
        Numpy array of the template interrogation window.
    window_b : array
        Numpy array of the shifted interrogation window (must have same shape as window_a).

    Returns
    -------
    k : int
        k value that maximizes cross-correlation (row shift).
    l : int
        l value that maximizes cross-correlation (column shift).
    """
    corr = correlate2d(window_a - window_a.mean(), window_b - window_b.mean())
    max_index = np.argmax(corr)
    k, l = np.unravel_index([max_index], (corr.shape[0], corr.shape[1]))
    k_max = k - (corr.shape[0] - 1) / 2
    l_max = l - (corr.shape[1] - 1) / 2
    return k_max, l_max
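# A hypothetical self-check (not in the source): recover a known circular
# shift between two random windows. With correlate2d's convention, the
# returned (k, l) is the shift that maps window_b back onto window_a, so a
# roll of (+3, -5) should come back as roughly (-3, +5).
import numpy as np

rng = np.random.default_rng(0)
window_a = rng.random((32, 32))
window_b = np.roll(window_a, shift=(3, -5), axis=(0, 1))
print(max_shift(window_a, window_b))   # approximately (array([-3.]), array([5.]))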
def translate(image, shift, subpixel=True, cval=0):
    """Translate an image by (dy, dx) using scipy.

    Based on stsci.image.translation algorithm.
    cval = value to fill empty pixels after shift.
    """
    # for subpixel, a correlation kernel needs translation
    if subpixel:
        rot = 0
        dy, dx = shift
        dx, dy = -dx, -dy
        if dx >= 0 and dy >= 0:
            rot = 2
        elif dx >= 0 and dy < 0:
            rot = 1
        elif dx < 0 and dy >= 0:
            rot = 3
        elif dx < 0 and dy < 0:
            rot = 0
        dx, dy = np.abs([dx, dy])
        if rot % 2 != 0:
            dx, dy = dy, dx
        nim = np.rot90(image, rot)
        nim = scipy_shift(nim, (dy, dx), mode='constant', cval=cval)
        # correlation kernel to fix subpixel shifting
        x, y = dx % 1.0, dy % 1.0
        kernel = np.array([[x * y, (1 - x) * y],
                           [(1 - y) * x, (1 - y) * (1 - x)]])
        nim = correlate2d(nim, kernel, mode='full', fillvalue=cval)
        return np.rot90(nim, -rot % 4).astype(image.dtype)[:-1, :-1]
    return scipy_shift(image, shift, mode='constant', cval=cval)
def cross_correlation(f, g):
    """ Cross-correlation of f and g

    Hint: use the conv_fast function defined above.

    Args:
        f: numpy array of shape (Hf, Wf)
        g: numpy array of shape (Hg, Wg)

    Returns:
        out: numpy array of shape (Hf, Wf)
    """
    out = None
    ### YOUR CODE HERE
    # kernel = np.array(
    #     [
    #         [-1, -1, -1],
    #         [-1, 8, -1],
    #         [-1, -1, -1]
    #     ])
    kernel = np.array([[0, 1, 0],
                       [1, -4, 1],
                       [0, 1, 0]])
    f = conv_fast(f, kernel)
    g = conv_fast(g, kernel)
    temp = signal.correlate2d(f, g, 'same')
    out = np.copy(temp)
    ### END YOUR CODE
    return out
def find_template_2D(template, img):
    c = sp.correlate2d(img, template, mode='same')
    # These y, x coordinates represent the peak. This point needs to be
    # translated to be the top-left corner as the quiz suggests
    y, x = np.unravel_index(np.argmax(c), c.shape)
    return y - template.shape[0] // 2, x - template.shape[1] // 2
def matchTemplate(searchImage, templateImage):
    searchWidth = searchImage.size[0]
    searchHeight = searchImage.size[1]
    templateWidth = templateImage.size[0]
    templateHeight = templateImage.size[1]
    si = np.asarray(searchImage)
    ti = np.asarray(templateImage)
    si = si - si.mean()
    ti = ti - ti.mean()
    # give it noise (because the source image doesn't contain the exact same image)
    si = si + np.random.randn(*si.shape) * 5
    corr = signal.correlate2d(si, ti, boundary='symm', mode='same')
    y, x = np.unravel_index(np.argmax(corr), corr.shape)

    # make the checking image
    '''
    im1 = Image.new('RGB', (searchWidth, searchHeight), (80, 147, 0))
    im1.paste(searchImage, (0, 0))
    im1.paste(templateImage, (x - int(templateWidth / 2), y - int(templateHeight / 2)))
    print('Location : {},{}'.format(x, y))
    #searchImage.show()
    #im1.show()
    im1.save('template_matched_in_search.png')
    '''
    return (x, y)
def test_consistency_correlate_funcs(self):
    # Compare np.correlate, signal.correlate, signal.correlate2d
    a = np.arange(5)
    b = np.array([3.2, 1.4, 3])
    for mode in ["full", "valid", "same"]:
        assert_almost_equal(np.correlate(a, b, mode=mode),
                            signal.correlate(a, b, mode=mode))
        assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)),
                            signal.correlate(a, b, mode=mode))
def _convolve(self, imgs, filters):
    assert imgs.ndim == 3 and filters.ndim == 3
    assert imgs.shape[-2] >= filters.shape[-2] and imgs.shape[-1] >= filters.shape[-1]
    assert filters.shape[-2] == filters.shape[-1] and filters.shape[-1] % 2 != 0
    lx = filters.shape[-1] // 2
    rx = imgs.shape[-1] - lx - 1
    ly = lx
    ry = imgs.shape[-2] - ly - 1
    #print("f " + str(filters.shape[0]))
    output = np.zeros((filters.shape[0], rx - lx + 1, ry - ly + 1))
    for f in range(0, filters.shape[0]):
        filter = filters[f]
        filter_map = np.zeros((rx - lx + 1, ry - ly + 1))
        for i in range(0, imgs.shape[0]):
            img = imgs[i]
            convolved = np.zeros((rx - lx + 1, ry - ly + 1))
            #print("convolved shape " + str(convolved.shape))
            #print("lx " + str(lx) + " rx " + str(rx) + " ly " + str(ly) + " ry " + str(ry))
            for x in range(lx, rx + 1):
                for y in range(ly, ry + 1):
                    subimg = img[y - ly:y + ly + 1, x - lx:x + lx + 1]
                    convolved[y - ly, x - lx] = (subimg * filter).sum()
            if self.debug:
                lib_convolved = correlate2d(img, filter, "valid")
                if not np.all(np.abs(convolved - lib_convolved) < 0.000001):
                    print("Convolved:\n{}\nLib Convolved:\n{}\nFilter:\n{}".format(
                        convolved, lib_convolved, filter))
                    assert False
            filter_map += convolved
        output[f] = filter_map
    return output
def forward(ctx, input, filter, bias):
    # detach so we can cast to NumPy
    input, filter, bias = input.detach(), filter.detach(), bias.detach()
    result = correlate2d(input.numpy(), filter.numpy(), mode='valid')
    result += bias.numpy()
    ctx.save_for_backward(input, filter, bias)
    return torch.from_numpy(result)
def backward_propagation(self, output_error, learning_rate):
    # initialize dInput, dWeights, dBias with zeros
    dInput = np.zeros(self.input_shape)
    dWeights = np.zeros((self.kernel_shape[0], self.kernel_shape[1],
                         self.input_shape[2], self.layer_depth))
    dBias = np.zeros(self.layer_depth)

    for i in range(self.layer_depth):
        for j in range(self.input_shape[2]):
            # we use correlate2d / convolve2d because we work with 2-dimensional images:
            # correlate2d cross-correlates two 2-dimensional arrays,
            # convolve2d convolves two 2-dimensional arrays.
            # 'valid' means the output consists only of elements that do not rely on zero-padding.
            # we compute dWeights (dE/dW) and dBias (dE/dB), which are used to
            # update the weight and bias tables
            dInput[:, :, j] = np.add(
                dInput[:, :, j],
                signal.convolve2d(output_error[:, :, i], self.weights[:, :, j, i]))
            dWeights[:, :, j, i] = signal.correlate2d(
                self.input[:, :, j], output_error[:, :, i], 'valid')
        dBias[i] = self.layer_depth * np.sum(output_error[:, :, i])

    # update the weight and bias tables
    self.weights = np.subtract(self.weights, np.multiply(dWeights, learning_rate))
    self.bias = np.subtract(self.bias, np.multiply(dBias, learning_rate))
    # return dInput (dE/dX)
    return dInput
def Conv_Forward(self, x, w, b):
    """
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)

    Returns a tuple of:
    - out: Output data.
    - cache: (x, w, b, conv_param)
    """
    out = None
    pad = self.conv_param['pad']
    stride = self.conv_param['stride']
    H = x.shape[2]
    W = x.shape[3]
    HH = w.shape[2]
    WW = w.shape[3]
    N = x.shape[0]
    F = w.shape[0]
    out = np.zeros([N, F, H, W])
    for i in range(x.shape[0]):  # For every data sample
        for j in range(x.shape[1]):  # For every color
            temp = x[i, j, :, :]  # store layer to make things simpler
            for f in range(w.shape[0]):  # For every filter
                filt = w[f, j, :, :]
                # note: b[f] is added once per input channel here
                out[i, f, :, :] += signal.correlate2d(temp, filt, mode='same',
                                                      boundary='fill', fillvalue=0) + b[f]  # /x.shape[1]
    cache = (x, w, b, self.conv_param)
    return out, cache
def TestLaws(mgnames, NJ=100):
    # create laws filters
    filts = texture.BuildLawsFilters()
    # allocate for jets
    NI = len(mgnames)  # number of images
    jets = np.zeros((NJ * NI, 25))
    # for each image
    for i in range(NI):
        # load, then correlate with each filter
        data = mgnames[i] + 0
        corrs = [correlate2d(data, x) for x in filts]
        for j in range(25):
            corrs[j] = cspline2d(abs(corrs[j]), 200)
        corrs = np.array(corrs)
        # extract random jets
        V, H = data.shape
        vs = list(range(V))
        hs = list(range(H))
        np.random.shuffle(vs)
        np.random.shuffle(hs)
        for j in range(NJ):
            jets[i * NJ + j] = corrs[:, vs[j], hs[j]]
    # k-means clustering
    clust, mmb = kmeans.KMeans(NI, jets)
    #return jets
    cffs, evecs = pca.PCA(clust, 3)
    cffs = pca.Map2PCA(clust, evecs)
    gnu.Save('Laws_results.txt', cffs)
    return clust, cffs
def get_shift_correlation(frame1, frame2):
    #frame2 = frame2[65:200, 75:220]
    frame2 = frame2[48:208, 48:208]
    #init 135, 145
    #second center 201, 219
    save_to_file("frame2.raw", frame2, np.float32)
    return signal.correlate2d(frame1, frame2, mode='same', boundary='symm')
def dynamics(self, data, verbose=0):
    # five-point stencil: the discrete Laplacian of the field
    dA = correlate2d(data, np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]), boundary='wrap')
    dA = dA[1:N + 1, 1:M + 1]
    if verbose:
        print(dA[0:5, 0:5])
    data = data + dt * ((1 + self.alpha * 1j) * self.scale * dA
                        + data - (1 + 1j * self.beta) * data * np.power(np.abs(data), 2))
    return data
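# The 3x3 stencil above is the 5-point discrete Laplacian; a quick standalone
# check on f(x, y) = x^2 + y^2, whose Laplacian is the constant 4 away from
# the boundary (this verification harness is an addition, not original code).
import numpy as np
from scipy.signal import correlate2d

xg, yg = np.meshgrid(np.arange(6), np.arange(6))
f = (xg**2 + yg**2).astype(float)
lap = correlate2d(f, np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]), mode='same')
print(lap[1:-1, 1:-1])   # interior entries are all 4.0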
def check_game_over(self, s):
    for p in range(self.get_num_players()):
        reward = -np.ones(self.get_num_players())
        reward[p] = 1
        board = s[:, :, p]
        if np.isin(3, correlate2d(board, np.ones((1, 3)), mode="valid")):
            return reward  # Horizontal
        if np.isin(3, correlate2d(board, np.ones((3, 1)), mode="valid")):
            return reward  # Vertical
        i = np.eye(3)
        if np.isin(3, correlate2d(board, i, mode="valid")):
            return reward  # Downward diagonal
        if np.isin(3, correlate2d(board, np.fliplr(i), mode="valid")):
            return reward  # Upward diagonal
    if self.get_available_actions(s).sum() == 0:  # Full board, draw
        return np.zeros(self.get_num_players())
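# Standalone sketch of the line-detection trick above (the 3x3 board is
# illustrative): a "valid" correlation against an identity kernel hits 3
# exactly where three of a player's stones align on the diagonal.
import numpy as np
from scipy.signal import correlate2d

board = np.zeros((3, 3))
board[0, 0] = board[1, 1] = board[2, 2] = 1   # main-diagonal three-in-a-row
print(np.isin(3, correlate2d(board, np.eye(3), mode="valid")))   # True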
def autoCorr(matrix):
    averagePotential = np.mean(matrix)
    matrix = matrix - averagePotential
    matrix = correlate2d(matrix, matrix)
    shape = matrix.shape
    # integer division keeps the center indices valid under Python 3
    max_ele = matrix[shape[0] // 2, shape[1] // 2]
    return matrix[shape[0] // 2:, shape[1] // 2:] / max_ele
def cross_correlation_sim(hms):
    shft_0 = np.array([19, 19])
    for k1, hm1 in hms.items():
        print('################################################')
        print(k1)
        print(hm1)
        print('################################################')
    for k1, hm1 in hms.items():
        for k2, hm2 in hms.items():
            if k1 != k2:
                print('Distance between ', k1, " and ", k2, end=': ')
                X = signal.correlate2d(hm1, hm2)  # boundary='symm', mode='full'
                peak = find_peaks(data=X, threshold=0, box_size=1, npeaks=1)
                #print(peaks)
                dist, peak_value = None, None  # avoid unbound names when no peak is found
                if peak is not None:
                    for p in peak:
                        dist = np.linalg.norm(
                            np.array([p['x_peak'], p['y_peak']]) - shft_0)
                        peak_value = p['peak_value']
                else:
                    print("NO PEAKS FOUND")
                print(dist, peak_value)
def main():
    scale = 100  # lower this value to make the correlation go faster
    image = imread2("./waldo.png")
    image = imresize(image, scale)
    template = imread2("./template.png")
    template = imresize(template, scale)

    # make grayscale
    image_gray = grayscale(image)
    template = grayscale(template)
    template_w, template_h = template.shape

    gradients_image = gradients(image_gray)
    gradients_image /= np.linalg.norm(gradients_image.flatten())
    gradients_template = gradients(template)
    gradients_template /= np.sum(gradients_template)

    # use cross correlation
    convolved_gradients = correlate2d(gradients_image, gradients_template, mode="same")
    position = np.argmax(convolved_gradients)
    position_x, position_y = np.unravel_index(position, gradients_image.shape)

    # put a big red dot in the middle of where we found our maxima
    dot_rad = 8
    image[position_x - dot_rad:position_x + dot_rad,
          position_y - dot_rad:position_y + dot_rad, 0] = 255
    # zero both green and blue (slice 1:3) so the dot reads as red
    image[position_x - dot_rad:position_x + dot_rad,
          position_y - dot_rad:position_y + dot_rad, 1:3] = 0
    imsave("./image_matched.png", image)
def forward_pass_3B(self, features, target):
    '''Compute forward pass over this data sample, returning SSE for pass'''
    features_sq = features.reshape(self.img_width, self.img_width)
    self.ConvY = np.zeros((self.num_filters, self.conv_mat_H))
    for filter in range(self.num_filters):
        activation = correlate2d(features_sq, self.WConv[:, :, filter], mode='valid')
        self.ConvY[filter, :] = activation.reshape(1, self.conv_mat_H)
    self.ConvY = self.activation(self.ConvY + self.bConv, self.sig_lambdas[0])
    self.ConvY = self.ConvY.reshape(self.ConvY.shape[0] * self.ConvY.shape[1], 1)
    self.Y2a = self.W2a.dot(self.ConvY)
    self.Y2a = self.activation(self.Y2a + self.b2a, self.sig_lambdas[1])
    self.Y2 = self.W2.dot(self.Y2a)
    self.Y2 = self.activation(self.Y2 + self.b2, self.sig_lambdas[2])
    self.error = target - self.Y2
    return np.sum(np.square(self.error)) / 2
def correlate(self, image):
    '''scipy correlate function. Very slow, since it is based on convolution.'''
    corr = signal.correlate2d(image.data, self.data, boundary='symm', mode='same')
    return Corr(corr)
def correlate_adjacent_frames(previous_frame, current_frame):
    '''
    This function takes in two NumPy arrays filled with uint8 (integers
    between 0 and 255 inclusive) of size 160 by 160 and returns a NumPy array
    of uint8 that represents `previous_frame` being cross correlated with a
    convolutional kernel of size 110 by 110 created by removing the first and
    last 25 pixels in each dimension from `current_frame`. The array returned
    should be of size 110 by 110, but the elements that are dependent on
    values "outside" the provided pixels of `previous_frame` should be set to
    zero. Before computing the cross-correlation, you should normalize the
    input arrays in the range 0 to 1 and then subtract the mean pixel value
    of both inputs (i.e. the mean value of the list created by concatenating
    all pixel intensities of `current_frame` and all pixel intensities of
    `previous_frame`)
    '''
    max_value = 255
    min_value = 0
    norm_input1 = previous_frame / max_value
    norm_input2 = current_frame / max_value
    avg_input1 = np.mean(norm_input1)
    avg_input2 = np.mean(norm_input2)
    adjustedInput1 = norm_input1 - avg_input1
    adjustedInput2 = norm_input2 - avg_input2
    inputsubArray2 = adjustedInput2[25:135, 25:135]
    # note: mode='valid' on a 160x160 image with a 110x110 kernel yields a
    # 51x51 map, not the 110x110 the docstring describes
    y = sp.correlate2d(adjustedInput1, inputsubArray2, mode='valid',
                       boundary='fill', fillvalue=0)
    ymax = np.amax(y)
    ymin = np.amin(y)
    m = max_value / (ymax - ymin)
    c = max_value - m * ymax
    y_adj = m * y + c
    y_int = y_adj.astype('uint8')
    return y_int
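# Illustrative call (these synthetic frames are an assumption, not project
# data). The output is 51x51: 'valid' correlation of a 160x160 frame with a
# 110x110 kernel gives (160 - 110 + 1) samples per axis.
import numpy as np

prev = np.random.randint(0, 256, (160, 160), dtype=np.uint8)
curr = np.roll(prev, (4, 4), axis=(0, 1))
out = correlate_adjacent_frames(prev, curr)
print(out.shape, out.dtype)   # (51, 51) uint8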
def forward(ctx, input, filter, bias):
    # detach so we can cast to NumPy
    input, filter, bias = input.detach(), filter.detach(), bias.detach()
    result = correlate2d(input.numpy(), filter.numpy(), mode='valid')
    result += bias.numpy()
    ctx.save_for_backward(input, filter, bias)
    return torch.as_tensor(result, dtype=input.dtype)
def test_corr():
    #imshp = (3, 2, 20, 20)    # num images, channels, szy, szx
    kshp = (10, 2, 5, 10)      # features, channels, szy, szx
    featshp = (3, 10, 11, 11)  # num images, features, szy, szx

    theano_correlate2d = get_theano_correlate2d(kshp=kshp, featshp=featshp)

    features = np.random.randn(*featshp)
    kernel = np.random.randn(*kshp)

    output_sz = (featshp[0], kshp[1], kshp[2] + featshp[2] - 1, kshp[3] + featshp[3] - 1)
    scipy_output = np.zeros(output_sz)
    for im_i in range(featshp[0]):
        for im_j in range(kshp[1]):
            for k_i in range(kshp[0]):
                scipy_output[im_i, im_j, :, :] += correlate2d(
                    np.squeeze(features[im_i, k_i, :, :]),
                    np.squeeze(kernel[k_i, im_j, :, :]), mode='full')

    theano_output = theano_correlate2d(features, kernel)

    print('scipy:', scipy_output.shape)
    print('theano:', theano_output.shape)
    np.testing.assert_allclose(scipy_output, theano_output)
def align2D(cur_img, ref_patch_with_border, ref_patch, n_iter, cur_px_estimate, no_simd=False):
    # half_patch_size = 4
    patch_size = 8
    patch_area = patch_size**2
    is_converged = False

    # ref_patch_dx
    # ref_patch_dy
    H = np.zeros((3, 3), dtype=np.float64)

    # calculate gradient and hessian
    ref_step = patch_size + 2
    mask_x, mask_y = np.array([[-1, 0, 1]]), np.array([[-1], [0], [1]])
    dx = 0.5 * signal.correlate2d(ref_patch_with_border[1:-1, :], mask_x, mode='valid')
    dy = 0.5 * signal.correlate2d(ref_patch_with_border[:, 1:-1], mask_y, mode='valid')
    H[0, 0] = (dx**2).sum()
    H[0, 1] = (dx * dy).sum()
    H[0, 2] = dx.sum()
    H[1, 0] = H[0, 1]
    H[1, 1] = (dy**2).sum()
    H[1, 2] = dy.sum()
    H[2, 0] = H[0, 2]
    H[2, 1] = H[1, 2]
    H[2, 2] = patch_area
    Hinv = np.linalg.inv(H)
    mean_diff = 0

    # calculate position in new image
    u, v = cur_px_estimate

    # termination criterion
    min_update_squared = 0.03**2
    cur_step = cur_img
    update = np.zeros(3, dtype=np.float64)
def l_pca(self, X, C=False):
    entropy = self.entropy
    min_tolerance = self.min_tolerance
    if type(X) is np.ndarray:
        X = t.from_numpy(X).to(self.device)
    list_mw = []

    # dimensionality reduction driven by entropy
    size_reduce = (int(X.shape[1] * 0.5) if X.shape[1] <= 20
                   else int(X.shape[1] * 0.25))
    while True:
        _delete_idx = self._lpca_reduce(X, entropy)
        if _delete_idx is None or X.shape[1] < size_reduce:
            break
        else:
            X = X.numpy()
            X_p = np.delete(X.T, _delete_idx, 0)
            X = t.from_numpy(X_p.T)

    # next compute m_w for any entropy
    while entropy >= 0:
        m_w = self._lpca(X, entropy)  # returns idx to remove
        if m_w.shape[1] < min_tolerance:
            break
        list_mw.append(m_w)
        entropy -= self.dt

    if C:
        C_out = t.ones((list_mw[-1].shape[0], list_mw[-1].shape[1])).double()
        C = correlate2d(list_mw[-1], list_mw[-2], mode='same')
        if C.shape[0] == C.shape[1]:
            for i in range(len(list_mw) - 2, 0, 2):
                if i - 1 >= 0:
                    d = correlate2d(C, list_mw[i], mode='same')
                    if (d.shape[0] != list_mw[-1].shape[1]) or (d.shape[0] != d.shape[1]):
                        break
                    else:
                        C = d
            C_out = t.tensor(C)
        return X, t.tensor(list_mw[-1]), C_out
    else:
        return X, t.tensor(list_mw[-1])
def get_pssm_scores(encoded_sequences, pssm, include_rc=True):
    """
    Convolves pssm and its reverse complement with encoded sequences
    and returns the maximum score at each position of each sequence.

    Parameters
    ----------
    encoded_sequences: 3darray
        (num_examples, 1, 4, seq_length) array
    pssm: 2darray
        (4, pssm_length) array
    include_rc: bool
        whether to also score the reverse complement

    Returns
    -------
    scores: 2darray
        (num_examples, seq_length) array
    """
    encoded_sequences = encoded_sequences.squeeze(axis=1)
    # initialize fwd and reverse scores to -infinity
    fwd_scores = np.full_like(encoded_sequences, -np.inf, float)
    rc_scores = np.full_like(encoded_sequences, -np.inf, float)
    # cross-correlate separately for each base,
    # for both the PSSM and its reverse complement
    for base_indx in range(encoded_sequences.shape[1]):
        base_pssm = pssm[base_indx][None]
        base_pssm_rc = base_pssm[:, ::-1]
        fwd_scores[:, base_indx, :] = correlate2d(
            encoded_sequences[:, base_indx, :], base_pssm, mode='same')
        if include_rc:
            rc_scores[:, base_indx, :] = correlate2d(
                encoded_sequences[:, -(base_indx + 1), :], base_pssm_rc, mode='same')
    # sum over the bases
    fwd_scores = fwd_scores.sum(axis=1)
    if include_rc:
        rc_scores = rc_scores.sum(axis=1)
        # take max of fwd and reverse scores at each position
        scores = np.maximum(fwd_scores, rc_scores)
    else:
        scores = fwd_scores
    return scores
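# Hypothetical toy call (the shapes are the point; the values are made up):
# score one one-hot encoded all-A sequence of length 6 against a 2-column PSSM.
import numpy as np

seq = np.zeros((1, 1, 4, 6))
seq[0, 0, 0, :] = 1.0                  # every position is 'A'
pssm = np.array([[0.9, 0.9],           # A
                 [0.1, 0.1],           # C
                 [0.1, 0.1],           # G
                 [0.1, 0.1]])          # T
print(get_pssm_scores(seq, pssm).shape)   # (1, 6): one score per position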
def find_translation(src, tar, ratio, step_offset, min_size) -> (np.ndarray, float):
    """
    src and tar should be the same size
    :param src: source image to find 'tar' on
    :param tar: target image to find in 'src'
    :param ratio: resize ratio of gaussian pyramid
    :param step_offset: search size on each step
    :param min_size: threshold size of image to perform brute-force search
    :return: A tuple showing how much translation should be applied to 'tar'
             to get 'src', plus a float showing match percentage
    """
    if max(src.shape + tar.shape) < min_size:
        # perform a brute-force search
        print('Brute-force search start at top of the pyramid')
        src = (src - np.mean(src)) / np.std(src)
        tar = (tar - np.mean(tar)) / np.std(tar)
        corr = signal.correlate2d(src, tar)
        offsets = np.unravel_index(np.argmax(np.abs(corr)), corr.shape)
        print(f'Search end at size {src.shape}')
        return offsets - np.array(tar.shape), np.amax(corr) / src.size
    else:
        # recursively solve then adjust
        kernel_size = 2 * round(1 / ratio) + 1
        sigma = 1 / ratio
        src_new = cv.GaussianBlur(src, (kernel_size, kernel_size), sigma)
        src_new = cv.resize(src_new, (0, 0), src_new, ratio, ratio, interpolation=cv.INTER_AREA)
        tar_new = cv.GaussianBlur(tar, (kernel_size, kernel_size), sigma)
        tar_new = cv.resize(tar_new, (0, 0), tar_new, ratio, ratio, interpolation=cv.INTER_AREA)
        offset, _ = find_translation(src_new, tar_new, ratio, step_offset, min_size)
        offset *= 2

        print(f'Fine tuning at size {src.shape}')
        mx = 0
        ind = (0, 0)
        src = (src - np.mean(src)) / np.std(src)
        tar = (tar - np.mean(tar)) / np.std(tar)
        for i_off in range(-step_offset, step_offset + 1):
            for j_off in range(-step_offset, step_offset + 1):
                t_tar = np.roll(tar, offset + (i_off, j_off), (0, 1))
                corr = abs((t_tar * src).sum())
                if corr > mx:
                    mx = corr
                    ind = (i_off, j_off)
        return offset + ind, mx
def correlationFinder(image, template, c, color=1):
    """
    Calculate the correlation and find matches between an image and a
    template, drawing a square around the higher-valued pixels according to a
    given coefficient.

    Parameters:
        * image: nparray - image bitmap
        * template: nparray - template bitmap
        * c: int - match coefficient between 0 and 1, for example: it will
          find the 100% correlation if c=1, the 50% correlation if c=0.5
        * color: int, optional - binary level color which will be used to
          highlight the match, can be 0 or 1

    Returns:
        * matc: ndarray - 'recognized' image
    """
    from scipy import signal
    import numpy as np
    from math import ceil
    import matplotlib.pyplot as plt

    corr = signal.correlate2d(image, template, boundary='symm', mode='same')

    plt.imshow(corr, cmap='gray')
    plt.axis('off')
    plt.show()

    templateTrue = np.sum(template)
    corrLim = c * templateTrue
    squareDim = template.shape
    matc = np.copy(image)
    for i in range(corr.shape[0]):
        for j in range(corr.shape[1]):
            if corr[i, j] > corrLim:
                x = ceil(squareDim[0] / 2)  # draw a square with template dimensions in the original image
                y = ceil(squareDim[1] / 2)  # around the pixel with value over the limit in the correlated image
                for k in range(x):
                    matc[i - k, j - y] = matc[i + k, j - y] = matc[i - k, j + y] = matc[i + k, j + y] = color
                for l in range(y):
                    matc[i - x, j - l] = matc[i + x, j - l] = matc[i - x, j + l] = matc[i + x, j + l] = color
    return matc
def get_disp_vec(f1, f2):
    from scipy import signal
    corr = signal.correlate2d(f1, f2, boundary='symm', mode='same')
    loc_max = np.where(corr == corr.max())
    center = g_size / 2
    x_disp = (loc_max[0][0] - center) + 1
    y_disp = (loc_max[1][0] - center) + 1
    return x_disp, y_disp
def pos_estimate(self, obs, grid_map):
    "Estimate position in gridmap based on observation"
    o, gm = self.remove_self(obs), remove_self(grid_map)
    corr = signal.correlate2d(gm, o, mode='same')
    max_idx = np.unravel_index(np.argmax(corr), grid_map.shape)
    likelihood = np.exp(corr) / np.sum(np.exp(corr))  # softmax over correlation scores
    log_likelihood = np.log(likelihood)
    return log_likelihood, max_idx
def __call__(self, x):
    y = np.empty_like(x)
    sh = x.shape
    xf = x.reshape((-1,) + sh[-2:])
    yf = y.reshape((-1,) + sh[-2:])
    # apply a 3x3 binomial smoothing kernel to each 2-d slice
    for i in range(len(xf)):
        yf[i] = correlate2d(xf[i],
                            np.array([[.0625, .125, .0625],
                                      [.125, .25, .125],
                                      [.0625, .125, .0625]]),
                            mode='same')
    return y
def cross_correlation_equal_shape(img1, img2):
    if not img1.shape == img2.shape:
        raise ValueError(
            "Images must have the same shape for function cross_correlation_equal_shape. "
            "Received shape {} and {}".format(img1.shape, img2.shape))
    img1 = np.nan_to_num(img1)
    img2 = np.nan_to_num(img2)
    return correlate2d(img1, img2, mode='valid', boundary='symm').item()
def dotProdsWithAllWindows(x, X):
    """Slide x along the columns of X and compute the dot product

    >>> x = np.array([[1, 1], [2, 2]])
    >>> X = np.arange(12).reshape((2, -1))
    >>> dotProdsWithAllWindows(x, X)  # doctest: +NORMALIZE_WHITESPACE
    array([27, 33, 39, 45, 51])
    """
    return sig.correlate2d(X, x, mode='valid').flatten()
def gabor_filter(image, sigma, theta):
    correlation = correlate2d(image, gabor_fn(sigma, theta), mode="same", boundary='symm')
    correlation = correlation.flatten()
    # correlation = np.abs(correlation)
    # correlation -= np.min(correlation)
    # correlation *= 1 / np.max(correlation)
    # correlation -= 0.5
    # correlation *= 2
    return correlation.reshape(image.shape)
def xcorr(self, p=[0, 0]):
    from scipy.signal import correlate2d
    from scipy.ndimage import affine_transform

    im1 = self.im1.copy()
    im2 = self.im2.copy()
    im1_tf = affine_transform(im1, np.identity(2), mode='wrap', offset=p)
    result = correlate2d((im1_tf - im1_tf.mean()), (im2 - im2.mean()),
                         mode='full', boundary='wrap')
    return np.sum(result) / result.size
def dynamics(data, verbose=0):
    global beta
    # sum over the 8 neighbors of each site (Moore neighborhood)
    dA = correlate2d(data, np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]]), boundary='wrap')
    dA = dA[1:N + 1, 1:M + 1]
    r = np.random.rand(N, M)
    data = 2 * np.floor(r * (1 + np.exp(beta * dA))) - 1
    return data
def SNAutoCorr(rateMap, arenaDiam, h):
    precision = arenaDiam / h
    # np.linspace requires an integer sample count
    xedges = np.linspace(-arenaDiam, arenaDiam, int(precision * 2 + 1))
    yedges = np.linspace(-arenaDiam, arenaDiam, int(precision * 2 + 1))
    X, Y = np.meshgrid(xedges, yedges)
    corr = ma.masked_array(correlate2d(rateMap, rateMap),
                           mask=np.sqrt(X**2 + Y**2) > arenaDiam)
    return corr, xedges, yedges
def dynamics(data, verbose=0):
    global scale
    dA = correlate2d(data, np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]), boundary='wrap')
    dA = dA[1:N + 1, 1:M + 1]
    if verbose:
        print(dA[0:5, 0:5])
    data = data + dt * ((1 + alpha * 1j) * scale * dA
                        + data - (1 + 1j * beta) * data * np.power(np.abs(data), 2))
    # data[23:25, 12:14] = 1
    return data
def backward(ctx, grad_output):
    grad_output = grad_output.detach()
    input, filter, bias = ctx.saved_tensors
    grad_output = grad_output.numpy()
    grad_bias = np.sum(grad_output, keepdims=True)
    grad_input = convolve2d(grad_output, filter.numpy(), mode='full')
    # the previous line can be expressed equivalently as:
    # grad_input = correlate2d(grad_output, flip(flip(filter.numpy(), axis=0), axis=1), mode='full')
    grad_filter = correlate2d(input.numpy(), grad_output, mode='valid')
    return torch.from_numpy(grad_input), torch.from_numpy(grad_filter), torch.from_numpy(grad_bias)
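# Quick numeric check of the identity asserted in the comment above: full
# convolution with a kernel equals full correlation with the kernel flipped
# along both axes (this harness is an addition for illustration).
import numpy as np
from scipy.signal import convolve2d, correlate2d

g = np.random.randn(4, 4)
w = np.random.randn(3, 3)
print(np.allclose(convolve2d(g, w, mode='full'),
                  correlate2d(g, w[::-1, ::-1], mode='full')))   # True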
def correlation(self, other):
    """Compute the correlation between two single-channel, grayscale input images.

    The second image must be smaller than the first.

    :param other: the Image we're looking for
    """
    from scipy import signal
    input = self.ndarray()
    match = other.ndarray()
    c = signal.correlate2d(input, match)
    return Image(c)
def harris(image, alpha, sigma):
    deriv_filter = np.array([[-1., 0., 1.]])
    # derivative in x direction
    deriv_x = correlate2d(image, deriv_filter, mode="same")
    # derivative in y direction
    deriv_y = correlate2d(image, deriv_filter.T, mode="same")
    deriv_x2, deriv_y2, deriv_xy = deriv_x * deriv_x, deriv_y * deriv_y, deriv_x * deriv_y
    g_kern = gauss_kernel(sigma)
    deriv_x2 = correlate2d(deriv_x2, g_kern, mode="same")
    deriv_y2 = correlate2d(deriv_y2, g_kern, mode="same")
    deriv_xy = correlate2d(deriv_xy, g_kern, mode="same")
    # Harris response: det(M) - alpha * trace(M)^2
    harris = deriv_x2 * deriv_y2 - deriv_xy * deriv_xy \
        - alpha * (deriv_x2 + deriv_y2) * (deriv_x2 + deriv_y2)
    return harris
def calc_grad(self, in_feat_maps):
    n_batch_size = in_feat_maps.shape[0]
    for filterIdx in range(self.n_filters):
        for inFeatIdx in range(self.n_feat_maps):
            temp_w_grad = np.zeros([self.n_filter_h, self.n_filter_w])
            for imgIdx in range(n_batch_size):
                error_matrix = self.delta[imgIdx, filterIdx, :, :]
                im = in_feat_maps[imgIdx, inFeatIdx, :, :]
                temp_w_grad += signal.correlate2d(im, error_matrix, 'valid')
            self.grad_W[inFeatIdx, filterIdx, :, :] = -temp_w_grad * (1 / n_batch_size)
        self.grad_b[filterIdx] = -(self.delta[:, filterIdx, :, :].sum()) * (1 / n_batch_size)
def calc_mean_pooling(self, in_feat_maps):
    n_batch_size = in_feat_maps.shape[0]
    self.o_feat_maps = np.zeros([n_batch_size, self.n_feature_maps, self.pooled_h, self.pooled_w])
    for imgIdx in range(n_batch_size):
        for filterIdx in range(self.n_feature_maps):
            im = in_feat_maps[imgIdx, filterIdx, :, :]
            p_feat_con = signal.correlate2d(im, np.ones(self.pool_shape), 'valid')
            factor = 1 / (self.pool_shape[0] * self.pool_shape[1])
            p_feat_con = p_feat_con[0::self.pool_shape[0], 0::self.pool_shape[1]]
            self.o_feat_maps[imgIdx, filterIdx, :, :] = factor * p_feat_con
    self.o_feat_maps_v = self.o_feat_maps.reshape(n_batch_size, self.n_out)
def step(self, K=3):
    """Executes one time step.

    returns: number of cells that toppled
    """
    toppling = self.array > K
    num_toppled = np.sum(toppling)
    self.toppled_seq.append(num_toppled)

    c = correlate2d(toppling, self.kernel, mode='same')
    self.array += c
    return num_toppled
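# One toppling sweep in isolation, with an assumed 4-neighbor kernel (the
# class's self.kernel is not shown in this snippet): each over-threshold cell
# loses 4 grains and sends 1 to each of its von Neumann neighbors.
import numpy as np
from scipy.signal import correlate2d

array = np.array([[0, 0, 0],
                  [0, 5, 0],
                  [0, 0, 0]])
toppling = (array > 3).astype(int)
kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
print(array + correlate2d(toppling, kernel, mode='same'))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]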