def HOGcorr_frameandvec(frame1, vecx, vecy, gradthres=0., vecthres=0., pxsz=1., ksz=1., res=1., mask1=0, mask2=0, wd=1, allow_huge=False, regrid=False):
    # Calculates the spatial correlation between frame1 and the vector field described by vecx and vecy using the HOG method
    #
    # INPUTS
    # frame1 - input map
    # vecx   - x-component of the input vector field
    # vecy   - y-component of the input vector field
    #
    # OUTPUTS
    # Zx           - projected Rayleigh statistic of the relative orientation angles
    # corrframe    - map of cos(2*phi), where phi is the relative orientation angle
    # smoothframe1 - frame1 smoothed with the derivative kernel

    sf = 3.  # Number of pixels per kernel FWHM
    pxksz = ksz/pxsz
    pxres = res/pxsz
    sz1 = np.shape(frame1)

    if (ksz > 1):
        if (regrid):
            intframe1 = congrid(frame1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intvecx   = congrid(vecx,   [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intvecy   = congrid(vecy,   [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            if np.array_equal(np.shape(frame1), np.shape(mask1)):
                intmask1 = congrid(mask1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask1[(intmask1 > 0.).nonzero()] = 1.
            if np.array_equal(np.shape(vecx), np.shape(mask2)):  # mask2 applies to the vector field
                intmask2 = congrid(mask2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask2[(intmask2 > 0.).nonzero()] = 1.
        else:
            intframe1 = frame1
            intvecx = vecx
            intvecy = vecy
            intmask1 = mask1
            intmask2 = mask2

        # Smoothing and Gaussian-derivative gradients of the (possibly regridded) input map
        smoothframe1 = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 0], mode='nearest')
        dI1dx = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[1, 0], mode='nearest')
    else:
        intframe1 = frame1
        smoothframe1 = frame1
        intvecx = vecx
        intvecy = vecy
        intmask1 = mask1
        intmask2 = mask2
        dI1dx = ndimage.gaussian_filter(intframe1, [1, 1], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [1, 1], order=[1, 0], mode='nearest')

    # ========================================================================================================================
    # Exclude low-gradient and low-amplitude pixels
    normGrad1 = np.sqrt(dI1dx*dI1dx + dI1dy*dI1dy)
    normVec = np.sqrt(intvecx*intvecx + intvecy*intvecy)
    bad = np.logical_or(normGrad1 <= gradthres, normVec <= vecthres).nonzero()
    normGrad1[bad] = 1.
    normVec[bad] = 1.

    # Relative orientation angle between the gradient of frame1 and the vector field
    tempphi = np.arctan2(dI1dx*intvecy - dI1dy*intvecx, dI1dx*intvecx + dI1dy*intvecy)
    tempphi[bad] = np.nan
    phi = np.arctan(np.tan(tempphi))

    corrframe = np.cos(2.*phi)

    # Exclude masked regions
    if np.array_equal(np.shape(intframe1), np.shape(intmask1)):
        corrframe[(intmask1 == 0.).nonzero()] = np.nan
        if np.array_equal(np.shape(intvecx), np.shape(intmask2)):
            corrframe[(intmask2 == 0.).nonzero()] = np.nan
            good = np.logical_and(np.logical_and(np.isfinite(phi), intmask1 > 0), intmask2 > 0).nonzero()
        else:
            good = np.logical_and(np.isfinite(phi), intmask1 > 0).nonzero()
    else:
        good = np.isfinite(phi).nonzero()

    Zx, s_Zx, meanPhi = HOG_PRS(phi[good])

    return Zx, corrframe, smoothframe1
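
# ------------------------------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original API). It compares a toy scalar
# map with a uniform vector field using HOGcorr_frameandvec. It assumes that numpy is imported as
# np at module level, as the routines above already require, and that HOG_PRS is defined elsewhere
# in this module.
def _example_HOGcorr_frameandvec():
    # Toy map: a centred 2D Gaussian; vector field pointing along x everywhere
    x, y = np.meshgrid(np.arange(128), np.arange(128), indexing='xy')
    frame = np.exp(-((x - 64.)**2 + (y - 64.)**2)/(2.*20.**2))
    vecx = np.ones_like(frame)
    vecy = np.zeros_like(frame)
    # ksz is the derivative-kernel size in the same units as pxsz (pixels by default)
    Zx, corrframe, smoothframe = HOGcorr_frameandvec(frame, vecx, vecy, ksz=3.)
    print('Projected Rayleigh statistic:', Zx)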
def HOGcorr_frame(frame1, frame2, gradthres1=0., gradthres2=0., pxsz=1., ksz=1., res=1., mask1=0, mask2=0, wd=1, allow_huge=False, regrid=False):
    # Calculates the spatial correlation between frame1 and frame2 using the HOG method
    #
    # INPUTS
    # frame1 - first input map
    # frame2 - second input map
    #
    # OUTPUTS
    # circstats    - [rvl, Z, V, pz, pv], circular statistics of the relative orientation angles
    # corrframe    - map of the relative orientation angles phi
    # smoothframe1 - frame1 smoothed with the derivative kernel
    # smoothframe2 - frame2 smoothed with the derivative kernel

    sf = 3.  # Number of pixels per kernel FWHM
    pxksz = ksz/pxsz
    pxres = res/pxsz
    sz1 = np.shape(frame1)

    if (ksz > 1):
        weight = (pxsz/ksz)**2
        if (regrid):
            intframe1 = congrid(frame1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intframe2 = congrid(frame2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            if np.array_equal(np.shape(frame1), np.shape(mask1)):
                intmask1 = congrid(mask1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask1[(intmask1 > 0.).nonzero()] = 1.
            if np.array_equal(np.shape(frame2), np.shape(mask2)):
                intmask2 = congrid(mask2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask2[(intmask2 > 0.).nonzero()] = 1.
        else:
            intframe1 = frame1
            intframe2 = frame2
            intmask1 = mask1
            intmask2 = mask2

        # Smoothing and Gaussian-derivative gradients of the (possibly regridded) input maps
        smoothframe1 = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 0], mode='nearest')
        smoothframe2 = ndimage.gaussian_filter(intframe2, [pxksz, pxksz], order=[0, 0], mode='nearest')
        dI1dx = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[1, 0], mode='nearest')
        dI2dx = ndimage.gaussian_filter(intframe2, [pxksz, pxksz], order=[0, 1], mode='nearest')
        dI2dy = ndimage.gaussian_filter(intframe2, [pxksz, pxksz], order=[1, 0], mode='nearest')
    else:
        weight = (pxsz/res)**2
        intframe1 = frame1
        intframe2 = frame2
        intmask1 = mask1
        intmask2 = mask2
        smoothframe1 = frame1
        smoothframe2 = frame2
        dI1dx = ndimage.gaussian_filter(intframe1, [1, 1], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [1, 1], order=[1, 0], mode='nearest')
        dI2dx = ndimage.gaussian_filter(intframe2, [1, 1], order=[0, 1], mode='nearest')
        dI2dy = ndimage.gaussian_filter(intframe2, [1, 1], order=[1, 0], mode='nearest')

    # Calculation of the relative orientation angles
    tempphi = np.arctan2(dI1dx*dI2dy - dI1dy*dI2dx, dI1dx*dI2dx + dI1dy*dI2dy)
    phi = np.arctan(np.tan(tempphi))

    # Excluding small gradients
    normGrad1 = np.sqrt(dI1dx*dI1dx + dI1dy*dI1dy)
    normGrad2 = np.sqrt(dI2dx*dI2dx + dI2dy*dI2dy)
    bad = np.logical_or(normGrad1 <= gradthres1, normGrad2 <= gradthres2).nonzero()
    phi[bad] = np.nan

    corrframe = phi  # relative orientation angles (not cos(2*phi) in this version)

    # Excluding masked regions
    if np.array_equal(np.shape(intframe1), np.shape(intmask1)):
        corrframe[(intmask1 == 0.).nonzero()] = np.nan
        if np.array_equal(np.shape(intframe2), np.shape(intmask2)):
            corrframe[(intmask2 == 0.).nonzero()] = np.nan
            good = np.logical_and(np.logical_and(np.isfinite(phi), intmask1 > 0), intmask2 > 0).nonzero()
        else:
            good = np.logical_and(np.isfinite(phi), intmask1 > 0).nonzero()
    else:
        good = np.isfinite(phi).nonzero()

    Zx, s_Zx, meanPhi = HOG_PRS(phi[good])

    # Circular statistics of the doubled angles, weighted by the number of independent samples per beam
    wghts = 0.*phi[good] + weight
    rvl = circ.descriptive.resultant_vector_length(2.*phi[good], w=wghts)
    can = circ.descriptive.mean(2.*phi[good], w=wghts)/2.
    pz, Z = circ.tests.rayleigh(2.*phi[good], w=wghts)
    pv, V = circ.tests.vtest(2.*phi[good], 0., w=wghts)

    circstats = [rvl, Z, V, pz, pv]

    return circstats, corrframe, smoothframe1, smoothframe2
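
# ------------------------------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original API). Two nearly identical toy
# maps have mostly parallel gradients, so the relative orientation angles cluster around zero and
# the alignment statistics are large. Assumes numpy is available as np at module level. Note that
# this file defines HOGcorr_frame more than once; the definition that appears last is the one
# bound at import time, so the first returned value is either the list [rvl, Z, V, pz, pv] or the
# scalar projected Rayleigh statistic Zx.
def _example_HOGcorr_frame():
    rng = np.random.RandomState(0)
    x, y = np.meshgrid(np.arange(128), np.arange(128), indexing='xy')
    map1 = np.exp(-((x - 64.)**2 + (y - 64.)**2)/(2.*20.**2))
    map2 = map1 + 0.01*rng.randn(128, 128)
    stats, corrframe, sframe1, sframe2 = HOGcorr_frame(map1, map2, ksz=3.)
    print('HOG alignment statistics:', stats)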
def lic(vx0, vy0, length=8, niter=1, normalize=True, amplitude=False, level=0.1, scalar=1, interpolation='nearest', inputmap=None, factor=1.):
    # Calculates the line integral convolution (LIC) representation of the 2D vector field described by vx0 and vy0.
    #
    # INPUTS
    # vx0    - x-component of the input vector field
    # vy0    - y-component of the input vector field
    # length - length of the integration streamline in pixels
    # niter  - number of LIC iterations

    # Check that the input maps match
    assert vx0.shape == vy0.shape, "Dimensions of vx0 and vy0 must match"
    sz = np.shape(vx0)

    # Identify bad pixels
    vxbad = np.isnan(vx0).nonzero()
    vybad = np.isnan(vy0).nonzero()
    vx0[vxbad] = 0.
    vy0[vybad] = 0.

    # ===============================================================================================
    if (factor == 1.):
        vx = np.copy(vx0)
        vy = np.copy(vy0)
    else:
        print('[LIC] Warning: rescaling input maps')
        vx = congrid(vx0, np.array([int(factor*sz[0]), int(factor*sz[1])]), method='linear')
        vy = congrid(vy0, np.array([int(factor*sz[0]), int(factor*sz[1])]), method='linear')

    # Assert new shape
    sz = np.shape(vx)
    ni = sz[0]
    nj = sz[1]

    uu = np.sqrt(vx**2 + vy**2)
    ii = (uu == 0.).nonzero()
    if (np.size(ii) > 0):
        uu[ii] = 1.0

    if (normalize):
        ux = vx/uu
        uy = vy/uu
    else:
        ux = vx/np.max(uu)
        uy = vy/np.max(uu)

    if (inputmap is None):
        vl = np.random.rand(ni, nj)
    else:
        vl = inputmap

    xi = np.arange(ni)
    xj = np.arange(nj)

    outvl = np.zeros([niter, ni, nj])

    for i in range(0, niter):
        print('iter {:.0f} / {:.0f}'.format(i + 1, niter))
        texture = vl
        vv = np.zeros([ni, nj])

        pi0, pj0 = np.meshgrid(xi, xj, indexing='ij')
        pi, pj = np.meshgrid(xi, xj, indexing='ij')
        mi = pi
        mj = pj

        ppi = 1.*pi
        ppj = 1.*pj
        mmi = 1.*mi
        mmj = 1.*mj

        pbar = tqdm(total=length)
        for l in range(0, length):
            # Forward step along the streamline
            ppi0 = ppi
            ppj0 = ppj
            points = np.transpose(np.array([pi0.ravel(), pj0.ravel()]))
            outpoints = np.transpose(np.array([ppi.ravel(), ppj.ravel()]))
            dpi = interpolate.griddata(points, uy.ravel(), outpoints, method=interpolation)
            dpj = interpolate.griddata(points, ux.ravel(), outpoints, method=interpolation)
            ppi = ppi0 + 0.25*np.reshape(dpi, [ni, nj])
            ppj = ppj0 + 0.25*np.reshape(dpj, [ni, nj])

            # Backward step along the streamline
            mmi0 = mmi
            mmj0 = mmj
            points = np.transpose(np.array([pi0.ravel(), pj0.ravel()]))
            outpoints = np.transpose(np.array([mmi.ravel(), mmj.ravel()]))
            dmi = interpolate.griddata(points, uy.ravel(), outpoints, method=interpolation)
            dmj = interpolate.griddata(points, ux.ravel(), outpoints, method=interpolation)
            mmi = mmi0 - 0.25*np.reshape(dmi, [ni, nj])
            mmj = mmj0 - 0.25*np.reshape(dmj, [ni, nj])

            # Wrap the integer part of the coordinates periodically
            pi = (np.fix(ppi) + ni) % ni
            pj = (np.fix(ppj) + nj) % nj
            mi = (np.fix(mmi) + ni) % ni
            mj = (np.fix(mmj) + nj) % nj

            ppi = pi + (ppi.copy() - np.fix(ppi.copy()))
            ppj = pj + (ppj.copy() - np.fix(ppj.copy()))
            mmi = mi + (mmi.copy() - np.fix(mmi.copy()))
            mmj = mj + (mmj.copy() - np.fix(mmj.copy()))

            # Sample the texture at the forward and backward positions and accumulate
            points = np.transpose(np.array([pi0.ravel(), pj0.ravel()]))
            outpoints = np.transpose(np.array([ppi.ravel(), ppj.ravel()]))
            tempA = interpolate.griddata(points, texture.ravel(), outpoints, method=interpolation)
            points = np.transpose(np.array([pi0.ravel(), pj0.ravel()]))
            outpoints = np.transpose(np.array([mmi.ravel(), mmj.ravel()]))
            tempB = interpolate.griddata(points, texture.ravel(), outpoints, method=interpolation)

            vv = vv.copy() + np.reshape(tempA, [ni, nj]) + np.reshape(tempB, [ni, nj])
            pbar.update()

        pbar.close()

        vl = 0.25*vv/length
        outvl[i, :, :] = vl

    vl[vxbad] = np.nan
    vl[vybad] = np.nan

    return outvl
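
# ------------------------------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original API). A purely rotational vector
# field, v = (-y, x), produces the characteristic circular streaking in the LIC texture. Assumes
# numpy is available as np at module level; the call can be slow for large maps because lic relies
# on griddata interpolation at every integration step.
def _example_lic():
    n = 32
    x, y = np.meshgrid(np.arange(n) - n/2., np.arange(n) - n/2., indexing='xy')
    vx = -y
    vy = np.copy(x)
    texture = lic(vx, vy, length=8, niter=1)  # output shape is (niter, n, n)
    return texture[0]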
def rescale_image(slice_original, size_slice, size_output):
    # Rescales slice_original to a (size_slice, size_slice) tile and periodically fills an array
    # of shape size_output with it.
    size_original = slice_original.shape
    if size_slice % 2 == 1:
        size_slice += 1
    size_new = (size_slice, size_slice)
    scale_factor = float(size_new[0])/size_original[0]

    if scale_factor < 1:
        scale_type = 'compress'
    if scale_factor >= 1:
        scale_type = 'expand'

    if scale_type == 'compress':
        # Applying congrid: size_original -> size_new
        slice_scaled = congrid(slice_original, size_new, method='linear', centre=True)
    if scale_type == 'expand':
        # Applying zoom: size_original -> size_new
        zoom_factor = scale_factor
        slice_scaled = scipy.ndimage.zoom(slice_original, zoom_factor, order=1)

    # Create the periodic full slice
    slice_full = np.zeros(size_output)
    size_output_x, size_output_y = size_output
    size_new_x, size_new_y = size_new

    # The periodic tiling below is always done in 'compress' mode
    # (alternative: 'compress' if size_output_x > size_new_x, else 'expand')
    scale_type = 'compress'

    if scale_type == 'compress':
        center_values = np.array(
            [[size_output_x//2, size_output_y//2],
             [size_output_x//2 - size_new_x, size_output_y//2],
             [size_output_x//2 + size_new_x, size_output_y//2],
             [size_output_x//2, size_output_y//2 - size_new_y],
             [size_output_x//2, size_output_y//2 + size_new_y],
             [size_output_x//2 - size_new_x, size_output_y//2 - size_new_y],
             [size_output_x//2 - size_new_x, size_output_y//2 + size_new_y],
             [size_output_x//2 + size_new_x, size_output_y//2 - size_new_y],
             [size_output_x//2 + size_new_x, size_output_y//2 + size_new_y]])

        for center in center_values:
            center_x, center_y = center
            edge_x_l, edge_x_r = center_x - size_new_x//2, center_x + size_new_x//2
            edge_y_l, edge_y_r = center_y - size_new_y//2, center_y + size_new_y//2

            if edge_x_l < 0:
                offset_x_l = -edge_x_l
                edge_x_l = 0
            else:
                offset_x_l = 0
            if edge_x_r >= size_output_x:
                offset_x_r = size_output_x//2 - size_new_x//2
                edge_x_r = size_output_x
            else:
                offset_x_r = size_new_x
            if edge_y_l < 0:
                offset_y_l = -edge_y_l
                edge_y_l = 0
            else:
                offset_y_l = 0
            if edge_y_r >= size_output_y:
                offset_y_r = size_output_y//2 - size_new_y//2
                edge_y_r = size_output_y
            else:
                offset_y_r = size_new_y

            slice_full[edge_x_l:edge_x_r, edge_y_l:edge_y_r] = slice_scaled[offset_x_l:offset_x_r, offset_y_l:offset_y_r]

    if scale_type == 'expand':
        center_values = np.array([[size_output_x//2, size_output_y//2]])

        for center in center_values:
            center_x, center_y = center
            edge_x_l, edge_x_r = center_x - size_new_x//2, center_x + size_new_x//2
            edge_y_l, edge_y_r = center_y - size_new_y//2, center_y + size_new_y//2

            if edge_x_l < 0:
                offset_x_l = size_new_x//2 - size_output_x//2
                edge_x_l = 0
            else:
                offset_x_l = 0
            if edge_x_r >= size_output_x:
                offset_x_r = offset_x_l + size_output_x
                edge_x_r = size_output_x
            else:
                offset_x_r = size_new_x
            if edge_y_l < 0:
                offset_y_l = size_new_y//2 - size_output_y//2
                edge_y_l = 0
            else:
                offset_y_l = 0
            if edge_y_r >= size_output_y:
                offset_y_r = offset_y_l + size_output_y
                edge_y_r = size_output_y
            else:
                offset_y_r = size_new_y

            slice_full[edge_x_l:edge_x_r, edge_y_l:edge_y_r] = slice_scaled[offset_x_l:offset_x_r, offset_y_l:offset_y_r]

    return slice_full
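
# ------------------------------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original API). It rescales a 32x32 test
# pattern to a 64x64 tile and periodically fills a 128x128 output array with it. Assumes numpy is
# available as np and that congrid and scipy.ndimage are importable at module level, as
# rescale_image itself requires.
def _example_rescale_image():
    rng = np.random.RandomState(1)
    tile = rng.rand(32, 32)
    full = rescale_image(tile, 64, (128, 128))  # zoomed to 64x64, then tiled into 128x128
    return full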
def HOGcorr_frameandvec(frame1, vecx, vecy, gradthres=0., vecthres=0., pxsz=1., ksz=1., res=1., mask1=0, mask2=0, wd=1, allow_huge=False, regrid=False):
    # Calculates the spatial correlation between frame1 and the vector field described by vecx and vecy using the HOG method
    #
    # INPUTS
    # frame1 - input map
    # vecx   - x-component of the input vector field
    # vecy   - y-component of the input vector field
    #
    # OUTPUTS
    # Zx           - projected Rayleigh statistic of the relative orientation angles
    # corrframe    - map of cos(2*phi), where phi is the relative orientation angle
    # smoothframe1 - frame1 smoothed with the derivative kernel

    sf = 3.  # Number of pixels per kernel FWHM
    pxksz = (ksz/(2*np.sqrt(2.*np.log(2.))))/pxsz  # gaussian_filter takes sigma instead of FWHM as input
    pxres = res/pxsz
    sz1 = np.shape(frame1)

    if (ksz > 1):
        if (regrid):
            intframe1 = congrid(frame1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intvecx   = congrid(vecx,   [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intvecy   = congrid(vecy,   [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            if np.array_equal(np.shape(frame1), np.shape(mask1)):
                intmask1 = congrid(mask1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask1[(intmask1 > 0.).nonzero()] = 1.
            if np.array_equal(np.shape(vecx), np.shape(mask2)):  # mask2 applies to the vector field
                intmask2 = congrid(mask2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask2[(intmask2 > 0.).nonzero()] = 1.
        else:
            intframe1 = frame1
            intvecx = vecx
            intvecy = vecy
            intmask1 = mask1
            intmask2 = mask2

        smoothframe1 = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 0], mode='nearest')
        dI1dx = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [pxksz, pxksz], order=[1, 0], mode='nearest')
    else:
        intframe1 = frame1
        smoothframe1 = frame1
        intvecx = vecx
        intvecy = vecy
        intmask1 = mask1
        intmask2 = mask2
        dI1dx = ndimage.gaussian_filter(intframe1, [1, 1], order=[0, 1], mode='nearest')
        dI1dy = ndimage.gaussian_filter(intframe1, [1, 1], order=[1, 0], mode='nearest')

    # ========================================================================================================================
    normGrad1 = np.sqrt(dI1dx*dI1dx + dI1dy*dI1dy)
    normVec = np.sqrt(intvecx*intvecx + intvecy*intvecy)
    bad = np.logical_or(normGrad1 <= gradthres, normVec <= vecthres).nonzero()
    normGrad1[bad] = 1.
    normVec[bad] = 1.

    tempphi = np.arctan2(dI1dx*intvecy - dI1dy*intvecx, dI1dx*intvecx + dI1dy*intvecy)
    tempphi[bad] = np.nan
    phi = np.arctan(np.tan(tempphi))

    corrframe = np.cos(2.*phi)

    if np.array_equal(np.shape(intframe1), np.shape(intmask1)):
        corrframe[(intmask1 == 0.).nonzero()] = np.nan
        if np.array_equal(np.shape(intvecx), np.shape(intmask2)):
            corrframe[(intmask2 == 0.).nonzero()] = np.nan
            good = np.logical_and(np.logical_and(np.isfinite(phi), intmask1 > 0), intmask2 > 0).nonzero()
        else:
            good = np.logical_and(np.isfinite(phi), intmask1 > 0).nonzero()
    else:
        good = np.isfinite(phi).nonzero()

    # NOTE: no statistical weights are computed in this version of the routine; uniform weights are
    # assumed here so that the weighted call to HOG_PRS below is well defined.
    weights = np.ones_like(phi)
    output = HOG_PRS(2.*phi[good], w=weights[good])
    Zx = output['Zx']
    s_Zx = output['s_Zx']
    meanPhi = output['meanphi']

    return Zx, corrframe, smoothframe1
def HOGcorr_frame(frame1, frame2, gradthres=0., pxsz=1., ksz=1., res=1., mask1=0, mask2=0, wd=1, allow_huge=False, regrid=False):
    # Calculates the spatial correlation between frame1 and frame2 using the HOG method
    #
    # INPUTS
    # frame1 - first input map
    # frame2 - second input map
    #
    # OUTPUTS
    # Zx           - projected Rayleigh statistic of the relative orientation angles
    # corrframe    - map of cos(2*phi), where phi is the relative orientation angle
    # smoothframe1 - frame1 smoothed with the derivative kernel
    # smoothframe2 - frame2 smoothed with the derivative kernel

    sf = 3.  # Number of pixels per kernel FWHM
    pxksz = ksz/pxsz
    pxres = res/pxsz
    sz1 = np.shape(frame1)

    if (ksz > 1):
        if (regrid):
            intframe1 = congrid(frame1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            intframe2 = congrid(frame2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
            if np.array_equal(np.shape(frame1), np.shape(mask1)):
                intmask1 = congrid(mask1, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask1[(intmask1 > 0.).nonzero()] = 1.
            if np.array_equal(np.shape(frame2), np.shape(mask2)):
                intmask2 = congrid(mask2, [int(np.round(sf*sz1[0]/pxres)), int(np.round(sf*sz1[1]/pxres))])
                intmask2[(intmask2 > 0.).nonzero()] = 1.
        else:
            intframe1 = frame1
            intframe2 = frame2
            intmask1 = mask1
            intmask2 = mask2

        # Smoothing with a Gaussian kernel and finite-difference gradients
        smoothframe1 = convolve_fft(intframe1, Gaussian2DKernel(pxksz), allow_huge=allow_huge)
        smoothframe2 = convolve_fft(intframe2, Gaussian2DKernel(pxksz), allow_huge=allow_huge)
        grad1 = np.gradient(smoothframe1)
        grad2 = np.gradient(smoothframe2)
    else:
        intframe1 = frame1
        intframe2 = frame2
        intmask1 = mask1
        intmask2 = mask2
        smoothframe1 = frame1
        smoothframe2 = frame2
        grad1 = np.gradient(intframe1)
        grad2 = np.gradient(intframe2)

    # Calculation of the relative orientation angles
    tempphi = np.arctan2(grad1[0]*grad2[1] - grad1[1]*grad2[0], grad1[0]*grad2[0] + grad1[1]*grad2[1])
    phi = np.arctan(np.tan(tempphi))

    # Excluding small gradients
    normGrad1 = np.sqrt(grad1[1]**2 + grad1[0]**2)
    normGrad2 = np.sqrt(grad2[1]**2 + grad2[0]**2)
    bad = np.logical_or(normGrad1 <= gradthres, normGrad2 <= gradthres).nonzero()
    phi[bad] = np.nan

    corrframe = np.cos(2.*phi)

    # Excluding masked regions
    if np.array_equal(np.shape(intframe1), np.shape(intmask1)):
        corrframe[(intmask1 == 0.).nonzero()] = np.nan
        if np.array_equal(np.shape(intframe2), np.shape(intmask2)):
            corrframe[(intmask2 == 0.).nonzero()] = np.nan
            good = np.logical_and(np.logical_and(np.isfinite(phi), intmask1 > 0), intmask2 > 0).nonzero()
        else:
            good = np.logical_and(np.isfinite(phi), intmask1 > 0).nonzero()
    else:
        good = np.isfinite(phi).nonzero()

    Zx, s_Zx, meanPhi = HOG_PRS(phi[good])

    return Zx, corrframe, smoothframe1, smoothframe2