def test_can_apply_intersection_mask_to_three_masked_arrays(self):
    """Intersecting three masked arrays pairwise yields one shared mask."""
    import numpy.ma as ma

    first = ma.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                     mask=[1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
    second = ma.array([2, 4, 5, 6, 7, 8, 4, 3, 6, 80],
                      mask=[0, 1, 0, 0, 0, 0, 0, 1, 0, 0])
    third = ma.array([2, 4, 5, 6, 7, 8, 4, 3, 6, 80],
                     mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])

    # Fold the masks together pairwise; the second pass over
    # (first, second) propagates third's mask back into second.
    first, second = apply_intersection_mask_to_two_arrays(first, second)
    first, third = apply_intersection_mask_to_two_arrays(first, third)
    first, second = apply_intersection_mask_to_two_arrays(first, second)

    expected = [1, 1, 1, 0, 0, 0, 0, 1, 0, 1]
    for result in (first, second, third):
        assert ma.equal(result.mask, expected).all()
def test_testUfuncs1(self):
    """Check that numpy.ma ufuncs agree with their plain-numpy counterparts."""
    # Test various functions such as sin, cos.
    # Fixture: x/y/z are plain arrays, xm/ym/zm their masked counterparts;
    # eq() compares the (filled) values — presumably from the test module's
    # star import of numpy.ma (verify against the file header).
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    assert_(eq(np.cos(x), cos(xm)))
    assert_(eq(np.cosh(x), cosh(xm)))
    assert_(eq(np.sin(x), sin(xm)))
    assert_(eq(np.sinh(x), sinh(xm)))
    assert_(eq(np.tan(x), tan(xm)))
    assert_(eq(np.tanh(x), tanh(xm)))
    # sqrt/log of non-positive entries would warn; those points are masked
    # on the ma side, so suppress numpy's divide/invalid warnings here.
    with np.errstate(divide='ignore', invalid='ignore'):
        assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
        assert_(eq(np.log(abs(x)), log(xm)))
        assert_(eq(np.log10(abs(x)), log10(xm)))
    assert_(eq(np.exp(x), exp(xm)))
    assert_(eq(np.arcsin(z), arcsin(zm)))
    assert_(eq(np.arccos(z), arccos(zm)))
    assert_(eq(np.arctan(z), arctan(zm)))
    assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
    assert_(eq(np.absolute(x), absolute(xm)))
    assert_(eq(np.equal(x, y), equal(xm, ym)))
    assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
    assert_(eq(np.less(x, y), less(xm, ym)))
    assert_(eq(np.greater(x, y), greater(xm, ym)))
    assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
    assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
    assert_(eq(np.conjugate(x), conjugate(xm)))
    # concatenate must accept any mix of masked and plain inputs.
    assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
    assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
    assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
    assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def log_linear_vinterp(T, P, levs):
    """Interpolate a field from sigma levels to pressure levels (log-linear).

    Originally by Charles Doutriaux ([email protected]), version 1.1.
    Expects the level axis first (2-D field, no axis reorder).

    Parameters
    ----------
    T : array
        Temperature (or any field) on sigma levels.
    P : array
        Pressure on the same sigma levels, ordered from TOP (level 0)
        to BOTTOM (last level).
    levs : scalar or sequence
        Target pressure level(s), same units as ``P``.

    Returns
    -------
    numpy.ma.MaskedArray (float32)
        ``T`` interpolated to ``levs``; points where no pair of sigma
        levels brackets the target pressure are masked.
    """
    import numpy
    import numpy.ma as MA
    nsigma = P.shape[0]          # number of sigma levels
    try:
        nlev = len(levs)         # number of requested pressure levels
    except TypeError:            # a bare scalar was passed
        nlev = 1
    t = []
    for ilv in range(nlev):      # loop through pressure levels
        try:
            lev = levs[ilv]      # get value for this level
        except TypeError:        # only one (scalar) level passed
            lev = levs
        # All four work arrays start at -1, the "not found yet" sentinel.
        Pabv = MA.ones(P[0].shape, numpy.float64)
        Tabv = -Pabv             # temperature on sigma level above
        Tbel = -Pabv             # temperature on sigma level below
        Pbel = -Pabv             # pressure on sigma level below
        Pabv = -Pabv             # pressure on sigma level above
        for isg in range(1, nsigma):   # second sigma level to last one
            a = MA.greater(P[isg], lev)      # pressure greater than lev?
            b = MA.less(P[isg - 1], lev)     # previous level less than lev?
            # lev is bracketed by levels isg-1 and isg where both hold;
            # compute the combined mask once instead of four times.
            between = MA.logical_and(a, b)
            Pabv = MA.where(between, P[isg], Pabv)
            Tabv = MA.where(between, T[isg], Tabv)
            Pbel = MA.where(between, P[isg - 1], Pbel)
            Tbel = MA.where(between, T[isg - 1], Tbel)
        # Interpolate linearly in log(P); mask points where no bracketing
        # pair was found (Pbel still holds the -1 sentinel).  abs() keeps
        # the log argument positive at sentinel points before masking.
        tl = MA.masked_where(
            MA.equal(Pbel, -1.),
            MA.log(lev / MA.absolute(Pbel)) / MA.log(Pabv / Pbel)
            * (Tabv - Tbel) + Tbel)
        t.append(tl)             # add a level to the output
    # MA.asarray preserves the per-level masks while stacking the levels.
    return MA.asarray(t).astype(numpy.float32)
def GroupData(x, y):
    """Split *y* into per-condition vectors.

    *x* is a grouping / dummy variable and *y* holds the actual data;
    the result is a list with one vector of *y* values for each unique
    value found in *x*.
    """
    uniques, freqs = UniqueVals(x)
    # freqs travels alongside each unique value but only the value is
    # needed to build the selection mask.
    return [y[ma.equal(x, value)] for value, _ in zip(uniques, freqs)]
def corr_proba(r, ndata, ndataset=2, dof=False): """Probability of rejecting correlations - **r**: Correlation coefficient - **ndata**: Number of records use for correlations - **ndataset**, optional: Number of datasets (1 for autocorrelations, else 2) [default: 2] .. todo:: This must be rewritten using :mod:`scipy.stats` """ # TODO: use scipy for betai and _gamma? from genutil.salstat import betai,_gammaln # Basic tests ndata = MA.masked_equal(ndata,0,copy=0) r = MV2.masked_where(MA.equal(MA.absolute(r),1.),r,copy=0) # Degree of freedom if dof: df = ndata else: df = ndata-2-ndataset # Advanced test: prevent extreme values by locally decreasing the dof reduc = N.ones(r.shape) z = None while z is None or MA.count(MA.masked_greater(z,-600.)): if z is not None: imax = MA.argmin(z.ravel()) reduc.flat[imax] += 1 dfr = df/reduc t = r*MV2.sqrt(dfr/((1.0-r)* (1.0+r))) a = 0.5*dfr b = 0.5 x = df/(dfr+t**2) z = _gammaln(a+b)-_gammaln(a)-_gammaln(b)+a*MA.log(x)+b*MA.log(1.0-x) # Perfom the test and format the variable prob = MV2.masked_array(betai(a,b,x),axes=r.getAxisList())*100 prob.id = 'corr_proba' ; prob.name = prob.id prob.long_name = 'Probability of rejection' prob.units = '%' return prob
def corr_proba(r, ndata, ndataset=2, dof=False): """Probability of rejecting correlations - **r**: Correlation coefficient - **ndata**: Number of records use for correlations - **ndataset**, optional: Number of datasets (1 for autocorrelations, else 2) [default: 2] .. todo:: This must be rewritten using :mod:`scipy.stats` """ # Basic tests ndata = MA.masked_equal(ndata,0,copy=0) r = MV2.masked_where(MA.equal(MA.absolute(r),1.),r,copy=0) # Degree of freedom if dof: df = ndata else: df = ndata-2-ndataset # Advanced test: prevent extreme values by locally decreasing the dof reduc = N.ones(r.shape) z = None while z is None or MA.count(MA.masked_greater(z,-600.)): if z is not None: imax = MA.argmin(z.ravel()) reduc.flat[imax] += 1 dfr = df/reduc t = r*MV2.sqrt(dfr/((1.0-r)* (1.0+r))) a = 0.5*dfr b = 0.5 x = df/(dfr+t**2) z = _gammaln(a+b)-_gammaln(a)-_gammaln(b)+a*MA.log(x)+b*MA.log(1.0-x) # Perfom the test and format the variable prob = MV2.masked_array(betai(a,b,x),axes=r.getAxisList())*100 prob.id = 'corr_proba' ; prob.name = prob.id prob.long_name = 'Probability of rejection' prob.units = '%' return prob
def log_linear_vinterp(T, P, levs):
    """Interpolate a field from sigma levels to pressure levels (log-linear).

    Author: Charles Doutriaux ([email protected]), version 1.1.
    Expects a 2D field here so there is no axis reorder (suspected of a
    memory leak).

    Input:
        T    -- temperature on sigma levels
        P    -- pressure field from TOP (level 0) to BOTTOM (last level)
        levs -- pressure level(s) to interpolate to (same units as P)
    Output:
        t    -- temperature on the pressure levels ``levs``
    External: ``Numeric`` (legacy module) and ``asMA`` — neither is defined
    in this function; presumably file-level imports, TODO confirm.
    """
    import numpy.ma as MA
    # from numpy.oldnumeric.ma import ones,Float,greater,less,logical_and,where,equal,log,asarray,Float16
    sh = P.shape
    nsigma = sh[0]  # Number of sigma levels
    try:
        nlev = len(levs)  # Number of pressure levels
    except:  # len() fails on a scalar: if only one level len(levs) would break
        nlev = 1
    t = []
    for ilv in range(nlev):  # loop through pressure levels
        try:
            lev = levs[ilv]  # get value for the level
        except:  # only 1 (scalar) level passed
            lev = levs
        # print ' ......... level:',lev
        # All four work arrays start at -1, the "not found yet" sentinel.
        Pabv = MA.ones(P[0].shape, Numeric.Float)
        Tabv = -Pabv  # Temperature on sigma level Above
        Tbel = -Pabv  # Temperature on sigma level Below
        Pbel = -Pabv  # Pressure on sigma level Below
        Pabv = -Pabv  # Pressure on sigma level Above
        for isg in range(1, nsigma):  # loop from second sigma level to last one
            # print 'Sigma level #',isg
            a = MA.greater(P[isg], lev)  # Where is the pressure greater than lev
            b = MA.less(P[isg - 1], lev)  # Where is the pressure less than lev
            # If lev lies between these two sigma levels, record the
            # bracketing pressures and temperatures.
            Pabv = MA.where(MA.logical_and(a, b), P[isg], Pabv)  # Pressure on sigma level Above
            Tabv = MA.where(MA.logical_and(a, b), T[isg], Tabv)  # Temperature on sigma level Above
            Pbel = MA.where(MA.logical_and(a, b), P[isg - 1], Pbel)  # Pressure on sigma level Below
            Tbel = MA.where(MA.logical_and(a, b), T[isg - 1], Tbel)  # Temperature on sigma level Below
        # end of for isg in range(1,nsigma)
        # val=where(equal(Pbel,-1.),Pbel.missing_value,lev)
        # Log-linear interpolation; points with no bracketing pair found
        # (Pbel still -1) are masked out.
        tl = MA.masked_where(
            MA.equal(Pbel, -1.),
            MA.log(lev / MA.absolute(Pbel)) / MA.log(Pabv / Pbel) *
            (Tabv - Tbel) + Tbel)  # Interpolation
        t.append(tl)  # add a level to the output
    # end of for ilv in range(nlev)
    return asMA(t).astype(Numeric.Float32)  # convert t to an array
def test_testOddFeatures(self):
    """Exercise odd corners of numpy.ma: flat assignment, where/choose with
    the masked constant, masked_* factory functions, and mask propagation."""
    # Test of other odd features
    # flat indexing writes through the reshaped view
    x = arange(20)
    x = x.reshape(4, 5)
    x.flat[5] = 12
    assert_(x[1, 0] == 12)
    # complex arithmetic on masked arrays
    z = x + 10j * x
    assert_(eq(z.real, x))
    assert_(eq(z.imag, 10 * x))
    assert_(eq((z * conjugate(z)).real, 101 * x * x))
    z.imag[...] = 0.0
    # assigning the masked constant masks the element
    x = arange(10)
    x[3] = masked
    assert_(str(x[3]) == str(masked))
    c = x >= 8
    # where() with masked on both branches yields a fully masked result
    assert_(count(where(c, masked, masked)) == 0)
    assert_(shape(where(c, masked, masked)) == c.shape)
    # where(cond, x, masked): masked wherever cond is False (or x is masked)
    z = where(c, x, masked)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is masked)
    assert_(z[7] is masked)
    assert_(z[8] is not masked)
    assert_(z[9] is not masked)
    assert_(eq(x, z))
    # where(cond, masked, x): the complementary masking
    z = where(c, masked, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    # masked_where(c, x) agrees with where(c, masked, x)
    z = masked_where(c, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    assert_(eq(x, z))
    # masked entries and a masked condition propagate through where()
    x = array([1., 2., 3., 4., 5.])
    c = array([1, 1, 1, 0, 0])
    x[2] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    c[0] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)
    # each masked_<op> factory matches masked_where(<op>(...), x)
    assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
    assert_(eq(masked_where(greater_equal(x, 2), x),
               masked_greater_equal(x, 2)))
    assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
    assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
    assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
    assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
    assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
    # the literals 99/199 are the fill values eq() sees for these inputs
    assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
    assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
    # pre-existing masks are OR-ed with the new condition
    assert_(eq(masked_inside(array(list(range(5)),
                                   mask=[1, 0, 0, 0, 0]), 1, 3).mask,
               [1, 1, 1, 1, 0]))
    assert_(eq(masked_outside(array(list(range(5)),
                                    mask=[0, 1, 0, 0, 0]), 1, 3).mask,
               [1, 1, 0, 0, 1]))
    assert_(eq(masked_equal(array(list(range(5)),
                                  mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 0]))
    assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 1]))
    assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
               [99, 99, 3, 4, 5]))
    # an all-False mask leaves the data untouched
    atest = ones((10, 10, 10), dtype=np.float32)
    btest = zeros(atest.shape, MaskType)
    ctest = masked_where(btest, atest)
    assert_(eq(atest, ctest))
    # choose() honors a masked selector the same way where() does
    z = choose(c, (-x, x))
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)
    # filling the condition first must give the same selection, unmasked
    x = arange(6)
    x[5] = masked
    y = arange(6) * 10
    y[2] = masked
    c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
    cm = c.filled(1)
    z = where(c, x, y)
    zm = where(cm, x, y)
    assert_(eq(z, zm))
    assert_(getmask(zm) is nomask)
    assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
    # scalar branches mixed with the masked constant
    z = where(c, masked, 1)
    assert_(eq(z, [99, 99, 99, 1, 1, 1]))
    z = where(c, 1, masked)
    assert_(eq(z, [99, 1, 1, 99, 99, 99]))
def compute_photometric_stereo_impl(lights, images):
    """
    Given a set of images taken from the same viewpoint and a corresponding
    set of directions for light sources, this function computes the albedo
    and normal map of a Lambertian scene.

    If the computed albedo for a pixel has an L2 norm less than 1e-7, then
    set the albedo to black and set the normal to the 0 vector.

    Normals should be unit vectors.

    Input:
        lights -- 3 x N array.  Columns are normalized and are to be
                  interpreted as lighting directions.
        images -- list of N images.  Each image is of the same scene from
                  the same viewpoint, but under the lighting condition
                  specified in lights.  Images are height x width x channels
                  arrays, all with identical dimensions.
    Output:
        albedo -- float32 height x width x channels image with dimensions
                  matching the input images.
        normals -- float32 height x width x 3 image with dimensions
                  matching the input images.
    """
    (height, width, channel) = images[0].shape
    n_images = len(images)
    n_pixels = height * width

    # Least-squares factor L^T (L L^T)^-1, shape N x 3; hoisted out of the
    # per-channel loop since it does not depend on the channel.
    solve = lights.T.dot(np.linalg.inv(lights.dot(lights.T)))

    # G[p, :, ch] holds albedo * normal for pixel p in channel ch.
    G = np.zeros((n_pixels, 3, channel), dtype='float32')
    for ch in range(channel):  # range(): Python-2 xrange no longer exists
        # Stack channel ch of every image into a (pixels x num_images) matrix.
        I = np.zeros((n_pixels, n_images), dtype='float32')
        for img_idx in range(n_images):
            I[:, img_idx] = images[img_idx][:, :, ch].reshape(n_pixels)
        G[:, :, ch] = I.dot(solve)

    # Albedo is ||G|| per channel; zero out sub-threshold magnitudes.
    G_norm = np.linalg.norm(G, axis=1)
    G_norm[G_norm < 1.0e-7] = 0.0
    albedo = G_norm.reshape(height, width, channel).astype('float32')

    # Normals come from the channel-averaged G (the /3.0 scale cancels in
    # the normalization below; it only nudges the 1e-7 threshold).
    G_grey = np.sum(G, axis=2) / 3.0
    grey_norm = np.linalg.norm(G_grey, axis=1).astype('float32')  # (pixels,)
    dark = grey_norm < 1e-7  # pixels whose normal is undefined
    # Avoid division by zero: divide by 1 where dark, then zero those rows.
    # A 1-D boolean mask indexes whole rows of the (pixels, 3) matrix —
    # the original (pixels, 1)-shaped mask is an IndexError on modern numpy.
    grey_norm[dark] = 1.0
    normals = (G_grey / grey_norm[:, np.newaxis]).astype('float32')
    normals[dark] = 0.0
    return albedo, normals.reshape(height, width, 3)