Example 1
  def blinval(self,lat,lon,grid,offset):
    """ a.blinval(lat,lon,grid,offset)

     Bilinear interpolation of grid at lat,lon point

     Offset is 1.0 for cross variables, 0.5 for dot ones

     Advantages:    Stair-step effect caused by the nearest neighbour approach
                    is reduced. Image looks smooth.

     Disadvantages: Alters original data and reduces contrast and high
                    frequency component of the image by averaging neighbouring
                    values together. Is computationally more expensive than
                    nearest neighbour. 
    """
    (i,j) = self.latlon_to_ij(lat,lon)
    (i,j) = (i-offset,j-offset)
    (i0,j0)=(floor(i),floor(j))
    (i1,j1)=(floor(i),ceil(j))
    (i2,j2)=(ceil(i),ceil(j))
    (i3,j3)=(ceil(i),floor(j))
    if (ispointin(i0,j0,shape(grid)) and ispointin(i1,j1,shape(grid)) and \
        ispointin(i2,j2,shape(grid)) and ispointin(i3,j3,shape(grid))):
      dx=(i-i0)
      dy=(j-j0)
      i0=int(i0); j0=int(j0); i1=int(i1); j1=int(j1)
      i2=int(i2); j2=int(j2); i3=int(i3); j3=int(j3)
      p12=dx*grid[i2][j2]+(1-dx)*grid[i1][j1]
      p03=dx*grid[i3][j3]+(1-dx)*grid[i0][j0]
      value=dy*p12+(1-dy)*p03
    else:
      value=1e20
#     print "WARNING : Point is outside grid !"
    return value
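
The weighting scheme above can be illustrated on its own. Below is a minimal, self-contained sketch (plain Python lists rather than the class's latlon_to_ij/ispointin helpers, which are assumptions outside this snippet) of the same bilinear formula: each of the four surrounding grid values is weighted by the fractional distances dx and dy.

from math import floor, ceil

def bilinear(grid, i, j):
    # grid: 2-D sequence indexed as grid[row][col]; (i, j): fractional indices
    i0, j0 = int(floor(i)), int(floor(j))
    i1, j1 = int(ceil(i)), int(ceil(j))
    dx, dy = i - i0, j - j0
    p_lo = dx * grid[i1][j0] + (1 - dx) * grid[i0][j0]   # along i at j = floor(j)
    p_hi = dx * grid[i1][j1] + (1 - dx) * grid[i0][j1]   # along i at j = ceil(j)
    return dy * p_hi + (1 - dy) * p_lo                   # blend along j

print(bilinear([[0.0, 1.0], [2.0, 3.0]], 0.5, 0.5))      # 1.5, the mean of the corners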
Example 2
  def lwval(self,lat,lon,grid,offset):
    """ a.lwval(lat,lon,grid,offset)

     Distance-weighted linear interpolation of grid at lat,lon point

     Offset is 1.0 for cross variables, 0.5 for dot ones
    """
    (i,j) = self.latlon_to_ij(lat,lon)
    (i,j) = (i-offset,j-offset)
    (i0,j0)=(floor(i),floor(j))
    (i1,j1)=(floor(i),ceil(j))
    (i2,j2)=(ceil(i),ceil(j))
    (i3,j3)=(ceil(i),floor(j))
    if (ispointin(i0,j0,shape(grid)) and ispointin(i1,j1,shape(grid)) and \
        ispointin(i2,j2,shape(grid)) and ispointin(i3,j3,shape(grid))):
      d0=1.0/hypot((i-i0),(j-j0)); d1=1.0/hypot((i-i1),(j-j1))
      d2=1.0/hypot((i-i2),(j-j2)); d3=1.0/hypot((i-i3),(j-j3))
      i0=int(i0); j0=int(j0); i1=int(i1); j1=int(j1)
      i2=int(i2); j2=int(j2); i3=int(i3); j3=int(j3)
      value=grid[i0][j0]*d0+grid[i1][j1]*d1+grid[i2][j2]*d2+grid[i3][j3]*d3
      value=value/(d0+d1+d2+d3)
    else:
      value=1e20
#     print "WARNING : Point is outside grid !"
    return value
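
For comparison, here is a minimal sketch of the same inverse-distance weighting in isolation (a hypothetical standalone helper, not the class method above). Note that, as in lwval, the 1/d weights blow up if the point falls exactly on a grid node, so the sketch special-cases d == 0.

from math import hypot

def idw(points, values, x, y):
    # points: list of (i, j) node coordinates; values: matching node values
    num, den = 0.0, 0.0
    for (i, j), v in zip(points, values):
        d = hypot(x - i, y - j)
        if d == 0.0:
            return v          # exact hit on a node: no weighting needed
        w = 1.0 / d
        num += w * v
        den += w
    return num / den

corners = [(0, 0), (0, 1), (1, 1), (1, 0)]
print(idw(corners, [0.0, 1.0, 3.0, 2.0], 0.5, 0.5))   # 1.5 by symmetry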
Example 3
	def textureToList( self, array, metrics, mode=None ):
		"""Compile Numeric Python array to a display-list

		XXX:
			use a single texture and coordinates for texmap
			version

			special case ' ' so that it's not rendered as
			an image...
		"""
		if shape(array)[-1] == 2:
			mode = GL_LUMINANCE_ALPHA
		elif shape(array)[-1] == 4:
			mode = GL_RGBA
		else:
			raise ValueError( """Unsupported array dimension for textureToList, require 2 or 4 items/pixel, got %s"""%(shape(array)[-1],))
		list = glGenLists (1)
		glNewList( list, GL_COMPILE )
		try:
			try:
				if metrics.char != ' ':
					glPixelStorei(GL_UNPACK_ALIGNMENT,1)
					glPixelStorei(GL_PACK_ALIGNMENT, 1)
					glDrawPixelsub(
						mode,
						array,
					)
				glBitmap( 0,0,0,0, metrics.width,0, None )
			except Exception:
				glDeleteLists( list, 1 )
				raise
		finally:
			glEndList()
		return list
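
A compiled display list like the one returned above is typically replayed with glCallList and released with glDeleteLists once the glyph is no longer needed. A hedged usage sketch, assuming font is an instance of the surrounding class and that array and metrics come from its glyph machinery (neither is shown in this snippet):

# hypothetical usage; 'font', 'array' and 'metrics' are assumed to exist
glyph_list = font.textureToList(array, metrics)
glCallList(glyph_list)        # replay the compiled pixel draw + advance
glDeleteLists(glyph_list, 1)  # free the display list when done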
Example 4
def show(a, title = ''):
	"""Show function. Quite handy. :)"""

	if type(a) == list:
		for e in a:
			print e
	elif isarray(a):
		l = len(shape(a))
		if l == 1:
			scipy.gplt.plot(a)
			if title: scipy.gplt.title(title)
		elif l == 2:
			imagesc.plot2d(a, palette = imagesc.brownish)
		else:
			print shape(a)
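
Usage is straightforward; a few illustrative calls (assuming the Numeric/scipy names the module already relies on are in scope):

show([1, 2, 3])                 # list: printed element by element
show(array([1.0, 4.0, 9.0]))    # 1-D array: line plot via scipy.gplt
show(ones((8, 8)))              # 2-D array: image plot via imagesc.plot2d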
Example 5
def get_column(E):
    """
    @param E: 2-dimensional matrix of number data 
    @type E: Numeric array    
    
    @return: a column which has a non-zero eigenvalue
    """
    (rows, cols) = shape(E)
    
    for col_ind in range(cols):
        t = E[:,col_ind] # extract a column
        if (eigenvalue_vec(t) > 0):
            return t
    raise ValueError('all column vectors in E have zero-eigenvalues')  # error: sum of matrix is 0
Example 6
def get_column(E):
    """
    @param E: 2-dimensional matrix of number data 
    @type E: Numeric array    
    
    @return: a non-zero vector
    """
    (rows, cols) = shape(E)
    
    for col_ind in range(cols):
        t = E[:,col_ind] # extract a column
        if (vec_inner(t) > 0):
            return t
    raise ValueError('all column vectors in E are zero vectors') # error: sum of matrix is 0
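
The helper vec_inner is not shown on this page; from its use it is simply the inner product of a vector with itself (sum of squares), which is positive exactly when the vector is non-zero. A minimal sketch of what such a helper might look like (hypothetical; the real module may implement it with Numeric primitives or in C):

def vec_inner(v):
    # sum of squares of the elements of v; > 0 iff v is a non-zero vector
    total = 0.0
    for x in v:
        total += x * x
    return total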
Example 7
def get_column(E):
    """
    @param E: 2-dimensional matrix of number data 
    @type E: Numeric array    
    
    @return: a column which has a non-zero eigenvalue
    """
    (rows, cols) = shape(E)

    for col_ind in range(cols):
        t = E[:, col_ind]  # extract a column
        if (eigenvalue_vec(t) > 0):
            return t
    raise ValueError('all column vectors in E have zero-eigenvalues')  # error: sum of matrix is 0
Example 8
def nipals_c(X, PCs, threshold, E_matrices):
    """
    
    @param X: 2-dimensional matrix of number data. 
    @type X: Numeric array
    
    @param PCs: Number of Principal Components.
    @type PCs: int
    
    @param threshold: Convergence check value. For checking on convergence to zero (e.g. 0.000001). 
    @type threshold: float
    
    @param E_matrices: If E-matrices should be retrieved or not. E-matrices (for each PC) or explained_var (explained variance for each PC).
    @type E_matrices: bool
    
    @return: (Scores, Loadings, E)

    """

    if not import_ok:
        raise ImportError, "could not import c_nipals python extension"
    else:

        (rows, cols) = shape(X)

        maxPCs = min(rows,
                     cols)  # max number of PCs is min(objects, variables)
        if maxPCs < PCs: PCs = maxPCs  # change to maxPCs if PCs > maxPCs

        Scores = zeros((rows, PCs), Float)  # all Scores (T)
        Loadings = zeros((PCs, cols), Float)  # all Loadings (P)

        E = X.copy()  #E[0]  (should already be mean centered)

        if E_matrices:
            Error_matrices = zeros((PCs, rows, cols),
                                   Float)  # all Error matrices (E)
            c_nipals.nipals2(Scores, Loadings, E, Error_matrices, PCs,
                             threshold)
            return Scores, Loadings, Error_matrices
        else:
            explained_var = c_nipals.nipals(Scores, Loadings, E, PCs,
                                            threshold)
            return Scores, Loadings, explained_var
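
A hedged usage sketch of this wrapper, assuming the c_nipals extension imported correctly and that X is some 2-dimensional Numeric array that has already been mean centred (see the mean_center examples further down this page):

# hypothetical call; argument meanings follow the docstring above
Scores, Loadings, explained_var = nipals_c(mean_center(X), PCs=3,
                                           threshold=0.000001, E_matrices=False)
# Scores is (rows, 3), Loadings is (3, cols); explained_var holds one value per PC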
Example 9
def mean_center(X):
    """
    
    @param X: 2-dimensional matrix of number data 
    @type X: Numeric array
    
    
    @return: Mean centered X (always has same dimensions as X)
    
    """
    (rows, cols) = shape(X)
    new_X = zeros((rows, cols), Float)
    _averages = average(X, 0)
    #print _averages
        
    for row in range(rows):
        for col in range(cols):
            new_X[row, col] = X[row, col] - _averages[col]
    return new_X
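
The explicit double loop is equivalent to subtracting each column's mean in a single step. A one-line sketch of the same centering (relying on array broadcasting of the row of column means across X, which both Numeric and NumPy support, and the same average import as above):

def mean_center_vectorized(X):
    # subtract each column's mean from that column; same result as mean_center
    return X - average(X, 0)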
Example 10
def mean_center(X):
    """
    
    @param X: 2-dimensional matrix of number data 
    @type X: Numeric array
    
    
    @return: Mean centered X (always has same dimensions as X)
    
    """
    (rows, cols) = shape(X)
    new_X = zeros((rows, cols), Float)
    _averages = average(X, 0)
    #print _averages

    for row in range(rows):
        for col in range(cols):
            new_X[row, col] = X[row, col] - _averages[col]
    return new_X
Example 11
def pca(M):
    "Perform PCA on M, return eigenvectors and eigenvalues, sorted."
    T, N = shape(M)
    # if there are fewer rows T than columns N, use snapshot method
    if T < N:
        C = dot(M, t(M))
        evals, evecsC = eigenvectors(C)
        # HACK: make sure evals are all positive
        evals = where(evals < 0, 0, evals)
        evecs = 1. / sqrt(evals) * dot(t(M), t(evecsC))
    else:
        # calculate covariance matrix
        K = 1. / T * dot(t(M), M)
        evals, evecs = eigenvectors(K)
    # sort the eigenvalues and eigenvectors, descending order
    order = (argsort(evals)[::-1])
    evecs = take(evecs, order, 1)
    evals = take(evals, order)
    return evals, t(evecs)
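
The T < N branch relies on the "snapshot" identity: the non-zero eigenvalues of M M^T (a T x T matrix) equal those of M^T M (N x N), and each eigenvector v of M M^T maps to the eigenvector M^T v of M^T M, which the 1/sqrt(evals) factor then normalises. A small numerical check of that identity (using modern NumPy for illustration rather than the Numeric/LinearAlgebra modules used above):

import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((3, 10))          # T=3 rows, N=10 columns (T < N)
small = np.linalg.eigvalsh(M @ M.T)       # 3 eigenvalues of the T x T matrix
big = np.linalg.eigvalsh(M.T @ M)         # 10 eigenvalues of the N x N matrix
# the non-zero spectra agree; the larger problem just has extra (near-)zero eigenvalues
print(np.allclose(np.sort(small), np.sort(big)[-3:]))   # True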
Example 12
def nipals_c(X, PCs, threshold, E_matrices):
    """
    
    @param X: 2-dimensional matrix of number data. 
    @type X: Numeric array
    
    @param PCs: Number of Principal Components.
    @type PCs: int
    
    @param threshold: Convergence check value. For checking on convergence to zero difference (e.g. 0.000001). 
    @type threshold: float
    
    @param E_matrices: If E-matrices should be retrieved or not. E-matrices (for each PC) or explained_var (explained variance for each PC).
    @type E_matrices: bool
    
    @return: (Scores, Loadings, E)

    """
    
    if not import_ok:
        raise ImportError("could not import c_nipals python extension")
    else:  

        (rows, cols) = shape(X)

        maxPCs = min(rows, cols) # max number of PCs is min(objects, variables)
        if maxPCs < PCs: PCs = maxPCs # change to maxPCs if PCs > maxPCs

        Scores = zeros((rows, PCs), Float) # all Scores (T)
        Loadings = zeros((PCs, cols), Float) # all Loadings (P)

        E = X.copy() #E[0]  (should already be mean centered)

        if E_matrices:
            Error_matrices = zeros((PCs, rows, cols), Float) # all Error matrices (E)
            c_nipals.nipals2(Scores, Loadings, E, Error_matrices, PCs, threshold)
            return Scores, Loadings, Error_matrices
        else:
            explained_var = c_nipals.nipals(Scores, Loadings, E, PCs, threshold)
            return Scores, Loadings, explained_var
Example 13
  def nearval(self,lat,lon,grid,offset):
    """ a.nearval(lat,lon,grid,offset)

     Nearest value interpolation of grid at lat,lon point
     Offset is 1.0 for cross variables, 0.5 for dot ones

     Advantages:    Output values are the original input values. Other methods
                    of resampling tend to average surrounding values.
                    Easy to compute and therefore fastest to use.

     Disadvantages: Produces a choppy, "stair-stepped" effect. The image has
                    a rough appearance relative to the original unrectified
                    data. Data values may be lost, while other values may be
                    duplicated.
    """
    (i,j) = self.latlon_to_ij(float(lat),float(lon))
    (i,j) = (rintf(i-offset),rintf(j-offset))
    if (ispointin(rintf(i-1.0),rintf(j-1.0),shape(grid))):
      return grid[int(i)][int(j)]
    else:
#     print "WARNING : Point is outside grid !"
      return 1e20
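
Stripped of the map-projection and bounds-checking helpers, nearest-neighbour sampling is just rounding the fractional indices to the closest node. A minimal standalone sketch (hypothetical, not the class method above):

def nearest(grid, i, j):
    # round the fractional row/column indices to the closest grid node
    return grid[int(round(i))][int(round(j))]

g = [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]
print(nearest(g, 1.2, 1.6))   # grid[1][2] -> 5.0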
Example 14
def pca(M):
    "Perform PCA on M, return eigenvectors and eigenvalues, sorted."
    from Numeric import take, dot, shape, argsort, where, sqrt, transpose as t
    from LinearAlgebra import eigenvectors
    T, N = shape(M)
    # if there are fewer rows T than columns N, use
    # the snapshot method
    if T < N:
        C = dot(M, t(M))
        evals, evecsC = eigenvectors(C)
        # HACK: make sure evals are all positive
        evals = where(evals < 0, 0, evals)
        evecs = 1./sqrt(evals) * dot(t(M), t(evecsC))
    else:
        # calculate covariance matrix
        K = 1./T * dot(t(M), M)
        evals, evecs = eigenvectors(K)
    # sort the eigenvalues and eigenvectors, descending order
    order = (argsort(evals)[::-1])
    evecs = take(evecs, order, 1)
    evals = take(evals, order)
    return evals, t(evecs)
Example 15
def isnan(a):
    """y = isnan(x) returns True where x is Not-A-Number"""
    return reshape(array([_isnan(i) for i in ravel(a)], 'b'), shape(a))
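
With a modern NumPy build the element-by-element loop is unnecessary, since numpy.isnan is already vectorised and preserves the input's shape; the equivalent call is simply:

import numpy as np
print(np.isnan(np.array([1.0, float('nan'), 3.0])))   # [False  True False]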
Example 16
def nipals_arr(X, PCs, threshold, E_matrices):
    """
    
    @param X: 2-dimensional matrix of number data. 
    @type X: Numeric array
    
    @param PCs: Number of Principal Components.
    @type PCs: int
    
    @param threshold: Convergence check value. For checking on convergence to zero difference (e.g. 0.000001). 
    @type threshold: float
    
    @param E_matrices: If E-matrices should be retrieved or not. E-matrices (for each PC) or explained_var (explained variance for each PC).
    @type E_matrices: bool
    
    @return: (Scores, Loadings, E)

    """
    (rows, cols) = shape(X)

    maxPCs = min(rows, cols) # max number of PCs is min(objects, variables)
    if maxPCs < PCs: PCs = maxPCs # change to maxPCs if PCs > maxPCs
    
    Scores = zeros((rows, PCs), Float) # all Scores (T)
    Loadings = zeros((PCs, cols), Float) # all Loadings (P)
    
    E = X.copy() #E[0]  (should already be mean centered)
    
    if E_matrices:
        Error_matrices = zeros((PCs, rows, cols), Float) # all Error matrices (E)
    else:
        explained_var = zeros((PCs), Float)
        tot_explained_var = 0
    
        # total object residual variance for PC[0] (calculating from E[0])
        e_tot0 = 0 # for E[0] the total object residual variance is 100%
        for k in range(rows):
            e_k = E[k, :]**2
            e_tot0 += sum(e_k)  
            
    t = get_column(E) # extract a column
    p = zeros((cols), Float)
    
    # do iterations (0, PCs)
    for i in range(PCs):
        convergence = False
        ready_for_compare = False
        E_t = transpose(E)
        
        while not convergence:
            _temp = vec_inner(t)
            p = mat_prod(E_t, t) / _temp # ..................................... step 1
            
            _temp = vec_inner(p)**(-0.5)
            p = p * _temp # .................................................... step 2
            
            _temp = vec_inner(p)
            t = mat_prod(E, p) / _temp # ....................................... step 3
            
            
            eigenval_new = vec_inner(t)
            if not ready_for_compare:
                ready_for_compare = True
            else: # ready for convergence check
                if (eigenval_new - eigenval_old) < threshold*eigenval_new: # ... step 4
                    convergence = True           
            eigenval_old = eigenval_new

        remove_tp_prod(E, t, p) # .............................................. step 5
        
        # add Scores and Loadings for PC[i] to the collection of all PCs
        Scores[:, i] = t; Loadings[i, :] = p
        
        if E_matrices:
            # complete error matrix
            # can calculate object residual variance (row-wise) or variable residual variance (column-wise)
            # total residual variance can also be calculated

            Error_matrices[i] = E.copy()

        else:
            # total object residual variance for E[i]
            e_tot = 0
            for k in range(rows):
                e_k = E[k, :]**2
                e_tot += sum(e_k)
            tot_obj_residual_var = (e_tot / e_tot0)
            explained_var[i] = 1 - tot_obj_residual_var - tot_explained_var
            tot_explained_var += explained_var[i]

    if E_matrices:
        return Scores, Loadings, Error_matrices
    else:
        return Scores, Loadings, explained_var  
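
nipals_arr leans on four small helpers that are not shown on this page: get_column (Example 6), vec_inner (sum of squares), mat_prod (matrix-vector product) and remove_tp_prod (deflation, subtracting the rank-one product t p^T from E in place). A hedged sketch of what the latter three could look like in NumPy terms (hypothetical implementations, shown only to make steps 1-5 concrete; the original module presumably uses Numeric or C equivalents):

from numpy import dot, outer

def vec_inner(v):
    # inner product of v with itself (sum of squares)
    return dot(v, v)

def mat_prod(A, v):
    # matrix-vector product A v
    return dot(A, v)

def remove_tp_prod(E, t, p):
    # step 5: deflate E in place by the rank-one outer product of scores and loadings
    E[:, :] = E - outer(t, p)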
Example 17
def nipals_arr(X, PCs, threshold, E_matrices):
    """
    
    @param X: 2-dimensional matrix of number data. 
    @type X: Numeric array
    
    @param PCs: Number of Principal Components.
    @type PCs: int
    
    @param threshold: Convergence check value. For checking on convergence to zero (e.g. 0.000001). 
    @type threshold: float
    
    @param E_matrices: If E-matrices should be retrieved or not. E-matrices (for each PC) or explained_var (explained variance for each PC).
    @type E_matrices: bool
    
    @return: (Scores, Loadings, E)

    """
    (rows, cols) = shape(X)

    maxPCs = min(rows, cols)  # max number of PCs is min(objects, variables)
    if maxPCs < PCs: PCs = maxPCs  # change to maxPCs if PCs > maxPCs

    Scores = zeros((rows, PCs), Float)  # all Scores (T)
    Loadings = zeros((PCs, cols), Float)  # all Loadings (P)

    E = X.copy()  #E[0]  (should already be mean centered)

    if E_matrices:
        Error_matrices = zeros((PCs, rows, cols),
                               Float)  # all Error matrices (E)
    else:
        explained_var = zeros((PCs), Float)
        tot_explained_var = 0

        # total object residual variance for PC[0] (calculating from E[0])
        e_tot0 = 0  # for E[0] the total object residual variance is 100%
        for k in range(rows):
            e_k = E[k, :]**2
            e_tot0 += sum(e_k)

    t = get_column(E)  # extract a column
    p = zeros((cols), Float)

    # do iterations (0, PCs)
    for i in range(PCs):
        convergence = False
        ready_for_compare = False
        E_t = transpose(E)

        while not convergence:
            _temp = eigenvalue_vec(t)
            p = mat_prod(
                E_t, t) / _temp  # ..................................... step 1

            _temp = eigenvalue_vec(p)**(-0.5)
            p = p * _temp  # .................................................... step 2

            _temp = eigenvalue_vec(p)
            t = mat_prod(
                E, p) / _temp  # ....................................... step 3

            eigenval_new = eigenvalue_vec(t)
            if not ready_for_compare:
                ready_for_compare = True
            else:  # ready for convergence check
                if (eigenval_new -
                        eigenval_old) < threshold * eigenval_new:  # ... step 4
                    convergence = True
            eigenval_old = eigenval_new

        remove_tp_prod(
            E, t, p)  # .............................................. step 5

        # add Scores and Loadings for PC[i] to the collection of all PCs
        Scores[:, i] = t
        Loadings[i, :] = p

        if E_matrices:
            # complete error matrix
            # can calculate object residual variance (row-wise) or variable residual variance (column-wise)
            # total residual variance can also be calculated

            Error_matrices[i] = E.copy()

        else:
            # total object residual variance for E[i]
            e_tot = 0
            for k in range(rows):
                e_k = E[k, :]**2
                e_tot += sum(e_k)
            tot_obj_residual_var = (e_tot / e_tot0)
            explained_var[i] = 1 - tot_obj_residual_var - tot_explained_var
            tot_explained_var += explained_var[i]

    if E_matrices:
        return Scores, Loadings, Error_matrices
    else:
        return Scores, Loadings, explained_var
Example 18
def isnan(a):
    """y = isnan(x) returns True where x is Not-A-Number"""
    return reshape(array([_isnan(i) for i in ravel(a)],'b'), shape(a))
Example 19
  def cubconval(self,lat,lon,grid,offset):
    """ a.cubconval(lat,lon,grid,offset)

     Cubic convolution of grid at lat,lon point

     Offset is 1.0 for cross variables, 0.5 for dot ones

     Advantages:    Stair-step effect caused by the nearest neighbour approach
                    is reduced. Image looks smooth. 

     Disadvantages: Alters original data and reduces contrast by averaging
                    neighbouring values together. Is computationally more
                    expensive than nearest neighbour or bilinear interpolation. 
    """
    (ix,jx)=self.latlon_to_ij(lat,lon)
    (ix,jx) = (ix-1,jx-1)
    (id,jd)=(rintf(ix),rintf(jx))
    (i0,j0)=(id-1.0,jd-1.0);   (i1,j1)=(id,jd-1.0);
    (i2,j2)=(id+1.0,jd-1.0);   (i3,j3)=(id+2.0,jd-1.0);
    (i4,j4)=(id-1.0,jd);       (i5,j5)=(id,jd);
    (i6,j6)=(id+1.0,jd);       (i7,j7)=(id+2.0,jd);
    (i8,j8)=(id-1.0,jd+1.0);   (i9,j9)=(id,jd+1.0);
    (i10,j10)=(id+1.0,jd+1.0); (i11,j11)=(id+2.0,jd+1.0);
    (i12,j12)=(id-1.0,jd+2.0); (i13,j13)=(id,jd+2.0);
    (i14,j14)=(id+1.0,jd+2.0); (i15,j15)=(id+2.0,jd+2.0);
    if (ispointin(i0,j0,shape(grid))   and ispointin(i1,j1,shape(grid))   and \
        ispointin(i2,j2,shape(grid))   and ispointin(i3,j3,shape(grid))   and \
        ispointin(i4,j4,shape(grid))   and ispointin(i5,j5,shape(grid))   and \
        ispointin(i6,j6,shape(grid))   and ispointin(i7,j7,shape(grid))   and \
        ispointin(i8,j8,shape(grid))   and ispointin(i9,j9,shape(grid))   and \
        ispointin(i10,j10,shape(grid)) and ispointin(i11,j11,shape(grid)) and \
        ispointin(i12,j12,shape(grid)) and ispointin(i13,j13,shape(grid)) and \
        ispointin(i14,j14,shape(grid)) and ispointin(i15,j15,shape(grid))):
      f0=cubic(hypot((ix-i0),(jx-j0))); f1=cubic(hypot((ix-i1),(jx-j1)));
      f2=cubic(hypot((ix-i2),(jx-j2))); f3=cubic(hypot((ix-i3),(jx-j3)));
      f4=cubic(hypot((ix-i4),(jx-j4))); f5=cubic(hypot((ix-i5),(jx-j5)));
      f6=cubic(hypot((ix-i6),(jx-j6))); f7=cubic(hypot((ix-i7),(jx-j7)));
      f8=cubic(hypot((ix-i8),(jx-j8))); f9=cubic(hypot((ix-i9),(jx-j9)));
      f10=cubic(hypot((ix-i10),(jx-j10))); f11=cubic(hypot((ix-i11),(jx-j11)));
      f12=cubic(hypot((ix-i12),(jx-j12))); f13=cubic(hypot((ix-i13),(jx-j13)));
      f14=cubic(hypot((ix-i14),(jx-j14))); f15=cubic(hypot((ix-i15),(jx-j15)));
      i0=int(i0); j0=int(j0); i1=int(i1); j1=int(j1);
      i2=int(i2); j2=int(j2); i3=int(i3); j3=int(j3);
      i4=int(i4); j4=int(j4); i5=int(i5); j5=int(j5);
      i6=int(i6); j6=int(j6); i7=int(i7); j7=int(j7);
      i8=int(i8); j8=int(j8); i9=int(i9); j9=int(j9);
      i10=int(i10); j10=int(j10); i11=int(i11); j11=int(j11);
      i12=int(i12); j12=int(j12); i13=int(i13); j13=int(j13);
      i14=int(i14); j14=int(j14); i15=int(i15); j15=int(j15);
      v1=grid[i0][j0]*f0+grid[i1][j1]*f1+grid[i2][j2]*f2+grid[i3][j3]*f3
      v2=grid[i4][j4]*f4+grid[i5][j5]*f5+grid[i6][j6]*f6+grid[i7][j7]*f7
      v3=grid[i8][j8]*f8+grid[i9][j9]*f9+grid[i10][j10]*f10+grid[i11][j11]*f11
      v4=grid[i12][j12]*f12+grid[i13][j13]*f13+\
         grid[i14][j14]*f14+grid[i15][j15]*f15
      div=f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15
      value=(v1+v2+v3+v4)/div
    else:
      value=1e20
#     print "WARNING : Point is outside grid !"
    return value
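
The cubic(...) weighting function used above is not shown on this page. The conventional choice is the Keys cubic-convolution kernel, sketched below as an assumption about this module (with the usual parameter a = -0.5); it gives weight 1 at distance 0 and falls smoothly to 0 at distance 2, and the method's final division by div absorbs any overall scale.

def cubic(d, a=-0.5):
    # Keys cubic-convolution kernel; d is the distance to the sample point
    d = abs(d)
    if d <= 1.0:
        return (a + 2.0) * d**3 - (a + 3.0) * d**2 + 1.0
    elif d < 2.0:
        return a * d**3 - 5.0 * a * d**2 + 8.0 * a * d - 4.0 * a
    else:
        return 0.0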