Code example #1
File: WhatIf.py Project: ostrokach/biskit
    def __exposedResidues( self, ASA_values, sidechainCut=0.0,
                         backboneCut=0.0, totalCut=0.0  ):
        """
        Decide what is a surface exposed residue and what is not.
        sidechainCut, backboneCut, totalCut - float, cutoff values
        for what will be considered an exposed residue. A residue is
        marked exposed as soon as any of the three values exceeds
        its cutoff.

        @param ASA_values: array with ASA values for side chains, backbone
                           and total calculated in L{__read_residueASA}.
        @type  ASA_values: array
        @param sidechainCut: cutoff ASA value for the side chain above
                             which the residue is considered exposed
                             (default: 0.0)
        @type  sidechainCut: float
        @param backboneCut: cutoff value for backbone ASA
        @type  backboneCut: float 
        @param totalCut: cutoff for total ASA
        @type  totalCut: float   

        @return: residue mask, where 0 = buried
        @rtype: [1|0]
        """
        col_0 = N.greater( N.transpose(ASA_values)[0], totalCut )
        col_1 = N.greater( N.transpose(ASA_values)[1], backboneCut )
        col_2 = N.greater( N.transpose(ASA_values)[2], sidechainCut )

        col_012 = N.concatenate( ([col_0],[col_1],[col_2]) ) 

        exposedList = N.greater(N.sum(col_012), 0)

        return exposedList
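
The masking above is an element-wise OR of three threshold tests. A minimal sketch of the same logic in modern numpy; the column order (total, backbone, side chain) follows the comparisons in the code rather than the docstring, and the values are made up:

    import numpy as np

    # one row per residue; columns compared as (total, backbone, side chain)
    ASA_values = np.array([[10.0, 2.0, 8.0],
                           [ 0.0, 0.0, 0.0],
                           [ 5.0, 0.0, 5.0]])

    total, backbone, sidechain = ASA_values.T
    # exposed as soon as any of the three values exceeds its cutoff (0.0 here)
    exposed = (total > 0.0) | (backbone > 0.0) | (sidechain > 0.0)
    print(exposed.astype(int))    # -> [1 0 1]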
Code example #2
File: Hmmer.py Project: ostrokach/biskit
    def mergeProfiles( self, p0, p1, maxOverlap=3 ):
        """
        Merge profile p0 with profile p1, as long as they overlap in
        at most maxOverlap positions

        @param p0: profile
        @type  p0: [float]
        @param p1: profile
        @type  p1: [float]
        @param maxOverlap: maximal allowed overlap between profiles
        @type  maxOverlap: int
        
        @return: merged profile
        @rtype: [float]
        """
        p0 = self.__list2array( p0 )
        p1 = self.__list2array( p1 )

        overlap = N.greater( N.greater(p0,0) + N.greater(p1,0), 1 )

        if N.sum( overlap ) <= maxOverlap:
            ## one of the two profiles will in most cases not belong to these
            ## positions. We can't decide which one is wrong, let's eliminate
            ## both values. Alternatively we could keep one, or the average, ..
            N.put( p1, N.nonzero( overlap ), 0 )
            N.put( p0, N.nonzero( overlap ), 0 )

            p0 = p0 + p1

        return p0
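
The merge is just masked addition over the overlap; a small sketch of the same behaviour in modern numpy, with made-up profiles:

    import numpy as np

    p0 = np.array([1.2, 0.8, 0.0, 0.0, 0.0])
    p1 = np.array([0.0, 0.0, 0.9, 1.1, 0.5])
    maxOverlap = 3

    overlap = (p0 > 0) & (p1 > 0)    # positions covered by both profiles
    if overlap.sum() <= maxOverlap:
        p0[overlap] = 0              # drop the conflicting values from both
        p1[overlap] = 0
        p0 = p0 + p1
    print(p0)                        # -> [1.2 0.8 0.9 1.1 0.5]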
Code example #3
File: WhatIf.py Project: ostrokach/biskit
    def __exposedResidues(self,
                          ASA_values,
                          sidechainCut=0.0,
                          backboneCut=0.0,
                          totalCut=0.0):
        """
        Decide what is a surface exposed residue and what is not.
        sidechainCut, backboneCut, totalCut - float, cutoff values
        for what will be considered an exposed residue. A residue is
        marked exposed as soon as any of the three values exceeds
        its cutoff.

        @param ASA_values: array with ASA values for side chains, backbone
                           and total calculated in L{__read_residueASA}.
        @type  ASA_values: array
        @param sidechainCut: cutoff ASA value for the side chain above
                             which the residue is considered exposed
                             (default: 0.0)
        @type  sidechainCut: float
        @param backboneCut: cutoff value for backbone ASA
        @type  backboneCut: float 
        @param totalCut: cutoff for total ASA
        @type  totalCut: float   

        @return: residue mask, where 0 = buried
        @rtype: [1|0]
        """
        col_0 = N.greater(N.transpose(ASA_values)[0], totalCut)
        col_1 = N.greater(N.transpose(ASA_values)[1], backboneCut)
        col_2 = N.greater(N.transpose(ASA_values)[2], sidechainCut)

        col_012 = N.concatenate(([col_0], [col_1], [col_2]))

        exposedList = N.greater(N.sum(col_012), 0)

        return exposedList
Code example #4
    def display(self):
        GL.glClearColor( 0.0, 0.0, 0.0, 0.0)
        GL.glClear( GL.GL_COLOR_BUFFER_BIT)
        GL.glColor3f( 1.0, 1.0, 0.0)
        self.x = self.x + self.move_x
        self.y = self.y + self.move_y
        self.age = self.age + 1
        which = Numeric.greater( self.age, MAX_AGE)
        self.x = Numeric.choose( which, (self.x, RandomArray.random( NUMDOTS)))
        self.y = Numeric.choose( which, (self.y, RandomArray.random( NUMDOTS)))
        self.age = Numeric.choose( which, (self.age, 0))
        self.x = Numeric.choose( Numeric.greater( self.x, 1.0), (self.x, self.x - 1.0))
        self.y = Numeric.choose( Numeric.greater( self.y, 1.0), (self.y, self.y - 1.0))
        x2 = RandomArray.random( NUMDOTS2)
        y2 = RandomArray.random( NUMDOTS2)
        v = Numeric.concatenate(
            (Numeric.transpose( Numeric.array( [self.x, self.y])),
             Numeric.transpose( Numeric.array( [self.x - 0.005, self.y + 0.005])),
             Numeric.transpose( Numeric.array( [self.x + 0.005, self.y - 0.005])),
             Numeric.transpose( Numeric.array( [x2, y2]))))
        #from opengltk.util import GLdouble
        #av = bufarray.readArray( v, GLdouble)
        #GL.glVertexPointer( 2, av)
        GL.glVertexPointer( 2, v)
        GL.glEnableClientState( GL.GL_VERTEX_ARRAY)
        #glplus.DrawArrays( GL.POINTS, len( av))
        from opengltk import glplus
        glplus.DrawArrays( GL.GL_POINTS, len( v))
        #GL.glDisableClientState( GL.VERTEX_ARRAY)
        GL.glFlush()
        GLUT.glutSwapBuffers()
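
The chain of Numeric.choose calls is a respawn-and-wrap update for the dot positions; the same pattern with numpy's where, on illustrative values:

    import numpy as np

    MAX_AGE, NUMDOTS = 5, 3
    rng = np.random.default_rng(0)
    age = np.array([3, 7, 1])
    x = np.array([0.2, 1.1, 0.5])

    expired = age > MAX_AGE
    x = np.where(expired, rng.random(NUMDOTS), x)   # respawn expired dots
    age = np.where(expired, 0, age)                 # and reset their age
    x = np.where(x > 1.0, x - 1.0, x)               # wrap coordinates into [0, 1]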
Code example #5
    def mergeProfiles(self, p0, p1, maxOverlap=3):
        """
        Merge profile p0 with profile p1, as long as they overlap in
        at most maxOverlap positions

        @param p0: profile
        @type  p0: [float]
        @param p1: profile
        @type  p1: [float]
        @param maxOverlap: maximal allowed overlap between profiles
        @type  maxOverlap: int
        
        @return: merged profile
        @rtype: [float]
        """
        p0 = self.__list2array(p0)
        p1 = self.__list2array(p1)

        overlap = N.greater(N.greater(p0, 0) + N.greater(p1, 0), 1)

        if N.sum(overlap) <= maxOverlap:
            ## one of the two profiles will in most cases not belong to these
            ## positions. We can't decide which one is wrong, let's eliminate
            ## both values. Alternatively we could keep one, or the average, ..
            N.put(p1, N.nonzero(overlap), 0)
            N.put(p0, N.nonzero(overlap), 0)

            p0 = p0 + p1

        return p0
Code example #6
File: chipimpute.py Project: JakaKokosar/orange-bio
def kNNimputeMA(arr2d, K=20, callback=None):
    """Returns a new 2D MA.array with missing values imputed from K nearest neighbours.
    Find K rows (axis 0) with the most similar values where similarity measure corresponds to weighted Euclidean distance.
    Imputed value = weighted average of the corresponding values of the K nearest neighbours,
    where the weights follow a tricubic distribution of the distances.
    Rows with no known values are imputed with the average over all rows.
    Version: 30.8.2005
    """
    arr2d = MA.asarray(arr2d)
    assert len(arr2d.shape) == 2, "2D array expected"
    # make a copy for imputation
    aImp2 = MA.array(arr2d)
    # leave out columns with 0 known values (columnInd: non-zero columns)
    columnCond = Numeric.greater(MA.count(arr2d, axis=0), 0)
    columnIndAll = Numeric.arange(arr2d.shape[1])
    columnInd = Numeric.compress(columnCond, columnIndAll)
    # impute the rows where 0 < #known_values < #non_zero_columns, i.e. exclude the rows with 0 and all (non-zero-column) values
    countByRows = MA.count(arr2d, axis=1)
    for rowIdx in Numeric.compress(Numeric.logical_and(Numeric.greater(countByRows, 0), Numeric.less(countByRows, columnInd.shape[0])), Numeric.arange(arr2d.shape[0])):
        rowResized = MA.resize(arr2d[rowIdx], arr2d.shape)
        diff = arr2d - rowResized
        distances = MA.sqrt(MA.add.reduce((diff)**2, 1) / MA.count(diff, axis=1))
        # nearest neighbours row indices (without the current row index)
        indSorted = MA.argsort(distances)[1:]
        distSorted = distances.take(indSorted)
        # number of distances different from MA.masked
        numNonMasked = distSorted.shape[0] - Numeric.add.reduce(Numeric.asarray(MA.getmaskarray(distSorted), Numeric.Int))
        # number of distances to account for (K or less)
        if numNonMasked > 1:
            weightsSorted = MA.power(1-MA.power(distSorted/distSorted[numNonMasked-1],3),3) # tricubic distribution of all weights
        else:
            weightsSorted = Numeric.ones(distSorted.shape[0])
        # compute average for each column separately in order to account for K non-masked values
        colInd4CurrRow = Numeric.compress(Numeric.logical_and(MA.getmaskarray(arr2d[rowIdx]), columnCond), columnIndAll)
        for colIdx in colInd4CurrRow:
            # column values sorted by distances
            columnVals = arr2d[:,colIdx].take(indSorted)
            # take only those weights where columnVals does not equal MA.masked
            weightsSortedCompressed = MA.compress(1-MA.getmaskarray(columnVals), weightsSorted)
            # impute from K (or possibly less) values
            aImp2[rowIdx,colIdx] = MA.average(columnVals.compressed()[:K], weights=weightsSortedCompressed[:K])
        if callback:
            callback()
    # impute the unknown rows with average profile
    avrgRow = MA.average(arr2d, 0)
    for rowIdx in Numeric.compress(Numeric.equal(countByRows, 0), Numeric.arange(arr2d.shape[0])):
        aImp2[rowIdx] = avrgRow
        if callback:
            callback()
    return aImp2
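
The neighbour weighting above is a tricube kernel over the sorted distances; a compact sketch of that single step with numpy, on made-up numbers:

    import numpy as np

    dist = np.array([0.1, 0.3, 0.7, 1.4])    # sorted distances to the neighbours
    # tricube: weight 1 at distance 0, falling to 0 at the largest distance
    w = (1 - (dist / dist[-1]) ** 3) ** 3
    vals = np.array([2.0, 3.0, 5.0, 9.0])    # neighbour values for one column
    K = 3
    imputed = np.average(vals[:K], weights=w[:K])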
Code example #7
File: bitmap.py Project: KeithRobertson/topographica
    def _arrayToImage(self, inArray):
        """
        Generate a 1-channel PIL Image from an array of values from 0 to 1.0.

        Values larger than 1.0 are clipped, after adding them to the total
        clipped_pixels.  Returns a one-channel (monochrome) Image.
        """

        # PIL 'L' Images use a range of 0 to 255, so we scale the
        # input array to match.  The pixels are scaled by 255, not
        # 256, so that 1.0 maps to fully white.
        max_pixel_value=255
        inArray = (Numeric.floor(inArray * max_pixel_value)).astype(Numeric.Int)

        # Clip any values that are still larger than max_pixel_value
        to_clip = (Numeric.greater(inArray.ravel(),max_pixel_value)).sum()
        if (to_clip>0):
            # CEBALERT: no explanation of why clipped pixel count is
            # being accumulated.
            self.clipped_pixels = self.clipped_pixels + to_clip
            inArray.clip(0,max_pixel_value,out=inArray)
            self.verbose("Bitmap: clipped",to_clip,"image pixels that were out of range")

        r,c = inArray.shape
        # The size is (width,height), so we swap r and c:
        newImage = Image.new('L',(c,r),None)
        newImage.putdata(inArray.ravel())
        return newImage
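
With current numpy and Pillow the same conversion can be written more directly; a sketch in which Image.fromarray replaces the putdata call and takes care of the (height, width) to (width, height) swap:

    import numpy as np
    from PIL import Image

    arr = np.random.rand(4, 6)                                # values in [0, 1)
    pixels = np.clip(np.floor(arr * 255), 0, 255).astype(np.uint8)
    img = Image.fromarray(pixels, mode='L')                   # one-channel image
    print(img.size)                                           # -> (6, 4)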
Code example #8
File: SurfaceRacer.py Project: ostrokach/biskit
    def __checkProfileIntegrity( self, profile, upperLimit=1.0,
                                 lowerLimit=-1.0):
        """
        In some cases SurfaceRacer generates incorrect curvature
        values for some atoms. This function sets values outside
        a given range to 0

        @param profile: profile values to check
        @type  profile: [float]
        @param upperLimit: upper limit for a valid value (default: 1.0)
        @type  upperLimit: float
        @param lowerLimit: lower limit for a valid value (default: -1.0)
        @type  lowerLimit: float

        @return: profile with inspected values
        @rtype: [float]
        """
        mask = N.greater( profile, upperLimit )
        mask += N.less( profile, lowerLimit )

        for i in  N.nonzero(mask):
            print 'WARNING! Profile value %.2f set to 0\n'%profile[i]
            profile[i] = 0

        return profile
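
The same range check takes a few lines of modern numpy; limits and values here are illustrative:

    import numpy as np

    profile = np.array([0.3, 1.7, -2.0, 0.9])
    bad = (profile > 1.0) | (profile < -1.0)     # out-of-range curvature values
    for v in profile[bad]:
        print('WARNING! Profile value %.2f set to 0' % v)
    profile[bad] = 0.0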
Code example #9
    def __checkProfileIntegrity(self,
                                profile,
                                upperLimit=1.0,
                                lowerLimit=-1.0):
        """
        In some cases SurfaceRacer generates incorrect curvature
        values for some atoms. This function sets values outside
        a given range to 0

        @param profile: profile values to check
        @type  profile: [float]
        @param upperLimit: upper limit for a valid value (default: 1.0)
        @type  upperLimit: float
        @param lowerLimit: lower limit for a valid value (default: -1.0)
        @type  lowerLimit: float

        @return: profile with inspected values
        @rtype: [float]
        """
        mask = N.greater(profile, upperLimit)
        mask += N.less(profile, lowerLimit)

        for i in N.nonzero(mask):
            print 'WARNING! Profile value %.2f set to 0\n' % profile[i]
            profile[i] = 0

        return profile
Code example #10
File: estimateReferences.py Project: VuisterLab/cing
def stable_sd(x, n_sd=3., min_length=20):

    if len(x) < min_length:
        if len(x) == 1:
            return 0.
        else:
            return standardDeviation(x)

    x = Numeric.array(x)
    _x = x
    _outliers = 0.
    
    i = 0

    while i < 10:

        mu = median(_x)
        sd = standardDeviation(_x, mu)

        outliers = Numeric.greater(abs(x-mu), n_sd*sd)

        if not Numeric.sum(outliers) or Numeric.sum(outliers==_outliers) == len(x):
            break

        _x = Numeric.compress(Numeric.logical_not(outliers), x)
        _outliers = outliers

        i += 1

    return sd
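
stable_sd is an iterative sigma-clipping estimate of the standard deviation; a rough modern-numpy equivalent (np.median and ndarray.std stand in for the median/standardDeviation helpers used above, and the short-input branch is omitted):

    import numpy as np

    def stable_sd_np(x, n_sd=3.0, max_iter=10):
        x = np.asarray(x, float)
        keep = np.ones(len(x), bool)
        sd = x.std()
        for _ in range(max_iter):
            mu = np.median(x[keep])
            sd = x[keep].std()
            new_keep = np.abs(x - mu) <= n_sd * sd
            if new_keep.all() or (new_keep == keep).all():
                break                      # converged: the outlier set is stable
            keep = new_keep
        return sd

    x = np.r_[np.linspace(0.9, 1.1, 15), 50.0]   # 15 inliers and one far outlier
    print(stable_sd_np(x))                        # ~0.06: sd of the inliers only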
Code example #11
    def _arrayToImage(self, inArray):
        """
        Generate a 1-channel PIL Image from an array of values from 0 to 1.0.

        Values larger than 1.0 are clipped, after adding them to the total
        clipped_pixels.  Returns a one-channel (monochrome) Image.
        """

        # PIL 'L' Images use a range of 0 to 255, so we scale the
        # input array to match.  The pixels are scaled by 255, not
        # 256, so that 1.0 maps to fully white.
        max_pixel_value = 255
        inArray = (Numeric.floor(inArray * max_pixel_value)).astype(
            Numeric.Int)

        # Clip any values that are still larger than max_pixel_value
        to_clip = (Numeric.greater(inArray.ravel(), max_pixel_value)).sum()
        if (to_clip > 0):
            # CEBALERT: no explanation of why clipped pixel count is
            # being accumulated.
            self.clipped_pixels = self.clipped_pixels + to_clip
            inArray.clip(0, max_pixel_value, out=inArray)
            self.verbose("Bitmap: clipped", to_clip,
                         "image pixels that were out of range")

        r, c = inArray.shape
        # The size is (width,height), so we swap r and c:
        newImage = Image.new('L', (c, r), None)
        newImage.putdata(inArray.ravel())
        return newImage
Code example #12
    def __call__(self,**params_to_override):
        p = ParamOverrides(self,params_to_override)

        xsize,ysize = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        xsize,ysize = int(round(xsize)),int(round(ysize))

        xdisparity  = int(round(xsize*p.xdisparity))
        ydisparity  = int(round(xsize*p.ydisparity))
        dotsize     = int(round(xsize*p.dotsize))

        bigxsize = 2*xsize
        bigysize = 2*ysize
        ndots=int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize) /
                        min(dotsize,xsize) / min(dotsize,ysize)))
        halfdot = floor(dotsize/2)

        # Choose random colors and locations of square dots
        random_seed = p.random_seed

        random_array.seed(random_seed*12,random_seed*99)
        col=where(random_array.random((ndots))>=0.5, 1.0, -1.0)

        random_array.seed(random_seed*122,random_seed*799)
        xpos=floor(random_array.random((ndots))*(bigxsize+2*dotsize)) - halfdot

        random_array.seed(random_seed*1243,random_seed*9349)
        ypos=floor(random_array.random((ndots))*(bigysize+2*dotsize)) - halfdot

        # Construct arrays of points specifying the boundaries of each
        # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize)
        x1=xpos.astype(Int) ; x1=choose(less(x1,0),(x1,0))
        y1=ypos.astype(Int) ; y1=choose(less(y1,0),(y1,0))
        x2=(xpos+(dotsize-1)).astype(Int) ; x2=choose(greater(x2,bigxsize),(x2,bigxsize))
        y2=(ypos+(dotsize-1)).astype(Int) ; y2=choose(greater(y2,bigysize),(y2,bigysize))

        # Draw each dot in the big image, on a blank background
        bigimage = zeros((bigysize,bigxsize))
        for i in range(ndots):
            bigimage[y1[i]:y2[i]+1,x1[i]:x2[i]+1] = col[i]

        result = p.offset + p.scale*bigimage[ (ysize/2)+ydisparity:(3*ysize/2)+ydisparity ,
                                              (xsize/2)+xdisparity:(3*xsize/2)+xdisparity ]

        for of in p.output_fns:
            of(result)

        return result
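
The choose/less/greater pairs crop each dot's corner coordinates to the big image; np.clip expresses the same bound directly (sizes are illustrative):

    import numpy as np

    bigxsize, dotsize = 100, 8
    xpos = np.array([-3.0, 10.0, 97.0])
    x1 = np.clip(xpos.astype(int), 0, None)                         # left edges, floored at 0
    x2 = np.clip((xpos + dotsize - 1).astype(int), None, bigxsize)  # right edges, capped
    print(x1, x2)    # -> [ 0 10 97] [  4  17 100]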
Code example #13
File: random.py Project: ioam/svn-history
    def __call__(self,**params_to_override):
        p = ParamOverrides(self,params_to_override)

        xsize,ysize = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        xsize,ysize = int(round(xsize)),int(round(ysize))
        
        xdisparity  = int(round(xsize*p.xdisparity))  
        ydisparity  = int(round(xsize*p.ydisparity))   
        dotsize     = int(round(xsize*p.dotsize))
        
        bigxsize = 2*xsize
        bigysize = 2*ysize
        ndots=int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize) /
                        min(dotsize,xsize) / min(dotsize,ysize)))
        halfdot = floor(dotsize/2)
    
        # Choose random colors and locations of square dots
        random_seed = p.random_seed

        random_array.seed(random_seed*12,random_seed*99)
        col=where(random_array.random((ndots))>=0.5, 1.0, -1.0)

        random_array.seed(random_seed*122,random_seed*799)
        xpos=floor(random_array.random((ndots))*(bigxsize+2*dotsize)) - halfdot
    
        random_array.seed(random_seed*1243,random_seed*9349)
        ypos=floor(random_array.random((ndots))*(bigysize+2*dotsize)) - halfdot
      
        # Construct arrays of points specifying the boundaries of each
        # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize)
        x1=xpos.astype(Int) ; x1=choose(less(x1,0),(x1,0))
        y1=ypos.astype(Int) ; y1=choose(less(y1,0),(y1,0))
        x2=(xpos+(dotsize-1)).astype(Int) ; x2=choose(greater(x2,bigxsize),(x2,bigxsize))
        y2=(ypos+(dotsize-1)).astype(Int) ; y2=choose(greater(y2,bigysize),(y2,bigysize))

        # Draw each dot in the big image, on a blank background
        bigimage = zeros((bigysize,bigxsize))
        for i in range(ndots):
            bigimage[y1[i]:y2[i]+1,x1[i]:x2[i]+1] = col[i]
            
        result = p.offset + p.scale*bigimage[ (ysize/2)+ydisparity:(3*ysize/2)+ydisparity ,
                                              (xsize/2)+xdisparity:(3*xsize/2)+xdisparity ]

        for of in p.output_fns:
            of(result)

        return result
Code example #14
File: utils.py Project: apolitis/imp
def map_angles(angles, period=None):
    """
    maps angles into interval [-pi,pi]
    """

    from numpy.oldnumeric import fmod, greater, logical_not

    if period is None:
        from numpy.oldnumeric import pi as period

    mask = greater(angles, 0.)

    return mask * (fmod(angles+period, 2*period)-period) + \
           logical_not(mask) * (fmod(angles-period, 2*period)+period)
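
map_angles splits on the sign to wrap values into [-period, period]; the standard branchless one-liner gives the same result up to the convention at the boundary:

    import numpy as np

    angles = np.array([3.5, -4.0, 1.0])
    wrapped = (angles + np.pi) % (2 * np.pi) - np.pi   # into [-pi, pi)
    print(wrapped)    # ~[-2.78  2.28  1.00]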
Code example #15
def map_angles(angles, period=None):
    """
    maps angles into interval [-pi,pi]
    """

    from numpy.oldnumeric import fmod, greater, logical_not

    if period is None:
        from numpy.oldnumeric import pi as period

    mask = greater(angles, 0.)

    return mask * (fmod(angles + period, 2 * period) - period) + \
        logical_not(mask) * (fmod(angles - period, 2 * period) + period)
Code example #16
    def draw(self):
        # XXX This method is not speed-optimized. I just wrote it to
        # get the job done. (Nonetheless, it seems faster than the C
        # version commented out above.)

        p = self.parameters # shorthand

        now_sec = VisionEgg.time_func()
        if self.start_times_sec is not None:
            # compute extinct dots and generate new positions
            replace_indices = Numeric.nonzero( Numeric.greater( now_sec - self.start_times_sec, p.dot_lifespan_sec) )
            Numeric.put( self.start_times_sec, replace_indices, now_sec )

            new_centers = np.random.standard_normal((3,len(replace_indices)))
            for i in range(3):
                Numeric.put( self.centers[i,:], replace_indices, new_centers[i,:] )
        else:
            # initialize dot extinction values to random (uniform) distribution
            self.start_times_sec = RandomArray.uniform( now_sec - p.dot_lifespan_sec, now_sec,
                                                        (self.constant_parameters.num_dots,))

        time_delta_sec = now_sec - self.last_time_sec
        self.last_time_sec = now_sec # reset for next loop
        self.centers = self.centers + np.array(p.signal_vec)[:,np.newaxis]*time_delta_sec

        xyz = self.centers*p.start_position_variance + np.array(p.start_position_mean)[:,np.newaxis]
        xs = xyz[0,:]
        ys = xyz[1,:]
        zs = xyz[2,:]

        if p.on:
            gl.glEnable( gl.GL_POINT_SMOOTH )
            # allow max_alpha value to control blending
            gl.glEnable( gl.GL_BLEND )
            gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA )

            gl.glPointSize(p.dot_size)

            # Clear the modeview matrix
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPushMatrix()

            gl.glDisable(gl.GL_TEXTURE_2D)

            draw_dots(xs,ys,zs,self.colors)
            gl.glDisable( gl.GL_POINT_SMOOTH ) # turn off
            gl.glPopMatrix()
Code example #17
File: Analyzer.py Project: ostrokach/biskit
    def __categorizeHexSurf(self, cutoff=0.1):
        """
        Compare complexes of list to native complex to see if
        their contact surfaces overlap with the native complex.

        @param cutoff: fraction cutoff for defining an overlap (default: 0.1)
        @type  cutoff: float

        @return: list of len(self.hexContacts) overlapping with
                 native contact surface of lig and rec (0 - no overlap,
                 1 - rec OR lig overlaps, 2 - rec AND lig overlaps)
        @rtype: [0|1|2]
        """
        result = [ self.com.fractionNativeSurface( c, self.contacts )
                   for c in self.hexContacts ]

        result = [ N.sum( N.greater( o, cutoff ) ) for o in result ]
        return result
Code example #18
File: Density.py Project: ostrokach/biskit
    def __find_intervals(self, l):
        l = N.array(l)
        l = N.take(l, N.argsort(l))

        globals().update( locals() )

        break_points = N.nonzero(N.greater(l[1:] - l[:-1], 1))

        start = 0
        intervals = []

        for i in range(len(break_points)):
            index = break_points[i]
            intervals.append(tuple(N.take(l, range(start, index + 1))))
            start = index + 1

        intervals.append(tuple(l[start:]))

        return intervals
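
__find_intervals groups a sorted integer list into runs of consecutive values; np.diff plus np.split gives the same intervals in three lines:

    import numpy as np

    l = np.sort(np.array([7, 1, 2, 3, 9, 8]))
    breaks = np.nonzero(np.diff(l) > 1)[0]           # indices where a gap starts
    intervals = [tuple(map(int, run)) for run in np.split(l, breaks + 1)]
    print(intervals)    # -> [(1, 2, 3), (7, 8, 9)]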
Code example #19
    def __find_intervals(self, l):
        l = N.array(l)
        l = N.take(l, N.argsort(l))

        globals().update(locals())

        break_points = N.nonzero(N.greater(l[1:] - l[:-1], 1))

        start = 0
        intervals = []

        for i in range(len(break_points)):
            index = break_points[i]
            intervals.append(tuple(N.take(l, range(start, index + 1))))
            start = index + 1

        intervals.append(tuple(l[start:]))

        return intervals
Code example #20
    def __categorizeHexSurf(self, cutoff=0.1):
        """
        Compare complexes of list to native complex to see if
        their contact surfaces overlap with the native complex.

        @param cutoff: fraction cutoff for defining an overlap (default: 0.1)
        @type  cutoff: float

        @return: list of len(self.hexContacts) overlapping with
                 native contact surface of lig and rec (0 - no overlap,
                 1 - rec OR lig overlaps, 2 - rec AND lig overlaps)
        @rtype: [0|1|2]
        """
        result = [
            self.com.fractionNativeSurface(c, self.contacts)
            for c in self.hexContacts
        ]

        result = [N.sum(N.greater(o, cutoff)) for o in result]
        return result
Code example #21
    def memberFrames(self, threshold=0.):
        """
        Get indices of all frames belonging to each cluster. Each frame
        is guaranteed to belong, at least, to the cluster for which it has
        its maximum membership. If threshold > 0, it can additionally pop
        up in other clusters.

        @param threshold: minimal cluster membership or 0 to consider
                          only max membership (default: 0)
        @type  threshold: float

        @return: n_cluster, lst of lst of int, frame indices
        @rtype: [[int]]
        """
        ## best cluster for each frame
        msm = self.memberships()
        maxMemb = N.argmax(msm, 0)

        r = [N.nonzero(N.equal(maxMemb, i)) for i in range(0, self.n_clusters)]
        r = [x.tolist() for x in r]

        ## same thing but now taking all above threshold
        ## -> same frame can end up in several clusters
        if threshold > 0.:
            r2 = [N.nonzero(N.greater(l, threshold)) for l in msm]

            ## add only additional frames
            for i in range(0, len(r)):
                try:
                    frames = r[i].tolist()
                except:
                    frames = r[i]

                r[i] = frames + [fr for fr in r2[i] if fr not in r[i]]

        ## sort frames within each cluster by their membership
        r = [self.membershipSort(r[i], i) for i in range(0, len(r))]

        return r
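
The first half of memberFrames is a plain argmax assignment of frames to clusters; a small sketch with a made-up membership matrix (clusters x frames):

    import numpy as np

    msm = np.array([[0.7, 0.2, 0.1],     # memberships of 3 frames in cluster 0
                    [0.3, 0.8, 0.9]])    # ... and in cluster 1
    best = np.argmax(msm, axis=0)        # best cluster for each frame
    frames = [np.nonzero(best == i)[0].tolist() for i in range(msm.shape[0])]
    print(frames)    # -> [[0], [1, 2]]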
Code example #22
File: estimateReferences.py Project: VuisterLab/cing
def flag_entries(_data, stats, bounds, z_max, atom_type='H',molType='protein'):

    l = []

    for entry, val in _data.items():

        classes = decompose_classes(val, bounds,molType=molType)

        for key, shifts in classes.items():

            if not key in stats:
                print 'no stats', key
                continue

            mean, sd = stats[key][:2]

            Z = z_scores(shifts, mean, sd)
  
            if Numeric.sum(Numeric.greater(abs(Z), z_max)):
                l.append((entry, key))

    return l
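
The flagging criterion is a z-score threshold; sketched with numpy on made-up shifts (mean, sd and z_max play the roles of the class statistics above):

    import numpy as np

    shifts = np.array([8.1, 8.3, 12.9])
    mean, sd, z_max = 8.2, 0.4, 6.0
    Z = (shifts - mean) / sd              # z-scores against the class statistics
    if np.sum(np.abs(Z) > z_max):
        print('entry flagged')            # 12.9 is ~11.8 sd from the mean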
Code example #23
File: TrajCluster.py Project: ostrokach/biskit
    def memberFrames( self, threshold=0. ):
        """
        Get indices of all frames belonging to each cluster. Each frame
        is guaranteed to belong, at least, to the cluster for which it has
        its maximum membership. If threshold > 0, it can additionally pop
        up in other clusters.

        @param threshold: minimal cluster membership or 0 to consider
                          only max membership (default: 0)
        @type  threshold: float

        @return: n_cluster, lst of lst of int, frame indices
        @rtype: [[int]]
        """
        ## best cluster for each frame
        msm = self.memberships()
        maxMemb = N.argmax( msm, 0 )

        r = [N.nonzero( N.equal(maxMemb, i) ) for i in range(0, self.n_clusters)]
        r = [ x.tolist() for x in r ]

        ## same thing but now taking all above threshold
        ## -> same frame can end up in several clusters
        if threshold > 0.:
            r2 = [ N.nonzero( N.greater( l, threshold) ) for l in msm ]

            ## add only additional frames
            for i in range(0, len( r ) ):
                try:
                    frames = r[i].tolist()
                except:
                    frames = r[i]

                r[i] = frames + [ fr for fr in r2[i] if fr not in r[i] ]

        ## sort frames within each cluster by their membership
        r = [ self.membershipSort( r[i], i) for i in range(0, len(r) )]

        return r
Code example #24
File: pressure.py Project: metocean/cdms
    def rgrd(self, dataIn, missingValueIn, missingMatch, logYes = 'yes', positionIn = None, missingValueOut = None):

        """        #---------------------------------------------------------------------------------
        #
        #    PURPOSE: To perform all the tasks required to regrid the input data, dataIn, into the output data,
        #             dataOut along the level dimension only.
        #
        #    DEFINITION:
        #
        #             def rgrd(self, dataIn, missingValueIn, missingMatch, positionIn = None, missingValueOut = None):
        # 
        # 
        #    PASSED :  dataIn -- data to regrid
        #
        #             missingValueIn -- the missing data value to use in setting missing in the mask. It is required
        #                               and there are two choices:
        #                                     None -- there is no missing data
        #                                     A number -- the value to use in the search for possible missing data.
        #                               The presence of missing data at a grid point leads to recording 0.0 in the mask.
        #
        #             missingMatch -- the comparison scheme used in searching for missing data in dataIn using the value passed
        #                             in as missingValueIn. The choices are:
        #                                  None -- used if None is the entry for missingValueIn
        #                                  equal -- used if missingValueIn is the exact value from the file
        #                                  greater -- the missing data value is equal to or greater than missingValueIn
        #                                  less -- the missing data value is equal to or less than missingValueIn
        #
        #             logYes -- choose the level regrid as linear in log of level or linear in level. Set to 
        #                       'yes' for log. Anything else is linear in level.
        #         
        #
        #
        #             positionIn -- a tuple with the numerical position of the dimensions
        #                           in C or Python order specified in the sequence longitude,
        #                           latitude, level and time. Longitude, latitude and level are
        #                           required. If time is missing submit None in its slot in the 
        #                           tuple. Notice that the length of the tuple is always four.
        #
        #                           Explicitly, in terms of the shape of dataIn as returned by Python's shape function
        #
        #                                positionIn[0] contains the position of longitude in dataIn      
        #                                positionIn[1] contains the position of latitude in dataIn      
        #                                positionIn[2] contains the position of level in dataIn or None      
        #                                positionIn[3] contains the position of time in dataIn or None      
        #
        #                           As  examples:
        #                                If the C order shape of 4D data is
        #                                    (number of longitudes, number of times, number of levels, number of latitudes)
        #                                submit
        #                                     (0, 3, 2, 1) 
        #
        #                                If the C order shape of 3D data is 
        #                                    (number of longitudes, number of times, number of latitudes)
        #                                submit
        #                                    (0, 2, 1, None) 
        #
        #                           Send in None if the shape is a subset of (time, level,
        #                           latitude, longitude) which is evaluated as follows:
        #                              3D -- code assumes (2,1,0,None)
        #                              4D -- code assumes (3,2,1,0)
        #
        #              missingValueOut -- the value for the missing data used in writing the output data. If left at the
        #                                 default entry, None, the code uses missingValueIn if present or as a last resort
        #                                 1.0e20
        #
        # 
        #    RETURNED : dataOut -- the regridded data
        #
        #                
        #    USAGE: 
        #                
        #          Example 1.  To regrid dataIn into dataOut using all the defaults where None, None signifies no
        #                      missing data.                   
        #              dataOut = x.rgrd(dataIn, None, None)    
        #
        #          Example 2.  To regrid dataIn into dataOut using 1.0e20 and greater as the missing data
        #                
        #                      dataOut = x.rgrd(dataIn, 1.e20, 'greater')    
        #
        #---------------------------------------------------------------------------------------------------------------------"""

        # check the required input -- dataIn, missingValueIn and  missingMatch

        # make sure that dataIn is an array

        try:
            z = len(dataIn)
        except TypeError:
            sendmsg('Error in calling the rgrd method -- dataIn must be an array')
            raise TypeError

        # check the missingValueIn pass

        if missingValueIn != None:
            try:
                z = abs(missingValueIn)
            except TypeError:
                sendmsg('Error in calling the rgrd method -- missingValueIn must be None or a number. Now it is ', missingValueIn)
                raise TypeError

        # check the missingMatch pass

        missingPossibilities = ['greater', 'equal', 'less', None]
        if missingMatch not in missingPossibilities:
            msg = 'Error in missingMatch -- it must be None or the string greater, equal, or less. Now it is '
            sendmsg(msg, missingMatch)
            raise ValueError

        # --- Check data type and change to float if necessary ----

        if dataIn.dtype.char != 'f':
            dataIn = dataIn.astype(Numeric.Float32)

        dataShape = dataIn.shape
        numberDim = len(dataShape)

        if numberDim < 2: 
            msg = 'Error in call to rgrd -- data must have at least 2 dimensions'
            sendmsg(msg)
            raise TypeError

        # --- evaluate positionIn ----
        
        # --- make standard positionIn as a check----
        positionList =[]
        for n in range(numberDim):              # insert a sequence of numbers
            positionList.append(n)
        positionList.reverse()

        for n in range(numberDim, 4):            # fill end of list with Nones
            positionList.append(None)

        positionCheck = tuple(positionList)      


        standardPosition = 0                            # transpose required

        if positionIn == None:                          # construct the default positionIn tuple
            positionIn = positionCheck
            standardPosition = 1                        # no need for a transpose with this data
        else:
            if positionIn == positionCheck:             # compare to the standard
                standardPosition = 1                    # no need for a transpose with this data

        if len(positionIn) != 4: 
            msg = 'Error in call to rgrd -- positionIn must be a tuple of length 4'
            sendmsg(msg)
            raise TypeError

        if standardPosition == 0:                        # transpose data to the standard order (t,z,y,x)

            newOrder, inverseOrder = checkorder(positionIn)

            dataIn = Numeric.transpose(dataIn, newOrder)                    # transpose data to standard order (t,z,y,x)
            dataIn = Numeric.array(dataIn.astype(Numeric.Float32), Numeric.Float32)       # make contiguous 


        # set dimension sizes and check for consistency 

        if positionIn[0] != None: 
            self.nlon = (dataShape[ positionIn[0] ]) 
        else:
            self.nlon = 0 
        if positionIn[1] != None: 
            self.nlat = (dataShape[ positionIn[1] ]) 
        else:
            self.nlat = 0 
        if positionIn[2] != None: 
            if self.nlevi != (dataShape[ positionIn[2] ]): 
                msg = 'Level size is inconsistent with input data'
                sendmsg(msg)
                raise ValueError
        if positionIn[3] != None: 
            self.ntime = (dataShape[ positionIn[3] ]) 
        else:
            self.ntime = 0 

        # allocate memory for dataOut -- the array with new number of levels

        outList = list(dataIn.shape)

        for i in range(len(outList)):
            if outList[i] == self.nlevi:
                outList[i] = self.nlevo
                break

        dataOut = Numeric.zeros(tuple(outList), Numeric.Float32)                      # memory for aout


        if missingMatch == None:                                                # if no missing do not pass None
            missingMatch = 'none'

        if missingValueIn == None:                                                # if no missing do not pass None
            missingValueIn = 1.333e33

        if logYes != 'yes':
            logYes = 'no'

        levIn = self.axisIn[:].astype(Numeric.Float64)
        levOut = self.axisOut[:].astype(Numeric.Float64)
        _regrid.rgdpressure(self.nlevi, self.nlevo, self.nlat, self.nlon, self.ntime, missingValueIn, missingMatch, logYes, levIn, levOut, dataIn, dataOut)  

        if missingMatch == 'none':                                                # if no missing do not pass None
            missingMatch = None
        if missingValueIn == 1.333e33:              
            missingValueIn = None

        if standardPosition == 0:
            dataOut = Numeric.transpose(dataOut, inverseOrder)                                   # transpose data to original order
            dataOut = Numeric.array(dataOut.astype(Numeric.Float32), Numeric.Float32)            # make contiguous 

        if missingValueOut != None:                # set the missing value in data to missingValueOut

            if missingMatch == 'greater': 
                if missingValueIn > 0.0: 
                    missing = 0.99*missingValueIn
                else: 
                    missing = 1.01*missingValueIn

                dataOut = Numeric.where(Numeric.greater(dataOut,missing), missingValueOut, dataOut)

            elif missingMatch == 'equal': 
                missing = missingValueIn
                dataOut = Numeric.where(Numeric.equal(dataOut,missing), missingValueOut, dataOut)

            elif missingMatch == 'less': 
                if missingValueIn < 0.0: 
                    missing = 0.99*missingValueIn
                else: 
                    missing = 1.01*missingValueIn

                dataOut = Numeric.where(Numeric.less(dataOut,missing), missingValueOut, dataOut)

        return dataOut 
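
For the 'greater' match the code widens the flag value by 1% before comparing, so regridded values that drifted slightly below the missing flag are still replaced; the masking step alone, in numpy:

    import numpy as np

    missingValueIn, missingValueOut = 1.0e20, -999.0
    missing = 0.99 * missingValueIn                 # 1% tolerance below the flag
    dataOut = np.array([273.0, 1.0e20, 250.0])
    dataOut = np.where(dataOut > missing, missingValueOut, dataOut)
    print(dataOut)    # -> [ 273. -999.  250.]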
Code example #25
                self.logfile.write("Catalog dimension mismatch: ",
                                   str(len(t1)), ' ', nsources)
                self.logfile.write(
                    "Check patrameters in detectionCatalog.inpar and filterCatalog.inpar"
                )
            #flux[i,:],fluxerr[i,:] = tableio.get_data(catalog,self.fluxColumns)
            flux[i, :], fluxerr[i, :] = t1, t2
            flux[i, :] = pUtil.deNAN(flux[i, :])

            # Those objects with flux equal or less than 0 are assigned a magnitude of 99
            # and a limiting magnitude equal to their SExtractor photometric error. This
            # is interpreted by BPZ as a nondetection with zero flux and 1-sigma error
            # equal to the limiting magnitude

            nondetected = Numeric.less_equal(
                flux[i, :], 0.0) * Numeric.greater(fluxerr[i, :], 0.0)

            # Those objects with error flux and flux equal to 0 are assigned a magnitude of -99
            # and a flux of 0, which is interpreted by SExtractor as a non-observed object

            nonobserved = Numeric.less_equal(fluxerr[i, :], 0.0)

            # When flux error > 100*(flux), mark as nonobserved (Benitez, 24-Oct-03).

            nonobserved = Numeric.where(
                fluxerr[i, :] > 100 * (abs(flux[i, :])), 1.0, nonobserved[:])

            detected = Numeric.logical_not(nonobserved + nondetected)

            # Get the zero point for the final magnitudes
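
The detection flags are two element-wise conditions over the flux table; in numpy the fragment reduces to the following (arrays are illustrative):

    import numpy as np

    flux = np.array([10.0, -1.0, 0.5, 2.0])
    fluxerr = np.array([0.5, 0.3, 0.0, 300.0])

    nondetected = (flux <= 0.0) & (fluxerr > 0.0)   # mag 99, limiting mag = error
    nonobserved = (fluxerr <= 0.0)                  # mag -99, flux 0
    nonobserved |= fluxerr > 100 * np.abs(flux)     # huge errors count as unobserved
    detected = ~(nonobserved | nondetected)
    print(detected)    # -> [ True False False False]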
Code example #26
File: CheckIdentities.py Project: ostrokach/biskit
    def identities(self, aln_dictionary):
        """
        Create a dictionary that contains information about all the
        alignments in the aln_dictionary using pairwise comparisons.

        @param aln_dictionary: alignment dictionary
        @type  aln_dictionary: dict

        @return: a dictionary of dictionaries with the sequence name as the
        top key. Each sub dictionary then has the keys: 
         - 'name' - str, sequence name
         - 'seq' - str, its aligned sequence
         - 'template_info' - list of the same length as the 'key'
             sequence excluding deletions. The number of sequences
             in the multiple alignment that contain information at
             this position.
         - 'ID' - dict, sequence identity in percent comparing the
            'key'  sequence to all other sequences (excluding deletions)
         - 'info_ID' - dict, same as 'ID' but compared to the template
             sequence length (i.e excluding deletions and insertions
             in the 'key' sequence )
         - 'cov_ID' - dict, same as 'info_ID' but insertions are defined
             comparing to all template sequences (i.e where
             'template_info' is zero )
        @rtype: dict
        """
        ## loop over all sequences in alignment
        for i in self.sequences_name:
            template_names = []

            ## don't compare to self, remove current sequence
            for name in self.sequences_name:
                if name != i:
                    template_names.append(name)

            ## loop over all sequences in alignment
            info_ID, ID, cov_ID  = {}, {}, {}
            for y in self.sequences_name:
##                identity = 0
##                info_identity = 0
##                cov_identity = 0
                nb_of_identities = 0
                nb_of_template = 0 
                template_info = []
                nb_of_residues = 0

                ## loop over the full length of the alignment
                for w in range(len(aln_dictionary["target"]["seq"])):

                    ## skip deletions
                    nb_of_info_res=0
                    if aln_dictionary[i]["seq"][w] != '-':
                        nb_of_residues += 1

                        ## count identities
                        if(aln_dictionary[i]["seq"][w] == \
                           aln_dictionary[y]["seq"][w]):
                            nb_of_identities += 1

                        ## length excluding insertions
                        if aln_dictionary[y]["seq"][w] != '-':
                            nb_of_template += 1

                        ## loop over all sequences but self
                        for z in template_names:
                            ## count how many sequences contain alignment
                            ## information at this position
                            if aln_dictionary[z]["seq"][w] != '-':
                                nb_of_info_res += 1

                        template_info.append(nb_of_info_res)

                ## number of positions in which any other sequence
                ## contains alignment information
                nb_cov_res = N.sum( N.greater(template_info, 0) )

                ## calculate identities
                info_ID[y] = ID[y] = cov_ID[y] = 0
                ## RAIK: Hack, nb_of_... can turn 0 for fragmented alignments
                if nb_of_template:
                    info_ID[y] = 100. * nb_of_identities / nb_of_template
                if nb_of_residues:
                    ID[y]      = 100. * nb_of_identities / nb_of_residues
                if nb_cov_res:
                    cov_ID[y]  = 100. * nb_of_identities / nb_cov_res

            aln_dictionary[i]["info_ID"] = info_ID 
            aln_dictionary[i]["ID"] = ID
            aln_dictionary[i]["cov_ID"] = cov_ID
            aln_dictionary[i]["template_info"] = template_info

        return aln_dictionary        
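
Each of the three identity scores divides the same count of matching columns by a different denominator; a toy sketch for one pair of aligned sequences:

    seq_i = "AC-DEF"
    seq_y = "ACGD-F"

    # columns where seq_i has a residue (deletions in seq_i are skipped)
    cols = [(a, b) for a, b in zip(seq_i, seq_y) if a != '-']
    identities = sum(a == b for a, b in cols)       # matching residues
    n_residues = len(cols)                          # denominator for 'ID'
    n_template = sum(b != '-' for a, b in cols)     # denominator for 'info_ID'
    print(100.0 * identities / n_residues,          # -> 80.0
          100.0 * identities / n_template)          # -> 100.0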
Code example #27
            self.fluxColumns = tuple(fluxList)    # the get_data function interface requires a tuple

            # Build the various columns arrays with the get_data function.
            # We read raw fluxes and errors into the flux,fluxerr arrays.
            # They are afterwards transformed to magnitudes

            pdb.set_trace()   #xingxing
            flux[i,:],fluxerr[i,:] = tableio.get_data(catalog,self.fluxColumns)
            flux[i,:] = pUtil.deNAN(flux[i,:])

            # Those objects with flux equal or less than 0 are assigned a magnitude of 99
            # and a limiting magnitude equal to their SExtractor photometric error. This
            # is interpreted by BPZ as a nondetection with zero flux and 1-sigma error
            # equal to the limiting magnitude

            nondetected = Numeric.less_equal(flux[i,:],0.0)*Numeric.greater(fluxerr[i,:],0.0)

            # Those objects with error flux and flux equal to 0 are assigned a magnitude of -99
            # and a flux of 0, which is interpreted by SExtractor as a non-observed object

            nonobserved = Numeric.less_equal(fluxerr[i,:],0.0)

            # When flux error > 100*(flux), mark as nonobserved (Benitez, 24-Oct-03).
            
            nonobserved = Numeric.where(fluxerr[i,:] > 100*(abs(flux[i,:])),1.0,nonobserved[:])
            
            detected    = Numeric.logical_not(nonobserved+nondetected)

            # Get the zero point for the final magnitudes

            zpoint  = fUtil.zeroPoint(fitsfile)       # pass the fits file to zeroPoint func
Code example #28
def createHexInp(recPdb,
                 recModel,
                 ligPdb,
                 ligModel,
                 comPdb=None,
                 outFile=None,
                 macDock=None,
                 silent=0,
                 sol=512):
    """
    Prepare a Hex macro file for the docking of the receptor(s)
    against ligand(s).

    @param recPdb: hex-formatted PDB
    @type  recPdb: str
    @param recModel: hex-formatted PDB
    @type  recModel: str
    @param ligPdb: PDBModel, get distances from this one
    @type  ligPdb: PDBModel
    @param ligModel: PDBModel, get distances from this one
    @type  ligModel: PDBModel
    @param comPdb: reference PDB
    @type  comPdb: str
    @param outFile: base of file name for mac and out
    @type  outFile: str

    @param macDock: None -> hex decides (from the size of the molecule),
                    1 -> force macroDock, 0-> force off (default: None)
    @type  macDock: None|1|0
    @param silent: don't print distances and macro warnings (default: 0)
    @type  silent: 0|1
    @param sol: number of solutions that Hex should save (default: 512)
    @type  sol: int

    @return: HEX macro file name, HEX out file generated by the macro,
             macro docking status
    @rtype: str, str, boolean
    """
    ## files and names
    recCode = t.stripFilename(recPdb)[0:4]
    ligCode = t.stripFilename(ligPdb)[0:4]

    outFile = outFile or recCode + '-' + ligCode

    ## hex macro name
    macName = t.absfile(outFile + '_hex.mac')

    ## hex rotation matrix output name
    outName_all = t.absfile(outFile + '_hex.out')
    outName_clust = t.absfile(outFile + '_hex_cluster.out')

    ## add surface profiles if not there
    if not recModel.atoms.has_key('relAS'):
        #t.flushPrint('\nCalculating receptor surface profile')
        rec_asa = PDBDope(recModel)
        rec_asa.addSurfaceRacer()
    if not ligModel.atoms.has_key('relAS'):
        #t.flushPrint('\nCalculating ligand surface profile')
        lig_asa = PDBDope(ligModel)
        lig_asa.addSurfaceRacer()

    ## surface masks, > 95% exposed
    rec_surf_mask = N.greater(recModel.profile('relAS'), 95)
    lig_surf_mask = N.greater(ligModel.profile('relAS'), 95)

    ## maximum and minimum distance from centre of mass to any surface atom
    recMax, recMin = centerSurfDist(recModel, rec_surf_mask)
    ligMax, ligMin = centerSurfDist(ligModel, lig_surf_mask)

    ## approximate max and min centre-to-centre distance
    maxDist = recMax + ligMax
    minDist = recMin + ligMin

    ## molecular separation and search range to be used in the docking
    molSep = (maxDist + minDist) / 2
    molRange = 2 * (maxDist - molSep)

    if not silent:
        print 'Docking setup: %s\nRecMax: %.1f RecMin: %.1f\nLigMax: %.1f LigMin: %.1f\nMaxDist: %.1f MinDist: %.1f\nmolecular_separation: %.1f r12_range: %.1f\n' % (
            outFile, recMax, recMin, ligMax, ligMin, maxDist, minDist, molSep,
            molRange)

    if recMax > 30 and ligMax > 30 and not silent:
        print '\nWARNING! Both the receptor and ligand radii are ',
        print 'greater than 30A.\n'

    ## determine docking mode to use
    macroDocking = 0

    if macDock == None:
        if recMax > 35 and not silent:
            print '\nReceptor has a radius that exceeds 35A ',
            print '-> Macro docking will be used'
            macroDocking = 1
    else:
        macroDocking = macDock

    #####################
    ## write macro file

    macOpen = open(macName, 'w')

    macOpen.write('# -- ' + macName + ' --\n')
    macOpen.write(' \n')
    macOpen.write('open_receptor ' + t.absfile(recPdb) + '\n')
    macOpen.write('open_ligand ' + t.absfile(ligPdb) + '\n')

    if comPdb and comPdb[-4:] == '.pdb':
        macOpen.write('open_complex ' + comPdb + '\n')

    macOpen.write('\n')

    head = """
# -------------- general settings ----------------
disc_cache 1                   # disc cache on (0 off)
docking_sort_mode 1            # Sort solutions by cluster (0 by energy)
docking_cluster_mode 1         # Display all clusters (0 display best)
docking_cluster_threshold 2.00
# docking_cluster_bumps  number

# ------------ molecule orientation --------------
molecule_separation %(separation)i
commit_view """ % ({
        'separation': round(molSep)
    })

    macro = """
# -------------- macro docking -------------------
macro_min_coverage 25
macro_sphere_radius 15
macro_docking_separation 25
activate_macro_model"""


    tail = """
# -------------- docking setup -------------------
docking_search_mode 0          # full rotational search

receptor_range_angle  180      # 0, 15, 30, 45, 60, 75, 90, 180
docking_receptor_samples 720   # 362, 492, 642, 720, 980, 1280

ligand_range_angle  180
docking_ligand_samples 720

twist_range_angle 360          # 0, 15, 30, 60, 90, 180, 360
docking_alpha_samples 128      # 64, 128, 256

r12_step 0.500000              # 0.1, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2
r12_range %(range)i

docking_radial_filter 0        # Radial Envelope Filter - None

grid_size 0.600                # 0.4, 0.5, 0.6, 0.75, 1.0
# docking_electrostatics 0       # use only surface complementarity
docking_electrostatics 1      # use electrostatic term for scoring clusters

docking_main_scan 16     # 
docking_main_search 26

max_docking_solutions %(nr_sol)i # number of solutions to save

# -------------- post-processing ----------------
docking_refine 0    # None
#  docking_refine 1    # Backbone Bumps
#  docking_refine 2    # MM energies
#  docking_refine 3    # MM minimization

# ---------------- run docking ------------------
activate_docking
#  save_docking %(output_clust)s
#  save_range 1 512 ./ dock .pdb

# ------------ also save all solutions ----------
docking_sort_mode 0            # Sort solutions by energy (1 by cluster)
save_docking %(output_all)s""" \
         %({'range':round(molRange), 'output_all':outName_all,
            'nr_sol':int(sol), 'output_clust':outName_clust} )

    macOpen.writelines(head)

    ## macro docking will not work with multiple models, if both are added to
    ## the hex macro file - macrodocking will be skipped during the docking run
    if macroDocking:
        macOpen.writelines(macro)

    macOpen.writelines(tail)

    macOpen.close()

    return macName, outName_all, macroDocking
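
The separation arithmetic simplifies: with molSep = (maxDist + minDist)/2, the search range 2*(maxDist - molSep) equals maxDist - minDist. A quick check with made-up radii:

    recMax, recMin = 28.0, 12.0   # receptor: max/min surface distance from centre
    ligMax, ligMin = 20.0, 8.0    # ligand: same

    maxDist = recMax + ligMax             # 48.0
    minDist = recMin + ligMin             # 20.0
    molSep = (maxDist + minDist) / 2      # 34.0, centre-to-centre separation
    molRange = 2 * (maxDist - molSep)
    assert molRange == maxDist - minDist == 28.0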
Code example #29
File: CheckIdentities.py Project: ostrokach/biskit
    def identities(self, aln_dictionary):
        """
        Create a dictionary that contains information about all the
        alignments in the aln_dictionary using pairwise comparisons.

        @param aln_dictionary: alignment dictionary
        @type  aln_dictionary: dict

        @return: a dictionary of dictionaries with the sequence name as the
        top key. Each sub dictionary then has the keys: 
         - 'name' - str, sequence name
         - 'seq' - str, its aligned sequence
         - 'template_info' - list of the same length as the 'key'
             sequence excluding deletions. The number of sequences
             in the multiple alignment that contain information at
             this position.
         - 'ID' - dict, sequence identity in percent comparing the
            'key'  sequence to all other sequences (excluding deletions)
         - 'info_ID' - dict, same as 'ID' but compared to the template
             sequence length (i.e excluding deletions and insertions
             in the 'key' sequence )
         - 'cov_ID' - dict, same as 'info_ID' but insertions are defined
             comparing to all template sequences (i.e where
             'template_info' is zero )
        @rtype: dict
        """
        ## loop over all sequences in alignment
        for i in self.sequences_name:
            template_names = []

            ## don't compare to self, remove current sequence
            for name in self.sequences_name:
                if name != i:
                    template_names.append(name)

            ## loop over all sequences in alignment
            info_ID, ID, cov_ID = {}, {}, {}
            for y in self.sequences_name:
                ##                identity = 0
                ##                info_identity = 0
                ##                cov_identity = 0
                nb_of_identities = 0
                nb_of_template = 0
                template_info = []
                nb_of_residues = 0

                ## loop over the full length of the alignment
                for w in range(len(aln_dictionary["target"]["seq"])):

                    ## skip deletions
                    nb_of_info_res = 0
                    if aln_dictionary[i]["seq"][w] != '-':
                        nb_of_residues += 1

                        ## count identities
                        if(aln_dictionary[i]["seq"][w] == \
                           aln_dictionary[y]["seq"][w]):
                            nb_of_identities += 1

                        ## length excluding insertions
                        if aln_dictionary[y]["seq"][w] != '-':
                            nb_of_template += 1

                        ## loop over all sequences but self
                        for z in template_names:
                            ## count how many sequences contain alignment
                            ## information at this position
                            if aln_dictionary[z]["seq"][w] != '-':
                                nb_of_info_res += 1

                        template_info.append(nb_of_info_res)

                ## number of positions in which any other sequence
                ## contains alignment information
                nb_cov_res = N.sum(N.greater(template_info, 0))

                ## calculate identities
                info_ID[y] = ID[y] = cov_ID[y] = 0
                ## RAIK: Hack, nb_of_... can turn 0 for fragmented alignments
                if nb_of_template:
                    info_ID[y] = 100. * nb_of_identities / nb_of_template
                if nb_of_residues:
                    ID[y] = 100. * nb_of_identities / nb_of_residues
                if nb_cov_res:
                    cov_ID[y] = 100. * nb_of_identities / nb_cov_res

            aln_dictionary[i]["info_ID"] = info_ID
            aln_dictionary[i]["ID"] = ID
            aln_dictionary[i]["cov_ID"] = cov_ID
            aln_dictionary[i]["template_info"] = template_info

        return aln_dictionary
Code example #30
File: OWHypTest.py Project: acopar/orange-bio
    def senddata(self):
        """computes selectionList, partitions the examples and updates infoc;
        sends out selectionList and selected/other dataStructure or None;
        """
        if self.dataStructure and self.ps.shape[1]:
            # set selectionList
            alphas = [self.alphaA, self.alphaB, self.alphaI]
            selectors = [self.selectorA, self.selectorB, self.selectorI]
            selectionList = Numeric.ones((self.numExamples,))
            boxSelectors = [self.boxSelectorA, self.boxSelectorB, self.boxSelectorI]
            for si in range(3):
                try:
##                    if selectors[si] and self.anovaType in [[0,1,3,4],[2,3,4],[4]][si]:
                    if selectors[si] and boxSelectors[si].isEnabled():
                        selectionList = Numeric.logical_and(selectionList, Numeric.less(self.ps[si], float(alphas[si])))
                except ValueError:
                    print "Warning: cannot convert %s to float" % str(alphas[si])
            self.infoc.setText('Sending out data...')
            
            if self.sendProbabilities:
                # create example table with probabilities
##                print self.ps
##                print Numeric.transpose(self.ps).shape
                etProb = orange.ExampleTable(orange.Domain([orange.FloatVariable("Factor A p-val"),orange.FloatVariable("Factor B p-val"),orange.FloatVariable("Interaction p-val")]), Numeric.transpose(self.ps))
                # in etProb, convert p-val to meta attribute
                domProb = orange.Domain([])
                domProb.addmetas(dict(zip([orange.newmetaid(),orange.newmetaid(),orange.newmetaid()], etProb.domain.variables)))
                etProb = orange.ExampleTable(domProb, etProb)
            else:
                # create a new etProb without attributes/metas, with one empty row per example
                etProb = orange.ExampleTable(orange.Domain([]), Numeric.zeros((selectionList.shape[0],0)))

            # partition dataStructure and send out data
            selectionList = selectionList.tolist()
            self.send("Example Selection", (self.selectorName, selectionList))
            dataStructS = []
            dataStructN = []
            self.progressBarInit()

            if self.sendNotSelectedData:
                pbStep = 50./len(self.dataStructure)
            else:
                pbStep = 100./len(self.dataStructure)

            for (dsName, etList) in self.dataStructure:
                etListS = [et.select(selectionList) for et in etList]
                for i in range(len(etList)):
                    # append probabilities (if etProb not empty)
                    etListS[i] = orange.ExampleTable([etListS[i], etProb.select(selectionList)])
                    # add name
                    etListS[i].name = etList[i].name
                dataStructS.append((dsName, etListS))
                self.progressBarAdvance(pbStep)
            self.send("Selected Structured Data", dataStructS)

            if self.sendNotSelectedData:
                for (dsName, etList) in self.dataStructure:
                    etListN = [et.select(selectionList, negate=1) for et in etList]
                    for i in range(len(etList)):
                        # append probabilities (if etProb not empty)
                        etListN[i] = orange.ExampleTable([etListN[i], etProb.select(selectionList, negate=1)])
                        # add name
                        etListN[i].name = etList[i].name
                    dataStructN.append((dsName, etListN))
                    self.progressBarAdvance(pbStep)
                self.send("Other Structured Data", dataStructN)
            else:
                self.send("Other Structured Data", None)

            self.progressBarFinished()
            # report the number of selected examples
            numExamples = Numeric.add.reduce(Numeric.greater(selectionList, 0))
            self.infoc.setText('Total of %d example%s match criteria.' % (numExamples, ['', 's'][int(numExamples!=1)]))
        else:
            self.send("Example Selection", None)
            self.send("Selected Structured Data", None)
            self.send("Other Structured Data", None)
Code example #31
0
def kNNimputeMA(arr2d, K=20, callback=None):
    """Returns a new 2D MA.array with missing values imputed from K nearest neighbours.
    Find K rows (axis 0) with the most similar values where similarity measure corresponds to weighted Euclidean distance.
    Imputed value = weighted average of the corresponding values of K nearest neighbours,
    where weights equal to tricubic distribution of distances to all rows.
    Impute missing rows by average over all rows.
    Version: 30.8.2005
    """
    arr2d = MA.asarray(arr2d)
    assert len(arr2d.shape) == 2, "2D array expected"
    # make a copy for imputation
    aImp2 = MA.array(arr2d)
    # leave out columns with 0 known values (columnInd: non-zero columns)
    columnCond = Numeric.greater(MA.count(arr2d, axis=0), 0)
    columnIndAll = Numeric.arange(arr2d.shape[1])
    columnInd = Numeric.compress(columnCond, columnIndAll)
    # impute the rows where 0 < #known_values < #non_zero_columns, i.e. exclude the rows with 0 and all (non-zero-column) values
    countByRows = MA.count(arr2d, axis=1)
    for rowIdx in Numeric.compress(
            Numeric.logical_and(Numeric.greater(countByRows, 0),
                                Numeric.less(countByRows, columnInd.shape[0])),
            Numeric.arange(arr2d.shape[0])):
        rowResized = MA.resize(arr2d[rowIdx], arr2d.shape)
        diff = arr2d - rowResized
        distances = MA.sqrt(
            MA.add.reduce((diff)**2, 1) / MA.count(diff, axis=1))
        # nearest neighbours row indices (without the current row index)
        indSorted = MA.argsort(distances)[1:]
        distSorted = distances.take(indSorted)
        # number of distances different from MA.masked
        numNonMasked = distSorted.shape[0] - Numeric.add.reduce(
            Numeric.asarray(MA.getmaskarray(distSorted), Numeric.Int))
        # number of distances to account for (K or less)
        if numNonMasked > 1:
            weightsSorted = MA.power(
                1 - MA.power(distSorted / distSorted[numNonMasked - 1], 3),
                3)  # tricubic distribution of all weights
        else:
            weightsSorted = Numeric.ones(distSorted.shape[0])
        # compute average for each column separately in order to account for K non-masked values
        colInd4CurrRow = Numeric.compress(
            Numeric.logical_and(MA.getmaskarray(arr2d[rowIdx]), columnCond),
            columnIndAll)
        for colIdx in colInd4CurrRow:
            # column values sorted by distances
            columnVals = arr2d[:, colIdx].take(indSorted)
            # take only those weights where columnVals does not equal MA.masked
            weightsSortedCompressed = MA.compress(
                1 - MA.getmaskarray(columnVals), weightsSorted)
            # impute from K (or possibly less) values
            aImp2[rowIdx,
                  colIdx] = MA.average(columnVals.compressed()[:K],
                                       weights=weightsSortedCompressed[:K])
        if callback:
            callback()
    # impute the unknown rows with average profile
    avrgRow = MA.average(arr2d, 0)
    for rowIdx in Numeric.compress(Numeric.equal(countByRows, 0),
                                   Numeric.arange(arr2d.shape[0])):
        aImp2[rowIdx] = avrgRow
        if callback:
            callback()
    return aImp2
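
The tricubic weighting at the heart of kNNimputeMA can be sketched in a few
lines, with numpy standing in for the legacy Numeric/MA modules and
illustrative distances and values:

import numpy as np

dist = np.array([0.2, 0.5, 1.0, 2.0])   # sorted distances to the neighbours
w = (1 - (dist / dist[-1]) ** 3) ** 3   # ~1 for near rows, 0 at the far end
vals = np.array([3.0, 4.0, 5.0, 6.0])   # the neighbours' values in one column
imputed = np.average(vals, weights=w)   # ~3.88: the nearest neighbours dominate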
Code example #32
0
    def draw(self):
        # XXX This method is not speed-optimized. I just wrote it to
        # get the job done. (Nonetheless, it seems faster than the C
        # version commented out above.)

        p = self.parameters  # shorthand
        if p.center is not None:
            if not hasattr(VisionEgg.config, "_GAVE_CENTER_DEPRECATION"):
                logger = logging.getLogger('VisionEgg.Dots')
                logger.warning("Specifying DotArea2D by deprecated "
                               "'center' parameter deprecated.  Use "
                               "'position' parameter instead.  (Allows "
                               "use of 'anchor' parameter to set to "
                               "other values.)")
                VisionEgg.config._GAVE_CENTER_DEPRECATION = 1
            p.anchor = 'center'
            p.position = p.center[0], p.center[1]  # copy values (don't copy ref to tuple)
        if p.on:
            # calculate center
            center = VisionEgg._get_center(p.position, p.anchor, p.size)

            if p.anti_aliasing:
                if len(p.color) == 4 and not self._gave_alpha_warning:
                    if p.color[3] != 1.0:
                        logger = logging.getLogger('VisionEgg.Dots')
                        logger.warning("The parameter anti_aliasing is "
                                       "set to true in the DotArea2D "
                                       "stimulus class, but the color "
                                       "parameter specifies an alpha "
                                       "value other than 1.0.  To "
                                       "acheive the best anti-aliasing, "
                                       "ensure that the alpha value for "
                                       "the color parameter is 1.0.")
                        self._gave_alpha_warning = 1
                gl.glEnable(gl.GL_POINT_SMOOTH)
                # allow max_alpha value to control blending
                gl.glEnable(gl.GL_BLEND)
                gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
            else:
                gl.glDisable(gl.GL_BLEND)

            now_sec = VisionEgg.time_func()
            if self.start_times_sec is not None:
                # compute extinct dots and generate new positions
                replace_indices = Numeric.nonzero(
                    Numeric.greater(now_sec - self.start_times_sec,
                                    p.dot_lifespan_sec))
                Numeric.put(self.start_times_sec, replace_indices, now_sec)

                new_x_positions = RandomArray.uniform(0.0, 1.0,
                                                      (len(replace_indices), ))
                Numeric.put(self.x_positions, replace_indices, new_x_positions)

                new_y_positions = RandomArray.uniform(0.0, 1.0,
                                                      (len(replace_indices), ))
                Numeric.put(self.y_positions, replace_indices, new_y_positions)

                new_random_directions_radians = RandomArray.uniform(
                    0.0, 2 * math.pi, (len(replace_indices), ))
                Numeric.put(self.random_directions_radians, replace_indices,
                            new_random_directions_radians)
            else:
                # initialize dot extinction values to random (uniform) distribution
                self.start_times_sec = RandomArray.uniform(
                    now_sec - p.dot_lifespan_sec, now_sec,
                    (self.constant_parameters.num_dots, ))

            signal_num_dots = int(
                round(p.signal_fraction * self.constant_parameters.num_dots))
            time_delta_sec = now_sec - self.last_time_sec
            self.last_time_sec = now_sec  # reset for next loop
            x_increment_normalized = math.cos(
                p.signal_direction_deg / 180.0 * math.pi
            ) * p.velocity_pixels_per_sec / p.size[0] * time_delta_sec
            y_increment_normalized = -math.sin(
                p.signal_direction_deg / 180.0 * math.pi
            ) * p.velocity_pixels_per_sec / p.size[1] * time_delta_sec
            self.x_positions[:signal_num_dots] += x_increment_normalized
            self.y_positions[:signal_num_dots] += y_increment_normalized

            num_random_dots = self.constant_parameters.num_dots - signal_num_dots
            random_x_increment_normalized = Numeric.cos(
                self.random_directions_radians[signal_num_dots:]
            ) * p.velocity_pixels_per_sec / p.size[0] * time_delta_sec
            random_y_increment_normalized = -Numeric.sin(
                self.random_directions_radians[signal_num_dots:]
            ) * p.velocity_pixels_per_sec / p.size[1] * time_delta_sec
            self.x_positions[signal_num_dots:] += random_x_increment_normalized
            self.y_positions[signal_num_dots:] += random_y_increment_normalized

            self.x_positions = Numeric.fmod(self.x_positions, 1.0)  # wrap
            self.y_positions = Numeric.fmod(self.y_positions, 1.0)

            self.x_positions = Numeric.fmod(self.x_positions + 1,
                                            1.0)  # wrap again for values < 0
            self.y_positions = Numeric.fmod(self.y_positions + 1, 1.0)

            xs = (self.x_positions - 0.5) * p.size[0] + center[0]
            ys = (self.y_positions - 0.5) * p.size[1] + center[1]

            if len(p.color) == 3:
                gl.glColor3f(*p.color)
            elif len(p.color) == 4:
                gl.glColor4f(*p.color)
            gl.glPointSize(p.dot_size)

            # Save the modelview matrix
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glPushMatrix()

            gl.glDisable(gl.GL_TEXTURE_2D)

            if p.depth is None:
                depth = 0.0
            else:
                gl.glEnable(gl.GL_DEPTH_TEST)
                depth = p.depth
            zs = (depth, ) * len(xs)  # make N tuple with repeat value of depth
            draw_dots(xs, ys, zs)
            if p.anti_aliasing:
                gl.glDisable(gl.GL_POINT_SMOOTH)  # turn off
            gl.glPopMatrix()
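
The double fmod above is the whole wrapping trick: a single fmod leaves
negative coordinates negative, so the positions are shifted by +1 and
wrapped a second time. A sketch with numpy standing in for Numeric:

import numpy as np

x = np.array([-0.25, 0.3, 1.7])
x = np.fmod(x, 1.0)        # -> [-0.25, 0.3, 0.7]  (one value still negative)
x = np.fmod(x + 1, 1.0)    # -> [ 0.75, 0.3, 0.7]  (all inside [0, 1))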
Code example #33
0
File: hexTools.py Project: ostrokach/biskit
def createHexInp( recPdb, recModel, ligPdb, ligModel, comPdb=None,
                  outFile=None, macDock=None, silent=0, sol=512 ):
    """
    Prepare a Hex macro file for the docking of the receptor(s)
    against ligand(s).

    @param recPdb: hex-formatted receptor PDB file
    @type  recPdb: str
    @param recModel: receptor PDBModel, get distances from this one
    @type  recModel: PDBModel
    @param ligPdb: hex-formatted ligand PDB file
    @type  ligPdb: str
    @param ligModel: ligand PDBModel, get distances from this one
    @type  ligModel: PDBModel
    @param comPdb: reference PDB
    @type  comPdb: str
    @param outFile: base of file name for mac and out
    @type  outFile: str

    @param macDock: None -> hex decides (from the size of the molecule),
                    1 -> force macroDock, 0-> force off (default: None)
    @type  macDock: None|1|0
    @param silent: don't print distances and macro warnings (default: 0)
    @type  silent: 0|1
    @param sol: number of solutions that Hex should save (default: 512)
    @type  sol: int

    @return: Hex macro file name, Hex output file generated by the macro,
             macro docking status
    @rtype: str, str, boolean
    """
    ## files and names
    recCode = t.stripFilename( recPdb )[0:4]          
    ligCode = t.stripFilename( ligPdb )[0:4]

    outFile = outFile or recCode + '-' + ligCode

    ## hex macro name
    macName = t.absfile( outFile + '_hex.mac' )

    ## hex rotation matrix output name
    outName_all = t.absfile( outFile + '_hex.out'  )
    outName_clust = t.absfile( outFile + '_hex_cluster.out')

    ## add surface profiles if not there
    if not recModel.atoms.has_key('relAS'):
        #t.flushPrint('\nCalculating receptor surface profile')
        rec_asa = PDBDope( recModel )
        rec_asa.addSurfaceRacer()
    if not ligModel.atoms.has_key('relAS'):
        #t.flushPrint('\nCalculating ligand surface profile')
        lig_asa = PDBDope( ligModel )
        lig_asa.addSurfaceRacer()

    ## surface masks, > 95% exposed
    rec_surf_mask = N.greater( recModel.profile('relAS'), 95 )
    lig_surf_mask = N.greater( ligModel.profile('relAS'), 95 )

    ## maximum and minimum distance from the centre of mass to any surface atom
    recMax, recMin = centerSurfDist( recModel, rec_surf_mask )
    ligMax, ligMin = centerSurfDist( ligModel, lig_surf_mask )

    ## approximate max and min centre-to-centre distance
    maxDist = recMax + ligMax 
    minDist = recMin + ligMin

    ## molecular separation and search range to be used in the docking
    molSep = ( maxDist + minDist ) / 2
    molRange = 2 * ( maxDist - molSep )

    if not silent:
        print 'Docking setup: %s\nRecMax: %.1f RecMin: %.1f\nLigMax: %.1f LigMin: %.1f\nMaxDist: %.1f MinDist: %.1f\nmolecular_separation: %.1f r12_range: %.1f\n'%(outFile, recMax, recMin, ligMax, ligMin, maxDist, minDist, molSep, molRange)

    if recMax > 30 and ligMax > 30 and not silent:
        print '\nWARNING! Both the receptor and ligand radii are ',
        print 'greater than 30A.\n'

    ## determine docking mode to use
    macroDocking = 0

    if macDock is None:
        if recMax > 35 and not silent:
            print '\nReceptor has a radius that exceeds 35A ',
            print '-> Macro docking will be used'
            macroDocking = 1
    else:
        macroDocking = macDock

    #####################
    ## write macro file

    macOpen = open( macName, 'w')

    macOpen.write('# -- ' + macName + ' --\n')
    macOpen.write(' \n')
    macOpen.write('open_receptor '+ t.absfile(recPdb) +'\n')
    macOpen.write('open_ligand '+ t.absfile(ligPdb) +'\n')

    if comPdb and comPdb[-4:] == '.pdb':
        macOpen.write('open_complex '+comPdb+'\n')

    macOpen.write('\n')

    head = """
# -------------- general settings ----------------
disc_cache 1                   # disc cache on (0 off)
docking_sort_mode 1            # Sort solutions by cluster (0 by energy)
docking_cluster_mode 1         # Display all clusters (0 display best)
docking_cluster_threshold 2.00
# docking_cluster_bumps  number

# ------------ molecule orientation --------------
molecule_separation %(separation)i
commit_view """%({'separation': round(molSep)} )


    macro ="""
# -------------- macro docking -------------------
macro_min_coverage 25
macro_sphere_radius 15
macro_docking_separation 25
activate_macro_model"""


    tail = """
# -------------- docking setup -------------------
docking_search_mode 0          # full rotational search

receptor_range_angle  180      # 0, 15, 30, 45, 60, 75, 90, 180
docking_receptor_samples 720   # 362, 492, 642, 720, 980, 1280

ligand_range_angle  180
docking_ligand_samples 720

twist_range_angle 360          # 0, 15, 30, 60, 90, 180, 360
docking_alpha_samples 128      # 64, 128, 256

r12_step 0.500000              # 0.1, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2
r12_range %(range)i

docking_radial_filter 0        # Radial Envelope Filter - None

grid_size 0.600                # 0.4, 0.5, 0.6, 0.75, 1.0
# docking_electrostatics 0       # use only surface complementarity
docking_electrostatics 1      # use electrostatic term for scoring clusters

docking_main_scan 16     # 
docking_main_search 26

max_docking_solutions %(nr_sol)i # number of solutions to save

# -------------- post-processing ----------------
docking_refine 0    # None
#  docking_refine 1    # Backbone Bumps
#  docking_refine 2    # MM energies
#  docking_refine 3    # MM minimization

# ---------------- run docking ------------------
activate_docking
#  save_docking %(output_clust)s
#  save_range 1 512 ./ dock .pdb

# ------------ also save all solutions ----------
docking_sort_mode 0            # Sort solutions by energy (1 by cluster)
save_docking %(output_all)s""" \
         %({'range':round(molRange), 'output_all':outName_all,
            'nr_sol':int(sol), 'output_clust':outName_clust} )

    macOpen.writelines( head )

    ## macro docking will not work with multiple models; if both are added to
    ## the hex macro file, macro docking is skipped during the docking run
    if macroDocking:
        macOpen.writelines( macro )

    macOpen.writelines( tail )

    macOpen.close()

    return macName, outName_all, macroDocking
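
A worked example of the separation and search-range arithmetic used above
(hypothetical radii, in Angstrom):

recMax, recMin = 28.0, 12.0             # receptor: farthest / nearest surface atom
ligMax, ligMin = 18.0,  8.0             # ligand
maxDist = recMax + ligMax               # 46.0, largest centre-to-centre distance
minDist = recMin + ligMin               # 20.0, smallest
molSep   = (maxDist + minDist) / 2      # 33.0 -> molecule_separation
molRange = 2 * (maxDist - molSep)       # 26.0 -> r12_range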