def correlation(x_items, y_items):
    """Returns Pearson correlation between x and y, and its significance."""
    sum_x = sum_y = sum_x_sq = sum_y_sq = sum_xy = n = 0
    for x, y in zip(x_items, y_items):
        n += 1
        sum_x += x
        sum_x_sq += x * x
        sum_y += y
        sum_y_sq += y * y
        sum_xy += x * y
    try:
        r = 1.0 * ((n * sum_xy) - (sum_x * sum_y)) / \
            (sqrt((n * sum_x_sq) - (sum_x * sum_x)) * \
             sqrt((n * sum_y_sq) - (sum_y * sum_y)))
    except (ZeroDivisionError, ValueError):  #no variation
        r = 0.0
    #check we didn't get a naughty value for r due to rounding error
    if r > 1.0:
        r = 1.0
    elif r < -1.0:
        r = -1.0
    if n < 3:
        prob = 1
    else:
        try:
            t = r / sqrt((1 - (r * r)) / (n - 2))
            prob = tprob(t, n - 2)
        except ZeroDivisionError:  #r was presumably 1
            prob = 0
    return (r, prob)
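# Illustrative usage sketch (not from the original source): exercising
# correlation() on a small, nearly linear data set. Assumes correlation() is
# importable along with the sqrt and tprob helpers the snippet above relies on;
# the numbers below are made up for illustration.
x = [1.0, 2.0, 3.0, 4.0, 5.0]
y = [2.0, 4.1, 5.9, 8.2, 9.8]
r, prob = correlation(x, y)
# r is close to +1.0; prob is the significance computed via tprob(t, n - 2)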
def writepov(self, file, dispdef):
    if self.hidden:
        return
    if self.is_disabled():
        return #bruce 050421
    c = self.posn()
    a = self.axen()

    xrot = -atan2(a[1], sqrt(1 - a[1] * a[1])) * 180 / pi
    yrot = atan2(a[0], sqrt(1 - a[0] * a[0])) * 180 / pi

    file.write("lmotor(" \
               + povpoint([self.width * 0.5, self.width * 0.5, self.length * 0.5]) + "," \
               + povpoint([self.width * -0.5, self.width * -0.5, self.length * -0.5]) + "," \
               + "<0.0, " + str(yrot) + ", 0.0>," \
               + "<" + str(xrot) + ", 0.0, 0.0>," \
               + povpoint(c) + "," \
               + "<" + str(self.color[0]) + "," + str(self.color[1]) + "," + str(self.color[2]) + ">)\n")

    for a in self.atoms:
        if vlen( c - a.posn() ) > 0.001:
            #bruce 060808 add condition to see if this fixes bug 719 (two places in this file)
            file.write("spoke(" + povpoint(c) + "," + povpoint(a.posn()) + "," + str(self.sradius) \
                       + ",<" + str(self.color[0]) + "," + str(self.color[1]) + "," + str(self.color[2]) + ">)\n")
def txt_sqrt(norm, numeric=False):
    if numeric:
        return repr(sqrt(norm))
    else:
        if sqrt(norm) % 1 == 0:
            return str(sqrt(norm))
        else:
            return "sqrt(" + str(norm.nom) + \
                   ("./" + str(norm.denom)) * (norm.denom != 1) + ")"
def test_euclidean_distance(self):
    """euclidean_distance: should return dist between 2 vectors or matrices
    """
    a = array([3,4])
    b = array([8,5])
    c = array([[2,3],[4,5]])
    d = array([[1,5],[8,2]])
    self.assertFloatEqual(euclidean_distance(a,b), sqrt(26))
    self.assertFloatEqual(euclidean_distance(c,d), sqrt(30))
def _getinfo_TEST(self):
    # please leave in for debugging POV-Ray lmotor macro. mark 060324
    a = self.axen()
    xrot = -atan2(a[1], sqrt(1 - a[1]*a[1])) * 180 / pi
    yrot = atan2(a[0], sqrt(1 - a[0]*a[0])) * 180 / pi
    return "[Object: Linear Motor] [Name: " + str(self.name) + "] " + \
           "[Force = " + str(self.force) + " pN] " + \
           "[Stiffness = " + str(self.stiffness) + " N/m] " + \
           "[Axis = " + str(self.axis[0]) + ", " + \
           str(self.axis[1]) + ", " + str(self.axis[2]) + "]" + \
           "[xRotation = " + str(xrot) + ", yRotation = " + str(yrot) + "]"
def stats(data):
    """
    Assumes a matrix of data with variables on the columns and
    observations on the rows.  Returns the mean, variance and
    standard error of the data.
    """
    from Numeric import average, sqrt
    mean = average(data)
    var = average((data - mean)**2)
    stderr = sqrt(var) / sqrt(len(data))
    return mean, var, stderr
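# Illustrative usage sketch (not from the original source): stats() on a small
# 1-D sample; assumes the legacy Numeric module imported inside stats() is
# installed. stderr is sqrt(var)/sqrt(n).
from Numeric import array
sample = array([1.0, 2.0, 3.0, 4.0])
mean, var, stderr = stats(sample)
# mean == 2.5, var == 1.25, stderr == sqrt(1.25)/sqrt(4), about 0.559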
def over_line(line):
    # can't use the line bbox because it covers the entire extent
    # of the line
    xdata = line.transx.positions(line.get_xdata())
    ydata = line.transy.positions(line.get_ydata())
    distances = sqrt((x - xdata)**2 + (y - ydata)**2)
    return min(distances) < epsilon
def init_cube():
    drawing_globals.cubeVertices = cubeVertices = [
        [-1.0, 1.0, -1.0], [-1.0, 1.0, 1.0],
        [1.0, 1.0, 1.0], [1.0, 1.0, -1.0],
        [-1.0, -1.0, -1.0], [-1.0, -1.0, 1.0],
        [1.0, -1.0, 1.0], [1.0, -1.0, -1.0]]

    #bruce 051117: compute this rather than letting a subroutine hardcode it as
    # a redundant constant
    flatCubeVertices = []
    for threemore in cubeVertices:
        flatCubeVertices.extend(threemore)
    flatCubeVertices = list(flatCubeVertices) #k probably not needed
    drawing_globals.flatCubeVertices = flatCubeVertices

    if 1: # remove this when it works
        flatCubeVertices_hardcoded = [-1.0, 1.0, -1.0,
                                      -1.0, 1.0, 1.0,
                                      1.0, 1.0, 1.0,
                                      1.0, 1.0, -1.0,
                                      -1.0, -1.0, -1.0,
                                      -1.0, -1.0, 1.0,
                                      1.0, -1.0, 1.0,
                                      1.0, -1.0, -1.0]
        assert flatCubeVertices == flatCubeVertices_hardcoded

    sq3 = sqrt(3.0) / 3.0
    drawing_globals.cubeNormals = [
        [-sq3, sq3, -sq3], [-sq3, sq3, sq3],
        [sq3, sq3, sq3], [sq3, sq3, -sq3],
        [-sq3, -sq3, -sq3], [-sq3, -sq3, sq3],
        [sq3, -sq3, sq3], [sq3, -sq3, -sq3]]
    drawing_globals.cubeIndices = [
        [0, 1, 2, 3], [0, 4, 5, 1], [1, 5, 6, 2],
        [2, 6, 7, 3], [0, 3, 7, 4], [4, 7, 6, 5]]
    return
def corrcoef(*args):
    """
    corrcoef(X) where X is a matrix returns a matrix of correlation
    coefficients for each row of X.

    corrcoef(x,y) where x and y are vectors returns the matrix of
    correlation coefficients for x and y.

    Numeric arrays can be real or complex.

    The correlation matrix is defined from the covariance matrix C as

        r(i,j) = C[i,j] / sqrt(C[i,i]*C[j,j])
    """
    if len(args) == 2:
        X = transpose(array([args[0]] + [args[1]]))
    elif len(args) == 1:
        X = args[0]
    else:
        raise RuntimeError, 'Only expecting 1 or 2 arguments'

    C = cov(X)
    d = resize(diagonal(C), (2, 1))
    r = divide(C, sqrt(matrixmultiply(d, transpose(d))))[0, 1]
    try:
        return r.real
    except AttributeError:
        return r
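# Illustrative usage sketch (not from the original source): the two-vector form
# of corrcoef() above. Assumes the Numeric-era helpers it uses (array, transpose,
# cov, resize, diagonal, divide, matrixmultiply, sqrt) are in scope; they appear
# to come from the old matplotlib mlab module.
x = [1.0, 2.0, 3.0, 4.0]
y = [2.1, 3.9, 6.2, 7.8]
r = corrcoef(x, y)   # a single scalar, close to +1.0 for this data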
def Length(self):
    """Length of vector

    Returns the length of the vector generated by the basis.
    """
    from Numeric import sqrt
    return sqrt(self.InnerProduct(self))
def test_vanishing_moments(self):
    """Test that coefficients in lp satisfy the vanishing moments condition
    """
    from daubfilt import daubfilt, number_of_filters

    for i in range(number_of_filters):
        D = 2*(i+1)
        P = D/2   # Number of vanishing moments
        N = P-1   # Dimension of nullspace of the matrix A
        R = P+1   # Rank of A, R = D-N = P+1 equations

        lp, hp = daubfilt(D)

        # Condition number of A grows with P, so we test only
        # the first 6 (and eps is slightly larger than machine precision)

        A = zeros((R,D), Float)  # D unknowns, D-N equations
        b = zeros((R,1), Float)  # Right hand side
        b[0] = sqrt(2)

        A[0,:] = ones(D, Float)  # Coefficients must sum to sqrt(2)
        for p in range(min(P,6)):  # the p'th vanishing moment (Cond Ap)
            for k in range(D):
                m = D-k
                A[p+1,k] = (-1)**m * k**p

        assert allclose(b, mvmul(A,lp))
def test_randomSequence(self):
    """randomSequence: 99% of new frequencies should be within 3*SD"""
    r_num, c_num = 100, 20
    num_elements = r_num * c_num
    alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    r = random([r_num, c_num])
    p = Profile(r, alpha[:c_num])
    p.normalizePositions()
    d = p.Data
    n = 1000

    #Test only works on normalized profile, b/c of 1-d below
    means = n * d
    three_stds = sqrt(d * (1 - d) * n) * 3

    a = Alignment([p.randomSequence() for x in range(n)])

    def absoluteProfile(alignment, char_order):
        f = a.columnFrequencies()
        res = zeros([len(f), len(char_order)])
        for row, freq in enumerate(f):
            for i in freq:
                col = char_order.index(i)
                res[row, col] = freq[i]
        return res

    ap = absoluteProfile(a, p.CharOrder)
    failure = abs(ap - means) > three_stds
    assert sum(sum(failure)) / num_elements <= 0.01
def test_euclidean_distance_unexpected(self):
    """euclidean_distance: works always when frames are aligned. UNEXPECTED!
    """
    a = array([3,4])
    b = array([8,5])
    c = array([[2,3],[4,5]])
    d = array([[1,5],[8,2]])
    e = array([[4,5],[4,5],[4,5]])
    f = array([1,1,1,1,1])

    self.assertFloatEqual(euclidean_distance(a,c), sqrt(4))
    self.assertFloatEqual(euclidean_distance(c,a), sqrt(4))
    self.assertFloatEqual(euclidean_distance(a,e), sqrt(6))

    #IT DOES RAISE AN ERROR WHEN THE FRAMES ARE NOT ALIGNED
    self.assertRaises(ValueError, euclidean_distance, c, e)
    self.assertRaises(ValueError, euclidean_distance, c, f)
def t_two_sample(a, b, tails=None, exp_diff=0):
    """Returns t, prob for two INDEPENDENT samples of scores a, and b.

    From Sokal and Rohlf, p 223.

    Usage:   t, prob = t_two_sample(a, b, tails, exp_diff)

    t is a float; prob is a probability.
    a and b should be lists of observations (numbers) supporting Mean,
    Count, and Variance. Need not be equal.
    tails should be None (default), 'high', or 'low'.
    exp_diff should be the expected difference in means (a-b); 0 by default.
    """
    try:
        #see if we need to back off to the single-observation for single-item
        #groups
        n1 = a.Count
        if n1 < 2:
            return t_one_observation(a.Sum, b, tails, exp_diff)
        n2 = b.Count
        if n2 < 2:
            return t_one_observation(b.Sum, a, reverse_tails(tails), exp_diff)
        #otherwise, calculate things properly
        x1 = a.Mean
        x2 = b.Mean
        df = n1 + n2 - 2
        svar = ((n1-1)*a.Variance + (n2-1)*b.Variance) / df
        t = (x1 - x2 - exp_diff) / sqrt(svar * (1/n1 + 1/n2))
    except (ZeroDivisionError, ValueError, AttributeError, TypeError):
        #bail out if the sample sizes are wrong, the values aren't numeric or
        #aren't present, etc.
        return (None, None)
    prob = t_tailed_prob(t, df, tails)
    return t, prob
def init_diamond():
    # a chunk of diamond grid, to be tiled out in 3d
    drawing_globals.sp0 = sp0 = 0.0
    #bruce 051102 replaced 1.52 with this constant (1.544),
    # re bug 900 (partial fix.)
    drawing_globals.sp1 = sp1 = DIAMOND_BOND_LENGTH / sqrt(3.0)
    sp2 = 2.0 * sp1
    sp3 = 3.0 * sp1
    drawing_globals.sp4 = sp4 = 4.0 * sp1
    digrid = [
        [[sp0, sp0, sp0], [sp1, sp1, sp1]], [[sp1, sp1, sp1], [sp2, sp2, sp0]],
        [[sp2, sp2, sp0], [sp3, sp3, sp1]], [[sp3, sp3, sp1], [sp4, sp4, sp0]],
        [[sp2, sp0, sp2], [sp3, sp1, sp3]], [[sp3, sp1, sp3], [sp4, sp2, sp2]],
        [[sp2, sp0, sp2], [sp1, sp1, sp1]], [[sp1, sp1, sp1], [sp0, sp2, sp2]],
        [[sp0, sp2, sp2], [sp1, sp3, sp3]], [[sp1, sp3, sp3], [sp2, sp4, sp2]],
        [[sp2, sp4, sp2], [sp3, sp3, sp1]], [[sp3, sp3, sp1], [sp4, sp2, sp2]],
        [[sp4, sp0, sp4], [sp3, sp1, sp3]], [[sp3, sp1, sp3], [sp2, sp2, sp4]],
        [[sp2, sp2, sp4], [sp1, sp3, sp3]], [[sp1, sp3, sp3], [sp0, sp4, sp4]]]
    drawing_globals.digrid = A(digrid)
    drawing_globals.DiGridSp = sp4
    return
def min_dist(coord, surface):
    """
    Return minimum distance between coord and surface.
    """
    d = surface - coord
    d2 = sum(d * d, 1)
    return sqrt(min(d2))
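# Illustrative usage sketch (not from the original source): distance from a
# point to the nearest of a set of surface points. Assumes Numeric/numpy-style
# helpers (array, and a sum that accepts an axis argument) are in scope, as in
# the snippet's original module.
coord = array([0.0, 0.0, 0.0])
surface = array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])
print(min_dist(coord, surface))   # -> 1.0, the distance to the closest point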
def __str__(self):
    n = self.norm
    sn = sqrt(n)
    if int(sn) == sn:
        string = repr(sn) + "/sqrt(pi)"
    else:
        string = "sqrt(" + repr(n.nom) + \
                 ("./" + repr(n.denom)) * (n.denom != 1) + "/pi)"
    return string
def computeEndPointsFromChunk(self, chunk, update=True): """ Derives and returns the endpoints and radius of a Peptide chunk. @param chunk: a Peptide chunk @type chunk: Chunk @return: endPoint1, endPoint2 and radius @rtype: Point, Point and float @note: computing the endpoints works fine when n=m or m=0. Otherwise, the endpoints can be slightly off the central axis, especially if the Peptide is short. @attention: endPoint1 and endPoint2 may not be the original endpoints, and they may be flipped (opposites of) the original endpoints. """ # Since chunk.axis is not always one of the vectors chunk.evecs # (actually chunk.poly_evals_evecs_axis[2]), it's best to just use # the axis and center, then recompute a bounding cylinder. if not chunk.atoms: return None axis = chunk.axis axis = norm(axis) # needed center = chunk._get_center() points = chunk.atpos - center # not sure if basepos points are already centered # compare following Numeric Python code to findAtomUnderMouse and its caller matrix = matrix_putting_axis_at_z(axis) v = dot(points, matrix) # compute xy distances-squared between axis line and atom centers r_xy_2 = v[:, 0] ** 2 + v[:, 1] ** 2 # to get radius, take maximum -- not sure if max(r_xy_2) would use Numeric code, but this will for sure: i = argmax(r_xy_2) max_xy_2 = r_xy_2[i] radius = sqrt(max_xy_2) # to get limits along axis (since we won't assume center is centered between them), use min/max z: z = v[:, 2] min_z = z[argmin(z)] max_z = z[argmax(z)] # Adjust the endpoints such that the ladder rungs (rings) will fall # on the ring segments. # TO DO: Fix drawPeptideLadder() to offset the first ring, then I can # remove this adjustment. --Mark 2008-04-12 z_adjust = self.getEndPointZOffset() min_z += z_adjust max_z -= z_adjust endpoint1 = center + min_z * axis endpoint2 = center + max_z * axis if update: # print "Original endpoints:", self.getEndPoints() self.setEndPoints(endpoint1, endpoint2) # print "New endpoints:", self.getEndPoints() return (endpoint1, endpoint2, radius)
def norm(a):
    """Returns the norm of a matrix or vector

    Calculates the Euclidean norm of a vector.
    Applies the Frobenius norm function to a matrix
    (a.k.a. Euclidian matrix norm)

    a = Numeric array
    """
    return sqrt(sum((a*a).flat))
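# Illustrative usage sketch (not from the original source): the same function
# gives the vector 2-norm and the Frobenius norm of a matrix. Assumes
# Numeric-style arrays with a .flat attribute, as the snippet above expects.
print(norm(array([3.0, 4.0])))                  # -> 5.0
print(norm(array([[3.0, 0.0], [0.0, 4.0]])))    # -> 5.0 (Frobenius norm)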
def compute_memo(self, chunk): """ If drawing chunk in this display mode can be optimized by precomputing some info from chunk's appearance, compute that info and return it. If this computation requires preference values, access them as env.prefs[key], and that will cause the memo to be removed (invalidated) when that preference value is changed by the user. This computation is assumed to also depend on, and only on, chunk's appearance in ordinary display modes (i.e. it's invalidated whenever havelist is). There is not yet any way to change that, so bugs will occur if any ordinarily invisible chunk info affects this rendering, and potential optimizations will not be done if any ordinarily visible info is not visible in this rendering. These can be fixed if necessary by having the real work done within class Chunk's _recompute_ rules, with this function or drawchunk just accessing the result of that (and sometimes causing its recomputation), and with whatever invalidation is needed being added to appropriate setter methods of class Chunk. If the real work can depend on more than chunk's ordinary appearance can, the access would need to be in drawchunk; otherwise it could be in drawchunk or in this method compute_memo. """ # for this example, we'll turn the chunk axes into a cylinder. # Since chunk.axis is not always one of the vectors chunk.evecs (actually chunk.poly_evals_evecs_axis[2]), # it's best to just use the axis and center, then recompute a bounding cylinder. if not chunk.atoms: return None axis = chunk.axis axis = norm( axis ) # needed (unless we're sure it's already unit length, which is likely) center = chunk.center points = chunk.atpos - center # not sure if basepos points are already centered # compare following Numeric Python code to findAtomUnderMouse and its caller matrix = matrix_putting_axis_at_z(axis) v = dot(points, matrix) # compute xy distances-squared between axis line and atom centers r_xy_2 = v[:, 0]**2 + v[:, 1]**2 ## r_xy = sqrt(r_xy_2) # not needed # to get radius, take maximum -- not sure if max(r_xy_2) would use Numeric code, but this will for sure: i = argmax(r_xy_2) max_xy_2 = r_xy_2[i] radius = sqrt(max_xy_2) # to get limits along axis (since we won't assume center is centered between them), use min/max z: z = v[:, 2] min_z = z[argmin(z)] max_z = z[argmax(z)] bcenter = chunk.abs_to_base(center) # return, in chunk-relative coords, end1, end2, and radius of the cylinder, and color. color = chunk.color if color is None: color = V(0.5, 0.5, 0.5) # make sure it's longer than zero (in case of a single-atom chunk); in fact, add a small margin all around # (note: this is not sufficient to enclose all atoms entirely; that's intentional) margin = 0.2 min_z -= margin max_z += margin radius += margin return (bcenter + min_z * axis, bcenter + max_z * axis, radius, color)
def m2rotaxis(m):
    """
    Return angles, axis pair that corresponds to rotation matrix m.
    """
    # Angle always between 0 and pi
    # Sense of rotation is defined by axis orientation
    t = 0.5 * (trace(m) - 1)
    t = max(-1, t)
    t = min(1, t)
    angle = acos(t)
    if angle < 1e-15:
        # Angle is 0
        return 0.0, Vector(1, 0, 0)
    elif angle < pi:
        # Angle is smaller than pi
        x = m[2, 1] - m[1, 2]
        y = m[0, 2] - m[2, 0]
        z = m[1, 0] - m[0, 1]
        axis = Vector(x, y, z)
        axis.normalize()
        return angle, axis
    else:
        # Angle is pi - special case!
        m00 = m[0, 0]
        m11 = m[1, 1]
        m22 = m[2, 2]
        if m00 > m11 and m00 > m22:
            x = sqrt(m00 - m11 - m22 + 0.5)
            y = m[0, 1] / (2 * x)
            z = m[0, 2] / (2 * x)
        elif m11 > m00 and m11 > m22:
            y = sqrt(m11 - m00 - m22 + 0.5)
            x = m[0, 1] / (2 * y)
            z = m[1, 2] / (2 * y)
        else:
            z = sqrt(m22 - m00 - m11 + 0.5)
            x = m[0, 2] / (2 * z)
            y = m[1, 2] / (2 * z)
        axis = Vector(x, y, z)
        axis.normalize()
        return pi, axis
def test_setWeightedProperty(self):
    """setWeightedProperty should calculate and set correct property"""
    leaf_dict = {'d':2, 'e':1, 'g':6, 'h':7}
    means_dict = leaf_dict.copy()
    c_val = (2.0/1 + 1.0/4 + 6.0/2) / (1/1 + 1.0/4 + 1.0/2)
    delta = 0.01
    a_val = (c_val/delta + 7.0/2) / (1.0/delta + 1.0/2)
    means_dict.update({'f':6, 'c':c_val, 'b':c_val, 'a':a_val})

    c_stdev = sqrt(((2-c_val)**2 + (1-c_val)**2 + (6-c_val)**2)/3)
    stdevs_dict = {'d':0, 'e':0, 'g':0, 'f':0,
                   'c': c_stdev, 'b':0, 'h':0,
                   'a': sqrt(((a_val-c_val)**2 + (a_val-7)**2)/2)}

    def set_leaf_f(node):
        return leaf_dict[node.Data]

    r = self.TreeRoot
    r.setWeightedProperty(set_leaf_f, 'x', branch_delta=delta)

    for node in r.traverse(self_before=False, self_after=True):
        self.assertEqual(node.xWeightedMean, means_dict[node.Data])
        self.assertFloatEqual(node.xWeightedStdev, stdevs_dict[node.Data])
def __sub__(self, other):
    """
    Calculate distance between two atoms.

    Example:
        >>> distance = atom1 - atom2

    @param other: the other atom
    @type other: L{Atom}
    """
    diff = self.coord - other.coord
    return sqrt(sum(diff * diff))
def test_conservation_of_area(self):
    """Test that coefficients in lp satisfy the dilation equation
    """
    from daubfilt import daubfilt, number_of_filters

    for p in range(number_of_filters):
        D = 2*(p+1)
        lp, hp = daubfilt(D)

        err = abs(sum(lp) - sqrt(2))
        #assert abs(err) <= epsilon, 'Error == %e' %err
        assert allclose(err, 0), 'Error == %e' %err
def gauss_potential_to_string(l, m, numeric=False): """Return string representation of the potential of a generalized gaussian. The potential is determined by:: m m ^ _ m ^ v [g (r) Y (r) ](r) = v (r) Y (r) l l l l l l where:: 4 pi / -l-1 /r l+2 l /oo 1-l \ v (r) = ---- | r | dx x g (r) + r | dx x g (r) | l 2l+1 \ /0 l /r l / """ v_l = [ [Q(4, 1), 1], [Q(4, 3), 1, 2], [Q(4, 15), 3, 6, 4], [Q(4, 105), 15, 30, 20, 8], [Q(4, 945), 105, 210, 140, 56, 16], [Q(4, 10395), 945, 1890, 1260, 504, 144, 32], ] norm, xyzs = Y_collect(l, m) norm.multiply(v_l[l][0]) string = txt_sqrt(norm.norm, numeric) + '*' + (l != 0) * '(' if numeric: string += repr(v_l[l][1] * sqrt(pi)) else: string += str(v_l[l][1]) + '*sqrt(pi)' string += '*erf(sqrt(a)*r)' if len(v_l[l]) > 2: string += '-(' for n, coeff in enumerate(v_l[l][2:]): if n == 0: string += str(coeff) else: string += '+' + str(coeff) + '*(sqrt(a)*r)**%d' % (2 * n) string += ')*sqrt(a)*r*exp(-a*r2)' if l == 0: string += '/r' elif l == 1: string += ')/r/r2*' + to_string(l, xyzs) else: string += ')/r/r2**%d*' % l + to_string(l, xyzs) return string
def plot_avg(self,x=None,y=None,title=None,replot=False,step=1, errorbars='conf'): """ Plot the average over a set of Y values with error bars indicating the 95% confidence interval of the sample mean at each point. (i.e. stderr * 1.96) y = A sequence of sequences of Y values to average. If not all sequences are of equal length, the length of the shortest sequence is used for all. x = (optional) A single sequence of X values corresponding to the Ys. title = The title of the average plot. replot = Keep the old contents of the plot window. default = False step = Plot the average at every Nth point. (default = 1) errorbars = What statistic to use for error bars, one of: 'conf' -> 95% confidence interval (stderr * 1.96) 'stderr' -> Standard error 'stddev -> Standard deviation 'var' -> Variance """ from Numeric import concatenate as join N = min(map(len,y)) mean,var,stderr = utils.stats(join([array([a[:N]]) for a in y],axis=0)) if replot: self.current_style += 1 else: self.current_style = 1 self.plot(x=x,y=mean,title=title, with='lines %d'%self.current_style, step=step,replot=replot) if not x: x = range(len(mean)) if errorbars == 'conf': bars = stderr * 1.96 elif errorbars == 'stderr': bars = stderr elif errorbars == 'stddev': bars = sqrt(var) elif errorbars == 'var': bars = var else: raise 'Unknown error bar type: "%s"' % errorbars self.plot(pts=zip(x,mean,bars),with='errorbars %d'%self.current_style, step=step,replot=1)
def t_one_observation(x, sample, tails=None, exp_diff=0):
    """Returns t-test for significance of single observation versus a sample.

    Equation for 1-observation t (Sokal and Rohlf 1995 p 228):
    t = (obs - mean - exp_diff) / (s * sqrt((n+1)/n)), where s is the sample
    standard deviation, and df = n - 1.
    """
    try:
        n = sample.Count
        t = (x - sample.Mean - exp_diff) / sample.StandardDeviation / sqrt((n+1)/n)
    except (ZeroDivisionError, ValueError, AttributeError, TypeError):
        return (None, None)
    prob = t_tailed_prob(t, n-1, tails)
    return t, prob
def t_one_sample(a, popmean=0, tails=None):
    """Returns t for ONE group of scores a, given a population mean.

    Usage:   t, prob = t_one_sample(a, popmean, tails)

    t is a float; prob is a probability.
    a should support Mean, StandardDeviation, and Count.
    popmean should be the expected mean; 0 by default.
    tails should be None (default), 'high', or 'low'.
    """
    try:
        n = a.Count
        t = (a.Mean - popmean) / (a.StandardDeviation / sqrt(n))
    except (ZeroDivisionError, ValueError, AttributeError, TypeError):
        return None, None
    prob = t_tailed_prob(t, n-1, tails)
    return t, prob
def weightedMean(data, sigma):
    """Weighted mean of a sequence of numbers with given standard deviations.

    |data| is a list of measurements, |sigma| a list with corresponding
    standard deviations.

    Returns weighted mean and corresponding standard deviation.
    """
    from Numeric import array, Float, sqrt, sum

    if len(data) != len(sigma):
        raise ValueError
    data = 1. * array(data)
    sigma = 1. * array(sigma)

    nom = sum(data / sigma**2)
    denom = sum(1. / sigma**2)
    mean = nom / denom
    sig = sqrt(1. / denom)

    return mean, sig
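# Illustrative usage sketch (not from the original source): two measurements,
# 10 +/- 1 and 12 +/- 2. The weights are 1/sigma**2, so
# mean = (10/1 + 12/4) / (1/1 + 1/4) = 10.4 and sig = sqrt(1/1.25), about 0.894.
# Assumes the legacy Numeric module is installed.
mean, sig = weightedMean([10.0, 12.0], [1.0, 2.0])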
def construct_spherical_code(lmax=3):
    """Method for generating the code in gpaw/spherical_harmonics.py"""
    YL = []
    norms = []
    for L in range((lmax + 1)**2):
        #norm, xyzs = Y_collect(*L_to_lm(L))
        norm, xyzs = Y_collect2(*L_to_lm(L))
        norms.append(str(norm))
        YL.append(zip(xyzs.values(), xyzs.keys()))

    print('Y_L = [')
    for L, Y in enumerate(YL):
        l = sqrt(L)
        if l % 1 == 0:
            print('  #' + 'spdfghijklmn'[int(l)] + ':')
        print('  %s,' % Y)
    print(']')
    print('norms =', norms)
def test_distance(self):
    """distance: should return correct distance between the profiles
    """
    p1 = Profile(array([[2,4],[3,1]]), "AB")
    p2 = Profile(array([[4,6],[5,3]]), "AB")
    p3 = Profile(array([[4,6],[5,3],[1,1]]), "AB")
    p4 = Profile(array([2,2]), "AB")
    p5 = Profile(array([2,2,2]), "AB")
    p6 = Profile(array([[]]), "AB")

    self.assertEqual(p1.distance(p2), 4)
    self.assertEqual(p2.distance(p1), 4)
    self.assertEqual(p1.distance(p4), sqrt(6))
    self.assertEqual(p6.distance(p6), 0)

    #Raises error when frames are not aligned
    self.assertRaises(ProfileError, p1.distance, p3)
    self.assertRaises(ProfileError, p1.distance, p5)
def init_icos():
    global icosa, icosix
    # the golden ratio
    global phi
    phi = (1.0 + sqrt(5.0)) / 2.0
    vert = norm(V(phi, 0, 1))
    a = vert[0]
    b = vert[1]
    c = vert[2]
    # vertices of an icosahedron
    icosa = ((-a, b, c), (b, c, -a), (b, c, a), (a, b, -c), (-c, -a, b),
             (-c, a, b), (b, -c, a), (c, a, b), (b, -c, -a), (a, b, c),
             (c, -a, b), (-a, b, -c))
    icosix = ((9, 2, 6), (1, 11, 5), (11, 1, 8), (0, 11, 4), (3, 1, 7),
              (3, 8, 1), (9, 3, 7), (0, 6, 2), (4, 10, 6), (1, 5, 7),
              (7, 5, 2), (8, 3, 10), (4, 11, 8), (9, 7, 2), (10, 9, 6),
              (0, 5, 11), (0, 2, 5), (8, 10, 4), (3, 9, 10), (6, 0, 4))
    return
def findHandles_exact(self, p1, p2, cutoff=0.0, backs_ok=1, offset=V(0, 0, 0)):
    """
    @return: a list of (dist, handle) pairs, in arbitrary order, which
             includes, for each handle (spherical surface) hit by the ray
             from p1 thru p2, its front-surface intersection with the ray,
             unless that has dist < cutoff and backs_ok, in which case
             include its back-surface intersection (unless *that* has
             dist < cutoff).
    """
    #e For now, just be simple, don't worry about speed.
    # Someday we can preprocess self.handlpos using Numeric functions,
    # like in nearSinglets and/or findSinglets
    # (I have untested prototype code for this in extrude-outs.py).
    hh = self.handles
    res = []
    v = norm(p2 - p1)
    # is this modifying the vector in-place, causing a bug??
    ## offset += self.origin # treat our handles' pos as relative to this
    # I don't know, but one of the three instances of += was doing this!!!
    # probably i was resetting the atom or mol pos....
    offset = offset + self.origin # treat our handles' pos as relative to this
    radius_multiplier = self.radius_multiplier
    for (pos, radius, info) in hh:
        ## bug in this? pos += offset
        pos = pos + offset
        radius *= radius_multiplier
        dist, wid = orthodist(p1, v, pos)
        if radius >= wid: # the ray hits the sphere
            delta = sqrt(radius * radius - wid * wid)
            front = dist - delta # depth from p1 of front surface of sphere, where it's hit
            if front >= cutoff:
                res.append((front, (pos, radius, info)))
            elif backs_ok:
                back = dist + delta
                if back >= cutoff:
                    res.append((back, (pos, radius, info)))
    return res
def pca(M):
    "Perform PCA on M, return eigenvectors and eigenvalues, sorted."
    from Numeric import take, dot, shape, argsort, where, sqrt, transpose as t
    from LinearAlgebra import eigenvectors

    T, N = shape(M)
    # if there are fewer rows T than columns N, use the snapshot method
    if T < N:
        C = dot(M, t(M))
        evals, evecsC = eigenvectors(C)
        # HACK: make sure evals are all positive
        evals = where(evals < 0, 0, evals)
        evecs = 1. / sqrt(evals) * dot(t(M), t(evecsC))
    else:
        # calculate covariance matrix
        K = 1. / T * dot(t(M), M)
        evals, evecs = eigenvectors(K)
    # sort the eigenvalues and eigenvectors, descending order
    order = (argsort(evals)[::-1])
    evecs = take(evecs, order, 1)
    evals = take(evals, order)
    return evals, t(evecs)
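# Illustrative usage sketch (not from the original source): PCA of a small 2-D
# point cloud stretched along the x axis; the leading eigenvalue dominates the
# second. Assumes the legacy Numeric and LinearAlgebra modules imported inside
# pca() are installed.
M = [[2.0, 0.1], [-2.0, -0.1], [1.0, 0.05], [-1.0, -0.05]]
evals, evecs = pca(M)   # evals[0] is much larger than evals[1]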
def scale(self):
    """
    Return the maximum distance from self's geometric center
    to any point in self (i.e. the corner-center distance).

    Note: This is the radius of self's bounding sphere,
    which is as large as, and usually larger than, the
    bounding sphere of self's contents.

    Note: self's box dimensions are slightly larger than needed to
    enclose its data, due to hardcoded constants in its construction
    methods. [TODO: document, make optional]
    """
    if not self.data:
        return 10.0
    #x=1.2*maximum.reduce(subtract.reduce(self.data))
    dd = 0.5 * subtract.reduce(self.data)
        # dd = halfwidths in each dimension (x,y,z)
    x = sqrt(dd[0] * dd[0] + dd[1] * dd[1] + dd[2] * dd[2])
        # x = half-diameter of bounding sphere of self
    #return max(x, 2.0)
    return x
def L_to_lm(L):
    """convert L index to (l, m) index"""
    l = int(sqrt(L))
    m = L - l**2 - l
    return l, m
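# Illustrative sketch (not from the original source): the composite index L
# enumerates (l, m) in the usual spherical-harmonic order. Assumes sqrt comes
# from the math module, as in the snippet above.
for L in range(9):
    print(L, L_to_lm(L))
# -> (0,0), (1,-1), (1,0), (1,1), (2,-2), (2,-1), (2,0), (2,1), (2,2)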
def __float__(self):
    return sqrt(self.norm / pi)
def _dist(p, q):
    diff = p - q
    return sqrt(sum(diff * diff))
def norm(x):
    return sqrt(dot(x, x))
def _buildResiduum(self, mol, zmatrix, n_atoms, phi, psi, init_pos, symbol): """ Builds cartesian coordinates for an amino acid from the internal coordinates table. mol is a chunk to which the amino acid will be added. zmatrix is an internal coordinates array corresponding to a given amino acid. n_atoms is a number of atoms to be build + 3 dummy atoms. phi is a peptide bond PHI angle. psi is a peptide bond PSI angle. init_pos are optional postions of previous CA, C and O atoms. symbol is a current amino acid symbol (used for proline case) Note: currently, it doesn't rebuild bonds, so inferBonds has to be called after. Unfortunately, the proper bond order can not be correctly recognized this way. """ if mol == None: return if not init_pos: # assign three previous atom positions for i in range(0, 3): self.coords[i][0] = self.prev_coords[i][0] self.coords[i][1] = self.prev_coords[i][1] self.coords[i][2] = self.prev_coords[i][2] else: # if no prev_coords are given, compute the first three atom positions num, name, atom_name, atom_type, \ atom_c, atom_b, atom_a, r, a, t = zmatrix[1] self.coords[0][0] = 0.0 self.coords[0][1] = 0.0 self.coords[0][2] = 0.0 self.coords[1][0] = r self.coords[1][1] = 0.0 self.coords[1][2] = 0.0 ccos = cos(DEG2RAD * a) num, name, atom_name, atom_type, \ atom_c, atom_b, atom_a, r, a, t = zmatrix[2] if atom_c == 1: self.coords[2][0] = self.coords[0][0] + r * ccos else: self.coords[2][0] = self.coords[0][0] - r * ccos self.coords[2][1] = r * sin(DEG2RAD * a) self.coords[2][2] = 0.0 for i in range(0, 3): self.prev_coords[i][0] = self.coords[i][0] + init_pos[0] self.prev_coords[i][1] = self.coords[i][1] + init_pos[1] self.prev_coords[i][2] = self.coords[i][2] + init_pos[2] for n in range(3, n_atoms): # Generate all coordinates using three previous atoms # as a frame of reference, num, name, atom_name, atom_type, \ atom_c, atom_b, atom_a, r, a, t = zmatrix[n] cosa = cos(DEG2RAD * a) xb = self.coords[atom_b][0] - self.coords[atom_c][0] yb = self.coords[atom_b][1] - self.coords[atom_c][1] zb = self.coords[atom_b][2] - self.coords[atom_c][2] rbc = 1.0 / sqrt(xb * xb + yb * yb + zb * zb) if abs(cosa) >= 0.999: # Linear bond case # Skip angles, just extend along the bond. 
rbc = r * rbc * cosa self.coords[n][0] = self.coords[atom_c][0] + xb * rbc self.coords[n][1] = self.coords[atom_c][1] + yb * rbc self.coords[n][2] = self.coords[atom_c][2] + zb * rbc else: xa = self.coords[atom_a][0] - self.coords[atom_c][0] ya = self.coords[atom_a][1] - self.coords[atom_c][1] za = self.coords[atom_a][2] - self.coords[atom_c][2] xyb = sqrt(xb * xb + yb * yb) inv = False if xyb < 0.001: xpa = za za = -xa xa = xpa xpb = zb zb = -xb xb = xpb xyb = sqrt(xb * xb + yb * yb) inv = True costh = xb / xyb sinth = yb / xyb xpa = xa * costh + ya * sinth ypa = ya * costh - xa * sinth sinph = zb * rbc cosph = sqrt(abs(1.0 - sinph * sinph)) xqa = xpa * cosph + za * sinph zqa = za * cosph - xpa * sinph yza = sqrt(ypa * ypa + zqa * zqa) if yza < 1e-8: coskh = 1.0 sinkh = 0.0 else: coskh = ypa / yza sinkh = zqa / yza # Apply the peptide bond conformation if symbol != "P": if name == "N " and not init_pos: t = self.prev_psi + 0.0 if name == "O ": t = psi + 180.0 if name == "HA " or name == "HA2": t = 120.0 + phi if name == "CB " or name == "HA3": t = 240.0 + phi if name == "C ": t = phi else: # proline if name == "N " and not init_pos: t = self.prev_psi + 0.0 if name == "O ": t = psi + 180.0 if name == "CA ": t = phi - 120.0 if name == "CD ": t = phi + 60.0 sina = sin(DEG2RAD * a) sind = -sin(DEG2RAD * t) cosd = cos(DEG2RAD * t) # Apply the bond length. xd = r * cosa yd = r * sina * cosd zd = r * sina * sind # Compute the atom position using bond and torsional angles. ypd = yd * coskh - zd * sinkh zpd = zd * coskh + yd * sinkh xpd = xd * cosph - zpd * sinph zqd = zpd * cosph + xd * sinph xqd = xpd * costh - ypd * sinth yqd = ypd * costh + xpd * sinth if inv: tmp = -zqd zqd = xqd xqd = tmp self.coords[n][0] = xqd + self.coords[atom_c][0] self.coords[n][1] = yqd + self.coords[atom_c][1] self.coords[n][2] = zqd + self.coords[atom_c][2] if self.nterm_hydrogen: # It is a hack for the first hydrogen atom # to make sure the bond length is correct. self.nterm_hydrogen.setposn(self.nterm_hydrogen.posn() + 0.325 * norm(V(xqd, yqd, zqd))) self.nterm_hydrogen = None ax = self.coords[n][0] ay = self.coords[n][1] az = self.coords[n][2] # Store previous coordinates for the next building step if not init_pos: if name == "N ": self.prev_coords[0][0] = self.coords[n][0] self.prev_coords[0][1] = self.coords[n][1] self.prev_coords[0][2] = self.coords[n][2] if name == "CA ": self.prev_coords[1][0] = self.coords[n][0] self.prev_coords[1][1] = self.coords[n][1] self.prev_coords[1][2] = self.coords[n][2] if name == "C ": self.prev_coords[2][0] = self.coords[n][0] self.prev_coords[2][1] = self.coords[n][1] self.prev_coords[2][2] = self.coords[n][2] # Add a new atom to the molecule atom = Atom( atom_name, V(self.coords[n][0], self.coords[n][1], self.coords[n][2]), mol) # Create temporary attributes for proper bond assignment. atom._is_aromatic = False atom._is_single = False if atom_type == "sp2a": atom_type = "sp2" atom._is_aromatic = True if atom_type == "sp2s": atom_type = "sp2" atom._is_single = True atom.set_atomtype_but_dont_revise_singlets(atom_type) if name == "CA ": # Set c-alpha flag for protein main chain visualization. atom._protein_ca = True else: atom._protein_ca = False if name == "CB ": # Set c-alpha flag for protein main chain visualization. atom._protein_cb = True else: atom._protein_cb = False if name == "N ": # Set c-alpha flag for protein main chain visualization. atom._protein_n = True else: atom._protein_n = False if name == "C ": # Set c-alpha flag for protein main chain visualization. 
atom._protein_c = True else: atom._protein_c = False if name == "O ": # Set c-alpha flag for protein main chain visualization. atom._protein_o = True else: atom._protein_o = False # debug - output in PDB format # print "ATOM %5d %-3s %3s %c%4d %8.3f%8.3f%8.3f" % ( n, name, "ALA", ' ', res_num, coords[n][0], coords[n][1], coords[n][2]) self.prev_psi = psi # Remember previous psi angle. self.length += 1 # Increase the amino acid counter. return
def L2norm(ar):
    from Numeric import dot, sqrt
    return sqrt(sum(ar**2))
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scipy.signal.ltisys import lti, lsim
from matplotlib.pylab import save, randn
from Numeric import sqrt, array, arange

n = 128
Q = 1.
R = 1.
w = 0.3 * sqrt(Q) * randn(n)
v = 0.2 * sqrt(R) * randn(n)
ureq = array([[-1.743] * n])
t = arange(0, 0.9999, 1. / 128)
#Generator().generateSin(n, 3, 33)
#-0.37727
u = ureq + w
#A, B, C, D = [[-6.,-25.], [1.,0.]], [[1.],[0.]], [[0., 1.]], [[0.]]
#sys = lti(A, B, C, D)
#y = lsim(sys, u, t)
yv = u + v

##save('Q.txt', Q)
##save('R.txt', R)
save('w.txt', w)
save('v.txt', v)
save('yv.txt', yv)
save('u.txt', u)
save('ureq.txt', ureq)
def addInternal(self, i, na, nb, nc, r, theta, phi): """ Add another point, given its internal coordinates. Once added via this routine, the cartesian coordinates for the point can be retrieved with getCartesian(). @param i: Index of the point being added. After this call, a call to getCartesian with this index value will succeed. Index values less than 4 are ignored. Index values should be presented here in sequence beginning with 4. @param na: Index value for point A. Point 'i' will be 'r' distance units from point A. @param nb: Index value for point B. Point 'i' will be located such that the angle i-A-B is 'theta' degrees. @param nc: Index value for point C. Point 'i' will be located such that the torsion angle i-A-B-C is 'torsion' degrees. @param r: Radial distance (in same units as resulting cartesian coordinates) between A and i. @param theta: Angle in degrees of i-A-B. @param phi: Torsion angle in degrees of i-A-B-C """ if (i < 4): return if (i != self._nextIndex): raise IndexError, "next index is %d not %r" % (self._nextIndex, i) cos_theta = cos(DEG2RAD * theta) xb = self._coords[nb][0] - self._coords[na][0] yb = self._coords[nb][1] - self._coords[na][1] zb = self._coords[nb][2] - self._coords[na][2] rba = 1.0 / sqrt(xb*xb + yb*yb + zb*zb) if abs(cos_theta) >= 0.999: # Linear case # Skip angles, just extend along A-B. rba = r * rba * cos_theta xqd = xb * rba yqd = yb * rba zqd = zb * rba else: xc = self._coords[nc][0] - self._coords[na][0] yc = self._coords[nc][1] - self._coords[na][1] zc = self._coords[nc][2] - self._coords[na][2] xyb = sqrt(xb*xb + yb*yb) inv = False if xyb < 0.001: # A-B points along the z axis. tmp = zc zc = -xc xc = tmp tmp = zb zb = -xb xb = tmp xyb = sqrt(xb*xb + yb*yb) inv = True costh = xb / xyb sinth = yb / xyb xpc = xc * costh + yc * sinth ypc = yc * costh - xc * sinth sinph = zb * rba cosph = sqrt(abs(1.0- sinph * sinph)) xqa = xpc * cosph + zc * sinph zqa = zc * cosph - xpc * sinph yzc = sqrt(ypc * ypc + zqa * zqa) if yzc < 1e-8: coskh = 1.0 sinkh = 0.0 else: coskh = ypc / yzc sinkh = zqa / yzc sin_theta = sin(DEG2RAD * theta) sin_phi = -sin(DEG2RAD * phi) cos_phi = cos(DEG2RAD * phi) # Apply the bond length. xd = r * cos_theta yd = r * sin_theta * cos_phi zd = r * sin_theta * sin_phi # Compute the atom position using bond and torsional angles. ypd = yd * coskh - zd * sinkh zpd = zd * coskh + yd * sinkh xpd = xd * cosph - zpd * sinph zqd = zpd * cosph + xd * sinph xqd = xpd * costh - ypd * sinth yqd = ypd * costh + xpd * sinth if inv: tmp = -zqd zqd = xqd xqd = tmp self._coords[i][0] = xqd + self._coords[na][0] self._coords[i][1] = yqd + self._coords[na][1] self._coords[i][2] = zqd + self._coords[na][2] self._nextIndex = self._nextIndex + 1
#          |      .               .
#          |      .               .
#          |      .               .
#          |      .               .
#  sic_yU -(0)      . . . . . (2)        (5)
#          |      .               .
#          |      .               .
#          |      .               .
#          |      .               .
#          |      .               .
#        0 --+------+------|-----(1)-----|-----(6)-----|---
#            |             |             |             |
#            0          sic_uLen    2*sic_uLen    3*sic_uLen
#
sic_uLen = 1.8  # Si-C bond length (I think)
sic_yU = sic_uLen * sqrt(3.0) / 2
sic_vpdat = [[0.0 * sic_uLen, 1.0 * sic_yU, 0.0],
             [1.5 * sic_uLen, 0.0 * sic_yU, 0.0],
             [1.0 * sic_uLen, 1.0 * sic_yU, 0.0],
             [1.5 * sic_uLen, 2.0 * sic_yU, 0.0],
             [2.5 * sic_uLen, 2.0 * sic_yU, 0.0],
             [3.0 * sic_uLen, 1.0 * sic_yU, 0.0],
             [2.5 * sic_uLen, 0.0 * sic_yU, 0.0]]

def setup_draw_grid_lines():
    """
    This must be called in whichever GL display list context will be drawn in.

    See comment in drawer.setup_drawer about problems with calling this in
    more than one GL context. For now, it shouldn't be.