def derivative(self, variable = 0):
    """
    @param variable: the index of the variable of the function
        with respect to which the X{derivative} is taken
    @type variable: C{int}
    @returns: a new InterpolatingFunction containing the numerical
        derivative
    @rtype: L{InterpolatingFunction}
    """
    diffaxis = self.axes[variable]
    ai = index_expression[::] + \
         (len(self.values.shape)-variable-1) * index_expression[N.NewAxis]
    period = self.period[variable]
    if period is None:
        ui = variable*index_expression[::] + \
             index_expression[1::] + index_expression[...]
        li = variable*index_expression[::] + \
             index_expression[:-1:] + index_expression[...]
        d_values = (self.values[ui]-self.values[li]) / \
                   (diffaxis[1:]-diffaxis[:-1])[ai]
        diffaxis = 0.5*(diffaxis[1:]+diffaxis[:-1])
    else:
        u = N.take(self.values, range(1, len(diffaxis))+[0], axis=variable)
        l = self.values
        ua = N.concatenate((diffaxis[1:], period+diffaxis[0:1]))
        la = diffaxis
        d_values = (u-l)/(ua-la)[ai]
        diffaxis = 0.5*(ua+la)
    d_axes = self.axes[:variable]+[diffaxis]+self.axes[variable+1:]
    d_default = None
    if self.default is not None:
        d_default = 0.
    return self._constructor(d_axes, d_values, d_default, self.period)
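# A minimal usage sketch of the finite-difference scheme above for the
# non-periodic case, written with NumPy as a stand-in for the old Numeric
# module; the grid and test function are made up for illustration.
import numpy as np

x = np.linspace(0., 2.*np.pi, 101)               # hypothetical grid
y = np.sin(x)

d_values = (y[1:] - y[:-1]) / (x[1:] - x[:-1])   # forward differences
d_axis = 0.5*(x[1:] + x[:-1])                    # derivative lives on midpoints

print(np.max(np.abs(d_values - np.cos(d_axis)))) # small discretization error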
def draw(self, event=None): xnum = self.xnum.get() ynum = self.ynum.get() znum = self.znum.get() self.mode_projector.calculateProjections([xnum, ynum, znum]) x = self.mode_projector[xnum] data = Numeric.zeros((len(x), 3), Numeric.Float) data[:, 0] = x[:, 1] data[:, 1] = self.mode_projector[ynum][:, 1] data[:, 2] = self.mode_projector[znum][:, 1] minv = Numeric.minimum.reduce(data) maxv = Numeric.maximum.reduce(data) scale = maxv-minv reference = minv-0.05*scale xaxis = PolyLine3D([reference, reference+scale*Numeric.array([0.2, 0., 0.])], color='red') yaxis = PolyLine3D([reference, reference+scale*Numeric.array([0., 0.2, 0.])], color='yellow') zaxis = PolyLine3D([reference, reference+scale*Numeric.array([0., 0., 0.2])], color='green') graphics = [PolyLine3D(data, color = 'blue'), xaxis, yaxis, zaxis] self.plot.clear() self.plot.draw(VisualizationGraphics(graphics))
def __getitem__(self, item):
    try:
        series = self.cache[item]
    except KeyError:
        self.calculateProjections([item])
        series = self.cache[item]
    return Numeric.transpose(Numeric.array([self.time, series]))
def _mouseRelease(self, event): if self.mouse_state == 1: self.canvas.delete(self.rubberband) self.rubberband = None p1 = Numeric.array([self.startx, self.starty]) p2 = Numeric.array([self.canvas.canvasx(event.x), self.canvas.canvasy(event.y)]) if Numeric.minimum.reduce(Numeric.fabs(p1-p2)) > 5: scale, shift = self.transformation p1 = (p1-shift)/scale p2 = (p2-shift)/scale graphics, xaxis, yaxis = self.last_draw if xaxis is not None: xaxis = (p1[0], p2[0]) if yaxis is not None: yaxis = (p2[1], p1[1]) self.clear() self.draw(graphics, xaxis, yaxis) elif self.mouse_state == 2: scale, shift = self.transformation x1 = (self.startx-shift[0])/scale[0] x2 = (self.canvas.canvasx(event.x)-shift[0])/scale[0] if x1 < x2: self.selected_range = (x1, x2) else: self.selected_range = (x2, x1) if self.selectfn is not None: self.selectfn(self.selected_range) else: self.canvas.delete(self.rectangle) self.rectangle = None self.selected_range = None if self.selectfn is not None: self.selectfn(self.selected_range) self.mouse_state = 0
def plotBox(self, name, data, data_range=None): box = Frame(self, border=2, relief=SUNKEN) box.pack(side=TOP, fill=BOTH, expand=YES) frame = Frame(box, background='grey') frame.pack(side=TOP, fill=X, expand=NO) Label(frame, text=string.capitalize(string.join( string.split(name , '_'), ' ')), background='grey').pack(side=LEFT) if data_range is None: min = Numeric.minimum.reduce(data[:,1]) max = Numeric.maximum.reduce(data[:,1]) min, max = plotRange(min, max) else: min, max = data_range plot_objects = [] plot_data = data time = plot_data[:,0] jumps = Numeric.repeat(Numeric.arange(len(time)-1), Numeric.less(time[1:], time[:-1]))+1 for i in self.master.restarts: plot_objects.append(PolyLine([(self.time[i], min), (self.time[i], max)], color='black', stipple='gray25')) plot_objects.insert(0, PolyLine(plot_data, color = 'red')) plot = PlotCanvas(box, 400, 100, zoom=1, select=self.master._selectRange) plot.pack(side=LEFT, fill=BOTH, expand=YES) plot.draw(PlotGraphics(plot_objects), 'automatic', (min, max)) plot.bind('<Double-Button-1>', lambda event, d=plot_data: externalPlot(d)) self.registerPlot(plot) self.setSelection(plot)
def EISF(self, q_range = (0., 15.), subset=None, weights = None, random_vectors = 15, first_mode = 6): if subset is None: subset = self.universe if weights is None: weights = self.universe.getParticleScalar('b_incoherent') weights = weights*weights weights = weights*subset.booleanMask() total = weights.sumOverParticles() weights = weights/total first, last, step = (q_range+(None,))[:3] if step is None: step = (last-first)/50. q = N.arange(first, last, step) f = ParticleProperties.ParticleTensor(self.universe) for i in range(first_mode, self.nmodes): mode = self.rawMode(i) f = f + (1./mode.inv_relaxation_time)*mode.dyadicProduct(mode) f = Units.k_B*self.temperature*f/self.friction eisf = N.zeros(q.shape, N.Float) random_vectors = Random.randomDirections(random_vectors) for v in random_vectors: for a in subset.atomList(): exp = N.exp(-v*(f[a]*v)) N.add(eisf, weights[a]*exp**(q*q), eisf) return InterpolatingFunction((q,), eisf/len(random_vectors))
def _setsize(self):
    self.width = string.atoi(self.canvas.cget('width'))
    self.height = string.atoi(self.canvas.cget('height'))
    self.plotbox_size = 0.97*Numeric.array([self.width, -self.height])
    xo = 0.5*(self.width-self.plotbox_size[0])
    yo = self.height-0.5*(self.height+self.plotbox_size[1])
    self.plotbox_origin = Numeric.array([xo, yo])
def EISF(self, q_range = (0., 15.), subset=None, weights = None, random_vectors = 15, first_mode = 6): if self.temperature is None: raise ValueError("no temperature available") if subset is None: subset = self.universe if weights is None: weights = self.universe.getParticleScalar('b_incoherent') weights = weights*weights weights = weights*subset.booleanMask() total = weights.sumOverParticles() weights = weights/total first, last, step = (q_range+(None,))[:3] if step is None: step = (last-first)/50. q = N.arange(first, last, step) f = MMTK.ParticleTensor(self.universe) for i in range(first_mode, self.nmodes): mode = self[i] f = f + mode.dyadicProduct(mode) eisf = N.zeros(q.shape, N.Float) for i in range(random_vectors): v = MMTK.Random.randomDirection() for a in subset.atomList(): exp = N.exp(-v*(f[a]*v)) N.add(eisf, weights[a]*exp**(q*q), eisf) return InterpolatingFunction((q,), eisf/random_vectors)
def reduceToRange(self, first, last):
    """
    Discards all modes outside a given range of mode numbers.
    This is done to reduce memory requirements, especially before
    saving the modes to a file.

    :param first: the number of the first mode to be kept
    :param last: one plus the number of the last mode to be kept
                 (i.e. modes first <= i < last are retained)
    """
    junk1 = list(self.sort_index[:first])
    junk2 = list(self.sort_index[last:])
    junk1.sort()
    junk2.sort()
    if junk1 == range(0, first) and \
       junk2 == range(last, len(self.sort_index)):
        # This is the most frequent case. It can be handled
        # without copying the mode array.
        for array in self._internal_arrays:
            setattr(self, array, getattr(self, array)[first:last])
        self.sort_index = self.sort_index[first:last]-first
    else:
        keep = self.sort_index[first:last]
        for array in self._internal_arrays:
            setattr(self, array, N.take(getattr(self, array), keep))
        self.sort_index = N.arange(0, last-first)
    self.nmodes = last-first
def norm(self):
    """
    :returns: the norm of the ParticleVector seen as a
              3N-dimensional vector
    :rtype: float
    """
    return N.sqrt(N.add.reduce(N.ravel(self.array**2)))
def integral(self, variable = 0):
    """
    @param variable: the index of the variable of the function
        with respect to which the X{integration} is performed
    @type variable: C{int}
    @returns: a new InterpolatingFunction containing the numerical
        X{integral}. The integration constant is defined such that
        the integral at the first grid point is zero.
    @rtype: L{InterpolatingFunction}
    """
    if self.period[variable] is not None:
        raise ValueError('Integration over periodic variables not defined')
    intaxis = self.axes[variable]
    ui = variable*index_expression[::] + \
         index_expression[1::] + index_expression[...]
    li = variable*index_expression[::] + \
         index_expression[:-1:] + index_expression[...]
    uai = index_expression[1::] + (len(self.values.shape)-variable-1) * \
          index_expression[N.NewAxis]
    lai = index_expression[:-1:] + (len(self.values.shape)-variable-1) * \
          index_expression[N.NewAxis]
    i_values = 0.5*N.add.accumulate((self.values[ui]+self.values[li]) *
                                    (intaxis[uai]-intaxis[lai]), variable)
    s = list(self.values.shape)
    s[variable] = 1
    z = N.zeros(tuple(s))
    return self._constructor(self.axes,
                             N.concatenate((z, i_values), variable),
                             None)
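# A minimal sketch of the cumulative trapezoidal rule used above, with NumPy
# as a stand-in for Numeric; as in the method, the integration constant makes
# the integral zero at the first grid point.
import numpy as np

x = np.linspace(0., 2.*np.pi, 201)
y = np.cos(x)

i_values = np.concatenate(
    ([0.], np.cumsum(0.5*(y[1:] + y[:-1])*(x[1:] - x[:-1]))))

print(np.max(np.abs(i_values - np.sin(x))))      # small discretization error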
def mouseReleaseEvent(self, event): button = event.button() self.setCursor(Qt.arrowCursor) if button == Qt.LeftButton: try: dx = event.x() - self.click1x dy = event.y() - self.click1y except AttributeError: return if dx != 0 or dy != 0: normal = Vector(self.axis) move = Vector(-dx*self.plane[:,0]+dy*self.plane[:,1]) axis = normal.cross(move) / \ N.minimum.reduce(N.fabs(self.plotbox_size)) rot = Rotation(axis.normal(), axis.length()) self.axis = rot(normal).array self.plane[:,0] = rot(Vector(self.plane[:,0])).array self.plane[:,1] = rot(Vector(self.plane[:,1])).array elif button == Qt.MidButton: try: dx = event.x() - self.click2x dy = event.y() - self.click2y except AttributeError: return if dx != 0 or dy != 0: self.translate = self.translate + N.array([dx, dy]) else: try: dy = event.y() - self.click3y except AttributeError: return if dy != 0: ratio = -dy/self.plotbox_size[1] self.scale = self.scale * (1.+ratio) self.update()
def draw(self, graphics): """ Draw something on the canvas @param graphics: the graphics object (L{PolyLine3D}, or L{VisualizationGraphics}) to be drawn """ self.last_draw = (graphics, ) self.configure(cursor='watch') self.update_idletasks() graphics.project(self.axis, self.plane) p1, p2 = graphics.boundingBoxPlane() center = 0.5*(p1+p2) scale = self.plotbox_size / (p2-p1) sign = scale/Numeric.fabs(scale) if self.scale is None: minscale = Numeric.minimum.reduce(Numeric.fabs(scale)) self.scale = 0.9*minscale scale = sign*self.scale box_center = self.plotbox_origin + 0.5*self.plotbox_size shift = -center*scale + box_center + self.translate graphics.scaleAndShift(scale, shift) items, depths = graphics.lines() sort = Numeric.argsort(depths) for index in sort: x1, y1, x2, y2, color, width = items[index] Line(self.canvas, x1, y1, x2, y2, fill=color, width=width) self.configure(cursor='top_left_arrow') self.update_idletasks()
def evaluatorTerms(self, universe, subset1, subset2, global_data): if subset1 is not None: for s1, s2 in [(subset1, subset2), (subset2, subset1)]: set = {} for a in s1.atomList(): set[a.index] = None for a in s2.atomList(): try: del set[a.index] except KeyError: pass set = {} for a in subset1.atomList(): set[a.index] = None for a in subset2.atomList(): set[a.index] = None atom_subset = set.keys() atom_subset.sort() atom_subset = Numeric.array(atom_subset) else: atom_subset = Numeric.array([], Numeric.Int) nothing = Numeric.zeros((0,2), Numeric.Int) nbl = NonbondedList(nothing, nothing, atom_subset, universe._spec, self.cutoff) update = NonbondedListTerm(nbl) cutoff = self.cutoff if cutoff is None: cutoff = 0. ev = CalphaTerm(universe._spec, nbl, cutoff, self.scale_factor, self.version) return [update, ev]
def _setsize(self):
    width = self.width()
    height = self.height()
    self.plotbox_size = 0.97*N.array([width, -height])
    xo = 0.5*(width-self.plotbox_size[0])
    yo = height-0.5*(height+self.plotbox_size[1])
    self.plotbox_origin = N.array([xo, yo])
def boundingBoxPlane(self):
    p1, p2 = self.objects[0].boundingBoxPlane()
    for o in self.objects[1:]:
        p1o, p2o = o.boundingBoxPlane()
        p1 = N.minimum(p1, p1o)
        p2 = N.maximum(p2, p2o)
    return p1, p2
def nonbondedList(self, universe, subset1, subset2, global_data): try: from MMTK_forcefield import NonbondedList, NonbondedListTerm except ImportError: return None, None nbl = None update = None if 'nonbondedlist' in global_data.get('initialized'): nbl, update, cutoff = global_data.get('nonbondedlist') if nbl is None: excluded_pairs, one_four_pairs, atom_subset = \ self.excludedPairs(subset1, subset2, global_data) excluded_pairs = N.array(excluded_pairs) one_four_pairs = N.array(one_four_pairs) if atom_subset is not None: atom_subset = N.array(atom_subset) else: atom_subset = N.array([], N.Int) nbl = NonbondedList(excluded_pairs, one_four_pairs, atom_subset, universe._spec, self.cutoff) update = NonbondedListTerm(nbl) update.info = 0 global_data.set('nonbondedlist', (nbl, update, self.cutoff)) global_data.add('initialized', 'nonbondedlist') else: if cutoff is not None and \ (self.cutoff is None or self.cutoff > cutoff): nbl.setCutoff(self.cutoff) return nbl, update
def forceConstantTest(universe, atoms = None, delta = 0.0001): """ Test force constants by comparing to the numerical derivatives of the gradients. :param universe: the universe on which the test is performed :type universe: :class:`~MMTK.Universe.Universe` :param atoms: the atoms of the universe for which the gradient is tested (default: all atoms) :type atoms: list :param delta: the step size used in calculating the numerical derivatives :type delta: float """ e0, grad0, fc = universe.energyGradientsAndForceConstants() if atoms is None: atoms = universe.atomList() for a1, a2 in itertools.chain(itertools.izip(atoms, atoms), Utility.pairs(atoms)): print a1, a2 print fc[a1, a2] num_fc = [] for v in [ex, ey, ez]: x = a1.position() a1.setPosition(x+delta*v) e_plus, grad_plus = universe.energyAndGradients() a1.setPosition(x-delta*v) e_minus, grad_minus = universe.energyAndGradients() a1.setPosition(x) num_fc.append(0.5*(grad_plus[a2]-grad_minus[a2])/delta) print N.array(map(lambda a: a.array, num_fc))
def mouseMoveEvent(self, event): x = event.x() y = event.y() if self.mouse_state == 0: scale, shift = self.transformation p = (N.array([self.startx, self.starty])-shift)/scale bb1, bb2 = self.bbox if self.selectfn is not None and p[1] < bb1[1]: self.painter.setPen(QPen(Qt.NoPen)) self.painter.setBrush(QBrush(Qt.blue, Qt.Dense5Pattern)) self.rectangle = (self.startx, 0, x-self.startx, self.height()) self.painter.drawRect(*self.rectangle) self.mouse_state = 2 elif self.zoom: self.painter.setPen(QPen(Qt.white, 1, Qt.DotLine)) self.painter.setBrush(QBrush(Qt.NoBrush)) self.rectangle = (self.startx, self.starty, x-self.startx, y-self.starty) self.painter.drawRect(*self.rectangle) self.mouse_state = 1 elif self.mouse_state == 1 or self.mouse_state == 2: self.painter.drawRect(*self.rectangle) if self.mouse_state == 1: self.rectangle = (self.startx, self.starty, x-self.startx, y-self.starty) elif self.mouse_state == 2: self.rectangle = (self.startx, 0, x-self.startx, self.height()) self.painter.drawRect(*self.rectangle) elif self.mouse_state == 3: scale, shift = self.transformation point = N.array([x, y]) point = (point-shift)/scale self.value_label.setText(" x = %f\n y = %f" % tuple(point))
def evaluatorParameters(self, universe, subset1, subset2, global_data):
    n = universe.numberOfPoints()
    charge = N.zeros((n,), N.Float)
    atom_types = {}
    for o in universe:
        for a in o.atomList():
            charge[a.index] = self._charge(o, a, global_data)
    params = {}
    if n < 10000:
        params['spatial_decomposition_levels'] = 4
    elif n < 100000:
        params['spatial_decomposition_levels'] = 5
    else:
        params['spatial_decomposition_levels'] = 6
    params['multipole_expansion_terms'] = 8
    params['use_fft'] = 0
    params['fft_blocking_factor'] = 4
    params['macroscopic_expansion_terms'] = 6
    params['multipole_acceptance'] = 0.5
    for key, value in self.options.items():
        params[key] = value
    params['algorithm'] = 'dpmta'
    params['charge'] = charge
    params['one_four_factor'] = self.es_14_factor
    excluded_pairs, one_four_pairs, atom_subset = \
                    self.excludedPairs(subset1, subset2, global_data)
    return {'electrostatic': params,
            'nonbonded': {'excluded_pairs': excluded_pairs,
                          'one_four_pairs': one_four_pairs,
                          'atom_subset': atom_subset}
            }
def _axisInterval(self, spec, lower, upper): if spec is None: return None if spec == 'minimal': if lower == upper: return lower-0.5, upper+0.5 else: return lower, upper if spec == 'automatic': range = upper-lower if range == 0.: return lower-0.5, upper+0.5 log = N.log10(range) power = N.floor(log) fraction = log-power if fraction <= 0.05: power = power-1 grid = 10.**power lower = lower - lower % grid mod = upper % grid if mod != 0: upper = upper - mod + grid return lower, upper if type(spec) == type(()): lower, upper = spec if lower <= upper: return lower, upper else: return upper, lower raise ValueError(str(spec) + ': illegal axis specification')
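# A self-contained sketch of the 'automatic' axis-interval rule above: the
# interval is rounded outward to a grid of 10**power (pure NumPy, independent
# of the plotting widget; the example values are made up).
import numpy as np

def automatic_interval(lower, upper):
    if lower == upper:
        return lower - 0.5, upper + 0.5
    log = np.log10(upper - lower)
    power = np.floor(log)
    if log - power <= 0.05:
        power = power - 1
    grid = 10.**power
    lower = lower - lower % grid
    mod = upper % grid
    if mod != 0.:
        upper = upper - mod + grid
    return lower, upper

print(automatic_interval(0.13, 9.7))             # (0.0, 10.0)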
def _ticks(self, lower, upper): ideal = (upper-lower)/7. if ideal == 0.: ideal = 1./7. log = N.log10(ideal) power = N.floor(log) fraction = log-power factor = 1. error = fraction for f, lf in self._multiples: e = N.fabs(fraction-lf) if e < error: error = e factor = f grid = factor * 10.**power if power > 3 or power < -3: format = '%+7.0e' elif power >= 0: digits = max(1, int(power)) format = '%' + `digits`+'.0f' else: digits = -int(power) format = '%'+`digits+2`+'.'+`digits`+'f' ticks = [] t = -grid*N.floor(-lower/grid) while t <= upper and len(ticks) < 200: ticks.append((t, format % (t,))) t = t + grid return ticks
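# A sketch of the tick-spacing choice above: the spacing has the form
# f * 10**power with f in {1, 2, 5}, chosen to give roughly seven ticks.
# The (factor, log10(factor)) pairs below mirror what self._multiples is
# assumed to contain.
import numpy as np

def nice_tick_spacing(lower, upper,
                      multiples=((2., np.log10(2.)), (5., np.log10(5.)))):
    ideal = (upper - lower)/7.
    if ideal == 0.:
        ideal = 1./7.
    log = np.log10(ideal)
    power = np.floor(log)
    fraction = log - power
    factor, error = 1., fraction
    for f, lf in multiples:
        if abs(fraction - lf) < error:
            error, factor = abs(fraction - lf), f
    return factor * 10.**power

print(nice_tick_spacing(0., 10.))                # 2.0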
def _dihedralTerm(self, n, phase, V): mod_file = self.mod_template % \ (V/(Units.kcal/Units.mol), phase/Units.deg, n) ff = Amber99ForceField(mod_files=[StringIO(mod_file)]) self.universe.setForceField(ff) param = self.universe.energyEvaluatorParameters() i1, i2, i3, i4, n_test, phase_test, V_test = \ param['cosine_dihedral_term'][0] self.assertEqual(n_test, n) # The accuracy is no better than five digits because the # parameters pass through a text representation. self.assertAlmostEqual(phase_test, phase, 5) self.assertAlmostEqual(V_test, V, 5) two_pi = 2.*N.pi m = self.universe[0] for angle in N.arange(0., two_pi, 0.1): m.C4.setPosition(Vector(N.cos(angle), N.sin(angle), 1.)) e = self.universe.energyTerms()['cosine dihedral angle'] da = self.universe.dihedral(m.C1, m.C2, m.C3, m.C4) e_ref = V*(1.+N.cos(n*angle-phase)) self.assertAlmostEqual(angle % two_pi, da % two_pi, 14) self.assertAlmostEqual(e, e_ref, 5) self._gradientTest() self._forceConstantTest()
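# The reference energy used in the test above, as a stand-alone sketch:
# a cosine dihedral term E(phi) = V*(1 + cos(n*phi - delta)).
import numpy as np

def cosine_dihedral_energy(phi, n, delta, V):
    return V*(1. + np.cos(n*phi - delta))

print(cosine_dihedral_energy(np.pi/3., n=3, delta=0., V=4.0))   # 0.0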
def test_nonbondedList(self): self.universe.configuration() atoms = self.universe.atomList() atom_indices = N.array([a.index for a in self.universe.atomList()]) empty = N.zeros((0, 2), N.Int) for cutoff in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.]: nblist = NonbondedList(empty, empty, atom_indices, self.universe._spec, cutoff) nblist.update(self.universe.configuration().array) distances = nblist.pairDistances() pairs1 = nblist.pairIndices() pairs1 = [sorted_tuple(pairs1[i]) for i in range(len(pairs1)) if distances[i] < cutoff] pairs1.sort(lambda a, b: cmp(a[0], b[0]) or cmp(a[1], b[1])) pairs2 = [] for i in range(len(atoms)): for j in range(i+1, len(atoms)): d = self.universe.distance(atoms[i], atoms[j]) if d < cutoff: pairs2.append(sorted_tuple((atoms[i].index, atoms[j].index))) pairs2.sort(lambda a, b: cmp(a[0], b[0]) or cmp(a[1], b[1])) self.assertEqual(pairs1, pairs2)
def memoryFunctionZ(self): """ @returns: the M{z}-transform of the process' memory function @rtype: L{Scientific.Function.Rational.RationalFunction} """ poles = self.poles() cpoles = N.conjugate(poles) coeff0 = N.conjugate(self.coeff[0]) beta = N.zeros((self.order,), N.Complex) for i in range(self.order): pole = poles[i] beta[i] = -(self.sigsq*pole**(self.order-1)/coeff0) / \ (N.multiply.reduce((pole-poles)[:i]) * N.multiply.reduce((pole-poles)[i+1:]) * N.multiply.reduce(pole-1./cpoles) * self.variance) beta = beta/N.sum(beta) sum = 0. for i in range(self.order): sum = sum + RationalFunction([beta[i]], [-poles[i], 1.]) mz = (1./sum+Polynomial([1., -1.]))/self.delta_t**2 if not _isComplex(self.coeff): mz.numerator.coeff = _realPart(mz.numerator.coeff) mz.denominator.coeff = _realPart(mz.denominator.coeff) return mz
def memoryFunctionZapprox(self, den_order):
    """
    @param den_order: the expansion order of the denominator
    @type den_order: C{int}
    @returns: an approximation to the M{z}-transform of the process'
        memory function that corresponds to an expansion of the
        denominator up to order den_order
    @rtype: L{Scientific.Function.Rational.RationalFunction}
    """
    poles = self.poles()
    cpoles = N.conjugate(poles)
    coeff0 = N.conjugate(self.coeff[0])
    beta = N.zeros((self.order,), N.Complex)
    for i in range(self.order):
        pole = poles[i]
        beta[i] = -(self.sigsq*pole**(self.order-1)/coeff0) / \
                  (N.multiply.reduce((pole-poles)[:i]) *
                   N.multiply.reduce((pole-poles)[i+1:]) *
                   N.multiply.reduce(pole-1./cpoles) *
                   self.variance)
    beta = beta/N.sum(beta)
    den_coeff = []
    for i in range(den_order):
        sum = 0.
        for j in range(self.order):
            sum += beta[j]*poles[j]**i
        den_coeff.append(sum)
    den_coeff.reverse()
    mz = (RationalFunction(den_order*[0.] + [1.], den_coeff)
          + Polynomial([1., -1.]))/self.delta_t**2
    if not _isComplex(self.coeff):
        mz.numerator.coeff = _realPart(mz.numerator.coeff)
        mz.denominator.coeff = _realPart(mz.denominator.coeff)
    return mz
def massWeightedNorm(self):
    """Returns the mass-weighted norm of the ParticleVector
    seen as a 3N-dimensional vector.
    """
    m = self.universe.masses().array
    return N.sqrt(N.sum(N.ravel(m[:, N.NewAxis] * self.array**2))
                  / N.sum(m))
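# A sketch of the mass-weighted norm above, sqrt(sum_i m_i*|v_i|**2 / sum_i m_i),
# with made-up masses and a uniform displacement (NumPy as a stand-in for
# Numeric).
import numpy as np

m = np.array([12., 1., 1., 1., 1.])              # hypothetical masses
v = np.ones((5, 3))                              # uniform displacement
print(np.sqrt(np.sum(m[:, np.newaxis]*v**2)/np.sum(m)))   # sqrt(3)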
def correlation(self, nsteps): """ @param nsteps: the number of time steps for which the autocorrelation function is to be evaluated @type nsteps: C{int} @returns: the autocorrelation function of the process as estimated from the AR model @rtype: L{Scientific.Functions.Interpolation.InterpolatingFunction} """ poles = self.poles() cpoles = N.conjugate(poles) x = 0. exponents = N.arange(self.order-1, nsteps+self.order-1) for i in range(len(poles)): pole = poles[i] factor = N.multiply.reduce((pole-poles)[:i]) * \ N.multiply.reduce((pole-poles)[i+1:]) * \ N.multiply.reduce((pole-1./cpoles)) try: x = x + pole**exponents / factor except OverflowError: # happens with some Python versions on some systems power = N.zeros(exponents.shape, N.Complex) for i in range(len(exponents)): try: power[i] = pole**exponents[i] except ValueError: pass x = x + power/factor cf = -self.sigsq*x/N.conjugate(self.coeff[0]) if not _isComplex(self.coeff): cf = _realPart(cf) return InterpolatingFunction((self.delta_t*N.arange(nsteps),), cf)
def __init__(self, universe, objects): if not Utility.isSequenceObject(objects): objects = [objects] vectors = [] for o in objects: atoms = o.atomList() for d in [Vector(1.,0.,0.), Vector(0.,1.,0.), Vector(0.,0.,1.)]: v = ParticleProperties.ParticleVector(universe) for a in atoms: v[a] = d vectors.append(v/N.sqrt(len(atoms))) if len(atoms) > 1: center = o.centerOfMass() iv = len(vectors)-3 for d in [Vector(1.,0.,0.),Vector(0.,1.,0.),Vector(0.,0.,1.)]: v = ParticleProperties.ParticleVector(universe) for a in atoms: v[a] = d.cross(a.position()-center) for vt in vectors[iv:]: v = v - v.dotProduct(vt)*vt vectors.append(v/N.sqrt(v.dotProduct(v))) Subspace.__init__(self, universe, vectors) # The vector set is already orthonormal by construction # (assuming that the rigid bodies have no atoms in common), # so we can eliminate the lengthy SVD procedure count = ParticleProperties.ParticleScalar(universe) for o in objects: count = count + o.booleanMask() if N.maximum.reduce(count.array) == 1: self._basis = ParticleVectorSet(universe, len(vectors)) for i in range(len(vectors)): self._basis.array[i] = vectors[i].array
def paintEvent(self, event): graphics = self.last_draw[0] if graphics is None: return graphics.project(self.axis, self.plane) p1, p2 = graphics.boundingBoxPlane() center = 0.5*(p1+p2) scale = self.plotbox_size / (p2-p1) sign = scale/N.fabs(scale) if self.scale is None: minscale = N.minimum.reduce(N.fabs(scale)) self.scale = 0.9*minscale scale = sign*self.scale box_center = self.plotbox_origin + 0.5*self.plotbox_size shift = -center*scale + box_center + self.translate graphics.scaleAndShift(scale, shift) items, depths = graphics.lines() sort = N.argsort(depths) painter = QPainter() painter.begin(self) painter.fillRect(self.rect(), QBrush(self.background_color)) if colors_by_name: for index in sort: x1, y1, x2, y2, color, width = items[index] painter.setPen(QPen(QColor(color), width, Qt.SolidLine)) painter.drawLine(x1, y1, x2, y2) else: for index in sort: x1, y1, x2, y2, color, width = items[index] painter.setPen(QPen(getattr(Qt, color), width, Qt.SolidLine)) painter.drawLine(x1, y1, x2, y2) painter.end()
def setUp(self):
    self.constant = N.zeros((5,), N.Float) + 1.
class Molecule(CompositeChemicalObject, ChemicalObject):

    """
    Molecule

    Molecules consist of atoms and groups linked by bonds.
    """

    def __init__(self, molecule_spec, _memo=None, **properties):
        """
        :param molecule_spec: a string (not case sensitive) that specifies
                              the molecule name in the chemical database
        :type molecule_spec: str
        :keyword position: the position of the center of mass of the molecule
        :type position: Scientific.Geometry.Vector
        :keyword name: a name given to the molecule
        :type name: str
        :keyword configuration: the name of a configuration listed in the
                                database definition of the molecule, which
                                is used to initialize the atom positions.
                                If no configuration is specified, the
                                configuration named "default" will be used,
                                if it exists. Otherwise the atom positions
                                are undefined.
        :type configuration: str
        """
        if molecule_spec is not None:
            # molecule_spec is None when called from MoleculeFactory
            ChemicalObject.__init__(self, molecule_spec, _memo)
            properties = copy.copy(properties)
            CompositeChemicalObject.__init__(self, properties)
            self.bonds = Bonds.BondList(self.bonds)

    blueprintclass = Database.BlueprintMolecule

    def bondedTo(self, atom):
        return self.bonds.bondedTo(atom)

    def setBondAttributes(self):
        self.bonds.setBondAttributes()

    def clearBondAttributes(self):
        for a in self.atoms:
            a.clearBondAttribute()

    def _subunits(self):
        return self.groups

    def _descriptionSpec(self):
        return "M", None

    def addGroup(self, group, bond_atom_pairs):
        for a1, a2 in bond_atom_pairs:
            o1 = a1.topLevelChemicalObject()
            o2 = a2.topLevelChemicalObject()
            if set([o1, o2]) != set([self, group]):
                raise ValueError("bond %s-%s outside object"
                                 % (str(a1), str(a2)))
        self.groups.append(group)
        self.atoms = self.atoms + group.atoms
        group.parent = self
        self.clearBondAttributes()
        for a1, a2 in bond_atom_pairs:
            self.bonds.append(Bonds.Bond((a1, a2)))
        for b in group.bonds:
            self.bonds.append(b)

    # construct positions of missing hydrogens
    def findHydrogenPositions(self):
        """
        Find reasonable positions for hydrogen atoms that have no
        position assigned.

        This method uses a heuristic approach based on standard geometry
        data. It was developed for proteins and DNA and may not give good
        results for other molecules. It raises an exception if presented
        with a topology it cannot handle.
        """
        self.setBondAttributes()
        try:
            unknown = {}
            for a in self.atoms:
                if a.position() is None:
                    if a.symbol != 'H':
                        raise ValueError('position of ' + a.fullName() +
                                         ' is undefined')
                    bonded = a.bondedTo()[0]
                    unknown.setdefault(bonded, []).append(a)
            for a, list in unknown.items():
                bonded = a.bondedTo()
                n = len(bonded)
                known = [b for b in bonded if b.position() is not None]
                nb = len(list)
                try:
                    method = self._h_methods[a.symbol][n][nb]
                except KeyError:
                    raise ValueError("Can't handle this yet: " +
                                     a.symbol + ' with ' + str(n) +
                                     ' bonds (' + a.fullName() + ').')
                method(self, a, known, list)
        finally:
            self.clearBondAttributes()

    # default C-H bond length and X-C-H angle
    _ch_bond = 1.09 * Units.Ang
    _hch_angle = N.arccos(-1. / 3.) * Units.rad
    _nh_bond = 1.03 * Units.Ang
    _hnh_angle = 120. * Units.deg
    _oh_bond = 0.95 * Units.Ang
    _coh_angle = 114.9 * Units.deg
    _sh_bond = 1.007 * Units.Ang
    _csh_angle = 96.5 * Units.deg

    def _C4oneH(self, atom, known, unknown):
        r = atom.position()
        n0 = (known[0].position() - r).normal()
        n1 = (known[1].position() - r).normal()
        n2 = (known[2].position() - r).normal()
        n3 = (n0 + n1 + n2).normal()
        unknown[0].setPosition(r - self._ch_bond * n3)

    def _C4twoH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        r2 = known[1].position()
        plane = Objects3D.Plane(r, r1, r2)
        axis = -((r1 - r) + (r2 - r)).normal()
        plane = plane.rotate(Objects3D.Line(r, axis), 90. * Units.deg)
        cone = Objects3D.Cone(r, axis, 0.5 * self._hch_angle)
        sphere = Objects3D.Sphere(r, self._ch_bond)
        circle = sphere.intersectWith(cone)
        points = circle.intersectWith(plane)
        unknown[0].setPosition(points[0])
        unknown[1].setPosition(points[1])

    def _C4threeH(self, atom, known, unknown):
        self._tetrahedralH(atom, known, unknown, self._ch_bond)

    def _C3oneH(self, atom, known, unknown):
        r = atom.position()
        n1 = (known[0].position() - r).normal()
        n2 = (known[1].position() - r).normal()
        n3 = -(n1 + n2).normal()
        unknown[0].setPosition(r + self._ch_bond * n3)

    def _C3twoH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
        r2 = others[0].position()
        try:
            plane = Objects3D.Plane(r, r1, r2)
        except ZeroDivisionError:
            # We get here if all three points are colinear.
            # Add a small random displacement as a fix.
            from MMTK.Random import randomPointInSphere
            plane = Objects3D.Plane(r, r1, r2 + randomPointInSphere(0.001))
        axis = (r - r1).normal()
        cone = Objects3D.Cone(r, axis, 0.5 * self._hch_angle)
        sphere = Objects3D.Sphere(r, self._ch_bond)
        circle = sphere.intersectWith(cone)
        points = circle.intersectWith(plane)
        unknown[0].setPosition(points[0])
        unknown[1].setPosition(points[1])

    def _C2oneH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        x = r + self._ch_bond * (r - r1).normal()
        unknown[0].setPosition(x)

    def _N2oneH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
        r2 = others[0].position()
        try:
            plane = Objects3D.Plane(r, r1, r2)
        except ZeroDivisionError:
            # We get here when all three points are colinear.
            # Add a small random displacement as a fix.
            from MMTK.Random import randomPointInSphere
            plane = Objects3D.Plane(r, r1, r2 + randomPointInSphere(0.001))
        axis = (r - r1).normal()
        cone = Objects3D.Cone(r, axis, 0.5 * self._hch_angle)
        sphere = Objects3D.Sphere(r, self._nh_bond)
        circle = sphere.intersectWith(cone)
        points = circle.intersectWith(plane)
        unknown[0].setPosition(points[0])

    def _N3oneH(self, atom, known, unknown):
        r = atom.position()
        n1 = (known[0].position() - r).normal()
        n2 = (known[1].position() - r).normal()
        n3 = -(n1 + n2).normal()
        unknown[0].setPosition(r + self._nh_bond * n3)

    def _N3twoH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
        r2 = others[0].position()
        plane = Objects3D.Plane(r, r1, r2)
        axis = (r - r1).normal()
        cone = Objects3D.Cone(r, axis, 0.5 * self._hnh_angle)
        sphere = Objects3D.Sphere(r, self._nh_bond)
        circle = sphere.intersectWith(cone)
        points = circle.intersectWith(plane)
        unknown[0].setPosition(points[0])
        unknown[1].setPosition(points[1])

    def _N4threeH(self, atom, known, unknown):
        self._tetrahedralH(atom, known, unknown, self._nh_bond)

    def _N4twoH(self, atom, known, unknown):
        r = atom.position()
        r1 = known[0].position()
        r2 = known[1].position()
        plane = Objects3D.Plane(r, r1, r2)
        axis = -((r1 - r) + (r2 - r)).normal()
        plane = plane.rotate(Objects3D.Line(r, axis), 90. * Units.deg)
        cone = Objects3D.Cone(r, axis, 0.5 * self._hnh_angle)
        sphere = Objects3D.Sphere(r, self._nh_bond)
        circle = sphere.intersectWith(cone)
        points = circle.intersectWith(plane)
        unknown[0].setPosition(points[0])
        unknown[1].setPosition(points[1])

    def _N4oneH(self, atom, known, unknown):
        r = atom.position()
        n0 = (known[0].position() - r).normal()
        n1 = (known[1].position() - r).normal()
        n2 = (known[2].position() - r).normal()
        n3 = (n0 + n1 + n2).normal()
        unknown[0].setPosition(r - self._nh_bond * n3)

    def _O2(self, atom, known, unknown):
        others = known[0].bondedTo()
        for a in others:
            r = a.position()
            if a != atom and r is not None:
                break
        dihedral = 180. * Units.deg
        self._findPosition(unknown[0], atom.position(), known[0].position(),
                           r, self._oh_bond, self._coh_angle, dihedral)

    def _S2(self, atom, known, unknown):
        c2 = filter(lambda a: a.symbol == 'C', known[0].bondedTo())[0]
        self._findPosition(unknown[0], atom.position(), known[0].position(),
                           c2.position(), self._sh_bond, self._csh_angle,
                           180. * Units.deg)

    def _tetrahedralH(self, atom, known, unknown, bond):
        r = atom.position()
        n = (known[0].position() - r).normal()
        cone = Objects3D.Cone(r, n, N.arccos(-1. / 3.))
        sphere = Objects3D.Sphere(r, bond)
        circle = sphere.intersectWith(cone)
        others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
        others.remove(atom)
        other = others[0]
        ref = (Objects3D.Plane(circle.center, circle.normal)
               .projectionOf(other.position()) - circle.center).normal()
        p0 = circle.center + ref * circle.radius
        p0 = Objects3D.rotatePoint(
            p0, Objects3D.Line(circle.center, circle.normal),
            60. * Units.deg)
        p1 = Objects3D.rotatePoint(
            p0, Objects3D.Line(circle.center, circle.normal),
            120. * Units.deg)
        p2 = Objects3D.rotatePoint(
            p1, Objects3D.Line(circle.center, circle.normal),
            120. * Units.deg)
        unknown[0].setPosition(p0)
        unknown[1].setPosition(p1)
        unknown[2].setPosition(p2)

    def _findPosition(self, unknown, a1, a2, a3, bond, angle, dihedral):
        sphere = Objects3D.Sphere(a1, bond)
        cone = Objects3D.Cone(a1, a2 - a1, angle)
        plane = Objects3D.Plane(a3, a2, a1)
        plane = plane.rotate(Objects3D.Line(a1, a2 - a1), dihedral)
        points = sphere.intersectWith(cone).intersectWith(plane)
        for p in points:
            if (a1 - a2).cross(p - a1) * (plane.normal) > 0:
                unknown.setPosition(p)
                break

    _h_methods = {'C': {4: {3: _C4threeH,
                            2: _C4twoH,
                            1: _C4oneH},
                        3: {2: _C3twoH,
                            1: _C3oneH},
                        2: {1: _C2oneH}},
                  'N': {4: {3: _N4threeH,
                            2: _N4twoH,
                            1: _N4oneH},
                        3: {2: _N3twoH,
                            1: _N3oneH},
                        2: {1: _N2oneH}},
                  'O': {2: {1: _O2}},
                  'S': {2: {1: _S2}}}
# This gives the correspondence between the old 'percentage of trajectory
# length' parameter and the new 'energy fwhm' parameter.

from Scientific import N

dt = N.array([0.005, 0.005, 0.01, 0.005, 0.015, 0.01,
              0.005, 0.02, 0.005, 0.015, 0.01, 0.005])
n_frames = N.array([49, 19, 10, 9, 8, 10, 19, 9, 19, 9, 7, 19])
per_traj_length = N.array([80.0, 10.0, 20.0, 50.0, 10.0, 40.0,
                           80.0, 10.0, 20.0, 25.0, 10.0, 8.0])

fwhm_e = 2.0 * N.sqrt(2.0 * N.log(2.0)) * 100.0 / \
         (1.5192669 * dt * per_traj_length * (n_frames - 1.0))

print fwhm_e
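# One row of the table above evaluated by hand, as a sanity check of the same
# formula (plain NumPy instead of Scientific's N wrapper).
import numpy as np

dt, n_frames, per_traj_length = 0.005, 49, 80.0
fwhm_e_single = 2.0*np.sqrt(2.0*np.log(2.0)) * 100.0 / \
                (1.5192669 * dt * per_traj_length * (n_frames - 1.0))
print(fwhm_e_single)                             # roughly 8.1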
def hasValidPositions(self):
    return N.logical_and.reduce(
        N.ravel(N.less(self.array, Utility.undefined_limit)))
def __rdiv__(self, other):
    return self._arithmetic(other, lambda a, b: N.divide(b, a), True)
def hasPoint(self, point):
    center_line = LineSegment(self.center1, self.center2)
    pt = center_line.projectionOf(point)
    if pt is None:
        return 0
    return N.fabs((point - pt).length() - self.radius) < eps
def __init__(self, universe=None, friction=None, temperature=300. * Units.K, subspace=None, delta=None, sparse=False): """ :param universe: the system for which the normal modes are calculated; it must have a force field which provides the second derivatives of the potential energy :type universe: :class:~MMTK.Universe.Universe :param friction: the friction coefficient for each particle. Note: The friction coefficients are not mass-weighted, i.e. they have the dimension of an inverse time. :type friction: :class:~MMTK.ParticleProperties.ParticleScalar :param temperature: the temperature for which the amplitudes of the atomic displacement vectors are calculated. A value of None can be specified to have no scaling at all. In that case the mass-weighted norm of each normal mode is one. :type temperature: float :param subspace: the basis for the subspace in which the normal modes are calculated (or, more precisely, a set of vectors spanning the subspace; it does not have to be orthogonal). This can either be a sequence of :class:~MMTK.ParticleProperties.ParticleVector objects or a tuple of two such sequences. In the second case, the subspace is defined by the space spanned by the second set of vectors projected on the complement of the space spanned by the first set of vectors. The first set thus defines directions that are excluded from the subspace. The default value of None indicates a standard normal mode calculation in the 3N-dimensional configuration space. :param delta: the rms step length for numerical differentiation. The default value of None indicates analytical differentiation. Numerical differentiation is available only when a subspace basis is used as well. Instead of calculating the full force constant matrix and then multiplying with the subspace basis, the subspace force constant matrix is obtained by numerical differentiation of the energy gradients along the basis vectors of the subspace. If the basis is much smaller than the full configuration space, this approach needs much less memory. :type delta: float :param sparse: a flag that indicates if a sparse representation of the force constant matrix is to be used. This is of interest when there are no long-range interactions and a subspace of smaller size then 3N is specified. In that case, the calculation will use much less memory with a sparse representation. :type sparse: bool """ if universe == None: return Features.checkFeatures(self, universe) Core.NormalModes.__init__(self, universe, subspace, delta, sparse, ['array', 'inv_relaxation_times']) self.friction = friction self.temperature = temperature self.weights = N.sqrt(friction.array) self.weights = self.weights[:, N.NewAxis] self._forceConstantMatrix() ev = self._diagonalize() self.inv_relaxation_times = ev self.sort_index = N.argsort(self.inv_relaxation_times) self.array.shape = (self.nmodes, self.natoms, 3) self.cleanup()
def finalize(self): """Finalizes the calculations (e.g. averaging the total term, output files creations ...). """ if self.architecture == 'monoprocessor': t = self.trajectory else: # Load the whole trajectory set. t = Trajectory(None, self.trajectoryFilename, 'r') orderedAtoms = sorted(t.universe.atomList(), key=operator.attrgetter('index')) groups = [ Collection([orderedAtoms[ind] for ind in g]) for g in self.group ] # 'freqencies' = 1D Numeric array. Frequencies at which the DOS was computed frequencies = N.arange(self.nFrames) / (2.0 * self.nFrames * self.dt) # The NetCDF output file is opened for writing. outputFile = NetCDFFile(self.output, 'w') outputFile.title = self.__class__.__name__ outputFile.jobinfo = self.information + '\nOutput file written on: %s\n\n' % asctime( ) # Dictionnary whose keys are of the form Gi where i is the group number # and the entries are the list of the index of the atoms building the group. comp = 1 for g in self.group: outputFile.jobinfo += 'Group %d: %s\n' % (comp, [index for index in g]) comp += 1 # Some dimensions are created. outputFile.createDimension('NFRAMES', self.nFrames) # Creation of the NetCDF output variables. # The time. TIMES = outputFile.createVariable('time', N.Float, ('NFRAMES', )) TIMES[:] = self.times[:] TIMES.units = 'ps' # The resolution function. RESOLUTIONFUNCTION = outputFile.createVariable('resolution_function', N.Float, ('NFRAMES', )) RESOLUTIONFUNCTION[:] = self.resolutionFunction[:] RESOLUTIONFUNCTION.units = 'unitless' # Creation of the NetCDF output variables. # The frequencies. FREQUENCIES = outputFile.createVariable('frequency', N.Float, ('NFRAMES', )) FREQUENCIES[:] = frequencies[:] FREQUENCIES.units = 'THz' OMEGAS = outputFile.createVariable('angular_frequency', N.Float, ('NFRAMES', )) OMEGAS[:] = 2.0 * N.pi * frequencies[:] OMEGAS.units = 'rad ps-1' avacfTotal = N.zeros((self.nFrames), typecode=N.Float) adosTotal = N.zeros((self.nFrames), typecode=N.Float) comp = 1 totalMass = 0.0 for g in groups: AVACF = outputFile.createVariable('avacf-group%s' % comp, N.Float, ('NFRAMES', )) AVACF[:] = self.AVACF[comp][:] AVACF.units = 'rad^2*ps^-2' N.add(avacfTotal, self.AVACF[comp], avacfTotal) ADOS = outputFile.createVariable('ados-group%s' % comp, N.Float, ('NFRAMES', )) ADOS[:] = self.ADOS[comp][:] ADOS.units = 'rad^2*ps^-1' N.add(adosTotal, g.mass() * self.ADOS[comp], adosTotal) comp += 1 totalMass += g.mass() adosTotal *= 0.5 * self.dt / (self.nGroups * totalMass) AVACF = outputFile.createVariable('avacf-total', N.Float, ('NFRAMES', )) AVACF[:] = avacfTotal AVACF.units = 'rad^2*ps^-2' ADOS = outputFile.createVariable('ados-total', N.Float, ('NFRAMES', )) ADOS[:] = adosTotal ADOS.units = 'rad^2*ps^-1' asciiVar = sorted(outputFile.variables.keys()) outputFile.close() self.toPlot = { 'netcdf': self.output, 'xVar': 'angular_frequency', 'yVar': 'ados-total' } # Create an ASCII version of the NetCDF output file. convertNetCDFToASCII(inputFile = self.output,\ outputFile = os.path.splitext(self.output)[0] + '.cdl',\ variables = asciiVar)
def calc(self, atomIndexes, trajectory): """Calculates the contribution for one group. @param atomIndexes: the index of the atoms of the group. @type atomIndexes: list of integers. @param trajectory: the trajectory. @type trajectory: MMTK.Trajectory.Trajectory object """ orderedAtoms = sorted(trajectory.universe.atomList(), key=operator.attrgetter('index')) group = Collection([orderedAtoms[ind] for ind in atomIndexes]) j, m, n = self.wignerIndexes # Those matrix will store the quaternions and the CMS coming from the RBT trajectory. quaternions = N.zeros((self.nFrames, 4), typecode=N.Float) # Case of a moving reference. if self.stepwiseRBT: # The reference configuration is always the one of the previous frame excepted for the first frame # where it is set by definition to the first frame (could we think about a cyclic alternative way ?). for comp in range(self.nFrames): frameIndex = self.frameIndexes[comp] if comp == 0: previousFrame = self.firstFrame else: previousFrame = self.frameIndexes[comp - 1] refConfig = trajectory.configuration[previousFrame] # The RBT is created just for the current step. rbt = trajectory.readRigidBodyTrajectory(group,\ first = frameIndex,\ last = frameIndex + 1,\ skip = 1,\ reference = refConfig) # The corresponding quaternions and cms are stored in their corresponding matrix. quaternions[comp, :] = copy.copy(rbt.quaternions) # The simplest case, the reference frame is fixed. # A unique RBT is performed from first to last skipping skip steps and using refConfig as the reference. else: # If a fixed reference has been set. We can already set the reference configuration here. refConfig = trajectory.configuration[self.referenceFrame] # The RBT is created. rbt = trajectory.readRigidBodyTrajectory(group,\ first = self.first,\ last = self.last,\ skip = self.skip,\ reference = refConfig) quaternions = rbt.quaternions # c1 is the scaling factor converting Wigner function into spherical harmonics. It depends only on j. c1 = N.sqrt(((2.0 * j + 1) / (4.0 * N.pi))) quat2 = N.zeros(quaternions.shape, typecode=N.Complex) # quat2[:,0] refers to the (q0+iq3) of equation 3.55 quat2[:, 0] = quaternions[:, 0] + 1j * quaternions[:, 3] # quat2[:,2] refers to the (q2+iq1) of equation 3.55 quat2[:, 2] = quaternions[:, 2] + 1j * quaternions[:, 1] # quat2[:,1] refers to the (q0-iq3) of equation 3.55 quat2[:, 1] = N.conjugate(quat2[:, 0]) # quat2[:,3] refers to the (q2-iq1) of equation 3.55 quat2[:, 3] = N.conjugate(quat2[:, 2]) pp = self.preparePP(j, m, n) Djmn = N.add.reduce( N.multiply.reduce(quat2[:, N.NewAxis, :]**pp[0][N.NewAxis, :, :], -1) * pp[1], 1) if m == n: Djnm = Djmn else: pp = self.preparePP(j, n, m) Djnm = N.add.reduce( N.multiply.reduce( quat2[:, N.NewAxis, :]**pp[0][N.NewAxis, :, :], -1) * pp[1], 1) Djmn = Djmn * c1 Djnm = Djnm * c1 return atomIndexes, (Djmn, Djnm)
def calc(self, atomIndexes, trajectory):
    """Calculates the contribution for one group.

    @param atomIndexes: the indexes of the atoms of the group.
    @type atomIndexes: list of integers.

    @param trajectory: the trajectory.
    @type trajectory: MMTK.Trajectory.Trajectory object
    """
    orderedAtoms = sorted(trajectory.universe.atomList(),
                          key=operator.attrgetter('index'))
    group = Collection([orderedAtoms[ind] for ind in atomIndexes])

    rbtPerGroup = {}
    # These matrices will store the quaternions and the CMS coming from
    # the RBT trajectory.
    rbtPerGroup['quaternions'] = N.zeros((self.nFrames, 4), typecode=N.Float)
    rbtPerGroup['com'] = N.zeros((self.nFrames, 3), typecode=N.Float)
    rbtPerGroup['fit'] = N.zeros((self.nFrames, ), typecode=N.Float)
    rbtPerGroup['trajectory'] = {}

    # Case of a moving reference.
    if self.stepwiseRBT:
        # The reference configuration is always the one of the previous
        # frame, except for the first frame where it is set by definition
        # to the first frame (could we think about a cyclic alternative way?).
        for comp in range(self.nFrames):
            frameIndex = self.frameIndexes[comp]
            if comp == 0:
                previousFrame = self.firstFrame
            else:
                previousFrame = self.frameIndexes[comp - 1]
            refConfig = trajectory.configuration[previousFrame]

            # The RBT is created just for the current step.
            rbt = trajectory.readRigidBodyTrajectory(group,
                                                     first=frameIndex,
                                                     last=frameIndex + 1,
                                                     skip=1,
                                                     reference=refConfig)
            # The corresponding quaternions and cms are stored in their
            # corresponding matrix.
            rbtPerGroup['quaternions'][comp, :] = copy.copy(rbt.quaternions)
            rbtPerGroup['com'][comp, :] = copy.copy(rbt.cms)
            rbtPerGroup['fit'][comp] = copy.copy(rbt.fit)

    # The simplest case, the reference frame is fixed.
    # A unique RBT is performed from first to last, skipping skip steps
    # and using refConfig as the reference.
    else:
        # If a fixed reference has been set, we can already set the
        # reference configuration here.
        refConfig = trajectory.configuration[self.referenceFrame]

        # The RBT is created.
        rbt = trajectory.readRigidBodyTrajectory(group,
                                                 first=self.first,
                                                 last=self.last,
                                                 skip=self.skip,
                                                 reference=refConfig)
        # The corresponding quaternions and cms are stored in their
        # corresponding matrix.
        rbtPerGroup['quaternions'] = copy.copy(rbt.quaternions)
        rbtPerGroup['com'] = copy.copy(rbt.cms)
        rbtPerGroup['fit'] = copy.copy(rbt.fit)

    # The centers of mass defined by rbt.cms cannot be used here because
    # the selected reference frame can be outside the frames selected for
    # the Rigid Body Trajectory.
    centerOfMass = group.centerOfMass(refConfig)

    # Loop over the atoms of the group to set the RBT trajectory.
    for atom in group:
        rbtPerGroup['trajectory'][atom.index] = N.zeros((self.nFrames, 3),
                                                        typecode=N.Float)
        # The coordinates of the atoms are centered around the center of
        # mass of the group.
        xyz = refConfig[atom] - centerOfMass

        # Loop over the selected frames.
        for comp in range(self.nFrames):
            # The rotation matrix corresponding to the selected frame
            # in the RBT.
            transfo = Quaternion(rbtPerGroup['quaternions'][comp, :]).asRotation()

            if self.removeTranslation:
                # The transformation matrix corresponding to the selected
                # frame in the RBT.
                transfo = Translation(centerOfMass) * transfo
            # Compose with the CMS translation if the removeTranslation
            # flag is set off.
            else:
                # The transformation matrix corresponding to the selected
                # frame in the RBT.
                transfo = Translation(Vector(rbtPerGroup['com'][comp, :])) \
                          * transfo

            # The RBT is applied to the CMS-centered coordinates of the atom.
            rbtPerGroup['trajectory'][atom.index][comp, :] = transfo(Vector(xyz))

    return atomIndexes, rbtPerGroup
def _dihedral_parameters(p):
    return [p[4], Numeric.cos(p[5]), Numeric.sin(p[5]), p[6]]
def threeAngles(self, e1, e2, e3, tolerance=1e-7): """ Find three angles a1, a2, a3 such that Rotation(a1*e1)*Rotation(a2*e2)*Rotation(a3*e3) is equal to the rotation object. e1, e2, and e3 are non-zero vectors. There are two solutions, both of which are computed. @param e1: a rotation axis @type e1: L{Scientific.Geometry.Vector} @param e2: a rotation axis @type e2: L{Scientific.Geometry.Vector} @param e3: a rotation axis @type e3: L{Scientific.Geometry.Vector} @returns: a list containing two arrays of shape (3,), each containing the three angles of one solution @rtype: C{list} of C{N.array} @raise ValueError: if two consecutive axes are parallel """ # Written by Pierre Legrand ([email protected]) # # Basically this is a reimplementation of the David # Thomas's algorithm [1] described by Gerard Bricogne in [2]: # # [1] "Modern Equations of Diffractometry. Goniometry." D.J. Thomas # Acta Cryst. (1990) A46 Page 321-343. # # [2] "The ECC Cooperative Programming Workshop on Position-Sensitive # Detector Software." G. Bricogne, # Computational aspect of Protein Crystal Data Analysis, # Proceedings of the Daresbury Study Weekend (23-24/01/1987) # Page 122-126 e1 = e1.normal() e2 = e2.normal() e3 = e3.normal() # We are searching for the three angles a1, a2, a3 # If 2 consecutive axes are parallel: decomposition is not meaningful if (e1.cross(e2)).length() < tolerance or \ (e2.cross(e3)).length() < tolerance : raise ValueError('Consecutive parallel axes. Too many solutions') w = self(e3) # Solve the equation : _a.cosx + _b.sinx = _c _a = e1 * e3 - (e1 * e2) * (e2 * e3) _b = e1 * (e2.cross(e3)) _c = e1 * w - (e1 * e2) * (e2 * e3) _norm = (_a**2 + _b**2)**0.5 # Checking for possible errors in initial Rot matrix if _norm == 0: raise ValueError('FAILURE 1, norm = 0') if abs(_c / _norm) > 1 + tolerance: raise ValueError( 'FAILURE 2' + 'malformed rotation Tensor (non orthogonal?) %.8f' % (_c / _norm)) #if _c/_norm > 1: raise ValueError('Step1: No solution') _th = angleFromSineAndCosine(_b / _norm, _a / _norm) _xmth = N.arccos(_c / _norm) # a2a and a2b are the two possible solutions to the equation. a2a = mod_angle((_th + _xmth), 2 * N.pi) a2b = mod_angle((_th - _xmth), 2 * N.pi) solutions = [] # for each solution, find the two other angles (a1, a3). for a2 in (a2a, a2b): R2 = Rotation(e2, a2) v = R2(e3) v1 = v - (v * e1) * e1 w1 = w - (w * e1) * e1 norm = ((v1 * v1) * (w1 * w1))**0.5 if norm == 0: # in that case rotation 1 and 3 are about the same axis # so any solution for rotation 1 is OK a1 = 0. else: cosa1 = (v1 * w1) / norm sina1 = v1 * (w1.cross(e1)) / norm a1 = mod_angle(angleFromSineAndCosine(sina1, cosa1), 2 * N.pi) R3 = Rotation(e2, -1 * a2) * Rotation(e1, -1 * a1) * self # u = normalized test vector perpendicular to e3 # if e2 and e3 are // we have an exception before. # if we take u = e1^e3 then it will not work for # Euler and Kappa axes. u = (e2.cross(e3)).normal() cosa3 = u * R3(u) sina3 = u * (R3(u).cross(e3)) a3 = mod_angle(angleFromSineAndCosine(sina3, cosa3), 2 * N.pi) solutions.append(N.array([a1, a2, a3])) # Gives the closest solution to 0,0,0 first if N.add.reduce(solutions[0]**2) > \ N.add.reduce(solutions[1]**2): solutions = [solutions[1], solutions[0]] return solutions
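# A hedged usage sketch of threeAngles, assuming ScientificPython's Vector and
# Rotation classes (Scientific.Geometry and Scientific.Geometry.Transformation):
# build a rotation from three known angles and recover both angle solutions.
from Scientific.Geometry import Vector
from Scientific.Geometry.Transformation import Rotation

ex, ey, ez = Vector(1., 0., 0.), Vector(0., 1., 0.), Vector(0., 0., 1.)
r = Rotation(ex, 0.1)*Rotation(ey, 0.2)*Rotation(ez, 0.3)
for angles in r.threeAngles(ex, ey, ez):
    # each solution (a1, a2, a3) satisfies
    # Rotation(ex, a1)*Rotation(ey, a2)*Rotation(ez, a3) == r
    print(angles)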
def coherentScatteringFunction(self, q, time_range=(0., None, None), subset=None, weights=None, random_vectors=15, first_mode=6): """ :param q: the angular wavenumber :type q: float :param time_range: the time values at which the mean-square displacement is evaluated, specified as a range tuple (first, last, step). The defaults are first=0, last= 20 times the longest vibration perdiod, and step defined such that 300 points are used in total. :type time_range: tuple :param subset: the subset of the universe used in the calculation (default: the whole universe) :type subset: :class:~MMTK.Collections.GroupOfAtoms :param weights: the weight to be given to each atom in the average (default: coherent scattering lengths) :type weights: :class:~MMTK.ParticleProperties.ParticleScalar :param random_vectors: the number of random direction vectors used in the orientational average :type random_vectors: int :param first_mode: the first mode to be taken into account for the fluctuation calculation. The default value of 6 is right for molecules in vacuum. :type first_mode: int :returns: the Coherent Scattering Function as a function of time :rtype: Scientific.Functions.Interpolation.InterpolatingFunction """ if subset is None: subset = self.universe if weights is None: weights = self.universe.getParticleScalar('b_coherent') mask = subset.booleanMask() weights = N.repeat(weights.array, mask.array) weights = weights / N.sqrt(N.add.reduce(weights * weights)) friction = N.repeat(self.friction.array, mask.array) r = N.repeat(self.universe.configuration().array, mask.array) first, last, step = (time_range + (None, None))[:3] if last is None: last = 3. / self.rawMode(first_mode).inv_relaxation_time if step is None: step = (last - first) / 300. time = N.arange(first, last, step) natoms = subset.numberOfAtoms() kT = Units.k_B * self.temperature fcoh = N.zeros((len(time), ), N.Complex) random_vectors = Random.randomDirections(random_vectors) for v in random_vectors: phase = N.exp(-1.j * q * N.dot(r, v.array)) for ai in range(natoms): fbt = N.zeros((natoms, len(time)), N.Float) for i in range(first_mode, self.nmodes): irt = self.rawMode(i).inv_relaxation_time d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \ / N.sqrt(friction) ft = N.exp(-irt * time) / irt N.add(fbt, d[ai] * d[:, N.NewAxis] * ft[N.NewAxis, :], fbt) N.add(fbt, (-0.5 / irt) * (d[ai]**2 + d[:, N.NewAxis]**2), fbt) N.add( fcoh, weights[ai] * phase[ai] * N.dot(weights * N.conjugate(phase), N.exp(kT * fbt)), fcoh) return InterpolatingFunction((time, ), fcoh.real / len(random_vectors))
def incoherentScatteringFunction(self, q, time_range=(0., None, None), subset=None, random_vectors=15, first_mode=6): """ :param q: the angular wavenumber :type q: float :param time_range: the time values at which the mean-square displacement is evaluated, specified as a range tuple (first, last, step). The defaults are first=0, last= 20 times the longest vibration perdiod, and step defined such that 300 points are used in total. :type time_range: tuple :param subset: the subset of the universe used in the calculation (default: the whole universe) :type subset: :class:~MMTK.Collections.GroupOfAtoms :param random_vectors: the number of random direction vectors used in the orientational average :type random_vectors: int :param first_mode: the first mode to be taken into account for the fluctuation calculation. The default value of 6 is right for molecules in vacuum. :type first_mode: int :returns: the Incoherent Scattering Function as a function of time :rtype: Scientific.Functions.Interpolation.InterpolatingFunction """ if subset is None: subset = self.universe mask = subset.booleanMask() weights_inc = self.universe.getParticleScalar('b_incoherent') weights_inc = N.repeat(weights_inc.array**2, mask.array) weights_inc = weights_inc / N.add.reduce(weights_inc) friction = N.repeat(self.friction.array, mask.array) mass = N.repeat(self.universe.masses().array, mask.array) r = N.repeat(self.universe.configuration().array, mask.array) first, last, step = (time_range + (None, None))[:3] if last is None: last = 3. / self.weighedMode(first_mode).inv_relaxation_time if step is None: step = (last - first) / 300. time = N.arange(first, last, step) natoms = subset.numberOfAtoms() kT = Units.k_B * self.temperature finc = N.zeros((len(time), ), N.Float) eisf = 0. random_vectors = Random.randomDirections(random_vectors) for v in random_vectors: phase = N.exp(-1.j * q * N.dot(r, v.array)) faat = N.zeros((natoms, len(time)), N.Float) eisf_sum = N.zeros((natoms, ), N.Float) for i in range(first_mode, self.nmodes): irt = self.rawMode(i).inv_relaxation_time d = q * N.repeat((self.rawMode(i)*v).array, mask.array) \ / N.sqrt(friction) ft = (N.exp(-irt * time) - 1.) / irt N.add(faat, d[:, N.NewAxis]**2 * ft[N.NewAxis, :], faat) N.add(eisf_sum, -d**2 / irt, eisf_sum) N.add(finc, N.sum(weights_inc[:, N.NewAxis] * N.exp(kT * faat), 0), finc) eisf = eisf + N.sum(weights_inc * N.exp(kT * eisf_sum)) return InterpolatingFunction((time, ), finc / len(random_vectors))
def partitionIndex(self, x):
    return (int(N.floor(x[0] / self.partition_size)),
            int(N.floor(x[1] / self.partition_size)),
            int(N.floor(x[2] / self.partition_size)))
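# A sketch of the cell-index computation above: each coordinate is divided by
# the partition size and floored, so a point maps to the cubic cell that
# contains it (the partition size and the point are made up).
import numpy as np

partition_size = 0.5
x = np.array([1.2, -0.3, 0.49])
index = tuple(int(np.floor(xi/partition_size)) for xi in x)
print(index)                                     # (2, -1, 0)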
def staticStructureFactor(self, q_range=(1., 15.), subset=None, weights=None, random_vectors=15, first_mode=6): """ :param q_range: the range of angular wavenumber values :type q_range: tuple :param subset: the subset of the universe used in the calculation (default: the whole universe) :type subset: :class:~MMTK.Collections.GroupOfAtoms :param weights: the weight to be given to each atom in the average (default: coherent scattering lengths) :type weights: :class:~MMTK.ParticleProperties.ParticleScalar :param random_vectors: the number of random direction vectors used in the orientational average :type random_vectors: int :param first_mode: the first mode to be taken into account for the fluctuation calculation. The default value of 6 is right for molecules in vacuum. :type first_mode: int :returns: the Static Structure Factor as a function of angular wavenumber :rtype: Scientific.Functions.Interpolation.InterpolatingFunction """ if subset is None: subset = self.universe if weights is None: weights = self.universe.getParticleScalar('b_coherent') mask = subset.booleanMask() weights = N.repeat(weights.array, mask.array) weights = weights / N.sqrt(N.add.reduce(weights * weights)) friction = N.repeat(self.friction.array, mask.array) r = N.repeat(self.universe.configuration().array, mask.array) first, last, step = (q_range + (None, ))[:3] if step is None: step = (last - first) / 50. q = N.arange(first, last, step) kT = Units.k_B * self.temperature natoms = subset.numberOfAtoms() sq = 0. random_vectors = Random.randomDirections(random_vectors) for v in random_vectors: sab = N.zeros((natoms, natoms), N.Float) for i in range(first_mode, self.nmodes): irt = self.rawMode(i).inv_relaxation_time d = N.repeat((self.rawMode(i)*v).array, mask.array) \ / N.sqrt(friction) sab = sab + (d[N.NewAxis, :] - d[:, N.NewAxis])**2 / irt sab = sab[N.NewAxis, :, :] * q[:, N.NewAxis, N.NewAxis]**2 phase = N.exp(-1.j*q[:, N.NewAxis] * N.dot(r, v.array)[N.NewAxis, :]) \ * weights[N.NewAxis, :] temp = N.sum(phase[:, :, N.NewAxis] * N.exp(-0.5 * kT * sab), 1) temp = N.sum(N.conjugate(phase) * temp, 1) sq = sq + temp.real return InterpolatingFunction((q, ), sq / len(random_vectors))
def findClusters(self, preferences, max_iterations=500,
                 convergence=50, damping=0.5):
    """
    @param preferences: the preference values for the cluster
        identification. This can be either a single number,
        or a sequence with one value per item.
    @type preferences: C{float} or sequence of C{float}
    @param max_iterations: the number of iterations at which the
        algorithm is stopped even if there is no convergence.
    @type max_iterations: C{int}
    @param convergence: the number of iterations during which the
        cluster decomposition must remain stable before it is
        returned as converged.
    @type convergence: C{int}
    @param damping: a number between 0 and 1 that determines how fast
        the availability and responsibility values can change between
        iterations.
    @type damping: C{float}
    """
    preferences = N.array(preferences)
    if len(preferences.shape) == 0:
        preferences = preferences + N.zeros((self.nitems, ), N.Float)
    if len(preferences) != self.nitems:
        raise ValueError("Number of preferences != number of items")
    # Add a small amount of noise to the similarities to break ties
    noise_scale = 1.e-12 * (self.largest_similarity
                            - self.smallest_similarity)
    s = N.concatenate([self.similarities, preferences])
    for i in range(len(s)):
        s[i] += noise_scale * random.random()
    a = N.zeros(s.shape, N.Float)   # availabilities
    r = N.zeros(s.shape, N.Float)   # responsibilities
    iterations_left = max_iterations
    convergence_count = 0
    self.exemplar = N.zeros((self.nitems, ), N.Int)
    while True:
        a, r = _affinityPropagation(self, s, a, r, damping)
        e = a + r
        exemplar = N.zeros((self.nitems, ), N.Int)
        for i in range(self.nitems):
            ii, ik = self.e_indices[i]
            exemplar[i] = ik[N.argmax(N.take(e, ii))]
        if N.logical_and.reduce(exemplar == self.exemplar):
            convergence_count += 1
            if convergence_count == convergence:
                break
        else:
            self.exemplar = exemplar
        iterations_left -= 1
        if iterations_left == 0:
            raise ValueError("no convergence in %d iterations"
                             % max_iterations)
    # Group the items by their exemplars, exemplar first in each cluster
    clusters = []
    indices = N.arange(self.nitems)
    exemplar_indices = N.repeat(indices, self.exemplar == indices)
    for i in exemplar_indices:
        members = list(N.repeat(indices,
                                self.exemplar == self.exemplar[i]))
        members.remove(i)
        members.insert(0, i)
        clusters.append([self.items[m] for m in members])
    return clusters
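# Standalone illustration (not part of the clustering module): the final grouping
# step of findClusters above, rewritten in modern NumPy with a made-up exemplar
# array.  Items that are their own exemplar define the clusters; each cluster
# lists its exemplar first, followed by the other members.
import numpy as np

items = ['a', 'b', 'c', 'd', 'e', 'f']
exemplar = np.array([0, 0, 3, 3, 3, 0])            # item i is assigned to exemplar[i]

indices = np.arange(len(items))
exemplar_indices = indices[exemplar == indices]     # items that are their own exemplar
clusters = []
for i in exemplar_indices:
    members = list(indices[exemplar == exemplar[i]])
    members.remove(i)
    members.insert(0, i)
    clusters.append([items[m] for m in members])
print(clusters)   # [['a', 'b', 'f'], ['d', 'c', 'e']]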
def simfunc(p1, p2): return -N.sum((p1 - p2)**2)
N.take(rpos, dataset.a_update_indices_2[i])) a_new[:-dataset.nitems] = N.minimum(0., a_new[:-dataset.nitems]) a = damping * a + (1 - damping) * a_new return a, r if __name__ == "__main__": points = N.array([[-2.341500, 3.696800], [-1.109200, 3.111700], [-1.566900, 1.835100], [-2.658500, 0.664900], [-4.031700, 2.845700], [-3.081000, 2.101100], [2.588000, 1.781900], [3.292300, 3.058500], [4.031700, 1.622300], [3.081000, -0.611700], [0.264100, 0.398900], [1.320400, 2.207400], [0.193700, 3.643600], [1.954200, -0.505300], [1.637300, 1.409600], [-0.123200, -1.516000], [-1.355600, -3.058500], [0.017600, -4.016000], [1.003500, -3.590400], [0.017600, -2.420200], [-1.531700, -0.930900], [-1.144400, 0.505300], [0.616200, -1.516000], [1.707700, -2.207400], [2.095100, 3.430900]]) def simfunc(p1, p2): return -N.sum((p1 - p2)**2) data = DataSet(points, simfunc, symmetric=True) clusters = [N.array(c) for c in data.findClusters(data.median_similarity)] for c in clusters: print c from Gnuplot import plot
def _setupAIndices1(self): indices = N.zeros((self.nsimilarities, ), N.Int) for i in range(self.nitems): for k, index in self.index[i].items(): indices[index] = self.index[k][k] self.a_update_indices_1 = indices
def send(self, data, destination, tag): if destination != 0: raise MPIError("invalid MPI destination") self.messages.append((tag, Numeric.array(data, copy=1).ravel()))
def _addData(self, data, weights): data = N.array(data, N.Float) weights = N.array(weights, N.Float) mask = N.logical_and(N.less_equal(data, self.max), N.greater_equal(data, self.min)) data = N.repeat(data, mask) weights = N.repeat(weights, mask) data = N.floor((data - self.min)/self.bin_width).astype(N.Int) nbins = self.array.shape[0] histo = N.add.reduce(weights*N.equal(N.arange(nbins)[:,N.NewAxis], data), -1) histo[-1] = histo[-1] + N.add.reduce(N.repeat(weights, N.equal(nbins, data))) self.array[:, 1] = self.array[:, 1] + histo
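# Standalone illustration (not part of the original histogram class): the same
# weighted binning in modern NumPy.  Values outside [min, max] are discarded,
# each remaining value is mapped to a bin index, and the weights are accumulated
# per bin, with values equal to the upper limit counted in the last bin.
# np.add.at plays the role of the N.equal/N.add.reduce construction above.
import numpy as np

bin_min, bin_max, nbins = 0.0, 1.0, 10
bin_width = (bin_max - bin_min) / nbins

data = np.array([0.05, 0.15, 0.15, 0.97, 1.5, -0.2])
weights = np.array([1.0, 1.0, 0.5, 2.0, 1.0, 1.0])

mask = (data >= bin_min) & (data <= bin_max)
data, weights = data[mask], weights[mask]
idx = np.minimum(np.floor((data - bin_min) / bin_width).astype(int), nbins - 1)

histogram = np.zeros(nbins)
np.add.at(histogram, idx, weights)     # histogram[i] += weight for every idx == i
print(histogram)                       # bins 0, 1 and 9 receive 1.0, 1.5 and 2.0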
def _intersectSphereCone(sphere, cone): if sphere.center != cone.center: raise GeomError("Not yet implemented") from_center = sphere.radius * N.cos(cone.angle) radius = sphere.radius * N.sin(cone.angle) return Circle(cone.center + from_center * cone.axis, cone.axis, radius)
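# Standalone check (not part of the geometry module): when the cone's apex
# coincides with the sphere's centre, the intersection circle lies a distance
# R*cos(angle) along the axis and has radius R*sin(angle), so every point of the
# circle is at distance R from the centre.  Plain NumPy arrays stand in for the
# Sphere/Cone/Circle classes; the numbers are arbitrary.
import numpy as np

center = np.array([1.0, 2.0, 3.0])
radius = 2.0
axis = np.array([0.0, 0.0, 1.0])           # unit cone axis
angle = 0.4                                # half-opening angle in radians

circle_center = center + radius * np.cos(angle) * axis
circle_radius = radius * np.sin(angle)

# sample the circle in the plane perpendicular to the axis and verify the distances
u, w = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
phi = np.linspace(0., 2. * np.pi, 100)
points = circle_center + circle_radius * (np.outer(np.cos(phi), u)
                                          + np.outer(np.sin(phi), w))
assert np.allclose(np.linalg.norm(points - center, axis=1), radius)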
def __call__(self, **options): # Process the keyword arguments self.setCallOptions(options) # Check if the universe has features not supported by the integrator Features.checkFeatures(self, self.universe) RT = R * self.getOption('T') delta_t = self.getOption('delta_t') if 'steps_per_trial' in self.call_options.keys(): steps_per_trial = self.getOption('steps_per_trial') ntrials = self.getOption('steps') / steps_per_trial else: steps_per_trial = self.getOption('steps') ntrials = 1 if 'max_diff' in self.call_options.keys(): max_diff = self.getOption('max_diff') else: max_diff = 1000. if 'normalize' in self.call_options.keys(): normalize = self.getOption('normalize') else: normalize = False # Seed the random number generator if 'random_seed' in self.call_options.keys(): np.random.seed(self.getOption('random_seed')) # Get the universe variables needed by the integrator masses = self.universe.masses() fixed = self.universe.getAtomBooleanArray('fixed') nt = self.getOption('threads') comm = self.getOption('mpi_communicator') evaluator = self.universe.energyEvaluator(threads=nt, mpi_communicator=comm) evaluator = evaluator.CEvaluator() late_args = (masses.array, fixed.array, evaluator, N.zeros((0, 2), N.Int), N.zeros( (0, ), N.Float), N.zeros((1, ), N.Int), N.zeros((0, ), N.Float), N.zeros( (2, ), N.Float), N.zeros( (0, ), N.Float), N.zeros((1, ), N.Float), delta_t, self.getOption('first_step'), steps_per_trial, self.getActions(), 'Velocity Verlet step') xs = [] energies = [] # Store initial configuration and potential energy xo = np.copy(self.universe.configuration().array) pe_o = self.universe.energy() acc = 0 for t in range(ntrials): # Initialize the velocity self.universe.initializeVelocitiesToTemperature( self.getOption('T')) # Store total energy eo = pe_o + self.universe.kineticEnergy() # Run the velocity verlet integrator self.run(MMTK_dynamics.integrateVV, (self.universe, self.universe.configuration().array, self.universe.velocities().array) + late_args) # Decide whether to accept the move pe_n = self.universe.energy() en = pe_n + self.universe.kineticEnergy() diff = np.abs(en - eo) if (not np.isnan(en)) and (diff < max_diff): xo = np.copy(self.universe.configuration().array) pe_o = pe_n acc += 1 if normalize: self.universe.normalizePosition() else: # print diff self.universe.setConfiguration(Configuration( self.universe, xo)) xs.append(np.copy(self.universe.configuration().array)) energies.append(pe_o) return (xs, energies, acc, ntrials, delta_t)
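# Standalone illustration (not the MMTK integrator): the control flow of the trial
# loop above for a single particle in a 1-D harmonic well, in plain NumPy.  Each
# trial resamples a Maxwell velocity, runs a short velocity Verlet segment, and
# keeps the new configuration only if the total energy stayed finite and changed
# by less than max_diff; otherwise the previous configuration is kept.  All
# parameters are illustrative.
import numpy as np

def potential(x):
    return 0.5 * x**2

def force(x):
    return -x

def velocity_verlet(x, v, dt, nsteps):
    for _ in range(nsteps):
        v += 0.5 * dt * force(x)
        x += dt * v
        v += 0.5 * dt * force(x)
    return x, v

rng = np.random.default_rng(0)
kT, dt, steps_per_trial, ntrials, max_diff = 1.0, 0.1, 20, 200, 0.01

x = 1.0
pe = potential(x)
xs, energies, acc = [], [], 0
for _ in range(ntrials):
    v = rng.normal(scale=np.sqrt(kT))          # fresh velocity at temperature T
    e_old = pe + 0.5 * v**2
    x_new, v_new = velocity_verlet(x, v, dt, steps_per_trial)
    pe_new = potential(x_new)
    e_new = pe_new + 0.5 * v_new**2
    if np.isfinite(e_new) and abs(e_new - e_old) < max_diff:
        x, pe = x_new, pe_new                  # accept the move
        acc += 1
    xs.append(x)
    energies.append(pe)
print("acceptance ratio:", acc / float(ntrials))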
def project(self, axis, plane): self.depth = Numeric.dot(self.points, axis) self.projection = Numeric.dot(self.points, plane)
def _intersectBoxBox(box1, box2): c1 = N.maximum(box1.corners[0], box2.corners[0]) c2 = N.minimum(box1.corners[1], box2.corners[1]) if N.logical_or.reduce(N.greater_equal(c1, c2)): return None return Box(Vector(c1), Vector(c2))
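# Standalone illustration (plain NumPy, not the geometry classes): two axis-aligned
# boxes overlap exactly when the component-wise maximum of their lower corners stays
# below the component-wise minimum of their upper corners; those two corners bound
# the intersection box.
import numpy as np

box1 = (np.array([0., 0., 0.]), np.array([2., 2., 2.]))   # (lower corner, upper corner)
box2 = (np.array([1., 1., 1.]), np.array([3., 3., 3.]))

c1 = np.maximum(box1[0], box2[0])
c2 = np.minimum(box1[1], box2[1])
overlap = None if np.any(c1 >= c2) else (c1, c2)
print(overlap)    # (array([1., 1., 1.]), array([2., 2., 2.]))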
def check_var(self, varName, sValues, pValues): errorMax = max(abs(N.ravel(sValues - pValues))) self.assertAlmostEqual(errorMax, 0.0, 6)
def __rsub__(self, other): return self._arithmetic(other, lambda a, b: N.subtract(b, a))
def __call__(self, **options): # Process the keyword arguments self.setCallOptions(options) # Check if the universe has features not supported by the integrator Features.checkFeatures(self, self.universe) RT = R*self.getOption('T') delta_t = self.getOption('delta_t') if 'steps_per_trial' in self.call_options.keys(): steps_per_trial = self.getOption('steps_per_trial') ntrials = self.getOption('steps')/steps_per_trial else: steps_per_trial = self.getOption('steps') ntrials = 1 if 'normalize' in self.call_options.keys(): normalize = self.getOption('normalize') else: normalize = False # Get the universe variables needed by the integrator masses = self.universe.masses() fixed = self.universe.getAtomBooleanArray('fixed') nt = self.getOption('threads') comm = self.getOption('mpi_communicator') evaluator = self.universe.energyEvaluator(threads=nt, mpi_communicator=comm) evaluator = evaluator.CEvaluator() late_args = ( masses.array, fixed.array, evaluator, N.zeros((0, 2), N.Int), N.zeros((0, ), N.Float), N.zeros((1,), N.Int), N.zeros((0,), N.Float), N.zeros((2,), N.Float), N.zeros((0,), N.Float), N.zeros((1,), N.Float), delta_t, self.getOption('first_step'), steps_per_trial, self.getActions(), 'Velocity Verlet step') xs = [] energies = [] acc = 0 for t in range(ntrials): # Initialize the velocity self.universe.initializeVelocitiesToTemperature(self.getOption('T')) # Store previous configuration and initial energy xo = self.universe.copyConfiguration() pe_o = self.universe.energy() eo = pe_o + self.universe.kineticEnergy() # Run the velocity verlet integrator self.run(MMTK_dynamics.integrateVV, (self.universe, self.universe.configuration().array, self.universe.velocities().array) + late_args) # Decide whether to accept the move pe_n = self.universe.energy() en = pe_n + self.universe.kineticEnergy() if not math.isnan(en): acc += 1 if normalize: self.universe.normalizePosition() else: self.universe.setConfiguration(xo) pe_n = pe_o xs.append(self.universe.copyConfiguration().array) energies.append(pe_n) return (xs, energies, float(acc)/float(ntrials), delta_t)
def hasPoint(self, point): return N.fabs((point - self.center).length() - self.radius) < eps
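# Standalone illustration (plain NumPy): the same surface test, with a point on the
# unit sphere.  The tolerance value here is an assumption; the original module
# defines its own eps.
import numpy as np

eps = 1.e-7
center, radius = np.array([0., 0., 0.]), 1.0
point = np.array([0., 0.6, 0.8])
print(abs(np.linalg.norm(point - center) - radius) < eps)   # True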