def get_imvector(h, m3):
    """Computes the eigenvector corresponding to the imaginary mode.

    IN  h   = hessian
        m3  = mass vector (dimension = 1 x 3*natoms)
    OUT imv = eigenvector corresponding to the imaginary mode
    """
    info("@get_imvector", verbosity.high)
    if h.size != m3.size ** 2:
        raise ValueError("@get_imvector. Initial hessian size does not match system size.")

    m = 1.0 / (m3 ** 0.5)
    mm = np.outer(m, m)
    hm = np.multiply(h, mm)

    # Symmetrize to use linalg.eigh
    hmT = hm.T
    hm = (hmT + hm) / 2.0

    d, w = np.linalg.eigh(hm)
    freq = np.sign(d) * np.absolute(d) ** 0.5 / (2 * np.pi * 3e10 * 2.4188843e-17)

    info(" @GEOP: 1 frequency %4.1f cm^-1" % freq[0], verbosity.low)
    info(" @GEOP: 2 frequency %4.1f cm^-1" % freq[1], verbosity.low)
    info(" @GEOP: 3 frequency %4.1f cm^-1" % freq[2], verbosity.low)

    if freq[0] > -80 and freq[0] < 0:
        raise ValueError(" @GEOP: Small negative frequency %4.1f cm^-1" % freq[0])
    elif freq[0] > 0:
        raise ValueError("@GEOP: The smallest frequency is positive. We are not at a transition state. Please check your hessian.")

    info(" @get_imvector: We stretch along the mode with freq %f cm^-1" % freq[0], verbosity.low)

    imv = w[:, 0] * (m3[:] ** 0.5)
    imv = imv / np.linalg.norm(imv)

    return imv
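# A minimal sketch (not from i-PI) of the unit conversion used above: the magic
# numbers are the speed of light in cm/s (the code rounds it to 3e10) and the
# atomic unit of time in seconds, so sqrt(eigenvalue) in atomic units becomes a
# wavenumber in cm^-1.
import numpy as np

c_cm_s = 2.99792458e10        # speed of light, cm/s
t_au = 2.418884326e-17        # atomic unit of time, s
conv = 1.0 / (2 * np.pi * c_cm_s * t_au)
# one atomic unit of angular frequency is roughly 2.2e5 cm^-1
print("1 a.u. of angular frequency ~ %.4g cm^-1" % conv)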
def open(self): """Creates a new socket. Used so that we can create a interface object without having to also create the associated socket object. """ if self.mode == "unix": self.server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self.server.bind("/tmp/ipi_" + self.address) info("Created unix socket with address " + self.address, verbosity.medium) except: raise ValueError("Error opening unix socket. Check if a file " + ("/tmp/ipi_" + self.address) + " exists, and remove it if unused.") elif self.mode == "inet": self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.server.bind((self.address,self.port)) info("Created inet socket with address " + self.address + " and port number " + str(self.port), verbosity.medium) else: raise NameError("InterfaceSocket mode " + self.mode + " is not implemented (should be unix/inet)") self.server.listen(self.slots) self.server.settimeout(SERVERTIMEOUT) self.clients = [] self.requests = [] self.jobs = []
def bind(self, geop): # call bind function from DummyOptimizer super(HessianOptimizer, self).bind(geop) # Specific for RateOptimizer self.hessian_update = geop.hessian_update self.hessian_asr = geop.hessian_asr self.hessian_init = geop.hessian_init # self.output_maker = geop.output_maker self.im.bind(self) # Hessian self.initial_hessian = None if geop.hessian.size != (self.beads.natoms * 3 * self.beads.q.size): if geop.hessian.size == (self.beads.natoms * 3)**2: self.initial_hessian = geop.hessian.copy() geop.hessian = np.zeros((self.beads.natoms * 3, self.beads.q.size), float) elif geop.hessian.size == 0 and geop.hessian_init == 'true': info(" Initial hessian is not provided. We are going to compute it.", verbosity.low) geop.hessian = np.zeros((self.beads.natoms * 3, self.beads.q.size)) if ((self.beads.q - self.beads.q[0]) == 0).all() and self.beads.nbeads > 1: raise ValueError("""We need a initial hessian in order to create our initial instanton geometry. Please provide a (1-bead) hessian or an initial instanton geometry.""") else: raise ValueError(" 'Hessian_init' is false, an initial hessian (of the proper size) must be provided.") self.hessian = geop.hessian
def init_file(mode, filename, dimension="length", units="automatic", cell_units="automatic"): """Reads a @mode file and returns the data contained in it. Args: mode: Type of file that should be read. filename: A string giving the name of the pdb file to be read from. Returns: A list of Atoms objects as read from each frame of the pdb file, and a Cell object as read from the final pdb frame. """ rfile = open(filename, "r") ratoms = [] info("Initializing from file %s. Dimension: %s, units: %s, cell_units: %s" % (filename, dimension, units, cell_units), verbosity.low) while True: # while loop, so that more than one configuration can be given # so multiple beads can be initialized at once. try: ret = read_file(mode, rfile, dimension=dimension, units=units, cell_units=cell_units) except EOFError: break ratoms.append(ret["atoms"]) return ratoms, ret["cell"] # if multiple frames, the last cell is returned
def process_units(comment, cell, data, names, masses, natoms, dimension="automatic", units="automatic", cell_units="automatic", mode="xyz"): """Convert the data in the file according to the units written in the i-PI format. Args: comment: cell: data: names: masses: output: Returns: """ dimension, units, cell_units = auto_units(comment, dimension, units, cell_units, mode) info("Interpreting input with dimension %s, units %s and cell units %s" % (dimension, units, cell_units), verbosity.high) # Units transformation cell *= unit_to_internal('length', cell_units, 1) # cell units transformation data *= unit_to_internal(dimension, units, 1) # units transformation # Return data as i-PI structures cell = Cell(cell) atoms = Atoms(natoms) atoms.q[:] = data atoms.names[:] = names atoms.m[:] = masses return { "atoms": atoms, "cell": cell, }
def __init__(self, init, beads, nm, cell, fcomponents, ensemble=None, motion=None, prefix=""): """Initialises System class. Args: init: A class to deal with initializing the system. beads: A beads object giving the atom positions. cell: A cell object giving the system box. fcomponents: A list of force components that are active for each replica of the system. bcomponents: A list of force components that are considered as bias, and act on each replica of the system. ensemble: An ensemble object giving the objects necessary for producing the correct ensemble. nm: A class dealing with path NM operations. prefix: A string used to differentiate the output files of different systems. """ info(" # Initializing system object ", verbosity.low) self.prefix = prefix self.init = init self.ensemble = ensemble self.motion = motion self.beads = beads self.cell = cell self.nm = nm self.fcomp = fcomponents self.forces = Forces() self.properties = Properties() self.trajs = Trajectories()
def __init__(self, nbeads, natoms): """Initializes nm_trans. Args: nbeads: The number of beads. natoms: The number of atoms. """ self.nbeads = nbeads self.natoms = natoms try: import pyfftw info("Import of PyFFTW successful", verbosity.medium) self.qdummy = pyfftw.n_byte_align_empty((nbeads, 3*natoms), 16, 'float32') self.qnmdummy = pyfftw.n_byte_align_empty((nbeads//2+1, 3*natoms), 16, 'complex64') self.fft = pyfftw.FFTW(self.qdummy, self.qnmdummy, axes=(0,), direction='FFTW_FORWARD') self.ifft = pyfftw.FFTW(self.qnmdummy, self.qdummy, axes=(0,), direction='FFTW_BACKWARD') except ImportError: #Uses standard numpy fft library if nothing better #is available info("Import of PyFFTW unsuccessful, using NumPy library instead", verbosity.medium) self.qdummy = np.zeros((nbeads,3*natoms), dtype='float32') self.qnmdummy = np.zeros((nbeads//2+1,3*natoms), dtype='complex64') def dummy_fft(self): self.qnmdummy = np.fft.rfft(self.qdummy, axis=0) def dummy_ifft(self): self.qdummy = np.fft.irfft(self.qnmdummy, n=self.nbeads, axis=0) self.fft = lambda: dummy_fft(self) self.ifft = lambda: dummy_ifft(self)
def _poll_loop(self): """The main thread loop. Runs until either the program finishes or a kill call is sent. Updates the pool of clients every UPDATEFREQ loops and loops every latency seconds until _poll_true becomes false. """ info(" @SOCKET: Starting the polling thread main loop.", verbosity.low) self._poll_iter = UPDATEFREQ while self._poll_true: time.sleep(self.latency) # makes sure to remove the last dead client as soon as possible -- and to get clients if we are dry if self._poll_iter >= UPDATEFREQ or len(self.clients)==0 or (len(self.clients) > 0 and not(self.clients[0].status & Status.Up)): self.pool_update() self._poll_iter = 0 self._poll_iter += 1 self.pool_distribute() if os.path.exists("EXIT"): # softexit info(" @SOCKET: Soft exit request from file EXIT. Flushing job queue.", verbosity.low) # releases all pending requests for r in self.requests: r["status"] = "Exit" for c in self.clients: try: c.shutdown(socket.SHUT_RDWR) c.close() except: pass # flush it all down the drain self.clients = [] self.jobs = [] self._poll_thread = None
def close(self):
    """Closes down the socket."""

    info(" @SOCKET: Shutting down the driver interface.", verbosity.low)
    self.server.shutdown(socket.SHUT_RDWR)
    self.server.close()
    if self.mode == "unix":
        os.unlink("/tmp/ipi_" + self.address)
def red2comp(h, nbeads, natoms):
    """Takes the reduced physical hessian and constructs the 'complete' one
    (i.e. including all the zero blocks)."""

    info("\n @Instanton: Creating 'complete' physical hessian \n", verbosity.high)

    i = natoms * 3
    ii = nbeads * i
    h0 = np.zeros((ii, ii), float)

    for j in range(nbeads):
        h0[j * i:(j + 1) * i, j * i:(j + 1) * i] = h[:, j * i:(j + 1) * i]
    return h0
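# Toy check (assumes the red2comp(h, nbeads, natoms) defined directly above):
# with 2 beads and 1 atom the reduced (3 x 6) hessian is placed block by block
# on the diagonal of a 6 x 6 'complete' hessian, all other entries staying zero.
import numpy as np

h_red = np.arange(18, dtype=float).reshape(3, 6)   # 3 x (3*nbeads) reduced hessian
h_full = red2comp(h_red, 2, 1)
assert h_full.shape == (6, 6)
assert np.all(h_full[0:3, 0:3] == h_red[:, 0:3])
assert np.all(h_full[3:6, 3:6] == h_red[:, 3:6])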
def get_all(self): """Driver routine. When one of the force, potential or virial are called, this sends the atoms and cell to the client code, requesting that it calculates the potential, forces and virial tensor. This then waits until the driver is finished, and then returns the ufvx list. Returns: A list of the form [potential, force, virial, extra]. """ # because we thread over many systems and outputs, we might get called # more than once. keep track of how many times we are called so we # can make sure to wait until the last call has returned before we release with self._threadlock: self._getallcount += 1 # this is converting the distribution library requests into [ u, f, v ] lists if self.request is None: self.request = self.queue() # sleeps until the request has been evaluated while self.request["status"] != "Done": if self.request["status"] == "Exit" or softexit.triggered: # now, this is tricky. we are stuck here and we cannot return meaningful results. # if we return, we may as well output wrong numbers, or mess up things. # so we can only call soft-exit and wait until that is done. then kill the thread # we are in. softexit.trigger(" @ FORCES : cannot return so will die off here") while softexit.exiting: time.sleep(self.ff.latencyt) sys.exit() time.sleep(self.ff.latency) # print diagnostics about the elapsed time info("# forcefield %s evaluated in %f (queue) and %f (dispatched) sec." % (self.ff.name, self.request["t_finished"] - self.request["t_queued"], self.request["t_finished"] - self.request["t_dispatched"]), verbosity.debug) # data has been collected, so the request can be released and a slot # freed up for new calculations result = self.request["result"] # reduce the reservation count (and wait for all calls to return) with self._threadlock: self._getallcount -= 1 # releases just once, but wait for all requests to be complete if self._getallcount == 0: self.ff.release(self.request) self.request = None else: while self._getallcount > 0: time.sleep(self.ff.latency) return result
def _poll_loop(self):
    """Polling loop.

    Loops over the different requests, checking to see when they have
    finished.
    """

    info(" @ForceField: Starting the polling thread main loop.", verbosity.low)
    while self._doloop[0]:
        time.sleep(self.latency)
        self.poll()
def pool_update(self):
    """Deals with keeping the pool of client drivers up-to-date during a
    force calculation step.

    Deals with maintaining the client list. Clients that have disconnected
    are removed and their jobs removed from the list of running jobs, and
    new clients are connected to the server.
    """

    for c in self.clients[:]:
        if not (c.status & Status.Up):
            try:
                warning(" @SOCKET: Client " + str(c.peername) + " died or got unresponsive (C). Removing from the list.", verbosity.low)
                c.shutdown(socket.SHUT_RDWR)
                c.close()
            except:
                pass
            c.status = Status.Disconnected
            self.clients.remove(c)
            for [k, j] in self.jobs[:]:
                if j is c:
                    # removes pair in a robust way
                    self.jobs = [w for w in self.jobs if not (w[0] is k and w[1] is j)]
                    k["status"] = "Queued"
                    k["start"] = -1

    keepsearch = True
    while keepsearch:
        readable, writable, errored = select.select([self.server], [], [], 0.0)
        if self.server in readable:
            client, address = self.server.accept()
            client.settimeout(TIMEOUT)
            driver = DriverSocket(client)
            info(" @SOCKET: Client asked for connection from " + str(address) + ". Now hand-shaking.", verbosity.low)
            driver.poll()
            if driver.status & Status.Up:
                self.clients.append(driver)
                info(" @SOCKET: Handshaking was successful. Added to the client list.", verbosity.low)
            else:
                warning(" @SOCKET: Handshaking failed. Dropping connection.", verbosity.low)
                client.shutdown(socket.SHUT_RDWR)
                client.close()
        else:
            keepsearch = False
def softexit(self):
    """Deals with a soft exit request.

    Tries to ensure that a consistent restart checkpoint is written out.
    """

    if self.step < self.tsteps:
        self.step += 1
    if not self.rollback:
        info("SOFTEXIT: Saving the latest status at the end of the step")
        self.chk.store()

    self.chk.write(store=False)
def __init__(self, mode, syslist, fflist, outputs, prng, smotion=None, step=0, tsteps=1000, ttime=0, threads=False): """Initialises Simulation class. Args: mode: What kind of simulation is this syslist: A list of system objects fflist: A list of forcefield objects prng: A random number object. smotion: A "super-motion" class specifying what to do with different system replicas outputs: A list of output objects. step: An optional integer giving the current simulation time step. Defaults to 0. tsteps: An optional integer giving the total number of steps. Defaults to 1000. ttime: The simulation running time. Used on restart, to keep a cumulative total. """ info(" # Initializing simulation object ", verbosity.low) self.prng = prng self.mode = mode self.threading = threads dself = dd(self) self.syslist = syslist for s in syslist: s.prng = self.prng # bind the system's prng to self prng s.init.init_stage1(s) #! TODO - does this have any meaning now that we introduce the smotion class? if self.mode == "md" and len(syslist) > 1: warning("Multiple systems will evolve independently in a '" + self.mode + "' simulation.") self.fflist = {} for f in fflist: self.fflist[f.name] = f self.outtemplate = outputs dself.step = depend_value(name="step", value=step) self.tsteps = tsteps self.ttime = ttime self.smotion = smotion self.chk = None self.rollback = True
def __init__(self, beads, cell, forces, ensemble, prng, outputs, nm, init, step=0, tsteps=1000, ttime=0): """Initialises Simulation class. Args: beads: A beads object giving the atom positions. cell: A cell object giving the system box. forces: A forcefield object giving the force calculator for each replica of the system. ensemble: An ensemble object giving the objects necessary for producing the correct ensemble. prng: A random number object. outputs: A list of output objects. nm: A class dealing with path NM operations. init: A class to deal with initializing the simulation object. step: An optional integer giving the current simulation time step. Defaults to 0. tsteps: An optional integer giving the total number of steps. Defaults to 1000. ttime: The simulation running time. Used on restart, to keep a cumulative total. """ info(" # Initializing simulation object ", verbosity.low ) self.prng = prng self.ensemble = ensemble self.beads = beads self.cell = cell self.nm = nm # initialize the configuration of the system self.init = init init.init_stage1(self) self.flist = forces self.forces = Forces() self.outputs = outputs dset(self, "step", depend_value(name="step", value=step)) self.tsteps = tsteps self.ttime = ttime self.properties = Properties() self.trajs = Trajectories() self.chk = None self.rollback = True
def write(self, store=True): """Writes out the required trajectories. Used for both the checkpoint files and the soft-exit restart file. We have slightly different behaviour for these two different types of checkpoint file, as the soft-exit files have their store() function called automatically, and we do not want this to be updated as the status of the simulation after a soft-exit call is unlikely to be in a consistent state. On the other hand, the standard checkpoint files are not automatically updated in this way, and we must manually store the current state of the system before writing them. Args: store: A boolean saying whether the state of the system should be stored before writing the checkpoint file. """ if self._storing: info("@ CHECKPOINT: Write called while storing. Force re-storing", verbosity.low) self.store() if not (self.simul.step + 1) % self.stride == 0: return # function to use to open files open_function = open_backup if self.overwrite: filename = self.filename if self._continued: open_function = open else: filename = self.filename + "_" + str(self.step) # Advance the step counter before saving, so next time the correct index will be loaded. if store: self.step += 1 self.store() self.status.step.store(self.simul.step + 1) with open_function(filename, "w") as check_file: check_file.write(self.status.write(name="simulation")) # Do not use backed up file open on subsequent writes. self._continued = True
def spring_hessian(natoms, nbeads, m3, omega2, mode='half'): """Compute the 'spring hessian' OUT h = hessian with only the spring terms ('spring hessian') """ info(" @spring_hessian", verbosity.high) ii = natoms * 3 h = np.zeros([ii * nbeads, ii * nbeads]) if nbeads == 1: return h # Diagonal h_sp = m3 * omega2 diag1 = np.diag(h_sp) diag2 = np.diag(2.0 * h_sp) if mode == 'half': i = 0 h[i * ii:(i + 1) * ii, i * ii:(i + 1) * ii] += diag1 i = nbeads - 1 h[i * ii:(i + 1) * ii, i * ii:(i + 1) * ii] += diag1 for i in range(1, nbeads - 1): h[i * ii:(i + 1) * ii, i * ii:(i + 1) * ii] += diag2 elif mode == 'splitting' or mode == 'full': for i in range(0, nbeads): h[i * ii:(i + 1) * ii, i * ii:(i + 1) * ii] += diag2 else: raise ValueError("We can't compute the spring hessian.") # Non-Diagonal ndiag = np.diag(-h_sp) # Quasi-band for i in range(0, nbeads - 1): h[i * ii:(i + 1) * ii, (i + 1) * ii:(i + 2) * ii] += ndiag h[(i + 1) * ii:(i + 2) * ii, i * ii:(i + 1) * ii] += ndiag # Corner if mode == 'full': h[0:ii, (nbeads - 1) * ii:(nbeads) * ii] += ndiag h[(nbeads - 1) * ii:(nbeads) * ii, 0:ii] += ndiag return h
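# Quick sanity check (assumes the spring_hessian defined above): one atom, three
# beads, unit masses and omega2 = 1 in 'half' (open path) mode give a symmetric,
# block-tridiagonal matrix whose diagonal blocks carry factors 1, 2, 1.
import numpy as np

h = spring_hessian(natoms=1, nbeads=3, m3=np.ones(3), omega2=1.0, mode='half')
assert h.shape == (9, 9)
assert np.allclose(h, h.T)
assert np.allclose(np.diag(h), [1, 1, 1, 2, 2, 2, 1, 1, 1])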
def mass(cls, label):
    """Function to access the mass_list attribute.

    Note that this does not require an instance of the Elements class to be
    created, as this is a class method. Therefore using Elements.mass(label)
    will give the mass of the element with the atomic symbol given by label.

    Args:
        label: The atomic symbol of the atom whose mass is required.

    Returns:
        A float giving the mass of the atom with atomic symbol label.
    """

    try:
        return cls.mass_list[label] * Constants.amu
    except KeyError:
        info("Unknown element given, you must specify the mass", verbosity.low)
        return -1.0
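# Usage sketch (assumes the i-PI Elements class that owns the mass() classmethod
# above): because the lookup is a classmethod backed by mass_list, no instance is
# needed, and a label missing from mass_list warns and returns -1.0.
m_h = Elements.mass("H")     # hydrogen mass in atomic units (amu value * Constants.amu)
m_bad = Elements.mass("Zz")  # assuming "Zz" is not in mass_list: warns, returns -1.0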
def BFGS(x0, d0, fdf, fdf0, invhessian, big_step, tol, itmax): """BFGS minimization. Uses approximate line minimizations. Does one step. Arguments: x0: initial point d0: initial direction for line minimization fdf: function and gradient (mapper) fdf0: initial function and gradient value big_step: limit on step length tol: convergence tolerance itmax: maximum number of allowed iterations """ info(" @MINIMIZE: Started BFGS", verbosity.debug) zeps = 1.0e-13 u0, g0 = fdf0 # Maximum step size n = len(x0.flatten()) linesum = np.dot(x0.flatten(), x0.flatten()) big_step = big_step * max(np.sqrt(linesum), n) # Perform approximate line minimization in direction d0 x, u, g = min_approx(fdf, x0, fdf0, d0, big_step, tol, itmax) d_x = np.subtract(x, x0) # Update invhessian. # Here we are breaking the fixatom constrain I d_g = np.subtract(g, g0) hdg = np.dot(invhessian, d_g.flatten()) fac = np.dot(d_g.flatten(), d_x.flatten()) fae = np.dot(d_g.flatten(), hdg) sumdg = np.dot(d_g.flatten(), d_g.flatten()) sumxi = np.dot(d_x.flatten(), d_x.flatten()) # Skip update if not 'fac' sufficiently positive if fac > np.sqrt(zeps * sumdg * sumxi): fac = 1.0 / fac fad = 1.0 / fae # Compute BFGS term dg = np.subtract((fac * d_x).flatten(), fad * hdg) invhessian += np.outer(d_x, d_x) * fac - np.outer(hdg, hdg) * fad + np.outer(dg, dg) * fae info(" @MINIMIZE: Updated invhessian", verbosity.debug) else: info(" @MINIMIZE: Skipped invhessian update; direction x gradient insufficient", verbosity.debug) # Update direction # Here we are breaking the fixatom constrain II d = np.dot(invhessian, -g.flatten()) d0[:] = d.reshape(d_x.shape) info(" @MINIMIZE: Updated search direction", verbosity.debug)
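# A minimal standalone sketch (not part of i-PI) of the rank-two inverse-hessian
# update used in BFGS() above, applied once on a 2D quadratic u(x) = 0.5 x^T A x.
# The matrix A and the two sample points are made up for illustration; the assert
# verifies the secant condition H_new . d_g = d_x that the update guarantees.
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])      # true hessian of the toy quadratic
invhessian = np.eye(2)                      # initial inverse-hessian guess
x0 = np.array([1.0, 1.0]); g0 = A.dot(x0)   # gradient of 0.5 x^T A x at x0
x1 = np.array([0.2, -0.3]); g1 = A.dot(x1)  # a new point and its gradient

d_x, d_g = x1 - x0, g1 - g0
hdg = invhessian.dot(d_g)
fac = 1.0 / d_g.dot(d_x)
fae = d_g.dot(hdg)
fad = 1.0 / fae
dg = fac * d_x - fad * hdg
invhessian += np.outer(d_x, d_x) * fac - np.outer(hdg, hdg) * fad + np.outer(dg, dg) * fae

# secant condition: the updated matrix maps the gradient change back onto the step
assert np.allclose(invhessian.dot(d_g), d_x)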
def set_vector(iif, dq, rq): """Initializes a vector from an another vector. If the first dimension is different, i.e. the two vectors correspond to a different number of beads, then the ring polymer contraction/expansion is used to rescale the original vector to the one used in the simulation, as described in the paper T. E. Markland and D. E. Manolopoulos, J. Chem. Phys. 129, 024105, (2008). Args: iif: An Initializer object specifying the value of a vector. dq: The vector to be initialized. rq: The vector to initialize from. """ (nbeads, natoms) = rq.shape natoms /= 3 (dbeads, datoms) = dq.shape datoms /= 3 # Check that indices make sense if iif.index < 0 and natoms != datoms: raise ValueError("Initialization tries to mix up structures with different atom numbers.") if iif.index >= datoms: raise ValueError("Cannot initialize single atom as atom index %d is larger than the number of atoms" % iif.index) if iif.bead >= dbeads: raise ValueError("Cannot initialize single bead as bead index %d is larger than the number of beads" % iif.bead) if iif.bead < 0: # we are initializing the path res = nm_rescale(nbeads, dbeads) # path rescaler if nbeads != dbeads: info(" # Initialize is rescaling from %5d beads to %5d beads" % (nbeads, dbeads), verbosity.low) if iif.index < 0: dq[:] = res.b1tob2(rq) else: # we are initializing a specific atom dq[:, 3 * iif.index:3 * (iif.index + 1)] = res.b1tob2(rq) else: # we are initializing a specific bead if iif.index < 0: dq[iif.bead] = rq else: dq[iif.bead, 3 * iif.index:3 * (iif.index + 1)] = rq
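# Sketch of the ring-polymer contraction/expansion used in set_vector above: a
# 4-bead path for one atom is resampled onto 8 beads through the normal-mode
# representation. The import path for nm_rescale is an assumption; the class and
# its b1tob2 method are the ones called in the function above.
import numpy as np
from ipi.utils.nmtransform import nm_rescale   # assumed import path

rq = np.random.rand(4, 3)      # (nbeads, 3*natoms) source vector
res = nm_rescale(4, 8)         # rescaler from 4 to 8 beads
dq = res.b1tob2(rq)            # interpolated 8-bead path
assert dq.shape == (8, 3)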
def bind(self, beads=None, atoms=None, pm=None, nm=None, prng=None, fixdof=None): """Binds the appropriate degrees of freedom to the thermostat. This takes an object with degrees of freedom, and makes their momentum and mass vectors members of the thermostat. It also then creates the objects that will hold the data needed in the thermostat algorithms and the dependency network. Args: beads: An optional beads object to take the mass and momentum vectors from. atoms: An optional atoms object to take the mass and momentum vectors from. pm: An optional tuple containing a single momentum value and its conjugate mass. prng: An optional pseudo random number generator object. Defaults to Random(). fixdof: An optional integer which can specify the number of constraints applied to the system. Defaults to zero. Raises: TypeError: Raised if no appropriate degree of freedom or object containing a momentum vector is specified for the thermostat to couple to. """ super(ThermoGLE, self).bind(beads=beads, atoms=atoms, pm=pm, prng=prng, fixdof=fixdof) dself = dd(self) # allocates, initializes or restarts an array of s's if self.s.shape != (self.ns + 1, len(dself.m)): if len(self.s) > 0: warning("Mismatch in GLE s array size on restart, will reinitialise to free particle.", verbosity.low) self.s = np.zeros((self.ns + 1, len(dself.m))) # Initializes the s vector in the free-particle limit info(" GLE additional DOFs initialised to the free-particle limit.", verbosity.low) SC = stab_cholesky(self.C * Constants.kb) self.s[:] = np.dot(SC, self.prng.gvec(self.s.shape)) else: info("GLE additional DOFs initialised from input.", verbosity.medium)
def Instanton(x0, f0, f1, h, update, asr, im, gm, big_step, opt, m, omaker): """Do one step. Update hessian for the new position. Update the position and force inside the mapper. Input: x0 = last positions f0 = last physical forces f1 = last spring forces h = physical hessian update = how to update the hessian asr = how to clean the hessian im = spring mapper gm = gradient mapper big_step = limit on step length opt = optimization algorithm to use m = type of calculation: rate or splitting""" info(" @Instanton_step", verbosity.high) if opt == 'nichols': # Construct hessian and get eigenvalues and eigenvector h0 = red2comp(h, im.dbeads.nbeads, im.dbeads.natoms) # construct complete hessian from reduced h1 = np.add(im.h, h0) # add spring terms to the physical hessian d, w = clean_hessian(h1, im.dbeads.q, im.dbeads.natoms, im.dbeads.nbeads, im.dbeads.m, im.dbeads.m3, asr) # Find new movement direction if m == 'rate': d_x = nichols(f0, f1, d, w, im.dbeads.m3, big_step) elif m == 'splitting': d_x = nichols(f0, f1, d, w, im.dbeads.m3, big_step, mode=0) elif opt == 'NR': h_up_band = banded_hessian(h, im) # create upper band matrix f = (f0 + f1).reshape(im.dbeads.natoms * 3 * im.dbeads.nbeads, 1) d_x = invmul_banded(h_up_band, f) d_x.shape = im.dbeads.q.shape # Rescale step d_x_max = np.amax(np.absolute(d_x)) info(" @Instanton: Current step norm = %g" % d_x_max, verbosity.medium) if np.amax(np.absolute(d_x)) > big_step: info(" @Instanton: Attempted step norm = %g, scaled down to %g" % (d_x_max, big_step), verbosity.low) d_x *= big_step / np.amax(np.absolute(d_x_max)) # Make movement and get new energy (u) and forces(f) using mapper x = x0 + d_x im(x, ret=False) # Only to update the mapper u, g2 = gm(x) f = -g2 # Update hessian if update == 'powell': d_g = np.subtract(f0, f) i = im.dbeads.natoms * 3 for j in range(im.dbeads.nbeads): aux = h[:, j * i:(j + 1) * i] dg = d_g[j, :] dx = d_x[j, :] Powell(dx, dg, aux) elif update == 'recompute': get_hessian(h, gm, x, omaker)
def step(self, step=None): """ Does one simulation time step. Attributes: qtime : The time taken in updating the real positions. tr : current trust radius """ self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if step == 0: info(" @GEOP: Initializing BFGSTRM", verbosity.debug) self.old_x[:] = self.beads.q self.old_u[:] = self.forces.pot self.old_f[:] = self.forces.f if len(self.fixatoms) > 0: for dqb in self.old_f: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 # Make one step. ( A step is finished when a movement is accepted) BFGSTRM(self.old_x, self.old_u, self.old_f, self.hessian, self.tr, self.gm, self.big_step) info(" Number of force calls: %d" % (self.gm.fcount)); self.gm.fcount = 0 # Update positions and forces self.beads.q = self.gm.dbeads.q self.forces.transfer_forces(self.gm.dforces) # This forces the update of the forces # Exit simulation step d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x))) self.exitstep(self.forces.pot, self.old_u, d_x_max)
def init_stage2(self, simul):
    """Initializes the simulation -- second stage.

    Takes a simulation object which has been fully generated,
    and restarts additional information such as the thermostat internal state.

    Args:
        simul: A simulation object to be initialized.

    Raises:
        ValueError: Raised if there is a problem with the initialization,
            if something that should have been has not been, or if the objects
            that have been specified are not compatible with each other.
    """

    for (k, v) in self.queue:
        info(" # Initializer (stage 2) parsing " + str(k) + " object.", verbosity.high)

        if k == "gle":
            # read thermostat parameters from file
            if not (hasattr(simul.ensemble, "thermostat")):
                raise ValueError("Ensemble does not have a thermostat to initialize")
            if not (hasattr(simul.ensemble.thermostat, "s")):
                raise ValueError("There is nothing to initialize in non-GLE thermostats")
            ssimul = simul.ensemble.thermostat.s
            if v.mode == "manual":
                sinput = v.value.copy()
                if sinput.size != ssimul.size:
                    raise ValueError("Size mismatch in thermostat initialization data")
                sinput.shape = ssimul.shape
            elif v.mode == "chk":
                rmotion = init_chk(v.value)[2]
                if not hasattr(rmotion, "thermostat") or not hasattr(rmotion.thermostat, "s"):
                    raise ValueError("Checkpoint file does not contain usable thermostat data")
                sinput = rmotion.thermostat.s.copy()
                if sinput.shape != ssimul.shape:
                    raise ValueError("Shape mismatch in thermostat initialization data")

            # if all the preliminary checks are good, we can initialize the s's
            ssimul[:] = sinput
def open_backup(filename, mode="r", buffering=-1):
    """A wrapper around `open` which saves backup files.

    If the file is opened in write mode and already exists, it is first
    backed up under a new file name, keeping all previous backups. Then, a
    new file is opened for writing.

    For reference: https://docs.python.org/2/library/functions.html#open

    Args:
        The same as for `open`.

    Returns:
        An open file as returned by `open`.
    """

    if mode.startswith("w"):
        # If writing, make sure nothing is overwritten.

        i = 0
        fn_backup = filename
        while os.path.isfile(fn_backup):
            fn_backup = "#" + filename + "#%i#" % i
            i += 1

        if fn_backup != filename:
            os.rename(filename, fn_backup)
            info("Backup performed: {0:s} -> {1:s}".format(filename, fn_backup), verbosity.low)

    else:
        # There is no need to back up.
        # `open` will sort out whether `mode` is valid.
        pass

    return open(filename, mode, buffering)
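# Usage sketch (assumes the open_backup defined above): repeatedly opening the
# same file for writing never clobbers data; the existing file is first renamed
# to '#<name>#0#', then '#<name>#1#', and so on.
with open_backup("restart.xml", "w") as f:
    f.write("first\n")
with open_backup("restart.xml", "w") as f:   # previous file becomes '#restart.xml#0#'
    f.write("second\n")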
def invmul_banded(A, B, posdef=False):
    """Solves the linear system A.x = B, where A is in upper banded form.

    Used for the Newton-Raphson step: solve H.h = -G with invmul_banded(H, -G)
    and take the step x += h to find a minimum or a transition state.
    """

    try:
        from scipy import linalg
        info("Import of scipy successful", verbosity.medium)
    except ImportError:
        raise ValueError("scipy is required to solve the banded linear system in invmul_banded.")

    if posdef:
        return linalg.solveh_banded(A, B)
    else:
        u = len(A) - 1
        l = u
        newA = sym_band(A)
        return linalg.solve_banded((l, u), newA, B)
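# Minimal example (not from i-PI) of the upper banded storage scipy expects for
# solveh_banded: a symmetric tridiagonal matrix with 2 on the diagonal and -1 on
# the off-diagonals, stored as a (2, n) array whose last row is the main diagonal
# and whose first row is the (left-padded) superdiagonal.
import numpy as np
from scipy import linalg

n = 5
ab = np.zeros((2, n))
ab[0, 1:] = -1.0     # superdiagonal (first entry is padding)
ab[1, :] = 2.0       # main diagonal
b = np.ones(n)
x = linalg.solveh_banded(ab, b)

# cross-check against the dense matrix
A = np.diag(2.0 * np.ones(n)) + np.diag(-np.ones(n - 1), 1) + np.diag(-np.ones(n - 1), -1)
assert np.allclose(A.dot(x), b)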
def __init__(self, latency=1.0, name="", H=None, xref=None, vref=0.0, pars=None, dopbc=False, threaded=True): """Initialises FFDebye. Args: pars: Optional dictionary, giving the parameters needed by the driver. """ # a socket to the communication library is created or linked # NEVER DO PBC -- forces here are computed without. super(FFDebye, self).__init__(latency, name, pars, dopbc=False) if H is None: raise ValueError("Must provide the Hessian for the Debye crystal.") if xref is None: raise ValueError("Must provide a reference configuration for the Debye crystal.") self.H = H self.xref = xref self.vref = vref eigsys = np.linalg.eigh(self.H) info(" @ForceField: Hamiltonian eigenvalues: " + ' '.join(map(str, eigsys[0])), verbosity.medium)
def open(self): """Creates a new socket. Used so that we can create a interface object without having to also create the associated socket object. """ if self.mode == "unix": self.server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self.server.bind("/tmp/ipi_" + self.address) info("Created unix socket with address " + self.address, verbosity.medium) except socket.error: raise RuntimeError( "Error opening unix socket. Check if a file " + ("/tmp/ipi_" + self.address) + " exists, and remove it if unused.") elif self.mode == "inet": self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.server.bind((self.address, self.port)) info( "Created inet socket with address " + self.address + " and port number " + str(self.port), verbosity.medium, ) else: raise NameError("InterfaceSocket mode " + self.mode + " is not implemented (should be unix/inet)") self.server.listen(self.slots) self.server.settimeout(SERVERTIMEOUT) # these are the two main objects the socket interface should worry about and manage self.clients = [ ] # list of active clients (working or ready to compute) self.jobs = [] # list of jobs
def get_nmm(self): """Returns dynamical mass factors, i.e. the scaling of normal mode masses that determine the path dynamics (but not statics).""" # also checks that the frequencies and the mode given in init are # consistent with the beads and ensemble dmf = np.zeros(self.nbeads,float) dmf[:] = 1.0 if self.mode == "rpmd": if len(self.nm_freqs) > 0: warning("nm.frequencies will be ignored for RPMD mode.", verbosity.low) elif self.mode == "manual": if len(self.nm_freqs) != self.nbeads-1: raise ValueError("Manual path mode requires (nbeads-1) frequencies, one for each internal mode of the path.") for b in range(1, self.nbeads): sk = self.omegak[b]/self.nm_freqs[b-1] dmf[b] = sk**2 elif self.mode == "pa-cmd": if len(self.nm_freqs) > 1: warning("Only the first element in nm.frequencies will be considered for PA-CMD mode.", verbosity.low) if len(self.nm_freqs) == 0: raise ValueError("PA-CMD mode requires the target frequency of all the internal modes.") for b in range(1, self.nbeads): sk = self.omegak[b]/self.nm_freqs[0] info(" ".join(["NM FACTOR", str(b), str(sk), str(self.omegak[b]), str(self.nm_freqs[0])]), verbosity.medium) dmf[b] = sk**2 elif self.mode == "wmax-cmd": if len(self.nm_freqs) > 2: warning("Only the first two element in nm.frequencies will be considered for WMAX-CMD mode.", verbosity.low) if len(self.nm_freqs) < 2: raise ValueError("WMAX-CMD mode requires [wmax, wtarget]. The normal modes will be scaled such that the first internal mode is at frequency wtarget and all the normal modes coincide at frequency wmax.") wmax = self.nm_freqs[0] wt = self.nm_freqs[1] for b in range(1, self.nbeads): sk = 1.0/np.sqrt((wt)**2*(1+(wmax/self.omegak[1])**2)/(wmax**2+(self.omegak[b])**2)) dmf[b] = sk**2 return dmf
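# Worked sketch of the PA-CMD branch above, assuming the standard free
# ring-polymer normal-mode frequencies omega_k = 2*omega_n*sin(k*pi/nbeads):
# scaling the mass of mode k by (omega_k/omega_target)^2 shifts every internal
# mode onto the same target frequency.
import numpy as np

nbeads, omega_n, omega_t = 8, 100.0, 500.0
k = np.arange(1, nbeads)
omegak = 2.0 * omega_n * np.sin(k * np.pi / nbeads)
dmf = (omegak / omega_t) ** 2                 # dynamical mass factors, modes 1..nbeads-1
# the scaled frequencies omegak/sqrt(dmf) all collapse onto omega_t
assert np.allclose(omegak / np.sqrt(dmf), omega_t)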
def __init__(self, latency=1.0, name="", H=None, xref=None, vref=0.0, pars=None, dopbc=False, threaded=False): """Initialises FFDebye. Args: pars: Optional dictionary, giving the parameters needed by the driver. """ # a socket to the communication library is created or linked # NEVER DO PBC -- forces here are computed without. super(FFDebye, self).__init__(latency, name, pars, dopbc=False) if H is None: raise ValueError("Must provide the Hessian for the Debye crystal.") if xref is None: raise ValueError("Must provide a reference configuration for the Debye crystal.") self.H = H self.xref = xref self.vref = vref eigsys = np.linalg.eigh(self.H) info(" @ForceField: Hamiltonian eigenvalues: " + ' '.join(map(str, eigsys[0])), verbosity.medium)
def step(self, step=None):
    """Does one simulation time step.

    Attributes:
        ttime: The time taken in applying the thermostat steps.
    """

    self.qtime = -time.time()
    info("\nMD STEP %d" % step, verbosity.debug)

    # Store previous forces for the warning exit condition
    self.old_f[:] = self.forces.f

    # Check for fixatoms
    if len(self.fixatoms) > 0:
        for dqb in self.old_f:
            dqb[self.fixatoms * 3] = 0.0
            dqb[self.fixatoms * 3 + 1] = 0.0
            dqb[self.fixatoms * 3 + 2] = 0.0

    dq1 = dstrip(self.old_f)

    # Move direction for steepest descent
    dq1_unit = dq1 / np.sqrt(np.dot(dq1.flatten(), dq1.flatten()))
    info(" @GEOP: Determined SD direction", verbosity.debug)

    # Set position and direction inside the mapper
    self.lm.set_dir(dstrip(self.beads.q), dq1_unit)

    # Reuse initial value since we have energy and forces already
    u0, du0 = (self.forces.pot.copy(), np.dot(dstrip(self.forces.f.flatten()), dq1_unit.flatten()))

    # Do one SD iteration; positions and energy are updated inside the mapper
    min_brent(self.lm, fdf0=(u0, du0), x0=0.0,
              tol=self.ls_options["tolerance"] * self.tolerances["energy"],
              itmax=self.ls_options["iter"], init_step=self.ls_options["step"])

    info(" Number of force calls: %d" % (self.lm.fcount))
    self.lm.fcount = 0

    # Update positions and forces
    self.beads.q = self.lm.dbeads.q
    self.forces.transfer_forces(self.lm.dforces)  # This forces the update of the forces

    d_x = np.absolute(np.subtract(self.beads.q, self.lm.x0))
    x = np.linalg.norm(d_x)

    # Automatically adapt the search step for the next iteration.
    # Relaxes better with very small step --> multiply by factor of 0.1 or 0.01
    self.ls_options["step"] = (0.1 * x * self.ls_options["adaptive"]
                               + (1 - self.ls_options["adaptive"]) * self.ls_options["step"])

    # Exit simulation step
    d_x_max = np.amax(np.absolute(d_x))
    self.exitstep(self.forces.pot, u0, d_x_max)
def get_o_nmm(self): """Returns dynamical mass factors, i.e. the scaling of normal mode masses that determine the path dynamics (but not statics).""" # also checks that the frequencies and the mode given in init are # consistent with the beads and ensemble dmf = np.ones(self.nbeads, float) if self.mode == "rpmd": if len(self.nm_freqs) > 0: warning("nm.frequencies will be ignored for RPMD mode.", verbosity.low) elif self.mode == "manual": if len(self.nm_freqs) != self.nbeads - 1: raise ValueError("Manual path mode requires (nbeads-1) frequencies, one for each internal mode of the path.") for b in range(1, self.nbeads): sk = self.o_omegak[b] / self.nm_freqs[b - 1] dmf[b] = sk**2 elif self.mode == "pa-cmd": if len(self.nm_freqs) > 1: warning("Only the first element in nm.frequencies will be considered for PA-CMD mode.", verbosity.low) if len(self.nm_freqs) == 0: raise ValueError("PA-CMD mode requires the target frequency of all the internal modes.") for b in range(1, self.nbeads): sk = self.o_omegak[b] / self.nm_freqs[0] info(" ".join(["NM FACTOR", str(b), str(sk), str(self.o_omegak[b]), str(self.nm_freqs[0])]), verbosity.medium) dmf[b] = sk**2 elif self.mode == "wmax-cmd": if len(self.nm_freqs) > 2: warning("Only the first two element in nm.frequencies will be considered for WMAX-CMD mode.", verbosity.low) if len(self.nm_freqs) < 2: raise ValueError("WMAX-CMD mode requires [wmax, wtarget]. The normal modes will be scaled such that the first internal mode is at frequency wtarget and all the normal modes coincide at frequency wmax.") wmax = self.nm_freqs[0] wt = self.nm_freqs[1] for b in range(1, self.nbeads): sk = 1.0 / np.sqrt((wt)**2 * (1 + (wmax / self.o_omegak[1])**2) / (wmax**2 + (self.o_omegak[b])**2)) dmf[b] = sk**2 return dmf
def _poll_loop(self): """The main thread loop. Runs until either the program finishes or a kill call is sent. Updates the pool of clients every UPDATEFREQ loops and loops every latency seconds until _poll_true becomes false. """ info(" @SOCKET: Starting the polling thread main loop.", verbosity.low) self._poll_iter = UPDATEFREQ while self._poll_true: time.sleep(self.latency) # makes sure to remove the last dead client as soon as possible -- and to get clients if we are dry if self._poll_iter >= UPDATEFREQ or len(self.clients) == 0 or ( len(self.clients) > 0 and not (self.clients[0].status & Status.Up)): self.pool_update() self._poll_iter = 0 self._poll_iter += 1 self.pool_distribute() if os.path.exists("EXIT"): # softexit info( " @SOCKET: Soft exit request from file EXIT. Flushing job queue.", verbosity.low) # releases all pending requests for r in self.requests: r["status"] = "Exit" for c in self.clients: try: c.shutdown(socket.SHUT_RDWR) c.close() except: pass # flush it all down the drain self.clients = [] self.jobs = [] self._poll_thread = None
def bind(self, geop): # call bind function from DummyOptimizer super(HessianOptimizer, self).bind(geop) # Specific for RateOptimizer self.hessian_update = geop.hessian_update self.hessian_asr = geop.hessian_asr self.hessian_init = geop.hessian_init self.im.bind(self) # Hessian self.initial_hessian = None if geop.hessian.size != (self.beads.natoms * 3 * self.beads.q.size): if geop.hessian.size == (self.beads.natoms * 3)**2: self.initial_hessian = geop.hessian.copy() geop.hessian = np.zeros( (self.beads.natoms * 3, self.beads.q.size), float) elif geop.hessian.size == 0 and geop.hessian_init == 'true': info( " Initial hessian is not provided. We are going to compute it.", verbosity.low) geop.hessian = np.zeros( (self.beads.natoms * 3, self.beads.q.size)) if ((self.beads.q - self.beads.q[0]) == 0).all() and self.beads.nbeads > 1: raise ValueError( """We need a initial hessian in order to create our initial instanton geometry. Please provide a (1-bead) hessian or an initial instanton geometry.""" ) else: raise ValueError( " 'Hessian_init' is false, an initial hessian (of the proper size) must be provided." ) self.hessian = geop.hessian
def post_step(self, step, new_x, d_x, activearrays):
    """Updates the hessian, positions and forces once a step has been taken,
    prints the output and checks for convergence."""

    d_x_max = np.amax(np.absolute(d_x))
    info("Current step norm = {}".format(d_x_max), verbosity.medium)

    # Get new energy (u) and forces (f) using the mapper
    self.im(new_x, ret=False, new_disc=True)  # Only to update the mapper
    u, g2 = self.gm(new_x, new_disc=False)
    f = -g2
    d_g = np.subtract(activearrays["old_f"], f)

    # Update
    self.update_hessian(self.options["hessian_update"], activearrays["hessian"], new_x, d_x, d_g)
    self.update_pos_for()

    # Print
    self.print_geo(step)
    self.print_hess(step)

    # Check exit and only then update old arrays
    self.exit = self.exitstep(d_x_max, step)
    self.update_old_pos_for()
def close(self): """Closes down the socket.""" info(" @SOCKET: Shutting down the driver interface.", verbosity.low ) for c in self.clients: try: c.shutdown(socket.SHUT_RDWR) c.close() except: pass # flush it all down the drain self.clients = [] self.jobs = [] try: self.server.shutdown(socket.SHUT_RDWR) self.server.close() except: info(" @SOCKET: Problem shutting down the server socket. Will just continue and hope for the best.", verbosity.low) if self.mode == "unix": os.unlink("/tmp/ipi_" + self.address)
def mass(cls, label): """Function to access the mass_list attribute. Note that this does not require an instance of the Elements class to be created, as this is a class method. Therefore using Elements.mass(label) will give the mass of the element with the atomic symbol given by label. Args: label: The atomic symbol of the atom whose mass is required. Returns: A float giving the mass of the atom with atomic symbol label. """ try: label = label[0] + label[1:].lower( ) # making all letters lower case except first one # me c*g return cls.mass_list[label] * Constants.amu except KeyError: info( "Unknown element given (" + label + "), you must specify the mass", verbosity.low) return -1.0
def __init__(self, init_file, args_str, param_file, latency=1.0e-3, name="", pars=None, dopbc=True, threaded=False): """Initialises QUIP. Args: pars: Mandatory dictionary, giving the parameters needed by QUIP. """ if quippy is None: info("QUIPPY import failed", verbosity.low) raise quippy_exc # a socket to the communication library is created or linked super(FFQUIP, self).__init__(latency, name, pars, dopbc, threaded=threaded) self.init_file = init_file self.args_str = args_str self.param_file = param_file # Initializes an atoms object and the interaction potential self.atoms = quippy.Atoms(self.init_file) self.pot = quippy.Potential(self.args_str, param_filename=self.param_file) # Initializes the conversion factors from i-pi to QUIP self.len_conv = unit_to_user("length", "angstrom", 1) self.energy_conv = unit_to_user("energy", "electronvolt", 1) self.force_conv = unit_to_user("force", "ev/ang", 1)
def init_file(mode, filename, dimension="length", units="automatic", cell_units="automatic"): """Reads a @mode file and returns the data contained in it. Args: mode: Type of file that should be read. filename: A string giving the name of the pdb file to be read from. Returns: A list of Atoms objects as read from each frame of the pdb file, and a Cell object as read from the final pdb frame. """ rfile = open(filename, "r") ratoms = [] info( " # Initializing from file %s. Dimension: %s, units: %s, cell_units: %s" % (filename, dimension, units, cell_units), verbosity.low, ) while True: # while loop, so that more than one configuration can be given # so multiple beads can be initialized at once. try: ret = read_file(mode, rfile, dimension=dimension, units=units, cell_units=cell_units) except EOFError: break ratoms.append(ret["atoms"]) return ratoms, ret["cell"] # if multiple frames, the last cell is returned
def open_backup(filename, mode='r', buffering=-1): """A wrapper around `open` which saves backup files. If the file is opened in write mode and already exists, it is first backed up under a new file name, keeping all previous backups. Then, a new file is opened for writing. For reference: https://docs.python.org/2/library/functions.html#open Args: The same as for `open`. Returns: An open file as returned by `open`. """ if mode.startswith('w'): # If writing, make sure nothing is overwritten. i = 0 fn_backup = filename while os.path.isfile(fn_backup): fn_backup = '#' + filename + '#%i#' % i i += 1 if fn_backup != filename: os.rename(filename, fn_backup) info('Backup performed: {0:s} -> {1:s}'.format(filename, fn_backup), verbosity.low) else: # There is no need to back up. # `open` will sort out whether `mode` is valid. pass return open(filename, mode, buffering)
def __init__(self, nbeads, natoms, open_paths=None): """Initializes nm_trans. Args: nbeads: The number of beads. natoms: The number of atoms. """ self.nbeads = nbeads self.natoms = natoms if open_paths is None: open_paths = [] self._open = open_paths # for atoms with open path we still use the matrix transformation self._b2o_nm = mk_o_nm_matrix(nbeads) self._o_nm2b = self._b2o_nm.T try: import pyfftw info("Import of PyFFTW successful", verbosity.medium) self.qdummy = pyfftw.n_byte_align_empty((nbeads, 3 * natoms), 16, 'float32') self.qnmdummy = pyfftw.n_byte_align_empty((nbeads // 2 + 1, 3 * natoms), 16, 'complex64') self.fft = pyfftw.FFTW(self.qdummy, self.qnmdummy, axes=(0,), direction='FFTW_FORWARD') self.ifft = pyfftw.FFTW(self.qnmdummy, self.qdummy, axes=(0,), direction='FFTW_BACKWARD') except ImportError: # Uses standard numpy fft library if nothing better # is available info("Import of PyFFTW unsuccessful, using NumPy library instead", verbosity.medium) self.qdummy = np.zeros((nbeads, 3 * natoms), dtype='float32') self.qnmdummy = np.zeros((nbeads // 2 + 1, 3 * natoms), dtype='complex64') def dummy_fft(self): self.qnmdummy = np.fft.rfft(self.qdummy, axis=0) def dummy_ifft(self): self.qdummy = np.fft.irfft(self.qnmdummy, n=self.nbeads, axis=0) self.fft = lambda: dummy_fft(self) self.ifft = lambda: dummy_ifft(self)
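# Sketch of the NumPy fallback above: rfft/irfft along the bead axis give an
# exact round trip, which is all the dummy_fft/dummy_ifft pair relies on when
# PyFFTW is not available.
import numpy as np

nbeads, natoms = 6, 2
q = np.random.rand(nbeads, 3 * natoms)
qnm = np.fft.rfft(q, axis=0)                 # shape (nbeads//2 + 1, 3*natoms)
q_back = np.fft.irfft(qnm, n=nbeads, axis=0)
assert qnm.shape == (nbeads // 2 + 1, 3 * natoms)
assert np.allclose(q, q_back)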
def bind(self, simul): """Calls the bind routines for all the objects in the system.""" self.simul = simul # keeps a handle to the parent simulation object # binds important computation engines info(" # Binding the forces ", verbosity.low) self.forces.bind( self.beads, self.cell, self.fcomp, self.simul.fflist, open_paths=self.nm.open_paths, ) self.nm.bind(self.ensemble, self.motion, beads=self.beads, forces=self.forces) self.ensemble.bind( self.beads, self.nm, self.cell, self.forces, self.simul.fflist ) self.motion.bind( self.ensemble, self.beads, self.nm, self.cell, self.forces, self.prng, simul.output_maker, ) dpipe(dd(self.nm).omegan2, dd(self.forces).omegan2) self.init.init_stage2(self) # binds output management objects self._propertylock = threading.Lock() self.properties.bind(self) self.trajs.bind(self)
def step(self, step=None): """ Does one simulation time step. Attributes: qtime : The time taken in updating the real positions. tr : current trust radius """ self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if step == 0: info(" @GEOP: Initializing BFGSTRM", verbosity.debug) self.old_x[:] = self.beads.q self.old_u[:] = self.forces.pot self.old_f[:] = self.forces.f if len(self.fixatoms) > 0: for dqb in self.old_f: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 # Reduce dimensionality masked_old_x = self.old_x[:, self.gm.fixatoms_mask] masked_hessian = self.hessian[np.ix_(self.gm.fixatoms_mask, self.gm.fixatoms_mask)] # Do one iteration of BFGSTRM. # The Hessian is updated inside. Everything is passed inside BFGSTRM() in masked form, including the Hessian BFGSTRM(masked_old_x, self.old_u, self.old_f[:, self.gm.fixatoms_mask], masked_hessian, self.tr, self.gm, self.big_step) # Restore dimensionality of the hessian self.hessian[np.ix_(self.gm.fixatoms_mask, self.gm.fixatoms_mask)] = masked_hessian else: # Make one step. ( A step is finished when a movement is accepted) BFGSTRM(self.old_x, self.old_u, self.old_f, self.hessian, self.tr, self.gm, self.big_step) info(" Number of force calls: %d" % (self.gm.fcount)) self.gm.fcount = 0 # Update positions and forces self.beads.q = self.gm.dbeads.q self.forces.transfer_forces( self.gm.dforces) # This forces the update of the forces # Exit simulation step d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x))) self.exitstep(self.forces.pot, self.old_u, d_x_max)
def step(self, step=None): """ Does one simulation time step.""" activearrays = self.pre_step(step) # First construct complete hessian from reduced h0 = red2comp(activearrays["hessian"], self.im.dbeads.nbeads, self.im.dbeads.natoms, self.im.coef) # Add spring terms to the physical hessian h1 = np.add(self.im.h, h0) # Get eigenvalues and eigenvector. d, w = clean_hessian(h1, self.im.dbeads.q, self.im.dbeads.natoms, self.im.dbeads.nbeads, self.im.dbeads.m, self.im.dbeads.m3, self.options["hessian_asr"]) # d,w =np.linalg.eigh(h1) #Cartesian info('\n@Nichols: 1st freq {} cm^-1'.format(units.unit_to_user('frequency', 'inversecm', np.sign(d[0]) * np.sqrt(np.absolute(d[0])))), verbosity.medium) info('@Nichols: 2nd freq {} cm^-1'.format(units.unit_to_user('frequency', 'inversecm', np.sign(d[1]) * np.sqrt(np.absolute(d[1])))), verbosity.medium) info('@Nichols: 3rd freq {} cm^-1'.format(units.unit_to_user('frequency', 'inversecm', np.sign(d[2]) * np.sqrt(np.absolute(d[2])))), verbosity.medium) #info('@Nichols: 4th freq {} cm^-1'.format(units.unit_to_user('frequency','inversecm',np.sign(d[3])*np.sqrt(np.absolute(d[3])))),verbosity.medium) #info('@Nichols: 8th freq {} cm^-1\n'.format(units.unit_to_user('frequency','inversecm',np.sign(d[7])*np.sqrt(np.absolute(d[7])))),verbosity.medium) # Find new movement direction if self.options["mode"] == 'rate': f = activearrays["old_f"] * (self.im.coef[1:] + self.im.coef[:-1]) / 2 d_x = nichols(f, self.im.f, d, w, self.im.dbeads.m3, activearrays["big_step"]) elif self.options["mode"] == 'splitting': d_x = nichols(activearrays["old_f"], self.im.f, d, w, self.im.dbeads.m3, activearrays["big_step"], mode=0) # Rescale step if necessary if np.amax(np.absolute(d_x)) > activearrays["big_step"]: info("Step norm, scaled down to {}".format(activearrays["big_step"]), verbosity.low) d_x *= activearrays["big_step"] / np.amax(np.absolute(d_x)) # Get the new full-position d_x_full = self.fix.get_full_vector(d_x, t=1) new_x = self.optarrays["old_x"].copy() + d_x_full self.post_step(step, new_x, d_x, activearrays)
def step(self, step=None): """ Does one simulation time step. Attributes: qtime: The time taken in updating the positions. """ self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if step == 0: info(" @GEOP: Initializing BFGS", verbosity.debug) self.d += dstrip(self.forces.f) / np.sqrt( np.dot(self.forces.f.flatten(), self.forces.f.flatten())) if len(self.fixatoms) > 0: for dqb in self.d: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 self.old_x[:] = self.beads.q self.old_u[:] = self.forces.pot self.old_f[:] = self.forces.f if len(self.fixatoms) > 0: for dqb in self.old_f: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 fdf0 = (self.old_u, -self.old_f) # Do one iteration of BFGS # The invhessian and the directions are updated inside. BFGS(self.old_x, self.d, self.gm, fdf0, self.invhessian, self.big_step, self.ls_options["tolerance"] * self.tolerances["energy"], self.ls_options["iter"]) info(" Number of force calls: %d" % (self.gm.fcount)) self.gm.fcount = 0 # Update positions and forces self.beads.q = self.gm.dbeads.q self.forces.transfer_forces( self.gm.dforces) # This forces the update of the forces # Exit simulation step d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x))) self.exitstep(self.forces.pot, self.old_u, d_x_max)
def step(self, step=None):
    """Does one simulation time step.

    Attributes:
        ttime: The time taken in applying the thermostat steps.
    """

    self.qtime = -time.time()
    info("\nMD STEP %d" % step, verbosity.debug)

    if step == 0:
        info(" @GEOP: Initializing L-BFGS", verbosity.debug)
        self.d += dstrip(self.forces.f) / np.sqrt(np.dot(self.forces.f.flatten(), self.forces.f.flatten()))

    self.old_x[:] = self.beads.q
    self.old_u[:] = self.forces.pot
    self.old_f[:] = self.forces.f

    if len(self.fixatoms) > 0:
        for dqb in self.old_f:
            dqb[self.fixatoms * 3] = 0.0
            dqb[self.fixatoms * 3 + 1] = 0.0
            dqb[self.fixatoms * 3 + 2] = 0.0

    fdf0 = (self.old_u, -self.old_f)

    # We update everything within L_BFGS (and all other calls).
    L_BFGS(self.old_x, self.d, self.gm, self.qlist, self.glist,
           fdf0, self.big_step, self.ls_options["tolerance"] * self.tolerances["energy"],
           self.ls_options["iter"], self.corrections, self.scale, step)

    info(" Number of force calls: %d" % (self.gm.fcount))
    self.gm.fcount = 0

    # Update positions and forces
    self.beads.q = self.gm.dbeads.q
    self.forces.transfer_forces(self.gm.dforces)  # This forces the update of the forces

    # Exit simulation step
    d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x)))
    self.exitstep(self.forces.pot, self.old_u, d_x_max)
def step(self, step=None): """ Does one simulation time step. Attributes: qtime : The time taken in updating the real positions. tr : current trust radius """ self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if step == 0: info(" @GEOP: Initializing BFGSTRM", verbosity.debug) self.old_x[:] = self.beads.q self.old_u[:] = self.forces.pot self.old_f[:] = self.forces.f if len(self.fixatoms) > 0: for dqb in self.old_f: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 # Make one step. ( A step is finished when a movement is accepted) BFGSTRM(self.old_x, self.old_u, self.old_f, self.hessian, self.tr, self.gm, self.big_step) info(" Number of force calls: %d" % (self.gm.fcount)) self.gm.fcount = 0 # Update positions and forces self.beads.q = self.gm.dbeads.q self.forces.transfer_forces( self.gm.dforces) # This forces the update of the forces # Exit simulation step d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x))) self.exitstep(self.forces.pot, self.old_u, d_x_max)
def step(self, step=None): """Does one simulation time step Attributes: ptime: The time taken in updating the velocities. qtime: The time taken in updating the positions. ttime: The time taken in applying the thermostat steps. """ self.ptime = 0.0 self.ttime = 0.0 self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if step == 0: gradf1 = dq1 = dstrip(self.forces.f) # Move direction for 1st conjugate gradient step dq1_unit = dq1 / np.sqrt(np.dot(gradf1.flatten(), gradf1.flatten())) info(" @GEOP: Determined SD direction", verbosity.debug) else: gradf0 = self.old_f dq0 = self.d gradf1 = dstrip(self.forces.f) beta = np.dot((gradf1.flatten() - gradf0.flatten()), gradf1.flatten()) / (np.dot(gradf0.flatten(), gradf0.flatten())) dq1 = gradf1 + max(0.0, beta) * dq0 dq1_unit = dq1 / np.sqrt(np.dot(dq1.flatten(), dq1.flatten())) info(" @GEOP: Determined CG direction", verbosity.debug) # Store force and direction for next CG step self.d[:] = dq1 self.old_f[:] = gradf1 if len(self.fixatoms) > 0: for dqb in dq1_unit: dqb[self.fixatoms * 3] = 0.0 dqb[self.fixatoms * 3 + 1] = 0.0 dqb[self.fixatoms * 3 + 2] = 0.0 self.lm.set_dir(dstrip(self.beads.q), dq1_unit) # Reuse initial value since we have energy and forces already u0, du0 = (self.forces.pot.copy(), np.dot(dstrip(self.forces.f.flatten()), dq1_unit.flatten())) # Do one CG iteration; return positions and energy min_brent(self.lm, fdf0=(u0, du0), x0=0.0, tol=self.ls_options["tolerance"] * self.tolerances["energy"], itmax=self.ls_options["iter"], init_step=self.ls_options["step"]) info(" Number of force calls: %d" % (self.lm.fcount)); self.lm.fcount = 0 # Update positions and forces self.beads.q = self.lm.dbeads.q self.forces.transfer_forces(self.lm.dforces) # This forces the update of the forces d_x = np.absolute(np.subtract(self.beads.q, self.lm.x0)) x = np.linalg.norm(d_x) # Automatically adapt the search step for the next iteration. # Relaxes better with very small step --> multiply by factor of 0.1 or 0.01 self.ls_options["step"] = 0.1 * x * self.ls_options["adaptive"] + (1 - self.ls_options["adaptive"]) * self.ls_options["step"] # Exit simulation step d_x_max = np.amax(np.absolute(d_x)) self.exitstep(self.forces.pot, u0, d_x_max)
def init_stage1(self, simul):
    """Initializes the simulation -- first stage.

    Takes a simulation object, and uses all the data in the initialization
    queue to fill up the beads and cell data needed to run the simulation.

    Args:
        simul: A simulation object to be initialized.

    Raises:
        ValueError: Raised if there is a problem with the initialization,
            if something that should have been initialized has not been,
            or if the objects that have been specified are not compatible
            with each other.
    """

    if simul.beads.nbeads == 0:
        fpos = fmom = fmass = flab = fcell = False  # we don't have an explicitly defined beads object yet
    else:
        fpos = fmom = fmass = flab = fcell = True

    for (k, v) in self.queue:
        info(" # Initializer (stage 1) parsing " + str(k) + " object.", verbosity.high)

        if k == "cell":
            if fcell:
                warning("Overwriting previous cell parameters", verbosity.medium)
            if v.mode == "pdb":
                rh = init_pdb(v.value)[1].h
            elif v.mode == "chk":
                rh = init_chk(v.value)[1].h
            else:
                rh = v.value.reshape((3, 3))
            rh *= unit_to_internal("length", v.units, 1.0)

            simul.cell.h = rh
            if simul.cell.V == 0.0:
                raise ValueError("Cell provided has zero volume")

            fcell = True
        elif k == "masses":
            if simul.beads.nbeads == 0:
                raise ValueError("Cannot initialize the masses before the size of the system is known")
            if fmass:
                warning("Overwriting previous atomic masses", verbosity.medium)
            if v.mode == "manual":
                rm = v.value
            else:
                rm = init_beads(v, self.nbeads).m
            rm *= unit_to_internal("mass", v.units, 1.0)

            if v.bead < 0:  # we are initializing the path
                if fmom and fmass:
                    warning("Rescaling momenta to make up for changed mass", verbosity.medium)
                    simul.beads.p /= simul.beads.sm3  # go to mass-scaled momenta, that are mass-invariant
                if v.index < 0:
                    simul.beads.m = rm
                else:  # we are initializing a specific atom
                    simul.beads.m[v.index:v.index + 1] = rm
                if fmom and fmass:  # finishes correcting the momenta
                    simul.beads.p *= simul.beads.sm3  # back to normal momenta
            else:
                raise ValueError("Cannot change the mass of a single bead")
            fmass = True
        elif k == "labels":
            if simul.beads.nbeads == 0:
                raise ValueError("Cannot initialize the labels before the size of the system is known")
            if flab:
                warning("Overwriting previous atomic labels", verbosity.medium)
            if v.mode == "manual":
                rn = v.value
            else:
                rn = init_beads(v, self.nbeads).names

            if v.bead < 0:  # we are initializing the path
                if v.index < 0:
                    simul.beads.names = rn
                else:  # we are initializing a specific atom
                    simul.beads.names[v.index:v.index + 1] = rn
            else:
                raise ValueError("Cannot change the label of a single bead")
            flab = True
        elif k == "positions":
            if fpos:
                warning("Overwriting previous atomic positions", verbosity.medium)
            # read the atomic positions as a vector
            rq = init_vector(v, self.nbeads)
            rq *= unit_to_internal("length", v.units, 1.0)
            (nbeads, natoms) = rq.shape
            natoms //= 3

            # check if we must initialize the simulation beads
            if simul.beads.nbeads == 0:
                if v.index >= 0:
                    raise ValueError("Cannot initialize single atoms before the size of the system is known")
                simul.beads.resize(natoms, self.nbeads)

            set_vector(v, simul.beads.q, rq)
            fpos = True
        elif (k == "velocities" or k == "momenta") and v.mode == "thermal":
            # intercept thermal initialization here, so we don't need to check further down
            if fmom:
                warning("Overwriting previous atomic momenta", verbosity.medium)
            if simul.beads.natoms == 0:
                raise ValueError("Cannot initialize momenta before the size of the system is known.")
            if not fmass:
                raise ValueError("Trying to resample velocities before having masses.")

            rtemp = v.value * unit_to_internal("temperature", v.units, 1.0)
            if rtemp <= 0:
                warning("Using the simulation temperature to resample velocities", verbosity.low)
                rtemp = simul.ensemble.temp
            else:
                info(" # Resampling velocities at temperature %s %s" % (v.value, v.units), verbosity.low)

            # pull together a mock initialization to get the NM masses right
            # without too much code duplication
            if v.bead >= 0:
                raise ValueError("Cannot thermalize a single bead")
            if v.index >= 0:
                rnatoms = 1
            else:
                rnatoms = simul.beads.natoms
            rbeads = Beads(rnatoms, simul.beads.nbeads)
            if v.index < 0:
                rbeads.m[:] = simul.beads.m
            else:
                rbeads.m[:] = simul.beads.m[v.index]
            rnm = NormalModes(mode=simul.nm.mode, transform_method=simul.nm.transform_method, freqs=simul.nm.nm_freqs)
            rens = Ensemble(dt=simul.ensemble.dt, temp=simul.ensemble.temp)
            rnm.bind(rbeads, rens)
            # then we exploit the sync magic to do a complicated initialization
            # in the NM representation, with (possibly) shifted-frequency NMs
            rnm.pnm = simul.prng.gvec((rbeads.nbeads, 3 * rbeads.natoms)) * np.sqrt(rnm.dynm3) * np.sqrt(rbeads.nbeads * rtemp * Constants.kb)

            if v.index < 0:
                simul.beads.p = rbeads.p
            else:
                simul.beads.p[:, 3 * v.index:3 * (v.index + 1)] = rbeads.p
            fmom = True
        elif k == "momenta":
            if fmom:
                warning("Overwriting previous atomic momenta", verbosity.medium)
            # read the atomic momenta as a vector
            rp = init_vector(v, self.nbeads, momenta=True)
            rp *= unit_to_internal("momentum", v.units, 1.0)
            (nbeads, natoms) = rp.shape
            natoms //= 3

            # checks if we must initialize the simulation beads
            if simul.beads.nbeads == 0:
                if v.index >= 0:
                    raise ValueError("Cannot initialize single atoms before the size of the system is known")
                simul.beads.resize(natoms, self.nbeads)

            rp *= np.sqrt(self.nbeads / nbeads)
            set_vector(v, simul.beads.p, rp)
            fmom = True
        elif k == "velocities":
            if fmom:
                warning("Overwriting previous atomic momenta", verbosity.medium)
            # read the atomic velocities as a vector
            rv = init_vector(v, self.nbeads)
            rv *= unit_to_internal("velocity", v.units, 1.0)
            (nbeads, natoms) = rv.shape
            natoms //= 3

            # checks if we must initialize the simulation beads
            if not fmass:
                raise ValueError("Cannot initialize velocities before the masses of the atoms are known")
            if simul.beads.nbeads == 0:
                simul.beads.resize(natoms, self.nbeads)

            warning("Initializing from velocities uses the previously defined masses -- not the masses inferred from the file -- to build momenta", verbosity.low)
            if v.index >= 0:
                rv *= simul.beads.m[v.index]
            elif v.bead >= 0:
                rv *= simul.beads.m3[0]
            else:
                rv *= simul.beads.m3
            rv *= np.sqrt(self.nbeads / nbeads)
            set_vector(v, simul.beads.p, rv)
            fmom = True
        elif k == "thermostat":
            pass  # thermostats must be initialized in a second stage

    if simul.beads.natoms == 0:
        raise ValueError("Initializer could not initialize the atomic positions")
    if simul.cell.V == 0:
        raise ValueError("Initializer could not initialize the cell")
    for i in range(simul.beads.natoms):
        if simul.beads.m[i] <= 0:
            raise ValueError("Initializer could not initialize the masses")
        if simul.beads.names[i] == "":
            raise ValueError("Initializer could not initialize the atom labels")
    if not fmom:
        warning("Momenta not specified in initialize. Will start with zero velocity if they are not specified in beads.", verbosity.low)
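The thermal branch above draws each normal-mode momentum from a Gaussian with variance m * nbeads * kB * T. A minimal sketch of the same idea for a single classical replica, i.e. plain Maxwell-Boltzmann resampling; the function name and reduced units are illustrative and not part of the i-PI API.

import numpy as np

def resample_momenta(m3, temp, kb=1.0, rng=None):
    """Draw Cartesian momenta from the Maxwell-Boltzmann distribution,
    p_i ~ N(0, m_i * kb * T). `m3` has one entry per degree of freedom
    (each atomic mass repeated for x, y, z)."""
    rng = np.random.default_rng() if rng is None else rng
    return rng.standard_normal(m3.shape) * np.sqrt(m3 * kb * temp)

# usage: two atoms (6 degrees of freedom) at reduced temperature T = 0.5
m3 = np.repeat([1.0, 16.0], 3)
p = resample_momenta(m3, temp=0.5)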
def exitstep(self, fx, u0, x):
    """Exits the simulation step. Computes time, checks for convergence."""

    info(" @GEOP: Updating bead positions", verbosity.debug)
    self.qtime += time.time()

    if len(self.fixatoms) > 0:
        ftmp = self.forces.f.copy()
        for dqb in ftmp:
            dqb[self.fixatoms * 3] = 0.0
            dqb[self.fixatoms * 3 + 1] = 0.0
            dqb[self.fixatoms * 3 + 2] = 0.0
        fmax = np.amax(np.absolute(ftmp))
    else:
        fmax = np.amax(np.absolute(self.forces.f))

    e = np.absolute((fx - u0) / self.beads.natoms)
    info("@GEOP", verbosity.medium)
    info(" Current energy %e" % (fx))
    info(" Position displacement %e Tolerance %e" % (x, self.tolerances["position"]), verbosity.medium)
    info(" Max force component %e Tolerance %e" % (fmax, self.tolerances["force"]), verbosity.medium)
    info(" Energy difference per atom %e Tolerance %e" % (e, self.tolerances["energy"]), verbosity.medium)

    if np.linalg.norm(self.forces.f.flatten() - self.old_f.flatten()) <= 1e-20:
        info("Something went wrong, the forces are not changing anymore."
             " This could be due to an overly small tolerance threshold"
             " that makes no physical sense. Please check if you are able"
             " to reach such accuracy with your force evaluation"
             " code (client).")

    if (np.absolute((fx - u0) / self.beads.natoms) <= self.tolerances["energy"]) \
            and (fmax <= self.tolerances["force"]) \
            and (x <= self.tolerances["position"]):
        self.converged = True
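Convergence above requires all three criteria at once: energy change per atom, largest force component, and largest displacement, each below its tolerance. A standalone sketch of that triple test, with illustrative names and a plain dict in place of the optimizer's tolerance attribute:

import numpy as np

def is_converged(e_new, e_old, forces, displacement, natoms, tol):
    """Return True when the energy change per atom, the largest force
    component and the largest displacement are all below tolerance.
    `tol` is a dict with 'energy', 'force' and 'position' entries."""
    de = abs(e_new - e_old) / natoms
    fmax = np.amax(np.abs(forces))
    dmax = np.amax(np.abs(displacement))
    return de <= tol["energy"] and fmax <= tol["force"] and dmax <= tol["position"]

# usage with toy numbers
tol = {"energy": 1e-6, "force": 1e-4, "position": 1e-3}
print(is_converged(-10.000001, -10.0, np.array([1e-5, -2e-5]), np.array([1e-4]), 2, tol))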
def pool_update(self):
    """Deals with keeping the pool of client drivers up-to-date during a
    force calculation step.

    Deals with maintaining the client list. Clients that have disconnected
    are removed and their jobs removed from the list of running jobs, and
    new clients are connected to the server.
    """

    # check for disconnected clients
    for c in self.clients[:]:
        if not (c.status & Status.Up):
            try:
                warning(
                    " @SOCKET: Client " + str(c.peername) + " died or got unresponsive(C). Removing from the list.",
                    verbosity.low,
                )
                c.shutdown(socket.SHUT_RDWR)
                c.close()
            except socket.error:
                pass
            c.status = Status.Disconnected
            self.clients.remove(c)

            # requeue jobs that have been left hanging
            for [k, j, tc] in self.jobs[:]:
                if tc.is_alive():
                    tc.join(2)
                if j is c:
                    self.jobs = [
                        w for w in self.jobs if not (w[0] is k and w[1] is j)
                    ]  # removes the pair in a robust way
                    k["status"] = "Queued"
                    k["start"] = -1

    if len(self.clients) == 0:
        searchtimeout = SERVERTIMEOUT
    else:
        searchtimeout = 0.0

    keepsearch = True
    while keepsearch:
        readable, writable, errored = select.select(
            [self.server], [], [], searchtimeout
        )
        if self.server in readable:
            client, address = self.server.accept()
            client.settimeout(TIMEOUT)
            driver = Driver(client)
            info(
                " @SOCKET: Client asked for connection from " + str(address) + ". Now hand-shaking.",
                verbosity.low,
            )
            driver.get_status()
            if driver.status & Status.Up:  # handshake succeeded only if the client reports as up
                driver.exit_on_disconnect = self.exit_on_disconnect
                self.clients.append(driver)
                info(
                    " @SOCKET: Handshaking was successful. Added to the client list.",
                    verbosity.low,
                )
                self.poll_iter = UPDATEFREQ  # if a new client was found, will try again harder next time
                searchtimeout = SERVERTIMEOUT
            else:
                warning(
                    " @SOCKET: Handshaking failed. Dropping connection.",
                    verbosity.low,
                )
                client.shutdown(socket.SHUT_RDWR)
                client.close()
        else:
            keepsearch = False
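The accept loop above polls the listening socket with select, using a timeout that depends on whether any client is already connected, and drains all pending connections before returning. A minimal sketch of the same non-blocking accept pattern, with illustrative names and timeout values rather than the interface constants used above:

import select
import socket

def accept_pending(server, timeout=0.0):
    """Accept the connections already pending on `server`, waiting up to
    `timeout` seconds for the first one, and return the new sockets."""
    new_clients = []
    while True:
        readable, _, _ = select.select([server], [], [], timeout)
        if server not in readable:
            break                      # nothing else is waiting to connect
        conn, addr = server.accept()
        conn.settimeout(5.0)
        new_clients.append(conn)
        timeout = 0.0                  # drain the backlog without blocking again
    return new_clients

# usage sketch
# srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# srv.bind(("localhost", 31415)); srv.listen(8)
# clients = accept_pending(srv, timeout=1.0)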
def step(self, step=None):
    if step is not None and step % self.stride != 0:
        return

    # Initialize positions to the actual positions, possibly with contraction
    self.dnm.qnm[:] = (
        self.basenm.qnm[: self.nbeads]
        * np.sqrt(self.nbeads)
        / np.sqrt(self.basebeads.nbeads)
    )

    # Randomized momenta
    self.dnm.pnm = (
        self.prng.gvec((self.dbeads.nbeads, 3 * self.dbeads.natoms))
        * np.sqrt(self.dnm.dynm3)
        * np.sqrt(self.dens.temp * self.dbeads.nbeads * Constants.kb)
    )
    self.dnm.pnm[0] = 0.0

    # Resets the frequency matrix
    self.omega2[:] = 0.0

    self.tmtx -= time.time()
    self.increment(self.dnm)
    self.tmtx += time.time()

    # sample by constrained-centroid dynamics
    for istep in range(self.nsamples):
        self.tmc -= time.time()
        self.ccdyn.step(step)
        self.tmc += time.time()
        self.tmtx -= time.time()
        self.increment(self.dnm)
        self.tmtx += time.time()

    self.neval += 1

    self.omega2 /= (
        self.dbeads.nbeads
        * self.dens.temp
        * (self.nsamples + 1)
        * (self.dbeads.nbeads - 1)
    )

    self.tsave -= time.time()

    if self.screen > 0.0:
        scr = self.matrix_screen()
        self.omega2 *= scr

    # ensure perfect symmetry
    self.omega2[:] = 0.5 * (self.omega2 + self.omega2.transpose())

    # only save the lower triangular part
    self.omega2[:] = np.tril(self.omega2)

    # save as a sparse matrix in half precision
    save_omega2 = sparse.csc_matrix(self.omega2.astype(np.float16))

    # save the frequency matrix to the PLANETARY file
    self.save_matrix(save_omega2)

    self.tsave += time.time()

    info(
        "@ PLANETARY MODEL Average timing: %f s, %f s, %f s\n"
        % (self.tmc / self.neval, self.tmtx / self.neval, self.tsave / self.neval),
        verbosity.high,
    )
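Because omega2 is symmetric, the step above keeps only its lower triangle and stores it as a half-precision sparse matrix. A small sketch of that round trip (symmetrize, take the lower triangle, pack as float16 CSC, then rebuild the full matrix), assuming only numpy and scipy; the helper names are illustrative:

import numpy as np
from scipy import sparse

def pack_symmetric(m):
    """Symmetrize, keep the lower triangle, and store in half precision."""
    m = 0.5 * (m + m.T)                      # enforce exact symmetry
    return sparse.csc_matrix(np.tril(m).astype(np.float16))

def unpack_symmetric(packed):
    """Rebuild the full symmetric matrix from the packed lower triangle."""
    low = packed.toarray().astype(np.float64)
    return low + low.T - np.diag(np.diag(low))

# usage: the round trip reproduces the symmetrized input up to float16 precision
a = np.random.rand(4, 4)
full = unpack_symmetric(pack_symmetric(a))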
def getforce(self):
    """Gets the potential energy, force and virial from the driver.

    Raises:
        InvalidStatus: Raised if the status is not HasData.
        Disconnected: Raised if the driver has disconnected.

    Returns:
        A list of the form [potential, force, virial, extra].
    """

    if self.status & Status.HasData:
        self.sendall(Message("getforce"))
        reply = ""
        while True:
            try:
                reply = self.recv_msg()
            except socket.timeout:
                warning(" @SOCKET: Timeout in getforce, trying again!", verbosity.low)
                continue
            except:
                warning(
                    " @SOCKET: Error while receiving message: %s" % (reply),
                    verbosity.low,
                )
                raise Disconnected()
            if reply == Message("forceready"):
                break
            else:
                warning(
                    " @SOCKET: Unexpected getforce reply: %s" % (reply),
                    verbosity.low,
                )
            if reply == "":
                raise Disconnected()
    else:
        raise InvalidStatus("Status in getforce was " + str(self.status))

    mu = np.float64()
    mu = self.recvall(mu)

    mlen = np.int32()
    mlen = self.recvall(mlen)
    mf = np.zeros(3 * mlen, np.float64)
    mf = self.recvall(mf)

    mvir = np.zeros((3, 3), np.float64)
    mvir = self.recvall(mvir)

    # Machinery to return a string as an "extra" field.
    # Comment this out if you are using an ancient patched driver that does not
    # return anything -- although you should really update that driver.
    mlen = np.int32()
    mlen = self.recvall(mlen)
    if mlen > 0:
        mxtra = np.zeros(mlen, np.character)
        mxtra = self.recvall(mxtra)
        mxtra = bytearray(mxtra).decode("utf-8")
    else:
        mxtra = ""

    mxtradict = {}
    if mxtra:
        try:
            mxtradict = json.loads(mxtra)
            info("Extra string JSON has been loaded.", verbosity.debug)
        except:
            # if we can't parse it as a dict, issue a warning and carry on
            info(
                "Extra string could not be loaded as a dictionary. Extra=" + mxtra,
                verbosity.debug,
            )
            mxtradict = {}
    if "raw" in mxtradict:
        raise ValueError(
            "'raw' cannot be used as a field in a JSON-formatted extra string"
        )

    mxtradict["raw"] = mxtra

    return [mu, mf, mvir, mxtradict]
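The reply is read as a float64 potential, an int32 atom count, 3N float64 force components, a 3x3 float64 virial, and finally an int32 length followed by that many bytes of an optional JSON "extra" string. The sketch below packs and unpacks a byte buffer with the same layout using numpy; it mirrors the read order above but is not the i-PI wire code itself, and all names are illustrative:

import json
import numpy as np

def pack_forces(pot, forces, virial, extra=""):
    """Serialize: float64 pot, int32 natoms, 3N float64 forces,
    3x3 float64 virial, int32 extra length, UTF-8 extra payload."""
    raw = json.dumps(extra).encode("utf-8") if isinstance(extra, dict) else extra.encode("utf-8")
    natoms = forces.size // 3
    return (np.float64(pot).tobytes()
            + np.int32(natoms).tobytes()
            + forces.astype(np.float64).tobytes()
            + virial.astype(np.float64).tobytes()
            + np.int32(len(raw)).tobytes()
            + raw)

def unpack_forces(buf):
    """Inverse of pack_forces."""
    pot = np.frombuffer(buf[:8], np.float64)[0]
    natoms = int(np.frombuffer(buf[8:12], np.int32)[0])
    off = 12 + 24 * natoms
    forces = np.frombuffer(buf[12:off], np.float64)
    virial = np.frombuffer(buf[off:off + 72], np.float64).reshape(3, 3)
    xlen = int(np.frombuffer(buf[off + 72:off + 76], np.int32)[0])
    extra = buf[off + 76:off + 76 + xlen].decode("utf-8")
    return pot, forces, virial, extra

# round trip with toy data
buf = pack_forces(-1.5, np.zeros(6), np.eye(3), {"dipole": [0, 0, 1]})
print(unpack_forces(buf)[0])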
def step(self, step=None): """Tries to exchange replica.""" if self.stride <= 0.0: return info("\nTrying to exchange replicas on STEP %d" % step, verbosity.debug) t_start = time.time() fxc = False sl = self.syslist t_eval = 0 t_swap = 0 for i in range(len(sl)): for j in range(i): if (1.0 / self.stride < self.prng.u): continue # tries a swap with probability 1/stride t_eval -= time.time() ti = sl[i].ensemble.temp tj = sl[j].ensemble.temp eci = sl[i].ensemble.econs ecj = sl[j].ensemble.econs pensi = sl[i].ensemble.lpens pensj = sl[j].ensemble.lpens t_eval += time.time() t_swap -= time.time() ensemble_swap(sl[i].ensemble, sl[j].ensemble) # tries to swap the ensembles! # it is generally a good idea to rescale the kinetic energies, # which means that the exchange is done only relative to the potential energy part. if self.rescalekin: # also rescales the velocities -- should do the same with cell velocities sl[i].beads.p *= np.sqrt(tj / ti) sl[j].beads.p *= np.sqrt(ti / tj) try: # if motion has a barostat, and barostat has a momentum, does the swap # also note that the barostat has a hidden T dependence inside the mass, so # as a matter of fact <p^2> \propto T^2 sl[i].motion.barostat.p *= (tj / ti) sl[j].motion.barostat.p *= (ti / tj) except AttributeError: pass t_swap += time.time() t_eval -= time.time() newpensi = sl[i].ensemble.lpens newpensj = sl[j].ensemble.lpens pxc = np.exp((newpensi + newpensj) - (pensi + pensj)) t_eval += time.time() if (pxc > self.prng.u): # really does the exchange info(" @ PT: SWAPPING replicas % 5d and % 5d." % (i, j), verbosity.low) # if we have GLE thermostats, we also have to exchange rescale the s!!! gle_scale(sl[i], (tj / ti)) gle_scale(sl[j], (ti / tj)) t_eval -= time.time() # we just have to carry on with the swapped ensembles, but we also keep track of the changes in econs sl[i].ensemble.eens += eci - sl[i].ensemble.econs sl[j].ensemble.eens += ecj - sl[j].ensemble.econs t_eval += time.time() self.repindex[i], self.repindex[j] = self.repindex[ j], self.repindex[i] # keeps track of the swap fxc = True # signal that an exchange has been made! else: # undoes the swap t_swap -= time.time() ensemble_swap(sl[i].ensemble, sl[j].ensemble) # undoes the kinetic scaling if self.rescalekin: sl[i].beads.p *= np.sqrt(ti / tj) sl[j].beads.p *= np.sqrt(tj / ti) try: sl[i].motion.barostat.p *= (ti / tj) sl[j].motion.barostat.p *= (tj / ti) except AttributeError: pass t_swap += time.time() info( " @ PT: SWAP REJECTED BETWEEN replicas % 5d and % 5d." % (i, j), verbosity.low) #tempi = copy(self.syslist[i].ensemble.temp) #self.syslist[i].ensemble.temp = copy(self.syslist[j].ensemble.temp) # velocities have to be adjusted according to the new temperature if fxc: # writes out the new status #with open(self.swapfile, "a") as sf: self.sf.write("% 10d" % (step)) for i in self.repindex: self.sf.write(" % 5d" % (i)) self.sf.write("\n") self.sf.force_flush() info( "# REMD step evaluated in %f (%f eval, %f swap) sec." % (time.time() - t_start, t_eval, t_swap), verbosity.debug)
def pool_distribute(self):
    """Deals with keeping the list of jobs up-to-date during a force
    calculation step.

    Deals with maintaining the jobs list. Gets data from drivers that have
    finished their calculation and removes that job from the list of
    running jobs, adds jobs to free clients and initialises the
    forcefields of new clients.
    """

    ttotal = tdispatch = tcheck = 0
    ttotal -= time.time()

    # get clients that are still free
    freec = self.clients[:]
    for [r2, c, ct] in self.jobs:
        freec.remove(c)

    # fills up the list of pending requests if empty, or if clients are abundant
    if len(self.prlist) == 0 or len(freec) > len(self.prlist):
        self.prlist = [r for r in self.requests if r["status"] == "Queued"]

    if self.match_mode == "auto":
        match_seq = ["match", "none", "free", "any"]
    elif self.match_mode == "any":
        match_seq = ["any"]

    # first: dispatches jobs to free clients (if any!)
    # tries first to match the previous replica<>driver association, then to get
    # new clients, and only finally sends a new replica to old drivers
    ndispatch = 0
    tdispatch -= time.time()
    while len(freec) > 0 and len(self.prlist) > 0:
        for match_ids in match_seq:
            for fc in freec[:]:
                if self.dispatch_free_client(fc, match_ids):
                    freec.remove(fc)
                    ndispatch += 1
                if len(self.prlist) == 0:
                    break
        if len(freec) > 0:
            self.prlist = [r for r in self.requests if r["status"] == "Queued"]
    tdispatch += time.time()

    # now check for client status
    if len(self.jobs) == 0:
        for c in self.clients:
            if c.status == Status.Disconnected:  # client disconnected. force a pool_update
                self.poll_iter = UPDATEFREQ
                return

    # check for finished jobs
    nchecked = 0
    nfinished = 0
    tcheck -= time.time()
    for [r, c, ct] in self.jobs[:]:
        chk = self.check_job_finished(r, c, ct)
        if chk == 1:
            nfinished += 1
        elif chk == 0:
            self.poll_iter = UPDATEFREQ  # client disconnected. force a pool_update
        nchecked += 1
    tcheck += time.time()

    ttotal += time.time()
    info(
        "POLL TOTAL: %10.4f Dispatch(N,t): %4i, %10.4f Check(N,t): %4i, %10.4f"
        % (ttotal, ndispatch, tdispatch, nchecked, tcheck),
        verbosity.debug,
    )

    if nfinished > 0:
        # don't wait, just try again to distribute
        self.pool_distribute()
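Dispatch walks through progressively looser matching criteria: first clients whose last job was this request ("match"), then clients that have never run anything ("none"), then clients not locked to another request ("free"), and finally anyone ("any"). A small sketch of that priority cascade over a plain list of dicts; the data layout is illustrative, not the server's client objects:

def pick_client(request_id, clients):
    """Pick a client for `request_id`, preferring (in order) one whose last
    job was this request, one that has run nothing yet, one that is not
    locked to another request, and finally any free client.
    `clients` is a list of dicts with 'lastreq' and 'locked' keys."""
    for rule in ("match", "none", "free", "any"):
        for c in clients:
            if rule == "match" and c["lastreq"] != request_id:
                continue
            if rule == "none" and c["lastreq"] is not None:
                continue
            if rule == "free" and c["locked"]:
                continue
            return c
    return None

# usage
clients = [{"lastreq": 7, "locked": True}, {"lastreq": None, "locked": False}]
print(pick_client(3, clients))  # -> the idle client with no history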
def step(self, step=None): """Does one simulation time step.""" self.ptime = 0.0 self.ttime = 0.0 self.qtime = -time.time() info("\nMD STEP %d" % step, verbosity.debug) if self.mode == "bfgs": # BFGS Minimization # Initialize approximate Hessian inverse to the identity and direction # to the steepest descent direction if step == 0: # or np.sqrt(np.dot(self.bfgsm.d, self.bfgsm.d)) == 0.0: <-- this part for restarting at claimed minimum (optional) info(" @GEOP: Initializing BFGS", verbosity.debug) self.bfgsm.d = depstrip(self.forces.f) / np.sqrt(np.dot(self.forces.f.flatten(), self.forces.f.flatten())) self.bfgsm.xold = self.beads.q.copy() # Current energy and forces u0 = self.forces.pot.copy() du0 = - self.forces.f # Store previous forces self.cg_old_f[:] = self.forces.f # Do one iteration of BFGS, return new point, function value, # move direction, and current Hessian to use for next iteration self.beads.q, fx, self.bfgsm.d, self.invhessian = BFGS(self.beads.q, self.bfgsm.d, self.bfgsm, fdf0=(u0, du0), invhessian=self.invhessian, max_step=self.max_step, tol=self.ls_options["tolerance"], itmax=self.ls_options["iter"]) # x = current position - previous position; use for exit tolerance x = np.amax(np.absolute(np.subtract(self.beads.q, self.bfgsm.xold))) # Store old position self.bfgsm.xold[:] = self.beads.q info(" @GEOP: Updating bead positions", verbosity.debug) elif self.mode == "lbfgs": # L-BFGS Minimization # Initialize approximate Hessian inverse to the identity and direction # to the steepest descent direction # Initialize lists of previous positions and gradient if step == 0: # or np.sqrt(np.dot(self.bfgsm.d, self.bfgsm.d)) == 0.0: <-- this part for restarting at claimed minimum (optional) info(" @GEOP: Initializing L-BFGS", verbosity.debug) self.bfgsm.d = depstrip(self.forces.f) / np.sqrt(np.dot(self.forces.f.flatten(), self.forces.f.flatten())) self.bfgsm.xold = self.beads.q.copy() self.qlist = np.zeros((self.corrections, len(self.beads.q.flatten()))) self.glist = np.zeros((self.corrections, len(self.beads.q.flatten()))) # Current energy and force u0, du0 = (self.forces.pot.copy(), - self.forces.f) # Store previous forces self.cg_old_f[:] = self.forces.f.reshape(len(self.cg_old_f)) # Do one iteration of L-BFGS, return new point, function value, # move direction, and current Hessian to use for next iteration self.beads.q, fx, self.bfgsm.d, self.qlist, self.glist = L_BFGS(self.beads.q, self.bfgsm.d, self.bfgsm, self.qlist, self.glist, fdf0=(u0, du0), max_step=self.max_step, tol=self.ls_options["tolerance"], itmax=self.ls_options["iter"], m=self.corrections, k=step) info(" @GEOP: Updated position list", verbosity.debug) info(" @GEOP: Updated gradient list", verbosity.debug) # x = current position - old position. 
Used for convergence tolerance x = np.amax(np.absolute(np.subtract(self.beads.q, self.bfgsm.xold))) # Store old position self.bfgsm.xold[:] = self.beads.q info(" @GEOP: Updated bead positions", verbosity.debug) # Routine for steepest descent and conjugate gradient else: if (self.mode == "sd" or step == 0): # Steepest descent minimization # gradf1 = force at current atom position # dq1 = direction of steepest descent # dq1_unit = unit vector of dq1 gradf1 = dq1 = depstrip(self.forces.f) # Move direction for steepest descent and 1st conjugate gradient step dq1_unit = dq1 / np.sqrt(np.dot(gradf1.flatten(), gradf1.flatten())) info(" @GEOP: Determined SD direction", verbosity.debug) else: # Conjugate gradient, Polak-Ribiere # gradf1: force at current atom position # gradf0: force at previous atom position # dq1 = direction to move # dq0 = previous direction # dq1_unit = unit vector of dq1 gradf0 = self.cg_old_f dq0 = self.cg_old_d gradf1 = depstrip(self.forces.f) beta = np.dot((gradf1.flatten() - gradf0.flatten()), gradf1.flatten()) / (np.dot(gradf0.flatten(), gradf0.flatten())) dq1 = gradf1 + max(0.0, beta) * dq0 dq1_unit = dq1 / np.sqrt(np.dot(dq1.flatten(), dq1.flatten())) info(" @GEOP: Determined CG direction", verbosity.debug) # Store force and direction for next CG step self.cg_old_d[:] = dq1 self.cg_old_f[:] = gradf1 if len(self.fixatoms) > 0: for dqb in dq1_unit: dqb[self.fixatoms*3] = 0.0 dqb[self.fixatoms*3+1] = 0.0 dqb[self.fixatoms*3+2] = 0.0 self.lm.set_dir(depstrip(self.beads.q), dq1_unit) # Reuse initial value since we have energy and forces already u0, du0 = (self.forces.pot.copy(), np.dot(depstrip(self.forces.f.flatten()), dq1_unit.flatten())) # Do one SD/CG iteration; return positions and energy (x, fx) = min_brent(self.lm, fdf0=(u0, du0), x0=0.0, tol=self.ls_options["tolerance"], itmax=self.ls_options["iter"], init_step=self.ls_options["step"]) # Automatically adapt the search step for the next iteration. # Relaxes better with very small step --> multiply by factor of 0.1 or 0.01 self.ls_options["step"] = 0.1 * x * self.ls_options["adaptive"] + (1 - self.ls_options["adaptive"]) * self.ls_options["step"] self.beads.q += dq1_unit * x info(" @GEOP: Updated bead positions", verbosity.debug) self.qtime += time.time() # Determine conditions for converged relaxation if ((fx - u0) / self.beads.natoms <= self.tolerances["energy"])\ and ((np.amax(np.absolute(self.forces.f)) <= self.tolerances["force"]) or (np.sqrt(np.dot(self.forces.f.flatten() - self.cg_old_f.flatten(), self.forces.f.flatten() - self.cg_old_f.flatten())) == 0.0))\ and (x <= self.tolerances["position"]): softexit.trigger("Geometry optimization converged. Exiting simulation")
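L_BFGS above keeps only the last m position and gradient differences instead of a full inverse Hessian, and obtains the search direction from the standard two-loop recursion. A compact, self-contained sketch of that recursion follows; the helper name is illustrative and this is not the optimizer called above.

import numpy as np

def lbfgs_direction(grad, s_list, y_list):
    """Two-loop L-BFGS recursion: return the quasi-Newton direction -H*grad
    from stored position differences s_k and gradient differences y_k."""
    q = grad.copy()
    alphas = []
    for s, y in reversed(list(zip(s_list, y_list))):   # newest pair first
        rho = 1.0 / np.dot(y, s)
        alpha = rho * np.dot(s, q)
        q -= alpha * y
        alphas.append((rho, alpha, s, y))
    if y_list:
        s, y = s_list[-1], y_list[-1]
        q *= np.dot(s, y) / np.dot(y, y)               # initial guess H0 = gamma * I
    for rho, alpha, s, y in reversed(alphas):          # oldest pair first
        beta = rho * np.dot(y, q)
        q += (alpha - beta) * s
    return -q

# usage: one correction pair for the quadratic f(x) = 0.5 * x^T diag(1, 4) x
s = np.array([0.1, 0.1]); y = np.array([0.1, 0.4])
print(lbfgs_direction(np.array([1.0, 4.0]), [s], [y]))  # -> [-1, -1], the Newton step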
def pool_distribute(self):
    """Deals with keeping the list of jobs up-to-date during a force
    calculation step.

    Deals with maintaining the jobs list. Gets data from drivers that have
    finished their calculation and removes that job from the list of
    running jobs, adds jobs to free clients and initialises the
    forcefields of new clients.
    """

    for c in self.clients:
        if c.status == Status.Disconnected:  # client disconnected. force a pool_update
            self._poll_iter = UPDATEFREQ
            return
        if not c.status & (Status.Ready | Status.NeedsInit):
            c.poll()

    for [r, c] in self.jobs[:]:
        if c.status & Status.HasData:
            try:
                r["result"] = c.getforce()
                if len(r["result"][1]) != len(r["pos"]):
                    raise InvalidSize
            except Disconnected:
                c.status = Status.Disconnected
                continue
            except InvalidSize:
                warning(
                    " @SOCKET: Client returned an inconsistent number of forces. Will mark as disconnected and try to carry on.",
                    verbosity.low)
                c.status = Status.Disconnected
                continue
            except:
                warning(
                    " @SOCKET: Client got in an awkward state during getforce. Will mark as disconnected and try to carry on.",
                    verbosity.low)
                c.status = Status.Disconnected
                continue

            c.poll()
            while c.status & Status.Busy:  # waits, but checks if we got stuck.
                if self.timeout > 0 and r["start"] > 0 and time.time() - r["start"] > self.timeout:
                    warning(
                        " @SOCKET: Timeout! HASDATA for bead " + str(r["id"]) + " has been running for "
                        + str(time.time() - r["start"]) + " sec.", verbosity.low)
                    warning(
                        " @SOCKET: Client " + str(c.peername) + " died or got unresponsive(A). Disconnecting.",
                        verbosity.low)
                    try:
                        c.shutdown(socket.SHUT_RDWR)
                    except:
                        pass
                    c.close()
                    c.status = Status.Disconnected
                    continue
                c.poll()

            if not (c.status & Status.Up):
                warning(
                    " @SOCKET: Client died a horrible death while getting forces. Will try to cleanup.",
                    verbosity.low)
                continue

            r["status"] = "Done"
            c.lastreq = r["id"]  # saves the ID of the request that the client has just processed
            self.jobs = [
                w for w in self.jobs if not (w[0] is r and w[1] is c)
            ]  # removes the pair in a robust way

        if self.timeout > 0 and c.status != Status.Disconnected and r["start"] > 0 and time.time() - r["start"] > self.timeout:
            warning(
                " @SOCKET: Timeout! Request for bead " + str(r["id"]) + " has been running for "
                + str(time.time() - r["start"]) + " sec.", verbosity.low)
            warning(
                " @SOCKET: Client " + str(c.peername) + " died or got unresponsive(B). Disconnecting.",
                verbosity.low)
            try:
                c.shutdown(socket.SHUT_RDWR)
            except socket.error:
                e = sys.exc_info()
                warning(
                    " @SOCKET: could not shut down cleanly the socket. %s: %s in file '%s' on line %d"
                    % (e[0].__name__, e[1], os.path.basename(e[2].tb_frame.f_code.co_filename), e[2].tb_lineno),
                    verbosity.low)
            c.close()
            c.poll()
            c.status = Status.Disconnected

    freec = self.clients[:]
    for [r2, c] in self.jobs:
        freec.remove(c)

    pendr = [r for r in self.requests if r["status"] == "Queued"]

    for fc in freec[:]:
        matched = False
        # first, makes sure that the client is REALLY free
        if not (fc.status & Status.Up):
            self.clients.remove(fc)  # if fc is in freec it can't be associated with a job (we just checked for that above)
            continue
        if fc.status & Status.HasData:
            continue
        if not (fc.status & (Status.Ready | Status.NeedsInit | Status.Busy)):
            warning(
                " @SOCKET: Client " + str(fc.peername) + " is in an unexpected status "
                + str(fc.status) + " at (1). Will try to keep calm and carry on.", verbosity.low)
            continue

        for match_ids in ("match", "none", "free", "any"):
            for r in pendr[:]:
                if match_ids == "match" and fc.lastreq is not r["id"]:
                    continue
                elif match_ids == "none" and fc.lastreq is not None:
                    continue
                elif match_ids == "free" and fc.locked:
                    continue

                info(
                    " @SOCKET: Assigning [%5s] request id %4s to client with last-id %4s (% 3d/% 3d : %s)"
                    % (match_ids, str(r["id"]), str(fc.lastreq), self.clients.index(fc), len(self.clients), str(fc.peername)),
                    verbosity.high)

                while fc.status & Status.Busy:
                    fc.poll()
                if fc.status & Status.NeedsInit:
                    fc.initialize(r["id"], r["pars"])
                    fc.poll()
                    while fc.status & Status.Busy:  # waits for initialization to finish. hopefully this is fast
                        fc.poll()
                if fc.status & Status.Ready:
                    fc.sendpos(r["pos"], r["cell"])
                    r["status"] = "Running"
                    r["start"] = time.time()  # sets the start time for the request
                    fc.poll()
                    self.jobs.append([r, fc])
                    fc.locked = (fc.lastreq is r["id"])
                    matched = True
                    # removes r from the list of pending jobs
                    pendr = [nr for nr in pendr if nr is not r]
                    break
                else:
                    warning(
                        " @SOCKET: Client " + str(fc.peername) + " is in an unexpected status "
                        + str(fc.status) + " at (2). Will try to keep calm and carry on.", verbosity.low)

            if matched:
                break  # doesn't do a second (or third) round if it managed to assign the job
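Both distribution routines guard against hung clients by comparing time.time() - request["start"] with a configurable timeout and disconnecting the offender so the request can be requeued. A tiny sketch of that watchdog pattern over a list of running jobs; the dict layout and function name are illustrative, not the server's internal structures:

import time

def reap_stale_jobs(jobs, timeout):
    """Split `jobs` into (still_running, timed_out); each job is a dict with a
    'start' timestamp, and jobs older than `timeout` seconds are handed back
    to the caller to requeue."""
    now = time.time()
    fresh, stale = [], []
    for job in jobs:
        if timeout > 0 and job["start"] > 0 and now - job["start"] > timeout:
            stale.append(job)
        else:
            fresh.append(job)
    return fresh, stale

# usage
jobs = [{"id": 1, "start": time.time() - 120}, {"id": 2, "start": time.time()}]
running, timed_out = reap_stale_jobs(jobs, timeout=60)
print([j["id"] for j in timed_out])   # -> [1]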