def test_smart_indexing(self):
    """Check that OneIndexedArrays survive stacking and fancy indexing."""
    a1 = cctk.OneIndexedArray([1, 2, 3])
    a2 = cctk.OneIndexedArray([4, 5, 6])

    # np.hstack drops the subclass and returns a plain ndarray
    a3 = np.hstack((a1, a2))
    self.assertListEqual(list(a3), [1, 2, 3, 4, 5, 6])
    self.assertTrue(isinstance(a3, np.ndarray))

    # re-wrapping restores the OneIndexedArray type without changing contents
    a3 = cctk.OneIndexedArray(a3)
    self.assertListEqual(list(a3), [1, 2, 3, 4, 5, 6])
    self.assertTrue(isinstance(a3, cctk.OneIndexedArray))

    a4 = np.vstack((a3, a3))
    a4 = cctk.OneIndexedArray(a4)
    self.assertTrue(a4.shape[0] == 2)
    self.assertTrue(a4.shape[1] == 6)
    self.assertListEqual(list(a4[1]), [1, 2, 3, 4, 5, 6])

    # boolean-mask indexing preserves the subclass
    idx = [True, True, False, True, False, False]
    b1 = a3[idx]
    self.assertListEqual(list(b1), [1, 2, 4])
    self.assertTrue(isinstance(b1, cctk.OneIndexedArray))

    # integer-list (1-indexed) indexing
    idx = [1, 2, 4]
    b2 = a3[idx]
    # bug fix: these two assertions previously re-checked b1 instead of b2,
    # so the integer-list indexing path was never actually verified
    self.assertListEqual(list(b2), [1, 2, 4])
    self.assertTrue(isinstance(b2, cctk.OneIndexedArray))
def gen_test_traj():
    """Build a small two-hydrogen test Trajectory containing two hand-made frames.

    Returns:
        presto.trajectory.Trajectory with ``frames`` pre-populated
    """
    calc = presto.calculators.XTBCalculator(
        potential=presto.potentials.SphericalHarmonicPotential(radius=10))
    traj = presto.trajectory.Trajectory(
        timestep=0.5,
        atomic_numbers=cctk.OneIndexedArray([1, 1]),
        high_atoms=np.array([]),
        active_atoms=np.array([1, 2]),
        # bug fix: ``calc`` was previously constructed and then ignored, and an
        # identical calculator was built a second time inline; reuse it instead
        calculator=calc,
        integrator=presto.integrators.VelocityVerletIntegrator(),
        stop_time=10000,
    )

    p1 = cctk.OneIndexedArray([[12, 0, 0], [11.5, 0, 0]])
    p2 = cctk.OneIndexedArray([[12, 0, 0], [10, 0, 0]])
    v = cctk.OneIndexedArray([[2, 0, 0], [0, 0, 0]])
    a = cctk.OneIndexedArray([[0, 0, 0], [0, 0, 0]])

    frame1 = presto.frame.Frame(traj, p1, v, a)
    frame2 = presto.frame.Frame(traj, p2, v, a)
    traj.frames = [frame1, frame2]

    assert isinstance(traj, presto.trajectory.Trajectory)
    assert isinstance(frame1, presto.frame.Frame)
    assert isinstance(frame2, presto.frame.Frame)
    return traj
def test_com_motion(self):
    """Verify that remove_com_motion() zeroes the total linear momentum."""
    n = 4
    numbers = cctk.OneIndexedArray([1] * n)
    traj = presto.trajectory.Trajectory(
        timestep=0.5,
        atomic_numbers=numbers,
        high_atoms=np.array([]),
        active_atoms=np.array([]),
        calculator=presto.calculators.XTBCalculator(),
        integrator=presto.integrators.VelocityVerletIntegrator(),
        stop_time=1000,
    )

    # four hydrogens with deliberately nonzero net momentum
    x = cctk.OneIndexedArray([[2, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0]])
    v = cctk.OneIndexedArray([[0, 2, 1], [0, -1, 0], [1, 0, -2], [-1, 0, 0]])
    a = cctk.OneIndexedArray([[0, 0, 0]] * 4)

    snapshot = presto.frame.Frame(traj, x, v, a)
    snapshot.remove_com_motion()

    # total momentum = sum over atoms of m_i * v_i; should now vanish
    momentum = np.sum(traj.masses.reshape(-1, 1) * snapshot.velocities, axis=0)
    self.assertTrue(np.linalg.norm(momentum) < 0.0001)
def gen_test_frame(self, distance):
    """Build a single-frame, checkpointed H2 trajectory whose two atoms are
    coupled by a pairwise polynomial constraint at the given distance."""
    chk = "test/static/constraint-test.chk"
    if os.path.exists(chk):
        os.remove(chk)

    constraint = presto.constraints.PairwisePolynomialConstraint(
        1, 2, distance, power=2)
    traj = presto.trajectory.Trajectory(
        timestep=0.5,
        atomic_numbers=cctk.OneIndexedArray([1, 1]),
        high_atoms=np.array([]),
        active_atoms=np.array([1, 2]),
        calculator=presto.calculators.Calculator([constraint]),
        integrator=presto.integrators.VelocityVerletIntegrator(),
        stop_time=250,
        checkpoint_filename=chk,
        bath_scheduler=298,
    )

    # drop any stale lock left behind by a previous run
    lock_file = chk + ".lock"
    if os.path.exists(lock_file):
        os.remove(lock_file)

    self.assertTrue(isinstance(traj, presto.trajectory.Trajectory))

    zeros = cctk.OneIndexedArray([[0, 0, 0], [0, 0, 0]])
    positions = cctk.OneIndexedArray([[1, 0, 0], [0, 0, 0]])
    frame = presto.frame.Frame(traj, positions, zeros, zeros)
    self.assertTrue(isinstance(frame, presto.frame.Frame))

    traj.frames = [frame]
    traj.save()
    return frame
def test_remd(self):
    """Run a short two-replica exchange simulation at 300 K and 330 K."""
    for stale in ("test/static/remd-chk1.chk", "test/static/remd-chk2.chk"):
        if os.path.exists(stale):
            os.remove(stale)

    def bath_300(time):
        return 300

    def bath_330(time):
        return 330

    numbers = cctk.OneIndexedArray([2, 10])

    def make_traj(scheduler, chk):
        # both replicas share every setting except bath temperature and checkpoint
        return presto.trajectory.Trajectory(
            timestep=1,
            atomic_numbers=numbers,
            high_atoms=np.array([2]),
            active_atoms=np.array([1, 2]),
            calculator=presto.calculators.XTBCalculator(),
            integrator=presto.integrators.LangevinIntegrator(),
            bath_scheduler=scheduler,
            checkpoint_filename=chk,
            stop_time=400,
        )

    traj1 = make_traj(bath_300, "test/static/remd-chk1.chk")
    traj2 = make_traj(bath_330, "test/static/remd-chk2.chk")

    traj1.initialize(positions=cctk.OneIndexedArray([[1, 0, 0], [0, 0, 0]]))
    traj2.initialize(positions=cctk.OneIndexedArray([[2, 0, 0], [0, 0, 0]]))

    remd = presto.replica_exchange.ReplicaExchange(
        trajectories=[traj1, traj2],
        swap_interval=25,
        checkpoint_filename="test/static/remd.chk")
    remd.run()
def parse_hirshfeld(hirshfeld_block):
    """Parse Hirshfeld charges and spin densities from an output text block.

    Args:
        hirshfeld_block (list of str): raw text blocks; only the first is parsed

    Returns:
        (charges, spins) as cctk.OneIndexedArrays, or (None, None) if the
        block list is empty
    """
    if len(hirshfeld_block) == 0:
        return None, None

    charges, spins = [], []
    # skip the two header lines at the top of the table
    for row in hirshfeld_block[0].split("\n")[2:]:
        tokens = [t for t in re.split(" +", row) if t]
        # only fully-populated 8-column data rows carry charge/spin values
        if len(tokens) == 8:
            charges.append(float(tokens[2]))
            spins.append(float(tokens[3]))
    return cctk.OneIndexedArray(charges), cctk.OneIndexedArray(spins)
def test_anchor(self):
    """Check Anchor constraint forces and energy for a displaced atom."""
    anchor = presto.constraints.Anchor(1)
    position = cctk.OneIndexedArray([[1, 0, 0], [0, 0, 0]])
    position[1] = np.asarray([1, 1, 1])
    f, e = anchor.evaluate(position)
    # bug fix: the original asserted ``x + 0.0004184 < 0.000001``, which is
    # satisfied by any sufficiently negative x (and similarly for e), so the
    # test could not catch a wrong-magnitude result; compare absolute
    # deviations from the expected values instead.
    for x in list(f[1]):
        self.assertTrue(abs(x + 0.0004184) < 0.000001)
    self.assertTrue(abs(e - 0.0031379999999999997) < 0.000001)
def gen_test_frame(self):
    """Construct a one-atom (helium) Langevin trajectory and a single frame."""
    traj = presto.trajectory.Trajectory(
        timestep=0.5,
        atomic_numbers=cctk.OneIndexedArray([2]),
        high_atoms=np.array([]),
        active_atoms=np.array([1]),
        stop_time=100,
        calculator=presto.calculators.XTBCalculator(),
        integrator=presto.integrators.LangevinIntegrator(viscosity=0.0001),
    )
    self.assertTrue(isinstance(traj, presto.trajectory.Trajectory))

    origin = cctk.OneIndexedArray([[0, 0, 0]])
    unit_x = cctk.OneIndexedArray([[1, 0, 0]])
    # position at origin, velocity along x, zero acceleration
    frame = presto.frame.Frame(traj, origin, unit_x, origin)
    self.assertTrue(isinstance(frame, presto.frame.Frame))
    return frame
def parse_forces(force_block):
    """Parse per-atom force vectors from an output text block.

    Args:
        force_block (list of str): raw text blocks; only the first is parsed

    Returns:
        cctk.OneIndexedArray of (x, y, z) force components
    """
    rows = []
    # the first two lines are table headers
    for raw in force_block[0].split("\n")[2:]:
        tokens = [t for t in re.split(" +", raw) if t]
        # data rows have exactly 5 columns; columns 3-5 are the components
        if len(tokens) == 5:
            rows.append([float(tokens[2]), float(tokens[3]), float(tokens[4])])
    return cctk.OneIndexedArray(rows)
def test_indexing(self):
    """Exercise 1-indexed reads and writes on 1D and 2D OneIndexedArrays."""
    vec = cctk.OneIndexedArray([1, 2, 3, 4, 5])
    self.assertEqual(len(vec), 5)
    self.assertEqual(str(vec), "[1 2 3 4 5]")
    self.assertEqual(str(vec[1:3]), "[1 2]")
    self.assertEqual(vec[5], 5)
    self.assertEqual(vec[1], 1)
    self.assertTrue(
        isinstance(cctk.OneIndexedArray(vec), cctk.OneIndexedArray))
    self.assertEqual(vec[[1]], 1)
    self.assertListEqual(list(vec[[1, 2]]), [1, 2])

    # single-element write through a 1-based index
    vec[1] = 8
    self.assertEqual(str(vec), "[8 2 3 4 5]")
    self.assertEqual(vec[1], 8)

    # 2D: first index is 1-based (rows), second is 0-based (columns)
    mat = cctk.OneIndexedArray([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
    self.assertEqual(mat[1, 1], 1)
    self.assertEqual(mat[2, 0], 2)

    mat[3, 0] = 7
    self.assertEqual(mat[3, 0], 7)

    # whole-row assignment
    mat[3] = np.array([-1, -2, 0])
    self.assertListEqual(list(mat[3]), [-1, -2, 0])
    self.assertTrue(isinstance(mat, cctk.OneIndexedArray))

    # a single-row read comes back as a plain ndarray
    row = mat[1]
    self.assertTrue(isinstance(row, np.ndarray))

    mat[1] = np.asarray([4, 4, 4])
    self.assertListEqual(list(mat[1]), [4, 4, 4])
def parse_charges_dipole(mulliken_block, dipole_block):
    """Parse Mulliken charges and the dipole moment from output text blocks.

    Args:
        mulliken_block (list of str): only the first element is parsed
        dipole_block (list of str): only the first element is parsed

    Returns:
        (charges as cctk.OneIndexedArray, dipole magnitude, dipole vector)
    """
    charges = []
    # the first two lines are table headers
    for row in mulliken_block[0].split("\n")[2:]:
        tokens = [t for t in re.split(" +", row) if t]
        if len(tokens) == 3:
            charges.append(float(tokens[2]))

    dipole = 0
    dipole_v = np.zeros(shape=3)
    # only the first 8-column row carries the dipole components/magnitude
    for row in dipole_block[0].split("\n")[1:]:
        tokens = [t for t in re.split(" +", row) if t]
        if len(tokens) == 8:
            dipole_v[0] = float(tokens[1])
            dipole_v[1] = float(tokens[3])
            dipole_v[2] = float(tokens[5])
            dipole = float(tokens[7])
            break

    return cctk.OneIndexedArray(charges), dipole, dipole_v
def run_xtb(molecule, gfn=2, parallel=8, xcontrol_path=None, topo_path=None, directory=None):
    """
    Run an xtb job.

    Args:
        molecule (cctk.Molecule): molecule to compute
        gfn (int or "ff"): which gfn method to use
        parallel (int): number of OpenMP threads to use
        xcontrol_path (str): optional path to an xtb ``xcontrol`` input file
        topo_path (str): optional path at which to reuse/save a GFN-FF topology
        directory (str): optional working directory for the external job

    Returns:
        energy (float, Hartree)
        forces (cctk.OneIndexedArray)
        elapsed time (float, seconds)
    """
    # check that xtb is even on this system
    # (removed stray f-prefix: the message has no placeholders)
    assert presto.config.HAS_XTB, "xtb not present; can't run job!"

    forces, energy = None, None
    manager = ExternalProgramManager(directory)

    # build command
    command = presto.config.XTB_EXEC
    if gfn == "ff":
        command += " --gfnff"
    else:
        command += f" --gfn {gfn}"
    command += f" --chrg {molecule.charge} --uhf {molecule.multiplicity - 1}"
    if parallel > 1:
        command += f" --parallel {parallel}"
    if xcontrol_path:
        command += f" --input {xcontrol_path}"
    command += " --grad xtb-in.xyz &> xtb-out.out"

    # set system params
    os.environ["OMP_NUM_THREADS"] = str(parallel)
    os.environ["MKL_NUM_THREADS"] = str(parallel)
    os.environ["OMP_STACKSIZE"] = "4G"
    os.environ["OMP_MAX_ACTIVE_LEVELS"] = "1"
    os.environ["XTBPATH"] = presto.config.XTB_PATH
    os.environ["XTBHOME"] = presto.config.XTB_PATH

    # write input .xyz file
    cctk.XYZFile.write_molecule_to_file(f"{manager.workdir}/xtb-in.xyz", molecule)

    # reuse a previously-saved GFN-FF topology, if one was provided
    if topo_path and os.path.exists(topo_path):
        manager.copy_to_work(topo_path, "gfnff_topo")

    # run xtb
    start = time.time()
    result = sp.run(command, cwd=manager.workdir, shell=True, capture_output=True)
    end = time.time()
    elapsed = end - start

    # make sure things ran ok
    result.check_returncode()
    assert os.path.isfile(f"{manager.workdir}/energy"), "no energy file!"
    assert os.path.isfile(f"{manager.workdir}/gradient"), "no gradient file!"

    # parse energy
    with open(f"{manager.workdir}/energy", "r") as f:
        energy_lines = f.read().splitlines()
    energy = float(energy_lines[1].split()[1])

    # parse forces: negate the gradient, then convert units
    with open(f"{manager.workdir}/gradient", "r") as f:
        gradient_lines = f.read().splitlines()
    forces = []
    for line in gradient_lines:
        fields = line.split()
        # gradient rows have exactly 3 columns (coordinate rows have 4)
        if len(fields) == 3:
            x, y, z = float(fields[0]), float(fields[1]), float(fields[2])
            forces.append([-x, -y, -z])
    forces = cctk.OneIndexedArray(forces)
    forces = forces * presto.constants.AMU_A2_FS2_PER_HARTREE_BOHR
    assert len(forces) == molecule.get_n_atoms(), "unexpected number of atoms"

    # save topology
    # bug fix: guard against topo_path being None -- previously
    # ``os.path.exists(None)`` raised TypeError whenever gfn == "ff" was run
    # without a topology path
    if gfn == "ff" and topo_path and not os.path.exists(topo_path):
        assert os.path.exists(f"{manager.workdir}/gfnff_topo"), "xtb didn't generate topology file!"
        manager.copy_to_home("gfnff_topo", topo_path)

    # clean up and return data
    manager.cleanup()
    del manager
    return energy, forces, elapsed
def parse_modes(freq_block, num_atoms, hpmodes=False):
    """
    Parse vibrational modes from a frequency-output text block.

    Args:
        freq_block (list of str): raw text blocks containing the frequency
            table; only the first element is parsed. An empty list yields an
            empty result.
        num_atoms (int): number of atoms (sizes the per-mode displacement
            arrays in the hpmodes branch)
        hpmodes (bool): whether the output was printed with high-precision
            modes (different table layout and column offsets)

    Returns:
        list of cctk.VibrationalMode objects
    """
    freqs = list()
    masses = list()
    force_ks = list()
    intensities = list()
    displacements = list()
    if len(freq_block) == 0:
        return list()
    # each "Freq" marker starts one table chunk of (up to) several mode columns
    chunks = freq_block[0].split("Freq")
    if hpmodes:
        # the first split fragment precedes the first "Freq" marker
        chunks = chunks[1:]
    for chunk in chunks:
        lines = chunk.split("\n")
        if hpmodes:
            num_cols = len(re.split(" +", lines[0])) - 2
            # one (num_atoms x 3) displacement matrix per mode column
            current_displacements = [
                np.zeros(shape=(num_atoms, 3)) for x in range(num_cols)
            ]
            if len(freqs):
                new_freqs = list(filter(None, re.split(" +", lines[0])))[2:]
                # frequencies print in increasing order; a non-increase means
                # we've wrapped into the repeated low-precision section
                if float(new_freqs[-1]) <= float(freqs[-1]):
                    break  # want to skip the non-hpmodes section, so no looping allowed
                else:
                    freqs += new_freqs
            else:
                freqs += list(filter(None, re.split(" +", lines[0])))[2:]
            masses += list(filter(None, re.split(" +", lines[1])))[3:]
            force_ks += list(filter(None, re.split(" +", lines[2])))[3:]
            intensities += list(filter(None, re.split(" +", lines[3])))[3:]
            for line in lines[6:]:
                fields = re.split(" +", line)
                fields = list(filter(None, fields))
                if len(fields) < (num_cols + 3):
                    continue
                if fields[0] == "Harmonic":
                    break
                # fields[0]/fields[1] index the displacement entry; presumably
                # coordinate and atom number respectively -- TODO confirm
                # against sample output
                for col_idx, val in enumerate(fields[3:]):
                    current_displacements[col_idx][int(fields[1]) - 1][int(fields[0]) - 1] = val
            for d in current_displacements:
                displacements.append(d.view(cctk.OneIndexedArray))
        else:
            # low-precision layout: up to three modes side by side per chunk
            current_displacements = [
                list() for _ in re.split(" +", lines[0])[2:]
            ]
            freqs += re.split(" +", lines[0])[2:]
            masses += re.split(" +", lines[1])[4:]
            force_ks += re.split(" +", lines[2])[4:]
            intensities += re.split(" +", lines[3].rstrip())[4:]
            for line in lines[5:]:
                fields = re.split(" +", line)
                fields = list(filter(None, fields))
                if len(fields) < 4:
                    break
                # columns 2-4, 5-7, 8-10 hold the x/y/z displacements for the
                # first, second, and third mode in this chunk
                current_displacements[0].append(
                    [float(x) for x in fields[2:5]])
                if len(current_displacements) > 1:
                    current_displacements[1].append(
                        [float(x) for x in fields[5:8]])
                if len(current_displacements) > 2:
                    current_displacements[2].append(
                        [float(x) for x in fields[8:11]])
            for d in current_displacements:
                displacements.append(cctk.OneIndexedArray(d))
    # everything was accumulated as strings; convert now
    freqs = [float(x) for x in freqs]
    masses = [float(x) for x in masses]
    force_ks = [float(x) for x in force_ks]
    intensities = [float(x) for x in intensities]

    assert len(freqs) == len(masses)
    assert len(freqs) == len(force_ks)
    assert len(freqs) == len(displacements)

    modes = list()
    for f, m, k, i, d in zip(freqs, masses, force_ks, intensities,
                             displacements):
        k *= 143.9326  # mdyne Å**-1 to kcal/mol Å**-2
        modes.append(
            cctk.VibrationalMode(frequency=f,
                                 reduced_mass=m,
                                 force_constant=k,
                                 intensity=i,
                                 displacements=d))
    return modes
def scale_nmr_shifts(ensemble,
                     symmetrical_atom_numbers=None,
                     scaling_factors="default",
                     property_name="isotropic_shielding"):
    """
    Apply linear scaling to isotropic shieldings to get chemical shifts.

    Shifts are calculated as (intercept-shielding)/slope.

    If there are no shifts available for a structure, None will be placed in
    both return lists.

    Args:
        ensemble: an ``Ensemble`` with calculated nmr shifts
        symmetrical_atom_numbers: None to perform no symmetry-averaging,
            a list of lists of 1-indexed atom numbers (e.g. [ [2,4,5], [7,8] ])
            for a ConformationalEnsemble, or triply-nested lists for an
            Ensemble, where the outer index refers to the index of the Ensemble.
        scaling_factors: "default" to use DEFAULT_NMR_SCALING_FACTORS or
            a dict (atomic symbol --> (slope,intercept)).
            Elements for which scaling factors are not provided will be ignored.
        property_name: the key in properties_dict to use to locate the
            predicted isotropic shieldings (default="isotropic_shielding")

    Returns:
        scaled_shifts: np.array (matching the shape of the original shieldings
            minus symmetry averaging)
        shift_labels: np.array (also matches shape)
    """
    # check inputs
    assert isinstance(
        ensemble, cctk.Ensemble
    ), f"expected Ensemble but got {str(type(ensemble))} instead"
    assert len(ensemble) > 0, "empty ensemble not allowed"

    if symmetrical_atom_numbers is None:
        symmetrical_atom_numbers = []
    # NOTE(review): the message below interpolates type(ensemble); it
    # presumably should reference type(symmetrical_atom_numbers) instead
    assert isinstance(
        symmetrical_atom_numbers, list
    ), f"symmetrical atom numbers should be specified as a list of lists, but got {str(type(ensemble))} instead"
    for l in symmetrical_atom_numbers:
        assert isinstance(
            l, list
        ), f"symmetrical atom numbers must be specified as lists, but got {str(type(l))} instead: {str(l)}"

    if scaling_factors == "default":
        scaling_factors = DEFAULT_NMR_SCALING_FACTORS
    else:
        assert isinstance(scaling_factors, dict)
    assert len(scaling_factors) > 0, "must provide scaling factors"
    assert isinstance(property_name, str) and len(
        property_name) > 0, f"property_name {property_name} is invalid"

    # get shieldings and scale
    all_scaled_shifts = []
    all_shift_labels = []
    for i, (molecule, properties) in enumerate(ensemble.items()):
        if property_name in properties:
            # get atom numbers and atomic elements as OneIndexedArrays
            atomic_numbers = molecule.atomic_numbers
            n_atoms = len(atomic_numbers)
            atomic_symbols = [get_symbol(n) for n in atomic_numbers]
            atomic_symbols = cctk.OneIndexedArray(atomic_symbols)
            atom_numbers = list(range(1, n_atoms + 1))
            # symbol_dict = dict(zip(atomic_numbers,atomic_symbols))

            # build per-atom labels like "C1", "H2", ...
            all_labels = [
                f"{current_symbol}{atom_number}" for current_symbol,
                atom_number in zip(atomic_symbols, atom_numbers)
            ]
            all_labels = cctk.OneIndexedArray(all_labels)
            label_dict = dict(zip(atom_numbers, all_labels))  # NOTE(review): unused

            # check symmetrical atom numbers make sense
            n_atoms = len(atomic_numbers)
            symmetrical_groups_dict = {
            }  # symbol --> [ [list1], [list2], ...] where each list is a group of symmetrical atom numbers
            symmetrical_groups_dict2 = {
            }  # symbol --> [ union of all symmetrical atom numbers for this symbol ]
            # unique_atoms_dict = {}  # symbol --> [ union of all unique atom numbers for this symbol ]
            for symmetrical_group in symmetrical_atom_numbers:
                assert len(
                    symmetrical_group
                ) > 1, "must be at least 2 symmetrical nuclei in a group"
                assert len(symmetrical_group) == len(
                    set(symmetrical_group)
                ), f"check for duplicate atom numbers in {symmetrical_group}"
                symmetrical_symbol = None
                for atom_number in symmetrical_group:
                    assert 1 <= atom_number <= n_atoms, f"atom number {atom_number} is out of range"
                    if symmetrical_symbol is None:
                        # first atom in the group fixes the element for the whole group
                        symmetrical_symbol = atomic_symbols[atom_number]
                        assert symmetrical_symbol in scaling_factors, f"no scaling factors available for the element {symmetrical_symbol}"
                    assert atomic_symbols[atom_number] == symmetrical_symbol,\
                        (f"all atoms in a symmetrical group must correspond to the same element\n"
                         f"expected element {symmetrical_symbol} for atom {atom_number},"
                         f"but got element {atomic_symbols[atom_number]}")
                if symmetrical_symbol not in symmetrical_groups_dict:
                    symmetrical_groups_dict[symmetrical_symbol] = []
                symmetrical_groups_dict[symmetrical_symbol].append(
                    symmetrical_group)
                if symmetrical_symbol not in symmetrical_groups_dict2:
                    symmetrical_groups_dict2[symmetrical_symbol] = []
                symmetrical_groups_dict2[symmetrical_symbol].extend(
                    symmetrical_group)

            # get shieldings
            all_shieldings = properties[property_name]

            # iterate through requested elements
            molecule_shifts = []
            molecule_labels = []
            for symbol_of_interest, (slope,
                                     intercept) in scaling_factors.items():
                # sanity checks
                assert isinstance(
                    slope, float
                ), f"expected slope to be float, but got {str(type(slope))}"
                assert slope != 0, "zero slope not allowed"
                assert isinstance(
                    intercept, float
                ), f"expected intercept to be float, but got {str(type(intercept))}"

                # determine unique atoms: atoms of this element that do not
                # belong to any symmetrical group
                unique_atom_numbers_list = []
                for atomic_symbol, atom_number in zip(atomic_symbols,
                                                      atom_numbers):
                    if atomic_symbol != symbol_of_interest:
                        continue
                    if symbol_of_interest in symmetrical_groups_dict2:
                        if atom_number in symmetrical_groups_dict2[
                                symbol_of_interest]:
                            continue
                    unique_atom_numbers_list.append(atom_number)

                # extract relevant shieldings and labels for unique atoms
                if len(unique_atom_numbers_list) > 0:
                    selected_shieldings = list(
                        all_shieldings[unique_atom_numbers_list])
                    selected_labels = list(
                        all_labels[unique_atom_numbers_list])
                else:
                    selected_shieldings = []
                    selected_labels = []

                # extract relevant shieldings and labels for symmetrical groups
                symmetrical_groups = []
                if symbol_of_interest in symmetrical_groups_dict:
                    symmetrical_groups = symmetrical_groups_dict[
                        symbol_of_interest]
                for symmetrical_group in symmetrical_groups:
                    first_atom_number = symmetrical_group[0]
                    current_atomic_symbol = atomic_symbols[first_atom_number]
                    if current_atomic_symbol == symbol_of_interest:
                        # average the shieldings across the symmetrical group
                        group_shieldings = all_shieldings[symmetrical_group]
                        averaged_shielding = group_shieldings.mean()
                        selected_shieldings.append(averaged_shielding)
                        # build a combined label like "H7/8"
                        label = f"{current_atomic_symbol}"
                        for j, atom_number in enumerate(symmetrical_group):
                            label += f"{atom_number}"
                            if j < len(symmetrical_group) - 1:
                                label += "/"
                        selected_labels.append(label)

                # apply scaling: shift = (intercept - shielding) / slope
                assert len(selected_shieldings) == len(
                    selected_labels
                ), "shieldings and labels should have 1:1 correspondence"
                selected_shifts = np.array(selected_shieldings)
                selected_shifts = (intercept - selected_shifts) / slope
                selected_labels = np.array(selected_labels)

                # update results
                molecule_shifts.extend(selected_shifts)
                molecule_labels.extend(selected_labels)

            # update master results if appropriate
            if len(molecule_shifts) > 0:
                all_scaled_shifts.append(molecule_shifts)
                all_shift_labels.append(molecule_labels)
            else:
                # assume this means a bug
                raise ValueError(
                    "no relevant shieldings were extracted for this molecule!")
        else:
            # there are no shieldings available, so append None
            all_scaled_shifts.append(None)
            all_shift_labels.append(None)

    # return result
    scaled_shifts = np.array(all_scaled_shifts)
    shift_labels = np.array(all_shift_labels)
    return scaled_shifts, shift_labels
def __init__(
        self,
        calculator=None,
        integrator=None,
        reporters=list(),
        checks=list(),
        timestep=None,
        atomic_numbers=None,
        high_atoms=None,
        forwards=True,
        checkpoint_filename=None,
        checkpoint_interval=10,
        stop_time=None,
        save_interval=1,
        buffer=100,
        load_frames="all",  # or ``first`` or ``last`` or a slice
        bath_scheduler=298,
        termination_function=None,
        **kwargs):
    """
    Initialize a trajectory.

    If ``checkpoint_filename`` points at an existing checkpoint, state is
    loaded from it first and several constructor arguments (atomic numbers,
    masses, frames, forwards flag) are then only applied if the checkpoint
    did not already provide them.

    Args:
        calculator (presto.calculators.Calculator): force/energy calculator
        integrator (presto.integrators.Integrator): integration scheme
        reporters (list of presto.reporters.Reporter): progress reporters
        checks (list of presto.checks.Check): periodic sanity checks
        timestep (float): integration timestep, must be positive
        atomic_numbers (cctk.OneIndexedArray): element numbers, 1-indexed
        high_atoms (np.ndarray): atoms in the high layer, or None
        forwards (bool): direction of propagation
        checkpoint_filename (str): path to checkpoint file
        checkpoint_interval (int): frames between checkpoint writes
        stop_time (float or int): total simulation time, must be positive
        save_interval (int): frames between saves
        buffer (int): number of frames to keep in memory
        load_frames (str or slice): which frames to load from checkpoint
        bath_scheduler (callable or number): bath temperature vs. time, or a
            constant temperature
        termination_function (callable): returns True when the run should end
        **kwargs: may contain ``active_atoms`` or ``inactive_atoms``
            (mutually exclusive ways to specify the movable atoms)
    """
    # do this first!
    if timestep is not None:
        assert timestep > 0, "can't have timestep ≤ 0!"
        self.timestep = float(timestep)

    # also do this first, so checkpoint file can overrule as needed
    if forwards is not None:
        assert isinstance(forwards, bool), "forwards must be bool"
        self.forwards = forwards
    elif not hasattr(self, "forwards"):
        self.forwards = True

    if checkpoint_filename is not None:
        assert isinstance(checkpoint_filename, str), "need string for file"
        self.checkpoint_filename = checkpoint_filename

    assert isinstance(
        checkpoint_interval, int
    ) and checkpoint_interval > 0, "checkpoint_interval must be positive integer"
    self.checkpoint_interval = checkpoint_interval

    # acquire an inter-process lock before any checkpoint I/O
    self.lock = None
    self.initialize_lock()

    self.frames = list()
    if self.has_checkpoint():
        # may overwrite atomic_numbers/masses/frames/finished/forwards
        self.load_from_checkpoint(load_frames)

    # now we carry on building the "mundane" attributes
    if calculator is not None:
        assert isinstance(
            calculator,
            presto.calculators.Calculator), "need a valid calculator!"
        self.calculator = calculator

    if integrator is not None:
        assert isinstance(
            integrator,
            presto.integrators.Integrator), "need a valid integrator!"
        self.integrator = integrator

    assert all([isinstance(c, presto.checks.Check) for c in checks])
    self.checks = checks

    assert all(
        [isinstance(r, presto.reporters.Reporter) for r in reporters])
    self.reporters = reporters

    if atomic_numbers is not None:
        assert isinstance(atomic_numbers, cctk.OneIndexedArray
                          ), "atomic numbers must be cctk 1-indexed array!"
        self.atomic_numbers = atomic_numbers
    elif not hasattr(self, "atomic_numbers"):
        # neither the caller nor the checkpoint supplied atomic numbers
        raise ValueError("no atomic numbers specified")

    if not hasattr(self, "finished"):
        self.finished = False

    if high_atoms is not None:
        assert isinstance(high_atoms,
                          np.ndarray), "high_atoms must be np.ndarray!"
        self.high_atoms = high_atoms
    else:
        self.high_atoms = None

    # active atoms can be given directly, or implied via inactive_atoms
    active_atoms = None
    if "active_atoms" in kwargs:
        active_atoms = kwargs["active_atoms"]
        assert isinstance(active_atoms,
                          np.ndarray), "active_atoms must be np.ndarray!"
        self.active_atoms = active_atoms
    elif "inactive_atoms" in kwargs:
        self.set_inactive_atoms(kwargs["inactive_atoms"])
    else:
        # assume all atoms are active
        self.set_inactive_atoms(None)

    if not hasattr(self, "masses"):
        # NOTE(review): iterates the ``atomic_numbers`` parameter rather than
        # self.atomic_numbers; would raise TypeError if the parameter is None
        # and the checkpoint supplied atomic numbers but not masses — confirm
        # the checkpoint always stores masses
        self.masses = cctk.OneIndexedArray([
            float(cctk.helper_functions.draw_isotopologue(z))
            for z in atomic_numbers
        ])

    if not hasattr(self, "frames"):
        # NOTE(review): self.frames is always set above, so this appears dead
        self.frames = []

    if not hasattr(self, "stop_time"):
        assert (isinstance(stop_time, float)) or (isinstance(
            stop_time, int)), "stop_time needs to be numeric!"
        assert stop_time > 0, "stop_time needs to be positive!"
        self.stop_time = stop_time

    assert isinstance(save_interval,
                      int), "save_interval needs to be positive"
    assert save_interval > 0, "save_interval needs to be positive"
    self.save_interval = save_interval

    assert isinstance(buffer, int), "buffer needs to be positive"
    assert buffer > 0, "buffer needs to be positive"
    self.buffer = buffer

    # build bath scheduler
    if hasattr(bath_scheduler, "__call__"):
        self.bath_scheduler = bath_scheduler
    elif isinstance(bath_scheduler, (int, float)):
        # most of the time it's ok just to keep things constant.
        def sched(time):
            return bath_scheduler

        self.bath_scheduler = sched
    else:
        raise ValueError(
            f"unknown type {type(bath_scheduler)} for bath_scheduler - want either a function or a number!"
        )

    # build termination function
    if termination_function is not None:
        assert hasattr(
            termination_function,
            "__call__"), "termination_function must be a function!"
        self.termination_function = termination_function
    else:
        # if we haven't specified any criteria, we don't want to end before time's up! so we'll just say "end never."
        def term(time):
            return False

        self.termination_function = term
def load_from_checkpoint(self, frames="all"):
    """
    Loads frames from ``self.checkpoint_filename``.

    Also restores ``atomic_numbers``, ``masses``, ``finished``, and
    ``forwards`` from the checkpoint's attributes.

    Args:
        frames (Slice object): if not all frames are desired, a Slice object
            can be passed or a string - ``all``, ``first``, ``last``, or
            ``buffer``

    Returns:
        nothing
    """
    if not self.has_checkpoint():
        return  # nothing to load!

    if frames == "all":
        frames = slice(None)
    elif frames == "first":
        # bug fix: this was ``slice(1, None, None)``, which selects every
        # frame *except* the first -- the opposite of what ``first`` implies
        # (compare ``last`` below, which selects only the final frame)
        frames = slice(None, 1, None)
    elif frames == "last":
        frames = slice(-1, None, None)
    elif frames == "buffer":
        frames = slice(-self.buffer, None, None)
    else:
        assert isinstance(
            frames, slice
        ), "load_frames must be ``all``, ``first``, ``last``, or slice"

    # serialize checkpoint access across processes
    self.initialize_lock()
    self.lock.acquire()

    with h5py.File(self.checkpoint_filename, "r") as h5:
        atomic_numbers = h5.attrs["atomic_numbers"]
        self.atomic_numbers = cctk.OneIndexedArray(atomic_numbers)

        masses = h5.attrs["masses"]
        self.masses = cctk.OneIndexedArray(masses)

        self.finished = h5.attrs['finished']
        self.forwards = h5.attrs['forwards']

        self.frames = []
        # NOTE(review): assumes the "all_energies" dataset exists — h5.get
        # returns None for a missing key, which would make len() raise
        if len(h5.get("all_energies")):
            all_energies = h5.get("all_energies")[frames]
            all_positions = h5.get("all_positions")[frames]
            all_velocities = h5.get("all_velocities")[frames]
            all_accels = h5.get("all_accelerations")[frames]
            temperatures = h5.get("bath_temperatures")[frames]
            all_times = h5.get("all_times")[frames]

            if isinstance(all_energies, np.ndarray):
                # all per-frame datasets must agree in length
                assert len(all_positions) == len(all_energies)
                assert len(all_velocities) == len(all_energies)
                assert len(all_accels) == len(all_energies)
                assert len(all_times) == len(all_energies)

                for i, t in enumerate(all_times):
                    self.frames.append(
                        presto.frame.Frame(
                            self,
                            all_positions[i].view(cctk.OneIndexedArray),
                            all_velocities[i].view(cctk.OneIndexedArray),
                            all_accels[i].view(cctk.OneIndexedArray),
                            energy=all_energies[i],
                            bath_temperature=temperatures[i],
                            time=all_times[i],
                        ))

        logger.info(
            f"Loaded trajectory from checkpoint file {self.checkpoint_filename} -- {len(self.frames)} frames read."
        )

    self.lock.release()
    return