def test_selective_dynamics(self):
    output_filename = os.path.join(test_dir, "vasprun.xml.indirect.gz")
    vr = Vasprun(output_filename)

    # The max force in the final ionic step is above the threshold...
    max_force = np.max(np.linalg.norm(vr.ionic_steps[-1]['forces'], axis=1))
    self.assertTrue(max_force > 0.5)

    # ...but the handler should not flag an error, since the offending forces
    # act on atoms that are constrained by selective dynamics.
    handler = MaxForceErrorHandler(output_filename, max_force_threshold=0.5)
    self.assertFalse(handler.check())
def test_check_correct(self):
    # NOTE: the vasprun here has had projected and partial eigenvalues removed
    subdir = os.path.join(test_dir, "max_force")
    os.chdir(subdir)
    shutil.copy("INCAR", "INCAR.orig")
    shutil.copy("POSCAR", "POSCAR.orig")

    h = MaxForceErrorHandler()
    self.assertTrue(h.check())
    d = h.correct()
    self.assertEqual(d["errors"], ['MaxForce'])

    os.remove(os.path.join(subdir, "error.1.tar.gz"))

    incar = Incar.from_file('INCAR')
    poscar = Poscar.from_file('POSCAR')
    contcar = Poscar.from_file('CONTCAR')

    shutil.move("INCAR.orig", "INCAR")
    shutil.move("POSCAR.orig", "POSCAR")

    self.assertEqual(poscar.structure, contcar.structure)
    self.assertAlmostEqual(incar['EDIFFG'], 0.005)
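For orientation, a minimal usage sketch (not part of the test suite) of how MaxForceErrorHandler is wired into a Custodian run alongside a VaspJob, mirroring the calls exercised by the tests above; the command and the 0.25 eV/A threshold are illustrative assumptions, not values from this file.

# Hedged sketch: assumes a working VASP install; values are illustrative only.
from custodian import Custodian
from custodian.vasp.jobs import VaspJob
from custodian.vasp.handlers import MaxForceErrorHandler

jobs = [VaspJob(["mpirun", "-n", "16", "vasp_std"])]
handlers = [MaxForceErrorHandler(max_force_threshold=0.25)]

# Custodian reruns the job with the handler's corrections until forces converge
# below the threshold or max_errors is exhausted.
c = Custodian(handlers, jobs, max_errors=5)
c.run()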
def run_task(self, fw_spec):

    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler(), StdErrHandler(),
                    DriftErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), StdErrHandler(),
                   AliasingErrorHandler(), DriftErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": []
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, str):
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # initialize variables
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", CUSTODIAN_MAX_ERRORS)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # construct jobs
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "metagga_opt_run":
        jobs = VaspJob.metagga_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            max_steps=9,
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "neb":
        # TODO: @shyuep @HanmeiTang This means that NEB can only be run (i) in reservation mode
        # and (ii) when the queueadapter parameter is overridden and (iii) the queue adapter
        # has a convention for nnodes (with that name). Can't the number of nodes be made a
        # parameter that the user sets differently? e.g., fw_spec["neb_nnodes"] must be set
        # when setting job_type=NEB? Then someone can use this feature in non-reservation
        # mode and without this complication. -computron
        nnodes = int(fw_spec["_queueadapter"]["nnodes"])

        # TODO: @shyuep @HanmeiTang - I am not sure what the code below is doing. It looks like
        # it is trying to override the number of processors. But I tried running the code
        # below after setting "vasp_cmd = 'mpirun -n 16 vasp'" and the code fails.
        # (i) Is this expecting an array vasp_cmd rather than String? If so, that's opposite to
        # the rest of this task's convention and documentation
        # (ii) can we get rid of this hacking in the first place? e.g., allowing the user to
        # separately set the NEB_VASP_CMD as an env_variable and not rewriting the command
        # inside this. -computron

        # Index the tag "-n" or "-np"
        index = [i for i, s in enumerate(vasp_cmd) if '-n' in s]
        ppn = int(vasp_cmd[index[0] + 1])
        vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        # Do the same for gamma_vasp_cmd
        if gamma_vasp_cmd:
            index = [i for i, s in enumerate(gamma_vasp_cmd) if '-n' in s]
            ppn = int(gamma_vasp_cmd[index[0] + 1])
            gamma_vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        jobs = [VaspNEBJob(vasp_cmd, final=False, auto_npar=auto_npar,
                           gamma_vasp_cmd=gamma_vasp_cmd)]
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # construct handlers
    handler_group = self.get("handler_group", "default")
    if isinstance(handler_group, str):
        handlers = handler_groups[handler_group]
    else:
        handlers = handler_group

    if self.get("max_force_threshold"):
        handlers.append(
            MaxForceErrorHandler(max_force_threshold=self["max_force_threshold"]))

    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    if job_type == "neb":
        # CINEB vasprun.xml sometimes incomplete, file structure different
        validators = []
    else:
        validators = [VasprunXMLValidator(), VaspFilesValidator()]

    c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,
                  scratch_dir=scratch_dir, gzipped_output=gzip_output)

    c.run()

    if os.path.exists(zpath("custodian.json")):
        stored_custodian_data = {"custodian": loadfn(zpath("custodian.json"))}
        return FWAction(stored_data=stored_custodian_data)
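As a point of reference, a hedged sketch of how this run_task might be parametrized from a workflow. The class name RunVaspCustodian and its import path are assumptions (the class definition is not shown in this excerpt); only keys that run_task actually reads above are passed.

# Hypothetical configuration sketch; RunVaspCustodian and the ">>vasp_cmd<<"
# env_chk placeholder are assumed, not taken from this excerpt.
from fireworks import Firework
from atomate.vasp.firetasks.run_calc import RunVaspCustodian  # assumed import path

t = RunVaspCustodian(
    vasp_cmd=">>vasp_cmd<<",            # resolved via env_chk against the worker env
    job_type="double_relaxation_run",   # selects the double_relaxation_run branch above
    handler_group="strict",             # or pass an explicit list of handler instances
    max_force_threshold=0.25,           # appends MaxForceErrorHandler
    wall_time=3600,                     # appends WalltimeHandler
)
fw = Firework([t], name="structure optimization")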
def run_task(self, fw_spec):

    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), AliasingErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": []
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, six.string_types):
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # initialize variables
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", 5)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # construct jobs
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,
                                             ediffg=self.get("ediffg"),
                                             half_kpts_first_relax=False)
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(vasp_cmd, auto_npar=auto_npar,
                                    ediffg=self.get("ediffg"), max_steps=5,
                                    half_kpts_first_relax=False)
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # construct handlers
    handlers = handler_groups[self.get("handler_group", "default")]

    if self.get("max_force_threshold"):
        handlers.append(
            MaxForceErrorHandler(max_force_threshold=self["max_force_threshold"]))

    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    validators = [VasprunXMLValidator()]

    c = Custodian(handlers, jobs, validators=validators, max_errors=max_errors,
                  scratch_dir=scratch_dir, gzipped_output=gzip_output)

    c.run()
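Both run_task variants resolve vasp_cmd through env_chk before splitting it with shlex. A short hedged illustration of that indirection, assuming atomate's usual env_chk semantics where a ">>key<<" value is looked up in the worker-specific fw_spec["_fw_env"] dict:

# Hedged illustration; the "_fw_env" contents here are an invented example.
from atomate.utils.utils import env_chk

fw_spec = {"_fw_env": {"vasp_cmd": "mpirun -n 16 vasp_std"}}
cmd = env_chk(">>vasp_cmd<<", fw_spec)   # -> "mpirun -n 16 vasp_std"
cmd = env_chk("vasp_std", fw_spec)       # plain strings pass through unchanged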