def get_custodian_task(spec):
    """Build a VaspCustodianTask for the given spec.

    Structure-optimization ("optimize structure (2x)") tasks get a
    double-relaxation job plus a NonConvergingErrorHandler; every other
    task type runs a single VaspJob with the basic handler set.

    :param spec: FW spec dict containing a 'task_type' entry
    :return: configured VaspCustodianTask
    """
    task_type = spec['task_type']
    # Placeholder string; replaced by the real VASP executable on the node.
    v_exe = 'VASP_EXE'

    if 'optimize structure (2x)' in task_type:
        jobs = VaspJob.double_relaxation_run(v_exe, gzipped=False)
        handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                    MeshSymmetryErrorHandler(), NonConvergingErrorHandler()]
    else:
        jobs = [VaspJob(v_exe)]
        handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                    MeshSymmetryErrorHandler()]

    return VaspCustodianTask({
        'jobs': [j_decorate(j.to_dict) for j in jobs],
        'handlers': [h.to_dict for h in handlers],
        'max_errors': 10,
    })
def get_custodian_task(spec):
    """Return a VaspCustodianTask configured for the spec's task type.

    Optimization tasks run a double relaxation; static/deformed tasks run
    a single VaspJob with the full handler set; all other (non-SCF) task
    types run a single VaspJob with no error handlers at all.

    :param spec: FW spec dict containing a 'task_type' entry
    :return: configured VaspCustodianTask
    """
    task_type = spec['task_type']
    # Placeholder string; replaced by the real VASP executable on the node.
    v_exe = 'VASP_EXE'

    handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                MeshSymmetryErrorHandler(), NonConvergingErrorHandler(),
                PositiveEnergyErrorHandler()]

    if 'optimize structure (2x)' in task_type:
        jobs = VaspJob.double_relaxation_run(v_exe)
    elif 'static' in task_type or 'deformed' in task_type:
        jobs = [VaspJob(v_exe)]
    else:
        # non-SCF runs: single job, no handlers
        jobs = [VaspJob(v_exe)]
        handlers = []

    return VaspCustodianTask({
        'jobs': [j_decorate(j.as_dict()) for j in jobs],
        'handlers': [h.as_dict() for h in handlers],
        'max_errors': 5,
    })
def run_task(self, fw_spec):
    """Assemble Custodian jobs/handlers/validators from task options and run VASP.

    Reads 'vasp_cmd', 'job_type', 'handler_group' and related keys from the
    task dict (env_chk-resolved against fw_spec) and executes Custodian.
    """
    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), AliasingErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": [],
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, six.string_types):
        # Expand env vars, then tokenize the command string into a list.
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # Resolve task options (env_chk handles env-dependent values).
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", 5)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # Build the Custodian job list for the requested job type.
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar,
                                             ediffg=self.get("ediffg"),
                                             half_kpts_first_relax=False)
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(vasp_cmd, auto_npar=auto_npar,
                                    ediffg=self.get("ediffg"), max_steps=5,
                                    half_kpts_first_relax=False)
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # Pick the handler group, plus optional force/walltime handlers.
    handlers = handler_groups[self.get("handler_group", "default")]
    if self.get("max_force_threshold"):
        handlers.append(MaxForceErrorHandler(
            max_force_threshold=self["max_force_threshold"]))
    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    validators = [VasprunXMLValidator()]

    custodian = Custodian(handlers, jobs, validators=validators,
                          max_errors=max_errors, scratch_dir=scratch_dir,
                          gzipped_output=gzip_output)
    custodian.run()
def run_task(self, fw_spec):
    """Run a double-relaxation VASP job under Custodian in the spec's workdir."""
    workdir = fw_spec['workdir']
    vasp_cmd = fw_spec['vasp_cmd']
    os.chdir(workdir)

    relax_jobs = VaspJob.double_relaxation_run(vasp_cmd)
    error_handlers = [
        VaspErrorHandler(),
        UnconvergedErrorHandler(),
        FrozenJobErrorHandler(),
        NonConvergingErrorHandler(nionic_steps=5, change_algo=True),
        MeshSymmetryErrorHandler(),
    ]
    Custodian(error_handlers, relax_jobs, max_errors=10).run()
def _get_custodian_task(spec):
    """Build a VaspCustodianTask with npar/gamma auto-tuning disabled.

    Optimization tasks use a double relaxation; everything else runs a
    single VaspJob. A fixed basic handler set is used in both cases.
    """
    task_type = spec['task_type']
    # Placeholder string; replaced by the real VASP executable on the node.
    v_exe = 'VASP_EXE'

    if 'optimize structure (2x)' in task_type:
        jobs = VaspJob.double_relaxation_run(v_exe, gzipped=False)
    else:
        jobs = [VaspJob(v_exe)]

    handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                MeshSymmetryErrorHandler()]

    return VaspCustodianTask({
        'jobs': [j.to_dict for j in jobs],
        'handlers': [h.to_dict for h in handlers],
        'max_errors': 10,
        'auto_npar': False,
        'auto_gamma': False,
    })
def get_custodian_task(spec):
    """Build a VaspCustodianTask from the spec's task type.

    Optimization tasks use an ungzipped double relaxation; static tasks a
    single VaspJob with the full handler set; other (non-SCF) tasks a
    single VaspJob with no handlers.
    """
    task_type = spec['task_type']
    # Placeholder string; replaced by the real VASP executable on the node.
    v_exe = 'VASP_EXE'

    handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                MeshSymmetryErrorHandler(), NonConvergingErrorHandler(),
                PositiveEnergyErrorHandler()]

    if 'optimize structure (2x)' in task_type:
        jobs = VaspJob.double_relaxation_run(v_exe, gzipped=False)
    elif 'static' in task_type:
        jobs = [VaspJob(v_exe)]
    else:
        # non-SCF runs: single job, no handlers
        jobs = [VaspJob(v_exe)]
        handlers = []

    return VaspCustodianTask({
        'jobs': [j_decorate(j.as_dict()) for j in jobs],
        'handlers': [h.as_dict() for h in handlers],
        'max_errors': 5,
    })
def structure_to_wf(structure):
    """
    Build a Workflow from a Structure: a structure relaxation followed by
    a static run, each followed by a DB-insertion Firework.

    :param structure: input Structure
    :return: Workflow
    """
    fws = []  # FireWorks to run
    connections = defaultdict(list)  # dependencies between FireWorks

    # Generate VASP input objects for the first run; they go into the FW spec.
    mpvis = MPGGAVaspInputSet(user_incar_settings={'NPAR': 2})
    incar = mpvis.get_incar(structure)
    poscar = mpvis.get_poscar(structure)
    kpoints = mpvis.get_kpoints(structure)
    potcar = mpvis.get_potcar(structure)

    # Serialize the VASP input objects into the FW spec.
    spec = {
        'vasp': {
            'incar': incar.as_dict(),
            'poscar': poscar.as_dict(),
            'kpoints': kpoints.as_dict(),
            'potcar': potcar.as_dict(),
        },
        'vaspinputset_name': mpvis.__class__.__name__,
        'task_type': 'GGA optimize structure (2x) example',
    }

    # Set up the custodian that we want to run.
    jobs = VaspJob.double_relaxation_run('', gzipped=False)
    for j in jobs:
        # Turn off auto npar; it doesn't work for >1 node.
        j.auto_npar = False
    handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                MeshSymmetryErrorHandler(), NonConvergingErrorHandler()]
    c_params = {'jobs': [j.as_dict() for j in jobs],
                'handlers': [h.as_dict() for h in handlers],
                'max_errors': 5}
    custodiantask = VaspCustodianTaskEx(c_params)

    # 1st Firework: VaspWriterTask writes the input files (INCAR, POSCAR,
    # KPOINTS, POTCAR) from the spec; the custodian task then runs VASP.
    tasks = [VaspWriterTask(), custodiantask]
    fws.append(Firework(tasks, spec,
                        name=get_name(structure, spec['task_type']), fw_id=1))

    # 2nd Firework: insert the relaxation run into the DB.
    spec = {'task_type': 'VASP db insertion example'}
    fws.append(Firework([VaspToDBTaskEx()], spec,
                        name=get_name(structure, spec['task_type']), fw_id=2))
    connections[1] = [2]

    # 3rd Firework: static run. VaspCopyTask copies output from the
    # previous run, SetupStaticRunTask overrides the old parameters for a
    # static run, and the custodian task runs VASP.
    spec = {'task_type': 'GGA static example'}
    copytask = VaspCopyTask({'use_CONTCAR': True, 'skip_CHGCAR': True})
    setuptask = SetupStaticRunTask()
    custodiantask = VaspCustodianTaskEx(
        {'jobs': [VaspJob('', auto_npar=False).as_dict()],
         'handlers': [h.as_dict() for h in handlers],
         'max_errors': 5})
    fws.append(Firework([copytask, setuptask, custodiantask], spec,
                        name=get_name(structure, spec['task_type']), fw_id=3))
    connections[2] = [3]

    # 4th Firework: insert the static run into the DB.
    spec = {'task_type': 'VASP db insertion example'}
    fws.append(Firework([VaspToDBTaskEx()], spec,
                        name=get_name(structure, spec['task_type']), fw_id=4))
    connections[3] = [4]

    return Workflow(fws, connections, name=get_slug(structure.formula))
def structure_to_wf(structure):
    """
    Turn a Structure into a Workflow with two calculation steps — a
    structure relaxation and a static run — each followed by a
    DB-insertion Firework.

    :param structure: input Structure
    :return: Workflow
    """
    fws = []  # list of FireWorks to run
    connections = defaultdict(list)  # dependencies between FireWorks

    # Generate VASP input objects for the first VASP run; these are
    # serialized into the FW spec below.
    mpvis = MPGGAVaspInputSet(user_incar_settings={'NPAR': 2})
    incar = mpvis.get_incar(structure)
    poscar = mpvis.get_poscar(structure)
    kpoints = mpvis.get_kpoints(structure)
    potcar = mpvis.get_potcar(structure)

    spec = {}
    spec['vasp'] = {}
    spec['vasp']['incar'] = incar.as_dict()
    spec['vasp']['poscar'] = poscar.as_dict()
    spec['vasp']['kpoints'] = kpoints.as_dict()
    spec['vasp']['potcar'] = potcar.as_dict()
    spec['vaspinputset_name'] = mpvis.__class__.__name__
    spec['task_type'] = 'GGA optimize structure (2x) example'

    # Configure the custodian run: a double relaxation with auto npar
    # disabled (it doesn't work for >1 node).
    jobs = VaspJob.double_relaxation_run('')
    for j in jobs:
        j.auto_npar = False
    handlers = [VaspErrorHandler(), FrozenJobErrorHandler(),
                MeshSymmetryErrorHandler(), NonConvergingErrorHandler()]
    c_params = {
        'jobs': [j.as_dict() for j in jobs],
        'handlers': [h.as_dict() for h in handlers],
        'max_errors': 5,
    }
    custodiantask = VaspCustodianTaskEx(c_params)

    # 1st Firework: write inputs (INCAR, POSCAR, KPOINTS, POTCAR) from the
    # spec, then run VASP within custodian.
    tasks = [VaspWriterTask(), custodiantask]
    fws.append(Firework(tasks, spec,
                        name=get_name(structure, spec['task_type']),
                        fw_id=1))

    # 2nd Firework: insert the previous run into the DB.
    spec = {'task_type': 'VASP db insertion example'}
    fws.append(Firework([VaspToDBTaskEx()], spec,
                        name=get_name(structure, spec['task_type']),
                        fw_id=2))
    connections[1] = [2]

    # 3rd Firework: static run. Copy the previous run's output
    # (VaspCopyTask), override parameters for a static run
    # (SetupStaticRunTask), then run VASP within custodian.
    spec = {'task_type': 'GGA static example'}
    copytask = VaspCopyTask({'use_CONTCAR': True, 'skip_CHGCAR': True})
    setuptask = SetupStaticRunTask()
    custodiantask = VaspCustodianTaskEx({
        'jobs': [VaspJob('', auto_npar=False).as_dict()],
        'handlers': [h.as_dict() for h in handlers],
        'max_errors': 5,
    })
    fws.append(Firework([copytask, setuptask, custodiantask], spec,
                        name=get_name(structure, spec['task_type']),
                        fw_id=3))
    connections[2] = [3]

    # 4th Firework: insert the static run into the DB.
    spec = {'task_type': 'VASP db insertion example'}
    fws.append(Firework([VaspToDBTaskEx()], spec,
                        name=get_name(structure, spec['task_type']),
                        fw_id=4))
    connections[3] = [4]

    return Workflow(fws, connections, name=get_slug(structure.formula))
def launch_workflow( self, launchpad_dir="", k_product=50, job=None, user_incar_settings=None, potcar_functional="PBE", additional_handlers=[], ): """ Creates a list of Fireworks. Each Firework represents calculations that will be done on a slab system of a compound in a specific orientation. Each Firework contains a oriented unit cell relaxation job and a WriteSlabVaspInputs which creates os. Firework(s) depending on whether or not Termination=True. Vasp outputs from all slab and oriented unit cell calculations will then be inserted into a database. Args: launchpad_dir (str path): The path to my_launchpad.yaml. Defaults to the current working directory containing your runs k_product: kpts[0][0]*a. Decide k density without kpoint0, default to 50 cwd: (str path): The curent working directory. Location of where you want your vasp outputs to be. job (VaspJob): The command (cmd) entered into VaspJob object. Default is specifically set for running vasp jobs on Carver at NERSC (use aprun for Hopper or Edison). user_incar_settings(dict): A dict specifying additional incar settings, default to None (ediff_per_atom=False) potcar_functional (str): default to PBE """ launchpad = LaunchPad.from_file(os.path.join(os.environ["HOME"], launchpad_dir, "my_launchpad.yaml")) if self.reset: launchpad.reset("", require_password=False) # Scratch directory reffered to by custodian. # May be different on non-Nersc systems. 
if not job: job = VaspJob(["mpirun", "-n", "64", "vasp"], auto_npar=False, copy_magmom=True) handlers = [ VaspErrorHandler(), NonConvergingErrorHandler(), UnconvergedErrorHandler(), PotimErrorHandler(), PositiveEnergyErrorHandler(), FrozenJobErrorHandler(timeout=3600), ] if additional_handlers: handlers.extend(additional_handlers) cust_params = { "custodian_params": {"scratch_dir": os.path.join("/global/scratch2/sd/", os.environ["USER"])}, "jobs": job.double_relaxation_run(job.vasp_cmd, auto_npar=False), "handlers": handlers, "max_errors": 100, } # will return a list of jobs # instead of just being one job fws = [] for key in self.miller_dict.keys(): # Enumerate through all compounds in the dictionary, # the key is the compositional formula of the compound print key for miller_index in self.miller_dict[key]: # Enumerates through all miller indices we # want to create slabs of that compound from print str(miller_index) max_norm = max(miller_index) if self.max_normal_search else None # Whether or not we want to use the # max_normal_search algorithm from surface.py print "true or false max norm is ", max_norm, self.max_normal_search slab = SlabGenerator( self.unit_cells_dict[key][0], miller_index, self.ssize, self.vsize, max_normal_search=max_norm ) oriented_uc = slab.oriented_unit_cell if self.fail_safe and len(oriented_uc) > 199: break # This method only creates the oriented unit cell, the # slabs are created in the WriteSlabVaspInputs task. 
# WriteSlabVaspInputs will create the slabs from # the contcar of the oriented unit cell calculation handler = [] tasks = [] folderbulk = "/%s_%s_k%s_s%sv%s_%s%s%s" % ( oriented_uc.composition.reduced_formula, "bulk", k_product, self.ssize, self.vsize, str(miller_index[0]), str(miller_index[1]), str(miller_index[2]), ) cwd = os.getcwd() if self.get_bulk_e: tasks.extend( [ WriteUCVaspInputs( oriented_ucell=oriented_uc, folder=folderbulk, cwd=cwd, user_incar_settings=user_incar_settings, potcar_functional=potcar_functional, k_product=k_product, ), RunCustodianTask(dir=folderbulk, cwd=cwd, **cust_params), VaspSlabDBInsertTask( struct_type="oriented_unit_cell", loc=folderbulk, cwd=cwd, miller_index=miller_index, **self.vaspdbinsert_params ), ] ) # Slab will inherit average final magnetic moment # of the bulk from outcar, will have to generalize # this for systems with different elements later # element = oriented_uc.species[0] # out = Outcar(cwd+folderbulk) # out_mag = out.magnetization # tot_mag = [mag['tot'] for mag in out_mag] # magmom = np.mean(tot_mag) # user_incar_settings['MAGMOM'] = {element: magmom} tasks.append( WriteSlabVaspInputs( folder=folderbulk, cwd=cwd, user_incar_settings=user_incar_settings, terminations=self.terminations, custodian_params=cust_params, vaspdbinsert_parameters=self.vaspdbinsert_params, potcar_functional=potcar_functional, k_product=k_product, miller_index=miller_index, min_slab_size=self.ssize, min_vacuum_size=self.vsize, ucell=self.unit_cells_dict[key][0], ) ) fw = Firework(tasks, name=folderbulk) fws.append(fw) wf = Workflow(fws, name="Surface Calculations") launchpad.add_wf(wf)
def test_static(self):
    """Smoke test: double_relaxation_run can be constructed without error."""
    VaspJob.double_relaxation_run(["vasp"])
def run_task(self, fw_spec):
    """Configure and run VASP through Custodian.

    Builds the job list from 'job_type' (normal, double_relaxation_run,
    metagga_opt_run, full_opt_run, neb), resolves the handler group and
    validators, runs Custodian, and stores custodian.json output in the
    FWAction if present.
    """
    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler(), StdErrHandler(),
                    DriftErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), StdErrHandler(),
                   AliasingErrorHandler(), DriftErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": [],
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, str):
        # Expand env vars, then tokenize the command string into a list.
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # Resolve task options (env_chk handles env-dependent values).
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", CUSTODIAN_MAX_ERRORS)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # Build the Custodian job list for the requested job type.
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "metagga_opt_run":
        jobs = VaspJob.metagga_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            max_steps=9,
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "neb":
        # TODO: @shyuep @HanmeiTang This means that NEB can only be run
        # (i) in reservation mode, (ii) when the queueadapter parameter
        # is overridden, and (iii) when the queue adapter has a
        # convention for nnodes (with that name). Can't the number of
        # nodes be made a parameter the user sets differently, e.g.
        # fw_spec["neb_nnodes"]? Then this feature works outside
        # reservation mode too. -computron
        nnodes = int(fw_spec["_queueadapter"]["nnodes"])

        # TODO: @shyuep @HanmeiTang - unclear what the rewriting below is
        # doing; it appears to override the number of processors, but it
        # fails for a plain string vasp_cmd. (i) Is it expecting a list
        # vasp_cmd — opposite to this task's convention/documentation?
        # (ii) Can this hacking be removed, e.g. by a separate
        # NEB_VASP_CMD env variable? -computron

        # Find the "-n"/"-np" flag and scale its value by the node count.
        index = [i for i, s in enumerate(vasp_cmd) if '-n' in s]
        ppn = int(vasp_cmd[index[0] + 1])
        vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        # Do the same for gamma_vasp_cmd.
        if gamma_vasp_cmd:
            index = [i for i, s in enumerate(gamma_vasp_cmd) if '-n' in s]
            ppn = int(gamma_vasp_cmd[index[0] + 1])
            gamma_vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        jobs = [VaspNEBJob(vasp_cmd, final=False, auto_npar=auto_npar,
                           gamma_vasp_cmd=gamma_vasp_cmd)]
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # Resolve handlers: a named group, or an explicit list of handlers.
    handler_group = self.get("handler_group", "default")
    if isinstance(handler_group, str):
        handlers = handler_groups[handler_group]
    else:
        handlers = handler_group

    if self.get("max_force_threshold"):
        handlers.append(MaxForceErrorHandler(
            max_force_threshold=self["max_force_threshold"]))
    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    if job_type == "neb":
        # CINEB vasprun.xml is sometimes incomplete and the file
        # structure differs, so skip validation for NEB runs.
        validators = []
    else:
        validators = [VasprunXMLValidator(), VaspFilesValidator()]

    c = Custodian(handlers, jobs, validators=validators,
                  max_errors=max_errors, scratch_dir=scratch_dir,
                  gzipped_output=gzip_output)
    c.run()

    # Surface the custodian run record (if any) as stored data.
    if os.path.exists(zpath("custodian.json")):
        stored_custodian_data = {
            "custodian": loadfn(zpath("custodian.json"))
        }
        return FWAction(stored_data=stored_custodian_data)
def run_task(self, fw_spec):
    """Set up and run VASP under Custodian from the task's options.

    Supports 'normal', 'double_relaxation_run' and 'full_opt_run' job
    types plus named handler groups; validates the run with
    VasprunXMLValidator.
    """
    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), AliasingErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": [],
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, six.string_types):
        # A string command gets env expansion and shell-style tokenizing.
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # Gather the remaining run options.
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", 5)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # Translate job_type into a concrete Custodian job list.
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=False)
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            max_steps=5, half_kpts_first_relax=False)
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # Select the handler group and append optional extra handlers.
    group_name = self.get("handler_group", "default")
    handlers = handler_groups[group_name]
    if self.get("max_force_threshold"):
        handlers.append(MaxForceErrorHandler(
            max_force_threshold=self["max_force_threshold"]))
    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    validators = [VasprunXMLValidator()]

    Custodian(handlers, jobs, validators=validators,
              max_errors=max_errors, scratch_dir=scratch_dir,
              gzipped_output=gzip_output).run()
def launch_workflow(self, launchpad_dir="", k_product=50, job=None, user_incar_settings=None, potcar_functional='PBE', additional_handlers=[]): """ Creates a list of Fireworks. Each Firework represents calculations that will be done on a slab system of a compound in a specific orientation. Each Firework contains a oriented unit cell relaxation job and a WriteSlabVaspInputs which creates os. Firework(s) depending on whether or not Termination=True. Vasp outputs from all slab and oriented unit cell calculations will then be inserted into a database. Args: launchpad_dir (str path): The path to my_launchpad.yaml. Defaults to the current working directory containing your runs k_product: kpts[0][0]*a. Decide k density without kpoint0, default to 50 cwd: (str path): The curent working directory. Location of where you want your vasp outputs to be. job (VaspJob): The command (cmd) entered into VaspJob object. Default is specifically set for running vasp jobs on Carver at NERSC (use aprun for Hopper or Edison). user_incar_settings(dict): A dict specifying additional incar settings, default to None (ediff_per_atom=False) potcar_functional (str): default to PBE """ launchpad = LaunchPad.from_file( os.path.join(os.environ["HOME"], launchpad_dir, "my_launchpad.yaml")) if self.reset: launchpad.reset('', require_password=False) # Scratch directory reffered to by custodian. # May be different on non-Nersc systems. 
if not job: job = VaspJob(["mpirun", "-n", "64", "vasp"], auto_npar=False, copy_magmom=True) handlers = [ VaspErrorHandler(), NonConvergingErrorHandler(), UnconvergedErrorHandler(), PotimErrorHandler(), PositiveEnergyErrorHandler(), FrozenJobErrorHandler(timeout=3600) ] if additional_handlers: handlers.extend(additional_handlers) cust_params = { "custodian_params": { "scratch_dir": os.path.join("/global/scratch2/sd/", os.environ["USER"]) }, "jobs": job.double_relaxation_run(job.vasp_cmd, auto_npar=False), "handlers": handlers, "max_errors": 100 } # will return a list of jobs # instead of just being one job fws = [] for key in self.miller_dict.keys(): # Enumerate through all compounds in the dictionary, # the key is the compositional formula of the compound print key for miller_index in self.miller_dict[key]: # Enumerates through all miller indices we # want to create slabs of that compound from print str(miller_index) max_norm = max( miller_index) if self.max_normal_search else None # Whether or not we want to use the # max_normal_search algorithm from surface.py print 'true or false max norm is ', max_norm, self.max_normal_search slab = SlabGenerator(self.unit_cells_dict[key][0], miller_index, self.ssize, self.vsize, max_normal_search=max_norm) oriented_uc = slab.oriented_unit_cell if self.fail_safe and len(oriented_uc) > 199: break # This method only creates the oriented unit cell, the # slabs are created in the WriteSlabVaspInputs task. 
# WriteSlabVaspInputs will create the slabs from # the contcar of the oriented unit cell calculation handler = [] tasks = [] folderbulk = '/%s_%s_k%s_s%sv%s_%s%s%s' % ( oriented_uc.composition.reduced_formula, 'bulk', k_product, self.ssize, self.vsize, str(miller_index[0]), str(miller_index[1]), str(miller_index[2])) cwd = os.getcwd() if self.get_bulk_e: tasks.extend([ WriteUCVaspInputs( oriented_ucell=oriented_uc, folder=folderbulk, cwd=cwd, user_incar_settings=user_incar_settings, potcar_functional=potcar_functional, k_product=k_product), RunCustodianTask(dir=folderbulk, cwd=cwd, **cust_params), VaspSlabDBInsertTask(struct_type="oriented_unit_cell", loc=folderbulk, cwd=cwd, miller_index=miller_index, **self.vaspdbinsert_params) ]) # Slab will inherit average final magnetic moment # of the bulk from outcar, will have to generalize # this for systems with different elements later # element = oriented_uc.species[0] # out = Outcar(cwd+folderbulk) # out_mag = out.magnetization # tot_mag = [mag['tot'] for mag in out_mag] # magmom = np.mean(tot_mag) # user_incar_settings['MAGMOM'] = {element: magmom} tasks.append( WriteSlabVaspInputs( folder=folderbulk, cwd=cwd, user_incar_settings=user_incar_settings, terminations=self.terminations, custodian_params=cust_params, vaspdbinsert_parameters=self.vaspdbinsert_params, potcar_functional=potcar_functional, k_product=k_product, miller_index=miller_index, min_slab_size=self.ssize, min_vacuum_size=self.vsize, ucell=self.unit_cells_dict[key][0])) fw = Firework(tasks, name=folderbulk) fws.append(fw) wf = Workflow(fws, name='Surface Calculations') launchpad.add_wf(wf)
def run_task(self, fw_spec):
    """Run VASP via Custodian according to the task's configuration.

    Handles job construction ('normal', 'double_relaxation_run',
    'metagga_opt_run', 'full_opt_run', 'neb'), handler-group resolution,
    validators, and returns the stored custodian.json data if produced.
    """
    handler_groups = {
        "default": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                    UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                    PotimErrorHandler(), PositiveEnergyErrorHandler(),
                    FrozenJobErrorHandler(), StdErrHandler(),
                    DriftErrorHandler()],
        "strict": [VaspErrorHandler(), MeshSymmetryErrorHandler(),
                   UnconvergedErrorHandler(), NonConvergingErrorHandler(),
                   PotimErrorHandler(), PositiveEnergyErrorHandler(),
                   FrozenJobErrorHandler(), StdErrHandler(),
                   AliasingErrorHandler(), DriftErrorHandler()],
        "md": [VaspErrorHandler(), NonConvergingErrorHandler()],
        "no_handler": [],
    }

    vasp_cmd = env_chk(self["vasp_cmd"], fw_spec)
    if isinstance(vasp_cmd, six.string_types):
        # A string command gets env expansion and shell-style tokenizing.
        vasp_cmd = os.path.expandvars(vasp_cmd)
        vasp_cmd = shlex.split(vasp_cmd)

    # Gather run options (env_chk resolves env-dependent values).
    job_type = self.get("job_type", "normal")
    scratch_dir = env_chk(self.get("scratch_dir"), fw_spec)
    gzip_output = self.get("gzip_output", True)
    max_errors = self.get("max_errors", 5)
    auto_npar = env_chk(self.get("auto_npar"), fw_spec,
                        strict=False, default=False)
    gamma_vasp_cmd = env_chk(self.get("gamma_vasp_cmd"), fw_spec,
                             strict=False, default=None)
    if gamma_vasp_cmd:
        gamma_vasp_cmd = shlex.split(gamma_vasp_cmd)

    # Translate job_type into a concrete Custodian job list.
    if job_type == "normal":
        jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                        gamma_vasp_cmd=gamma_vasp_cmd)]
    elif job_type == "double_relaxation_run":
        jobs = VaspJob.double_relaxation_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "metagga_opt_run":
        jobs = VaspJob.metagga_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "full_opt_run":
        jobs = VaspJob.full_opt_run(
            vasp_cmd, auto_npar=auto_npar, ediffg=self.get("ediffg"),
            max_steps=9,
            half_kpts_first_relax=self.get("half_kpts_first_relax",
                                           HALF_KPOINTS_FIRST_RELAX))
    elif job_type == "neb":
        # TODO: @shyuep @HanmeiTang This means that NEB can only be run
        # (i) in reservation mode, (ii) when the queueadapter parameter
        # is overridden, and (iii) when the queue adapter has a
        # convention for nnodes (with that name). Can't the number of
        # nodes be a parameter the user sets differently, e.g.
        # fw_spec["neb_nnodes"]? Then this feature works outside
        # reservation mode too. -computron
        nnodes = int(fw_spec["_queueadapter"]["nnodes"])

        # TODO: @shyuep @HanmeiTang - unclear what the rewriting below is
        # doing; it appears to override the number of processors, but it
        # fails for a plain string vasp_cmd. (i) Is it expecting a list
        # vasp_cmd — opposite to this task's convention/documentation?
        # (ii) Can this hacking be removed, e.g. by a separate
        # NEB_VASP_CMD env variable? -computron

        # Find the "-n"/"-np" flag and scale its value by the node count.
        index = [i for i, s in enumerate(vasp_cmd) if '-n' in s]
        ppn = int(vasp_cmd[index[0] + 1])
        vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        # Do the same for gamma_vasp_cmd.
        if gamma_vasp_cmd:
            index = [i for i, s in enumerate(gamma_vasp_cmd) if '-n' in s]
            ppn = int(gamma_vasp_cmd[index[0] + 1])
            gamma_vasp_cmd[index[0] + 1] = str(nnodes * ppn)

        jobs = [VaspNEBJob(vasp_cmd, final=False, auto_npar=auto_npar,
                           gamma_vasp_cmd=gamma_vasp_cmd)]
    else:
        raise ValueError("Unsupported job type: {}".format(job_type))

    # Resolve handlers: a named group, or an explicit list of handlers.
    handler_group = self.get("handler_group", "default")
    if isinstance(handler_group, six.string_types):
        handlers = handler_groups[handler_group]
    else:
        handlers = handler_group

    if self.get("max_force_threshold"):
        handlers.append(MaxForceErrorHandler(
            max_force_threshold=self["max_force_threshold"]))
    if self.get("wall_time"):
        handlers.append(WalltimeHandler(wall_time=self["wall_time"]))

    if job_type == "neb":
        # CINEB vasprun.xml is sometimes incomplete and the file
        # structure differs, so skip validation for NEB runs.
        validators = []
    else:
        validators = [VasprunXMLValidator(), VaspFilesValidator()]

    Custodian(handlers, jobs, validators=validators,
              max_errors=max_errors, scratch_dir=scratch_dir,
              gzipped_output=gzip_output).run()

    # Surface the custodian run record (if any) as stored data.
    if os.path.exists(zpath("custodian.json")):
        return FWAction(stored_data=loadfn(zpath("custodian.json")))
__date__ = "3/20/17" __email__ = "z9wang at ucsd.edu" MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) launchpad = LaunchPad.from_file( os.path.join(os.environ["HOME"], ".fireworks", "my_launchpad.yaml")) struct_file = os.path.join(MODULE_DIR, "test_files/ICSD_182730_Si.cif") material_id = "material_id" s = Structure.from_file(struct_file) vasp_jobs = VaspJob( ["srun", "-n", "32", "-c", "16", "--cpu_bind=cores", "vasp_std"], auto_npar=False) double_relaxations = VaspJob.double_relaxation_run(vasp_cmd=vasp_jobs, auto_npar=False) scratch_dir = "/global/cscratch1/sd/{}/temp_project".format(os.environ["USER"]) fw1 = Firework([ MPRelaxationVASPInputTask(structure=s.as_dict()), RunCustodianTask(jobs=[j.as_dict() for j in double_relaxations], custodian_params={"scratch_dir": scratch_dir}), TransferResultsTask(material_id=material_id, job_type="relax") ], name="{} MP Relax".format(material_id)) fw2 = Firework([ MPStaticVASPInputTask(material_id=material_id), RunCustodianTask(jobs=[vasp_jobs.as_dict()], custodian_params={"scratch_dir": scratch_dir}),