def test_fizzled_2(self):
    # A FIZZLED parent whose child tolerates fizzled parents leaves
    # the workflow as a whole in the RUNNING state.
    parent = fw.Firework([], state="FIZZLED", fw_id=1)
    child = fw.Firework(
        [],
        state="READY",
        fw_id=2,
        spec={"_allow_fizzled_parents": True},
        parents=parent,
    )
    workflow = fw.Workflow([parent, child])
    self.assertEqual(workflow.state, "RUNNING")
def test_fizzled_2(self):
    # WF(Fizzled -> Ready(allow fizz parents)) should report RUNNING
    fizzled = fw.Firework([], state='FIZZLED', fw_id=1)
    ready = fw.Firework(
        [],
        state='READY',
        fw_id=2,
        spec={'_allow_fizzled_parents': True},
        parents=fizzled,
    )
    self.assertEqual(fw.Workflow([fizzled, ready]).state, 'RUNNING')
def test_fizzled_3(self):
    # A COMPLETED leaf that allowed its fizzled parent makes the whole
    # workflow COMPLETED.
    parent = fw.Firework([], state="FIZZLED", fw_id=1)
    leaf = fw.Firework(
        [],
        state="COMPLETED",
        fw_id=2,
        spec={"_allow_fizzled_parents": True},
        parents=parent,
    )
    workflow = fw.Workflow([parent, leaf])
    self.assertEqual(workflow.state, "COMPLETED")
def make_genetic_workflow(ngens, ncandidates, template, initial_pop,
                          bounds, conditions, ff_updater, wf_name):
    """Make a genetic alg. forcefield optimisation workflow

    Parameters
    ----------
    ngens : int
        number of generations to run algorithm for
    ncandidates : int
        number of candidates in the generation
    template : str
        path to the template
    initial_pop : tuple of tuples
        description of initial candidates
    bounds : tuple
        tuple containing the minimum and maximum bounds for each
        parameter eg ``((1.0, 2.0), (10.0, 20.0))`` clamps the first
        value between 1.0 and 2.0
    conditions : tuple of tuples
        tuple of (T, P, reference result) for each point to match
    ff_updater : function
        function which does the manipulation of forcefield files
    wf_name : str
        unique key to refer to this workflow by

    Returns
    -------
    workflow : fw.Workflow
        the Workflow ready to be put on launchpad
    """
    first = make_first_generation(
        template=template,
        initial_pop=initial_pop,
        ncandidates=ncandidates,
        conditions=conditions,
        ff_updater=ff_updater,
        wf_name=wf_name,
    )
    gen = first
    # copy instead of aliasing: previously `fws = first` meant the
    # extend() below also mutated the `first` list in place
    fws = list(first)
    for gen_id in range(ngens):
        # the parent FW for next generation is last FW in previous gen
        parent = gen[-1]
        gen = make_generation_n(
            ncandidates=ncandidates,
            bounds=bounds,
            conditions=conditions,
            parent=parent,
            generation_id=gen_id + 1,
            ff_updater=ff_updater,
            wf_name=wf_name,
        )
        fws.extend(gen)

    return fw.Workflow(fws, name=wf_name)
def test_fizzled_4(self):
    # If even one child does not allow fizzled parents, the whole
    # workflow is FIZZLED.
    fizzled = fw.Firework([], state='FIZZLED', fw_id=1)
    tolerant = fw.Firework(
        [],
        state='READY',
        fw_id=2,
        spec={'_allow_fizzled_parents': True},
        parents=fizzled,
    )
    strict = fw.Firework([], state='WAITING', fw_id=3, parents=fizzled)
    workflow = fw.Workflow([fizzled, tolerant, strict])
    self.assertEqual(workflow.state, 'FIZZLED')
def test_fizzled_4(self):
    # one child doesn't allow fizzled parents -> workflow FIZZLED
    parent = fw.Firework([], state="FIZZLED", fw_id=1)
    forgiving_child = fw.Firework(
        [],
        state="READY",
        fw_id=2,
        spec={"_allow_fizzled_parents": True},
        parents=parent,
    )
    blocked_child = fw.Firework([], state="WAITING", fw_id=3,
                                parents=parent)
    wf = fw.Workflow([parent, forgiving_child, blocked_child])
    self.assertEqual(wf.state, "FIZZLED")
def analysis_task(successful_raspa, launchpad):
    # Launch the analysis step against an already-successful raspa run,
    # then hand the simulation directory on to the test.
    simdir = os.path.abspath(successful_raspa)
    tasks = [
        gcwf.firetasks.AnalyseSimulation(fmt='raspa', parallel_id=1),
        TellTale(),
    ]
    job = fw.Firework(tasks, spec={'simtree': simdir})
    launchpad(fw.Workflow([job]))

    yield successful_raspa
def run(self, processing_mode, daemon_log=None):
    """Assemble the workflow, add it to the launchpad and launch it.

    Parameters
    ----------
    processing_mode : str
        'serial' runs all jobs locally through a dedicated rapidfire
        worker; 'LSF' tags the jobs for the LSF worker and starts the
        watcher daemon.
    daemon_log : str, optional
        path for the watcher daemon log; defaults to
        ``<FW_WFLOW_LAUNCH_LOC>/<user>/daemon.log``.

    Raises
    ------
    ValueError
        if ``processing_mode`` is neither 'serial' nor 'LSF'.
    """
    if not daemon_log:
        daemon_log = os.path.join(FW_WFLOW_LAUNCH_LOC,
                                  getpass.getuser(), "daemon.log")
    self.set_launch_dir()

    if processing_mode == 'serial':
        # tag every job with a one-off worker name so only our
        # rapidfire worker below picks them up
        unique_serial_key = str(uuid.uuid4())
        serial_worker = fireworks.FWorker(name=unique_serial_key)
        for job in self.jobs_list:
            job.spec['_fworker'] = unique_serial_key
        self.workflow = fireworks.Workflow(self.jobs_list,
                                           self.job_dependencies,
                                           name=self.name)
        self.launchpad.add_wf(self.workflow)
        rocket_launcher.rapidfire(self.launchpad, fworker=serial_worker)
    elif processing_mode == 'LSF':
        for job in self.jobs_list:
            job.spec['_fworker'] = 'LSF'
        self.workflow = fireworks.Workflow(self.jobs_list,
                                           self.job_dependencies,
                                           name=self.name)
        self.launchpad.add_wf(self.workflow)
        self.watcher_daemon(daemon_log)
    else:
        # previously an unrecognised mode fell through and silently
        # did nothing; fail loudly instead
        raise ValueError(
            "processing_mode must be 'serial' or 'LSF', got %r"
            % (processing_mode,))
def test_fizzled_6(self):
    # A chain of fizzled fireworks is still RUNNING while the leaf
    # (which allows fizzled parents) remains READY.
    root = fw.Firework([], state="FIZZLED", fw_id=1)
    middle = fw.Firework(
        [],
        state="FIZZLED",
        fw_id=2,
        spec={"_allow_fizzled_parents": True},
        parents=root,
    )
    leaf = fw.Firework(
        [],
        state="READY",
        fw_id=3,
        spec={"_allow_fizzled_parents": True},
        parents=middle,
    )
    workflow = fw.Workflow([root, middle, leaf])
    self.assertEqual(workflow.state, "RUNNING")
def copytemplate(sample_input, launchpad):
    # Run the CopyTemplate firetask against the sample template
    # directory and return the created sim_* directory.
    task = gcwf.firetasks.CopyTemplate(
        temperature=10,
        pressure=20,
        ncycles=1234,
        parallel_id=1,
        fmt='raspa',
    )
    spec = {'template': os.path.join(sample_input, 'template')}
    launchpad(fw.Workflow([fw.Firework(task, spec=spec)]))

    print(os.listdir())

    return glob.glob('sim_*')[0]
def analysis_task_with_previous(successful_raspa, launchpad):
    # Analyse a successful raspa run while also feeding in results from
    # an earlier generation of the same simulation.
    analyse = gcwf.firetasks.AnalyseSimulation(
        fmt='raspa',
        parallel_id=1,
        previous_results='0,123\n673,456\n',
    )
    job = fw.Firework(
        [analyse, TellTale()],
        spec={'simtree': os.path.abspath(successful_raspa)},
    )
    launchpad(fw.Workflow([job]))

    yield successful_raspa
def bulk_relaxation(
        self, atoms, parameters, spec=None):
    """Submit a relaxation of an atoms object as a one-firework
    workflow named 'bulk_relaxation'.

    The given calculation parameters are attached to the atoms object
    (under ``info['calculator_parameters']``) before it is encoded and
    handed to the PyTasks.

    Parameters
    ----------
    atoms : Atoms object
        Initial atoms to perform workflow on.
    parameters : dict
        Calculation parameters to use.
    spec : dict, optional
        Additional fireworks specifications to attach to the firework.

    Returns
    -------
    workflow_id : value returned by ``launchpad.add_wf(...)[-1]``
        Identifier of the submitted workflow.
    """
    atoms.info['calculator_parameters'] = parameters
    encoding = fwio.atoms_to_encode(atoms)

    # task 0 rebuilds the atoms from the encoded string; task 1 runs
    # the actual relaxation and stores the trajectory
    t0 = fireworks.PyTask(
        func='catkit.flow.fwio.encode_to_atoms',
        args=[encoding])
    t1 = fireworks.PyTask(
        func='catkit.flow.fwase.catflow_relaxation',
        stored_data_varname='trajectory')
    tasks = [t0, t1]

    if spec is None:
        spec = {}

    firework = fireworks.Firework(tasks, spec=spec)
    workflow = fireworks.Workflow([firework], name='bulk_relaxation')
    # NOTE(review): assumes add_wf's return supports [-1] indexing —
    # confirm against the launchpad implementation in use
    workflow_id = self.launchpad.add_wf(workflow)[-1]

    return workflow_id
def prepare_resample(self, previous_simdirs, previous_results, ncycles,
                     wfname, template):
    """Prepare a new sampling stage

    Parameters
    ----------
    previous_simdirs, previous_results : dict
        mapping of parallel id to previous simulation path and results
    ncycles : int
        number of steps still required (in total across parallel jobs)
    wfname : str
        unique name for this Workflow
    template : str
        path to sim template

    Returns
    -------
    detour : fw.Workflow
        new sampling stages that must be done
    """
    from .workflow_creator import make_sampling_point

    # bugfix: was `len(previous)` which is an undefined name (NameError)
    nparallel = len(previous_simdirs)
    # adjust ncycles based on how many parallel runs we have
    ncycles = ncycles // nparallel

    runs, pps = make_sampling_point(
        parent_fw=None,
        T=self['temperature'],
        P=self['pressure'],
        ncycles=ncycles,
        nparallel=nparallel,
        simfmt=self['fmt'],
        wfname=wfname,
        template=template,
        workdir=self['workdir'],
        simple=self['simple'],
        previous_results=previous_results,
        previous_simdirs=previous_simdirs,
    )

    return fw.Workflow(runs + pps)
def run_task(self, fw_spec):
    """Check, parse and save the results of a finished simulation.

    If the simulation has not yet converged, returns an FWAction with a
    detour workflow of restart fireworks; otherwise pushes the results
    and simulation path into the spec for downstream fireworks.
    """
    simtree = fw_spec['simtree']
    # check exit; will raise Error if simulation didn't finish
    finished = self.check_exit(self['fmt'], simtree)

    # parse results
    results = self.parse_results(self['fmt'], simtree)
    # save csv of results from *this* simulation
    utils.save_csv(results, os.path.join(simtree, 'this_sim_results.csv'))

    if self.get('previous_result', None) is not None:
        results = self.prepend_previous(self['previous_result'], results)
    # csv of results from all generations of this simulation
    utils.save_csv(results, os.path.join(simtree, 'total_results.csv'))

    if not finished:
        new_fws = self.prepare_restart(
            template=fw_spec['template'],  # bugfix: missing comma here
            previous_simdir=simtree,
            current_result=results,
            wfname=fw_spec['_category'],
        )
        return fw.FWAction(
            detours=fw.Workflow(new_fws)
        )
    else:
        parallel_id = self['parallel_id']
        return fw.FWAction(
            stored_data={'result': results.to_csv()},
            mod_spec=[{
                '_push': {
                    'results': (parallel_id, results.to_csv()),
                    'simpaths': (parallel_id, simtree),
                }
            }]
        )
def make_packing_workflow(spec, simple=True):
    """Create an entire Isotherm creation Workflow

    Parameters
    ----------
    spec : dict
        has all the information for workflow
    simple : bool, optional
        use decorrelation analysis to determine if to run more sims

    Returns
    -------
    workflow : fw.Workflow
        Workflow object ready to submit to LaunchPad
    """
    temperatures = spec['temperatures']
    pressures = spec['pressures']
    nparallel = spec['nparallel']
    ncycles = spec['ncycles']
    template = spec['template']
    workdir = spec['workdir']
    wfname = spec['name']

    if not isinstance(template, dict):
        dict_template = False
        # Passed path to template: slurp up the directory contents
        stuff = utils.slurp_directory(template)
    else:
        dict_template = True
        # Else passed dict of stuff
        stuff = template

    simfmt = utils.guess_format(stuff)
    stuff = utils.escape_template(stuff)

    if dict_template:
        init = fw.Firework(
            firetasks.InitTemplate(contents=stuff, workdir=workdir),
            spec={'_category': wfname},
            name='Template Init',
        )
        setup = [init]
    else:
        init = None
        setup = []

    simulations = []  # list of simulation fireworks
    post_processing = []  # list of post processing fireworks
    for T, P in itertools.product(temperatures, pressures):
        this_condition = make_Simfireworks(
            parent_fw=init,
            T=T,
            P=P,
            ncycles=ncycles,
            nparallel=nparallel,
            simfmt=simfmt,
            wfname=wfname,
            template=template,
            workdir=workdir,
        )
        this_condition_PP = make_PostProcess(
            parent_fw=this_condition,
            T=T,
            P=P,
            wfname=wfname,
            simple=simple,
        )
        simulations.extend(this_condition)
        post_processing.append(this_condition_PP)

    # removed dead code: a `flat_decide = fw.Firework(CapacityDecider())`
    # was created here but never added to the workflow

    iso_create = fw.Firework(
        firetasks.IsothermCreate(workdir=workdir),
        parents=post_processing,
        spec={'_category': wfname},
        name='Isotherm create',
    )

    return fw.Workflow(
        setup + simulations + post_processing + [iso_create],
        name=wfname,
        metadata={'GCMCWorkflow': True},  # tag as GCMCWorkflow workflow
    )
def test_running_1(self):
    # a completed parent with a ready child means work remains: RUNNING
    done = fw.Firework([], state="COMPLETED", fw_id=1)
    pending = fw.Firework([], state="READY", fw_id=2, parents=done)
    wf = fw.Workflow([done, pending])
    self.assertEqual(wf.state, "RUNNING")
def test_defused(self):
    # any defused firework makes the whole workflow DEFUSED
    finished = fw.Firework([], state='COMPLETED', fw_id=1)
    defused = fw.Firework([], state='DEFUSED', fw_id=2)
    workflow = fw.Workflow([finished, defused])
    self.assertEqual(workflow.state, 'DEFUSED')
def test_running_2(self):
    # an actively running parent keeps the workflow RUNNING
    active = fw.Firework([], state="RUNNING", fw_id=1)
    queued = fw.Firework([], state="WAITING", fw_id=2, parents=active)
    self.assertEqual(fw.Workflow([active, queued]).state, "RUNNING")
def test_fizzled_1(self):
    # WF(Fizzled -> Waiting(no fizz parents)) == FIZZLED
    failed = fw.Firework([], state="FIZZLED", fw_id=1)
    stuck = fw.Firework([], state="WAITING", fw_id=2, parents=failed)
    workflow = fw.Workflow([failed, stuck])
    self.assertEqual(workflow.state, "FIZZLED")
def test_paused(self):
    # any paused firework makes the whole workflow PAUSED
    finished = fw.Firework([], state="COMPLETED", fw_id=1)
    paused = fw.Firework([], state="PAUSED", fw_id=2)
    self.assertEqual(fw.Workflow([finished, paused]).state, "PAUSED")
def test_archived(self):
    # all fireworks archived -> workflow is ARCHIVED
    fireworks = [
        fw.Firework([], state="ARCHIVED", fw_id=1),
        fw.Firework([], state="ARCHIVED", fw_id=2),
    ]
    self.assertEqual(fw.Workflow(fireworks).state, "ARCHIVED")
def test_ready(self):
    # nothing has run yet, so the workflow is READY
    root = fw.Firework([], state="READY", fw_id=1)
    child = fw.Firework([], state="READY", fw_id=2, parents=root)
    workflow = fw.Workflow([root, child])
    self.assertEqual(workflow.state, "READY")
def LaunchedInitTempFW(InitTempFW, launchpad):
    # Submit the template-init firework so downstream fixtures see it
    # already launched.
    workflow = fw.Workflow([InitTempFW])
    launchpad(workflow)
def test_completed(self):
    # every leaf complete -> workflow COMPLETED
    leaves = [
        fw.Firework([], state="COMPLETED", fw_id=1),
        fw.Firework([], state="COMPLETED", fw_id=2),
    ]
    self.assertEqual(fw.Workflow(leaves).state, "COMPLETED")
def run_raspa(short_raspa, launchpad):
    # Execute a short raspa simulation in the prepared directory.
    task = gcwf.firetasks.RunSimulation(fmt='raspa')
    workflow = fw.Workflow(
        [fw.Firework([task], spec={'simtree': short_raspa})]
    )
    launchpad(workflow)
def test_fizzled_5(self):
    # a fizzled leaf makes the whole workflow FIZZLED
    parent = fw.Firework([], state="COMPLETED", fw_id=1)
    failed_leaf = fw.Firework([], state="FIZZLED", fw_id=2,
                              parents=parent)
    workflow = fw.Workflow([parent, failed_leaf])
    self.assertEqual(workflow.state, "FIZZLED")
def submit_relaxation(
        self, image, workflow_name, parameters=None, spec=None):
    """Run a relaxation of a given DB entry or atoms object.

    If a database object is used, the calculation will automatically
    store the keys and data for later retrieval. The entries uuid will
    also be stored and `data.calculator_parameters` will be used as
    the calculation parameters.

    Parameters
    ----------
    image : Atoms object | AtomsRow object
        ASE database entry or atoms object to relax.
    workflow_name : str
        Name of the fireworks calculation to be used.
    parameters : dict
        Calculation parameters to use. Will be pulled from a database
        entry `data.calculator_parameters`. Must contain a
        'calculator_name' key (popped before submission).
    spec : dict
        Additional fireworks specifications to pass to the database.

    Raises
    ------
    ValueError
        If no calculation parameters can be found, or if the
        parameters lack 'calculator_name'.
    """
    keys, data = {}, {}
    if isinstance(image, AtomsRow):
        # database row: extract the atoms plus its stored metadata
        atoms = image.toatoms()
        keys.update(image.key_value_pairs)
        keys.update({'uuid': image.unique_id})
        data.update(image.data)
    else:
        atoms = image

    if parameters is None:
        # prefer parameters stored on the DB row, then on the atoms
        if data.get('calculator_parameters'):
            parameters = data.get('calculator_parameters')
            # remove so they are not duplicated into the firework data
            del data['calculator_parameters']
        elif atoms.info.get('calculator_parameters'):
            parameters = atoms.info.get('calculator_parameters')
        else:
            raise ValueError('Calculation parameters missing.')

    # NOTE: pop mutates the caller's parameters dict
    calculator = parameters.pop('calculator_name', None)
    if calculator is None:
        raise ValueError("'calculator_name' missing from parameters.")

    atoms.info['calculator_parameters'] = parameters

    # make numpy arrays JSON-friendly; assumes array_to_list converts
    # in place — TODO confirm against catkit.flow.fwio
    for k, v in data.items():
        if isinstance(v, np.ndarray):
            fwio.array_to_list(v)
            data[k] = v

    encoding = fwio.atoms_to_encode(atoms)

    # task 0 rebuilds the atoms from the encoding; task 1 runs the
    # calculator and stores the trajectory
    t0 = fireworks.PyTask(
        func='catkit.flow.fwio.encode_to_atoms',
        args=[encoding])
    t1 = fireworks.PyTask(
        func='catkit.flow.fwase.get_potential_energy',
        args=[calculator],
        stored_data_varname='trajectory')
    tasks = [t0, t1]

    if spec is None:
        spec = {'keys': keys, 'data': data}
    else:
        # NOTE: mutates the caller-supplied spec dict
        spec.update({'keys': keys, 'data': data})

    firework = fireworks.Firework(tasks, spec=spec)
    workflow = fireworks.Workflow([firework], name=workflow_name)
    self.launchpad.add_wf(workflow)
def test_reserved(self):
    # a reserved parent means the workflow is RESERVED
    reserved = fw.Firework([], state='RESERVED', fw_id=1)
    waiting = fw.Firework([], state='READY', fw_id=2, parents=reserved)
    self.assertEqual(fw.Workflow([reserved, waiting]).state, 'RESERVED')