Example #1
    def test_run(self):
        create_fw = Firework([mock_objects.CreateOutputsTask(extensions=["WFK", "DEN"])], fw_id=1)
        delete_fw = Firework([FinalCleanUpTask(["WFK", "1WF"])], parents=create_fw, fw_id=2,
                             spec={"_add_launchpad_and_fw_id": True})

        wf = Workflow([create_fw, delete_fw])

        self.lp.add_wf(wf)

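        # first launch: only the CreateOutputsTask FW runs (nlaunches=1); the cleanup FW still waits on its parent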
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR, nlaunches=1)

        # check that the files have been created
        create_fw = self.lp.get_fw_by_id(1)
        create_ldir = create_fw.launches[0].launch_dir

        for d in ["tmpdata", "outdata", "indata"]:
            assert os.path.isfile(os.path.join(create_ldir, d, "tmp_WFK"))
            assert os.path.isfile(os.path.join(create_ldir, d, "tmp_DEN"))

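        # second launch: the FinalCleanUpTask runs; tmpdata/indata are cleared and only outdata/tmp_DEN survives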
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR, nlaunches=1)

        wf = self.lp.get_wf_by_fw_id(1)

        assert wf.state == "COMPLETED"

        for d in ["tmpdata", "indata"]:
            assert not os.path.isfile(os.path.join(create_ldir, d, "tmp_WFK"))
            assert not os.path.isfile(os.path.join(create_ldir, d, "tmp_DEN"))

        assert not os.path.isfile(os.path.join(create_ldir, "outdata", "tmp_WFK"))
        assert os.path.isfile(os.path.join(create_ldir, "outdata", "tmp_DEN"))
Example #2
    def itest_scf_wf(self, lp, fworker, tmpdir, input_scf_si_low, use_autoparal):
        """
        Tests a simple scf run with the ScfFWWorkflow
        """
        wf = ScfFWWorkflow(input_scf_si_low, autoparal=use_autoparal)

        wf.add_final_cleanup(["WFK"])

        scf_fw_id = wf.scf_fw.fw_id
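        # add_to_db returns a mapping from the provisional fw_ids to the ids assigned by the LaunchPad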
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        fw = lp.get_fw_by_id(scf_fw_id)

        assert fw.state == "COMPLETED"

        wf = lp.get_wf_by_fw_id(scf_fw_id)

        assert wf.state == "COMPLETED"

        # check the effect of the final cleanup
        scf_task = load_abitask(get_fw_by_task_index(wf, "scf", index=1))

        assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_WFK"))) == 0
        assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_DEN"))) == 1
        assert len(glob.glob(os.path.join(scf_task.tmpdir.path, "*"))) == 0
        assert len(glob.glob(os.path.join(scf_task.indir.path, "*"))) == 0

        if self.check_numerical_values:
            with scf_task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-241.239839134, rel=0.05)
Example #3
    def test_rerun_fws2(self):
        # Launch all fireworks
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        fw = self.lp.get_fw_by_id(self.zeus_fw_id)
        launches = fw.launches
        first_ldir = launches[0].launch_dir
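        # timestamp taken before the rerun; launches started afterwards must satisfy time_start > ts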
        ts = datetime.datetime.utcnow()

        # Rerun Zeus
        self.lp.rerun_fw(self.zeus_fw_id, rerun_duplicates=True)
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)

        fw = self.lp.get_fw_by_id(self.zeus_fw_id)
        launches = fw.launches
        fw_start_t = launches[0].time_start
        second_ldir = launches[0].launch_dir

        self.assertNotEqual(first_ldir, second_ldir)

        self.assertTrue(fw_start_t > ts)
        for fw_id in self.zeus_child_fw_ids:
            fw = self.lp.get_fw_by_id(fw_id)
            fw_start_t = fw.launches[0].time_start
            self.assertTrue(fw_start_t > ts)
        for fw_id in self.zeus_sib_fw_ids:
            fw = self.lp.get_fw_by_id(fw_id)
            fw_start_t = fw.launches[0].time_start
            self.assertFalse(fw_start_t > ts)
Example #4
    def itest_not_converged(self, lp, fworker, tmpdir, input_scf_phonon_si_low):
        """
        Tests the missed convergence and restart for the Strain task. Other restarts are checked in
        the PhononWorkflow tests.
        """

        # only strain perturbations included
        dfpt_inputs = dfpt_from_gsinput(input_scf_phonon_si_low, do_ddk=False, do_dde=False,
                                        do_strain=True, do_dte=False, skip_dte_permutations=True,
                                        strain_tol={"tolvrs": 1.0e-7})
        dfpt_inputs.set_vars(nstep=3)
        wf = DfptFWWorkflow(input_scf_phonon_si_low, ddk_inp=None, dde_inp=None, ph_inp=None,
                            strain_inp=dfpt_inputs.filter_by_tags(STRAIN), dte_inp=None,
                            nscf_inp=None, initialization_info=None, autoparal=False)

        scf_id = wf.scf_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_id = old_new[scf_id]

        # run the scf
        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)
        # pause the rest of the workflow and resume the task types one by one to check the restart
        lp.pause_wf(scf_id)

        # strain
        check_restart_task_type(lp, fworker, tmpdir, scf_id, "strain_pert_0")
Example #5
    def test_copyfilesfromcalcloc(self):
        fw1 = Firework([CopyVaspOutputs(calc_dir=self.plain_outdir),
                        PassCalcLocs(name="fw1")], name="fw1")

        fw2 = Firework([CopyVaspOutputs(calc_dir=self.relax2_outdir),
                        PassCalcLocs(name="fw2")], name="fw2")

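        # fw3 copies POSCAR from both upstream calc_locs, using the _0/_1 suffixes to keep the copies apart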
        fw3 = Firework([CopyFilesFromCalcLoc(calc_loc="fw1",
                                             filenames=["POSCAR"],
                                             name_prepend="",
                                             name_append="_0"),
                        CopyFilesFromCalcLoc(calc_loc="fw2",
                                             filenames=["POSCAR"],
                                             name_prepend="",
                                             name_append="_1"),
                        PassCalcLocs(name="fw3")],
                       name="fw3", parents=[fw1, fw2])
        fw4 = Firework([PassCalcLocs(name="fw4")], name="fw4", parents=fw3)

        wf = Workflow([fw1, fw2, fw3, fw4])
        self.lp.add_wf(wf)
        rapidfire(self.lp)

        fw4 = self.lp.get_fw_by_id(self.lp.get_fw_ids({"name": "fw4"})[0])

        calc_locs = fw4.spec["calc_locs"]
        self.assertTrue(os.path.exists(get_calc_loc("fw3", calc_locs)["path"] +
                                       "/POSCAR_0"))
        self.assertTrue(os.path.exists(get_calc_loc("fw3", calc_locs)["path"] +
                                       "/POSCAR_1"))
    def test_wf(self):
        self.wf = self._simulate_vasprun(self.wf)

        self.assertEqual(len(self.wf.fws), 8)
        # check vasp parameters for ionic relaxation
        defo_vis = [fw.spec["_tasks"][2]['vasp_input_set']
                    for fw in self.wf.fws if "deform" in fw.name]
        assert all([vis['user_incar_settings']['NSW'] == 99 for vis in defo_vis])
        assert all([vis['user_incar_settings']['IBRION'] == 2 for vis in defo_vis])
        self.lp.add_wf(self.wf)
        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        # check relaxation
        d = self._get_task_collection().find_one({"task_label": "elastic structure optimization"})
        self._check_run(d, mode="structure optimization")
        # check two of the deformation calculations
        d = self._get_task_collection().find_one({"task_label": "elastic deformation 0"})
        self._check_run(d, mode="elastic deformation 0")
        
        d = self._get_task_collection().find_one({"task_label": "elastic deformation 3"})
        self._check_run(d, mode="elastic deformation 3")

        # check the final results
        d = self._get_task_collection(coll_name="elasticity").find_one()
        self._check_run(d, mode="elastic analysis")
Example #7
    def test_single_Vasp_dbinsertion(self):
        # add the workflow
        structure = self.struct_si
        # instructs the workflow to use the db_file set by the FWorker (see env_chk)
        my_wf = get_wf(structure, "optimize_only.yaml", vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)

        # add an msonable object to additional fields
        my_wf.fws[0].tasks[-1]['additional_fields'].update(
            {"test_additional_field": self.struct_si})
        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        d = self.get_task_collection().find_one()
        self._check_run(d, mode="structure optimization")
        self._check_run(d, mode="additional field")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
Example #8
    def test_wf(self):

        self.wf = self._simulate_vasprun(self.wf)

        # 2*relax + 3*polarization = 5
        self.assertEqual(len(self.wf.fws), 5)

        # check VASP parameters on polarization calculation for interpolated structures
        interpolated_polarization_vis = [fw.tasks[7]['incar_update']['lcalcpol']
            for fw in self.wf.fws if "polarization" in fw.name and "interpolation" in fw.name]

        assert all(interpolated_polarization_vis)

        self.lp.add_wf(self.wf)
        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        # Check polar relaxation
        d = self.get_task_collection().find_one({"task_label": "_polar_relaxation"})
        self._check_run(d, "_polar_relaxation")

        # Check nonpolar relaxation
        d = self.get_task_collection().find_one({"task_label": "_nonpolar_relaxation"})
        self._check_run(d, "_nonpolar_relaxation")

        # Check polarization calculations
        D = self.get_task_collection().find({"task_label": {"$regex": ".*polarization"}})
        for d in D:
            self._check_run(d, d["task_label"])
Example #9
    def test_wf(self):
        self.wf = self._simulate_vasprun(self.wf)
        self.assertEqual(len(self.wf.fws), self.ndeformations+2)

        defo_vis = [fw.tasks[2]['vasp_input_set'] for fw in self.wf.fws if "deform" in fw.name]
        assert all([vis.user_incar_settings['NSW'] == 99 for vis in defo_vis])
        assert all([vis.user_incar_settings['IBRION'] == 2 for vis in defo_vis])

        self.lp.add_wf(self.wf)

        # this is specific to bulk_modulus_wf "fit equation of state" that uses FW tag
        self.setup_task_docs()

        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))
        if _write_task_docs:
            self.write_task_docs()

        # check relaxation
        d = self.get_task_collection().find_one({"task_label": {"$regex": "structure optimization"}})
        self._check_run(d, mode="structure optimization")

        # check two of the deformation calculations
        d = self.get_task_collection().find_one({"task_label": {"$regex": "bulk_modulus deformation 0"}})
        self._check_run(d, mode="bulk_modulus deformation 0")

        d = self.get_task_collection().find_one({"task_label": {"$regex": "bulk_modulus deformation 4"}})
        self._check_run(d, mode="bulk_modulus deformation 4")

        # check the final results
        d = self.get_task_collection(coll_name="eos").find_one()
        self._check_run(d, mode="fit equation of state")
Example #10
    def test_chgcar_db_read(self):
        # add the workflow
        structure = self.struct_si
        # instructs the workflow to use the db_file set by the FWorker (see env_chk)
        my_wf = get_wf(structure, "static_only.yaml", vis=MPStaticSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)

        # set the flags for storing charge densities
        my_wf.fws[0].tasks[-1]["parse_chgcar"] = True
        my_wf.fws[0].tasks[-1]["parse_aeccar"] = True
        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        d = self.get_task_collection().find_one()
        self._check_run(d, mode="static")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))

        chgcar_fs_id = d["calcs_reversed"][0]["chgcar_fs_id"]
        aeccar0_fs_id = d["calcs_reversed"][0]["aeccar0_fs_id"]
        aeccar2_fs_id = d["calcs_reversed"][0]["aeccar2_fs_id"]

        self.assertTrue(bool(chgcar_fs_id))
        self.assertTrue(bool(aeccar0_fs_id))
        self.assertTrue(bool(aeccar2_fs_id))
Example #11
    def itest_input_wf(self, lp, fworker, tmpdir, input_scf_si_low, use_autoparal):
        """
        Tests a simple scf run with the InputFWWorkflow
        """
        wf = InputFWWorkflow(input_scf_si_low, task_type=ScfFWTask, autoparal=use_autoparal)

        scf_fw_id = wf.fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        fw = lp.get_fw_by_id(scf_fw_id)

        assert fw.state == "COMPLETED"

        wf = lp.get_wf_by_fw_id(scf_fw_id)

        assert wf.state == "COMPLETED"
        assert len(wf.leaf_fw_ids) == 1

        if self.check_numerical_values:
            task = load_abitask(lp.get_fw_by_id(wf.leaf_fw_ids[0]))

            with task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-241.239839134, rel=0.05)
Example #12
    def check_restart_task_type(self, lp, fworker, tmpdir, fw_id, task_tag):

        # resume the task for tag
        wf = lp.get_wf_by_fw_id(fw_id)
        fw = get_fw_by_task_index(wf, task_tag, index=1)
        assert fw is not None
        assert fw.state == "PAUSED"
        lp.resume_fw(fw.fw_id)

        # run the FW
        rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)

        # the job should have a detour for the restart
        wf = lp.get_wf_by_fw_id(fw_id)
        fw = get_fw_by_task_index(wf, task_tag, index=2)
        assert fw is not None
        assert fw.state == "READY"

        # run all the remaining FWs and check that the last one completed correctly (if convergence
        # were not achieved, the final state would be FIZZLED)
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf = lp.get_wf_by_fw_id(fw_id)
        fw = get_fw_by_task_index(wf, task_tag, index=-1)

        assert fw.state == "COMPLETED"
Example #13
    def test_bandgap_check_Vasp(self):
        # add the workflow
        structure = self.struct_si
        # instructs the workflow to use the db_file set by the FWorker (see env_chk)
        my_wf = get_wf(structure, "bandstructure.yaml",
                       vis=MPRelaxSet(structure, force_gamma=True),
                       common_params={"vasp_cmd": VASP_CMD,
                                      "db_file": ">>db_file<<"})
        if not VASP_CMD:
            my_wf = use_fake_vasp(my_wf, ref_dirs_si)
        else:
            my_wf = use_custodian(my_wf)

        my_wf = add_namefile(my_wf)  # add a slug of fw-name to output files
        my_wf = add_bandgap_check(my_wf, check_bandgap_params={"max_gap": 0.1},
                                  fw_name_constraint="structure optimization")
        self.lp.add_wf(my_wf)

        # run the workflow
        # set the db_file variable
        rapidfire(self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        # structure optimization should be completed
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-structure optimization"}, {"state": 1})["state"],
                         "COMPLETED")

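        # the band gap check defuses the downstream FWs, since the computed Si gap exceeds max_gap=0.1 eV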
        self.assertEqual(self.lp.fireworks.find_one(
            {"name": "Si-static"}, {"state": 1})["state"],
                         "DEFUSED")
Example #14
    def itest_scf(self, lp, fworker, fwp, tmpdir, benchmark_input_scf):
        wf = InputFWWorkflow(benchmark_input_scf)

        scf_fw_id = wf.fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        fw = lp.get_fw_by_id(scf_fw_id)

        assert fw.state == "COMPLETED"

        # Build the flow
        flow = abilab.Flow(fwp.workdir, manager=fwp.manager)
        work = flow.register_task(benchmark_input_scf, task_class=abilab.ScfTask)

        flow.allocate()
        flow.build_and_pickle_dump()

        # Run t0 and check its status
        t0 = work[0]
        t0.start_and_wait()
        t0.check_status()
        assert t0.status == t0.S_OK
Example #15
    def test_wf(self):
        wf = self._simulate_vasprun(self.wf_1)

        self.assertEqual(len(self.wf_1.fws), 5)
        # check vasp parameters for the adsorbate optimizations
        ads_vis = [fw.tasks[1]['vasp_input_set']
                   for fw in self.wf_1.fws if "adsorbate" in fw.name]
        for vis in ads_vis:
            self.assertEqual(vis.incar['EDIFFG'], -0.05)
            self.assertEqual(vis.incar['ISIF'], 2)
        self.lp.add_wf(wf)
        rapidfire(self.lp, fworker=FWorker(
            env={"db_file": os.path.join(db_dir, "db.json")}))

        # check relaxation
        d = self.get_task_collection().find_one(
            {"task_label": "H1-Ir_(1, 0, 0) adsorbate optimization 1"})
        self._check_run(d, mode="H1-Ir_(1, 0, 0) adsorbate optimization 1")

        # Check bulk opt
        d = self.get_task_collection().find_one(
            {"task_label": "structure optimization"})
        self._check_run(d, mode='oriented_ucell')

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
Example #16
    def test_double_FF_opt(self):
        # location of test files
        test_double_FF_files = os.path.join(module_dir, "..", "..",
                                            "test_files", "double_FF_wf")
        # define starting molecule and workflow object
        initial_qcin = QCInput.from_file(
            os.path.join(test_double_FF_files, "block", "launcher_first",
                         "mol.qin.opt_0"))
        initial_mol = initial_qcin.molecule

        real_wf = get_wf_double_FF_opt(
            molecule=initial_mol,
            pcm_dielectric=10.0,
            qchem_input_params={
                "basis_set": "6-311++g**",
                "scf_algorithm": "diis",
                "overwrite_inputs": {
                    "rem": {
                        "sym_ignore": "true"
                    }
                }
            })
        # use powerup to replace run with fake run
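        # each key is a fw name, each value a directory holding the reference QChem output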
        ref_dirs = {
            "first_FF_no_pcm":
            os.path.join(test_double_FF_files, "block", "launcher_first"),
            "second_FF_with_pcm":
            os.path.join(test_double_FF_files, "block", "launcher_second")
        }
        fake_wf = use_fake_qchem(real_wf, ref_dirs)
        self.lp.add_wf(fake_wf)
        rapidfire(
            self.lp,
            fworker=FWorker(env={"max_cores": 32, "db_file": os.path.join(db_dir, "db.json")}))

        wf_test = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(
            all([s == "COMPLETED" for s in wf_test.fw_states.values()]))

        first_FF = self.get_task_collection().find_one({
            "task_label":
            "first_FF_no_pcm"
        })
        self.assertEqual(first_FF["calcs_reversed"][0]["input"]["solvent"],
                         None)
        self.assertEqual(first_FF["num_frequencies_flattened"], 1)
        first_FF_final_mol = Molecule.from_dict(
            first_FF["output"]["optimized_molecule"])

        second_FF = self.get_task_collection().find_one({
            "task_label":
            "second_FF_with_pcm"
        })
        self.assertEqual(second_FF["calcs_reversed"][0]["input"]["solvent"],
                         {"dielectric": "10.0"})
        self.assertEqual(second_FF["num_frequencies_flattened"], 1)
        second_FF_initial_mol = Molecule.from_dict(
            second_FF["input"]["initial_molecule"])

        self.assertEqual(first_FF_final_mol, second_FF_initial_mol)
Example #17
    def test_get_lp_and_fw_id_from_task(self):
        """
        Tests the get_lp_and_fw_id_from_task. This test relies on the fact that the LaunchPad loaded from auto_load
        will be different from what is defined in TESTDB_NAME. If this is not the case the test will be skipped.
        """
        lp = LaunchPad.auto_load()

        if not lp or lp.db.name == TESTDB_NAME:
            raise unittest.SkipTest("LaunchPad lp {} is not suitable for this test. Should be available and different "
                                    "from {}".format(lp, TESTDB_NAME))

        task = LpTask()
        # this will pass the lp
        fw1 = Firework([task], spec={'_add_launchpad_and_fw_id': True}, fw_id=1)
        # this will not have the lp and should fail
        fw2 = Firework([task], spec={}, fw_id=2, parents=[fw1])
        wf = Workflow([fw1, fw2])
        self.lp.add_wf(wf)

        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR, nlaunches=1)

        fw = self.lp.get_fw_by_id(1)

        assert fw.state == "COMPLETED"

        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR, nlaunches=1)

        fw = self.lp.get_fw_by_id(2)

        assert fw.state == "FIZZLED"
Example #18
    def test_pause_fw(self):
        self.lp.pause_fw(self.zeus_fw_id)

        paused_ids = self.lp.get_fw_ids({'state': 'PAUSED'})
        self.assertIn(self.zeus_fw_id, paused_ids)
        # Launch remaining fireworks
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)

        # Ensure that, except for Zeus and his children, all other fws are launched
        completed_ids = set(self.lp.get_fw_ids({'state': 'COMPLETED'}))
        # Check that Lapetus and his descendants are a subset of the completed fw_ids
        self.assertTrue(self.lapetus_desc_fw_ids.issubset(completed_ids))
        # Check that Zeus' siblings are a subset of the completed fw_ids
        self.assertTrue(self.zeus_sib_fw_ids.issubset(completed_ids))

        # Check that Zeus and his children are among the incomplete fw_ids
        fws_no_run = set(self.lp.get_fw_ids({'state': {'$nin': ['COMPLETED']}}))
        self.assertIn(self.zeus_fw_id, fws_no_run)
        self.assertTrue(self.zeus_child_fw_ids.issubset(fws_no_run))

        # Set up Zeus to run
        self.lp.resume_fw(self.zeus_fw_id)
        # Launch remaining fireworks
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        # Check that Zeus and his children are all completed now
        completed_ids = set(self.lp.get_fw_ids({'state': 'COMPLETED'}))
        self.assertIn(self.zeus_fw_id, completed_ids)
        self.assertTrue(self.zeus_child_fw_ids.issubset(completed_ids))
Example #19
    def test_run(self):
        db = DatabaseData(self.lp.name, collection="test_MongoEngineDBInsertionTask", username=self.lp.username,
                          password=self.lp.password)
        task = MongoEngineDBInsertionTask(db)
        fw = Firework([task], fw_id=1, spec={"_add_launchpad_and_fw_id": True})
        wf = Workflow([fw], metadata={'workflow_class': SaveDataWorkflow.workflow_class,
                                      'workflow_module': SaveDataWorkflow.workflow_module})
        self.lp.add_wf(wf)

        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR, nlaunches=1)

        wf = self.lp.get_wf_by_fw_id(1)

        assert wf.state == "COMPLETED"

        # retrieve the saved object
        # the import fails if not done locally
        from abiflows.fireworks.tasks.tests.mock_objects import DataDocument
        db.connect_mongoengine()
        with db.switch_collection(DataDocument) as DataDocument:
            data = DataDocument.objects()

            assert len(data) == 1

            assert data[0].test_field_string == "test_text"
            assert data[0].test_field_int == 5
Example #20
def rlaunch():

    m_description = 'This program launches one or more Rockets. A Rocket grabs a job from the central database and ' \
                    'runs it. The "single-shot" option launches a single Rocket, ' \
                    'whereas the "rapidfire" option loops until all FireWorks are completed.'

    parser = ArgumentParser(description=m_description)
    subparsers = parser.add_subparsers(help='command', dest='command')
    single_parser = subparsers.add_parser('singleshot', help='launch a single Rocket')
    rapid_parser = subparsers.add_parser('rapidfire',
                                         help='launch multiple Rockets (loop until all FireWorks complete)')

    single_parser.add_argument('-f', '--fw_id', help='specific fw_id to run', default=None, type=int)
    single_parser.add_argument('--offline', help='run in offline mode (FW.json required)', action='store_true')

    rapid_parser.add_argument('--nlaunches', help='num_launches (int or "infinite"; default 0 is all jobs in DB)', default=0)
    rapid_parser.add_argument('--sleep', help='sleep time between loops (secs)', default=None, type=int)

    parser.add_argument('-l', '--launchpad_file', help='path to launchpad file', default=LAUNCHPAD_LOC)
    parser.add_argument('-w', '--fworker_file', help='path to fworker file', default=FWORKER_LOC)
    parser.add_argument('-c', '--config_dir', help='path to a directory containing the config file (used if -l, -w unspecified)',
                        default=CONFIG_FILE_DIR)

    parser.add_argument('--loglvl', help='level to print log messages', default='INFO')
    parser.add_argument('-s', '--silencer', help='shortcut to mute log messages', action='store_true')

    args = parser.parse_args()

    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C

    if not args.launchpad_file and os.path.exists(os.path.join(args.config_dir, 'my_launchpad.yaml')):
        args.launchpad_file = os.path.join(args.config_dir, 'my_launchpad.yaml')

    if not args.fworker_file and os.path.exists(os.path.join(args.config_dir, 'my_fworker.yaml')):
        args.fworker_file = os.path.join(args.config_dir, 'my_fworker.yaml')

    args.loglvl = 'CRITICAL' if args.silencer else args.loglvl

    if args.command == 'singleshot' and args.offline:
        launchpad = None
    else:
        launchpad = LaunchPad.from_file(args.launchpad_file) if args.launchpad_file else LaunchPad(strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    # prime addr lookups
    _log = get_fw_logger("rlaunch", stream_level="INFO")
    _log.info("Hostname/IP lookup (this will take a few seconds)")
    get_my_host()
    get_my_ip()

    if args.command == 'rapidfire':
        rapidfire(launchpad, fworker, None, args.nlaunches, -1, args.sleep, args.loglvl)

    else:
        launch_rocket(launchpad, fworker, args.fw_id, args.loglvl)
Example #21
def rapidfire_process(fworker, nlaunches, sleep, loglvl, port, node_list, sub_nproc, timeout, running_ids_dict):
    """
    Initializes shared data with multiprocessing parameters and starts a rapidfire.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): listening port number of the shared object manager
        node_list ([str]): computer node list
        sub_nproc (int): number of processors of the sub job
        timeout (int): # of seconds after which to stop the rapidfire process
    """
    ds = DataServer(address=("127.0.0.1", port), authkey=DS_PASSWORD)
    ds.connect()
    launchpad = ds.LaunchPad()
    FWData().DATASERVER = ds
    FWData().MULTIPROCESSING = True
    FWData().NODE_LIST = node_list
    FWData().SUB_NPROCS = sub_nproc
    FWData().Running_IDs = running_ids_dict
    sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
    l_dir = launchpad.get_logdir() if launchpad else None
    l_logger = get_fw_logger("rocket.launcher", l_dir=l_dir, stream_level=loglvl)
    rapidfire(
        launchpad,
        fworker=fworker,
        m_dir=None,
        nlaunches=nlaunches,
        max_loops=-1,
        sleep_time=sleep,
        strm_lvl=loglvl,
        timeout=timeout,
    )
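    # with nlaunches == 0 ("until completion"), keep resubmitting while other sub jobs are still running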
    while nlaunches == 0:
        time.sleep(1.5)  # wait for LaunchPad to be initialized
        launch_ids = FWData().Running_IDs.values()
        live_ids = list(set(launch_ids) - {None})
        if len(live_ids) > 0:
            # Some other sub jobs are still running
            log_multi(l_logger, "Sleeping for {} secs before resubmit sub job".format(sleep_time))
            time.sleep(sleep_time)
            log_multi(l_logger, "Resubmit sub job")
            rapidfire(
                launchpad,
                fworker=fworker,
                m_dir=None,
                nlaunches=nlaunches,
                max_loops=-1,
                sleep_time=sleep,
                strm_lvl=loglvl,
                timeout=timeout,
            )
        else:
            break
    log_multi(l_logger, "Sub job finished")
Example #22
    def test_getinterpolatedposcar(self):
        nimages = 5
        this_image = 1
        autosort_tol = 0.5

        fw1 = Firework([CopyVaspOutputs(calc_dir=self.static_outdir,
                                        contcar_to_poscar=False,
                                        additional_files=["CONTCAR"]),
                        PassCalcLocs(name="fw1")], name="fw1")

        fw2 = Firework([CopyVaspOutputs(calc_dir=self.opt_outdir,
                                        contcar_to_poscar=False,
                                        additional_files=["CONTCAR"]),
                        PassCalcLocs(name="fw2")], name="fw2")

        fw3 = Firework([GetInterpolatedPOSCAR(start="fw1",
                                              end="fw2",
                                              this_image=this_image,
                                              nimages=nimages,
                                              autosort_tol=autosort_tol),
                        PassCalcLocs(name="fw3")],
                       name="fw3", parents=[fw1, fw2])
        fw4 = Firework([PassCalcLocs(name="fw4")], name="fw4", parents=fw3)

        wf = Workflow([fw1, fw2, fw3, fw4])
        self.lp.add_wf(wf)
        rapidfire(self.lp)

        fw4 = self.lp.get_fw_by_id(self.lp.get_fw_ids({"name": "fw4"})[0])

        calc_locs = fw4.spec["calc_locs"]
        self.assertTrue(os.path.exists(get_calc_loc("fw3", calc_locs)["path"] +
                                       "/POSCAR"))
        self.assertTrue(os.path.exists(get_calc_loc("fw3", calc_locs)["path"] +
                                       "/interpolate/CONTCAR_0"))
        self.assertTrue(os.path.exists(get_calc_loc("fw3", calc_locs)["path"] +
                                       "/interpolate/CONTCAR_1"))

        struct_start = Structure.from_file(get_calc_loc("fw3", calc_locs)["path"] +
                                          "/interpolate/CONTCAR_0")
        struct_end = Structure.from_file(get_calc_loc("fw3", calc_locs)["path"] +
                                         "/interpolate/CONTCAR_1")
        struct_inter = Structure.from_file(get_calc_loc("fw3", calc_locs)["path"] +
                                           "/POSCAR")

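        # rebuild the interpolation directly with pymatgen and compare it with the task output below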
        structs = struct_start.interpolate(struct_end,
                                           nimages,
                                           interpolate_lattices=True,
                                           autosort_tol=autosort_tol)

        # Check x of 1st site.
        self.assertAlmostEqual(structs[this_image][1].coords[0],
                               struct_inter[1].coords[0])
        # Check c lattice parameter
        self.assertAlmostEqual(structs[this_image].lattice.abc[0],
                               struct_inter.lattice.abc[0])
Example #23
    def test_defuse_fw_after_completion(self):
        # Launch rockets in rapidfire
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        # defuse Zeus
        self.lp.defuse_fw(self.zeus_fw_id)

        defused_ids = self.lp.get_fw_ids({'state': 'DEFUSED'})
        self.assertIn(self.zeus_fw_id, defused_ids)
        completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
        self.assertFalse(self.zeus_child_fw_ids.issubset(completed_ids))
Example #24
    def test_fibadder(self):
        fib = FibonacciAdderTask()
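        # the spec supplies the two seed numbers and a stop_point; each launch spawns the next adder until the sum reaches stop_point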
        fw = Firework(fib, {'smaller': 0, 'larger': 1, 'stop_point': 3})
        self.lp.add_wf(fw)
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)

        self.assertEqual(self.lp.get_launch_by_id(1).action.stored_data['next_fibnum'], 1)
        self.assertEqual(self.lp.get_launch_by_id(2).action.stored_data['next_fibnum'], 2)
        self.assertEqual(self.lp.get_launch_by_id(3).action.stored_data, {})
        self.assertFalse(self.lp.run_exists())
Example #25
    def test_missioncontrol(self):
        self.mc.configure(wf_creator=wf_creator_basic,
                          dimensions=self.dims_basic,
                          **common_kwargs)
        launchpad.add_wf(wf_creator_basic([5, 11, 'blue']))
        rapidfire(launchpad, nlaunches=20, sleep_time=0)
        plt = self.mc.plot()
        savepath = os.path.join(lp_filedir, "missioncontrol_plot.png")
        plt.savefig(savepath)
        self.assertTrue(os.path.exists(savepath))
Example #26
def rapidfire_process(fworker, nlaunches, sleep, loglvl, port, node_list, sub_nproc, timeout,
                      running_ids_dict, local_redirect, firing_state_dict, macro_sleep_time=None):
    """
    Initializes shared data with multiprocessing parameters and starts a rapidfire.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): listening port number of the shared object manager
        node_list ([str]): computer node list
        sub_nproc (int): number of processors of the sub job
        timeout (int): # of seconds after which to stop the rapidfire process
        macro_sleep_time (int): secs to sleep between sub job resubmit
        local_redirect (bool): redirect standard input and output to local file
    """
    ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
    ds.connect()
    launchpad = ds.LaunchPad()
    fw_data = FWData()
    fw_data.DATASERVER = ds
    fw_data.MULTIPROCESSING = True
    fw_data.NODE_LIST = node_list
    fw_data.SUB_NPROCS = sub_nproc
    fw_data.Running_IDs = running_ids_dict
    fw_data.FiringState = firing_state_dict
    fw_data.lp = launchpad
    sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
    l_dir = launchpad.get_logdir() if launchpad else None
    l_logger = get_fw_logger('rocket.launcher', l_dir=l_dir, stream_level=loglvl)
    fw_data.FiringState[os.getpid()] = True
    rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=nlaunches,
              max_loops=-1, sleep_time=sleep, strm_lvl=loglvl, timeout=timeout,
              local_redirect=local_redirect)
    fw_data.FiringState[os.getpid()] = False
    while nlaunches == 0:
        time.sleep(1.5) # wait for LaunchPad to be initialized
        firing_pids = [pid for pid, is_firing in fw_data.FiringState.items() if is_firing]
        if len(firing_pids) > 0:
            # Some other sub jobs are still running
            macro_sleep_time = macro_sleep_time if macro_sleep_time \
                else sleep_time * len(fw_data.FiringState)
            log_multi(l_logger, 'Sleeping for {} secs before resubmit sub job'.format(macro_sleep_time))
            time.sleep(macro_sleep_time)
            log_multi(l_logger, 'Resubmit sub job')
            fw_data.FiringState[os.getpid()] = True
            rapidfire(launchpad, fworker=fworker, m_dir=None, nlaunches=nlaunches,
                      max_loops=-1, sleep_time=sleep, strm_lvl=loglvl, timeout=timeout,
                      local_redirect=local_redirect)
            fw_data.FiringState[os.getpid()] = False
        else:
            break
    log_multi(l_logger, 'Sub job finished')
Example #27
    def test_multi_detour(self):
        fw1 = Firework([MultipleDetourTask()], fw_id=1)
        fw2 = Firework([ScriptTask.from_str('echo "DONE"')], parents=[fw1], fw_id=2)
        self.lp.add_wf(Workflow([fw1, fw2]))
        rapidfire(self.lp)
        links = self.lp.get_wf_by_fw_id(1).links
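        # MultipleDetourTask spawned fws 3-5 as detours of fw1; each detour links to the original child fw2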
        self.assertEqual(set(links[1]), {2, 3, 4, 5})
        self.assertEqual(set(links[2]), set())
        self.assertEqual(set(links[3]), {2})
        self.assertEqual(set(links[4]), {2})
        self.assertEqual(set(links[5]), {2})
Example #28
    def test_defuse_fw_after_completion(self):
        # Launch rockets in rapidfire
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        # defuse Zeus
        self.lp.defuse_fw(self.zeus_fw_id)
        # Ensure the cached fw states stay in sync with the actual FW states
        wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
        fws = wf.id_fw
        for fw_id in wf.fw_states:
            fw_state = fws[fw_id].state
            fw_cache_state = wf.fw_states[fw_id]
            self.assertEqual(fw_state, fw_cache_state)
Example #29
    def test_task_level_rerun_prev_dir(self):
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        self.assertEqual(os.getcwd(), MODULE_DIR)
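        # rerun only the failed task; "prev_dir" recovery restarts it in the original launch directory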
        self.lp.rerun_fws_task_level(1, recover_mode="prev_dir")
        self.lp.update_spec([1], {'skip_exception': True})
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        fw = self.lp.get_fw_by_id(1)
        self.assertEqual(os.getcwd(), MODULE_DIR)
        self.assertEqual(fw.state, 'COMPLETED')
        self.assertEqual(fw.launches[0].launch_dir, fw.archived_launches[0].launch_dir)
        self.assertEqual(ExecutionCounterTask.exec_counter, 1)
        self.assertEqual(ExceptionTestTask.exec_counter, 2)
Example #30
    def test_task_level_rerun_cp(self):
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        self.assertEqual(os.getcwd(), MODULE_DIR)
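        # "cp" recovery copies the recovery files into a fresh launch directory; date_file is compared across the two dirs below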
        self.lp.rerun_fws_task_level(1, recover_mode="cp")
        self.lp.update_spec([1], {'skip_exception': True})
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        self.assertEqual(os.getcwd(), MODULE_DIR)
        dirs = sorted(glob.glob(os.path.join(MODULE_DIR, "launcher_*")))
        self.assertEqual(self.lp.get_fw_by_id(1).state, 'COMPLETED')
        self.assertEqual(ExecutionCounterTask.exec_counter, 1)
        self.assertEqual(ExceptionTestTask.exec_counter, 2)
        self.assertTrue(filecmp.cmp(os.path.join(dirs[0], "date_file"), os.path.join(dirs[1], "date_file")))
Example #31
def rlaunch():

    m_description = 'This program launches one or more Rockets. A Rocket grabs a job from the ' \
                    'central database and runs it. The "single-shot" option launches a single Rocket, ' \
                    'whereas the "rapidfire" option loops until all FireWorks are completed.'

    parser = ArgumentParser(description=m_description)
    subparsers = parser.add_subparsers(help='command', dest='command')
    single_parser = subparsers.add_parser('singleshot',
                                          help='launch a single Rocket')
    rapid_parser = subparsers.add_parser(
        'rapidfire',
        help='launch multiple Rockets (loop until all FireWorks complete)')
    multi_parser = subparsers.add_parser(
        'multi', help='launches multiple Rockets simultaneously')

    single_parser.add_argument('-f',
                               '--fw_id',
                               help='specific fw_id to run',
                               default=None,
                               type=int)
    single_parser.add_argument('--offline',
                               help='run in offline mode (FW.json required)',
                               action='store_true')

    rapid_parser.add_argument('--nlaunches',
                              help='num_launches (int or "infinite"; '
                              'default 0 is all jobs in DB)',
                              default=0)
    rapid_parser.add_argument(
        '--timeout',
        help='timeout (secs) after which to quit (default None)',
        default=None,
        type=int)
    rapid_parser.add_argument(
        '--max_loops',
        help='after this many sleep loops, quit even in '
        'infinite nlaunches mode (default -1 is infinite loops)',
        default=-1,
        type=int)
    rapid_parser.add_argument('--sleep',
                              help='sleep time between loops (secs)',
                              default=None,
                              type=int)
    rapid_parser.add_argument(
        '--local_redirect',
        help="Redirect stdout and stderr to the launch directory",
        action="store_true")

    multi_parser.add_argument('num_jobs',
                              help='the number of jobs to run in parallel',
                              type=int)
    multi_parser.add_argument('--nlaunches',
                              help='number of FireWorks to run in series per '
                              'parallel job (int or "infinite"; default 0 is '
                              'all jobs in DB)',
                              default=0)
    multi_parser.add_argument(
        '--sleep',
        help='sleep time between loops in infinite launch mode '
        '(secs)',
        default=None,
        type=int)
    multi_parser.add_argument(
        '--timeout',
        help='timeout (secs) after which to quit (default None)',
        default=None,
        type=int)
    multi_parser.add_argument(
        '--nodefile',
        help='nodefile name or environment variable name '
        'containing the node file name (for populating'
        ' FWData only)',
        default=None,
        type=str)
    multi_parser.add_argument(
        '--ppn',
        help='processors per node (for populating FWData only)',
        default=1,
        type=int)
    multi_parser.add_argument('--exclude_current_node',
                              help="Don't use the script launching node "
                              "as compute node",
                              action="store_true")
    multi_parser.add_argument(
        '--local_redirect',
        help="Redirect stdout and stderr to the launch directory",
        action="store_true")

    parser.add_argument('-l',
                        '--launchpad_file',
                        help='path to launchpad file',
                        default=LAUNCHPAD_LOC)
    parser.add_argument('-w',
                        '--fworker_file',
                        help='path to fworker file',
                        default=FWORKER_LOC)
    parser.add_argument('-c',
                        '--config_dir',
                        help='path to a directory containing the config file '
                        '(used if -l, -w unspecified)',
                        default=CONFIG_FILE_DIR)

    parser.add_argument('--loglvl',
                        help='level to print log messages',
                        default='INFO')
    parser.add_argument('-s',
                        '--silencer',
                        help='shortcut to mute log messages',
                        action='store_true')

    try:
        import argcomplete
        argcomplete.autocomplete(parser)
        # This supports bash autocompletion. To enable this, pip install
        # argcomplete, activate global completion, or add
        #      eval "$(register-python-argcomplete rlaunch)"
        # into your .bash_profile or .bashrc
    except ImportError:
        pass

    args = parser.parse_args()

    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C

    if not args.launchpad_file and os.path.exists(
            os.path.join(args.config_dir, 'my_launchpad.yaml')):
        args.launchpad_file = os.path.join(args.config_dir,
                                           'my_launchpad.yaml')

    if not args.fworker_file and os.path.exists(
            os.path.join(args.config_dir, 'my_fworker.yaml')):
        args.fworker_file = os.path.join(args.config_dir, 'my_fworker.yaml')

    args.loglvl = 'CRITICAL' if args.silencer else args.loglvl

    if args.command == 'singleshot' and args.offline:
        launchpad = None
    else:
        launchpad = LaunchPad.from_file(
            args.launchpad_file) if args.launchpad_file else LaunchPad(
                strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    # prime addr lookups
    _log = get_fw_logger("rlaunch", stream_level="INFO")
    _log.info("Hostname/IP lookup (this will take a few seconds)")
    get_my_host()
    get_my_ip()

    if args.command == 'rapidfire':
        rapidfire(launchpad,
                  fworker=fworker,
                  m_dir=None,
                  nlaunches=args.nlaunches,
                  max_loops=args.max_loops,
                  sleep_time=args.sleep,
                  strm_lvl=args.loglvl,
                  timeout=args.timeout,
                  local_redirect=args.local_redirect)
    elif args.command == 'multi':
        total_node_list = None
        if args.nodefile:
            if args.nodefile in os.environ:
                args.nodefile = os.environ[args.nodefile]
            with open(args.nodefile, 'r') as f:
                total_node_list = [line.strip() for line in f.readlines()]
        launch_multiprocess(launchpad,
                            fworker,
                            args.loglvl,
                            args.nlaunches,
                            args.num_jobs,
                            args.sleep,
                            total_node_list,
                            args.ppn,
                            timeout=args.timeout,
                            exclude_current_node=args.exclude_current_node,
                            local_redirect=args.local_redirect)
    else:
        launch_rocket(launchpad, fworker, args.fw_id, args.loglvl)
Example #32
    def run(self):
        rapidfire(self.lpad, self.fworker)
Example #33
    def itest_phonon_wf(self, lp, fworker, tmpdir, input_scf_phonon_si_low,
                        use_autoparal, db_data):
        """
        Tests the complete running of PhononFullFWWorkflow and PhononFWWorkflow
        """

        # test at gamma; pass a custom manager to check proper serialization
        manager_path = os.path.join(abidata.dirpath, 'managers',
                                    "travis_manager.yml")
        ph_fac = PhononsFromGsFactory(
            qpoints=[[0, 0, 0]],
            ph_tol={"tolvrs": 1.0e-7},
            ddk_tol={"tolwfr": 1.0e-16},
            dde_tol={"tolvrs": 1.0e-7},
            wfq_tol={"tolwfr": 1.0e-16},
            manager=TaskManager.from_file(manager_path))

        # first run the phonon workflow with generation task
        wf_gen = PhononFWWorkflow(input_scf_phonon_si_low,
                                  ph_fac,
                                  autoparal=use_autoparal,
                                  initialization_info={
                                      "ngqpt": [1, 1, 1],
                                      "kppa": 100
                                  })

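        # extend the workflow with an anaddb phonon band structure step, the DB insertion and the final cleanup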
        wf_gen.add_anaddb_ph_bs_fw(input_scf_phonon_si_low.structure,
                                   ph_ngqpt=[1, 1, 1],
                                   ndivsm=2,
                                   nqsmall=2)
        wf_gen.add_mongoengine_db_insertion(db_data)
        wf_gen.add_final_cleanup(["WFK"])

        scf_id = wf_gen.scf_fw.fw_id
        ph_generation_fw_id = wf_gen.ph_generation_fw.fw_id
        old_new = wf_gen.add_to_db(lpad=lp)
        scf_id = old_new[scf_id]
        ph_generation_fw_id = old_new[ph_generation_fw_id]

        # run all the workflow
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf_gen = lp.get_wf_by_fw_id(scf_id)

        assert wf_gen.state == "COMPLETED"

        ph_task = load_abitask(
            get_fw_by_task_index(wf_gen, "phonon_0", index=-1))

        # check the effect of the final cleanup
        assert len(glob.glob(os.path.join(ph_task.outdir.path, "*_WFK"))) == 0
        assert len(glob.glob(os.path.join(ph_task.outdir.path, "*_DEN1"))) > 0
        assert len(glob.glob(os.path.join(ph_task.tmpdir.path, "*"))) == 0
        assert len(glob.glob(os.path.join(ph_task.indir.path, "*"))) == 0

        # check the save in the DB
        from abiflows.database.mongoengine.abinit_results import PhononResult
        with db_data.switch_collection(PhononResult) as PhononResult:
            results = PhononResult.objects()
            assert len(results) == 1
            r = results[0]

            assert r.abinit_input.structure.to_mgobj() == input_scf_phonon_si_low.structure
            assert r.abinit_output.structure.to_mgobj() == input_scf_phonon_si_low.structure
            assert r.abinit_input.ecut == input_scf_phonon_si_low['ecut']
            assert r.abinit_input.kppa == 100
            nptu.assert_array_equal(
                r.abinit_input.gs_input.to_mgobj()['ngkpt'],
                input_scf_phonon_si_low['ngkpt'])
            nptu.assert_array_equal(r.abinit_input.ngqpt, [1, 1, 1])

            ana_task = load_abitask(
                get_fw_by_task_index(wf_gen, "anaddb", index=None))

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.phonon_bs.read())
                db_file.seek(0)
                assert filecmp.cmp(ana_task.phbst_path, db_file.name)

            mrgddb_task = load_abitask(
                get_fw_by_task_index(wf_gen, "mrgddb", index=None))

            # read/write in binary for py3k compatibility with mongoengine
            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.ddb.read())
                db_file.seek(0)
                assert filecmp.cmp(mrgddb_task.merged_ddb_path, db_file.name)

        # then rerun a similar workflow, but completely generated at its creation
        wf_full = PhononFullFWWorkflow(input_scf_phonon_si_low,
                                       ph_fac,
                                       autoparal=use_autoparal)
        wf_full.add_anaddb_ph_bs_fw(input_scf_phonon_si_low.structure,
                                    ph_ngqpt=[1, 1, 1],
                                    ndivsm=2,
                                    nqsmall=2)
        wf_full.add_mongoengine_db_insertion(db_data)

        scf_id = wf_full.scf_fw.fw_id
        old_new = wf_full.add_to_db(lpad=lp)
        scf_id = old_new[scf_id]

        # run all the workflow
        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf_full = lp.get_wf_by_fw_id(scf_id)

        assert wf_full.state == "COMPLETED"

        # the full workflow doesn't contain the generation FW and the cleanup tasks, but should have the same
        # number of perturbations.
        if use_autoparal:
            diff = 1
        else:
            diff = 2
        assert len(wf_full.id_fw) + diff == len(wf_gen.id_fw)

        if self.check_numerical_values:
            gen_scf_task = load_abitask(
                get_fw_by_task_index(wf_gen, "scf", index=-1))
            with gen_scf_task.open_gsr() as gen_gsr:
                gen_energy = gen_gsr.energy
                assert gen_energy == pytest.approx(-240.264972012, rel=0.01)

            gen_ana_task = load_abitask(
                get_fw_by_task_index(wf_gen, "anaddb", index=None))
            with gen_ana_task.open_phbst() as gen_phbst:
                gen_phfreq = gen_phbst.phbands.phfreqs[0, 3]
                assert gen_phfreq == pytest.approx(0.06029885, rel=0.1)

            full_scf_task = load_abitask(
                get_fw_by_task_index(wf_full, "scf", index=-1))
            with full_scf_task.open_gsr() as full_gsr:
                full_energy = full_gsr.energy
                assert full_energy == pytest.approx(-240.264972012, rel=0.01)

            full_ana_task = load_abitask(
                get_fw_by_task_index(wf_full, "anaddb", index=None))
            with full_ana_task.open_phbst() as full_phbst:
                full_phfreqs = full_phbst.phbands.phfreqs[0, 3]
                assert full_phfreqs == pytest.approx(0.06029885, rel=0.1)

            assert gen_energy == pytest.approx(full_energy, rel=1e-6)
            assert gen_phfreq == pytest.approx(full_phfreqs, rel=1e-6)
Example #34
    def test_FFopt_and_critic(self):
        # location of test files
        test_files = os.path.join(module_dir, "..", "..", "test_files",
                                  "critic_test_files")
        # define starting molecule and workflow object
        initial_qcin = QCInput.from_file(
            os.path.join(test_files, "FFopt", "mol.qin.orig"))
        initial_mol = initial_qcin.molecule

        real_wf = get_wf_FFopt_and_critic(
            molecule=initial_mol,
            suffix="testing",
            qchem_input_params={
                "dft_rung": 4,
                "smd_solvent": "custom",
                "custom_smd": "18.5,1.415,0.00,0.735,20.2,0.00,0.00",
                "overwrite_inputs": {
                    "rem": {
                        "thresh": "14",
                        "scf_guess_always": "True"
                    }
                }
            })
        # use powerup to replace run with fake run
        ref_dirs = {
            "{}:{}".format(initial_mol.composition.alphabetical_formula, "FFopt_testing"):
            os.path.join(test_files, "FFopt"),
            "{}:{}".format(initial_mol.composition.alphabetical_formula, "CC2_testing"):
            os.path.join(test_files, "critic_example")
        }
        fake_wf = use_fake_qchem(real_wf, ref_dirs)
        self.lp.add_wf(fake_wf)
        rapidfire(self.lp,
                  fworker=FWorker(env={
                      "max_cores": 32,
                      "db_file": os.path.join(db_dir, "db.json")
                  }))

        wf_test = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(
            all([s == "COMPLETED" for s in wf_test.fw_states.values()]))

        FFopt = self.get_task_collection().find_one({
            "task_label":
            "{}:{}".format(initial_mol.composition.alphabetical_formula,
                           "FFopt_testing")
        })
        self.assertEqual(FFopt["calcs_reversed"][0]["input"]["smx"]["solvent"],
                         "other")
        self.assertEqual(FFopt["num_frequencies_flattened"], 0)
        FFopt_final_mol = Molecule.from_dict(
            FFopt["output"]["optimized_molecule"])

        CC2 = self.get_task_collection().find_one({
            "task_label":
            "{}:{}".format(initial_mol.composition.alphabetical_formula,
                           "CC2_testing")
        })
        CC2_initial_mol = Molecule.from_dict(CC2["input"]["initial_molecule"])

        self.assertEqual(FFopt_final_mol, CC2_initial_mol)
        self.assertEqual(CC2["output"]["job_type"], "sp")
        self.assertEqual(CC2["output"]["final_energy"], -343.4820411597)
        critic2_drone_ref = loadfn(
            os.path.join(test_files, "critic_example",
                         "critic2_drone_ref.json"))
        self.assertEqual(CC2["critic2"], critic2_drone_ref)
Example #35
def run_workflows():
    TESTDB_NAME = 'rsled'
    launchpad = LaunchPad(name=TESTDB_NAME)
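    # reset wipes the test database so the run starts from a clean slate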
    launchpad.reset(password=None, require_password=False)
    launchpad.add_wf(wf_creator([5, 5, 2]))
    rapidfire(launchpad, nlaunches=10, sleep_time=0)
Example #36
def rapidfire_process(fworker,
                      nlaunches,
                      sleep,
                      loglvl,
                      port,
                      node_list,
                      sub_nproc,
                      timeout,
                      running_ids_dict,
                      local_redirect,
                      firing_state_dict,
                      macro_sleep_time=None):
    """
    Initializes shared data with multiprocessing parameters and starts a rapidfire.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): listening port number of the shared object manager
        node_list ([str]): computer node list
        sub_nproc (int): number of processors of the sub job
        timeout (int): # of seconds after which to stop the rapidfire process
        macro_sleep_time (int): secs to sleep between sub job resubmit
        local_redirect (bool): redirect standard input and output to local file
    """
    ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
    ds.connect()
    launchpad = ds.LaunchPad()
    fw_data = FWData()
    fw_data.DATASERVER = ds
    fw_data.MULTIPROCESSING = True
    fw_data.NODE_LIST = node_list
    fw_data.SUB_NPROCS = sub_nproc
    fw_data.Running_IDs = running_ids_dict
    fw_data.FiringState = firing_state_dict
    fw_data.lp = launchpad
    sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
    l_dir = launchpad.get_logdir() if launchpad else None
    l_logger = get_fw_logger('rocket.launcher',
                             l_dir=l_dir,
                             stream_level=loglvl)
    fw_data.FiringState[os.getpid()] = True
    rapidfire(launchpad,
              fworker=fworker,
              m_dir=None,
              nlaunches=nlaunches,
              max_loops=-1,
              sleep_time=sleep,
              strm_lvl=loglvl,
              timeout=timeout,
              local_redirect=local_redirect)
    fw_data.FiringState[os.getpid()] = False
    while nlaunches == 0:
        time.sleep(1.5)  # wait for LaunchPad to be initialized
        firing_pids = [
            pid for pid, is_firing in fw_data.FiringState.items() if is_firing
        ]
        if len(firing_pids) > 0:
            # Some other sub jobs are still running
            macro_sleep_time = macro_sleep_time if macro_sleep_time \
                else sleep_time * len(fw_data.FiringState)
            log_multi(
                l_logger,
                'Sleeping for {} secs before resubmit sub job'.format(
                    macro_sleep_time))
            time.sleep(macro_sleep_time)
            log_multi(l_logger, 'Resubmit sub job')
            fw_data.FiringState[os.getpid()] = True
            rapidfire(launchpad,
                      fworker=fworker,
                      m_dir=None,
                      nlaunches=nlaunches,
                      max_loops=-1,
                      sleep_time=sleep,
                      strm_lvl=loglvl,
                      timeout=timeout,
                      local_redirect=local_redirect)
            fw_data.FiringState[os.getpid()] = False
        else:
            break
    log_multi(l_logger, 'Sub job finished')
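
Note: rapidfire_process is meant to run as one worker among several. A driver would typically spawn it with multiprocessing, sharing running_ids_dict and firing_state_dict through a Manager. A minimal sketch under that assumption (spawn_workers is a hypothetical helper; FireWorks' launch_multiprocess performs the real bookkeeping, including serving the LaunchPad on the given port):

from multiprocessing import Manager, Process

def spawn_workers(fworker, num_jobs, port, node_lists, sub_nproc_list):
    # shared dicts consumed by rapidfire_process above
    manager = Manager()
    running_ids = manager.dict()
    firing_state = manager.dict()
    procs = []
    for i in range(num_jobs):
        p = Process(target=rapidfire_process,
                    args=(fworker, 0, None, "INFO", port, node_lists[i],
                          sub_nproc_list[i], None, running_ids, False,
                          firing_state))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()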
Example #37
def rapidfire_process(fworker, nlaunches, sleep, loglvl, port, node_list,
                      sub_nproc, timeout, running_ids_dict, local_redirect):
    """
    Initializes shared data with multiprocessing parameters and starts a rapidfire.

    Args:
        fworker (FWorker): object
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        sleep (int): secs to sleep between rapidfire loop iterations
        loglvl (str): level at which to output logs to stdout
        port (int): Listening port number of the shared object manage
        password (str): security password to access the server
        node_list ([str]): computer node list
        sub_nproc (int): number of processors of the sub job
        timeout (int): # of seconds after which to stop the rapidfire process
        local_redirect (bool): redirect standard input and output to local file
    """
    ds = DataServer(address=("127.0.0.1", port), authkey=DS_PASSWORD)
    ds.connect()
    launchpad = ds.LaunchPad()
    FWData().DATASERVER = ds
    FWData().MULTIPROCESSING = True
    FWData().NODE_LIST = node_list
    FWData().SUB_NPROCS = sub_nproc
    FWData().Running_IDs = running_ids_dict
    sleep_time = sleep if sleep else RAPIDFIRE_SLEEP_SECS
    l_dir = launchpad.get_logdir() if launchpad else None
    l_logger = get_fw_logger("rocket.launcher",
                             l_dir=l_dir,
                             stream_level=loglvl)
    rapidfire(
        launchpad,
        fworker=fworker,
        m_dir=None,
        nlaunches=nlaunches,
        max_loops=-1,
        sleep_time=sleep,
        strm_lvl=loglvl,
        timeout=timeout,
        local_redirect=local_redirect,
    )
    while nlaunches == 0:
        time.sleep(1.5)  # wait for LaunchPad to be initialized
        launch_ids = FWData().Running_IDs.values()
        live_ids = list(set(launch_ids) - {None})
        if len(live_ids) > 0:
            # Some other sub jobs are still running
            log_multi(
                l_logger,
                f"Sleeping for {sleep_time} secs before resubmit sub job")
            time.sleep(sleep_time)
            log_multi(l_logger, "Resubmit sub job")
            rapidfire(
                launchpad,
                fworker=fworker,
                m_dir=None,
                nlaunches=nlaunches,
                max_loops=-1,
                sleep_time=sleep,
                strm_lvl=loglvl,
                timeout=timeout,
                local_redirect=local_redirect,
            )
        else:
            break
    log_multi(l_logger, "Sub job finished")
Example #38
    def test_Fragmentation(self):
        with patch("atomate.qchem.firetasks.fragmenter.FWAction"
                   ) as FWAction_patch:
            mock_FWAction = MagicMock()
            FWAction_patch.return_value = mock_FWAction
            mock_FWAction.as_dict.return_value = {
                "stored_data": {},
                "exit": False,
                "update_spec": {},
                "mod_spec": [],
                "additions": [],
                "detours": [],
                "defuse_children": False,
                "defuse_workflow": False,
            }

            # location of test files
            test_FF_then_fragment_files = os.path.join(module_dir, "..", "..",
                                                       "test_files",
                                                       "FF_then_fragment_wf")
            # define starting molecule and workflow object
            initial_qcin = QCInput.from_file(
                os.path.join(
                    test_FF_then_fragment_files,
                    "block",
                    "launcher_first",
                    "mol.qin.opt_0",
                ))
            initial_mol = initial_qcin.molecule
            real_wf = get_fragmentation_wf(
                molecule=initial_mol,
                depth=0,
                do_triplets=False,
                qchem_input_params={
                    "scf_algorithm": "gdm",
                    "basis_set": "6-311++g*"
                },
            )
            # use powerup to replace run with fake run
            ref_dirs = {
                "first FF":
                os.path.join(test_FF_then_fragment_files, "block",
                             "launcher_first"),
                "fragment and FF_opt":
                os.path.join(test_FF_then_fragment_files, "block",
                             "launcher_second"),
            }
            fake_wf = use_fake_qchem(real_wf, ref_dirs)
            self.lp.add_wf(fake_wf)
            rapidfire(
                self.lp,
                fworker=FWorker(env={
                    "max_cores": 32,
                    "db_file": os.path.join(db_dir, "db.json")
                }),
                pdb_on_exception=True,
            )

            first_FF = self.get_task_collection().find_one(
                {"task_label": "first FF"})
            self.assertEqual(first_FF["calcs_reversed"][0]["input"]["solvent"],
                             None)
            self.assertEqual(first_FF["num_frequencies_flattened"], 0)
            self.assertEqual(len(FWAction_patch.call_args[1]["additions"]),
                             5 * 3)
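
The final assertion relies on unittest.mock bookkeeping: call_args[0] holds the positional arguments of the most recent call and call_args[1] the keyword arguments. A self-contained illustration:

from unittest.mock import MagicMock

m = MagicMock()
m("positional", additions=[1, 2, 3])
args, kwargs = m.call_args          # args == ("positional",), kwargs is a dict
assert args == ("positional",)
assert len(kwargs["additions"]) == 3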
Example #39
def run_workflows():
    TESTDB_NAME = 'rsled'
    launchpad = LaunchPad(name=TESTDB_NAME)
    launchpad.reset(password=None, require_password=False)
    launchpad.add_wf(wf_creator([60, 45.0, "industry standard"]))
    rapidfire(launchpad, nlaunches=500, sleep_time=0)
Example #40
def rlaunch():

    m_description = 'This program launches one or more Rockets. A Rocket grabs a job from the central database and ' \
                    'runs it. The "single-shot" option launches a single Rocket, ' \
                    'whereas the "rapidfire" option loops until all FireWorks are completed.'

    parser = ArgumentParser(description=m_description)
    subparsers = parser.add_subparsers(help='command', dest='command')
    single_parser = subparsers.add_parser('singleshot',
                                          help='launch a single Rocket')
    rapid_parser = subparsers.add_parser(
        'rapidfire',
        help='launch multiple Rockets (loop until all FireWorks complete)')

    single_parser.add_argument('-f',
                               '--fw_id',
                               help='specific fw_id to run',
                               default=None,
                               type=int)
    single_parser.add_argument('--offline',
                               help='run in offline mode (FW.json required)',
                               action='store_true')

    rapid_parser.add_argument(
        '--nlaunches',
        help='num_launches (int or "infinite"; default 0 is all jobs in DB)',
        default=0)
    rapid_parser.add_argument('--sleep',
                              help='sleep time between loops (secs)',
                              default=None,
                              type=int)

    parser.add_argument('-l',
                        '--launchpad_file',
                        help='path to launchpad file',
                        default=LAUNCHPAD_LOC)
    parser.add_argument('-w',
                        '--fworker_file',
                        help='path to fworker file',
                        default=FWORKER_LOC)
    parser.add_argument(
        '-c',
        '--config_dir',
        help=
        'path to a directory containing the config file (used if -l, -w unspecified)',
        default=CONFIG_FILE_DIR)

    parser.add_argument('--loglvl',
                        help='level to print log messages',
                        default='INFO')
    parser.add_argument('-s',
                        '--silencer',
                        help='shortcut to mute log messages',
                        action='store_true')

    args = parser.parse_args()

    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C

    if not args.launchpad_file and os.path.exists(
            os.path.join(args.config_dir, 'my_launchpad.yaml')):
        args.launchpad_file = os.path.join(args.config_dir,
                                           'my_launchpad.yaml')

    if not args.fworker_file and os.path.exists(
            os.path.join(args.config_dir, 'my_fworker.yaml')):
        args.fworker_file = os.path.join(args.config_dir, 'my_fworker.yaml')

    args.loglvl = 'CRITICAL' if args.silencer else args.loglvl

    if args.command == 'singleshot' and args.offline:
        launchpad = None
    else:
        launchpad = LaunchPad.from_file(
            args.launchpad_file) if args.launchpad_file else LaunchPad(
                strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    # prime addr lookups
    _log = get_fw_logger("rlaunch", stream_level="INFO")
    _log.info("Hostname/IP lookup (this will take a few seconds)")
    get_my_host()
    get_my_ip()

    if args.command == 'rapidfire':
        rapidfire(launchpad, fworker, None, args.nlaunches, -1, args.sleep,
                  args.loglvl)

    else:
        launch_rocket(launchpad, fworker, args.fw_id, args.loglvl)
Example #41
from fireworks import LaunchPad, Firework, Workflow
from fireworks.core.rocket_launcher import rapidfire
from fireworks.examples.custom_firetasks.hello_world.hello_world_task import HelloTask

import yaml

FILENAME = ''

if __name__ == "__main__":
    # initialize the database
    with open(FILENAME) as f:
        # you might need to modify the connection settings here
        lp = LaunchPad(**yaml.safe_load(f))
    # lp.reset()  # uncomment this line and set the appropriate parameters if you want to reset the database

    # create the workflow and store it in the database
    final_task = Firework([HelloTask()])
    other_tasks = [Firework([HelloTask()]) for i in range(500)]
    links = {task: [final_task] for task in other_tasks}
    my_wflow = Workflow([final_task] + other_tasks, links_dict=links)
    lp.add_wf(my_wflow)

    # run the workflow
    rapidfire(lp)
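
The same fan-in topology can be built with the parents keyword instead of an explicit links_dict; a sketch of the equivalent construction:

other_tasks = [Firework([HelloTask()]) for _ in range(500)]
final_task = Firework([HelloTask()], parents=other_tasks)  # runs after all 500
my_wflow = Workflow(other_tasks + [final_task])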
Example #42
    def test_getinterpolatedposcar(self):
        nimages = 5
        this_image = 1
        autosort_tol = 0.5

        fw1 = Firework(
            [
                CopyVaspOutputs(
                    calc_dir=self.static_outdir,
                    contcar_to_poscar=False,
                    additional_files=["CONTCAR"],
                ),
                PassCalcLocs(name="fw1"),
            ],
            name="fw1",
        )

        fw2 = Firework(
            [
                CopyVaspOutputs(
                    calc_dir=self.opt_outdir,
                    contcar_to_poscar=False,
                    additional_files=["CONTCAR"],
                ),
                PassCalcLocs(name="fw2"),
            ],
            name="fw2",
        )

        fw3 = Firework(
            [
                GetInterpolatedPOSCAR(
                    start="fw1",
                    end="fw2",
                    this_image=this_image,
                    nimages=nimages,
                    autosort_tol=autosort_tol,
                ),
                PassCalcLocs(name="fw3"),
            ],
            name="fw3",
            parents=[fw1, fw2],
        )
        fw4 = Firework([PassCalcLocs(name="fw4")], name="fw4", parents=fw3)

        wf = Workflow([fw1, fw2, fw3, fw4])
        self.lp.add_wf(wf)
        rapidfire(self.lp)

        fw4 = self.lp.get_fw_by_id(self.lp.get_fw_ids({"name": "fw4"})[0])

        calc_locs = fw4.spec["calc_locs"]
        self.assertTrue(
            os.path.exists(get_calc_loc("fw3", calc_locs)["path"] + "/POSCAR"))
        self.assertTrue(
            os.path.exists(
                get_calc_loc("fw3", calc_locs)["path"] +
                "/interpolate/CONTCAR_0"))
        self.assertTrue(
            os.path.exists(
                get_calc_loc("fw3", calc_locs)["path"] +
                "/interpolate/CONTCAR_1"))

        struct_start = Structure.from_file(
            get_calc_loc("fw3", calc_locs)["path"] + "/interpolate/CONTCAR_0")
        struct_end = Structure.from_file(
            get_calc_loc("fw3", calc_locs)["path"] + "/interpolate/CONTCAR_1")
        struct_inter = Structure.from_file(
            get_calc_loc("fw3", calc_locs)["path"] + "/POSCAR")

        structs = struct_start.interpolate(struct_end,
                                           nimages,
                                           interpolate_lattices=True,
                                           autosort_tol=autosort_tol)

        # Check x of 1st site.
        self.assertAlmostEqual(structs[this_image][1].coords[0],
                               struct_inter[1].coords[0])
        # Check c lattice parameter
        self.assertAlmostEqual(structs[this_image].lattice.abc[0],
                               struct_inter.lattice.abc[0])
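
The reference values in the last assertions come straight from pymatgen's Structure.interpolate; the same call standalone, with hypothetical file paths:

from pymatgen.core import Structure

start = Structure.from_file("CONTCAR_0")  # hypothetical paths
end = Structure.from_file("CONTCAR_1")
images = start.interpolate(end, nimages=5, interpolate_lattices=True,
                           autosort_tol=0.5)
print(images[1])  # corresponds to this_image=1 in the test above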
Example #43
                        default='INFO')
    parser.add_argument('--silencer',
                        help='shortcut to mute log messages',
                        action='store_true')

    args = parser.parse_args()

    if not args.launchpad_file and os.path.exists('my_launchpad.yaml'):
        args.launchpad_file = 'my_launchpad.yaml'

    if not args.fworker_file and os.path.exists('my_fworker.yaml'):
        args.fworker_file = 'my_fworker.yaml'

    args.loglvl = 'CRITICAL' if args.silencer else args.loglvl

    if args.launchpad_file:
        launchpad = LaunchPad.from_file(args.launchpad_file)
    else:
        launchpad = LaunchPad(logdir=args.logdir, strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    if args.command == 'rapidfire':
        rapidfire(launchpad, fworker, None, args.logdir, args.loglvl,
                  args.nlaunches, args.sleep)

    else:
        launch_rocket(launchpad, fworker, args.logdir, args.loglvl, args.fw_id)
Example #44
                        }, {
                            "state": "READY"
                        }]
                    }, {
                        "spec._priority": 10
                    }]
                }
                highprio_ids = launchpad.get_fw_ids(highprio_query)
                if len(highprio_ids) > 0:
                    print("Jobs with high priority still not completed")
                    query = {"spec._priority": 10}
                elif len(lowprio_ids) > 0:
                    print("Jobs with low priority still not completed")
                    query = {"spec._priority": 8}
                else:
                    print("Jobs with no priority are being sent")
                    query = None
                launch_rocket_to_queue(launchpad,
                                       FWorker(category=category, query=query),
                                       adapter,
                                       launcher_dir=abspath,
                                       create_launcher_dir=True,
                                       reserve=True)
            time.sleep(5)
            if IS_OFFLINE:
                _recover_offline(lp=launchpad, fworker_name=None)

    else:
        rapidfire(launchpad,
                  FWorker(category=['dft', 'medium', 'lightweight']))
Example #45
    def test_double_FF_opt(self):
        # location of test files
        test_double_FF_files = os.path.join(module_dir, "..", "..",
                                            "test_files", "double_FF_wf")
        # define starting molecule and workflow object
        initial_qcin = QCInput.from_file(
            os.path.join(test_double_FF_files, "block", "launcher_first",
                         "mol.qin.opt_0"))
        initial_mol = initial_qcin.molecule

        real_wf = get_wf_double_FF_opt(
            molecule=initial_mol,
            pcm_dielectric=10.0,
            qchem_input_params={
                "basis_set": "6-311++g**",
                "scf_algorithm": "diis",
                "overwrite_inputs": {
                    "rem": {
                        "sym_ignore": "true"
                    }
                },
            },
        )
        # use powerup to replace run with fake run
        ref_dirs = {
            "first_FF_no_pcm":
            os.path.join(test_double_FF_files, "block", "launcher_first"),
            "second_FF_with_pcm":
            os.path.join(test_double_FF_files, "block", "launcher_second"),
        }
        fake_wf = use_fake_qchem(real_wf, ref_dirs)
        self.lp.add_wf(fake_wf)
        rapidfire(
            self.lp,
            fworker=FWorker(env={
                "max_cores": 32,
                "db_file": os.path.join(db_dir, "db.json")
            }),
        )

        wf_test = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(
            all([s == "COMPLETED" for s in wf_test.fw_states.values()]))

        first_FF = self.get_task_collection().find_one(
            {"task_label": "first_FF_no_pcm"})
        self.assertEqual(first_FF["calcs_reversed"][0]["input"]["solvent"],
                         None)
        self.assertEqual(first_FF["num_frequencies_flattened"], 1)
        first_FF_final_mol = Molecule.from_dict(
            first_FF["output"]["optimized_molecule"])

        second_FF = self.get_task_collection().find_one(
            {"task_label": "second_FF_with_pcm"})
        self.assertEqual(second_FF["calcs_reversed"][0]["input"]["solvent"],
                         {"dielectric": "10.0"})
        self.assertEqual(second_FF["num_frequencies_flattened"], 1)
        second_FF_initial_mol = Molecule.from_dict(
            second_FF["input"]["initial_molecule"])

        self.assertEqual(first_FF_final_mol, second_FF_initial_mol)
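Example #46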
    def test_wf(self):
        self.base_wf = self._simulate_vasprun(self.base_wf)
        self.base_wf_noopt = self._simulate_vasprun(self.base_wf_noopt)
        self.minimal_wf = self._simulate_vasprun(self.minimal_wf)

        self.assertEqual(len(self.base_wf.fws), 8)
        self.assertEqual(len(self.base_wf_noopt.fws), 7)
        self.assertEqual(len(self.minimal_wf.fws), 3)
        self.assertEqual(len(self.toec_wf.fws), 17)
        self.assertEqual(len(self.preset_wf.fws), 26)
        self.assertEqual(len(self.foec_wf.fws), 49)

        # check vasp parameters for ionic relaxation
        defo_vis = [
            fw.tasks[1]['vasp_input_set'] for fw in self.base_wf.fws
            if "deform" in fw.name
        ]
        assert all([vis.user_incar_settings['NSW'] == 99 for vis in defo_vis])
        assert all(
            [vis.user_incar_settings['IBRION'] == 2 for vis in defo_vis])
        # check preset parameters
        defo_vis = [
            fw.tasks[2]['vasp_input_set'] for fw in self.preset_wf.fws
            if "deform" in fw.name
        ]
        assert all(
            [vis.user_incar_settings['ENCUT'] == 700 for vis in defo_vis])
        assert all([
            vis.user_kpoints_settings.get('grid_density') == 7000
            for vis in defo_vis
        ])

        self.lp.add_wf(self.base_wf)
        self.lp.add_wf(self.base_wf_noopt)
        self.lp.add_wf(self.toec_analysis)

        rapidfire(
            self.lp,
            fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))

        # check relaxation
        d = self.get_task_collection().find_one(
            {"task_label": "structure optimization"})
        self._check_run(d, mode="structure optimization")
        # check two of the deformation calculations
        d = self.get_task_collection().find_one(
            {"task_label": "elastic deformation 0"})
        self._check_run(d, mode="elastic deformation 0")

        d = self.get_task_collection().find_one(
            {"task_label": "elastic deformation 3"})
        self._check_run(d, mode="elastic deformation 3")

        # check the final results
        d = self.get_task_collection(coll_name="elasticity").find_one({
            'order': 2,
            "optimized_structure": {
                "$exists": True
            }
        })
        self._check_run(d, mode="elastic analysis")

        # check third-order results
        d = self.get_task_collection(coll_name="elasticity").find_one(
            {'order': 3})
        self._check_run(d, mode="toec analysis")

        wf = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == 'COMPLETED' for s in wf.fw_states.values()]))
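Example #47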
    def test_lammps_wflow(self):
        lammps_data = LammpsForceFieldData.from_file(self.data_file)
        log_filename = "peo.log"
        dump_filename = "peo.dump"
        dcd_traj_filename = "peo.dcd"
        timestep = 1  # in fmsec for 'real' units
        n_timesteps = 1000
        dump_freq = 50
        T = [300, 300, 100.0]  # start, end, damp
        P = [0, 0, 100.0]

        # override default settings read from the json file with these
        user_settings = {
            "log":
            log_filename,
            "timestep":
            timestep,
            "run":
            n_timesteps,
            "pair_style":
            "buck/coul/cut 15.0",
            "pair_coeff": [
                "1 1 2649.6 0.2674 27.22", "1 2 4320.0 0.2928 137.6",
                "1 3 14176.0 0.2563 104.0", "2 2 14976.0 0.3236 637.6",
                "2 3 33702.4 0.2796 503.0", "3 3 75844.8 0.2461 396.9"
            ],
            "thermo_style":
            "custom step time temp press pe ke etotal enthalpy fmax fnorm",
            "fix":
            "NPT all npt temp {T[0]} {T[1]} {T[2]} iso {P[0]} {P[1]} {P[2]}".
            format(T=T, P=P),
            "dump": [
                "peodump all custom {} {} id type x y z ix iy iz mol".format(
                    dump_freq, dump_filename),
                "traj all dcd {} {}".format(dump_freq, dcd_traj_filename)
            ]
        }

        if not LAMMPS_CMD:
            # fake run
            lammps_bin = "cp  {}/peo.* .".format(self.reference_files_path)
            dry_run = True
        else:
            lammps_bin = LAMMPS_CMD
            dry_run = False
        wf = wf_from_input_template(self.input_template,
                                    lammps_data,
                                    "npt.data",
                                    user_settings,
                                    is_forcefield=True,
                                    input_filename="lammps.inp",
                                    lammps_bin=lammps_bin,
                                    db_file=">>db_file<<",
                                    dry_run=dry_run)
        self.lp.add_wf(wf)
        # run
        rapidfire(
            self.lp,
            fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")}))
        d = self.get_task_collection().find_one()
        self._check_run(d)
Example #48
    def test_add_fw(self):
        fw = Firework(AdditionTask(), {"input_array": [5, 7]})
        self.lp.add_wf(fw)
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        self.assertEqual(
            self.lp.get_launch_by_id(1).action.stored_data["sum"], 12)
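
The assertion above reads the "sum" that AdditionTask stored via FWAction.stored_data. A minimal sketch of such a task, assuming only the standard FiretaskBase API (the class below is illustrative, not the shipped AdditionTask):

from fireworks import FiretaskBase, FWAction, explicit_serialize

@explicit_serialize
class MyAdditionTask(FiretaskBase):
    def run_task(self, fw_spec):
        # sum the spec inputs and persist the result with the launch
        total = sum(fw_spec["input_array"])
        return FWAction(stored_data={"sum": total})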
Example #49
    def itest_dte_with_phonons(self, lp, fworker, tmpdir, input_scf_phonon_gan_low, use_autoparal, db_data):
        """
        Simple test of DteFWWorkflow with autoparal True and False.
        Skips dte permutations.
        """

        # dte calculations only work with selected values of ixc
        input_scf_phonon_gan_low['ixc'] = 7
        dte_inputs = dte_from_gsinput(input_scf_phonon_gan_low, use_phonons=True, skip_dte_permutations=True,
                                      ph_tol={"tolvrs": 1.0e-7}, ddk_tol={"tolwfr": 1.0e-16},
                                      dde_tol={"tolvrs": 1.0e-7})

        wf = DteFWWorkflow(input_scf_phonon_gan_low, ddk_inp=dte_inputs.filter_by_tags(DDK),
                           dde_inp=dte_inputs.filter_by_tags(DDE), dte_inp=dte_inputs.filter_by_tags(DTE),
                           ph_inp=dte_inputs.filter_by_tags(PH_Q_PERT), autoparal=use_autoparal,
                           initialization_info={"kppa": 100})

        wf.add_anaddb_dte_fw(input_scf_phonon_gan_low.structure, dieflag=1, nlflag=1)
        wf.add_mongoengine_db_insertion(db_data)
        wf.add_final_cleanup(["WFK"])

        scf_fw_id = wf.scf_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf = lp.get_wf_by_fw_id(scf_fw_id)

        assert wf.state == "COMPLETED"

        # check the effect of the final cleanup
        scf_task = load_abitask(get_fw_by_task_index(wf, "scf", index=1))

        assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_WFK"))) == 0
        assert len(glob.glob(os.path.join(scf_task.outdir.path, "*_DEN"))) == 1
        assert len(glob.glob(os.path.join(scf_task.tmpdir.path, "*"))) == 0
        assert len(glob.glob(os.path.join(scf_task.indir.path, "*"))) == 0

        # check the save in the DB
        from abiflows.database.mongoengine.abinit_results import DteResult
        with db_data.switch_collection(DteResult) as DteResult:
            results = DteResult.objects()
            assert len(results) == 1
            r = results[0]

            assert r.abinit_input.structure.to_mgobj() == input_scf_phonon_gan_low.structure
            assert r.abinit_output.structure.to_mgobj() == input_scf_phonon_gan_low.structure
            assert r.abinit_input.ecut == input_scf_phonon_gan_low['ecut']
            assert r.abinit_input.kppa == 100
            nptu.assert_array_equal(r.abinit_input.gs_input.to_mgobj()['ngkpt'], input_scf_phonon_gan_low['ngkpt'])

            ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.anaddb_nc.read())
                db_file.seek(0)
                assert filecmp.cmp(ana_task.anaddb_nc_path, db_file.name)

            mrgddb_task = load_abitask(get_fw_by_task_index(wf, "mrgddb", index=None))

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.ddb.read())
                db_file.seek(0)
                assert filecmp.cmp(mrgddb_task.merged_ddb_path, db_file.name)

        if self.check_numerical_values:
            with scf_task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-680.402255069, rel=0.005)

            ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))
            with ana_task.open_anaddbnc() as ananc:
                assert float(ananc.dchide[0,0,2]) == pytest.approx(-1.69328765210, rel=0.15)
Example #50
def rlaunch():
    m_description = (
        "This program launches one or more Rockets. A Rocket retrieves a job from the "
        'central database and runs it. The "single-shot" option launches a single Rocket, '
        'whereas the "rapidfire" option loops until all FireWorks are completed.'
    )

    parser = ArgumentParser(description=m_description)
    subparsers = parser.add_subparsers(help="command", dest="command")
    single_parser = subparsers.add_parser("singleshot",
                                          help="launch a single Rocket")
    rapid_parser = subparsers.add_parser(
        "rapidfire",
        help="launch multiple Rockets (loop until all FireWorks complete)")
    multi_parser = subparsers.add_parser(
        "multi", help="launches multiple Rockets simultaneously")

    single_parser.add_argument("-f",
                               "--fw_id",
                               help="specific fw_id to run",
                               default=None,
                               type=int)
    single_parser.add_argument("--offline",
                               help="run in offline mode (FW.json required)",
                               action="store_true")
    single_parser.add_argument("--pdb",
                               help="shortcut to invoke debugger on error",
                               action="store_true")

    rapid_parser.add_argument("--nlaunches",
                              help='num_launches (int or "infinite"; '
                              "default 0 is all jobs in DB)",
                              default=0)
    rapid_parser.add_argument(
        "--timeout",
        help="timeout (secs) after which to quit (default None)",
        default=None,
        type=int)
    rapid_parser.add_argument(
        "--max_loops",
        help=
        "after this many sleep loops, quit even in infinite nlaunches mode (default -1 is infinite loops)",
        default=-1,
        type=int,
    )
    rapid_parser.add_argument("--sleep",
                              help="sleep time between loops (secs)",
                              default=None,
                              type=int)
    rapid_parser.add_argument(
        "--local_redirect",
        help="Redirect stdout and stderr to the launch directory",
        action="store_true")

    multi_parser.add_argument("num_jobs",
                              help="the number of jobs to run in parallel",
                              type=int)
    multi_parser.add_argument(
        "--nlaunches",
        help="number of FireWorks to run in series per "
        'parallel job (int or "infinite"; default 0 is '
        "all jobs in DB)",
        default=0,
    )
    multi_parser.add_argument(
        "--sleep",
        help="sleep time between loops in infinite launch mode (secs)",
        default=None,
        type=int)
    multi_parser.add_argument(
        "--timeout",
        help="timeout (secs) after which to quit (default None)",
        default=None,
        type=int)
    multi_parser.add_argument(
        "--nodefile",
        help="nodefile name or environment variable name "
        "containing the node file name (for populating"
        " FWData only)",
        default=None,
        type=str,
    )
    multi_parser.add_argument(
        "--ppn",
        help="processors per node (for populating FWData only)",
        default=1,
        type=int)
    multi_parser.add_argument(
        "--exclude_current_node",
        help="Don't use the script launching node as compute node",
        action="store_true")
    multi_parser.add_argument(
        "--local_redirect",
        help="Redirect stdout and stderr to the launch directory",
        action="store_true")

    parser.add_argument("-l",
                        "--launchpad_file",
                        help="path to launchpad file")
    parser.add_argument("-w", "--fworker_file", help="path to fworker file")
    parser.add_argument(
        "-c",
        "--config_dir",
        help=
        "path to a directory containing the config file (used if -l, -w unspecified)",
        default=CONFIG_FILE_DIR,
    )

    parser.add_argument("--loglvl",
                        help="level to print log messages",
                        default="INFO")
    parser.add_argument("-s",
                        "--silencer",
                        help="shortcut to mute log messages",
                        action="store_true")

    try:
        import argcomplete

        argcomplete.autocomplete(parser)
        # This supports bash autocompletion. To enable this, pip install
        # argcomplete, activate global completion, or add
        #      eval "$(register-python-argcomplete rlaunch)"
        # into your .bash_profile or .bashrc
    except ImportError:
        pass

    args = parser.parse_args()

    signal.signal(signal.SIGINT, handle_interrupt)  # graceful exit on ^C

    if not args.launchpad_file and os.path.exists(
            os.path.join(args.config_dir, "my_launchpad.yaml")):
        args.launchpad_file = os.path.join(args.config_dir,
                                           "my_launchpad.yaml")
    elif not args.launchpad_file:
        args.launchpad_file = LAUNCHPAD_LOC

    if not args.fworker_file and os.path.exists(
            os.path.join(args.config_dir, "my_fworker.yaml")):
        args.fworker_file = os.path.join(args.config_dir, "my_fworker.yaml")
    elif not args.fworker_file:
        args.fworker_file = FWORKER_LOC

    args.loglvl = "CRITICAL" if args.silencer else args.loglvl

    if args.command == "singleshot" and args.offline:
        launchpad = None
    else:
        launchpad = LaunchPad.from_file(
            args.launchpad_file) if args.launchpad_file else LaunchPad(
                strm_lvl=args.loglvl)

    if args.fworker_file:
        fworker = FWorker.from_file(args.fworker_file)
    else:
        fworker = FWorker()

    # prime addr lookups
    _log = get_fw_logger("rlaunch", stream_level="INFO")
    _log.info("Hostname/IP lookup (this will take a few seconds)")
    get_my_host()
    get_my_ip()

    if args.command == "rapidfire":
        rapidfire(
            launchpad,
            fworker=fworker,
            m_dir=None,
            nlaunches=args.nlaunches,
            max_loops=args.max_loops,
            sleep_time=args.sleep,
            strm_lvl=args.loglvl,
            timeout=args.timeout,
            local_redirect=args.local_redirect,
        )
    elif args.command == "multi":
        total_node_list = None
        if args.nodefile:
            if args.nodefile in os.environ:
                args.nodefile = os.environ[args.nodefile]
            with open(args.nodefile) as f:
                total_node_list = [line.strip() for line in f.readlines()]
        launch_multiprocess(
            launchpad,
            fworker,
            args.loglvl,
            args.nlaunches,
            args.num_jobs,
            args.sleep,
            total_node_list,
            args.ppn,
            timeout=args.timeout,
            exclude_current_node=args.exclude_current_node,
            local_redirect=args.local_redirect,
        )
    else:
        launch_rocket(launchpad,
                      fworker,
                      args.fw_id,
                      args.loglvl,
                      pdb_on_exception=args.pdb)
Example #51
"""
This code is described in the Dynamic Workflow tutorial, http://fireworks.readthedocs.io/en/latest/dynamic_wf_tutorial.html
"""

from fireworks import Firework, FWorker, LaunchPad
from fireworks.core.rocket_launcher import rapidfire
from fw_tutorials.dynamic_wf.fibadd_task import FibonacciAdderTask

if __name__ == "__main__":
    # set up the LaunchPad and reset it
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)

    # create the Firework consisting of a custom "Fibonacci" task
    firework = Firework(FibonacciAdderTask(), spec={"smaller": 0, "larger": 1, "stop_point": 100})

    # store workflow and launch it locally
    launchpad.add_wf(firework)
    rapidfire(launchpad, FWorker())
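
FibonacciAdderTask extends the workflow at runtime by returning FWAction(additions=...). A hedged sketch of that dynamic pattern (FibLikeTask mimics, but is not, the tutorial task):

from fireworks import Firework, FiretaskBase, FWAction, explicit_serialize

@explicit_serialize
class FibLikeTask(FiretaskBase):
    def run_task(self, fw_spec):
        smaller, larger = fw_spec["smaller"], fw_spec["larger"]
        total = smaller + larger
        if total < fw_spec["stop_point"]:
            # schedule the next step of the sequence as a new Firework
            next_fw = Firework(FibLikeTask(), spec={
                "smaller": larger, "larger": total,
                "stop_point": fw_spec["stop_point"]})
            return FWAction(stored_data={"next_fibnum": total}, additions=next_fw)
        return FWAction()  # sequence finished; add nothing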

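Example #52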
    def test_torsion_potential(self):
        # location of test files
        test_tor_files = os.path.join(
            module_dir, "..", "..", "test_files", "torsion_wf"
        )
        # define starting molecule and torsion potential workflow object
        initial_qcin = QCInput.from_file(
            os.path.join(test_tor_files, "initial_opt", "mol.qin")
        )
        initial_mol = initial_qcin.molecule
        atom_indexes = [6, 8, 9, 10]
        angles = [0.0, 90.0, 180.0]
        rem = []
        # add the first rem section
        rem.append(
            {
                "jobtype": "opt",
                "method": "wb97m-v",
                "basis": "def2-tzvppd",
                "gen_scfman": "true",
                "geom_opt_max_cycles": 75,
                "max_scf_cycles": 300,
                "scf_algorithm": "diis",
                "scf_guess": "sad",
                "sym_ignore": "true",
                "symmetry": "false",
                "thresh": 14,
            }
        )

        # the second rem section
        rem.append(
            {
                "jobtype": "opt",
                "method": "wb97m-v",
                "basis": "def2-tzvppd",
                "geom_opt_max_cycles": 75,
                "max_scf_cycles": 300,
                "scf_algorithm": "diis",
                "scf_guess": "sad",
                "sym_ignore": "true",
                "symmetry": "false",
                "thresh": 14,
            }
        )

        real_wf = get_wf_torsion_potential(
            molecule=initial_mol,
            atom_indexes=atom_indexes,
            angles=angles,
            rem=rem,
            db_file=">>db_file<<",
        )
        # use powerup to replace run with fake run
        # def ref_dirs
        ref_dirs = {
            "initial_opt": os.path.join(test_tor_files, "initial_opt"),
            "opt_0": os.path.join(test_tor_files, "opt_0"),
            "opt_90": os.path.join(test_tor_files, "opt_90"),
            "opt_180": os.path.join(test_tor_files, "opt_180"),
        }
        fake_wf = use_fake_qchem(real_wf, ref_dirs)

        self.lp.add_wf(fake_wf)
        rapidfire(
            self.lp, fworker=FWorker(env={"db_file": os.path.join(db_dir, "db.json")})
        )

        wf_test = self.lp.get_wf_by_fw_id(1)
        self.assertTrue(all([s == "COMPLETED" for s in wf_test.fw_states.values()]))

        # Checking of the inputs happens in fake_run_qchem so there is no point to retest the inputs
        # Check the output info that gets inserted in the DB
        init_opt = self.get_task_collection().find_one({"task_label": "initial_opt"})
        init_opt_final_mol = Molecule.from_dict(
            init_opt["output"]["optimized_molecule"]
        )
        init_opt_final_e = init_opt["output"]["final_energy"]
        # parse output file
        act_init_opt_out = QCOutput(
            os.path.join(test_tor_files, "initial_opt", "mol.qout")
        )
        act_init_opt_mol = act_init_opt_out.data["molecule_from_optimized_geometry"]
        act_init_opt_final_e = act_init_opt_out.data["final_energy"]

        np.testing.assert_equal(act_init_opt_mol.species, init_opt_final_mol.species)
        np.testing.assert_allclose(
            act_init_opt_mol.cart_coords, init_opt_final_mol.cart_coords, atol=0.0001
        )
        np.testing.assert_equal(act_init_opt_final_e, init_opt_final_e)

        # Optimization of 0 torsion
        opt_0 = self.get_task_collection().find_one({"task_label": "opt_0"})
        opt_0_final_mol = Molecule.from_dict(opt_0["output"]["optimized_molecule"])
        opt_0_final_e = opt_0["output"]["final_energy"]
        # parse output file
        act_opt_0_out = QCOutput(os.path.join(test_tor_files, "opt_0", "mol.qout"))
        act_opt_0_mol = act_opt_0_out.data["molecule_from_optimized_geometry"]
        act_opt_0_final_e = act_opt_0_out.data["final_energy"]

        np.testing.assert_equal(act_opt_0_mol.species, opt_0_final_mol.species)
        np.testing.assert_allclose(
            act_opt_0_mol.cart_coords, opt_0_final_mol.cart_coords, atol=0.0001
        )
        np.testing.assert_equal(act_opt_0_final_e, opt_0_final_e)

        # Optimization of 90 torsion
        opt_90 = self.get_task_collection().find_one({"task_label": "opt_90"})
        opt_90_final_mol = Molecule.from_dict(opt_90["output"]["optimized_molecule"])
        opt_90_final_e = opt_90["output"]["final_energy"]
        # parse output file
        act_opt_90_out = QCOutput(os.path.join(test_tor_files, "opt_90", "mol.qout"))
        act_opt_90_mol = act_opt_90_out.data["molecule_from_optimized_geometry"]
        act_opt_90_final_e = act_opt_90_out.data["final_energy"]

        np.testing.assert_equal(act_opt_90_mol.species, opt_90_final_mol.species)
        np.testing.assert_allclose(
            act_opt_90_mol.cart_coords, opt_90_final_mol.cart_coords, atol=0.0001
        )
        np.testing.assert_equal(act_opt_90_final_e, opt_90_final_e)

        # Optimization of 180 torsion
        opt_180 = self.get_task_collection().find_one({"task_label": "opt_180"})
        opt_180_final_mol = Molecule.from_dict(opt_180["output"]["optimized_molecule"])
        opt_180_final_e = opt_180["output"]["final_energy"]
        # parse output file
        act_opt_180_out = QCOutput(os.path.join(test_tor_files, "opt_180", "mol.qout"))
        act_opt_180_mol = act_opt_180_out.data["molecule_from_optimized_geometry"]
        act_opt_180_final_e = act_opt_180_out.data["final_energy"]

        np.testing.assert_equal(act_opt_180_mol.species, opt_180_final_mol.species)
        np.testing.assert_allclose(
            act_opt_180_mol.cart_coords, opt_180_final_mol.cart_coords, atol=0.0001
        )
        np.testing.assert_equal(act_opt_180_final_e, opt_180_final_e)
Example #53
    """
    fin_len = x[0]
    fin_angle = x[1]
    useful_feature1 = fin_len + fin_angle**2
    useful_feature2 = fin_angle + fin_len
    return x + [useful_feature1, useful_feature2]


if __name__ == "__main__":
    # Make a MissionControl object
    mc = MissionControl(**db_info)

    # Reset the launchpad and optimization db for this example
    launchpad.reset(password=None, require_password=False)
    mc.reset(hard=True)

    # Configure the optimization db with MissionControl
    mc.configure(wf_creator=wf_creator,
                 dimensions=x_dim,
                 acq="maximin",
                 predictor="GaussianProcessRegressor",
                 get_z=get_z)

    # Run 30 workflows + optimization
    launchpad.add_wf(wf_creator([100, 45.0, "dolphin fin"]))
    rapidfire(launchpad, nlaunches=30)

    # Examine and plot the optimization
    plt = mc.plot(print_pareto=True)
    plt.show()
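
The wf_creator configured above must return a Workflow whose Firework ends in rocketsled's OptTask, so that each launch schedules the next guess. A minimal sketch under that assumption (MyCalcTask is hypothetical, and db_info is the same dict passed to MissionControl; the '_x' spec key follows current rocketsled conventions):

from fireworks import Firework, Workflow
from rocketsled import OptTask

def wf_creator(x):
    spec = {"_x": x}  # the point in the search space being evaluated
    fw = Firework([MyCalcTask(), OptTask(**db_info)], spec=spec)
    return Workflow([fw])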
Example #54
    def test_except_details_on_rerun(self):
        rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
        self.assertEqual(os.getcwd(), MODULE_DIR)
        self.lp.rerun_fw(1)
        fw = self.lp.get_fw_by_id(1)
        self.assertEqual(fw.spec['_exception_details'], self.error_test_dict)
Example #55
    def itest_dfpt_anaddb_ph(self, lp, fworker, tmpdir, input_scf_phonon_gan_low, db_data):
        """
        Simple test of DteFWWorkflow with autoparal True and False.
        Skips dte permutations.
        """

        dfpt_inputs = dfpt_from_gsinput(input_scf_phonon_gan_low, ph_ngqpt=[2, 2, 2], do_ddk=True, do_dde=True,
                                        do_strain=True, do_dte=False,
                                        ddk_tol={"tolwfr": 1.0e-16}, dde_tol={"tolvrs": 1.0e-7},
                                        strain_tol={"tolvrs": 1.0e-7}, ph_tol={"tolvrs": 1.0e-7})

        wf = DfptFWWorkflow(input_scf_phonon_gan_low, ddk_inp=dfpt_inputs.filter_by_tags(DDK),
                            dde_inp=dfpt_inputs.filter_by_tags(DDE), strain_inp=dfpt_inputs.filter_by_tags(STRAIN),
                            ph_inp=dfpt_inputs.filter_by_tags(PH_Q_PERT), dte_inp=dfpt_inputs.filter_by_tags(DTE),
                            nscf_inp=dfpt_inputs.filter_by_tags(NSCF), initialization_info={"kppa": 100},
                            autoparal=False)

        wf.add_anaddb_dfpt_fw(input_scf_phonon_gan_low.structure, ph_ngqpt=[2, 2, 2], nqsmall=2, ndivsm=3)
        wf.add_mongoengine_db_insertion(db_data)
        wf.add_final_cleanup(["WFK"])

        scf_fw_id = wf.scf_fw.fw_id
        old_new = wf.add_to_db(lpad=lp)
        scf_fw_id = old_new[scf_fw_id]

        rapidfire(lp, fworker, m_dir=str(tmpdir))

        wf = lp.get_wf_by_fw_id(scf_fw_id)

        assert wf.state == "COMPLETED"

        scf_task = load_abitask(get_fw_by_task_index(wf, "scf", index=1))

        # check the save in the DB
        from abiflows.database.mongoengine.abinit_results import DfptResult
        with db_data.switch_collection(DfptResult) as DfptResult:
            results = DfptResult.objects()
            assert len(results) == 1
            r = results[0]

            assert r.abinit_input.structure.to_mgobj() == input_scf_phonon_gan_low.structure
            assert r.abinit_output.structure.to_mgobj() == input_scf_phonon_gan_low.structure
            assert r.abinit_input.ecut == input_scf_phonon_gan_low['ecut']
            assert r.abinit_input.kppa == 100
            nptu.assert_array_equal(r.abinit_input.gs_input.to_mgobj()['ngkpt'], input_scf_phonon_gan_low['ngkpt'])

            ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.anaddb_nc.read())
                db_file.seek(0)
                assert filecmp.cmp(ana_task.anaddb_nc_path, db_file.name)

            mrgddb_task = load_abitask(get_fw_by_task_index(wf, "mrgddb", index=None))

            with tempfile.NamedTemporaryFile(mode="wb") as db_file:
                db_file.write(r.abinit_output.ddb.read())
                db_file.seek(0)
                assert filecmp.cmp(mrgddb_task.merged_ddb_path, db_file.name)

        if self.check_numerical_values:
            with scf_task.open_gsr() as gsr:
                assert gsr.energy == pytest.approx(-680.402255069, rel=0.005)

            ana_task = load_abitask(get_fw_by_task_index(wf, "anaddb", index=None))
            with ana_task.open_anaddbnc() as ananc:
                assert float(ananc.eps0[0,0]) == pytest.approx(64.8276774889143, rel=0.15)

                e = ananc.elastic_data
                if has_abinit("8.9.3"):
                    assert float(e.elastic_relaxed[0,0,0,0]) == pytest.approx(41.230540749230556, rel=0.15)
Example #56
    return Workflow([firework1])


# An optional function which returns extra information 'z' from unique vector 'x'
def get_z(x):
    return [x[0] * 2, x[2]**3]


# how an example custom optimization function could be used
# replace the code inside example_predictor with your favorite optimizer


def example_predictor(X_tot, y, X_space_total):
    # custom optimizer code goes here
    return random.choice(X_space_total)


if __name__ == "__main__":

    TESTDB_NAME = 'turboworks'
    launchpad = LaunchPad(name=TESTDB_NAME)
    launchpad.reset(password=None, require_password=False)
    launchpad.add_wf(wf_creator([1, 1, 2, "red"], launchpad, 3, my_kwarg=1))

    # if n_launches > 24 for this particular example, the search space will be exhausted and OptTask will throw
    # an exception
    rapidfire(launchpad, nlaunches=25, sleep_time=0)

    # tear down database
    # launchpad.connection.drop_database(TESTDB_NAME)
Example #57
"""
This code is described in the Dynamic Workflow tutorial, https://materialsproject.github.io/fireworks/dynamic_wf_tutorial.html
"""

from fireworks import ScriptTask
from fireworks.core.firework import Firework, Workflow
from fireworks.core.launchpad import LaunchPad
from fireworks.core.rocket_launcher import rapidfire

from fw_tutorials.dynamic_wf.printjob_task import PrintJobTask

if __name__ == "__main__":
    # set up the LaunchPad and reset it
    launchpad = LaunchPad()
    # launchpad.reset('', require_password=False)

    # create the Workflow that passes job info
    fw1 = Firework([ScriptTask.from_str('echo "This is the first FireWork"')],
                   spec={"_pass_job_info": True},
                   fw_id=1)
    fw2 = Firework([PrintJobTask()], parents=[fw1], fw_id=2)
    wf = Workflow([fw1, fw2])

    # store workflow and launch it locally
    launchpad.add_wf(wf)
    rapidfire(launchpad)
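
Because fw1 sets "_pass_job_info", fw2's spec receives a "_job_info" list carrying the parent's name, fw_id and launch_dir. A sketch of a task that consumes it (PrintJobTask itself ships with the tutorial package; the class below is a stand-in):

from fireworks import FiretaskBase, explicit_serialize

@explicit_serialize
class PrintJobInfoTask(FiretaskBase):
    def run_task(self, fw_spec):
        # "_job_info" is a list: one entry per parent that passed its info
        for info in fw_spec["_job_info"]:
            print(info["name"], info["fw_id"], info["launch_dir"])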
Example #58
def rapid(dummy):
    lpad = create_launchpad(LOCAL_DB_CONFIG)
    rapidfire(lpad, FWorker(), nlaunches=28000)