def setUp(self):
    """Record the original working directory and build one 2-line tracker
    per scratch file used by the tests."""
    self.old_wd = os.getcwd()
    # pair each destination file with its tracker as it is created
    self.dest1 = os.path.join(MODULE_DIR, 'numbers1.txt')
    self.tracker1 = Tracker(self.dest1, nlines=2)
    self.dest2 = os.path.join(MODULE_DIR, 'numbers2.txt')
    self.tracker2 = Tracker(self.dest2, nlines=2)
def __init__(self, mol, molname, mission, additional_user_tags=None,
             dupefinder=None, priority=1, update_spec=None, large=False):
    """Capture a reusable base firework spec for this molecule.

    The assembled spec is held in a closure; ``self.base_spec()`` returns
    a fresh deep copy on every call so callers may mutate it freely.
    """
    self.molname = molname
    self.mol = mol
    self.large = large
    initial_inchi = self.get_inchi(mol)

    # user-facing tags, with any caller-supplied extras merged on top
    user_tags = {'mission': mission, "molname": molname}
    if additional_user_tags:
        user_tags.update(additional_user_tags)

    # fall back to the default duplicate finder when none is supplied
    finder = dupefinder if dupefinder else DupeFinderEG()

    spec = {
        'user_tags': user_tags,
        '_priority': priority,
        '_dupefinder': finder.to_dict(),
        '_trackers': [
            Tracker("mol.qcout", nlines=20),
            Tracker("mol.qclog", nlines=10),
            Tracker("FW_job.error", nlines=20),
            Tracker("FW_job.out", nlines=20),
        ],
        'run_tags': dict(),
        'implicit_solvent': {},
        'inchi': initial_inchi,
        'num_atoms': len(mol),
    }
    if update_spec:
        spec.update(update_spec)
    # hand out deep copies so the captured spec itself is never mutated
    self.base_spec = lambda: copy.deepcopy(spec)
def setUp(self):
    """Save the current working directory and create the two trackers,
    each watching the last two lines of its destination file."""
    self.old_wd = os.getcwd()
    self.dest1 = os.path.join(MODULE_DIR, "numbers1.txt")
    self.tracker1 = Tracker(self.dest1, nlines=2)
    self.dest2 = os.path.join(MODULE_DIR, "numbers2.txt")
    self.tracker2 = Tracker(self.dest2, nlines=2)
def add_trackers(original_wf, tracked_files=None, nlines=25):
    """
    Every FireWork that runs VASP also tracks the OUTCAR, OSZICAR, etc using FWS Trackers.

    Args:
        original_wf (Workflow)
        tracked_files (list) : list of files to be tracked
        nlines (int): number of lines at the end of files to be tracked

    Returns:
        Workflow
    """
    files = ["OUTCAR", "OSZICAR"] if tracked_files is None else tracked_files
    new_trackers = [Tracker(fname, nlines=nlines, allow_zipped=True)
                    for fname in files]
    # attach the trackers to every firework containing a RunVasp task
    for fw_idx, _ in get_fws_and_tasks(original_wf,
                                       task_name_constraint="RunVasp"):
        spec = original_wf.fws[fw_idx].spec
        if "_trackers" in spec:
            spec["_trackers"].extend(new_trackers)
        else:
            spec["_trackers"] = new_trackers
    return original_wf
def get_tracker_data(self, fw_id):
    """Return tracker snapshots for every launch of the given firework.

    Each result is a dict with ``launch_id`` and a list of deserialized
    ``Tracker`` objects; launches recorded before trackers existed are
    skipped for backwards compatibility.
    """
    results = []
    cursor = self.launches.find({'fw_id': fw_id},
                                {'trackers': 1, 'launch_id': 1})
    for doc in cursor:
        if 'trackers' not in doc:  # backwards compatibility
            continue
        results.append({
            'launch_id': doc['launch_id'],
            'trackers': [Tracker.from_dict(t) for t in doc['trackers']],
        })
    return results
def checkout_fw(self, fworker, launch_dir, fw_id=None, host=None, ip=None):
    """
    (internal method) Finds a FireWork that's ready to be run, marks it as running,
    and returns it to the caller. The caller is responsible for running the FireWork.

    :param fworker: A FWorker instance
    :param host: the host making the request (for creating a Launch object)
    :param ip: the ip making the request (for creating a Launch object)
    :param launch_dir: the dir the FW will be run in (for creating a Launch object)
    :return: a FireWork, launch_id tuple
    """
    # TODO: this method is confusing, says AJ of Xmas past. Clean it up, remove duplication, etc.
    m_fw = self._get_a_fw_to_run(fworker.query, fw_id)
    if not m_fw:
        # nothing matching the fworker's query is ready to run
        return None, None
    # was this Launch previously reserved? If so, overwrite that reservation with this Launch
    # note that adding a new Launch is problematic from a duplicate run standpoint
    prev_reservations = [l for l in m_fw.launches if l.state == 'RESERVED']
    reserved_launch = None if len(prev_reservations) == 0 else prev_reservations[0]
    # reuse the reservation's history and launch id when present so the
    # reservation is converted in place rather than duplicated
    state_history = reserved_launch.state_history if reserved_launch else None
    l_id = reserved_launch.launch_id if reserved_launch else self.get_new_launch_id()
    # rebuild Tracker objects from their serialized form in the FW spec, if any
    trackers = [Tracker.from_dict(f) for f in m_fw.spec['_trackers']] \
        if '_trackers' in m_fw.spec else None
    m_launch = Launch('RUNNING', launch_dir, fworker, host, ip,
                      trackers=trackers, state_history=state_history,
                      launch_id=l_id, fw_id=m_fw.fw_id)
    # upsert: inserts a brand-new launch or overwrites the old reservation doc
    self.launches.find_and_modify({'launch_id': m_launch.launch_id},
                                  m_launch.to_db_dict(), upsert=True)
    self.m_logger.debug('Created/updated Launch with launch_id: {}'.format(l_id))
    if not reserved_launch:
        # we're appending a new FireWork
        m_fw.launches.append(m_launch)
    else:
        # we're updating an existing launch
        m_fw.launches = [m_launch if l.launch_id == m_launch.launch_id else l
                         for l in m_fw.launches]
    m_fw.state = 'RUNNING'
    self._upsert_fws([m_fw])
    # update any duplicated runs: other FWs sharing this launch id move to
    # RUNNING as well so they are not handed out a second time
    for fw in self.fireworks.find(
            {'launches': l_id,
             'state': {'$in': ['WAITING', 'READY', 'RESERVED', 'FIZZLED']}},
            {'fw_id': 1}):
        fw_id = fw['fw_id']
        fw = self.get_fw_by_id(fw_id)
        fw.state = 'RUNNING'
        self._upsert_fws([fw])
    self.m_logger.debug('Checked out FW with id: {}'.format(m_fw.fw_id))
    # use dict as return type, just to be compatible with multiprocessing
    return m_fw, l_id
def checkout_fw(self, fworker, launch_dir, fw_id=None, host=None, ip=None):
    """
    (internal method) Finds a FireWork that's ready to be run, marks it as running,
    and returns it to the caller. The caller is responsible for running the FireWork.

    :param fworker: A FWorker instance
    :param host: the host making the request (for creating a Launch object)
    :param ip: the ip making the request (for creating a Launch object)
    :param launch_dir: the dir the FW will be run in (for creating a Launch object)
    :return: a FireWork, launch_id tuple
    """
    # TODO: this method is confusing, says AJ of Xmas past. Clean it up, remove duplication, etc.
    m_fw = self._get_a_fw_to_run(fworker.query, fw_id)
    if not m_fw:
        # no firework matching the fworker's query is ready
        return None, None
    # was this Launch previously reserved? If so, overwrite that reservation with this Launch
    # note that adding a new Launch is problematic from a duplicate run standpoint
    prev_reservations = [l for l in m_fw.launches if l.state == 'RESERVED']
    reserved_launch = None if len(prev_reservations) == 0 else prev_reservations[0]
    # when a reservation exists, its state history and launch id are carried
    # over so the RESERVED launch is promoted in place
    state_history = reserved_launch.state_history if reserved_launch else None
    l_id = reserved_launch.launch_id if reserved_launch else self.get_new_launch_id()
    # deserialize trackers stored in the FW spec (None when not configured)
    trackers = [Tracker.from_dict(f) for f in m_fw.spec['_trackers']] \
        if '_trackers' in m_fw.spec else None
    m_launch = Launch('RUNNING', launch_dir, fworker, host, ip,
                      trackers=trackers, state_history=state_history,
                      launch_id=l_id, fw_id=m_fw.fw_id)
    # upsert the launch document (insert new, or replace the reservation)
    self.launches.find_and_modify({'launch_id': m_launch.launch_id},
                                  m_launch.to_db_dict(), upsert=True)
    self.m_logger.debug('Created/updated Launch with launch_id: {}'.format(l_id))
    if not reserved_launch:
        # we're appending a new FireWork
        m_fw.launches.append(m_launch)
    else:
        # we're updating an existing launch
        m_fw.launches = [m_launch if l.launch_id == m_launch.launch_id else l
                         for l in m_fw.launches]
    m_fw.state = 'RUNNING'
    self._upsert_fws([m_fw])
    # update any duplicated runs: flip sibling FWs that share this launch id
    # to RUNNING so they are not checked out again
    for fw in self.fireworks.find(
            {'launches': l_id,
             'state': {'$in': ['WAITING', 'READY', 'RESERVED', 'FIZZLED']}},
            {'fw_id': 1}):
        fw_id = fw['fw_id']
        fw = self.get_fw_by_id(fw_id)
        fw.state = 'RUNNING'
        self._upsert_fws([fw])
    self.m_logger.debug('Checked out FW with id: {}'.format(m_fw.fw_id))
    return m_fw, l_id
def reserve_fw(self, fworker, launch_dir, host=None, ip=None):
    """Reserve a ready FireWork: create a RESERVED Launch for it and
    persist both.

    Returns a ``(FireWork, launch_id)`` tuple, or ``(None, None)`` when
    nothing matching the fworker's query is ready to run.
    """
    m_fw = self._get_a_fw_to_run(fworker.query)
    if not m_fw:
        return None, None
    # create a launch
    # TODO: this code is duplicated with checkout_fw with minimal mods, should refactor this!!
    new_launch_id = self.get_new_launch_id()
    # deserialize any trackers stored in the FW spec
    if '_trackers' in m_fw.spec:
        file_trackers = [Tracker.from_dict(f) for f in m_fw.spec['_trackers']]
    else:
        file_trackers = None
    reserved = Launch('RESERVED', launch_dir, fworker, host, ip,
                      trackers=file_trackers, launch_id=new_launch_id,
                      fw_id=m_fw.fw_id)
    self.launches.find_and_modify({'launch_id': reserved.launch_id},
                                  reserved.to_db_dict(), upsert=True)
    # add launch to FW and flip its state
    m_fw.launches.append(reserved)
    m_fw.state = 'RESERVED'
    self._upsert_fws([m_fw])
    self.m_logger.debug('Reserved FW with id: {}'.format(m_fw.fw_id))
    return m_fw, new_launch_id
def _reserve_fw(self, fworker, launch_dir, host=None, ip=None):
    """(internal) Pick a ready FireWork, attach a new RESERVED Launch to
    it, upsert both, and return ``(FireWork, launch_id)``.

    Returns ``(None, None)`` when no firework is ready.
    """
    m_fw = self._get_a_fw_to_run(fworker.query)
    if not m_fw:
        return None, None
    # create a launch
    # TODO: this code is duplicated with checkout_fw with minimal mods, should refactor this!!
    lid = self.get_new_launch_id()
    spec = m_fw.spec
    tracker_objs = ([Tracker.from_dict(f) for f in spec['_trackers']]
                    if '_trackers' in spec else None)
    m_launch = Launch('RESERVED', launch_dir, fworker, host, ip,
                      trackers=tracker_objs, launch_id=lid,
                      fw_id=m_fw.fw_id)
    self.launches.find_and_modify({'launch_id': m_launch.launch_id},
                                  m_launch.to_db_dict(), upsert=True)
    # add launch to FW, mark it reserved, and persist
    m_fw.launches.append(m_launch)
    m_fw.state = 'RESERVED'
    self._upsert_fws([m_fw])
    self.m_logger.debug('Reserved FW with id: {}'.format(m_fw.fw_id))
    return m_fw, lid
def add_trackers(original_wf, tracked_files=None, nlines=25):
    """
    Every FireWork that runs VASP also tracks the OUTCAR, OSZICAR, etc using FWS Trackers.

    Args:
        original_wf (Workflow)
        tracked_files (list) : list of files to be tracked
        nlines (int): number of lines at the end of files to be tracked

    Returns:
        Workflow: a rebuilt copy of the workflow with the trackers attached
        to every FireWork containing a RunVasp task.
    """
    # PEP 8: compare to None with `is`, not `==` (== can misfire on types
    # that override __eq__)
    if tracked_files is None:
        tracked_files = ["OUTCAR", "OSZICAR"]
    trackers = [
        Tracker(f, nlines=nlines, allow_zipped=True) for f in tracked_files
    ]
    # round-trip through the dict form so the returned Workflow is rebuilt
    # with the updated specs
    wf_dict = original_wf.to_dict()
    for idx_fw, idx_t in get_fws_and_tasks(original_wf,
                                           task_name_constraint="RunVasp"):
        spec = wf_dict["fws"][idx_fw]["spec"]
        if "_trackers" in spec:
            spec["_trackers"].extend(trackers)
        else:
            spec["_trackers"] = trackers
    return Workflow.from_dict(wf_dict)
def run_task(self, fw_spec):
    """Build and return (as FWAction additions) the electronic-structure
    follow-up workflow: a static run, a uniform non-SCF run, a band-structure
    (line-mode) run — each followed by a DB insertion — and optionally a
    Boltztrap run, all sized by the band gap of the previous calculation.

    NOTE: this is Python 2 code (print statements).
    """
    # give MongoDB time to settle before adding new workflows
    print 'sleeping 10s for Mongo'
    time.sleep(10)
    print 'done sleeping'
    print 'the gap is {}, the cutoff is {}'.format(
        fw_spec['analysis']['bandgap'], self.gap_cutoff)
    # gap >= cutoff -> sparser k-point densities; below cutoff -> denser ones
    if fw_spec['analysis']['bandgap'] >= self.gap_cutoff:
        static_dens = 90
        uniform_dens = 1000
        line_dens = 20
    else:
        static_dens = 450
        uniform_dens = 1500
        line_dens = 30
    # near-metallic gap: override INCAR smearing (ISMEAR=1, SIGMA=0.2);
    # otherwise keep the input-set defaults
    if fw_spec['analysis']['bandgap'] <= self.metal_cutoff:
        user_incar_settings = {"ISMEAR": 1, "SIGMA": 0.2}
    else:
        user_incar_settings = {}
    print 'Adding more runs...'
    type_name = 'GGA+U' if 'GGA+U' in fw_spec['prev_task_type'] else 'GGA'
    snl = StructureNL.from_dict(fw_spec['mpsnl'])
    f = Composition(
        snl.structure.composition.reduced_formula).alphabetical_formula
    fws = []
    connections = {}
    priority = fw_spec['_priority']
    # trackers for VASP fireworks vs. the lighter DB-insertion fireworks
    trackers = [
        Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'),
        Tracker('OUTCAR'), Tracker('OSZICAR')
    ]
    trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')]

    # run GGA static
    spec = fw_spec  # pass all the items from the current spec to the new
    spec.update({
        'task_type': '{} static v2'.format(type_name),
        '_queueadapter': QA_VASP_SMALL,
        '_dupefinder': DupeFinderVasp().to_dict(),
        '_priority': priority,
        '_trackers': trackers
    })
    fws.append(
        Firework([
            VaspCopyTask({
                'use_CONTCAR': True,
                'skip_CHGCAR': True
            }),
            SetupStaticRunTask({
                "kpoints_density": static_dens,
                'user_incar_settings': user_incar_settings
            }),
            get_custodian_task(spec)
        ], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=-10))

    # insert into DB - GGA static
    spec = {
        'task_type': 'VASP db insertion',
        '_queueadapter': QA_DB,
        '_allow_fizzled_parents': True,
        '_priority': priority * 2,
        "_dupefinder": DupeFinderDB().to_dict(),
        '_trackers': trackers_db
    }
    fws.append(
        Firework([VaspToDBTask()], spec,
                 name=get_slug(f + '--' + spec['task_type']), fw_id=-9))
    connections[-10] = -9

    # run GGA Uniform
    spec = {
        'task_type': '{} Uniform v2'.format(type_name),
        '_queueadapter': QA_VASP,
        '_dupefinder': DupeFinderVasp().to_dict(),
        '_priority': priority,
        '_trackers': trackers
    }
    fws.append(
        Firework([
            VaspCopyTask({'use_CONTCAR': False}),
            SetupNonSCFTask({
                'mode': 'uniform',
                "kpoints_density": uniform_dens
            }),
            get_custodian_task(spec)
        ], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=-8))
    connections[-9] = -8

    # insert into DB - GGA Uniform
    spec = {
        'task_type': 'VASP db insertion',
        '_queueadapter': QA_DB,
        '_allow_fizzled_parents': True,
        '_priority': priority * 2,
        "_dupefinder": DupeFinderDB().to_dict(),
        '_trackers': trackers_db
    }
    fws.append(
        Firework([VaspToDBTask({'parse_uniform': True})], spec,
                 name=get_slug(f + '--' + spec['task_type']), fw_id=-7))
    connections[-8] = -7

    # run GGA Band structure
    spec = {
        'task_type': '{} band structure v2'.format(type_name),
        '_queueadapter': QA_VASP,
        '_dupefinder': DupeFinderVasp().to_dict(),
        '_priority': priority,
        '_trackers': trackers
    }
    fws.append(
        Firework([
            VaspCopyTask({'use_CONTCAR': False}),
            SetupNonSCFTask({
                'mode': 'line',
                "kpoints_line_density": line_dens
            }),
            get_custodian_task(spec)
        ], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=-6))
    # stored as a list (unlike the scalar entries above) so the optional
    # Boltztrap child can be appended below
    connections[-7] = [-6]

    # insert into DB - GGA Band structure
    spec = {
        'task_type': 'VASP db insertion',
        '_queueadapter': QA_DB,
        '_allow_fizzled_parents': True,
        '_priority': priority * 2,
        "_dupefinder": DupeFinderDB().to_dict(),
        '_trackers': trackers_db
    }
    fws.append(
        Firework([VaspToDBTask({})], spec,
                 name=get_slug(f + '--' + spec['task_type']), fw_id=-5))
    connections[-6] = -5

    if fw_spec.get('parameters') and fw_spec['parameters'].get('boltztrap'):
        # run Boltztrap (import deferred so it is only needed when requested)
        from mpworks.firetasks.boltztrap_tasks import BoltztrapRunTask
        spec = {
            'task_type': '{} Boltztrap'.format(type_name),
            '_queueadapter': QA_DB,
            '_dupefinder': DupeFinderDB().to_dict(),
            '_priority': priority
        }
        fws.append(
            Firework([BoltztrapRunTask()], spec,
                     name=get_slug(f + '--' + spec['task_type']), fw_id=-4))
        connections[-7].append(-4)

    wf = Workflow(fws, connections)
    print 'Done adding more runs...'
    return FWAction(additions=wf)
def snl_to_wf(snl, parameters=None):
    """Turn a StructureNL into a full VASP workflow: optional SNL-database
    insertion, GGA structure optimization + DB insertion, then (depending on
    the structure and parameters) a GGA+U pass and/or an electronic-structure
    controller firework.

    :param snl: a StructureNL (or MPStructureNL) describing the structure
    :param parameters: optional dict of workflow options (priority,
        snlgroup_id/mpsnl overrides, skip_bandstructure,
        force_gga_bandstructure, ...)
    :return: a Workflow
    """
    fws = []
    connections = defaultdict(list)
    parameters = parameters if parameters else {}

    snl_priority = parameters.get('priority', 1)
    priority = snl_priority * 2  # once we start a job, keep going!

    f = Composition(
        snl.structure.composition.reduced_formula).alphabetical_formula

    snl_spec = {}
    if 'snlgroup_id' in parameters:
        # caller forces a pre-existing SNL group: take the mpsnl from the
        # parameters or from the SNL itself
        if 'mpsnl' in parameters:
            snl_spec['mpsnl'] = parameters['mpsnl']
        elif isinstance(snl, MPStructureNL):
            snl_spec['mpsnl'] = snl.as_dict()
        else:
            raise ValueError("improper use of force SNL")
        snl_spec['snlgroup_id'] = parameters['snlgroup_id']
    else:
        # add the SNL to the SNL DB and figure out duplicate group
        tasks = [AddSNLTask()]
        spec = {
            'task_type': 'Add to SNL database',
            'snl': snl.as_dict(),
            '_queueadapter': QA_DB,
            '_priority': snl_priority
        }
        fws.append(
            Firework(tasks, spec,
                     name=get_slug(f + '--' + spec['task_type']), fw_id=0))
        connections[0] = [1]

    # trackers for the VASP fireworks vs. the lighter DB-insertion fireworks
    trackers = [
        Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'),
        Tracker('OUTCAR'), Tracker('OSZICAR'), Tracker('OUTCAR.relax1'),
        Tracker('OUTCAR.relax2')
    ]
    trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')]

    # run GGA structure optimization
    spec = _snl_to_spec(snl, enforce_gga=True, parameters=parameters)
    spec.update(snl_spec)
    spec['_priority'] = priority
    spec['_queueadapter'] = QA_VASP
    spec['_trackers'] = trackers
    tasks = [VaspWriterTask(), get_custodian_task(spec)]
    fws.append(
        Firework(tasks, spec,
                 name=get_slug(f + '--' + spec['task_type']), fw_id=1))

    # insert into DB - GGA structure optimization
    spec = {
        'task_type': 'VASP db insertion',
        '_priority': priority * 2,
        '_allow_fizzled_parents': True,
        '_queueadapter': QA_DB,
        "_dupefinder": DupeFinderDB().to_dict(),
        '_trackers': trackers_db
    }
    fws.append(
        Firework([VaspToDBTask()], spec,
                 name=get_slug(f + '--' + spec['task_type']), fw_id=2))
    connections[1] = [2]

    # determine if GGA+U FW is needed
    incar = MPVaspInputSet().get_incar(snl.structure).as_dict()
    ggau_compound = ('LDAU' in incar and incar['LDAU'])

    # plain-GGA band structure: only when not skipped and either no +U is
    # needed or a GGA band structure is explicitly forced
    if not parameters.get('skip_bandstructure', False) and (
            not ggau_compound
            or parameters.get('force_gga_bandstructure', False)):
        spec = {
            'task_type': 'Controller: add Electronic Structure v2',
            '_priority': priority,
            '_queueadapter': QA_CONTROL
        }
        fws.append(
            Firework([AddEStructureTask()], spec,
                     name=get_slug(f + '--' + spec['task_type']), fw_id=3))
        connections[2] = [3]

    if ggau_compound:
        spec = _snl_to_spec(snl, enforce_gga=False, parameters=parameters)
        del spec[
            'vasp']  # we are stealing all VASP params and such from previous run
        spec['_priority'] = priority
        spec['_queueadapter'] = QA_VASP
        spec['_trackers'] = trackers
        fws.append(
            Firework(
                [VaspCopyTask(), SetupGGAUTask(), get_custodian_task(spec)],
                spec, name=get_slug(f + '--' + spec['task_type']), fw_id=10))
        connections[2].append(10)

        spec = {
            'task_type': 'VASP db insertion',
            '_queueadapter': QA_DB,
            '_allow_fizzled_parents': True,
            '_priority': priority,
            "_dupefinder": DupeFinderDB().to_dict(),
            '_trackers': trackers_db
        }
        fws.append(
            Firework([VaspToDBTask()], spec,
                     name=get_slug(f + '--' + spec['task_type']), fw_id=11))
        connections[10] = [11]

        # electronic structure of the GGA+U run, unless skipped
        if not parameters.get('skip_bandstructure', False):
            spec = {
                'task_type': 'Controller: add Electronic Structure v2',
                '_priority': priority,
                '_queueadapter': QA_CONTROL
            }
            fws.append(
                Firework([AddEStructureTask()], spec,
                         name=get_slug(f + '--' + spec['task_type']),
                         fw_id=12))
            connections[11] = [12]

    wf_meta = get_meta_from_structure(snl.structure)
    wf_meta['run_version'] = 'May 2013 (1)'

    # propagate the submission id from the SNL metadata when present
    if '_materialsproject' in snl.data and 'submission_id' in snl.data[
            '_materialsproject']:
        wf_meta['submission_id'] = snl.data['_materialsproject'][
            'submission_id']

    return Workflow(
        fws,
        connections,
        name=Composition(
            snl.structure.composition.reduced_formula).alphabetical_formula,
        metadata=wf_meta)
class TrackerTest(unittest.TestCase):
    """Integration tests for FWS Trackers: run real rockets against a local
    MongoDB and check that each tracker reports the last nlines of its file.

    Skipped entirely when MongoDB is not reachable on localhost:27017.
    """

    @classmethod
    def setUpClass(cls):
        cls.lp = None
        cls.fworker = FWorker()
        try:
            cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
            cls.lp.reset(password=None, require_password=False)
        except:
            raise unittest.SkipTest("MongoDB is not running in localhost:27017! Skipping tests.")

    @classmethod
    def tearDownClass(cls):
        # drop the scratch test database
        if cls.lp:
            cls.lp.connection.drop_database(TESTDB_NAME)

    def setUp(self):
        self.old_wd = os.getcwd()
        # two scratch files, each with a tracker reporting its last 2 lines
        self.dest1 = os.path.join(MODULE_DIR, 'numbers1.txt')
        self.dest2 = os.path.join(MODULE_DIR, 'numbers2.txt')
        self.tracker1 = Tracker(self.dest1, nlines=2)
        self.tracker2 = Tracker(self.dest2, nlines=2)

    def tearDown(self):
        # reset the launchpad and clean up any files the launch produced
        self.lp.reset(password=None, require_password=False)
        if os.path.exists(os.path.join('FW.json')):
            os.remove('FW.json')
        os.chdir(self.old_wd)
        for i in glob.glob(os.path.join(MODULE_DIR, 'launcher_*')):
            shutil.rmtree(i)

    def _teardown(self, dests):
        # remove the given destination files if present
        for f in dests:
            if os.path.exists(f):
                os.remove(f)

    def test_tracker(self):
        """
        Launch a workflow and track the files
        """
        self._teardown([self.dest1])
        try:
            # 95 tasks, each appending one number (5..99) to dest1
            fts = []
            for i in range(5, 100):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {'store_stdout': True})
                fts.append(ft)
            fw = Firework(fts, spec={'_trackers': [self.tracker1]},
                          fw_id=20, name='test_fw')
            self.lp.add_wf(fw)
            launch_rocket(self.lp, self.fworker)
            # the tracker should report the final two lines written
            #print (self.tracker1.track_file())
            self.assertEqual('98\n99', self.tracker1.track_file())
        finally:
            self._teardown([self.dest1])

    def test_tracker_failed_fw(self):
        """
        Add a bad firetask to workflow and test the tracking
        """
        self._teardown([self.dest1])
        try:
            fts = []
            for i in range(5, 50):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {'store_stdout': True})
                fts.append(ft)
            # deliberately failing task in the middle of the sequence
            fts.append(ScriptTask.from_str('cat 4 >> ' + self.dest1))
            for i in range(51, 100):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {'store_stdout': True})
                fts.append(ft)
            fw = Firework(fts, spec={'_trackers': [self.tracker1]},
                          fw_id=21, name='test_fw')
            self.lp.add_wf(fw)
            try:
                print("===========================================")
                print("Bad rocket launched. The failure below is OK")
                print("===========================================")
                launch_rocket(self.lp, self.fworker)
            except:
                pass
            # only the tasks before the failure should have run
            self.assertEqual('48\n49', self.tracker1.track_file())
        finally:
            self._teardown([self.dest1])

    def test_tracker_mlaunch(self):
        """
        Test the tracker for mlaunch
        """
        self._teardown([self.dest1, self.dest2])
        try:
            def add_wf(j, dest, tracker, name):
                # two chained fireworks, together appending j..j+49 to dest
                fts = []
                for i in range(j, j + 25):
                    ft = ScriptTask.from_str(
                        'echo "' + str(i) + '" >> ' + dest,
                        {'store_stdout': True})
                    fts.append(ft)
                fw1 = Firework(fts, spec={'_trackers': [tracker]},
                               fw_id=j + 1, name=name + '1')
                fts = []
                for i in range(j + 25, j + 50):
                    ft = ScriptTask.from_str(
                        'echo "' + str(i) + '" >> ' + dest,
                        {'store_stdout': True})
                    fts.append(ft)
                fw2 = Firework(fts, spec={'_trackers': [tracker]},
                               fw_id=j + 2, name=name + '2')
                wf = Workflow([fw1, fw2], links_dict={fw1: [fw2]})
                self.lp.add_wf(wf)

            add_wf(0, self.dest1, self.tracker1, 'a_test')
            add_wf(50, self.dest2, self.tracker2, 'b_test')
            try:
                launch_multiprocess(self.lp, self.fworker, 'ERROR', 0, 2, 0,
                                    ppn=2)
            except:
                pass
            # each tracker should report the last two numbers of its own file
            self.assertEqual('48\n49', self.tracker1.track_file())
            self.assertEqual('98\n99', self.tracker2.track_file())
        finally:
            self._teardown([self.dest1, self.dest2])
            # remove launcher dirs created under the current working directory
            pwd = os.getcwd()
            for ldir in glob.glob(os.path.join(pwd, 'launcher_*')):
                shutil.rmtree(ldir)
            pass
class TrackerTest(unittest.TestCase):
    """Integration tests for FWS Trackers: launch real rockets against a
    local MongoDB and verify each tracker reports the last nlines of the
    file it watches.

    Skipped entirely when MongoDB is not reachable on localhost:27017.
    """

    @classmethod
    def setUpClass(cls):
        cls.lp = None
        cls.fworker = FWorker()
        try:
            cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl="ERROR")
            cls.lp.reset(password=None, require_password=False)
        except Exception:
            raise unittest.SkipTest(
                "MongoDB is not running in localhost:27017! Skipping tests.")

    @classmethod
    def tearDownClass(cls):
        # drop the scratch test database
        if cls.lp:
            cls.lp.connection.drop_database(TESTDB_NAME)

    def setUp(self):
        self.old_wd = os.getcwd()
        # two scratch files, each with a tracker reporting its last 2 lines
        self.dest1 = os.path.join(MODULE_DIR, "numbers1.txt")
        self.dest2 = os.path.join(MODULE_DIR, "numbers2.txt")
        self.tracker1 = Tracker(self.dest1, nlines=2)
        self.tracker2 = Tracker(self.dest2, nlines=2)

    def tearDown(self):
        # reset the launchpad and clean up any files the launch produced
        self.lp.reset(password=None, require_password=False)
        if os.path.exists(os.path.join("FW.json")):
            os.remove("FW.json")
        os.chdir(self.old_wd)
        for i in glob.glob(os.path.join(MODULE_DIR, "launcher_*")):
            shutil.rmtree(i)

    @staticmethod
    def _teardown(dests):
        # remove the given destination files if present
        for f in dests:
            if os.path.exists(f):
                os.remove(f)

    def test_tracker(self):
        """
        Launch a workflow and track the files
        """
        self._teardown([self.dest1])
        try:
            # 95 tasks, each appending one number (5..99) to dest1
            fts = []
            for i in range(5, 100):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {"store_stdout": True})
                fts.append(ft)
            fw = Firework(fts, spec={"_trackers": [self.tracker1]},
                          fw_id=20, name="test_fw")
            self.lp.add_wf(fw)
            launch_rocket(self.lp, self.fworker)
            # the tracker should report the final two lines written
            # print (self.tracker1.track_file())
            self.assertEqual("98\n99", self.tracker1.track_file())
        finally:
            self._teardown([self.dest1])

    def test_tracker_failed_fw(self):
        """
        Add a bad firetask to workflow and test the tracking
        """
        self._teardown([self.dest1])
        try:
            fts = []
            for i in range(5, 50):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {"store_stdout": True})
                fts.append(ft)
            # deliberately failing task in the middle of the sequence
            fts.append(ScriptTask.from_str("cat 4 >> " + self.dest1))
            for i in range(51, 100):
                ft = ScriptTask.from_str(
                    'echo "' + str(i) + '" >> ' + self.dest1,
                    {"store_stdout": True})
                fts.append(ft)
            fw = Firework(fts, spec={"_trackers": [self.tracker1]},
                          fw_id=21, name="test_fw")
            self.lp.add_wf(fw)
            try:
                print("===========================================")
                print("Bad rocket launched. The failure below is OK")
                print("===========================================")
                launch_rocket(self.lp, self.fworker)
            except Exception:
                pass
            # only the tasks before the failure should have run
            self.assertEqual("48\n49", self.tracker1.track_file())
        finally:
            self._teardown([self.dest1])

    def test_tracker_mlaunch(self):
        """
        Test the tracker for mlaunch
        """
        self._teardown([self.dest1, self.dest2])
        try:
            def add_wf(j, dest, tracker, name):
                # two chained fireworks, together appending j..j+49 to dest
                fts = []
                for i in range(j, j + 25):
                    ft = ScriptTask.from_str(
                        'echo "' + str(i) + '" >> ' + dest,
                        {"store_stdout": True})
                    fts.append(ft)
                fw1 = Firework(fts, spec={"_trackers": [tracker]},
                               fw_id=j + 1, name=name + "1")
                fts = []
                for i in range(j + 25, j + 50):
                    ft = ScriptTask.from_str(
                        'echo "' + str(i) + '" >> ' + dest,
                        {"store_stdout": True})
                    fts.append(ft)
                fw2 = Firework(fts, spec={"_trackers": [tracker]},
                               fw_id=j + 2, name=name + "2")
                wf = Workflow([fw1, fw2], links_dict={fw1: [fw2]})
                self.lp.add_wf(wf)

            add_wf(0, self.dest1, self.tracker1, "a_test")
            add_wf(50, self.dest2, self.tracker2, "b_test")
            try:
                launch_multiprocess(self.lp, self.fworker, "ERROR", 0, 2, 0,
                                    ppn=2)
            except Exception:
                pass
            # each tracker should report the last two numbers of its own file
            self.assertEqual("48\n49", self.tracker1.track_file())
            self.assertEqual("98\n99", self.tracker2.track_file())
        finally:
            self._teardown([self.dest1, self.dest2])
            # remove launcher dirs created under the current working directory
            pwd = os.getcwd()
            for ldir in glob.glob(os.path.join(pwd, "launcher_*")):
                shutil.rmtree(ldir)