def test_platform(n=2, sleep_dur=10):
    """Check that concurrent apps land on distinct nodes.

    Launches ``n`` apps that each sleep for ``sleep_dur`` seconds; the sleep
    forces them to run concurrently, so on a multi-node site they should be
    scheduled onto different workers on different nodes.

    Args:
        n: number of apps to launch, and the number of distinct nodes expected.
        sleep_dur: per-app sleep duration, long enough to force overlap.

    Returns:
        True on success; returns early (None) when the config is the
        single-node ``htex_local`` executor, where the check cannot hold.
    """
    config = fresh_config()
    # Single-node local executor can never span two nodes — skip.
    if config.executors[0].label == "htex_local":
        return

    # FIX: load the config we just inspected. The original called
    # fresh_config() a second time here, so the loaded config was not
    # necessarily the one whose label was checked above.
    parsl.load(config)

    dfk = parsl.dfk()
    name = list(dfk.executors.keys())[0]
    print("Trying to get executor : ", name)

    # Warm-up tasks so workers are connected before the real measurement.
    warmup = [platform(sleep=1) for _ in range(2)]
    print([fut.result() for fut in warmup])

    print("Executor : ", dfk.executors[name])
    print("Connected : ", dfk.executors[name].connected_workers)
    print("Outstanding : ", dfk.executors[name].outstanding)

    # Launch the n long-sleeping apps that must spread across nodes.
    futures = [platform(sleep=sleep_dur) for _ in range(n)]

    # Each result identifies the node it ran on; a set collapses duplicates.
    pinfo = set(fut.result() for fut in futures)
    # FIX: compare against the requested count n instead of a hard-coded 2,
    # so the test generalizes (default n=2 preserves prior behavior).
    assert len(pinfo) == n, "Expected {} nodes, instead got {}".format(n, pinfo)

    print("Test passed")
    dfk.cleanup()
    parsl.clear()
    return True
def test_provider():
    """ Provider scaling

    Loads a fresh config, runs one trivial app, then verifies that the
    executor starts with exactly one provider job in RUNNING state, scales
    in one block, and that the scaled-in block reaches a terminal state
    while remaining tracked in the executor's job list.
    """
    logger.info("Starting test_provider")
    config = fresh_config()
    # Label of the first (and only relevant) executor in the fresh config.
    name = config.executors[0].label
    parsl.load(config)
    dfk = parsl.dfk()
    logger.info("Trying to get executor : {}".format(name))
    # Run one zero-sleep app so the executor actually starts a block.
    x = platform(sleep=0)
    logger.info("Result is {}".format(x.result()))
    executor = dfk.executors[name]
    provider = dfk.executors[name].provider
    # At this point we should have 1 job
    # NOTE: uses the executor's internal block/job bookkeeping API.
    _, current_jobs = executor._get_block_and_job_ids()
    assert len(current_jobs) == 1, "Expected 1 job at init, got {}".format(
        len(current_jobs))
    logger.info("Getting provider status (1)")
    status = provider.status(current_jobs)
    logger.info("Got provider status")
    assert status[
        0].state == JobState.RUNNING, "Expected job to be in state RUNNING"
    # Scale down to 0
    scale_in_blocks = executor.scale_in(blocks=1)
    # Give the provider time to actually terminate the block before polling.
    logger.info("Now sleeping 60 seconds")
    time.sleep(60)
    logger.info("Sleep finished")
    logger.info("Getting provider status (2)")
    status = executor.status()
    logger.info("Got executor status")
    logger.info("Block status: {}".format(status))
    # The scaled-in block must have reached a terminal (non-running) state.
    assert status[scale_in_blocks[0]].terminal is True, "Terminal state"
    logger.info("Job in terminal state")
    _, current_jobs = executor._get_block_and_job_ids()
    # PR 1952 stoped removing scale_in blocks from self.blocks
    # A new PR will handle removing blocks from self.block
    # this includes failed/completed/canceled blocks
    assert len(current_jobs) == 1, "Expected current_jobs == 1"
    dfk.cleanup()
    parsl.clear()
    logger.info("Ended test_provider")
    return True
def test_provider():
    """ Provider scaling

    Verifies that one job is running after a trivial app completes, then
    scales in a block and checks the job terminates and is removed from
    the executor's job list.
    """
    logger.info("Starting test_provider")
    config = fresh_config()
    name = config.executors[0].label
    parsl.load(config)
    dfk = parsl.dfk()

    logger.info("Trying to get executor : {}".format(name))

    # Run a zero-sleep app so a block is actually provisioned.
    fut = platform(sleep=0)
    logger.info("Result is {}".format(fut.result()))

    executor = dfk.executors[name]
    provider = executor.provider

    # Exactly one provider job should exist once the app has run.
    job_ids = executor._get_job_ids()
    assert len(job_ids) == 1, "Expected 1 job at init, got {}".format(len(job_ids))

    logger.info("Getting provider status (1)")
    initial_status = provider.status(job_ids)
    logger.info("Got provider status")
    assert initial_status[0].state == JobState.RUNNING, "Expected job to be in state RUNNING"

    # Scale in one block, then wait for the provider to tear it down.
    scaled_in = executor.scale_in(blocks=1)
    logger.info("Now sleeping 60 seconds")
    time.sleep(60)
    logger.info("Sleep finished")

    logger.info("Getting provider status (2)")
    final_status = provider.status(scaled_in)
    logger.info("Got provider status")
    logger.info("Block status: {}".format(final_status))
    assert final_status[0].terminal is True, "Terminal state"
    logger.info("Job in terminal state")

    # After scale-in, the executor should track no remaining jobs.
    job_ids = executor._get_job_ids()
    assert len(job_ids) == 0, "Expected current_jobs == 0"

    parsl.clear()
    del dfk
    logger.info("Ended test_provider")
    return True
def test_provider():
    """ Provider scaling

    Runs a trivial app, confirms a single RUNNING provider job, scales in
    one block, and checks the job reaches a terminal state and disappears
    from the executor's job list.
    """
    config = fresh_config()
    name = config.executors[0].label
    parsl.load(config)
    dfk = parsl.dfk()

    print("Trying to get executor : ", name)

    # A zero-sleep app forces the executor to provision a block.
    fut = platform(sleep=0)
    print(fut.result())

    executor = dfk.executors[name]
    provider = executor.provider

    # Exactly one provider job should exist after the app completes.
    job_ids = executor._get_job_ids()
    assert len(job_ids) == 1, "Expected 1 job at init, got {}".format(len(job_ids))

    running = provider.status(job_ids)
    assert running[0].state == JobState.RUNNING, "Expected job to be in state RUNNING"

    # Scale in one block and give the provider time to terminate it.
    scaled_in = executor.scale_in(blocks=1)
    time.sleep(60)

    terminated = provider.status(scaled_in)
    print("Block status: ", terminated)
    assert terminated[0].terminal is True, "Terminal state"
    print("Job in terminal state")

    # No jobs should remain tracked after the scale-in completes.
    job_ids = executor._get_job_ids()
    assert len(job_ids) == 0, "Expected current_jobs == 0"

    parsl.clear()
    del dfk
    return True