def test_db_all_in_one_quotas_1(monkeypatch):
    """Check start times when quotas cap running jobs and resource-hours.

    Quota rule format: quotas[queue, project, job_type, user] = [a, b, c]
    where a = maximum used resources, b = maximum number of running jobs,
    and c = maximum resource-hours (float).
    """
    create_quotas_rules_file('{"quotas": {"*,*,*,/": [-1, 1, -1], "/,*,*,*": [-1, -1, 0.55]}}')

    # Three jobs competing for the same single resource.
    for duration in (100, 200, 200):
        insert_job(res=[(duration, [('resource_id=1', "")])], properties="", user="******")

    t_ref = get_date()
    meta_schedule('internal')

    starts = []
    for pred in db['GanttJobsPrediction'].query.order_by(GanttJobsPrediction.moldable_id).all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time - t_ref)
        starts.append(pred.start_time - t_ref)
    assert starts == [0, 160, 420]
def test_db_all_in_one_quotas_2(monkeypatch):
    """Quotas must delay jobs scheduled behind an advance reservation.

    Quota rule format: quotas[queue, project, job_type, user] = [a, b, c]
    where a = maximum used resources, b = maximum number of running jobs,
    and c = maximum resource-hours (float).
    """
    create_quotas_rules_file('{"quotas": {"*,*,*,/": [-1, 1, -1]}}')

    # Submit and allocate an Advance Reservation
    t0 = get_date()
    insert_and_sched_ar(t0 + 100)

    # Submit other jobs
    insert_job(res=[(100, [('resource_id=1', "")])], properties="", user="******")
    insert_job(res=[(200, [('resource_id=1', "")])], properties="", user="******")

    t1 = get_date()
    meta_schedule('internal')

    res = []
    # Fix: order rows by moldable_id so res[0..2] are deterministic (as in
    # test_db_all_in_one_quotas_1); a bare .all() has backend-dependent row
    # ordering and makes the offset assertions below flaky.
    for i in db['GanttJobsPrediction'].query.order_by(GanttJobsPrediction.moldable_id).all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - t1)
        res.append(i.start_time - t1)

    assert (res[1] - res[0]) == 120
    assert (res[2] - res[0]) == 280
def test_db_all_in_one_BE_to_kill(monkeypatch):
    """A regular job must trigger fragging of the running besteffort job."""
    os.environ['USER'] = '******'  # to allow fragging
    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')

    # First pass: only the besteffort job exists, it gets launched.
    insert_job(res=[(100, [('resource_id=2', "")])], queue_name='besteffort', types=['besteffort'])
    meta_schedule('internal')
    be_job = db['Job'].query.one()
    assert be_job.state == 'toLaunch'

    # Second pass: a normal job that needs all 5 resources must evict it.
    insert_job(res=[(100, [('resource_id=5', "")])])
    meta_schedule('internal')

    all_jobs = db['Job'].query.all()
    print(all_jobs[0].state, all_jobs[1].state)
    print("frag...", db['FragJob'].query.one())
    frag = db['FragJob'].query.one()
    assert all_jobs[0].state == 'toLaunch'
    assert all_jobs[1].state == 'Waiting'
    assert frag.job_id == all_jobs[0].id
def insert_and_sched_ar(start_time, walltime=60):
    """Insert an advance reservation, run a scheduling pass, return the job row."""
    insert_job(res=[(walltime, [('resource_id=4', "")])],
               reservation='toSchedule',
               start_time=start_time,
               info_type='localhost:4242')
    meta_schedule('internal')
    return db['Job'].query.one()
def scheduleJobs(self):
    """Run one scheduling round and hand newly started jobs to Batsim.

    Two back-ends: "simu" drives the internal schedule_cycle simulator,
    anything else drives OAR's meta_schedule against the database. Jobs
    whose start time is now are flipped to "Running" and forwarded to
    Batsim via self.bs.start_jobs.
    """
    print("Sheduling Round")
    real_time = time.time()
    # Fix: initialize before the branch — originally this list was only
    # created inside the "simu" branch, so the meta_schedule path raised
    # NameError on append (sched_loop already initializes it up front).
    jids_to_launch = []
    if self.platform_model == "simu":
        schedule_cycle(self.platform, self.env.now, "default")
        # retrieve jobs to launch
        for jid, job in iteritems(self.platform.assigned_jobs):
            print("job.start_time %s" % job.start_time)
            if (job.start_time == self.env.now) and (job.state == "Waiting"):
                self.waiting_jids.remove(jid)
                jids_to_launch.append(jid)
                job.state = "Running"
                print("tolaunch: %s" % jid)
                self.platform.running_jids.append(jid)
    else:
        print("call meta_schedule('internal')")
        meta_schedule("internal", plt)
        # Jobs the meta-scheduler marked "toLaunch" are started here.
        result = db.query(Job).filter(Job.state == "toLaunch").order_by(Job.id).all()
        for job_db in result:
            set_job_state(job_db.id, "Running")
            jid = self.db_jid2s_jid[job_db.id]
            self.waiting_jids.remove(jid)
            jids_to_launch.append(jid)
            self.jobs[jid].state = "Running"
            print("_tolaunch: %s" % jid)
            self.platform.running_jids.append(jid)

    print("Ids of jobs to launch: ", *jids_to_launch)
    print("Time befort scheduling round: ", self.bs._current_time, self.sched_delay)

    # update time: charge either the real scheduling duration or a fixed delay
    real_sched_time = time.time() - real_time
    if self.sched_delay == -1:
        self.bs.consume_time(real_sched_time)  # TODO
    else:
        self.bs.consume_time(self.sched_delay)
    self.env.now = self.bs._current_time
    print("Time after scheduling round: ", self.bs._current_time)

    # send to uds
    if len(jids_to_launch) > 0:
        scheduled_jobs = []
        jobs_res = {}
        for jid in jids_to_launch:
            ds_job = self.jobs[jid].ds_job
            res = itvs2batsim_str0(self.jobs[jid].res_set)
            scheduled_jobs.append(ds_job)
            jobs_res[ds_job.id] = res
        self.bs.start_jobs(scheduled_jobs, jobs_res)
def test_db_all_in_one_BE(monkeypatch):
    """A lone besteffort job is scheduled and moved to toLaunch."""
    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')
    insert_job(res=[(100, [('resource_id=1', "")])], queue_name='besteffort', types=['besteffort'])
    meta_schedule('internal')

    be_job = db['Job'].query.one()
    print(be_job.state)
    assert be_job.state == 'toLaunch'
def test_suspend_resume_1(monkeypatch):
    """A job flipped to Resuming keeps that state across a scheduling pass."""
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    meta_schedule('internal')

    j = db['Job'].query.one()
    print(j.state)
    set_job_state(j.id, 'Resuming')

    j = db['Job'].query.one()
    print(j.state)
    meta_schedule('internal')
    assert j.state == 'Resuming'
def test_db_all_in_one_AR_7(monkeypatch):
    """A timesharing advance reservation gets Scheduled while Waiting."""
    t_now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])],
               reservation='toSchedule', start_time=t_now + 10,
               info_type='localhost:4242', types=["timesharing=*,*"])
    meta_schedule('internal')

    ar_job = db['Job'].query.one()
    assert ar_job.state == 'Waiting'
    assert ar_job.reservation == 'Scheduled'
def test_suspend_resume_2(monkeypatch):
    """Same as test_suspend_resume_1, with a pre-resume exec hook configured."""
    config['JUST_BEFORE_RESUME_EXEC_FILE'] = 'sleep 2'
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    meta_schedule('internal')

    j = db['Job'].query.one()
    print(j.state)
    set_job_state(j.id, 'Resuming')

    j = db['Job'].query.one()
    print(j.state)
    meta_schedule('internal')
    assert j.state == 'Resuming'
def test_db_all_in_one_simple_interactive_waiting_1(monkeypatch):
    """With all 4 resources taken, the interactive job must be left Waiting."""
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               job_type='INTERACTIVE', info_type="0.0.0.0:1234")
    meta_schedule('internal')

    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time)

    batch_job, interactive_job = db['Job'].query.order_by(db['Job'].id).all()
    assert batch_job.state == 'toLaunch'
    assert interactive_job.state == 'Waiting'
def test_db_all_in_one_simple_1(monkeypatch):
    """A single job on free resources ends up toLaunch after scheduling."""
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    print('job state:', db['Job'].query.one().state)

    meta_schedule('internal')
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time)

    j = db['Job'].query.one()
    print(j.state)
    assert j.state == 'toLaunch'
def test_db_placeholder_2(monkeypatch):
    """A placeholder job and a non-matching allow job must not start together."""
    t_ref = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["placeholder=yop"])
    insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["allow=poy"])
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    starts = []
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time - t_ref)
        starts.append(pred.start_time - t_ref)
    assert starts[0] != starts[1]
def test_db_all_in_one_wakeup_node_energy_saving_internal_1(monkeypatch):
    """With internal energy saving, a job on Absent-but-wakeable nodes waits."""
    config['ENERGY_SAVING_INTERNAL'] = 'yes'
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    # Put every resource to sleep while keeping it wakeable for a while.
    now = get_date()
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()

    meta_schedule('internal')
    j = db['Job'].query.one()
    print(j.state)
    print(node_list)
    assert j.state == 'Waiting'
def test_db_metasched_simple_1(monkeypatch):
    """Default meta_schedule launches a simple waiting job."""
    print("DB_BASE_FILE: ", config["DB_BASE_FILE"])
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    print('job state:', db['Job'].query.one().state)

    meta_schedule()
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time)

    j = db['Job'].query.one()
    print(j.state)
    assert j.state == 'toLaunch'
def test_db_moldable_2(monkeypatch):
    """A moldable job picks the instance that can start alongside the first job."""
    t_ref = get_date()
    insert_job(res=[(60, [('resource_id=3', "")])], properties="")
    insert_job(res=[(60, [('resource_id=4', "")]), (70, [('resource_id=2', "")])], properties="")
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    starts = []
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time - t_ref)
        starts.append(pred.start_time - t_ref)
    assert starts[0] == starts[1]
def test_db_metasched_ar_1(monkeypatch):
    """An advance reservation is scheduled by the default meta_schedule path."""
    now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               reservation='toSchedule', start_time=(now + 10),
               info_type='localhost:4242')
    meta_schedule()

    ar_job = db['Job'].query.one()
    print(ar_job.state, ' ', ar_job.reservation)
    assert ar_job.state == 'Waiting'
    assert ar_job.reservation == 'Scheduled'
def test_db_all_in_one_AR_4(monkeypatch):
    """An AR whose start time has already passed is launched immediately."""
    now = get_date()
    insert_and_sched_ar(now + 10)

    # Backdate the reservation so its start time lies in the past.
    db.query(GanttJobsPrediction).update({GanttJobsPrediction.start_time: now - 20},
                                         synchronize_session=False)
    db.commit()

    meta_schedule('internal')
    ar_job = db['Job'].query.one()
    print('\n', ar_job.id, ar_job.state, ' ', ar_job.reservation, ar_job.start_time)
    assert ar_job.state == 'toLaunch'
def test_db_all_in_one_wakeup_node_1(monkeypatch):
    """Scheduling onto Absent-but-wakeable nodes wakes them; the job waits."""
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    # Suspend nodes, still wakeable until now + 1000.
    now = get_date()
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()

    meta_schedule('internal')
    j = db['Job'].query.one()
    print(j.state)
    print(node_list)
    assert j.state == 'Waiting'
    assert node_list == [u'localhost0', u'localhost1']
def test_db_timesharing_2(monkeypatch):
    """Per-user timesharing must not overlap jobs from different users."""
    t_ref = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               types=["timesharing=user,*"], user='******')
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               types=["timesharing=user,*"], user='******')
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    starts = []
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time - t_ref)
        starts.append(pred.start_time - t_ref)
    assert starts[0] != starts[1]
def test_db_extra_metasched_1():
    """Jobs matching the EXTRA_METASCHED property are handled separately."""
    config['EXTRA_METASCHED'] = 'foo'
    insert_job(res=[(60, [('resource_id=1', "")])], properties="deploy='YES'")
    insert_job(res=[(60, [('resource_id=1', "")])], properties="deploy='FOO'")
    insert_job(res=[(60, [('resource_id=1', "")])], properties="")

    for j in db['Job'].query.all():
        print('job state:', j.state, j.id)

    meta_schedule()
    for pred in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", pred.moldable_id, ' start_time: ', pred.start_time)

    states = [j.state for j in db['Job'].query.order_by(Job.id).all()]
    print(states)
    assert states == ['toLaunch', 'Waiting', 'toLaunch']
def test_db_all_in_one_AR_6(monkeypatch):
    """An AR pushed well past its start time remains Waiting here."""
    now = get_date()
    ar_job = insert_and_sched_ar(now + 10, 600)

    # Shift the AR far into the past, in both the job and gantt tables.
    new_start_time = now - 350
    set_jobs_start_time(tuple([ar_job.id]), new_start_time)
    db.query(GanttJobsPrediction).update({GanttJobsPrediction.start_time: new_start_time},
                                         synchronize_session=False)

    meta_schedule('internal')
    ar_job = db['Job'].query.one()
    print('\n', ar_job.id, ar_job.state, ' ', ar_job.reservation, ar_job.start_time)
    assert ar_job.state == 'Waiting'
def test_db_all_in_one_sleep_node_1(monkeypatch):
    """Idle-but-available nodes are selected for sleeping after scheduling."""
    now = get_date()
    insert_job(res=[(60, [('resource_id=1', "")])], properties="")

    # Every resource stays available for wake-up until now + 50000.
    db.query(Resource).update({Resource.available_upto: now + 50000},
                              synchronize_session=False)
    db.commit()

    meta_schedule('internal')
    j = db['Job'].query.one()
    print(j.state)
    print(node_list)
    assert j.state == 'toLaunch'
    # Both hosts must be selected; their order is irrelevant.
    assert sorted(node_list) == [u'localhost1', u'localhost2']
def test_db_all_in_one_BE_2(monkeypatch):
    """A timesharing job next to a running timesharing besteffort job.

    TODO: TOFINISH
    """
    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')
    insert_job(res=[(100, [('resource_id=1', "")])],
               queue_name='besteffort', types=['besteffort', "timesharing=*,*"])
    meta_schedule('internal')

    be_job = db['Job'].query.one()
    set_job_state(be_job.id, 'Running')

    insert_job(res=[(50, [('resource_id=1', "")])], types=["timesharing=*,*"])
    meta_schedule('internal')

    all_jobs = db['Job'].query.all()
    print(all_jobs[1].id, all_jobs[1].state)
    # assert (jobs[1].state == 'toLaunch')
    assert all_jobs[1].state == 'Waiting'
def test_db_suspended_duration_1(monkeypatch):
    """A job inserted as suspended is still schedulable."""
    insert_job(res=[(60, [('resource_id=3', "")])], properties="", suspended='YES')
    meta_schedule('internal')
    assert db['Job'].query.one().state == 'toLaunch'
def sched_loop(self):
    """Main event loop bridging Batsim and the scheduler.

    Repeats until self.nb_jobs jobs have completed: reads one Batsim
    message (time, submissions, completions), updates bookkeeping,
    runs a scheduling pass ("simu" simulator or OAR meta_schedule),
    then replies to Batsim with the jobs to launch.
    """
    nb_completed_jobs = 0
    while nb_completed_jobs < self.nb_jobs:
        # One protocol message: current simulated time + job events.
        now_float, jobs_submitted, new_jobs_completed = read_bat_msg(self.sock)

        # now_str = "10"
        # jobs_submitted = [1]
        # new_jobs_completed = []

        # Register freshly submitted jobs; mirror state into the DB
        # when the scheduler runs against a real OAR database.
        if jobs_submitted:
            for jid in jobs_submitted:
                self.waiting_jids.add(jid)
                if self.platform_model == "batsim-db":
                    print('set_job_state("Waiting"):', self.jobs[jid].db_jid)
                    set_job_state(self.jobs[jid].db_jid, "Waiting")

        # Account for completions (jobs_completed appears to be a
        # module-level list — TODO confirm; it is not self-scoped here).
        nb_completed_jobs += len(new_jobs_completed)
        print("new job completed: %s" % new_jobs_completed)
        for jid in new_jobs_completed:
            jobs_completed.append(jid)
            if jid in self.platform.running_jids:
                self.platform.running_jids.remove(jid)
            if self.platform_model == "batsim-db":
                set_job_state(self.jobs[jid].db_jid, "Terminated")

        now = int(now_float)
        self.env.now = now  # TODO can be remove ???
        real_time = time.time()  # wall-clock start of the scheduling pass

        print("jobs running: %s" % self.platform.running_jids)
        print("jobs waiting: %s" % self.waiting_jids)
        print("jobs completed: %s" % jobs_completed)

        jids_to_launch = []

        if self.platform_model == "simu":
            print("call schedule_cycle.... \n%s" % now)
            schedule_cycle(self.platform, now, "default")

            # retrieve jobs to launch
            jids_to_launch = []
            for jid, job in iteritems(self.platform.assigned_jobs):
                print("job.start_time %s" % job.start_time)
                if (job.start_time == now) and (job.state == "Waiting"):
                    self.waiting_jids.remove(jid)
                    jids_to_launch.append(jid)
                    job.state = "Running"
                    print("tolaunch: %s" % jid)
                    self.platform.running_jids.append(jid)
        else:
            print("call meta_schedule('internal')")
            meta_schedule("internal", plt)

            # Launching phase
            # Retrieve job to Launch
            result = db.query(Job).filter(Job.state == "toLaunch").order_by(Job.id).all()
            for job_db in result:
                set_job_state(job_db.id, "Running")
                jid = self.db_jid2s_jid[job_db.id]
                self.waiting_jids.remove(jid)
                jids_to_launch.append(jid)
                self.jobs[jid].state = "Running"
                print("_tolaunch: %s" % jid)
                self.platform.running_jids.append(jid)

        # Advance simulated time by the real scheduling duration, or by
        # a fixed configured delay when sched_delay != -1.
        real_sched_time = time.time() - real_time
        if self.sched_delay == -1:
            now_float += real_sched_time
        else:
            now_float += self.sched_delay

        # Reply to Batsim with the jobs selected for launch.
        send_bat_msg(self.sock, now_float, jids_to_launch, self.jobs)
def main():
    """Entry point: log startup and run one meta-scheduling pass."""
    logger.info("Starting Kao Meta Scheduler")
    meta_schedule()