Example #1
def test_db_all_in_one_quotas_2(monkeypatch):
    """
    quotas[queue, project, job_type, user] = [int, int, float];
                                               |    |     |
              maximum used resources ----------+    |     |
              maximum number of running jobs -------+     |
              maximum resources times (hours) ------------+
    """

    create_quotas_rules_file('{"quotas": {"*,*,*,/": [-1, 1, -1]}}')

    # Submit and allocate an Advance Reservation
    t0 = get_date()
    insert_and_sched_ar(t0 + 100)

    # Submit other jobs
    insert_job(res=[(100, [('resource_id=1', "")])], properties="", user="******")
    insert_job(res=[(200, [('resource_id=1', "")])], properties="", user="******")

    # pdb.set_trace()
    t1 = get_date()
    meta_schedule('internal')

    res = []
    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - t1)
        res.append(i.start_time - t1)

    assert (res[1] - res[0]) == 120
    assert (res[2] - res[0]) == 280
Example #2
def test_db_all_in_one_BE_to_kill(monkeypatch):

    os.environ['USER'] = '******'  # to allow fragging
    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')

    insert_job(res=[(100, [('resource_id=2', "")])], queue_name='besteffort', types=['besteffort'])

    meta_schedule('internal')

    job = db['Job'].query.one()
    assert (job.state == 'toLaunch')

    insert_job(res=[(100, [('resource_id=5', "")])])
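    # This job wants all 5 resources, so the scheduler is expected to frag
    # the running besteffort job to free them (hence the FragJob checked below).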

    meta_schedule('internal')

    jobs = db['Job'].query.all()

    print(jobs[0].state, jobs[1].state)

    print("frag...", db['FragJob'].query.one())
    frag_job = db['FragJob'].query.one()
    assert jobs[0].state == 'toLaunch'
    assert jobs[1].state == 'Waiting'
    assert frag_job.job_id == jobs[0].id
Example #3
def minimal_db_initialization(request):
    with db.session(ephemeral=True):
        for i in range(5):
            db['Resource'].create(network_address="localhost")

        for i in range(5):
            insert_job(res=[(60, [('resource_id=2', "")])], properties="")
        yield
Example #4
def insert_and_sched_ar(start_time, walltime=60):

    insert_job(res=[(walltime, [('resource_id=4', "")])],
               reservation='toSchedule', start_time=start_time,
               info_type='localhost:4242')

    meta_schedule('internal')

    return (db['Job'].query.one())
Example #5
def test_db_all_in_one_BE(monkeypatch):

    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')

    insert_job(res=[(100, [('resource_id=1', "")])], queue_name='besteffort', types=['besteffort'])

    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    assert (job.state == 'toLaunch')
Example #6
def test_suspend_resume_1(monkeypatch):
    # now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    meta_schedule('internal')
    job = db['Job'].query.one()
    print(job.state)
    set_job_state(job.id, 'Resuming')
    job = db['Job'].query.one()
    print(job.state)
    meta_schedule('internal')
    assert(job.state == 'Resuming')
Example #7
def test_db_all_in_one_AR_7(monkeypatch):

    now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])],
               reservation='toSchedule', start_time=now+10,
               info_type='localhost:4242', types=["timesharing=*,*"])

    meta_schedule('internal')

    job = db['Job'].query.one()
    assert ((job.state == 'Waiting') and (job.reservation == 'Scheduled'))
Example #8
def test_suspend_resume_2(monkeypatch):
    config['JUST_BEFORE_RESUME_EXEC_FILE'] = 'sleep 2'
    # now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    meta_schedule('internal')
    job = db['Job'].query.one()
    print(job.state)
    set_job_state(job.id, 'Resuming')
    job = db['Job'].query.one()
    print(job.state)
    meta_schedule('internal')
    assert(job.state == 'Resuming')
Example #9
def test_db_all_in_one_simple_interactive_waiting_1(monkeypatch):
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    insert_job(res=[(60, [('resource_id=4', "")])], properties="", job_type='INTERACTIVE',
               info_type="0.0.0.0:1234")

    meta_schedule('internal')

    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time)

    jobs = db['Job'].query.order_by(db['Job'].id).all()
    assert (jobs[0].state == 'toLaunch')
    assert (jobs[1].state == 'Waiting')
Example #10
def test_db_all_in_one_simple_1(monkeypatch):
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    job = db['Job'].query.one()
    print('job state:', job.state)

    # pdb.set_trace()
    meta_schedule('internal')

    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time)

    job = db['Job'].query.one()
    print(job.state)
    assert (job.state == 'toLaunch')
Example #11
def test_treematch(monkeypatch):
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               types=["find=treematch:matrix_file"])
    job = db['Job'].query.one()
    print('job state:', job.state)

    meta_schedule('internal')

    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time)

    job = db['Job'].query.one()
    print(job.state)
    assert (job.state == 'toLaunch')
Example #12
def test_db_moldable_2(monkeypatch):
    now = get_date()
    insert_job(res=[(60, [('resource_id=3', "")])], properties="")
    insert_job(res=[(60, [('resource_id=4', "")]), (70, [('resource_id=2', "")])], properties="")
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    res = []
    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
        res.append(i.start_time - now)

    assert res[0] == res[1]
Example #13
def test_db_koa_simple_1(monkeypatch):
    insert_job(res=[(60, [("resource_id=4", "")])], properties="")
    job = db["Job"].query.one()
    print("job state:", job.state)

    # pdb.set_trace()
    main()

    for i in db["GanttJobsPrediction"].query.all():
        print("moldable_id: ", i.moldable_id, " start_time: ", i.start_time)

    job = db["Job"].query.one()
    print(job.state)

    assert job.state == "toLaunch"
Example #14
def test_db_placeholder_2(monkeypatch):
    now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["placeholder=yop"])
    insert_job(res=[(60, [('resource_id=4', "")])], properties="", types=["allow=poy"])
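    # 'allow=poy' does not match 'placeholder=yop', so the second job cannot
    # run inside the placeholder's slot and has to start at a different time.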
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    res = []
    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
        res.append(i.start_time - now)

    assert res[0] != res[1]
Example #15
def test_db_metasched_simple_1(monkeypatch):

    print("DB_BASE_FILE: ", config["DB_BASE_FILE"])
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")
    job = db['Job'].query.one()
    print('job state:', job.state)

    meta_schedule()

    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time)

    job = db['Job'].query.one()
    print(job.state)
    assert (job.state == 'toLaunch')
Example #16
def test_db_all_in_one_wakeup_node_energy_saving_internal_1(monkeypatch):
    config['ENERGY_SAVING_INTERNAL'] = 'yes'
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    now = get_date()
    # Suspend nodes
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()
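    # All resources are now Absent but wakeable (available_upto lies in the
    # future), so the job should stay Waiting while a wake-up is triggered.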
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'Waiting')
Example #17
def test_db_metasched_ar_1(monkeypatch):

    # add one job
    now = get_date()
    # sql_now = local_to_sql(now)

    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               reservation='toSchedule', start_time=(now + 10),
               info_type='localhost:4242')

    meta_schedule()

    job = db['Job'].query.one()
    print(job.state, ' ', job.reservation)

    assert ((job.state == 'Waiting') and (job.reservation == 'Scheduled'))
Example #18
def test_db_kamelot_fifo_no_hierarchy():
    # add some resources
    for i in range(5):
        db['Resource'].create(network_address="localhost")

    for i in range(5):
        insert_job(res=[(60, [('resource_id=2', "")])], properties="")

    main()

    req = db['GanttJobsPrediction'].query.all()

    # for i, r in enumerate(req):
    #    print "req:", r.moldable_id, r.start_time

    assert len(req) == 2
Example #19
def test_db_all_in_one_wakeup_node_1(monkeypatch):

    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    now = get_date()
    # Suspend nodes
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'Waiting')
    assert (node_list == [u'localhost0', u'localhost1'])
Example #20
def test_db_timesharing_2(monkeypatch):
    now = get_date()
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               types=["timesharing=user,*"], user='******')
    insert_job(res=[(60, [('resource_id=4', "")])], properties="",
               types=["timesharing=user,*"], user='******')
    meta_schedule('internal')

    for j in db['Job'].query.all():
        print(j.state)

    res = []
    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
        res.append(i.start_time - now)

    assert res[0] != res[1]
Example #21
def test_db_all_in_one_sleep_node_1(monkeypatch):

    now = get_date()

    insert_job(res=[(60, [('resource_id=1', "")])], properties="")

    # Suspend nodes
    # pdb.set_trace()
    db.query(Resource).update({Resource.available_upto: now + 50000},
                              synchronize_session=False)
    db.commit()
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'toLaunch')
    assert (node_list == [u'localhost2', u'localhost1'] or
            node_list == [u'localhost1', u'localhost2'])
Example #22
def test_db_all_in_one_BE_2(monkeypatch):
    # TODO TOFINISH
    db['Queue'].create(name='besteffort', priority=3, scheduler_policy='kamelot', state='Active')

    insert_job(res=[(100, [('resource_id=1', "")])], queue_name='besteffort',
               types=['besteffort', "timesharing=*,*"])

    meta_schedule('internal')
    job = db['Job'].query.one()

    set_job_state(job.id, 'Running')

    insert_job(res=[(50, [('resource_id=1', "")])], types=["timesharing=*,*"])

    meta_schedule('internal')

    jobs = db['Job'].query.all()
    print(jobs[1].id, jobs[1].state)
    # assert (jobs[1].state == 'toLaunch')
    assert (jobs[1].state == 'Waiting')
Example #23
def test_db_all_in_one_quotas_1(monkeypatch):
    """
    quotas[queue, project, job_type, user] = [int, int, float];
                                               |    |     |
              maximum used resources ----------+    |     |
              maximum number of running jobs -------+     |
              maximum resources times (hours) ------------+
    """

    create_quotas_rules_file('{"quotas": {"*,*,*,/": [-1, 1, -1], "/,*,*,*": [-1, -1, 0.55]}}')

    insert_job(res=[(100, [('resource_id=1', "")])], properties="", user="******")
    insert_job(res=[(200, [('resource_id=1', "")])], properties="", user="******")
    insert_job(res=[(200, [('resource_id=1', "")])], properties="", user="******")

    # pdb.set_trace()
    now = get_date()
    meta_schedule('internal')

    res = []
    for i in db['GanttJobsPrediction'].query.order_by(GanttJobsPrediction.moldable_id).all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time - now)
        res.append(i.start_time - now)

    assert res == [0, 160, 420]
Example #24
def test_db_kamelot_fifo_w_hierarchy():
    # add some resources
    for i in range(5):
        db['Resource'].create(network_address="localhost" + str(int(i / 2)))

    for res in db['Resource'].query.all():
        print(res.id, res.network_address)

    for i in range(5):
        insert_job(res=[(60, [('network_address=1', "")])],
                   properties="")

    plt = Platform()

    schedule_fifo_cycle(plt, "default", True)

    req = db['GanttJobsPrediction'].query.all()

    # for i, r in enumerate(req):
    #    print("req:", r.moldable_id, r.start_time)

    assert len(req) == 3
Example #25
def test_db_job_sorting_simple_priority_no_waiting_time():

    config['JOB_SORTING'] = "simple_priority"

    plt = Platform()
    now = plt.get_time()

    # add some resources
    for i in range(4):
        db['Resource'].create(network_address="localhost")

    # add some jobs with increasing priority
    for i in range(10):
        priority = str(float(i) / 10.0)
        insert_job(res=[(60, [('resource_id=4', "")])],
                   submission_time=now,
                   types=['priority=' + priority])

    schedule_cycle(plt, plt.get_time())

    req = db['GanttJobsPrediction'].query\
                                   .order_by(db['GanttJobsPrediction'].start_time)\
                                   .all()
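    # Jobs inserted later carry higher priority, so, assuming moldable ids
    # follow insertion order, ids must never increase along the start times.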
    flag = True

    print(req)
    for r in req:
        print(r.moldable_id, r.start_time)
    for i, r in enumerate(req):
        if i != 0:
            print(r.moldable_id, prev_id)
            if r.moldable_id > prev_id:
                flag = False
                break
        prev_id = r.moldable_id

    assert flag
Example #26
def test_db_extra_metasched_1():
    config['EXTRA_METASCHED'] = 'foo'

    insert_job(res=[(60, [('resource_id=1', "")])], properties="deploy='YES'")
    insert_job(res=[(60, [('resource_id=1', "")])], properties="deploy='FOO'")
    insert_job(res=[(60, [('resource_id=1', "")])], properties="")

    for job in db['Job'].query.all():
        print('job state:', job.state, job.id)

    meta_schedule()

    for i in db['GanttJobsPrediction'].query.all():
        print("moldable_id: ", i.moldable_id, ' start_time: ', i.start_time)

    states = [job.state for job in db['Job'].query.order_by(Job.id).all()]
    print(states)
    assert states == ['toLaunch', 'Waiting', 'toLaunch']
Example #27
def test_db_suspended_duration_1(monkeypatch):
    insert_job(res=[(60, [('resource_id=3', "")])], properties="", suspended='YES')
    meta_schedule('internal')
    job = db['Job'].query.one()
    assert (job.state == 'toLaunch')
Example #28
def bataar(wkp_filename, database_mode, socket, node_size, scheduler_policy, types, scheduler_delay):
    #    import pdb; pdb.set_trace()
    if database_mode == 'memory':
        config.clear()
        config.update(BATSIM_DEFAULT_CONFIG)

    assign = False
    assign_func = None
    find = False
    find_func = None

    add_1h = False  # controls adding one level of hierarchy to the resource request
    add_mld = False  # controls adding one extra moldable instance to the resource request

    sp = scheduler_policy
    if sp == 'BASIC' or sp == '0':
        print("BASIC scheduler_policy selected")
        # default
        pass
    elif sp == 'BEST_EFFORT_CONTIGUOUS' or sp == '1':
        print("BEST_EFFORT_CONTIGUOUS scheduler_policy selected")
        find = True
        find_func = getattr(oar.kao.advanced_scheduling, 'find_contiguous_1h')
        assign = True
        assign_func = getattr(oar.kao.advanced_scheduling, 'assign_one_time_find')
    elif sp == 'CONTIGUOUS' or sp == '2':
        print("CONTIGUOUS scheduler_policy selected")
        find = True
        find_func = getattr(oar.kao.advanced_scheduling, 'find_contiguous_1h')
    elif sp == 'BEST_EFFORT_LOCAL' or sp == '3':
        print("BEST_EFFORT_LOCAL scheduler_policy selected")
        add_1h = True
        add_mld = True
    elif sp == 'LOCAL' or sp == '4':
        print("LOCAL scheduler_policy selected")
        add_1h = True

    #
    # Load workload
    #

    json_jobs, nb_res = load_json_workload_profile(wkp_filename)

    print("nb_res:", nb_res)

    if types and types != '':
        types_array = types.split(',')
        for type_value in types_array:
            t, v = type_value.split('=')
            if t == "assign":
                print("type assign with function: ", v)
                assign = True
                assign_func = getattr(oar.kao.advanced_scheduling, 'assign_' + v)
            if t == "find":
                print("type find with function: ", v)
                find = True
                find_func = getattr(oar.kao.advanced_scheduling, 'find_' + v)

    if database_mode == 'no-db':
        #
        # generate ResourceSet
        #

        hy_resource_id = [[(i, i)] for i in range(1, nb_res + 1)]
        hierarchy = {'resource_id': hy_resource_id}
        if node_size > 0:
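            # Group resource ids into contiguous blocks of node_size to model
            # nodes, e.g. node_size=4 yields (0, 3), (4, 7), ...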
            node_id = [[(node_size * i, node_size * (i + 1) - 1)] for i in range(int(nb_res / node_size))]
            hierarchy['node'] = node_id

        print('hierarchy: ', hierarchy)

        res_set = ResourceSetSimu(
            rid_i2o=range(nb_res+1),
            rid_o2i=range(nb_res+1),
            roid_itvs=[(1, nb_res)],
            hierarchy=hierarchy,
            available_upto={2147483600: [(1, nb_res)]}
        )
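        # available_upto marks every resource as available until 2147483600,
        # i.e. close to the 32-bit epoch limit, effectively forever.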

        #
        # prepare jobs
        #
        mld_id = 1
        print("Genererate jobs")

        for j in json_jobs:
            jid = int(j['id'])
            walltime = int(math.ceil(float(j["walltime"])))
            rqb = [([('resource_id', j['res'])], [(1, nb_res)])]
            rqbh = [([('node', 1), ('resource_id', j['res'])], [(1, nb_res)])]
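            # rqb requests j['res'] resource_ids anywhere in (1, nb_res);
            # rqbh additionally confines the request to a single node.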

            if add_1h:
                if add_mld:
                    mld_res_rqts = [(mld_id, walltime, rqbh), (mld_id+1, walltime, rqb)]
                    mld_id += 2
                else:
                    mld_res_rqts = [(mld_id, walltime, rqbh)]
                    mld_id += 1
            else:
                if add_mld:
                    mld_res_rqts = [(mld_id, walltime, rqb), (mld_id+1, walltime, rqb)]
                    mld_id += 2
                else:
                    mld_res_rqts = [(mld_id, walltime, rqb)]
                    mld_id += 1

            jobs[jid] = JobSimu(id=jid,
                                state="Waiting",
                                queue="test",
                                start_time=0,
                                walltime=0,
                                types={},
                                res_set=[],
                                moldable_id=0,
                                mld_res_rqts=mld_res_rqts,
                                run_time=0,
                                deps=[],
                                key_cache={},
                                ts=False, ph=0,
                                assign=assign, assign_func=assign_func,
                                find=find, find_func=find_func)

            # print("jobs: ", jid, " mld_res_rqts: ", mld_res_rqts)
        # import pdb; pdb.set_trace()
        BatSched(res_set, jobs, 'simu', {}, scheduler_delay, socket).run()

    elif database_mode == 'memory':

        global offset_idx
        offset_idx = 1
        monkeypatch_oar_lib_tools()
        db_initialization(nb_res)

        #
        # prepare jobs
        #
        db_jid2s_jid = {}
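        # maps a job's database id (its insertion order) to its simulation id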
        print("Prepare jobs")
        for i, j in enumerate(json_jobs):
            jid = int(j["id"])
            walltime = int(math.ceil(float(j["walltime"])))
            jobs[jid] = JobSimu(id=jid,
                                state="Waiting",
                                queue="test",
                                start_time=0,
                                walltime=0,
                                moldable_id=0,
                                mld_res_rqts=[(jid, walltime,
                                               [([("resource_id", j["res"])],
                                                 [(1, nb_res)])])],
                                run_time=0,
                                db_jid=i + 1,
                                assign=False,
                                find=False)

            insert_job(res=[(walltime, [('resource_id=' + str(j["res"]), "")])],
                       state='Hold', properties='', user='')
            db_jid2s_jid[i + 1] = jid

        db.flush()  # TO REMOVE ???
        # import pdb; pdb.set_trace()
        BatSched([], jobs, 'batsim-db', db_jid2s_jid, scheduler_delay, socket).run()

        if __name__ != '__main__':
            # If oar.lib.tools' functions are used afterwards, the monkeypatching
            # must be undone. The main use case is test suite evaluation.
            restore_oar_lib_tools()