Example #1
    def save_assigns_simu(self, jobs, resource_set):
        print("save_assigns_simu")

        for jid, job in iteritems(jobs):
            jres_set = job.res_set
            print("job.res_set before", jid, job.res_set)
            # Remap from internal resource order ids (roids) back to database resource ids
            r_ids = [resource_set.rid_o2i[roid] for roid in itvs2ids(jres_set)]
            job.res_set = unordered_ids2itvs(r_ids)
        self.assigned_jobs = jobs
Example #2
File: job.py Project: oar-team/oar3
def remove_gantt_resource_job(moldable_id, job_res_set, resource_set):

    # Map the job's resource intervals (internal order ids) to database resource ids
    riods = itvs2ids(job_res_set)
    resource_ids = [resource_set.rid_o2i[rid] for rid in riods]

    # Delete gantt entries of this moldable job whose resource_id is not in the job's resource set
    db.query(GanttJobsResource)\
      .filter(GanttJobsResource.moldable_id == moldable_id)\
      .filter(~GanttJobsResource.resource_id.in_(tuple(resource_ids)))\
      .delete(synchronize_session=False)

    db.commit()
Example #3
def save_assigns_redis_0(jobs, resource_set):
    if len(jobs) > 0:
        r = redis.Redis()
        mld_id_start_time_s = []
        for j in itervalues(jobs):
            mld_id_start_time_s.append({"moldable_job_id": j.moldable_id, "start_time": j.start_time})
            riods = itvs2ids(j.res_set)
            str_mld_id_rids = ",".join(map(lambda x: str(resource_set.rid_o2i[x]), riods))

            r.set(str(j.moldable_id), str_mld_id_rids)

        db.session.execute(GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
Example #4
def save_assigns_bulk_0(jobs, resource_set):

    if len(jobs) > 0:
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            mld_id_start_time_s.append((j.moldable_id, j.start_time))
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [(j.moldable_id, resource_set.rid_o2i[rid]) for rid in riods])

        with db.engine.connect() as to_conn:
            cursor = to_conn.connection.cursor()
            pg_bulk_insert(cursor, db['gantt_jobs_predictions'], mld_id_start_time_s,
                           ('moldable_job_id', 'start_time'), binary=True)
            pg_bulk_insert(cursor, db['queues'], mld_id_rid_s,
                           ('moldable_job_id', 'resource_id'), binary=True)
Example #5
    def save_assigns_simu_and_default(self, jobs, resource_set):
        print("save_assigns_simu_and_default........................")
        # assigned_jobs = {}
        for jid, job in iteritems(jobs):
            sid = self.db_jid2s_jid[jid]
            jobsimu = self.jobs[sid]
            jres_set = job.res_set
            r_ids = [resource_set.rid_o2i[roid] for roid in itvs2ids(jres_set)]
            jobsimu.res_set = unordered_ids2itvs(r_ids)
            print("save assign jid, sid, res_set: ", jid, " ", sid, " ", jobsimu.res_set)
            jobsimu.start_time = job.start_time
            jobsimu.walltime = job.walltime
            # assigned_jobs[sid] = jobsimu

        # self.assigned_jobs = assigned_jobs

        return save_assigns(jobs, resource_set)
Example #6
def save_assigns_redis_pipeline_0(jobs, resource_set):
    print('# save_assigns_redis_pipeline_0')
    t = 0
    if len(jobs) > 0:
        r = redis.Redis()
        pipe = r.pipeline()
        mld_id_start_time_s = []
        for j in itervalues(jobs):
            t0 = time.time()
            mld_id_start_time_s.append(
                {'moldable_job_id': j.moldable_id, 'start_time': j.start_time})
            riods = itvs2ids(j.res_set)
            str_mld_id_rids = ','.join(map(lambda x: str(resource_set.rid_o2i[x]), riods))
            t += (time.time() - t0)
            pipe.set(str(j.moldable_id), str_mld_id_rids)

        db.session.execute(
            GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
        pipe.execute()
    print("Cumlated mld_id_start_time_s.append time:", t)
Example #7
File: job.py Project: fr0uty/oartm
def save_assigns_bulk(jobs, resource_set):

    if len(jobs) > 0:
        logger.debug("nb job to save: " + str(len(jobs)))
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            logger.debug("first job_id  to save: " + str(j.id))
            mld_id_start_time_s.append((j.moldable_id, j.start_time))
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [(j.moldable_id, resource_set.rid_o2i[rid]) for rid in riods])

        logger.info("save assignements")

        with db.engine.connect() as to_conn:
            cursor = to_conn.connection.cursor()
            pg_bulk_insert(cursor, db['gantt_jobs_predictions'], mld_id_start_time_s,
                           ('moldable_job_id', 'start_time'), binary=True)
            pg_bulk_insert(cursor, db['queues'], mld_id_rid_s,
                           ('moldable_job_id', 'resource_id'), binary=True)
Example #8
File: job.py Project: fr0uty/oartm
def save_assigns(jobs, resource_set):
    # http://docs.sqlalchemy.org/en/rel_0_9/core/dml.html#sqlalchemy.sql.expression.Insert.values
    if len(jobs) > 0:
        logger.debug("nb job to save: " + str(len(jobs)))
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            logger.debug("first job_id  to save: " + str(j.id))
            mld_id_start_time_s.append(
                {'moldable_job_id': j.moldable_id, 'start_time': j.start_time})
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [{'moldable_job_id': j.moldable_id,
                  'resource_id': resource_set.rid_o2i[rid]} for rid in riods])

        logger.info("save assignements")

        db.session.execute(
            GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
        db.session.execute(GanttJobsResource.__table__.insert(), mld_id_rid_s)
        db.commit()
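
All of the save_assigns variants above build the same two row shapes before writing them out: one (moldable_job_id, start_time) row per job and one (moldable_job_id, resource_id) row per assigned resource. Below is a minimal, hedged sketch of that transformation only, using stand-in objects (SimpleNamespace and a hypothetical itvs2ids_sketch) instead of the real OAR job and ResourceSet classes:

# Sketch only: illustrates the row shapes built by save_assigns; the objects
# below are stand-ins, not the real OAR classes.
from types import SimpleNamespace

def itvs2ids_sketch(itvs):
    # Hypothetical stand-in for itvs2ids: expand closed intervals into a flat id list
    return [i for (b, e) in itvs for i in range(b, e + 1)]

resource_set = SimpleNamespace(rid_o2i={0: 100, 1: 101, 2: 102})  # order id -> resource id
job = SimpleNamespace(moldable_id=42, start_time=1000, res_set=[(0, 2)])

mld_id_start_time_s = [{'moldable_job_id': job.moldable_id, 'start_time': job.start_time}]
mld_id_rid_s = [{'moldable_job_id': job.moldable_id,
                 'resource_id': resource_set.rid_o2i[rid]}
                for rid in itvs2ids_sketch(job.res_set)]

print(mld_id_start_time_s)  # [{'moldable_job_id': 42, 'start_time': 1000}]
print(mld_id_rid_s)         # one row per resource id: 100, 101, 102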
Example #9
def gantt_init_with_running_jobs(plt, initial_time_sec, job_security_time):

    #
    # Determine Global Resource Intervals and Initial Slot
    #
    resource_set = plt.resource_set()
    initial_slot_set = SlotSet((resource_set.roid_itvs, initial_time_sec))

    logger.debug("Processing of processing of already handled reservations")
    accepted_ar_jids, accepted_ar_jobs = \
        get_waiting_reservations_already_scheduled(resource_set, job_security_time)
    gantt_flush_tables(accepted_ar_jids)

    # TODO Can we remove this step, below ???
    #  why don't use: assigned_resources and job start_time ??? in get_scheduled_jobs ???
    logger.debug("Processing of current jobs")
    current_jobs = get_jobs_in_multiple_states(['Running', 'toLaunch', 'Launching',
                                                'Finishing', 'Suspended', 'Resuming'],
                                               resource_set)
    plt.save_assigns(current_jobs, resource_set)

    #
    #  Resource availability (Available_upto field) is integrated through pseudo jobs
    #
    pseudo_jobs = []
    for t_avail_upto in sorted(resource_set.available_upto.keys()):
        itvs = resource_set.available_upto[t_avail_upto]
        j = JobPseudo()
        j.start_time = t_avail_upto
        j.walltime = MAX_TIME - t_avail_upto
        j.res_set = itvs
        j.ts = False
        j.ph = NO_PLACEHOLDER

        pseudo_jobs.append(j)

    if pseudo_jobs != []:
        initial_slot_set.split_slots_jobs(pseudo_jobs)

    #
    # Get already scheduled jobs: advance reservations and jobs from higher priority queues
    #
    # TODO?: Remove resources of the type specified in
    # SCHEDULER_AVAILABLE_SUSPENDED_RESOURCE_TYPE
    scheduled_jobs = plt.get_scheduled_jobs(
        resource_set, job_security_time, initial_time_sec)

    # Retrieve resources used by besteffort jobs
    besteffort_rid2job = {}

    for job in scheduled_jobs:
        #  print("job.id:", job.id, job.queue_name, job.types, job.res_set, job.start_time)
        if 'besteffort' in job.types:
            for r_id in itvs2ids(job.res_set):
                besteffort_rid2job[r_id] = job

    # Create and fill gantt
    all_slot_sets = {'default': initial_slot_set}
    if scheduled_jobs != []:
        filter_besteffort = True
        set_slots_with_prev_scheduled_jobs(all_slot_sets, scheduled_jobs,
                                           job_security_time, initial_time_sec,
                                           filter_besteffort)

    return (all_slot_sets, scheduled_jobs, besteffort_rid2job)
Example #10
def schedule_fifo_cycle(plt, queue="default", hierarchy_use=False):

    assigned_jobs = {}

    now = plt.get_time()

    logger.info("Begin scheduling....now: " + str(now) + ", queue: " + queue)

    #
    # Retrieve waiting jobs
    #
    waiting_jobs, waiting_jids, nb_waiting_jobs = plt.get_waiting_jobs(queue)

    if nb_waiting_jobs > 0:
        logger.info("nb_waiting_jobs:" + str(nb_waiting_jobs))
        for jid in waiting_jids:
            logger.debug("waiting_jid: " + str(jid))

        #
        # Determine Global Resource Intervals
        #
        resource_set = plt.resource_set()
        res_itvs = deepcopy(resource_set.roid_itvs)

        #
        # Get additional waiting jobs' data
        #
        job_security_time = int(config["SCHEDULER_JOB_SECURITY_TIME"])
        plt.get_data_jobs(waiting_jobs, waiting_jids, resource_set, job_security_time)

        #
        # Remove resources used by running job
        #
        for job in plt.get_scheduled_jobs(resource_set, job_security_time, now):
            if job.state == "Running":
                res_itvs = sub_intervals(res_itvs, job.res_itvs)

        #
        # Assign resource to jobs
        #

        for jid in waiting_jids:
            job = waiting_jobs[jid]

            # We consider only the first instance of the resources request (no moldable job support)
            (mld_id, walltime, hy_res_rqts) = job.mld_res_rqts[0]

            if hierarchy_use:
                # Assign resources with hierarchy support
                itvs = find_resource_hierarchies_job(res_itvs, hy_res_rqts, resource_set.hierarchy)
            else:
                # OR assign resource by considering only resource_id (no hierarchy)
                # and only one type of resource
                (hy_level_nbs, constraints) = hy_res_rqts[0]
                (h_name, nb_asked_res) = hy_level_nbs[0]
                itvs_avail = intersec(constraints, res_itvs)
                ids_avail = itvs2ids(itvs_avail)

                if len(ids_avail) < nb_asked_res:
                    itvs = []
                else:
                    itvs = unordered_ids2itvs(ids_avail[:nb_asked_res])

            if (itvs != []):
                job.moldable_id = mld_id
                job.res_set = itvs
                assigned_jobs[job.id] = job
                res_itvs = sub_intervals(res_itvs, itvs)
            else:
                logger.debug("Not enough available resources, it's a FIFO scheduler, we stop here.")
                break

        #
        # Save assignment
        #
        logger.info("save assignment")
        plt.save_assigns(assigned_jobs, resource_set)

    else:
        logger.info("no waiting jobs")
Example #11
def test_itvs2ids():
    y = [(1, 1), (3, 5), (7, 7), (10, 12), (23, 23)]
    r = [1, 3, 4, 5, 7, 10, 11, 12, 23]
    a = itvs2ids(y)
    assert a == r
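
For reference, here is a minimal sketch of what itvs2ids and unordered_ids2itvs are assumed to compute, consistent with the test above; the real implementations live in OAR's interval helpers, not here:

# Sketch only: assumed behaviour of the two interval helpers used throughout.
def itvs2ids(itvs):
    # Expand a list of closed intervals [(begin, end), ...] into the flat list of ids
    return [i for (b, e) in itvs for i in range(b, e + 1)]

def unordered_ids2itvs(ids):
    # Collapse an unordered list of ids back into sorted, maximal closed intervals
    itvs = []
    for i in sorted(set(ids)):
        if itvs and i == itvs[-1][1] + 1:
            itvs[-1] = (itvs[-1][0], i)
        else:
            itvs.append((i, i))
    return itvs

assert itvs2ids([(1, 1), (3, 5), (7, 7), (10, 12), (23, 23)]) == [1, 3, 4, 5, 7, 10, 11, 12, 23]
assert unordered_ids2itvs([5, 3, 4, 1]) == [(1, 1), (3, 5)]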