Example #1
def job_sorting_simple_priority(queue, now, jids, jobs, str_config, plt):
    priority_config = json.loads(str_config)

    if 'WAITING_TIME_WEIGHT' in priority_config:
        waiting_time_weight = float(priority_config['WAITING_TIME_WEIGHT'])
    else:
        waiting_time_weight = 0.0


    #
    # establish job priority
    #

    for job in itervalues(jobs):
        priority = 0.0
        if 'priority' in job.types:
            try:
                priority = float(job.types['priority'])
            except ValueError:
                logger.warning("job priority failed to convert to float: %s"
                               % job.types['priority'])

        job.karma = priority + waiting_time_weight * float(now - job.submission_time) / float(now)


    # sort jids according to jobs' karma value (highest karma first)
    karma_ordered_jids = sorted(jids, key=lambda jid: jobs[jid].karma, reverse=True)
    return karma_ordered_jids
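
For context, here is a minimal standalone sketch of the same weighting formula on hand-built inputs. StubJob and all values are invented for illustration; itervalues, logger, and the config parsing are omitted:

# Standalone sketch of the karma formula used above (illustrative only).
class StubJob:
    def __init__(self, types, submission_time):
        self.types = types
        self.submission_time = submission_time
        self.karma = 0.0

now = 1000.0
waiting_time_weight = 0.5  # would come from WAITING_TIME_WEIGHT in the config
jobs = {1: StubJob({'priority': '2.0'}, 100), 2: StubJob({}, 900)}

for jid, job in jobs.items():
    priority = float(job.types.get('priority', 0.0))
    job.karma = priority + waiting_time_weight * (now - job.submission_time) / now

# Highest karma first: job 1 (explicit priority + long wait) outranks job 2.
print(sorted(jobs, key=lambda jid: jobs[jid].karma, reverse=True))  # [1, 2]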
Example #2
def save_assigns_redis_0(jobs, resource_set):
    if len(jobs) > 0:
        r = redis.Redis()
        mld_id_start_time_s = []
        for j in itervalues(jobs):
            mld_id_start_time_s.append({"moldable_job_id": j.moldable_id, "start_time": j.start_time})
            riods = itvs2ids(j.res_set)
            str_mld_id_rids = ",".join(map(lambda x: str(resource_set.rid_o2i[x]), riods))

            r.set(str(j.moldable_id), str_mld_id_rids)

        db.session.execute(GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
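
The value stored in Redis is just a comma-joined string of resource ids, so a consumer presumably splits it back apart. A self-contained round trip of that encoding (no Redis server needed; the ids are made up):

# Encode a list of resource ids the way save_assigns_redis_0 does, then decode.
rids = [3, 7, 42]
encoded = ",".join(map(str, rids))          # what gets stored under the moldable id key
decoded = [int(s) for s in encoded.split(",")]
assert decoded == rids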
Example #3
def save_assigns_bulk_0(jobs, resource_set):

    if len(jobs) > 0:
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            mld_id_start_time_s.append((j.moldable_id, j.start_time))
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [(j.moldable_id, resource_set.rid_o2i[rid]) for rid in riods])

        with db.engine.connect() as to_conn:
            cursor = to_conn.connection.cursor()
            pg_bulk_insert(cursor, db['gantt_jobs_predictions'], mld_id_start_time_s,
                           ('moldable_job_id', 'start_time'), binary=True)
            # resource assignments belong in gantt_jobs_resources (cf. Example #7)
            pg_bulk_insert(cursor, db['gantt_jobs_resources'], mld_id_rid_s,
                           ('moldable_job_id', 'resource_id'), binary=True)
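
pg_bulk_insert is an OAR helper; judging by the raw cursor and binary=True arguments, it wraps PostgreSQL's COPY protocol. A minimal text-mode equivalent using psycopg2's copy_from (a real psycopg2 API); the connection DSN and table names below are placeholders:

import io
import psycopg2

def bulk_insert_copy(conn, table, rows, columns):
    # Stream tab-separated rows through COPY FROM STDIN: one round trip
    # instead of one INSERT per row.
    buf = io.StringIO()
    for row in rows:
        buf.write("\t".join(str(v) for v in row) + "\n")
    buf.seek(0)
    with conn.cursor() as cur:
        cur.copy_from(buf, table, columns=columns)

# conn = psycopg2.connect("dbname=oar")  # placeholder DSN
# bulk_insert_copy(conn, "gantt_jobs_predictions",
#                  [(1, 0), (2, 3600)], ("moldable_job_id", "start_time"))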
Example #4
File: job.py  Project: oar-team/oar3
def get_jobs_types(jids, jobs):
    import oar.kao.advanced_scheduling
    jobs_types = {}
    for j_type in db.query(JobType).filter(JobType.job_id.in_(tuple(jids))):
        jid = j_type.job_id
        job = jobs[jid]
        t_v = j_type.type.split("=")
        t = t_v[0]
        if t == "timesharing":
            job.ts = True
            job.ts_user, job.ts_name = t_v[1].split(',')
        elif t == "placeholder":
            job.ph = PLACEHOLDER
            job.ph_name = t_v[1]
        elif t == "allow":
            job.ph = ALLOW
            job.ph_name = t_v[1]
        elif t == "assign":
            job.assign = True
            raw_args = '='.join(t_v[1:])
            funcname, job.assign_args, job.assign_kwargs = extract_find_assign_args(raw_args)
            job.assign_func = getattr(oar.kao.advanced_scheduling, 'assign_%s' % funcname)
        elif t == "find":
            job.find = True
            raw_args = '='.join(t_v[1:])
            funcname, job.find_args, job.find_kwargs = extract_find_assign_args(raw_args)
            job.find_func = getattr(oar.kao.advanced_scheduling, 'find_%s' % funcname)
        else:
            if len(t_v) == 2:
                v = t_v[1]
            else:
                v = ""
            jobs_types.setdefault(jid, dict())[t] = v

    for job in itervalues(jobs):
        job.types = jobs_types.get(job.id, {})
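
The parsing above hinges on splitting the raw type string on '=' and re-joining the tail, so argument values that themselves contain '=' survive. A quick self-contained illustration of that convention (the sample strings are invented):

# How a raw job-type string decomposes under the split/join logic above.
raw = "timesharing=alice,night_batch"
t_v = raw.split("=")
assert t_v[0] == "timesharing"
user, name = t_v[1].split(",")
assert (user, name) == ("alice", "night_batch")

# '='.join(t_v[1:]) restores an argument string that itself contains '='.
raw = "assign=one_time_find(mode=strict)"
t_v = raw.split("=")
assert "=".join(t_v[1:]) == "one_time_find(mode=strict)"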
Example #5
def save_assigns_redis_pipeline_0(jobs, resource_set):
    print('# save_assigns_redis_pipeline_0')
    t = 0
    if len(jobs) > 0:
        r = redis.Redis()
        pipe = r.pipeline()
        mld_id_start_time_s = []
        for j in itervalues(jobs):
            t0 = time.time()
            mld_id_start_time_s.append(
                {'moldable_job_id': j.moldable_id, 'start_time': j.start_time})
            riods = itvs2ids(j.res_set)
            str_mld_id_rids = ','.join(map(lambda x: str(resource_set.rid_o2i[x]), riods))
            t += (time.time() - t0)
            pipe.set(str(j.moldable_id), str_mld_id_rids)

        db.session.execute(
            GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
        pipe.execute()
    print("Cumlated mld_id_start_time_s.append time:", t)
Example #6
File: job.py  Project: fr0uty/oartm
def save_assigns_bulk(jobs, resource_set):

    if len(jobs) > 0:
        logger.debug("nb job to save: " + str(len(jobs)))
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            logger.debug("first job_id  to save: " + str(j.id))
            mld_id_start_time_s.append((j.moldable_id, j.start_time))
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [(j.moldable_id, resource_set.rid_o2i[rid]) for rid in riods])

        logger.info("save assignements")

        with db.engine.connect() as to_conn:
            cursor = to_conn.connection.cursor()
            pg_bulk_insert(cursor, db['gantt_jobs_predictions'], mld_id_start_time_s,
                           ('moldable_job_id', 'start_time'), binary=True)
            # resource assignments belong in gantt_jobs_resources (cf. Example #7)
            pg_bulk_insert(cursor, db['gantt_jobs_resources'], mld_id_rid_s,
                           ('moldable_job_id', 'resource_id'), binary=True)
Example #7
File: job.py  Project: fr0uty/oartm
def save_assigns(jobs, resource_set):
    # http://docs.sqlalchemy.org/en/rel_0_9/core/dml.html#sqlalchemy.sql.expression.Insert.values
    if len(jobs) > 0:
        logger.debug("nb job to save: " + str(len(jobs)))
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            logger.debug("first job_id  to save: " + str(j.id))
            mld_id_start_time_s.append(
                {'moldable_job_id': j.moldable_id, 'start_time': j.start_time})
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [{'moldable_job_id': j.moldable_id,
                  'resource_id': resource_set.rid_o2i[rid]} for rid in riods])

        logger.info("save assignements")

        db.session.execute(
            GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
        db.session.execute(GanttJobsResource.__table__.insert(), mld_id_rid_s)
        db.commit()
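
Unlike the COPY-based variants, this version goes through SQLAlchemy Core's executemany path: passing a list of dicts to execute() alongside a Table.insert() expands to a multi-row insert. The pattern is reproducible standalone with an in-memory SQLite engine (the table definition here is a stand-in, not OAR's actual schema):

from sqlalchemy import create_engine, Table, Column, Integer, MetaData

metadata = MetaData()
predictions = Table('gantt_jobs_predictions', metadata,
                    Column('moldable_job_id', Integer),
                    Column('start_time', Integer))

engine = create_engine('sqlite://')     # in-memory, for illustration
metadata.create_all(engine)

rows = [{'moldable_job_id': 1, 'start_time': 0},
        {'moldable_job_id': 2, 'start_time': 3600}]
with engine.begin() as conn:
    # Passing a list of dicts triggers executemany, like the code above.
    conn.execute(predictions.insert(), rows)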
Example #8
File: karma.py  Project: fr0uty/oartm
def karma_jobs_sorting(queue, now, jids, jobs, plt):

    # if "SCHEDULER_FAIRSHARING_MAX_JOB_PER_USER" in config:
    #    fairsharing_nb_job_limit = config["SCHEDULER_FAIRSHARING_MAX_JOB_PER_USER"]
        # TODO NOT UDSED
        # fairsharing_nb_job_limit = 100000

    karma_window_size = 3600 * 30 * 24  # 30-day window, in seconds; TODO: move to config?

    # Set undefined config value to default one
    default_config = {
        "SCHEDULER_FAIRSHARING_PROJECT_TARGETS": "{default => 21.0}",
        "SCHEDULER_FAIRSHARING_USER_TARGETS": "{default => 22.0}",
        "SCHEDULER_FAIRSHARING_COEF_PROJECT": "0",
        "SCHEDULER_FAIRSHARING_COEF_USER": "******",
        "SCHEDULER_FAIRSHARING_COEF_USER_ASK": "1"
    }
    config.setdefault_config(default_config)

    # get fairsharing config if any
    karma_proj_targets = perl_hash_2_dict(
        config["SCHEDULER_FAIRSHARING_PROJECT_TARGETS"])
    karma_user_targets = perl_hash_2_dict(
        config["SCHEDULER_FAIRSHARING_USER_TARGETS"])
    karma_coeff_proj_consumption = float(
        config["SCHEDULER_FAIRSHARING_COEF_PROJECT"])
    karma_coeff_user_consumption = float(
        config["SCHEDULER_FAIRSHARING_COEF_USER"])
    karma_coeff_user_asked_consumption = float(
        config["SCHEDULER_FAIRSHARING_COEF_USER_ASK"])

    #
    # Sort jobs according to karma value (fairsharing)
    #

    window_start = now - karma_window_size
    window_stop = now

    karma_sum_time_asked, karma_sum_time_used = plt.get_sum_accounting_window(
        queue, window_start, window_stop)
    karma_projects_asked, karma_projects_used = plt.get_sum_accounting_by_project(
        queue, window_start, window_stop)
    karma_users_asked, karma_users_used = plt.get_sum_accounting_by_user(
        queue, window_start, window_stop)
    #
    # compute karma for each job
    #

    for job in itervalues(jobs):
        # Missing projects/users contribute zero; targets are percentages
        # and are divided by 100 once, in the karma formula below.
        karma_proj_used_j = karma_projects_used.get(job.project, 0.0)
        karma_user_used_j = karma_users_used.get(job.user, 0.0)
        karma_user_asked_j = karma_users_asked.get(job.user, 0.0)
        karma_proj_target = karma_proj_targets.get(job.project, 0.0)
        karma_user_target = karma_user_targets.get(job.user, 0.0)

        job.karma = (karma_coeff_proj_consumption *
                     ((karma_proj_used_j / karma_sum_time_used) - (karma_proj_target / 100.0)) +
                     karma_coeff_user_consumption *
                     ((karma_user_used_j / karma_sum_time_used) - (karma_user_target / 100.0)) +
                     karma_coeff_user_asked_consumption *
                     ((karma_user_asked_j / karma_sum_time_asked) - (karma_user_target / 100.0)))

    # sort jids according to jobs' karma value (lowest karma first)
    karma_ordered_jids = sorted(jids, key=lambda jid: jobs[jid].karma)
    return karma_ordered_jids
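
The fairsharing targets arrive as Perl-style hash strings such as "{default => 21.0}". perl_hash_2_dict is OAR's own parser; the following minimal reimplementation is an assumption about what it must do for this input shape, not the project's actual code:

import re

def perl_hash_2_dict_sketch(s):
    # Parse "{key => value, key2 => value2}" into a dict of floats.
    # Hypothetical stand-in for OAR's perl_hash_2_dict.
    result = {}
    for key, value in re.findall(r"(\w+)\s*=>\s*([\d.]+)", s):
        result[key] = float(value)
    return result

assert perl_hash_2_dict_sketch("{default => 21.0}") == {"default": 21.0}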