Code example #1
File: job.py Project: oar-team/oar3
def frag_job(jid):

    if 'OARDO_USER' in os.environ:
        luser = os.environ['OARDO_USER']
    else:
        luser = os.environ['USER']

    job = get_job(jid)

    if (job is not None) and ((luser == job.user)
                              or (luser == 'oar')
                              or (luser == 'root')):
        res = db.query(FragJob).filter(FragJob.job_id == jid).all()

        if len(res) == 0:

            date = tools.get_date()
            frajob = FragJob(job_id=jid, date=date)
            db.add(frajob)
            db.commit()
            add_new_event("FRAG_JOB_REQUEST",
                          jid, "User %s requested to frag the job %s"
                          % (luser, str(jid)))
            return 0
        else:
            # Job already killed
            return -2
    else:
        return -1
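
The return codes above (0 on success, -2 when a frag request already exists, -1 when the user is not allowed or the job is unknown) suggest how a caller would react; a minimal sketch, assuming frag_job is importable from this module (the wrapper name request_frag is hypothetical):

def request_frag(jid):
    # Hypothetical wrapper, not part of oar3: turn frag_job return codes into messages.
    rc = frag_job(jid)
    if rc == 0:
        print("frag request recorded for job %s" % jid)
    elif rc == -2:
        print("job %s already has a frag request" % jid)
    else:  # rc == -1
        print("not allowed to frag job %s (or job does not exist)" % jid)
    return rc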
Code example #2
File: job.py Project: oar-team/oar3
def set_job_start_time_assigned_moldable_id(jid, start_time, moldable_id):
    # db.query(Job).update({Job.start_time:
    # start_time,Job.assigned_moldable_job: moldable_id}).filter(Job.id ==
    # jid)
    db.query(Job).filter(Job.id == jid).update(
        {Job.start_time: start_time, Job.assigned_moldable_job: moldable_id})
    db.commit()
Code example #3
File: job.py Project: oar-team/oar3
def set_gantt_job_start_time(moldable_id, current_time_sec):

    db.query(GanttJobsPrediction)\
      .filter(GanttJobsPrediction.moldable_id == moldable_id)\
      .update({GanttJobsPrediction.start_time: current_time_sec})

    db.commit()
Code example #4
File: job.py Project: oar-team/oar3
def set_moldable_job_max_time(moldable_id, walltime):

    db.query(MoldableJobDescription)\
      .filter(MoldableJobDescription.id == moldable_id)\
      .update({MoldableJobDescription.walltime: walltime})

    db.commit()
Code example #5
File: job.py Project: oar-team/oar3
def set_job_resa_state(job_id, state):
    '''Set the reservation field of the job whose id is passed in parameter.
    parameters : job_id, state
    return value : None
    side effects : changes the reservation field of the job in the Jobs table
    '''
    db.query(Job).filter(Job.id == job_id).update({Job.reservation: state})
    db.commit()
Code example #6
def extra_metasched_foo(prev_queue, plt, scheduled_jobs, all_slot_sets,
                        job_security_time, queue, initial_time_sec,
                        extra_metasched_config):

    if prev_queue is None:
        # set first resource deployable
        first_id = db.query(Resource).first().id
        db.query(Resource).filter(Resource.id == first_id)\
                          .update({Resource.deploy: 'YES'}, synchronize_session=False)
        db.commit()
Code example #7
File: job.py Project: oar-team/oar3
def add_resource_jobs_pairs(tuple_mld_ids):  # pragma: no cover
    resources_mld_ids = db.query(GanttJobsResource)\
                          .filter(GanttJobsResource.job_id.in_(tuple_mld_ids))\
                          .all()

    assigned_resources = [{'moldable_job_id': res_mld_id.moldable_id,
                           'resource_id': res_mld_id.resource_id} for res_mld_id in resources_mld_ids]

    db.session.execute(AssignedResource.__table__.insert(), assigned_resources)
    db.commit()
Code example #8
File: job.py Project: oar-team/oar3
def add_resource_job_pairs(moldable_id):
    resources_mld_ids = db.query(GanttJobsResource)\
                          .filter(GanttJobsResource.moldable_id == moldable_id)\
                          .all()

    assigned_resources = [{'moldable_job_id': res_mld_id.moldable_id,
                           'resource_id': res_mld_id.resource_id} for res_mld_id in resources_mld_ids]

    db.session.execute(AssignedResource.__table__.insert(), assigned_resources)
    db.commit()
Code example #9
File: bataar.py Project: oar-team/oar3
def db_initialization(nb_res, node_size=None):

    print("Set default queue")
    db.add(Queue(name="default", priority=3, scheduler_policy="kamelot", state="Active"))

    print("add resources")
    # add some resources
    for i in range(nb_res):
        db.add(Resource(network_address="localhost"))

    db.commit()
Code example #10
File: job.py Project: oar-team/oar3
def remove_gantt_resource_job(moldable_id, job_res_set, resource_set):

    riods = itvs2ids(job_res_set)
    resource_ids = [resource_set.rid_o2i[rid] for rid in riods]

    db.query(GanttJobsResource)\
      .filter(GanttJobsResource.moldable_id == moldable_id)\
      .filter(~GanttJobsResource.resource_id.in_(tuple(resource_ids)))\
      .delete(synchronize_session=False)

    db.commit()
Code example #11
File: meta_sched.py Project: fr0uty/oartm
def update_gantt_visualization():

    db.query(GanttJobsPredictionsVisu).delete()
    db.query(GanttJobsResourcesVisu).delete()
    db.commit()

    sql_queries = ["INSERT INTO gantt_jobs_predictions_visu SELECT * FROM gantt_jobs_predictions",
                   "INSERT INTO gantt_jobs_resources_visu SELECT * FROM gantt_jobs_resources"
                   ]
    for query in sql_queries:
        db.session.execute(query)
    db.commit()
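
The two INSERT ... SELECT statements above are passed to db.session.execute() as plain strings, which old SQLAlchemy versions accept; newer releases (1.4/2.0) expect raw SQL to be wrapped in text(). A hedged variant of the same loop, assuming the sqlalchemy package is available:

from sqlalchemy import text

for query in sql_queries:
    db.session.execute(text(query))  # same statements, wrapped for newer SQLAlchemy
db.commit()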
Code example #12
File: test_db_all_in_one.py Project: oar-team/oar3
def test_db_all_in_one_wakeup_node_energy_saving_internal_1(monkeypatch):
    config['ENERGY_SAVING_INTERNAL'] = 'yes'
    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    now = get_date()
    # Suspend nodes
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'Waiting')
Code example #13
File: job.py Project: oar-team/oar3
def update_scheduler_last_job_date(date, moldable_id):
    ''' used to allow search_idle_nodes to operate for dynamic node management feature (Hulot)
    '''

    if db.dialect == "sqlite":
        subquery = db.query(AssignedResource.resource_id).filter_by(moldable_id=moldable_id)\
                     .subquery()
        db.query(Resource).filter(Resource.id.in_(subquery))\
                          .update({Resource.last_job_date: date}, synchronize_session=False)

    else:
        db.query(Resource).filter(AssignedResource.moldable_id == moldable_id)\
                          .filter(Resource.id == AssignedResource.resource_id)\
                          .update({Resource.last_job_date: date}, synchronize_session=False)
    db.commit()
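
The dialect test above is needed because SQLite rejects the implicit-join UPDATE used in the second branch, while the subquery form works on every backend. A sketch of a dialect-independent variant (an assumption, not the project's code), reusing the same models:

# Portable form: resolve the resource ids of the moldable job with a subquery,
# then update those resources, regardless of the database dialect.
subquery = db.query(AssignedResource.resource_id).filter_by(moldable_id=moldable_id)\
             .subquery()
db.query(Resource).filter(Resource.id.in_(subquery))\
                  .update({Resource.last_job_date: date}, synchronize_session=False)
db.commit()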
Code example #14
File: test_db_all_in_one.py Project: oar-team/oar3
def test_db_all_in_one_wakeup_node_1(monkeypatch):

    insert_job(res=[(60, [('resource_id=4', "")])], properties="")

    now = get_date()
    # Suspend nodes
    db.query(Resource).update({Resource.state: 'Absent', Resource.available_upto: now + 1000},
                              synchronize_session=False)
    db.commit()
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'Waiting')
    assert (node_list == [u'localhost0', u'localhost1'])
Code example #15
File: test_db_all_in_one.py Project: oar-team/oar3
def test_db_all_in_one_AR_4(monkeypatch):

    now = get_date()
    job = insert_and_sched_ar(now + 10)
    new_start_time = now - 20

    db.query(GanttJobsPrediction).update({GanttJobsPrediction.start_time: new_start_time},
                                         synchronize_session=False)
    db.commit()

    meta_schedule('internal')

    job = db['Job'].query.one()
    print('\n', job.id, job.state, ' ', job.reservation, job.start_time)

    assert job.state == 'toLaunch'
Code example #16
File: job.py Project: oar-team/oar3
def resume_job_action(job_id):
    '''resume_job_action performs all actions needed when a suspended job is resumed'''

    set_job_state(job_id, 'Running')

    resources = get_current_resources_with_suspended_job()
    if resources != ():
        db.query(Resource)\
          .filter(~Resource.id.in_(resources))\
          .update({Resource.suspended_jobs: 'NO'}, synchronize_session=False)

    else:
        db.query(Resource)\
          .update({Resource.suspended_jobs: 'NO'}, synchronize_session=False)

    db.commit()
Code example #17
File: job.py Project: oar-team/oar3
def gantt_flush_tables(reservations_to_keep_mld_ids):
    '''Flush gantt tables but keep accepted advance reservations'''

    if reservations_to_keep_mld_ids != []:
        logger.debug("reservations_to_keep_mld_ids[0]: " + str(reservations_to_keep_mld_ids[0]))
        db.query(GanttJobsPrediction)\
          .filter(~GanttJobsPrediction.moldable_id.in_(tuple(reservations_to_keep_mld_ids)))\
          .delete(synchronize_session=False)
        db.query(GanttJobsResource)\
          .filter(~GanttJobsResource.moldable_id.in_(tuple(reservations_to_keep_mld_ids)))\
          .delete(synchronize_session=False)
    else:
        db.query(GanttJobsPrediction).delete()
        db.query(GanttJobsResource).delete()

    db.commit()
Code example #18
File: kamelot.py Project: fr0uty/oartm
def main():
    config['LOG_FILE'] = '/tmp/oar_kamelot.log'
    logger = get_logger("oar.kamelot", forward_stderr=True)
    config.setdefault_config(DEFAULT_CONFIG)

    plt = Platform()

    logger.debug("argv..." + str(sys.argv))

    if len(sys.argv) > 2:
        schedule_cycle(plt, int(float(sys.argv[2])), sys.argv[1])
    elif len(sys.argv) == 2:
        schedule_cycle(plt, plt.get_time(), sys.argv[1])
    else:
        schedule_cycle(plt, plt.get_time())

    logger.info("That's all folks")
    from oar.lib import db
    db.commit()
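
From the argv handling above, sys.argv[1] is a queue name and the optional sys.argv[2] a scheduling time; a hypothetical rendering of the two forms as direct calls (queue name and epoch value are made up), assuming Platform and schedule_cycle are imported as in kamelot.py:

plt = Platform()
schedule_cycle(plt, plt.get_time(), "default")   # kamelot <queue>
schedule_cycle(plt, 1700000000, "default")       # kamelot <queue> <time>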
Code example #19
File: test_db_all_in_one.py Project: oar-team/oar3
def test_db_all_in_one_sleep_node_1(monkeypatch):

    now = get_date()

    insert_job(res=[(60, [('resource_id=1', "")])], properties="")

    # Suspend nodes
    # pdb.set_trace()
    db.query(Resource).update({Resource.available_upto: now + 50000},
                              synchronize_session=False)
    db.commit()
    meta_schedule('internal')

    job = db['Job'].query.one()
    print(job.state)
    print(node_list)
    assert (job.state == 'toLaunch')
    assert (node_list == [u'localhost2', u'localhost1'] or
            node_list == [u'localhost1', u'localhost2'])
Code example #20
File: test_db_all_in_one.py Project: oar-team/oar3
def test_db_all_in_one_AR_5(monkeypatch):

    now = get_date()
    job = insert_and_sched_ar(now + 10)
    new_start_time = now - 20

    set_jobs_start_time(tuple([job.id]), new_start_time)
    db.query(GanttJobsPrediction).update({GanttJobsPrediction.start_time: new_start_time},
                                         synchronize_session=False)
    db.commit()

    db.query(Resource).update({Resource.state: 'Suspected'}, synchronize_session=False)
    db.commit()

    meta_schedule('internal')

    job = db['Job'].query.one()
    print('\n', job.id, job.state, ' ', job.reservation, job.start_time)

    assert job.state == 'Waiting'
Code example #21
File: job.py Project: fr0uty/oartm
def save_assigns(jobs, resource_set):
    # http://docs.sqlalchemy.org/en/rel_0_9/core/dml.html#sqlalchemy.sql.expression.Insert.values
    if len(jobs) > 0:
        logger.debug("nb job to save: " + str(len(jobs)))
        mld_id_start_time_s = []
        mld_id_rid_s = []
        for j in itervalues(jobs):
            logger.debug("first job_id  to save: " + str(j.id))
            mld_id_start_time_s.append(
                {'moldable_job_id': j.moldable_id, 'start_time': j.start_time})
            riods = itvs2ids(j.res_set)
            mld_id_rid_s.extend(
                [{'moldable_job_id': j.moldable_id,
                  'resource_id': resource_set.rid_o2i[rid]} for rid in riods])

        logger.info("save assignements")

        db.session.execute(
            GanttJobsPrediction.__table__.insert(), mld_id_start_time_s)
        db.session.execute(GanttJobsResource.__table__.insert(), mld_id_rid_s)
        db.commit()
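
Passing a list of parameter dictionaries together with a Core insert(), as save_assigns does, lets SQLAlchemy issue a single executemany instead of one INSERT per row; a minimal standalone sketch of the pattern (the two rows are made-up values):

rows = [{'moldable_job_id': 1, 'start_time': 1000},
        {'moldable_job_id': 2, 'start_time': 1010}]
db.session.execute(GanttJobsPrediction.__table__.insert(), rows)  # one executemany
db.commit()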
Code example #22
File: job.py Project: oar-team/oar3
def log_job(job):  # pragma: no cover
    if db.dialect == "sqlite":
        return
    db.query(MoldableJobDescription)\
      .filter(MoldableJobDescription.index == 'CURRENT')\
      .filter(MoldableJobDescription.job_id == job.id)\
      .update({MoldableJobDescription.index: 'LOG'}, synchronize_session=False)

    db.query(JobResourceDescription)\
      .filter(MoldableJobDescription.job_id == job.id)\
      .filter(JobResourceGroup.moldable_id == MoldableJobDescription.id)\
      .filter(JobResourceDescription.group_id == JobResourceGroup.id) \
      .update({JobResourceDescription.index: 'LOG'}, synchronize_session=False)

    db.query(JobResourceGroup)\
      .filter(JobResourceGroup.index == 'CURRENT')\
      .filter(MoldableJobDescription.index == 'LOG')\
      .filter(MoldableJobDescription.job_id == job.id)\
      .filter(JobResourceGroup.moldable_id == MoldableJobDescription.id)\
      .update({JobResourceGroup.index: 'LOG'}, synchronize_session=False)

    db.query(JobType)\
      .filter(JobType.types_index == 'CURRENT')\
      .filter(JobType.job_id == job.id)\
      .update({JobType.types_index: 'LOG'}, synchronize_session=False)

    db.query(JobDependencie)\
      .filter(JobDependencie.index == 'CURRENT')\
      .filter(JobDependencie.job_id == job.id)\
      .update({JobDependencie.index: 'LOG'}, synchronize_session=False)

    if job.assigned_moldable_job != "0":
        db.query(AssignedResource)\
          .filter(AssignedResource.index == 'CURRENT')\
          .filter(AssignedResource.moldable_id == int(job.assigned_moldable_job))\
          .update({AssignedResource.index: 'LOG'},
                  synchronize_session=False)
    db.commit()
Code example #23
File: submission.py Project: fr0uty/oartm
def add_micheline_subjob(job_vars,
                         ssh_private_key, ssh_public_key,
                         array_id, array_index,
                         array_commands,
                         properties_applied_after_validation):

    # Estimate_job_nb_resources and incidentally test if properties and resource request are coherent
    # against available resources
    # pdb.set_trace()
    date = get_date()
    properties = job_vars['properties']
    resource_request = job_vars['resource_request']
    resource_available, estimated_nb_resources = estimate_job_nb_resources(resource_request, properties)
    # Add admin properties to the job
    if properties_applied_after_validation:
        if properties:
            properties = '(' + properties + ') AND ' + properties_applied_after_validation
        else:
            properties = properties_applied_after_validation
    job_vars['properties'] = properties
    # TODO Verify the content of the ssh keys

    # TODO format job message
    # message = ''

    # my $job_message = format_job_message_text($job_name,$estimated_nb_resources, $estimated_walltime,
    # $jobType, $reservationField, $queue_name, $project, $type_list, '');

    # TODO  job_group
    #
    name = job_vars['name']
    stdout = job_vars['stdout']
    if not stdout:
        stdout = 'OAR'
        if name:
            stdout += '.' + name
        stdout += ".%jobid%.stdout"
    else:
        stdout = re.sub(r'%jobname%', name, stdout)
    job_vars['stdout'] = stdout

    stderr = job_vars['stderr']
    if not stderr:
        stderr = 'OAR'
        if name:
            stderr += '.' + name
        stderr += '.%jobid%.stderr'
    else:
        stderr = re.sub(r'%jobname%', name, stderr)
    job_vars['stderr'] = stderr

    # Insert job

    kwargs = job_kwargs(job_vars, array_commands[0], date)
    kwargs['message'] = ''  # TODO message
    kwargs['array_index'] = array_index

    if array_id > 0:
        kwargs['array_id'] = array_id

    ins = Job.__table__.insert().values(**kwargs)
    result = db.session.execute(ins)
    job_id = result.inserted_primary_key[0]

    if array_id <= 0:
        db.query(Job).filter(Job.id == job_id).update({Job.array_id: job_id})
        db.commit()

    random_number = random.randint(1, 1000000000000)
    ins = Challenge.__table__.insert().values(
        {'job_id': job_id, 'challenge': random_number,
         'ssh_private_key': ssh_private_key, 'ssh_public_key': ssh_public_key})
    db.session.execute(ins)

    # print(resource_request)

    # Insert resources request in DB
    mld_jid_walltimes = []
    resource_desc_lst = []
    for moldable_instance in resource_request:
        resource_desc, walltime = moldable_instance
        if not walltime:
            # TODO add nullable=True in [email protected] ?
            walltime = 0
        mld_jid_walltimes.append(
            {'moldable_job_id': job_id, 'moldable_walltime': walltime})
        resource_desc_lst.append(resource_desc)

    # Insert MoldableJobDescription job_id and walltime
    # print(mld_jid_walltimes)
    result = db.session.execute(MoldableJobDescription.__table__.insert(),
                                mld_jid_walltimes)

    # Retrieve MoldableJobDescription.ids
    if len(mld_jid_walltimes) == 1:
        mld_ids = [result.inserted_primary_key[0]]
    else:
        r = db.query(MoldableJobDescription.id)\
              .filter(MoldableJobDescription.job_id == job_id).all()
        mld_ids = [e[0] for e in r]
    #
    # print(mld_ids, resource_desc_lst)
    for mld_idx, resource_desc in enumerate(resource_desc_lst):
        # job_resource_groups
        mld_id_property = []
        res_lst = []

        moldable_id = mld_ids[mld_idx]

        for prop_res in resource_desc:
            prop = prop_res['property']
            res = prop_res['resources']

            mld_id_property.append({'res_group_moldable_id': moldable_id,
                                    'res_group_property': prop})

            res_lst.append(res)

        # print(mld_id_property)
        # Insert property for moldable
        result = db.session.execute(JobResourceGroup.__table__.insert(),
                                    mld_id_property)

        if len(mld_id_property) == 1:
            grp_ids = [result.inserted_primary_key[0]]
        else:
            r = db.query(JobResourceGroup.id)\
                  .filter(JobResourceGroup.moldable_id == moldable_id).all()
            grp_ids = [e[0] for e in r]

        # print(grp_ids, res_lst)
        # Insert job_resource_descriptions
        for grp_idx, res in enumerate(res_lst):
            res_description = []
            for idx, res_value in enumerate(res):
                res_description.append({'res_job_group_id': grp_ids[grp_idx],
                                        'res_job_resource_type': res_value['resource'],
                                        'res_job_value': res_value['value'],
                                        'res_job_order': idx})
            # print(res_description)
            db.session.execute(JobResourceDescription.__table__.insert(),
                               res_description)

    # types of job
    types = job_vars['types']
    if types:
        ins = [{'job_id': job_id, 'type': typ} for typ in types]
        db.session.execute(JobType.__table__.insert(), ins)

    # TODO dependencies with min_start_shift and max_start_shift
    dependencies = job_vars['dependencies']
    if dependencies:
        ins = [{'job_id': job_id, 'job_id_required': dep} for dep in dependencies]
        db.session.execute(JobDependencie.__table__.insert(), ins)
    #    foreach my $a (@{$anterior_ref}){
    #    if (my ($j,$min,$max) = $a =~ /^(\d+)(?:,([\[\]][-+]?\d+)?(?:,([\[\]][-+]?\d+)?)?)?$/) {
    #        $dbh->do("  INSERT INTO job_dependencies (job_id,job_id_required,min_start_shift,max_start_shift)
    #                    VALUES ($job_id,$j,'".(defined($min)?$min:"")."','".(defined($max)?$max:"")."')

    if not job_vars['hold']:
        req = db.insert(JobStateLog).values(
            {'job_id': job_id, 'job_state': 'Waiting', 'date_start': date})
        db.session.execute(req)
        db.commit()

        db.query(Job).filter(Job.id == job_id).update({Job.state: 'Waiting'})
        db.commit()
    else:
        req = db.insert(JobStateLog).values(
            {'job_id': job_id, 'job_state': 'Hold', 'date_start': date})
        db.session.execute(req)
        db.commit()

    return(0, job_id)
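
A note on the two branches above that read result.inserted_primary_key: SQLAlchemy only exposes the generated key for a single-row insert, which is why the code re-queries the ids when several rows were inserted in one execute(). A minimal sketch of both cases (job id 42 and the walltime are made-up values):

# Single row: the generated primary key is available on the result.
result = db.session.execute(MoldableJobDescription.__table__.insert(),
                            {'moldable_job_id': 42, 'moldable_walltime': 3600})
mld_ids = [result.inserted_primary_key[0]]

# Several rows (executemany): inserted_primary_key is not defined,
# so the generated ids are fetched back with a query.
rows = db.query(MoldableJobDescription.id)\
         .filter(MoldableJobDescription.job_id == 42).all()
mld_ids = [r[0] for r in rows]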
Code example #24
File: job.py Project: oar-team/oar3
def set_job_message(job_id, message):
    db.query(Job).filter(Job.id == job_id).update({Job.message: message})
    db.commit()
Code example #25
File: job.py Project: oar-team/oar3
def set_job_state(jid, state):

    # TODO
    # TODO Later: notify_user
    # TODO Later: update_current_scheduler_priority

    result = db.query(Job).filter(Job.id == jid)\
                          .filter(Job.state != 'Error')\
                          .filter(Job.state != 'Terminated')\
                          .filter(Job.state != state)\
                          .update({Job.state: state})
    db.commit()

    if result == 1:  # OK for sqlite
        logger.debug(
            "Job state updated, job_id: " + str(jid) + ", wanted state: " + state)

        date = tools.get_date()

        # TODO: optimize job log
        db.query(JobStateLog).filter(JobStateLog.date_stop == 0)\
                             .filter(JobStateLog.job_id == jid)\
                             .update({JobStateLog.date_stop: date})
        db.commit()
        req = db.insert(JobStateLog).values(
            {'job_id': jid, 'job_state': state, 'date_start': date})
        db.session.execute(req)

        if state == "Terminated" or state == "Error" or state == "toLaunch" or \
           state == "Running" or state == "Suspended" or state == "Resuming":
            job = db.query(Job).filter(Job.id == jid).one()
            if state == "Suspend":
                tools.notify_user(job, "SUSPENDED", "Job is suspended.")
            elif state == "Resuming":
                tools.notify_user(job, "RESUMING", "Job is resuming.")
            elif state == "Running":
                tools.notify_user(job, "RUNNING", "Job is running.")
            elif state == "toLaunch":
                update_current_scheduler_priority(job, "+2", "START")
            else:  # job is "Terminated" or ($state eq "Error")
                if job.stop_time < job.start_time:
                    db.query(Job).filter(Job.id == jid)\
                                 .update({Job.stop_time: job.start_time})
                    db.commit()

                if job.assigned_moldable_job != "0":
                    # Update last_job_date field for resources used
                    update_scheduler_last_job_date(
                        date, int(job.assigned_moldable_job))

                if state == "Terminated":
                    tools.notify_user(job, "END", "Job stopped normally.")
                else:
                    # Verify if the job was suspended and if the resource
                    # property suspended is updated
                    if job.suspended == "YES":
                        r = get_current_resources_with_suspended_job()

                        if r != ():
                            db.query(Resource).filter(~Resource.id.in_(r))\
                                              .update({Resource.suspended_jobs: 'NO'})

                        else:
                            db.query(Resource).update(
                                {Resource.suspended_jobs: 'NO'})
                        db.commit()

                    tools.notify_user(
                        job, "ERROR", "Job stopped abnormally or an OAR error occured.")

                update_current_scheduler_priority(job, "-2", "STOP")

                # Here we must not be asynchronously with the scheduler
                log_job(job)
                # $dbh is valid so these 2 variables must be defined
                nb_sent = tools.notify_almighty("ChState")
                if nb_sent == 0:
                    logger.warning("Not able to notify almighty to launch the job " +
                                   str(job.id) + " (socket error)")

    else:
        logger.warning("Job is already termindated or in error or wanted state, job_id: " +
                       str(jid) + ", wanted state: " + state)
Code example #26
File: job.py Project: oar-team/oar3
def set_jobs_start_time(tuple_jids, start_time):

    db.query(Job).filter(Job.id.in_(tuple_jids)).update(
        {Job.start_time: start_time}, synchronize_session=False)
    db.commit()
Code example #27
File: submission.py Project: fr0uty/oartm
def add_micheline_simple_array_job(job_vars,
                                   ssh_private_key, ssh_public_key,
                                   array_id, array_index,
                                   array_commands,
                                   properties_applied_after_validation):

    job_id_list = []
    date = get_date()

    # Check that the jobs are not moldable
    resource_request = job_vars['resource_request']
    if len(resource_request) > 1:
        print_error('array jobs cannot be moldable')
        sub_exit(-30)

    # Estimate_job_nb_resources and incidentally test if properties and resource request are coherent
    # against available resources
    # pdb.set_trace()
    properties = job_vars['properties']
    resource_available, estimated_nb_resources = estimate_job_nb_resources(resource_request, properties)

    # Add admin properties to the job
    if properties_applied_after_validation:
        if properties:
            properties = '(' + properties + ') AND ' + properties_applied_after_validation
        else:
            properties = properties_applied_after_validation
    job_vars['properties'] = properties
    # TODO format job message

    # my $job_message = format_job_message_text($job_name,$estimated_nb_resources, $estimated_walltime,
    # $jobType, $reservationField, $queue_name, $project, $type_list, '');

    name = job_vars['name']
    stdout = job_vars['stdout']
    if not stdout:
        stdout = 'OAR'
        if name:
            stdout += '.' + name
        stdout += ".%jobid%.stdout"
    else:
        stdout = re.sub(r'%jobname%', name, stdout)
    job_vars['stdout'] = stdout

    stderr = job_vars['stderr']
    if not stderr:
        stderr = 'OAR'
        if name:
            stderr += '.' + name
        stderr += '.%jobid%.stderr'
    else:
        stderr = re.sub(r'%jobname%', name, stderr)
    job_vars['stderr'] = stderr

    # Insert job
    kwargs = job_kwargs(job_vars, array_commands[0], date)
    kwargs['message'] = ''  # TODO message
    kwargs['array_index'] = array_index

    # print(kwargs)

    ins = Job.__table__.insert().values(**kwargs)
    result = db.session.execute(ins)
    first_job_id = result.inserted_primary_key[0]

    # Update array_id
    array_id = first_job_id
    db.query(Job).filter(Job.id == first_job_id).update({Job.array_id: array_id})
    db.commit()

    # Insert remaining array jobs with array_id
    jobs_data = []
    kwargs['array_id'] = array_id
    for command in array_commands[1:]:
        job_data = kwargs.copy()
        job_data['command'] = command
        jobs_data.append(job_data)

    db.session.execute(Job.__table__.insert(), jobs_data)
    db.commit()

    # Retrieve job_ids thanks to array_id value
    result = db.query(Job.id).filter(Job.array_id == array_id).all()
    job_id_list = [r[0] for r in result]

    # TODO Populate challenges and moldable_job_descriptions tables
    challenges = []
    moldable_job_descriptions = []

    walltime = resource_request[0][1]
    if not walltime:
        walltime = default_job_walltime

    for job_id in job_id_list:
        random_number = random.randint(1, 1000000000000)
        challenges.append({'job_id': job_id, 'challenge': random_number})
        moldable_job_descriptions.append({'moldable_job_id': job_id, 'moldable_walltime': walltime})

    db.session.execute(Challenge.__table__.insert(), challenges)
    db.session.execute(MoldableJobDescription.__table__.insert(), moldable_job_descriptions)
    db.commit()

    # Retrieve moldable_ids thanks to job_ids
    result = db.query(MoldableJobDescription.id)\
               .filter(MoldableJobDescription.job_id.in_(tuple(job_id_list)))\
               .order_by(MoldableJobDescription.id).all()
    moldable_ids = [r[0] for r in result]

    # Populate job_resource_groups table
    job_resource_groups = []
    resource_desc_lst = resource_request[0][0]

    for moldable_id in moldable_ids:
        for resource_desc in resource_desc_lst:
            prop = resource_desc['property']
            job_resource_groups.append({'res_group_moldable_id': moldable_id,
                                        'res_group_property': prop})

    db.session.execute(JobResourceGroup.__table__.insert(), job_resource_groups)
    db.commit()

    # Retrieve res_group_ids thanks to moldable_ids
    result = db.query(JobResourceGroup.id)\
               .filter(JobResourceGroup.moldable_id.in_(tuple(moldable_ids)))\
               .order_by(JobResourceGroup.id).all()
    res_group_ids = [r[0] for r in result]

    # Populate job_resource_descriptions table
    job_resource_descriptions = []
    k = 0
    for i in range(len(array_commands)):  # Nb jobs
        for resource_desc in resource_desc_lst:
            order = 0
            for res_val in resource_desc['resources']:
                job_resource_descriptions.append({'res_job_group_id': res_group_ids[k],
                                                  'res_job_resource_type': res_val['resource'],
                                                  'res_job_value': res_val['value'],
                                                  'res_job_order': order})
                order += 1
            k += 1

    db.session.execute(JobResourceDescription.__table__.insert(), job_resource_descriptions)
    db.commit()

    # Populate job_types table
    types = job_vars['types']
    if types:
        job_types = []
        for job_id in job_id_list:
            for typ in types:
                job_types.append({'job_id': job_id, 'type': typ})
        db.session.execute(JobType.__table__.insert(), job_types)
        db.commit()

    # TODO Anterior job setting

    # Hold/Waiting management, job_state_log setting
    # Job is inserted with hold state first
    state_log = 'Hold'
    if not job_vars['hold']:
        state_log = 'Waiting'
        db.query(Job).filter(Job.array_id == array_id).update({Job.state: state_log})
        db.commit()

    # Update array_id field and set job to state if waiting and insert job_state_log
    job_state_logs = [{'job_id': job_id, 'job_state': state_log, 'date_start': date}
                      for job_id in job_id_list]
    db.session.execute(JobStateLog.__table__.insert(), job_state_logs)
    db.commit()

    return(0, job_id_list)