Example #1
0
 def post_commit():
     """Dispatch the task to the queue once the enclosing transaction commits.

     Failed tasks and WorkflowJobs are never dispatched through the
     task queue, so bail out early for those.
     """
     # NOTE: exact type check (not isinstance) is deliberate — only
     # WorkflowJob itself is excluded, not arbitrary subclasses.
     if task.status == 'failed' or type(task) is WorkflowJob:
         return
     # Before task is dispatched, ensure that job_event partitions exist
     create_partition(task.event_class._meta.db_table, start=task.created)
     dispatch_cls = task._get_task_class()
     # Celery-style callback descriptors: fired on success / on error.
     success_callback = {
         'task': handle_work_success.name,
         'kwargs': {'task_actual': task_actual},
     }
     error_callback = {
         'task': handle_work_error.name,
         'args': [task.celery_task_id],
         'kwargs': {'subtasks': [task_actual] + dependencies},
     }
     dispatch_cls.apply_async(
         [task.pk],
         opts,
         queue=task.get_queue_name(),
         uuid=task.celery_task_id,
         callbacks=[success_callback],
         errbacks=[error_callback],
     )
Example #2
0
File: firehose.py — Project: yckwon75/awx
def generate_jobs(jobs, batch_size, time_delta):
    """Bulk-insert `jobs` fake Job rows in batches of `batch_size`.

    Each job is back-dated by `time_delta`, attached round-robin to an
    existing JobTemplate, and written with a raw InsertQuery to bypass
    the (slow) polymorphic save path.

    Returns the list of created job primary keys.
    """
    print(f'inserting {jobs} job(s)')
    sys.path.insert(0, pkg_resources.get_distribution('awx').module_path)
    from awx import prepare_env

    prepare_env()
    setup_django()

    from awx.main.models import UnifiedJob, Job, JobTemplate

    # Job-specific concrete fields, i.e. those not inherited from UnifiedJob.
    fields = list(set(Job._meta.fields) - set(UnifiedJob._meta.fields))
    job_field_names = {f.attname for f in fields}
    # extra unified job field names from base class
    for field_name in ('name', 'created_by_id', 'modified_by_id'):
        job_field_names.add(field_name)
    jt_count = JobTemplate.objects.count()
    if not jt_count:
        # Without this guard the `jt_pos % jt_count` below raises an
        # opaque ZeroDivisionError.
        raise RuntimeError('No JobTemplates exist; create at least one before generating jobs')

    def make_batch(N, jt_pos=0):
        """Create N jobs cloned from the next JobTemplate (round-robin).

        Returns (last_created_job, next_jt_pos, list_of_created_pks).
        """
        jt = None
        while not jt:
            try:
                jt = JobTemplate.objects.all()[jt_pos % jt_count]
            except IndexError as e:
                # seems to happen every now and then due to some race condition
                print(f'Warning: IndexError on {jt_pos % jt_count} JT, error: {e}')
            jt_pos += 1
        # Copy every truthy concrete field value from the template onto the job.
        jt_defaults = {
            f.attname: getattr(jt, f.attname)
            for f in JobTemplate._meta.get_fields()
            if f.concrete and f.attname in job_field_names and getattr(jt, f.attname)
        }
        jt_defaults['job_template_id'] = jt.pk
        jt_defaults['unified_job_template_id'] = jt.pk  # populated by save method

        # One consistent timestamp for the whole batch: elapsed=0.0 implies
        # started == finished, and hoisting now() out of the loop is cheaper.
        stamp = now() - time_delta
        batch = [
            Job(
                status=STATUS_OPTIONS[i % len(STATUS_OPTIONS)],
                started=stamp,
                created=stamp,
                modified=stamp,
                finished=stamp,
                elapsed=0.0,
                **jt_defaults,
            )
            for i in range(N)
        ]
        ujs = UnifiedJob.objects.bulk_create(batch)
        for uj in ujs:
            uj.unifiedjob_ptr_id = uj.id  # hack around the polymorphic id field not being picked up
        # Insert the Job-table half of the polymorphic pair with raw SQL.
        insert = InsertQuery(Job)
        insert.insert_values(fields, ujs)
        with connection.cursor() as cursor:
            sql, params = insert.sql_with_params()[0]
            cursor.execute(sql, params)
        return ujs[-1], jt_pos, [uj.pk for uj in ujs]

    i = 1
    jt_pos = 0
    created_job_ids = []
    s = time()

    from awx.main.models import JobEvent
    from awx.main.utils.common import create_partition

    # Ensure the job_events partition covering the back-dated window exists.
    start_partition = (now() - time_delta).replace(minute=0,
                                                   second=0,
                                                   microsecond=0)
    create_partition(JobEvent._meta.db_table, start_partition)

    remaining = jobs  # don't clobber the parameter while looping
    while remaining > 0:
        s_loop = time()
        print(f'running batch {i}, runtime {time() - s}')
        _, jt_pos, ujs_pk = make_batch(min(remaining, batch_size), jt_pos)
        print(f'took {time() - s_loop}')
        i += 1
        remaining -= batch_size
        created_job_ids += ujs_pk
    print(f'Created Job IDS: {created_job_ids}')
    return created_job_ids