Example #1
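A POST handler that starts a single job on demand and records a 'manual_run' event through the insight.GAProvider tracker.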
    def post(self, job_id):
        job = Job.find(job_id)
        job.pipeline.start_single_job(job)
        tracker = insight.GAProvider()
        tracker.track_event(category='jobs',
                            action='manual_run',
                            label=job.worker_class)
        return job
Example #2
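A GET handler that builds a Cloud Logging filter from the request arguments, fetches one page of log entries for the given pipeline, and returns them together with the token for the next page.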
    def get(self, pipeline_id):
        args = log_parser.parse_args()
        entries = []
        urlfetch.set_default_fetch_deadline(300)

        next_page_token = args.get('next_page_token')
        page_size = 20
        from core import cloud_logging

        project_id = app_identity.get_application_id()
        filter_ = 'logName="projects/%s/logs/%s"' % (project_id,
                                                     cloud_logging.logger_name)
        filter_ += ' AND jsonPayload.labels.pipeline_id="%s"' % pipeline_id
        if args.get('worker_class'):
            filter_ += ' AND jsonPayload.labels.worker_class="%s"' \
                % args.get('worker_class')
        if args.get('job_id'):
            filter_ += ' AND jsonPayload.labels.job_id="%s"' % args.get(
                'job_id')
        if args.get('log_level'):
            filter_ += ' AND jsonPayload.log_level="%s"' % args.get(
                'log_level')
        if args.get('query'):
            filter_ += ' AND jsonPayload.message:"%s"' % args.get('query')
        if args.get('fromdate'):
            filter_ += ' AND timestamp>="%s"' % args.get('fromdate')
        if args.get('todate'):
            filter_ += ' AND timestamp<="%s"' % args.get('todate')
        iterator = cloud_logging.client.list_entries(
            projects=[project_id],
            filter_=filter_,
            order_by=DESCENDING,
            page_size=page_size,
            page_token=next_page_token)
        page = next(iterator.pages)

        for entry in page:
            if isinstance(entry.payload, dict) \
               and entry.payload.get('labels') \
               and entry.payload.get('labels').get('job_id'):

                job = Job.find(entry.payload.get('labels').get('job_id'))
                if job:
                    log = {
                        'timestamp': str(entry.timestamp),
                        'payload': entry.payload,
                        'job_name': job.name,
                        'log_level': entry.payload.get('log_level', 'INFO')
                    }
                    entries.append(log)
            next_page_token = iterator.next_page_token
        return {'entries': entries, 'next_page_token': next_page_token}
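The snippet depends on a core.cloud_logging module and a DESCENDING ordering constant that it doesn't show. A minimal sketch of what they might look like, assuming the pre-v3 google-cloud-logging client library whose Client.list_entries() accepts projects=, filter_=, order_by=, page_size= and page_token=; the logger name below is a placeholder, not the project's actual value:

    # core/cloud_logging.py -- hypothetical reconstruction
    from google.cloud import logging as gcloud_logging
    from google.cloud.logging import DESCENDING  # expands to 'timestamp desc'

    # Shared client whose list_entries() the handler above calls.
    client = gcloud_logging.Client()

    # Log name the pipeline workers write to (placeholder value).
    logger_name = 'pipeline-job-logs'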
Example #3
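A DELETE handler that removes a job, answering 422 while the job's pipeline is active.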
    def delete(self, job_id):
        job = Job.find(job_id)
        abort_if_job_doesnt_exist(job, job_id)

        if job.pipeline.is_blocked():
            return {
                'message': 'Cannot remove a job from an active pipeline'
            }, 422

        job.destroy()
        return {}, 204
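Several of these handlers guard the lookup with abort_if_job_doesnt_exist, which the listing doesn't define. A minimal sketch of such a helper, assuming Flask-RESTful's abort; the project's actual implementation may differ:

    from flask_restful import abort

    def abort_if_job_doesnt_exist(job, job_id):
        # Short-circuit the request with a 404 when the lookup found nothing.
        if job is None:
            abort(404, message="Job {} doesn't exist".format(job_id))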
Example #4
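The same DELETE handler as Example #3, extended to record a 'delete' event through insight.GAProvider.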
    def delete(self, job_id):
        job = Job.find(job_id)
        abort_if_job_doesnt_exist(job, job_id)

        if job.pipeline.is_blocked():
            return {
                'message': 'Cannot remove a job from an active pipeline'
            }, 422

        job.destroy()
        tracker = insight.GAProvider()
        tracker.track_event(category='jobs', action='delete')
        return {}, 204
Example #5
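A PUT handler that updates a job's attributes and relations from the parsed request arguments, again answering 422 while the pipeline is active.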
    def put(self, job_id):
        job = Job.find(job_id)
        abort_if_job_doesnt_exist(job, job_id)

        if job.pipeline.is_blocked():
            return {
                'message': 'Cannot edit a job in an active pipeline'
            }, 422

        args = parser.parse_args()

        job.assign_attributes(args)
        job.save()
        job.save_relations(args)
        return job, 200
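The args consumed here come from a module-level parser the listing doesn't show. A sketch of what it could look like with Flask-RESTful's reqparse; the field names are assumptions, not the project's actual schema:

    from flask_restful import reqparse

    parser = reqparse.RequestParser()
    # Hypothetical job fields accepted by the PUT handler.
    parser.add_argument('name')
    parser.add_argument('worker_class')
    parser.add_argument('params')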
Example #6
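A task-queue POST handler: it instantiates the requested worker class, gives up once App Engine reports MAX_ATTEMPTS failed executions or when the parent job is stopping, and otherwise runs the worker and enqueues whatever follow-up tasks it returns.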
    def post(self):
        """
    NB: you want retrieve the task name with this snippet

        task_name = request.headers.get('X-AppEngine-TaskName')[11:]

    """
        # Clear the memcache client, mainly to avoid memory overflow of
        # the internal hashmap.
        cache.clear_memcache_client()
        retries = int(request.headers.get('X-AppEngine-TaskExecutionCount'))
        args = parser.parse_args()
        logger.debug(args)
        task_name = args['task_name']
        job = Job.find(args['job_id'])
        worker_class = getattr(workers, args['worker_class'])
        worker_params = json.loads(args['worker_params'])
        worker = worker_class(worker_params, job.pipeline_id, job.id)
        if retries >= worker_class.MAX_ATTEMPTS:
            worker.log_error('Execution canceled after %i failed attempts',
                             retries)
            job.task_failed(task_name)
        elif job.status == 'stopping':
            worker.log_warn(
                'Execution canceled as parent job is going to stop')
            job.task_failed(task_name)
        else:
            try:
                workers_to_enqueue = worker.execute()
            except workers.WorkerException as e:
                worker.log_error('Execution failed: %s: %s',
                                 e.__class__.__name__, e)
                job.task_failed(task_name)
            except Exception as e:
                worker.log_error('Unexpected error: %s: %s',
                                 e.__class__.__name__, e)
                raise  # re-raise with the original traceback intact
            else:
                for worker_class_name, worker_params, delay in workers_to_enqueue:
                    job.enqueue(worker_class_name, worker_params, delay)
                job.task_succeeded(task_name)
        return 'OK', 200
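The handler relies on an implicit worker contract: the class exposes a MAX_ATTEMPTS attribute, is constructed with (params, pipeline_id, job_id), and execute() returns an iterable of (worker_class_name, worker_params, delay) tuples describing follow-up tasks. A minimal sketch of a worker honoring that contract; everything beyond the contract itself is made up:

    class EchoWorker(object):
        # Consulted by the handler's retry check above.
        MAX_ATTEMPTS = 3

        def __init__(self, params, pipeline_id, job_id):
            self._params = params
            self._pipeline_id = pipeline_id
            self._job_id = job_id

        def execute(self):
            # Do the actual unit of work here, then ask the handler to
            # enqueue a hypothetical follow-up worker 60 seconds later.
            return [('CleanupWorker', {'job_id': self._job_id}, 60)]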
Example #7
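A variant of Example #6 that first resolves each name listed in the worker class's GLOBAL_SETTINGS from GeneralSetting and injects the value into worker_params.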
  def post(self):
    """
    NB: you want retrieve the task name with this snippet

        task_name = request.headers.get('X-AppEngine-TaskName')[11:]

    """
    urlfetch.set_default_fetch_deadline(300)
    retries = int(request.headers.get('X-AppEngine-TaskExecutionCount'))
    args = parser.parse_args()
    logger.debug(args)
    task_name = args['task_name']
    job = Job.find(args['job_id'])
    worker_class = getattr(workers, args['worker_class'])
    worker_params = json.loads(args['worker_params'])

    for setting in worker_class.GLOBAL_SETTINGS:
      worker_params[setting] = GeneralSetting.where(name=setting).first().value

    worker = worker_class(worker_params, job.pipeline_id, job.id)
    if retries >= worker_class.MAX_ATTEMPTS:
      worker.log_error('Execution canceled after %i failed attempts', retries)
      job.task_failed(task_name)
    elif job.status == 'stopping':
      worker.log_warn('Execution canceled as parent job is going to stop')
      job.task_failed(task_name)
    else:
      try:
        workers_to_enqueue = worker.execute()
      except workers.WorkerException as e:
        worker.log_error('Execution failed: %s: %s', e.__class__.__name__, e)
        job.task_failed(task_name)
      except Exception as e:
        worker.log_error('Unexpected error: %s: %s', e.__class__.__name__, e)
        raise  # re-raise with the original traceback intact
      else:
        for worker_class_name, worker_params, delay in workers_to_enqueue:
          job.enqueue(worker_class_name, worker_params, delay)
        job.task_succeeded(task_name)
    return 'OK', 200
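The only difference from Example #6 is the GLOBAL_SETTINGS loop: each name the worker class lists there is looked up in GeneralSetting and merged into worker_params before the worker is built (note that .first() presumably returns None when no row matches, so the loop assumes every listed setting exists). On the worker side the declaration might look like this; the setting name is hypothetical:

    class ApiWorker(object):
        MAX_ATTEMPTS = 3
        # Setting names the task handler resolves from GeneralSetting
        # and injects into worker_params before instantiation.
        GLOBAL_SETTINGS = ['api_key']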
Example #8
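A GET handler that looks up a job by id and aborts when it doesn't exist.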
    def get(self, job_id):
        job = Job.find(job_id)
        abort_if_job_doesnt_exist(job, job_id)
        return job
Example #9
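The same manual-run POST handler as Example #1, without the analytics tracking.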
    def post(self, job_id):
        job = Job.find(job_id)
        job.pipeline.start_single_job(job)
        return job