Example #1
File: schedules.py Project: ameade/qonos
    def _get_request_params(self, request):
        filter_args = {}
        params = request.params
        if params.get('next_run_after') is not None:
            next_run_after = params['next_run_after']
            next_run_after = timeutils.parse_isotime(next_run_after)
            next_run_after = timeutils.normalize_time(next_run_after)
            filter_args['next_run_after'] = next_run_after

        if params.get('next_run_before') is not None:
            next_run_before = params['next_run_before']
            next_run_before = timeutils.parse_isotime(next_run_before)
            next_run_before = timeutils.normalize_time(next_run_before)
            filter_args['next_run_before'] = next_run_before

        if request.params.get('tenant') is not None:
            filter_args['tenant'] = request.params['tenant']

        filter_args['limit'] = params.get('limit')
        filter_args['marker'] = params.get('marker')

        for filter_key in params.keys():
            if filter_key not in filter_args:
                filter_args[filter_key] = params[filter_key]

        return filter_args
Example #2
File: schedules.py Project: cp16net/qonos
    def _get_request_params(self, request):
        filter_args = {}
        params = request.params
        if params.get('next_run_after') is not None:
            next_run_after = params['next_run_after']
            next_run_after = timeutils.parse_isotime(next_run_after)
            next_run_after = timeutils.normalize_time(next_run_after)
            filter_args['next_run_after'] = next_run_after

        if params.get('next_run_before') is not None:
            next_run_before = params['next_run_before']
            next_run_before = timeutils.parse_isotime(next_run_before)
            next_run_before = timeutils.normalize_time(next_run_before)
            filter_args['next_run_before'] = next_run_before

        if request.params.get('tenant') is not None:
            filter_args['tenant'] = request.params['tenant']

        filter_args['limit'] = params.get('limit')
        filter_args['marker'] = params.get('marker')

        for filter_key in params.keys():
            if filter_key not in filter_args:
                filter_args[filter_key] = params[filter_key]

        return filter_args
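
Both snippets above take raw ISO 8601 strings from the query string and turn them into naive UTC datetimes before using them as filter values. Below is a minimal standalone sketch of that conversion (not qonos code), assuming the oslo_utils.timeutils module, whose parse_isotime/normalize_time interface matches the openstack.common copy these examples import; the timestamp value is made up.

from oslo_utils import timeutils

raw = '2013-03-21T17:05:00+02:00'            # e.g. params['next_run_after']
aware = timeutils.parse_isotime(raw)         # timezone-aware datetime (+02:00 offset)
naive_utc = timeutils.normalize_time(aware)  # shifted to UTC, tzinfo dropped

print(aware)      # 2013-03-21 17:05:00+02:00
print(naive_utc)  # 2013-03-21 15:05:00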
Example #3
def job_get_all(params={}):
    jobs = copy.deepcopy(DATA['jobs'].values())
    JOB_BASE_FILTERS = [
        'schedule_id', 'tenant', 'action', 'worker_id', 'status', 'timeout',
        'hard_timeout'
    ]

    for key in JOB_BASE_FILTERS:
        if key in params:
            value = params.get(key)
            if type(value) is datetime.datetime:
                value = timeutils.normalize_time(value).replace(microsecond=0)

            for job in reversed(jobs):
                job_value = job.get(key)
                if job_value and type(job_value) is datetime.datetime:
                    job_value = job_value.replace(microsecond=0)
                if not (job_value == value):
                    del jobs[jobs.index(job)]

    for job in jobs:
        job['job_metadata'] =\
            job_meta_get_all_by_job_id(job['id'])

    marker = params.get('marker')
    limit = params.get('limit')
    jobs = _do_pagination(jobs, marker, limit)

    return jobs
Example #4
File: api.py Project: clefelhocz/qonos
def job_get_all(params={}):
    jobs = copy.deepcopy(DATA['jobs'].values())
    JOB_BASE_FILTERS = ['schedule_id',
                        'tenant',
                        'action',
                        'worker_id',
                        'status',
                        'timeout',
                        'hard_timeout']

    for key in JOB_BASE_FILTERS:
        if key in params:
            value = params.get(key)
            if type(value) is datetime.datetime:
                value = timeutils.normalize_time(value).replace(microsecond=0)

            for job in reversed(jobs):
                job_value = job.get(key)
                if job_value and type(job_value) is datetime.datetime:
                    job_value = job_value.replace(microsecond=0)
                if not (job_value == value):
                    del jobs[jobs.index(job)]

    for job in jobs:
        job['job_metadata'] =\
            job_meta_get_all_by_job_id(job['id'])

    marker = params.get('marker')
    limit = params.get('limit')
    jobs = _do_pagination(jobs, marker, limit)

    return jobs
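
In both versions of job_get_all() above, a datetime filter value is normalized to UTC and stripped of microseconds, and each stored job value is stripped the same way, so two timestamps that differ only in sub-second precision still compare equal. A small standard-library illustration of why that matters (the timestamps are made up):

import datetime

stored = datetime.datetime(2013, 3, 21, 15, 5, 0, 431000)  # value kept in DATA['jobs']
wanted = datetime.datetime(2013, 3, 21, 15, 5, 0)          # normalized filter value

print(stored == wanted)                         # False: microseconds differ
print(stored.replace(microsecond=0) == wanted)  # True: sub-second precision dropped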
Example #5
File: jobs.py Project: broble/qonos
    def list(self, request):
        params = request.params.copy()

        try:
            params = utils.get_pagination_limit(params)
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=str(e))

        if 'status' in params:
            params['status'] = str(params['status']).upper()

        if 'timeout' in params:
            timeout = timeutils.parse_isotime(params['timeout'])
            params['timeout'] = timeutils.normalize_time(timeout)

        if 'hard_timeout' in params:
            hard_timeout = timeutils.parse_isotime(params['hard_timeout'])
            params['hard_timeout'] = timeutils.normalize_time(hard_timeout)

        try:
            jobs = self.db_api.job_get_all(params)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()

        limit = params.get('limit')
        if len(jobs) != 0 and len(jobs) == limit:
            next_page = '/v1/jobs?marker=%s' % jobs[-1].get('id')
        else:
            next_page = None

        for job in jobs:
            utils.serialize_datetimes(job)
            api_utils.serialize_job_metadata(job)

        links = [{'rel': 'next', 'href': next_page}]
        return {'jobs': jobs, 'jobs_links': links}
Example #6
File: jobs.py Project: clefelhocz/qonos
    def list(self, request):
        params = request.params.copy()

        try:
            params = utils.get_pagination_limit(params)
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=str(e))

        if 'status' in params:
            params['status'] = str(params['status']).upper()

        if 'timeout' in params:
            timeout = timeutils.parse_isotime(params['timeout'])
            params['timeout'] = timeutils.normalize_time(timeout)

        if 'hard_timeout' in params:
            hard_timeout = timeutils.parse_isotime(params['hard_timeout'])
            params['hard_timeout'] = timeutils.normalize_time(hard_timeout)

        try:
            jobs = self.db_api.job_get_all(params)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()

        limit = params.get('limit')
        if len(jobs) != 0 and len(jobs) == limit:
            next_page = '/v1/jobs?marker=%s' % jobs[-1].get('id')
        else:
            next_page = None

        for job in jobs:
            utils.serialize_datetimes(job)
            api_utils.serialize_job_metadata(job)

        links = [{'rel': 'next', 'href': next_page}]
        return {'jobs': jobs, 'jobs_links': links}
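
The list() handler above only advertises a next page when the current page came back exactly limit jobs long, using the last job's id as the marker. A standalone sketch of that rule (next_link is a hypothetical helper, not part of qonos):

def next_link(jobs, limit):
    # Mirrors the check in list(): a full page implies there may be more rows.
    if jobs and len(jobs) == limit:
        return '/v1/jobs?marker=%s' % jobs[-1]['id']
    return None

print(next_link([{'id': 'j1'}, {'id': 'j2'}], limit=2))  # /v1/jobs?marker=j2
print(next_link([{'id': 'j1'}], limit=2))                # None: partial page, no next link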
Example #7
    def update_status(self, request, job_id, body):
        status = body.get('status')
        if not status:
            raise webob.exc.HTTPBadRequest()

        values = {'status': status['status'].upper()}
        if 'timeout' in status:
            timeout = timeutils.parse_isotime(status['timeout'])
            values['timeout'] = timeutils.normalize_time(timeout)

        job = None
        try:
            job = self.db_api.job_update(job_id, values)
        except exception.NotFound:
            msg = _('Job %s could not be found.') % job_id
            raise webob.exc.HTTPNotFound(explanation=msg)

        if status['status'].upper() in ['ERROR', 'CANCELLED']:
            values = self._get_error_values(status, job)
            self.db_api.job_fault_create(values)

        return {'status': {'status': job['status'], 'timeout': job['timeout']}}
Example #8
File: jobs.py Project: broble/qonos
    def update_status(self, request, job_id, body):
        status = body.get('status')
        if not status:
            raise webob.exc.HTTPBadRequest()

        values = {'status': status['status'].upper()}
        if 'timeout' in status:
            timeout = timeutils.parse_isotime(status['timeout'])
            values['timeout'] = timeutils.normalize_time(timeout)

        job = None
        try:
            job = self.db_api.job_update(job_id, values)
        except exception.NotFound:
            msg = _('Job %s could not be found.') % job_id
            raise webob.exc.HTTPNotFound(explanation=msg)

        if status['status'].upper() in ['ERROR', 'CANCELLED']:
            values = self._get_error_values(status, job)
            self.db_api.job_fault_create(values)

        return {'status': {'status': job['status'],
                           'timeout': job['timeout']}}
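
update_status() reads everything it needs from the nested 'status' object in the request body. A hypothetical body that would exercise both branches above ('status' is required, 'timeout' is an optional ISO 8601 string; the values here are made up):

body = {
    'status': {
        'status': 'error',                  # upper-cased to 'ERROR', so a job fault record is also created
        'timeout': '2013-03-21T17:05:00Z',  # parsed and normalized before the DB update
    }
}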
Example #9
    def _process_job(self, job):
        payload = {'job': job}
        if job['status'] == 'QUEUED':
            self.send_notification_start(payload)
        else:
            self.send_notification_retry(payload)

        job_id = job['id']

        hard_timeout = timeutils.normalize_time(
            timeutils.parse_isotime(job['hard_timeout']))
        hard_timed_out = hard_timeout <= self._get_utcnow()
        if hard_timed_out:
            msg = ('Job %(job_id)s has reached/exceeded its'
                   ' hard timeout: %(hard_timeout)s.' %
                   {'job_id': job_id, 'hard_timeout': job['hard_timeout']})
            self._job_hard_timed_out(job, msg)
            LOG.info(_('[%(worker_tag)s] Job hard timed out: %(msg)s') %
                     {'worker_tag': self.get_worker_tag(), 'msg': msg})
            return

        max_retried = job['retry_count'] > self.max_retry
        if max_retried:
            msg = ('Job %(job_id)s has reached/exceeded its'
                   ' max_retry count: %(retry_count)s.' %
                   {'job_id': job_id, 'retry_count': job['retry_count']})
            self._job_max_retried(job, msg)
            LOG.info(_('[%(worker_tag)s] Job max_retry reached: %(msg)s') %
                     {'worker_tag': self.get_worker_tag(), 'msg': msg})
            return

        schedule = self._get_schedule(job)
        if schedule is None:
            msg = ('Schedule %(schedule_id)s not found for job %(job_id)s' %
                   {'schedule_id': job['schedule_id'], 'job_id': job_id})
            self._job_cancelled(job, msg)

            LOG.info(_('[%(worker_tag)s] Job cancelled: %(msg)s') %
                     {'worker_tag': self.get_worker_tag(),
                      'msg': msg})
            return

        now = self._get_utcnow()
        self.next_timeout = now + self.initial_timeout
        self._job_processing(job, self.next_timeout)
        self.next_update = self._get_utcnow() + self.update_interval

        instance_id = self._get_instance_id(job)
        if not instance_id:
            msg = ('Job %s does not specify an instance_id in its metadata.'
                   % job_id)
            self._job_cancelled(job, msg)
            return

        image_id = self._get_image_id(job)
        if image_id is None:
            image_id = self._create_image(job, instance_id,
                                          schedule)
            if image_id is None:
                return
        else:
            LOG.info(_("[%(worker_tag)s] Resuming image: %(image_id)s")
                     % {'worker_tag': self.get_worker_tag(),
                        'image_id': image_id})

        active = False
        retry = True

        while retry and not active and not self.stopping:
            image_status = self._poll_image_status(job, image_id)

            active = image_status == 'ACTIVE'
            if not active:
                retry = True
                try:
                    self._update_job(job_id, "PROCESSING")
                except exc.OutOfTimeException:
                    retry = False
                else:
                    time.sleep(self.image_poll_interval)

        if active:
            self._process_retention(instance_id,
                                    self.current_job['schedule_id'])
            self._job_succeeded(self.current_job)
        elif not active and not retry:
            self._job_timed_out(self.current_job)
        elif self.stopping:
            # Timeout job so it gets picked up again quickly rather than
            # queuing up behind a bunch of new jobs, but not so soon that
            # another worker will pick it up before everything is shut down
            # and thus burn through the retries
            timeout = self._get_utcnow() + self.timeout_worker_stop
            self._job_processing(self.current_job, timeout=timeout)

        LOG.debug("[%s] Snapshot complete" % self.get_worker_tag())
Example #10
    def _process_job(self, job):
        payload = {'job': job}
        if job['status'] == 'QUEUED':
            self.send_notification_start(payload)
        else:
            self.send_notification_retry(payload)

        job_id = job['id']

        hard_timeout = timeutils.normalize_time(
            timeutils.parse_isotime(job['hard_timeout']))
        hard_timed_out = hard_timeout <= self._get_utcnow()
        if hard_timed_out:
            msg = ('Job %(job_id)s has reached/exceeded its'
                   ' hard timeout: %(hard_timeout)s.' % {
                       'job_id': job_id,
                       'hard_timeout': job['hard_timeout']
                   })
            self._job_hard_timed_out(job, msg)
            LOG.info(
                _('[%(worker_tag)s] Job hard timed out: %(msg)s') % {
                    'worker_tag': self.get_worker_tag(),
                    'msg': msg
                })
            return

        max_retried = job['retry_count'] > self.max_retry
        if max_retried:
            msg = ('Job %(job_id)s has reached/exceeded its'
                   ' max_retry count: %(retry_count)s.' % {
                       'job_id': job_id,
                       'retry_count': job['retry_count']
                   })
            self._job_max_retried(job, msg)
            LOG.info(
                _('[%(worker_tag)s] Job max_retry reached: %(msg)s') % {
                    'worker_tag': self.get_worker_tag(),
                    'msg': msg
                })
            return

        schedule = self._get_schedule(job)
        if schedule is None:
            msg = ('Schedule %(schedule_id)s not found for job %(job_id)s' % {
                'schedule_id': job['schedule_id'],
                'job_id': job_id
            })
            self._job_cancelled(job, msg)

            LOG.info(
                _('[%(worker_tag)s] Job cancelled: %(msg)s') % {
                    'worker_tag': self.get_worker_tag(),
                    'msg': msg
                })
            return

        now = self._get_utcnow()
        self.next_timeout = now + self.initial_timeout
        self._job_processing(job, self.next_timeout)
        self.next_update = self._get_utcnow() + self.update_interval

        instance_id = self._get_instance_id(job)
        if not instance_id:
            msg = ('Job %s does not specify an instance_id in its metadata.' %
                   job_id)
            self._job_cancelled(job, msg)
            return

        image_id = self._get_image_id(job)
        if image_id is None:
            image_id = self._create_image(job, instance_id, schedule)
            if image_id is None:
                return
        else:
            LOG.info(
                _("[%(worker_tag)s] Resuming image: %(image_id)s") % {
                    'worker_tag': self.get_worker_tag(),
                    'image_id': image_id
                })

        active = False
        retry = True

        while retry and not active and not self.stopping:
            image_status = self._poll_image_status(job, image_id)

            active = image_status == 'ACTIVE'
            if not active:
                retry = True
                try:
                    self._update_job(job_id, "PROCESSING")
                except exc.OutOfTimeException:
                    retry = False
                else:
                    time.sleep(self.image_poll_interval)

        if active:
            self._process_retention(instance_id,
                                    self.current_job['schedule_id'])
            self._job_succeeded(self.current_job)
        elif not active and not retry:
            self._job_timed_out(self.current_job)
        elif self.stopping:
            # Timeout job so it gets picked up again quickly rather than
            # queuing up behind a bunch of new jobs, but not so soon that
            # another worker will pick it up before everything is shut down
            # and thus burn through the retries
            timeout = self._get_utcnow() + self.timeout_worker_stop
            self._job_processing(self.current_job, timeout=timeout)

        LOG.debug("[%s] Snapshot complete" % self.get_worker_tag())
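
The hard-timeout check at the top of _process_job() follows the same pattern as the earlier examples: the job's ISO 8601 deadline is normalized to a naive UTC datetime and compared against the current UTC time. A minimal sketch of just that comparison, assuming oslo_utils.timeutils and a made-up deadline in the past:

import datetime

from oslo_utils import timeutils

hard_timeout = timeutils.normalize_time(
    timeutils.parse_isotime('2013-03-21T15:00:00Z'))
hard_timed_out = hard_timeout <= datetime.datetime.utcnow()

print(hard_timed_out)  # True: the deadline has long since passed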