Example #1
    def test_redo_job(self):
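        """Redo jobs via PUT /v1.0/jobs/<id>: a new job and a failed job
        return 200, a missing job id returns 404, and a running job or an
        already successful job returns 400.
        """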

        for job_type in self.all_job_types:
            job = self._prepare_job_element(job_type)

            jobs = [
                # create an entirely new job
                {
                    "job": job,
                    "expected_error": 200
                },
            ]

            self._test_and_check(jobs)

        response = self.app.get('/v1.0/jobs')
        return_job = response.json

        jobs = return_job['jobs']

        # redo a new job
        for job in jobs:
            response_1 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job['id']},
                                      expect_errors=True)

            self.assertEqual(response_1.status_int, 200)

        response_2 = self.app.put('/v1.0/jobs/123', expect_errors=True)
        self.assertEqual(response_2.status_int, 404)

        # redo a running job
        job_type_3 = constants.JT_NETWORK_UPDATE
        job_3 = self._prepare_job_element(job_type_3)
        resource_id_3 = '#'.join([job_3['resource'][resource_id]
                                  for resource_type, resource_id
                                  in self.job_resource_map[job_type_3]])
        job_running_3 = db_api.register_job(self.context,
                                            job_3['project_id'],
                                            job_type_3,
                                            resource_id_3)

        self.assertEqual(constants.JS_Running, job_running_3['status'])
        response_3 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': job_running_3['id']}, expect_errors=True)

        self.assertEqual(response_3.status_int, 400)

        # redo a failed job
        job_type_4 = constants.JT_NETWORK_UPDATE
        job_4 = self._prepare_job_element(job_type_4)

        job_dict_4 = {
            "job": job_4,
            "expected_error": 200
        }

        response_4 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_4['job']),
                                        expect_errors=True)
        return_job_4 = response_4.json

        self.assertEqual(response_4.status_int, 200)

        db_api.finish_job(self.context,
                          return_job_4['job']['id'],
                          False, timeutils.utcnow())

        job_fail_4 = db_api.get_job(self.context, return_job_4['job']['id'])
        self.assertEqual(constants.JS_Fail, job_fail_4['status'])
        response_5 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_4['job']['id']}, expect_errors=True)

        self.assertEqual(response_5.status_int, 200)

        # redo a successful job
        job_type_6 = constants.JT_NETWORK_UPDATE
        job_6 = self._prepare_job_element(job_type_6)

        job_dict_6 = {
            "job": job_6,
            "expected_error": 200
        }

        response_6 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_6['job']),
                                        expect_errors=True)
        return_job_6 = response_6.json

        with self.context.session.begin():
            job_dict = {'status': constants.JS_Success,
                        'timestamp': timeutils.utcnow(),
                        'extra_id': uuidutils.generate_uuid()}
            core.update_resource(self.context, models.AsyncJob,
                                 return_job_6['job']['id'], job_dict)

        job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id'])
        self.assertEqual(constants.JS_Success, job_succ_6['status'])
        response_7 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_6['job']['id']}, expect_errors=True)

        self.assertEqual(response_7.status_int, 400)
Example #2
        def handle_args(*args, **kwargs):
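            """Register a new job for the payload's resource, then loop
            trying to acquire the job lock; once the lock is obtained, run
            the wrapped handler and mark the job as succeeded or failed.
            """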
            if IN_TEST:
                # NOTE(zhiyuan) the job mechanism causes unpredictable results
                # in unit tests, so we bypass it here. However, we have trouble
                # mocking a decorator that decorates member functions, which is
                # why we use this flag; not an elegant way, though.
                func(*args, **kwargs)
                return
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handler has been running for too long
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # failed to obtain the lock, let another worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # running_job can be None for two reasons: either the
                        # running job has just finished, or all workers failed
                        # to register the job due to a deadlock exception, so
                        # we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # the previous running job has expired, so set its
                        # status to Fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # the previous running job is still valid, so leave
                        # the job to the worker holding the lock
                        break
                # successfully obtained the lock, start executing the handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
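
The handler above closes over func and job_type, so it is the innermost layer of a
decorator factory. A minimal sketch of that enclosing structure, assuming the name
_job_handle and functools.wraps (the exact outer names are not shown in the snippet):

import functools

def _job_handle(job_type):
    # assumed decorator factory, parameterized by the job type
    def handle_func(func):
        @functools.wraps(func)
        def handle_args(*args, **kwargs):
            # the lock/retry loop from the example above would live here;
            # this sketch simply forwards to the wrapped handler
            return func(*args, **kwargs)
        return handle_args
    return handle_func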
Example #3
    def test_delete(self, mock_context):
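        """Delete jobs through the controller: non-admin deletion returns
        403, a missing job returns 404, a running job returns 400, and
        successful, new and failed jobs are removed from the job table.
        """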
        mock_context.return_value = self.context

        # cover all job types.
        # each iteration of the 'for' loop adds one entry to the job log
        # table, so we use the count variable to track the running total of
        # entries in the job log table.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            job = db_api.get_job(self.context, job_2['id'])
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. A successful job that
            # has already been moved to the job log can't be deleted; here the
            # successful job is still in the job table.
            job_3 = self._prepare_job_element(job_type)
            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)

            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            filters_job_4 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_4_succ['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_4_succ['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_4_succ['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_4_succ['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_4)))
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_5['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_5['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_5['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_5['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_6_failed['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_6_failed['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_6_failed['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_6_failed['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_6)))
Example #4
    def test_put(self, mock_context):
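        """Redo jobs through the controller: non-admin returns 403, a
        missing job returns 404, running and successful jobs return 400,
        while failed and new jobs are redone successfully.
        """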
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can redo the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.put(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.put(-123)
            self._validate_error_code(res, 404)

            # failure case, redo a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            res = self.controller.put(job_2['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # failure case, redo a successful job
            job_3 = self._prepare_job_element(job_type)

            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)
            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            res = self.controller.put(job_4['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_4['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_5['id'], False,
                              timeutils.utcnow())
            self.controller.put(job_5['id'])

            db_api.delete_job(self.context, job_5['id'])

            # successful case, redo a new job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.put(job_6['id'])

            db_api.delete_job(self.context, job_6['id'])
Example #5
    def test_put(self, mock_context):
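        """Redo jobs through the controller: non-admin returns 403, a
        missing job returns 404, running and successful jobs return 400,
        while failed and new jobs are redone successfully.
        """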
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])

            # failure case, only admin can redo the job
            job_1 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            self.context.is_admin = False
            res = self.controller.put(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.put(-123)
            self._validate_error_code(res, 404)

            # failure case, redo a running job
            job_2 = db_api.register_job(self.context,
                                        job['project_id'],
                                        job_type, resource_id)
            res = self.controller.put(job_2['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # failure case, redo a successful job
            job_3 = self._prepare_job_element(job_type)

            resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
                                      for resource_type_3, resource_id_3
                                      in self.job_resource_map[job_type]])

            job_4 = db_api.new_job(self.context,
                                   job_3['project_id'],
                                   job_type, resource_id_3)
            with self.context.session.begin():
                job_dict = {'status': constants.JS_Success,
                            'timestamp': timeutils.utcnow(),
                            'extra_id': uuidutils.generate_uuid()}
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            res = self.controller.put(job_4['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_4['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            job_5 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            db_api.finish_job(self.context, job_5['id'], False,
                              timeutils.utcnow())
            self.controller.put(job_5['id'])

            db_api.delete_job(self.context, job_5['id'])

            # successful case, redo a new job
            job_6 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            self.controller.put(job_6['id'])

            db_api.delete_job(self.context, job_6['id'])
Example #6
    def test_get_all_jobs(self, mock_context):
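        """List jobs through the controller and validate the project id,
        job type and job status filters, including contradictory, invalid
        and unsupported filters; non-admin listing returns 403.
        """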
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context, job['project_id'], job_type,
                                    resource_id)
            else:
                db_api.new_job(self.context, job['project_id'], job_type,
                               resource_id)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            project_id_filter_1 = {'project_id': job_project_id_map[job_type]}
            project_id_filter_2 = {'project_id': uuidutils.generate_uuid()}

            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # successful case, filter by project id
            jobs_project_id_filter_1 = self.controller.get_all(
                **project_id_filter_1)
            self.assertEqual(1, len(jobs_project_id_filter_1['jobs']))

            jobs_project_id_filter_2 = self.controller.get_all(
                **project_id_filter_2)
            self.assertEqual(0, len(jobs_project_id_filter_2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by project id, job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(
                    list(project_id_filter_1.items()) +
                    list(job_status_filter_3.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(
                    list(project_id_filter_1.items()) +
                    list(job_status_filter_1.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(
                list(project_id_filter_1.items()) +
                list(job_status_filter_2.items()) +
                list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs, len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs,
                         len(jobs_job_status_filter_3['jobs']))
Example #7
    def test_delete(self, mock_context):
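        """Delete jobs through the controller: non-admin deletion returns
        403, a missing job returns 404, a running job returns 400, and
        successful, new and failed jobs are removed from the job table.
        """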
        mock_context.return_value = self.context

        # cover all job types.
        # each iteration of the 'for' loop adds one entry to the job log
        # table, so we use the count variable to track the running total of
        # entries in the job log table.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'],
                                   job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context,
                                        job['project_id'],
                                        job_type, resource_id)
            job = db_api.get_job(self.context, job_2['id'])
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. A successful job that
            # has already been moved to the job log can't be deleted; here the
            # successful job is still in the job table.
            job_3 = self._prepare_job_element(job_type)
            resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
                                      for resource_type_3, resource_id_3
                                      in self.job_resource_map[job_type]])

            job_4 = db_api.new_job(self.context,
                                   job_3['project_id'],
                                   job_type, resource_id_3)

            with self.context.session.begin():
                job_dict = {'status': constants.JS_Success,
                            'timestamp': timeutils.utcnow(),
                            'extra_id': uuidutils.generate_uuid()}
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            filters_job_4 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_4_succ['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_4_succ['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_4_succ['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_4_succ['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_4)))
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [
                {'key': 'type', 'comparator': 'eq', 'value': job_5['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_5['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_5['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_5['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_6_failed['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_6_failed['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_6_failed['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_6_failed['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_6)))
Example #8
    def test_get_all_jobs_with_pagination(self, mock_context):
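        """List jobs with pagination: validate the status and type filters,
        limit and marker handling, timestamp ordering, and listing across
        the job table and the job log table.
        """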
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job has a project ID that
                # differs from context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context,
                                    job['project_id'], job_type,
                                    resource_id)
                # because jobs are sorted by timestamp, without a time delay
                # all jobs would be created at the same time and
                # paginate_query could not tell them apart
                time.sleep(1)
            else:
                db_api.new_job(self.context,
                               job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # when the client specifies a project ID filter that differs from
            # the project ID in the context, the filter is ignored and the
            # project ID from the context is used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # there is one job whose project ID differs from
            # context.project_id. Since the list operation only retrieves
            # jobs whose project ID equals context.project_id, that special
            # job entry won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(list(job_status_filter_3.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(list(job_status_filter_1.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(list(job_status_filter_2.items()) +
                                      list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failure case, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        pt = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {'status': 'new', 'limit': limit,
                                   'marker': marker}
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        for i in range(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])
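
The marker-and-limit loop above suggests a small client-side helper for walking
result pages via jobs_links. A minimal sketch, assuming get_all() keeps returning
the same 'jobs'/'jobs_links' structure and href format as in the test (the helper
name and signature are illustrative, not part of the API):

import re

def iter_job_pages(controller, limit, **filters):
    # follow the marker embedded in each jobs_links href until no link is left
    link_pattern = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
    res = controller.get_all(limit=limit, **filters)
    yield res['jobs']
    while 'jobs_links' in res:
        match = re.match(link_pattern, res['jobs_links'][0]['href'])
        if match is None:
            break
        res = controller.get_all(limit=limit, marker=match.group(1), **filters)
        yield res['jobs']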
Example #9
    def test_get_all_jobs_with_pagination(self, mock_context):
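        """List jobs with pagination: validate the status and type filters,
        limit and marker handling, timestamp ordering, and listing across
        the job table and the job log table.
        """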
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job has a project ID that
                # differs from context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context, job['project_id'], job_type,
                                    resource_id)
                # because jobs are sorted by timestamp, without a time delay
                # all jobs would be created at the same time and
                # paginate_query could not tell them apart
                time.sleep(1)
            else:
                db_api.new_job(self.context, job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # when the client specifies a project ID filter that differs from
            # the project ID in the context, the filter is ignored and the
            # project ID from the context is used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # there is one job whose project ID differs from
            # context.project_id. Since the list operation only retrieves
            # jobs whose project ID equals context.project_id, that special
            # job entry won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(
                    list(job_status_filter_3.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(
                    list(job_status_filter_1.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(
                list(job_status_filter_2.items()) +
                list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failure case, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        pt = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {
                'status': 'new',
                'limit': limit,
                'marker': marker
            }
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        for i in range(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])