Example #1
0
    def delete(self, job_id):
        """Delete a job record by its id.

        A job found in the job log table cannot be deleted, and a RUNNING
        job cannot be deleted either.  A SUCCESS job is archived into the
        job log via ``finish_job``; any other job is removed from the job
        table directly.

        :param job_id: id of the job to delete
        :returns: an empty dict on success, otherwise a formatted API error
        """
        context = t_context.extract_context_from_environ()

        # only admin may delete jobs
        if not policy.enforce(context, policy.ADMIN_API_JOB_DELETE):
            return utils.format_api_error(403,
                                          _('Unauthorized to delete a job'))

        try:
            # jobs already archived in the job log are immutable
            db_api.get_job_from_log(context, job_id)
            return utils.format_api_error(
                400,
                _('Job %(job_id)s is from job log') % {'job_id': job_id})
        # catch only "not found" (matching the get_job lookup below) so an
        # unexpected DB failure is not silently treated as "not in job log"
        except t_exc.ResourceNotFound:
            try:
                job = db_api.get_job(context, job_id)
            except t_exc.ResourceNotFound:
                return utils.format_api_error(
                    404,
                    _('Job %(job_id)s not found') % {'job_id': job_id})
        try:
            # a RUNNING job must not be deleted from under its worker
            if job['status'] == constants.JS_Running:
                return utils.format_api_error(
                    400, (_('Failed to delete the running job %(job_id)s') % {
                        "job_id": job_id
                    }))
            # if job status = SUCCESS, move the job entry to job log table,
            # then delete it from job table.
            elif job['status'] == constants.JS_Success:
                db_api.finish_job(context, job_id, True, timeutils.utcnow())
                pecan.response.status = 200
                return {}

            db_api.delete_job(context, job_id)
            pecan.response.status = 200
            return {}
        except Exception as e:
            LOG.exception('Failed to delete the job: '
                          '%(exception)s ', {'exception': e})
            return utils.format_api_error(500, _('Failed to delete the job'))
Example #2
0
    def delete(self, job_id):
        """Delete a job record by its id.

        A job found in the job log table cannot be deleted, and a RUNNING
        job cannot be deleted either.  A SUCCESS job is archived into the
        job log via ``finish_job``; any other job is removed from the job
        table directly.

        :param job_id: id of the job to delete
        :returns: an empty dict on success, otherwise a formatted API error
        """
        context = t_context.extract_context_from_environ()

        # only admin may delete jobs
        if not policy.enforce(context, policy.ADMIN_API_JOB_DELETE):
            return utils.format_api_error(
                403, _('Unauthorized to delete a job'))

        try:
            # jobs already archived in the job log are immutable
            db_api.get_job_from_log(context, job_id)
            return utils.format_api_error(
                400, _('Job %(job_id)s is from job log') % {'job_id': job_id})
        # catch only "not found" (matching the get_job lookup below) so an
        # unexpected DB failure is not silently treated as "not in job log"
        except t_exc.ResourceNotFound:
            try:
                job = db_api.get_job(context, job_id)
            except t_exc.ResourceNotFound:
                return utils.format_api_error(
                    404, _('Job %(job_id)s not found') % {'job_id': job_id})
        try:
            # a RUNNING job must not be deleted from under its worker
            if job['status'] == constants.JS_Running:
                return utils.format_api_error(
                    400, (_('Failed to delete the running job %(job_id)s') %
                          {"job_id": job_id}))
            # if job status = SUCCESS, move the job entry to job log table,
            # then delete it from job table.
            elif job['status'] == constants.JS_Success:
                db_api.finish_job(context, job_id, True, timeutils.utcnow())
                pecan.response.status = 200
                return {}

            db_api.delete_job(context, job_id)
            pecan.response.status = 200
            return {}
        except Exception as e:
            LOG.exception('Failed to delete the job: '
                          '%(exception)s ', {'exception': e})
            return utils.format_api_error(
                500, _('Failed to delete the job'))
Example #3
0
    def test_redo_job(self):
        """Exercise PUT /v1.0/jobs/{id} (redo) across all job states."""

        # create one brand-new job per known job type
        for job_type in self.all_job_types:
            job = self._prepare_job_element(job_type)

            jobs = [
                # create an entirely new job
                {
                    "job": job,
                    "expected_error": 200
                },
            ]

            self._test_and_check(jobs)

        response = self.app.get('/v1.0/jobs')
        return_job = response.json

        jobs = return_job['jobs']

        # redo a new job
        for job in jobs:
            response_1 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job['id']},
                                      expect_errors=True)

            self.assertEqual(response_1.status_int, 200)

        # redoing a nonexistent job id returns 404
        response_2 = self.app.put('/v1.0/jobs/123', expect_errors=True)
        self.assertEqual(response_2.status_int, 404)

        # redo a running job
        job_type_3 = constants.JT_NETWORK_UPDATE
        job_3 = self._prepare_job_element(job_type_3)
        # resource ids are joined with '#' to form the composite resource id
        # used when registering the job
        resource_id_3 = '#'.join([job_3['resource'][resource_id]
                                  for resource_type, resource_id
                                  in self.job_resource_map[job_type_3]])
        job_running_3 = db_api.register_job(self.context,
                                            job_3['project_id'],
                                            job_type_3,
                                            resource_id_3)

        self.assertEqual(constants.JS_Running, job_running_3['status'])
        response_3 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': job_running_3['id']}, expect_errors=True)

        # a running job cannot be redone
        self.assertEqual(response_3.status_int, 400)

        # redo a failed job
        job_type_4 = constants.JT_NETWORK_UPDATE
        job_4 = self._prepare_job_element(job_type_4)

        job_dict_4 = {
            "job": job_4,
            "expected_error": 200
        }

        response_4 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_4['job']),
                                        expect_errors=True)
        return_job_4 = response_4.json

        self.assertEqual(response_4.status_int, 200)

        # mark the job as failed so it becomes redoable
        db_api.finish_job(self.context,
                          return_job_4['job']['id'],
                          False, timeutils.utcnow())

        job_fail_4 = db_api.get_job(self.context, return_job_4['job']['id'])
        self.assertEqual(constants.JS_Fail, job_fail_4['status'])
        response_5 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_4['job']['id']}, expect_errors=True)

        self.assertEqual(response_5.status_int, 200)

        # redo a successful job
        job_type_6 = constants.JT_NETWORK_UPDATE
        job_6 = self._prepare_job_element(job_type_6)

        job_dict_6 = {
            "job": job_6,
            "expected_error": 200
        }

        response_6 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_6['job']),
                                        expect_errors=True)
        return_job_6 = response_6.json

        # force the job status to SUCCESS directly in the database
        with self.context.session.begin():
            job_dict = {'status': constants.JS_Success,
                        'timestamp': timeutils.utcnow(),
                        'extra_id': uuidutils.generate_uuid()}
            core.update_resource(self.context, models.AsyncJob,
                                 return_job_6['job']['id'], job_dict)

        job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id'])
        self.assertEqual(constants.JS_Success, job_succ_6['status'])
        response_7 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_6['job']['id']}, expect_errors=True)

        # a successful job cannot be redone
        self.assertEqual(response_7.status_int, 400)
Example #4
0
    def test_get_one_and_get_all(self):
        """List jobs via GET /v1.0/jobs[/detail] with and without filters."""
        all_job_ids = {}
        all_job_project_ids = {}

        # create one job per type, recording each job's id and project id
        # for later filter checks
        index = 0
        for job_type in self.all_job_types:
            job = self._prepare_job_element(job_type)

            jobs = [
                {
                    "job": job,
                    "expected_error": 200
                },
            ]

            self._test_and_check(jobs)

            response = self.app.get('/v1.0/jobs')
            return_job = response.json

            all_job_ids[index] = return_job['jobs'][index]['id']
            all_job_project_ids[job_type] = (
                return_job['jobs'][index]['project_id'])

            index = index + 1

        service_uris = ['jobs', 'jobs/detail']
        amount_of_all_jobs = len(self.all_job_types)
        # with no filters all jobs are returned
        for service_uri in service_uris:
            response_1 = self.app.get('/v1.0/%(service_uri)s' % {
                'service_uri': service_uri})
            return_jobs_1 = response_1.json

            self.assertEqual(amount_of_all_jobs, len(return_jobs_1['jobs']))
            # NOTE(review): these containment checks run against the raw
            # response object (body text), not a parsed job dict --
            # confirm that substring matching on the body is intended
            self.assertIn('status', response_1)
            self.assertIn('resource', response_1)
            self.assertIn('project_id', response_1)
            self.assertIn('id', response_1)
            self.assertIn('timestamp', response_1)
            self.assertIn('type', response_1)

            # internal bookkeeping fields must not be exposed
            self.assertNotIn('extra_id', response_1)
            self.assertNotIn('resource_id', response_1)

        # use job status filter
        response_2 = self.app.get('/v1.0/jobs?status=new')
        return_jobs_2 = response_2.json

        self.assertEqual(amount_of_all_jobs, len(return_jobs_2['jobs']))

        response = self.app.get('/v1.0/jobs?status=fail')
        return_jobs_3 = response.json

        self.assertEqual(0, len(return_jobs_3['jobs']))

        # mark the first third of the jobs failed and the next third
        # successful (successful jobs move to the job log table)
        amount_of_fail_jobs = int(amount_of_all_jobs / 3)
        for i in xrange(amount_of_fail_jobs):
            db_api.finish_job(self.context,
                              all_job_ids[i], False,
                              timeutils.utcnow())

        amount_of_succ_jobs = int(amount_of_all_jobs / 3)
        for i in xrange(amount_of_succ_jobs):
            db_api.finish_job(self.context,
                              all_job_ids[amount_of_fail_jobs + i], True,
                              timeutils.utcnow())

        for service_uri in service_uris:
            response = self.app.get('/v1.0/%(service_uri)s?status=fail' % {
                'service_uri': service_uri})
            return_jobs = response.json

            self.assertEqual(amount_of_fail_jobs, len(return_jobs['jobs']))

            response = self.app.get('/v1.0/%(service_uri)s?status=success'
                                    '' % {'service_uri': service_uri})
            return_jobs = response.json

            self.assertEqual(amount_of_succ_jobs, len(return_jobs['jobs']))

            # use job type filter or project id filter
            for job_type in self.all_job_types:
                response = self.app.get('/v1.0/%(service_uri)s?type=%(type)s'
                                        '' % {'service_uri': service_uri,
                                              'type': job_type})
                return_job = response.json

                self.assertEqual(1, len(return_job['jobs']))

                response = self.app.get(
                    '/v1.0/%(service_uri)s?project_id=%(project_id)s' % {
                        'service_uri': service_uri,
                        'project_id': all_job_project_ids[job_type]})
                return_job = response.json

                self.assertEqual(1, len(return_job['jobs']))

                # combine job type filter and project id filter
                response = self.app.get(
                    '/v1.0/%(service_uri)s?project_id=%(project_id)s&'
                    'type=%(type)s' % {
                        'service_uri': service_uri,
                        'project_id': all_job_project_ids[job_type],
                        'type': job_type})
                return_job = response.json

                self.assertEqual(1, len(return_job['jobs']))

            # combine job type filter, project id filter and job status filter
            for i in xrange(amount_of_all_jobs):
                if i < amount_of_fail_jobs:
                    # this aims to test service "/v1.0/jobs/{id}"
                    response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
                        'id': all_job_ids[i]})
                    return_job_1 = response_1.json

                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?'
                        'project_id=%(project_id)s&'
                        'type=%(type)s&'
                        'status=%(status)s' % {
                            'service_uri': service_uri,
                            'project_id': return_job_1['job']['project_id'],
                            'type': return_job_1['job']['type'],
                            'status': 'fail'})

                    return_job_2 = response_2.json

                    self.assertEqual(1, len(return_job_2['jobs']))

                elif ((i >= amount_of_fail_jobs
                       ) and (i < amount_of_fail_jobs + amount_of_succ_jobs)):
                    # those jobs are set to 'success' and they are moved to
                    # job log. their job ids are not stored in all_job_ids
                    job_type = self.all_job_types[i]
                    response = self.app.get(
                        '/v1.0/%(service_uri)s?project_id=%(project_id)s&'
                        'type=%(type)s&status=%(status)s' % {
                            'service_uri': service_uri,
                            'project_id': all_job_project_ids[job_type],
                            'type': job_type,
                            'status': 'success'})

                    return_job = response.json

                    self.assertEqual(1, len(return_job['jobs']))

                    # an invalid status value matches nothing
                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?status=%(status)s'
                        '&type=%(type)s' % {
                            'service_uri': service_uri,
                            'status': "success-x",
                            'type': job_type})
                    return_job_2 = response_2.json
                    self.assertEqual(0, len(return_job_2['jobs']))

                else:
                    # remaining jobs are still in the NEW state
                    response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
                        'id': all_job_ids[i]})
                    return_job_1 = response_1.json

                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?project_id=%(project_id)s&'
                        'type=%(type)s&status=%(status)s' % {
                            'service_uri': service_uri,
                            'project_id': return_job_1['job']['project_id'],
                            'type': return_job_1['job']['type'],
                            'status': 'new'})

                    return_job_2 = response_2.json

                    self.assertEqual(1, len(return_job_2['jobs']))

                    response_3 = self.app.get(
                        '/v1.0/%(service_uri)s?status=%(status)s'
                        '&type=%(type)s' % {
                            'service_uri': service_uri,
                            'status': "new-x",
                            'type': return_job_1['job']['type']})
                    return_job_3 = response_3.json
                    self.assertEqual(0, len(return_job_3['jobs']))

            # use unsupported filter, it will raise 400 error
            response = self.app.get('/v1.0/%(service_uri)s?'
                                    'fake_filter=%(fake_filter)s'
                                    '' % {'service_uri': service_uri,
                                          'fake_filter': "fake_filter"},
                                    expect_errors=True)

            self.assertEqual(response.status_int, 400)

            # use invalid filter, it will return empty set
            response = self.app.get('/v1.0/%(service_uri)s?status=%(status)s'
                                    '' % {'service_uri': service_uri,
                                          'status': "new-x"})
            return_job = response.json
            self.assertEqual(0, len(return_job['jobs']))

        @patch.object(context, 'extract_context_from_environ',
                      new=fake_admin_context)
        def test_get_job_schemas(self):
            response = self.app.get('/v1.0/jobs/schemas')
            return_job_schemas = response.json

            job_schemas = []
            for job_type in self.all_job_types:
                job = {}
                resource = []
                for resource_type, resource_id in (
                        self.job_resource_map[job_type]):
                    resource.append(resource_id)
                job['resource'] = resource
                job['type'] = job_type
                job_schemas.append(job)

            self.assertEqual(job_schemas, return_job_schemas['schemas'])
Example #5
0
    def test_post(self, mock_context):
        """Exercise the controller's POST (job creation) handler.

        Covers authorization failure, malformed bodies, invalid type /
        resource / project_id parameters, and successful creation.
        """
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            kw_job = {'job': job}

            # failure case, only admin can create the job
            self.context.is_admin = False
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # failure case, request body not found
            kw_job_1 = {'job_1': job}
            res = self.controller.post(**kw_job_1)
            self._validate_error_code(res, 400)

            # failure case, wrong job type parameter
            # (missing, empty, then unknown type)
            job_type_backup = job.pop('type')
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['type'] = ''
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['type'] = job_type_backup + '_1'
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['type'] = job_type_backup

            # failure case, wrong resource parameter
            # (missing, incomplete, then with an unknown key)
            job_resource_backup = job.pop('resource')
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['resource'] = copy.deepcopy(job_resource_backup)
            job['resource'].popitem()
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            fake_resource = 'fake_resource'
            job['resource'][fake_resource] = fake_resource
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['resource'] = job_resource_backup

            # failure case, wrong project id parameter
            # (missing, empty, then mismatching the resource's project)
            project_id_backup = job.pop('project_id')
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['project_id'] = ''
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['project_id'] = uuidutils.generate_uuid()
            res = self.controller.post(**kw_job)
            self._validate_error_code(res, 400)

            job['project_id'] = project_id_backup

            # successful case, create an entirely new job. Because the job
            # status returned from controller has been formatted, so we not
            # only validate the database records, but also validate the return
            # value of the controller.
            job_1 = self.controller.post(**kw_job)['job']
            job_in_db_1 = db_api.get_job(self.context, job_1['id'])
            self.assertEqual(job_type, job_in_db_1['type'])
            self.assertEqual(job['project_id'], job_in_db_1['project_id'])
            self.assertEqual(constants.JS_New, job_in_db_1['status'])

            self.assertEqual('NEW', job_1['status'])
            self.assertEqual(len(constants.job_resource_map[job['type']]),
                             len(job_1['resource']))
            # internal bookkeeping fields must not leak into the API result
            self.assertFalse('resource_id' in job_1)
            self.assertFalse('extra_id' in job_1)
            db_api.delete_job(self.context, job_1['id'])

            # successful case, target job already exists in the job table
            # and its status is NEW, then this newer job will be picked by
            # job handler.
            job_2 = self.controller.post(**kw_job)['job']
            job_in_db_2 = db_api.get_job(self.context, job_2['id'])
            job_3 = self.controller.post(**kw_job)['job']
            job_in_db_3 = db_api.get_job(self.context, job_3['id'])

            self.assertEqual(job_type, job_in_db_2['type'])
            self.assertEqual(job['project_id'], job_in_db_2['project_id'])
            self.assertEqual(constants.JS_New, job_in_db_2['status'])

            self.assertEqual('NEW', job_2['status'])
            self.assertEqual(len(constants.job_resource_map[job['type']]),
                             len(job_2['resource']))
            self.assertFalse('resource_id' in job_2)
            self.assertFalse('extra_id' in job_2)

            self.assertEqual(job_type, job_in_db_3['type'])
            self.assertEqual(job['project_id'], job_in_db_3['project_id'])
            self.assertEqual(constants.JS_New, job_in_db_3['status'])

            self.assertEqual('NEW', job_3['status'])
            self.assertEqual(len(constants.job_resource_map[job['type']]),
                             len(job_3['resource']))
            self.assertFalse('resource_id' in job_3)
            self.assertFalse('extra_id' in job_3)

            # clean up before the next job type iteration
            db_api.finish_job(self.context, job_3['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_3['id'])
Example #6
0
    def test_put(self, mock_context):
        """Exercise the controller's PUT (redo job) handler.

        Covers authorization failure, unknown job id, and redoing jobs in
        the running, successful, failed and new states.
        """
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            # resource ids are joined with '#' to form the composite
            # resource id used by the job registry
            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can redo the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.put(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.put(-123)
            self._validate_error_code(res, 404)

            # failure case, redo a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            res = self.controller.put(job_2['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # failure case, redo a successful job
            job_3 = self._prepare_job_element(job_type)

            # NOTE: resource_id_3 inside the brackets is the comprehension's
            # own loop variable (comprehension scope), not a use-before-def
            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)
            # force the job status to SUCCESS directly in the database
            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            res = self.controller.put(job_4['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_4['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_5['id'], False,
                              timeutils.utcnow())
            self.controller.put(job_5['id'])

            db_api.delete_job(self.context, job_5['id'])

            # successful case, redo a new job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.put(job_6['id'])

            db_api.delete_job(self.context, job_6['id'])
Example #7
0
    def test_delete(self, mock_context):
        """Exercise the controller's DELETE handler across job states.

        Covers authorization failure, unknown job id, deleting running,
        successful, new and failed jobs, and verifies successful jobs are
        archived into the job log.
        """
        mock_context.return_value = self.context

        # cover all job types.
        # each 'for' loop adds one item in job log table, we set count variable
        # to record dynamic total job entries in job log table.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            # resource ids are joined with '#' to form the composite
            # resource id used by the job registry
            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            job = db_api.get_job(self.context, job_2['id'])
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. successful job from
            # job log can't be deleted, here this successful job is from
            # job table.
            job_3 = self._prepare_job_element(job_type)
            # NOTE: resource_id_3 inside the brackets is the comprehension's
            # own loop variable (comprehension scope), not a use-before-def
            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)

            # force the job status to SUCCESS directly in the database
            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            # the job must be gone from the job table ...
            filters_job_4 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_4_succ['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_4_succ['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_4_succ['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_4_succ['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_4)))
            # ... and archived into the job log instead
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_5['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_5['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_5['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_5['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_6_failed['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_6_failed['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_6_failed['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_6_failed['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_6)))
Example #8
0
    def test_get_one(self, mock_context):
        """Exercise the controller's get_one handler.

        ``get_one`` multiplexes on the id: "schemas" returns job schemas,
        "detail" lists all jobs (including archived ones), and any other
        value is treated as a job id.
        """
        mock_context.return_value = self.context

        # failure case, only admin can list the job's info
        self.context.is_admin = False
        res = self.controller.get_one("schemas")
        self._validate_error_code(res, 403)
        res = self.controller.get_one("detail")
        self._validate_error_code(res, 403)
        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 403)

        self.context.is_admin = True

        # failure case, parameter error
        res = self.controller.get_one("schemas_1")
        self._validate_error_code(res, 404)

        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 404)

        # successful case, set id="schemas" to get job schemas
        job_schemas_2 = self.controller.get_one("schemas")
        # build the expected schema list from the job resource map
        job_schemas_3 = []
        for job_type in self.job_resource_map.keys():
            job = {}
            resource = []
            for resource_type, resource_id in self.job_resource_map[job_type]:
                resource.append(resource_id)
            job['resource'] = resource
            job['type'] = job_type
            job_schemas_3.append(job)

        self.assertEqual(job_schemas_3, job_schemas_2['schemas'])

        # successful case, set id="detail" to get all jobs.
        # first, we need to create jobs in job table.
        amount_of_all_jobs = len(self.job_resource_map.keys())
        all_job_ids = {}
        index = 0
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            # resource ids are joined with '#' to form the composite
            # resource id used by the job registry
            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            all_job_ids[index] = job_1['id']
            index = index + 1

            # validate if the id=job_id, get_one(id=job_id) can take effective
            job_2 = self.controller.get_one(job_1['id'])['job']
            self.assertEqual(job_1['type'], job_2['type'])
            self.assertEqual(job_1['project_id'], job_2['project_id'])
            self.assertEqual("NEW", job_2['status'])

        jobs_1 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_1['jobs']))

        # create jobs in job log table, in order to validate
        # get_one(id=detail) can also get the jobs from job log
        amount_of_succ_jobs = int(len(all_job_ids) / 2)
        for i in xrange(amount_of_succ_jobs):
            db_api.finish_job(self.context, all_job_ids[i], True,
                              timeutils.utcnow())

        jobs_2 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_2['jobs']))

        # status filters apply across both the job and job log tables
        job_status_filter_1 = {'status': 'success'}
        jobs_3 = self.controller.get_one("detail", **job_status_filter_1)
        self.assertEqual(amount_of_succ_jobs, len(jobs_3['jobs']))

        job_status_filter_2 = {'status': 'new'}
        jobs_4 = self.controller.get_one("detail", **job_status_filter_2)
        self.assertEqual(amount_of_all_jobs - amount_of_succ_jobs,
                         len(jobs_4['jobs']))
Exemple #9
0
        def handle_args(*args, **kwargs):
            """Run the wrapped handler under the distributed job lock.

            Creates a job record for (job_type, resource_id), then loops
            trying to register (lock) the job: it runs the handler once the
            lock is obtained, defers to another worker that legitimately
            holds the lock, expires stale running jobs, and gives up after
            CONF.worker_handle_timeout seconds.
            """
            if IN_TEST:
                # NOTE(zhiyuan) job mechanism will cause some unpredictable
                # result in unit test so we would like to bypass it. However
                # we have problem mocking a decorator which decorates member
                # functions, that's why we use this label, not an elegant
                # way though.
                func(*args, **kwargs)
                return
            # args[1] is the request context of the decorated member function
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            # NOTE(review): other call sites in this file pass a project_id
            # argument to db_api.new_job; confirm this call matches the
            # db_api signature actually in use.
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handle is running for a long time
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                # a success at least as recent as the latest NEW record means
                # the job has already been handled, so nothing is left to do
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # fail to obtain the lock, let other worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # there are two reasons that running_job is None. one
                        # is that the running job has just been finished, the
                        # other is that all workers fail to register the job
                        # due to deadlock exception. so we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # previous running job expires, we set its status to
                        # fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # previous running job is still valid, we just leave
                        # the job to the worker who holds the lock
                        break
                # successfully obtain the lock, start to execute handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    # handler failed: mark the job as failed and stop; a
                    # later redo can pick it up again
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
Exemple #10
0
    def test_put(self, mock_context):
        """Exercise the redo (PUT) API for every registered job type.

        Covers: non-admin rejection (403), missing job (404), redoing a
        running job (400), redoing a successful job (400), and the two
        success paths (redo a failed job, redo a new job).
        """
        mock_context.return_value = self.context

        # walk through every job type registered in the resource map
        for job_type in self.job_resource_map.keys():
            spec = self._prepare_job_element(job_type)

            id_parts = [spec['resource'][rid]
                        for _rtype, rid in self.job_resource_map[job_type]]
            resource_id = '#'.join(id_parts)

            # failure case, only admin can redo the job
            denied_job = db_api.new_job(self.context,
                                        spec['project_id'],
                                        job_type, resource_id)
            self.context.is_admin = False
            self._validate_error_code(self.controller.put(denied_job['id']),
                                      403)

            self.context.is_admin = True
            db_api.delete_job(self.context, denied_job['id'])

            # failure case, job not found
            self._validate_error_code(self.controller.put(-123), 404)

            # failure case, redo a running job
            running_job = db_api.register_job(self.context,
                                              spec['project_id'],
                                              job_type, resource_id)
            self._validate_error_code(self.controller.put(running_job['id']),
                                      400)
            db_api.finish_job(self.context, running_job['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, running_job['id'])

            # failure case, redo a successful job
            succ_spec = self._prepare_job_element(job_type)

            succ_parts = [succ_spec['resource'][rid]
                          for _rtype, rid in self.job_resource_map[job_type]]
            succ_resource_id = '#'.join(succ_parts)

            succ_job = db_api.new_job(self.context,
                                      succ_spec['project_id'],
                                      job_type, succ_resource_id)
            # force the job into SUCCESS state directly in the database
            with self.context.session.begin():
                core.update_resource(
                    self.context, models.AsyncJob, succ_job['id'],
                    {'status': constants.JS_Success,
                     'timestamp': timeutils.utcnow(),
                     'extra_id': uuidutils.generate_uuid()})

            self._validate_error_code(self.controller.put(succ_job['id']),
                                      400)
            db_api.finish_job(self.context, succ_job['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            failed_job = db_api.new_job(self.context,
                                        spec['project_id'],
                                        job_type, resource_id)
            db_api.finish_job(self.context, failed_job['id'], False,
                              timeutils.utcnow())
            self.controller.put(failed_job['id'])

            db_api.delete_job(self.context, failed_job['id'])

            # successful case, redo a new job
            fresh_job = db_api.new_job(self.context,
                                       spec['project_id'],
                                       job_type, resource_id)
            self.controller.put(fresh_job['id'])

            db_api.delete_job(self.context, fresh_job['id'])
Exemple #11
0
    def test_delete(self, mock_context):
        """Exercise the delete API for every job type and job state.

        Covers: non-admin rejection (403), missing job (404), deleting a
        running job (400), deleting a successful job (moved to the job log),
        deleting a new job, and deleting a failed job.
        """
        mock_context.return_value = self.context

        # cover all job types.
        # each 'for' loop adds one item in job log table, we set count variable
        # to record dynamic total job entries in job log table.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'],
                                   job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context,
                                        job['project_id'],
                                        job_type, resource_id)
            # NOTE: an unused ``job = db_api.get_job(...)`` read used to sit
            # here; it shadowed the prepared job element referenced below, so
            # the useless assignment has been removed.
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. successful job from
            # job log can't be deleted, here this successful job is from
            # job table.
            job_3 = self._prepare_job_element(job_type)
            resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
                                      for resource_type_3, resource_id_3
                                      in self.job_resource_map[job_type]])

            job_4 = db_api.new_job(self.context,
                                   job_3['project_id'],
                                   job_type, resource_id_3)

            # force the job into SUCCESS state directly in the database
            with self.context.session.begin():
                job_dict = {'status': constants.JS_Success,
                            'timestamp': timeutils.utcnow(),
                            'extra_id': uuidutils.generate_uuid()}
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            filters_job_4 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_4_succ['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_4_succ['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_4_succ['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_4_succ['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_4)))
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [
                {'key': 'type', 'comparator': 'eq', 'value': job_5['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_5['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_5['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_5['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_6_failed['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_6_failed['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_6_failed['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_6_failed['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_6)))
Exemple #12
0
    def test_get_all_jobs_with_pagination(self, mock_context):
        """Exercise the list API with filters, markers and limits.

        Covers: project-ID / status / type filters, contradictory and
        unsupported filters, limit/marker pagination over both the job
        table and the job log table, and pagination-link traversal.
        """
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job has a project ID
                # that is different from the context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context,
                                    job['project_id'], job_type,
                                    resource_id)
                # because jobs are sorted by timestamp, without time delay then
                # all jobs are created at the same time, paginate_query can't
                # identify them
                time.sleep(1)
            else:
                db_api.new_job(self.context,
                               job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # test when specify project ID filter from client, if this
            # project ID is different from the one from context, then
            # it will be ignored, project ID from context will be
            # used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # there is one job whose project ID is different from
            # context.project_id. As the list operation only retrieves the
            # jobs whose project ID equals to context.project_id, so this
            # special job entry won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(list(job_status_filter_3.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(list(job_status_filter_1.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(list(job_status_filter_2.items()) +
                                      list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failed cases, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        # raw string: avoids invalid-escape-sequence warnings for \? and \w
        pt = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {'status': 'new', 'limit': limit,
                                   'marker': marker}
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        # use range (Python 3 compatible) instead of the removed xrange
        for i in range(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])
Exemple #13
0
    def test_get_one_and_get_all(self, mock_context):
        """Validate get_one ("schemas"/"detail"/job ID) and marker paging.

        Creates one job per type, pushes half of them into the job log,
        then checks status filtering and marker placement in both the job
        table and the job log table.
        """
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # failure case, only admin can list the job's info
        self.context.is_admin = False
        res = self.controller.get_one("schemas")
        self._validate_error_code(res, 403)
        res = self.controller.get_one("detail")
        self._validate_error_code(res, 403)
        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 403)

        self.context.is_admin = True

        # failure case, parameter error
        res = self.controller.get_one("schemas_1")
        self._validate_error_code(res, 404)

        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 404)

        # successful case, set id="schemas" to get job schemas
        job_schemas_2 = self.controller.get_one("schemas")
        job_schemas_3 = []
        for job_type in self.job_resource_map.keys():
            job = {}
            resource = []
            for resource_type, resource_id in self.job_resource_map[job_type]:
                resource.append(resource_id)
            job['resource'] = resource
            job['type'] = job_type
            job_schemas_3.append(job)

        self.assertEqual(job_schemas_3, job_schemas_2['schemas'])

        # successful case, set id="detail" to get all jobs.
        # first, we need to create jobs in job table.
        amount_of_all_jobs = len(self.job_resource_map.keys())
        all_job_ids = {}
        index = 0
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            # for test convenience, all jobs have same project ID
            job['project_id'] = self.context.project_id

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])
            job_1 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            all_job_ids[index] = job_1['id']
            index = index + 1
            time.sleep(1)

            # validate if the id=job_id, get_one(id=job_id) can take effective
            job_2 = self.controller.get_one(job_1['id'])['job']
            self.assertEqual(job_1['type'], job_2['type'])
            self.assertEqual(job_1['project_id'], job_2['project_id'])
            self.assertEqual("NEW", job_2['status'])

        jobs_1 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_1['jobs']))

        # create jobs in job log table, in order to validate
        # get_one(id=detail) can also get the jobs from job log
        amount_of_succ_jobs = int(len(all_job_ids) / 2)
        # use range (Python 3 compatible) instead of the removed xrange
        for i in range(amount_of_succ_jobs):
            db_api.finish_job(self.context, all_job_ids[i], True,
                              timeutils.utcnow())
            time.sleep(1)

        jobs_2 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_2['jobs']))

        job_status_filter_1 = {'status': 'success'}
        jobs_3 = self.controller.get_one("detail", **job_status_filter_1)
        self.assertEqual(amount_of_succ_jobs, len(jobs_3['jobs']))

        # set marker in job log
        res = self.controller.get_all(marker=jobs_3['jobs'][0]['id'],
                                      limit=amount_of_succ_jobs)
        self.assertEqual(amount_of_succ_jobs - 1, len(res['jobs']))

        job_status_filter_2 = {'status': 'new'}
        amount_of_new_jobs = amount_of_all_jobs - amount_of_succ_jobs
        jobs_4 = self.controller.get_one("detail", **job_status_filter_2)
        self.assertEqual(amount_of_new_jobs, len(jobs_4['jobs']))

        # set marker in job
        res = self.controller.get_all(marker=jobs_4['jobs'][0]['id'],
                                      limit=amount_of_new_jobs)
        self.assertEqual(amount_of_new_jobs, len(res['jobs']))
Exemple #14
0
    def test_redo_job(self):
        """Exercise the redo (PUT /v1.0/jobs/<id>) API end to end.

        Covers: redoing a new job (200), a missing job (404), a running
        job (400), a failed job (200) and a successful job (400), driving
        the API through the WSGI test app.
        """

        # create one brand-new job per job type via the POST API
        for job_type in self.all_job_types:
            job = self._prepare_job_element(job_type)

            jobs = [
                # create an entirely new job
                {
                    "job": job,
                    "expected_error": 200
                },
            ]

            self._test_and_check(jobs)

        response = self.app.get('/v1.0/jobs')
        return_job = response.json

        jobs = return_job['jobs']

        # redo a new job
        for job in jobs:
            response_1 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job['id']},
                                      expect_errors=True)

            self.assertEqual(response_1.status_int, 200)

        # redo a job that does not exist
        response_2 = self.app.put('/v1.0/jobs/123', expect_errors=True)
        self.assertEqual(response_2.status_int, 404)

        # redo a running job
        job_type_3 = constants.JT_NETWORK_UPDATE
        job_3 = self._prepare_job_element(job_type_3)
        resource_id_3 = '#'.join([job_3['resource'][resource_id]
                                  for resource_type, resource_id
                                  in self.job_resource_map[job_type_3]])
        # register_job puts the job directly into RUNNING state
        job_running_3 = db_api.register_job(self.context,
                                            job_3['project_id'],
                                            job_type_3,
                                            resource_id_3)

        self.assertEqual(constants.JS_Running, job_running_3['status'])
        response_3 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': job_running_3['id']}, expect_errors=True)

        self.assertEqual(response_3.status_int, 400)

        # redo a failed job
        job_type_4 = constants.JT_NETWORK_UPDATE
        job_4 = self._prepare_job_element(job_type_4)

        job_dict_4 = {
            "job": job_4,
            "expected_error": 200
        }

        response_4 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_4['job']),
                                        expect_errors=True)
        return_job_4 = response_4.json

        self.assertEqual(response_4.status_int, 200)

        # mark the freshly created job as failed so it can be redone
        db_api.finish_job(self.context,
                          return_job_4['job']['id'],
                          False, timeutils.utcnow())

        job_fail_4 = db_api.get_job(self.context, return_job_4['job']['id'])
        self.assertEqual(constants.JS_Fail, job_fail_4['status'])
        response_5 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_4['job']['id']}, expect_errors=True)

        self.assertEqual(response_5.status_int, 200)

        # redo a successful job
        job_type_6 = constants.JT_NETWORK_UPDATE
        job_6 = self._prepare_job_element(job_type_6)

        job_dict_6 = {
            "job": job_6,
            "expected_error": 200
        }

        response_6 = self.app.post_json('/v1.0/jobs',
                                        dict(job=job_dict_6['job']),
                                        expect_errors=True)
        return_job_6 = response_6.json

        # force the job into SUCCESS state directly in the database
        with self.context.session.begin():
            job_dict = {'status': constants.JS_Success,
                        'timestamp': timeutils.utcnow(),
                        'extra_id': uuidutils.generate_uuid()}
            core.update_resource(self.context, models.AsyncJob,
                                 return_job_6['job']['id'], job_dict)

        job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id'])
        self.assertEqual(constants.JS_Success, job_succ_6['status'])
        response_7 = self.app.put('/v1.0/jobs/%(id)s' % {
            'id': return_job_6['job']['id']}, expect_errors=True)

        # successful jobs cannot be redone
        self.assertEqual(response_7.status_int, 400)
Exemple #15
0
    def test_get_one_and_get_all(self, mock_context):
        """Exercise job listing (GET /v1.0/jobs and /v1.0/jobs/detail).

        One job per known job type is created; the first one is given a
        project ID different from ``context.project_id``, so it must be
        excluded from every project-scoped listing (hence the recurring
        ``- 1`` in the expected counts).  The listing is then checked
        with no filter, with status/type/project-ID filters, combined
        filters, and unsupported or invalid filters.

        ``mock_context`` is injected by a ``patch`` decorator that lies
        outside this excerpt.
        """
        self.context.project_id = "fake_project_id"
        mock_context.return_value = self.context

        # bookkeeping used by the filter checks below:
        # index -> job ID, job type -> project ID
        all_job_ids = {}
        all_job_project_ids = {}
        index = 0
        for job_type in self.all_job_types:
            if index == 0:
                # the first job has a project ID that differs from
                # context.project_id
                job = self._prepare_job_element(job_type)
            else:
                job = self._prepare_job_element(job_type,
                                                self.context.project_id)

            job = {"job": job, "expected_error": 200}

            back_jobid = self._test_and_obtain_id(job)

            all_job_ids[index] = back_jobid
            all_job_project_ids[job_type] = job['job']['project_id']

            index = index + 1
        service_uris = ['jobs', 'jobs/detail']
        amount_of_all_jobs = len(self.all_job_types)
        # with no filters all jobs are returned
        for service_uri in service_uris:
            response_1 = self.app.get('/v1.0/%(service_uri)s' % {
                'service_uri': service_uri})
            return_jobs_1 = response_1.json

            self.assertEqual(amount_of_all_jobs - 1,
                             len(return_jobs_1['jobs']))
            # NOTE(review): assertIn/assertNotIn against the WebTest
            # response object performs a substring search on the raw
            # response body, not a key lookup on an individual job dict --
            # presumably intended as a rough field-presence check; confirm.
            self.assertIn('status', response_1)
            self.assertIn('resource', response_1)
            self.assertIn('project_id', response_1)
            self.assertIn('id', response_1)
            self.assertIn('timestamp', response_1)
            self.assertIn('type', response_1)

            self.assertNotIn('extra_id', response_1)
            self.assertNotIn('resource_id', response_1)

        # use job status filter
        response_2 = self.app.get('/v1.0/jobs?status=new')
        return_jobs_2 = response_2.json

        self.assertEqual(amount_of_all_jobs - 1, len(return_jobs_2['jobs']))

        response = self.app.get('/v1.0/jobs?status=fail')
        return_jobs_3 = response.json

        self.assertEqual(0, len(return_jobs_3['jobs']))

        # move one third of the jobs to FAIL and the next third to
        # SUCCESS (successful jobs are migrated to the job log table)
        # NOTE(review): xrange implies Python 2 or six.moves -- verify
        # which one the file imports.
        amount_of_fail_jobs = int(amount_of_all_jobs / 3)
        for i in xrange(amount_of_fail_jobs):
            db_api.finish_job(self.context,
                              all_job_ids[i], False,
                              timeutils.utcnow())

        amount_of_succ_jobs = int(amount_of_all_jobs / 3)
        for i in xrange(amount_of_succ_jobs):
            db_api.finish_job(self.context,
                              all_job_ids[amount_of_fail_jobs + i], True,
                              timeutils.utcnow())

        for service_uri in service_uris:
            response = self.app.get('/v1.0/%(service_uri)s?status=fail' % {
                'service_uri': service_uri})
            return_jobs = response.json

            # the job at index 0 (now failed) belongs to another project,
            # so it is not listed
            self.assertEqual(amount_of_fail_jobs - 1, len(return_jobs['jobs']))

            response = self.app.get('/v1.0/%(service_uri)s?status=success'
                                    '' % {'service_uri': service_uri})
            return_jobs = response.json

            self.assertEqual(amount_of_succ_jobs, len(return_jobs['jobs']))

            # project ID filter in URL query string will be ignored, and
            # only the project ID in which the user is authorized will
            # be used as filter.
            response = self.app.get(
                '/v1.0/%(service_uri)s' % {'service_uri': service_uri})
            return_job = response.json

            response1 = self.app.get(
                '/v1.0/%(service_uri)s?project_id=%(project_id)s' % {
                    'service_uri': service_uri,
                    'project_id': uuidutils.generate_uuid()})
            return_job1 = response1.json

            response2 = self.app.get(
                '/v1.0/%(service_uri)s?project_id=%(project_id)s' % {
                    'service_uri': service_uri,
                    'project_id': 'fake_project_id'})
            return_job2 = response2.json

            self.assertEqual(len(return_job2['jobs']),
                             len(return_job1['jobs']))
            self.assertEqual(len(return_job['jobs']),
                             len(return_job2['jobs']))

            # use job type filter
            count = 1
            for job_type in self.all_job_types:
                response = self.app.get('/v1.0/%(service_uri)s?type=%(type)s'
                                        '' % {'service_uri': service_uri,
                                              'type': job_type})
                return_job = response.json
                if count == 1:
                    # the first job type belongs to the foreign project
                    self.assertEqual(0, len(return_job['jobs']))
                else:
                    self.assertEqual(1, len(return_job['jobs']))
                count += 1

            # combine job type and job status filter
            for i in xrange(1, amount_of_all_jobs):
                if i < amount_of_fail_jobs:
                    # this aims to test service "/v1.0/jobs/{id}"
                    response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
                        'id': all_job_ids[i]})
                    return_job_1 = response_1.json

                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?'
                        'type=%(type)s&'
                        'status=%(status)s' % {
                            'service_uri': service_uri,
                            'type': return_job_1['job']['type'],
                            'status': 'fail'})

                    return_job_2 = response_2.json

                    self.assertEqual(1, len(return_job_2['jobs']))

                elif ((i >= amount_of_fail_jobs
                       ) and (i < amount_of_fail_jobs + amount_of_succ_jobs)):
                    # those jobs are set to 'success' and they are moved to
                    # job log. their job ids are not stored in all_job_ids
                    job_type = self.all_job_types[i]
                    response = self.app.get(
                        '/v1.0/%(service_uri)s?'
                        'type=%(type)s&status=%(status)s' % {
                            'service_uri': service_uri,
                            'type': job_type,
                            'status': 'success'})

                    return_job = response.json

                    self.assertEqual(1, len(return_job['jobs']))

                    # a malformed status value must yield an empty set
                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?status=%(status)s'
                        '&type=%(type)s' % {
                            'service_uri': service_uri,
                            'status': "success-x",
                            'type': job_type})
                    return_job_2 = response_2.json
                    self.assertEqual(0, len(return_job_2['jobs']))

                else:
                    # remaining jobs are still NEW
                    response_1 = self.app.get('/v1.0/jobs/%(id)s' % {
                        'id': all_job_ids[i]})
                    return_job_1 = response_1.json

                    response_2 = self.app.get(
                        '/v1.0/%(service_uri)s?'
                        'type=%(type)s&status=%(status)s' % {
                            'service_uri': service_uri,
                            'type': return_job_1['job']['type'],
                            'status': 'new'})

                    return_job_2 = response_2.json

                    self.assertEqual(1, len(return_job_2['jobs']))

                    response_3 = self.app.get(
                        '/v1.0/%(service_uri)s?status=%(status)s'
                        '&type=%(type)s' % {
                            'service_uri': service_uri,
                            'status': "new-x",
                            'type': return_job_1['job']['type']})
                    return_job_3 = response_3.json
                    self.assertEqual(0, len(return_job_3['jobs']))

            # use unsupported filter, it will raise 400 error
            response = self.app.get('/v1.0/%(service_uri)s?'
                                    'fake_filter=%(fake_filter)s'
                                    '' % {'service_uri': service_uri,
                                          'fake_filter': "fake_filter"},
                                    expect_errors=True)

            self.assertEqual(response.status_int, 400)

            # use invalid filter, it will return empty set
            response = self.app.get('/v1.0/%(service_uri)s?status=%(status)s'
                                    '' % {'service_uri': service_uri,
                                          'status': "new-x"})
            return_job = response.json
            self.assertEqual(0, len(return_job['jobs']))

        @patch.object(context, 'extract_context_from_environ',
                      new=fake_admin_context)
        def test_get_job_schemas(self):
            """The schemas endpoint must describe every known job type.

            Each schema entry carries the job type and the list of
            resource-ID names required to trigger that job.
            """
            resp = self.app.get('/v1.0/jobs/schemas')

            # build the expected schema list from the job resource map
            expected_schemas = [
                {'type': job_type,
                 'resource': [resource_id for _, resource_id in
                              self.job_resource_map[job_type]]}
                for job_type in self.all_job_types
            ]

            self.assertEqual(expected_schemas, resp.json['schemas'])
Exemple #16
0
    def test_get_all_jobs_with_pagination(self, mock_context):
        """List jobs through the controller with filters and pagination.

        Creates one job per job type (the first three RUNNING, the rest
        NEW; the first job under a different project ID so that it is
        excluded from project-scoped listings), then validates filtering
        by project ID, status and type, the limit/marker pagination
        parameters, and the ``jobs_links`` continuation URLs.

        ``mock_context`` is injected by a ``patch`` decorator that lies
        outside this excerpt.
        """
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job has a project ID
                # that is different from the context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            # resource IDs are joined with '#' -- the composite key format
            # the job table uses for a job's target resources
            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            if count <= amount_of_running_jobs:
                # register_job stores the job in RUNNING state
                db_api.register_job(self.context, job['project_id'], job_type,
                                    resource_id)
                # because jobs are sorted by timestamp, without time delay then
                # all jobs are created at the same time, paginate_query can't
                # identify them
                time.sleep(1)
            else:
                # new_job stores the job in NEW state
                db_api.new_job(self.context, job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # test when specify project ID filter from client, if this
            # project ID is different from the one from context, then
            # it will be ignored, project ID from context will be
            # used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # there is one job whose project ID is different from
            # context.project_id. As the list operation only retrieves the
            # jobs whose project ID equals to context.project_id, so this
            # special job entry won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                # the first job type belongs to the foreign project
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(
                    list(job_status_filter_3.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(
                    list(job_status_filter_1.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(
                list(job_status_filter_2.items()) +
                list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        # one RUNNING job belongs to the foreign project, hence - 1
        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failed cases, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        # paginate past the first running job using its ID as marker
        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        # results must be sorted by timestamp, newest first
        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        # NOTE(review): non-raw string with regex escapes (\?, \w) --
        # this is a DeprecationWarning/SyntaxWarning on newer Python 3;
        # should be a raw string r'...'.
        pt = '/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        # follow the jobs_links continuation URLs until exhausted
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {
                'status': 'new',
                'limit': limit,
                'marker': marker
            }
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        # NOTE(review): xrange implies Python 2 or six.moves -- verify
        # which one the file imports.
        for i in xrange(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        # successful jobs were moved out of the job table into the log
        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])
Exemple #17
0
        def handle_args(*args, **kwargs):
            """Run the wrapped handler under the per-resource job lock.

            Closure variables (``func``, ``job_type``, ``IN_TEST``) come
            from the enclosing decorator; ``args[1]`` is assumed to be
            the request context and ``kwargs['payload']`` maps
            ``job_type`` to the target resource ID -- confirm against
            callers.  The loop below records a NEW job entry, then
            competes with other workers for the RUNNING lock, retrying
            until the job succeeds, expires, or the handle times out.
            """
            if IN_TEST:
                # NOTE(zhiyuan) job mechanism will cause some unpredictable
                # result in unit test so we would like to bypass it. However
                # we have problem mocking a decorator which decorates member
                # functions, that's why we use this label, not an elegant
                # way though.
                func(*args, **kwargs)
                return
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            # record the job request; the lock is acquired separately below
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                # NOTE(review): delta.seconds ignores the days component of
                # the timedelta; total_seconds() may be intended -- confirm.
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handle is running for a long time
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                # a success at least as new as the latest NEW entry means
                # another worker already handled this job
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # fail to obtain the lock, let other worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # there are two reasons that running_job is None. one
                        # is that the running job has just been finished, the
                        # other is that all workers fail to register the job
                        # due to deadlock exception. so we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # previous running job expires, we set its status to
                        # fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # previous running job is still valid, we just leave
                        # the job to the worker who holds the lock
                        break
                # successfully obtain the lock, start to execute handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    # handler failed: mark the job FAIL and stop retrying
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                # handler succeeded: mark SUCCESS, then loop to see whether a
                # newer NEW entry arrived while we were running
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
Exemple #18
0
    def test_post(self, mock_context):
        """Create jobs through the controller, error paths first.

        For every job type: verify admin enforcement, malformed request
        bodies (wrong wrapper key, bad/missing type, bad/missing
        resource, bad/missing project ID), then successful creation --
        including the case where a NEW job for the same resource already
        exists, in which case the newer entry is the one the job handler
        will pick up.
        """
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            kw_job = {'job': job}

            def post_and_expect(code, body=kw_job):
                # submit the current request body and check the error code
                self._validate_error_code(self.controller.post(**body), code)

            def verify_new_job(returned, stored):
                # the controller formats the status it returns, so check
                # both the database record and the returned value
                self.assertEqual(job_type, stored['type'])
                self.assertEqual(job['project_id'], stored['project_id'])
                self.assertEqual(constants.JS_New, stored['status'])

                self.assertEqual('NEW', returned['status'])
                self.assertEqual(
                    len(constants.job_resource_map[job['type']]),
                    len(returned['resource']))
                self.assertFalse('resource_id' in returned)
                self.assertFalse('extra_id' in returned)

            # failure case, only admin can create the job
            self.context.is_admin = False
            post_and_expect(403)
            self.context.is_admin = True

            # failure case, request body not found
            post_and_expect(400, body={'job_1': job})

            # failure case, wrong job type parameter
            saved_type = job.pop('type')
            post_and_expect(400)
            for bad_type in ('', saved_type + '_1'):
                job['type'] = bad_type
                post_and_expect(400)
            job['type'] = saved_type

            # failure case, wrong resource parameter
            saved_resource = job.pop('resource')
            post_and_expect(400)

            job['resource'] = copy.deepcopy(saved_resource)
            job['resource'].popitem()
            post_and_expect(400)

            job['resource']['fake_resource'] = 'fake_resource'
            post_and_expect(400)
            job['resource'] = saved_resource

            # failure case, wrong project id parameter
            saved_project_id = job.pop('project_id')
            post_and_expect(400)
            for bad_project_id in ('', uuidutils.generate_uuid()):
                job['project_id'] = bad_project_id
                post_and_expect(400)
            job['project_id'] = saved_project_id

            # successful case, create an entirely new job
            job_1 = self.controller.post(**kw_job)['job']
            verify_new_job(job_1, db_api.get_job(self.context, job_1['id']))
            db_api.delete_job(self.context, job_1['id'])

            # successful case, target job already exists in the job table
            # and its status is NEW, then this newer job will be picked by
            # job handler.
            job_2 = self.controller.post(**kw_job)['job']
            job_in_db_2 = db_api.get_job(self.context, job_2['id'])
            job_3 = self.controller.post(**kw_job)['job']
            job_in_db_3 = db_api.get_job(self.context, job_3['id'])

            verify_new_job(job_2, job_in_db_2)
            verify_new_job(job_3, job_in_db_3)

            db_api.finish_job(self.context, job_3['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_3['id'])