Example #1
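Verifies that configure_route pushes each pod's east-west routes to the peer router, adds a default route via the north-south bridge IP, and sets the collected routes on the north-south router.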
    def test_configure_extra_routes_with_external_network(self, mock_update):
        top_router_id = 'router_id'
        project_id = uuidutils.generate_uuid()
        bridge_infos = self._prepare_east_west_network_test(top_router_id)
        ns_bridge_ip, ns_router_id = self._prepare_snat_test(top_router_id)
        db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
                       top_router_id)
        self.xmanager.configure_route(
            self.context,
            payload={constants.JT_CONFIGURE_ROUTE: top_router_id})
        calls = []
        ns_routes = []
        for i in range(2):
            routes = []
            for ip in bridge_infos[i]['vm_ips']:
                route = {'nexthop': bridge_infos[i]['bridge_ip'],
                         'destination': ip + '/32'}
                routes.append(route)
                ns_routes.append(route)
            routes.append({'nexthop': ns_bridge_ip,
                           'destination': '0.0.0.0/0'})
            call = mock.call(self.context, bridge_infos[1 - i]['router_id'],
                             {'router': {'routes': routes}})
            calls.append(call)
        calls.append(mock.call(self.context, ns_router_id,
                               {'router': {'routes': ns_routes}}))
        self._check_extra_routes_calls(calls, mock_update.call_args_list)
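Every example in this listing shares the same two-step pattern: record the job with db_api.new_job, then trigger the handler, either directly (in unit tests) or through an RPC cast (see the invoke_method examples near the end). A minimal sketch of the pattern, assuming Tricircle's module layout; the helper name is hypothetical:

    # minimal sketch, not part of the examples above; module paths and the
    # helper name are assumptions
    from tricircle.common import constants
    from tricircle.db import api as db_api

    def enqueue_and_run(context, project_id, router_id, xmanager):
        # new_job() inserts a NEW entry keyed by project, job type and
        # resource id
        db_api.new_job(context, project_id,
                       constants.JT_CONFIGURE_ROUTE, router_id)
        # handlers read the resource id out of the payload dict
        xmanager.configure_route(
            context, payload={constants.JT_CONFIGURE_ROUTE: router_id})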
Example #2
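Checks run-expiry handling: a stale RUNNING record for the same resource should not stop the handler from executing and logging the job.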
    def test_job_run_expire(self):
        job_type = 'fake_resource'

        @xmanager._job_handle(job_type)
        def fake_handle(self, ctx, payload):
            pass

        fake_id = uuidutils.generate_uuid()
        fake_project_id = uuidutils.generate_uuid()
        payload = {job_type: fake_id}
        db_api.new_job(self.context, fake_project_id, job_type, fake_id)
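        # a RUNNING record for the same resource, stamped 200 seconds in the
        # past; assumed to be past the run expiry window, so it should not
        # block the new run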
        expired_job = {
            'id': uuidutils.generate_uuid(),
            'type': job_type,
            'timestamp': datetime.datetime.now() - datetime.timedelta(0, 200),
            'status': constants.JS_Running,
            'resource_id': fake_id,
            'extra_id': constants.SP_EXTRA_ID
        }
        core.create_resource(self.context, models.AsyncJob, expired_job)
        fake_handle(None, self.context, payload=payload)

        logs = core.query_resource(self.context, models.AsyncJobLog, [], [])

        self.assertEqual(fake_id, logs[0]['resource_id'])
        self.assertEqual(job_type, logs[0]['type'])
Example #3
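Drives the worker-handle timeout path: the mocked registration and status lookups return None, and worker_handle_timeout is cut to one second so the handler gives up quickly.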
    def test_worker_handle_timeout(self, mock_register, mock_get):
        job_type = 'fake_resource'

        @xmanager._job_handle(job_type)
        def fake_handle(self, ctx, payload):
            pass

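        # shrink the timeout to one second so the handler reaches the
        # timeout branch quickly; the mocked register/get calls never
        # report success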
        cfg.CONF.set_override('worker_handle_timeout', 1)
        mock_register.return_value = None
        mock_get.return_value = None

        fake_id = uuidutils.generate_uuid()
        fake_project_id = uuidutils.generate_uuid()
        payload = {job_type: fake_id}
        db_api.new_job(self.context, fake_project_id, job_type, fake_id)
        fake_handle(None, self.context, payload=payload)
Example #4
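The basic success path: after the decorated handler runs, the job log contains an entry with the job's resource id and type.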
    def test_job_handle(self):
        job_type = 'fake_resource'

        @xmanager._job_handle(job_type)
        def fake_handle(self, ctx, payload):
            pass

        fake_id = 'fake_id'
        fake_project_id = uuidutils.generate_uuid()
        payload = {job_type: fake_id}
        db_api.new_job(self.context, fake_project_id, job_type, fake_id)
        fake_handle(None, self.context, payload=payload)

        logs = core.query_resource(self.context, models.AsyncJobLog, [], [])

        self.assertEqual(fake_id, logs[0]['resource_id'])
        self.assertEqual(job_type, logs[0]['type'])
Example #5
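When the handler raises, the job stays out of the job log; the job table ends up with one NEW and one FAIL record for the same resource.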
    def test_job_handle_exception(self):
        job_type = 'fake_resource'

        @xmanager._job_handle(job_type)
        def fake_handle(self, ctx, payload):
            raise Exception()

        fake_id = 'fake_id'
        fake_project_id = uuidutils.generate_uuid()
        payload = {job_type: fake_id}
        db_api.new_job(self.context, fake_project_id, job_type, fake_id)
        fake_handle(None, self.context, payload=payload)

        jobs = core.query_resource(self.context, models.AsyncJob, [], [])
        expected_status = [constants.JS_New, constants.JS_Fail]
        job_status = [job['status'] for job in jobs]
        six.assertCountEqual(self, expected_status, job_status)

        self.assertEqual(fake_id, jobs[0]['resource_id'])
        self.assertEqual(fake_id, jobs[1]['resource_id'])
        self.assertEqual(job_type, jobs[0]['type'])
        self.assertEqual(job_type, jobs[1]['type'])
Example #6
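Covers the delete endpoint for every job type: non-admins get 403, unknown jobs 404, running jobs 400, while successful, new and failed jobs are all removed from the job table.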
    def test_delete(self, mock_context):
        mock_context.return_value = self.context

        # cover all job types.
        # each loop iteration adds one entry to the job log table; count
        # tracks the running total of job log entries.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            job = db_api.get_job(self.context, job_2['id'])
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. A successful job that
            # has moved to the job log can't be deleted; this one is still in
            # the job table.
            job_3 = self._prepare_job_element(job_type)
            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)

            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            filters_job_4 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_4_succ['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_4_succ['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_4_succ['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_4_succ['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_4)))
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_5['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_5['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_5['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_5['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [{
                'key': 'type',
                'comparator': 'eq',
                'value': job_6_failed['type']
            }, {
                'key': 'status',
                'comparator': 'eq',
                'value': job_6_failed['status']
            }, {
                'key': 'resource_id',
                'comparator': 'eq',
                'value': job_6_failed['resource_id']
            }, {
                'key': 'extra_id',
                'comparator': 'eq',
                'value': job_6_failed['extra_id']
            }]
            self.assertEqual(
                0, len(db_api.list_jobs(self.context, filters_job_6)))
Example #7
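Builds a two-pod topology in which net1 and net2 have local gateways on R1 and R2 while all three networks also attach to the cross-pod router R3, then checks the host routes and bridge routes configure_route produces.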
    def test_configure_extra_routes_ew_gw(self, router_update, subnet_update):
        for i in (1, 2):
            pod_dict = {'pod_id': 'pod_id_%d' % i,
                        'region_name': 'pod_%d' % i,
                        'az_name': 'az_name_%d' % i}
            db_api.create_pod(self.context, pod_dict)
        for i in (1, 2, 3):
            router = {'id': 'top_router_%d_id' % i}
            TOP_ROUTER.append(router)

        # gateway in podX is attached to routerX
        gw_map = {'net1_pod1_gw': '10.0.1.1',
                  'net2_pod2_gw': '10.0.2.1',
                  'net3_pod1_gw': '10.0.3.3',
                  'net3_pod2_gw': '10.0.3.4'}
        # interfaces are all attached to router3
        inf_map = {'net1_pod1_inf': '10.0.1.3',
                   'net2_pod2_inf': '10.0.2.3',
                   'net3_pod1_inf': '10.0.3.5',
                   'net3_pod2_inf': '10.0.3.6'}
        get_gw_map = lambda n_idx, p_idx: gw_map[
            'net%d_pod%d_gw' % (n_idx, p_idx)]
        get_inf_map = lambda n_idx, p_idx: inf_map[
            'net%d_pod%d_inf' % (n_idx, p_idx)]
        bridge_infos = []

        for net_idx, router_idx, pod_idx in [(1, 1, 1), (3, 1, 1), (1, 3, 1),
                                             (3, 3, 1), (2, 2, 2), (3, 2, 2),
                                             (2, 3, 2), (3, 3, 2)]:
            region_name = 'pod_%d' % pod_idx
            pod_id = 'pod_id_%d' % pod_idx
            top_router_id = 'top_router_%d_id' % router_idx

            network = {'id': 'network_%d_id' % net_idx}
            router = {'id': 'router_%d_%d_id' % (pod_idx, router_idx)}
            subnet = {'id': 'subnet_%d_id' % net_idx,
                      'network_id': network['id'],
                      'cidr': '10.0.%d.0/24' % net_idx,
                      'gateway_ip': get_gw_map(net_idx, pod_idx)}
            port = {'network_id': network['id'],
                    'device_id': router['id'],
                    'device_owner': 'network:router_interface',
                    'fixed_ips': [{'subnet_id': subnet['id']}]}
            if router_idx == 3:
                port['fixed_ips'][0][
                    'ip_address'] = get_inf_map(net_idx, pod_idx)
            else:
                port['fixed_ips'][0][
                    'ip_address'] = get_gw_map(net_idx, pod_idx)

            if net_idx == pod_idx and router_idx == 3:
                vm_idx = net_idx * 2 + pod_idx + 10
                vm_ip = '10.0.%d.%d' % (net_idx, vm_idx)
                vm_port = {'id': 'vm_port_%d_id' % vm_idx,
                           'network_id': network['id'],
                           'device_id': 'vm%d_id' % vm_idx,
                           'device_owner': 'compute:None',
                           'fixed_ips': [{'subnet_id': subnet['id'],
                                          'ip_address': vm_ip}]}
                bridge_network = {'id': 'bridge_network_%d_id' % net_idx}
                bridge_subnet = {'id': 'bridge_subnet_%d_id' % net_idx,
                                 'network_id': bridge_network['id'],
                                 'cidr': '100.0.1.0/24',
                                 'gateway_ip': '100.0.1.1'}
                bridge_cidr = bridge_subnet['cidr']
                bridge_port_ip = '%s.%d' % (
                    bridge_cidr[:bridge_cidr.rindex('.')], 2 + pod_idx)
                bridge_infos.append({'router_id': router['id'],
                                     'bridge_ip': bridge_port_ip,
                                     'vm_ip': vm_ip})
                bridge_port = {
                    'network_id': bridge_network['id'],
                    'device_id': router['id'],
                    'device_owner': 'network:router_gateway',
                    'fixed_ips': [{'subnet_id': bridge_subnet['id'],
                                   'ip_address': bridge_port_ip}]
                }
                RES_MAP[region_name]['port'].append(vm_port)
                RES_MAP[region_name]['network'].append(bridge_network)
                RES_MAP[region_name]['subnet'].append(bridge_subnet)
                RES_MAP[region_name]['port'].append(bridge_port)

            RES_MAP[region_name]['network'].append(network)
            RES_MAP[region_name]['subnet'].append(subnet)
            RES_MAP[region_name]['port'].append(port)
            RES_MAP[region_name]['router'].append(router)

            db_api.create_resource_mapping(self.context, top_router_id,
                                           router['id'], pod_id, 'project_id',
                                           constants.RT_ROUTER)
        # the code above creates this topology
        # pod1: net1 is attached to R1, default gateway is set on R1
        #       net1 is attached to R3
        #       net3 is attached to R1, default gateway is set on R1
        #       net3 is attached to R3
        # pod2: net2 is attached to R2, default gateway is set on R2
        #       net2 is attached to R3
        #       net3 is attached to R2, default gateway is set on R2
        #       net3 is attached to R3

        target_router_id = 'top_router_3_id'
        project_id = uuidutils.generate_uuid()
        db_api.new_job(self.context, project_id,
                       constants.JT_CONFIGURE_ROUTE, target_router_id)
        self.xmanager.configure_route(
            self.context,
            payload={constants.JT_CONFIGURE_ROUTE: target_router_id})

        # for the following paths, packets go to R3 via the interface
        # attached to R3
        # net1 in pod1 -> net2 in pod2
        # net2 in pod2 -> net1 in pod1
        # net3 in pod1 -> net2 in pod2
        # net3 in pod2 -> net1 in pod1
        expect_calls = [
            mock.call(self.context, 'subnet_1_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(1, 1),
                                 'destination': '10.0.2.0/24'}]}}),
            mock.call(self.context, 'subnet_2_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(2, 2),
                                 'destination': '10.0.1.0/24'}]}}),
            mock.call(self.context, 'subnet_3_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(3, 1),
                                 'destination': '10.0.2.0/24'}]}}),
            mock.call(self.context, 'subnet_3_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(3, 2),
                                 'destination': '10.0.1.0/24'}]}})]
        subnet_update.assert_has_calls(expect_calls, any_order=True)
        expect_calls = []
        for i in (0, 1):
            bridge_info = bridge_infos[i]
            expect_call = mock.call(
                self.context, bridge_infos[1 - i]['router_id'],
                {'router': {'routes': [
                    {'nexthop': bridge_info['bridge_ip'],
                     'destination': bridge_info['vm_ip'] + '/32'}]}})
            expect_calls.append(expect_call)
        router_update.assert_has_calls(expect_calls, any_order=True)
Example #8
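Exercises get_all with project id, status and type filters, including contradictory, invalid and unsupported combinations.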
    def test_get_all_jobs(self, mock_context):
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context, job['project_id'], job_type,
                                    resource_id)
            else:
                db_api.new_job(self.context, job['project_id'], job_type,
                               resource_id)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            project_id_filter_1 = {'project_id': job_project_id_map[job_type]}
            project_id_filter_2 = {'project_id': uuidutils.generate_uuid()}

            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # successful case, filter by project id
            jobs_project_id_filter_1 = self.controller.get_all(
                **project_id_filter_1)
            self.assertEqual(1, len(jobs_project_id_filter_1['jobs']))

            jobs_project_id_filter_2 = self.controller.get_all(
                **project_id_filter_2)
            self.assertEqual(0, len(jobs_project_id_filter_2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by project id, job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(
                    list(project_id_filter_1.items()) +
                    list(job_status_filter_3.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(
                    list(project_id_filter_1.items()) +
                    list(job_status_filter_1.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(
                list(project_id_filter_1.items()) +
                list(job_status_filter_2.items()) +
                list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs, len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs,
                         len(jobs_job_status_filter_3['jobs']))
Example #9
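A minimal invoke_method that only records the job; compare the version that also casts the RPC near the end of the listing.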
    def invoke_method(self, ctxt, project_id, method, _type, id):
        db_api.new_job(ctxt, project_id, _type, id)
Example #10
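Two pods share one security group and two subnets with the same CIDR; the job should drop the remote-group rule and create a single CIDR-based rule rather than one per subnet.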
    def test_configure_security_group_rules_duplicated_cidr(
            self, mock_create, mock_delete):
        project_id = uuidutils.generate_uuid()
        sg_id = uuidutils.generate_uuid()
        sg_rule_id_1 = uuidutils.generate_uuid()
        sg_rule_id_2 = uuidutils.generate_uuid()

        sg = {
            'id': sg_id,
            'tenant_id': project_id,
            'name': 'default',
            'security_group_rules': [{
                'id': sg_rule_id_1,
                'remote_group_id': sg_id,
                'direction': 'ingress',
                'remote_ip_prefix': None,
                'protocol': None,
                'ethertype': 'IPv4',
                'port_range_max': -1,
                'port_range_min': -1,
                'security_group_id': sg_id
            }, {
                'id': sg_rule_id_2,
                'remote_group_id': None,
                'direction': 'egress',
                'remote_ip_prefix': None,
                'protocol': None,
                'ethertype': 'IPv4',
                'port_range_max': -1,
                'port_range_min': -1,
                'security_group_id': sg_id
            }]
        }
        RES_MAP['top']['security_group'].append(sg)

        for i in range(1, 3):
            pod_dict = {
                'pod_id': 'pod_id_%d' % i,
                'region_name': 'pod_%d' % i,
                'az_name': 'az_name_%d' % i
            }
            db_api.create_pod(self.context, pod_dict)

            network = {'id': 'network_%d_id' % i, 'tenant_id': project_id}
            # we create two subnets with identical cidr but different
            # allocation pools
            subnet = {
                'id': 'subnet_%d_id' % i,
                'network_id': network['id'],
                'cidr': '10.0.1.0/24',
                'gateway_ip': '10.0.1.%d' % i,
                'tenant_id': project_id,
                'allocation_pools': {
                    'start': '10.0.1.%d' % (10 * i),
                    'end': '10.0.1.%d' % (10 * i + 9)
                },
                'ip_version': q_constants.IP_VERSION_4
            }
            RES_MAP['top']['network'].append(network)
            RES_MAP['top']['subnet'].append(subnet)

            region_name = 'pod_%d' % i
            RES_MAP[region_name]['security_group'].append(sg)
            route = {
                'top_id': sg_id,
                'bottom_id': sg_id,
                'pod_id': pod_dict['pod_id'],
                'resource_type': 'security_group'
            }
            with self.context.session.begin():
                core.create_resource(self.context, models.ResourceRouting,
                                     route)

        db_api.new_job(self.context, project_id, constants.JT_SEG_RULE_SETUP,
                       project_id)
        self.xmanager.configure_security_group_rules(
            self.context, payload={constants.JT_SEG_RULE_SETUP: project_id})

        calls = [mock.call(self.context, sg_rule_id_1)]
        mock_delete.assert_has_calls(calls)
        call_rules_id = [
            call_arg[0][1] for call_arg in mock_delete.call_args_list
        ]
        # bottom security group already has sg_rule_id_2, so this rule will
        # not be deleted
        self.assertNotIn(sg_rule_id_2, call_rules_id)

        calls = [
            mock.call(
                self.context, {
                    'security_group_rules': [{
                        'remote_group_id': None,
                        'direction': 'ingress',
                        'remote_ip_prefix': '10.0.1.0/24',
                        'protocol': None,
                        'ethertype': 'IPv4',
                        'port_range_max': -1,
                        'port_range_min': -1,
                        'security_group_id': sg_id
                    }]
                })
        ]
        mock_create.assert_has_calls(calls)
Example #11
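Walks get_one through its admin check, the 'schemas' and 'detail' pseudo-ids, individual job lookup, and status filters on the detail listing.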
    def test_get_one(self, mock_context):
        mock_context.return_value = self.context

        # failure case, only admin can list the job's info
        self.context.is_admin = False
        res = self.controller.get_one("schemas")
        self._validate_error_code(res, 403)
        res = self.controller.get_one("detail")
        self._validate_error_code(res, 403)
        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 403)

        self.context.is_admin = True

        # failure case, parameter error
        res = self.controller.get_one("schemas_1")
        self._validate_error_code(res, 404)

        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 404)

        # successful case, set id="schemas" to get job schemas
        job_schemas_2 = self.controller.get_one("schemas")
        job_schemas_3 = []
        for job_type in self.job_resource_map.keys():
            job = {}
            resource = []
            for resource_type, resource_id in self.job_resource_map[job_type]:
                resource.append(resource_id)
            job['resource'] = resource
            job['type'] = job_type
            job_schemas_3.append(job)

        self.assertEqual(job_schemas_3, job_schemas_2['schemas'])

        # successful case, set id="detail" to get all jobs.
        # first, we need to create jobs in job table.
        amount_of_all_jobs = len(self.job_resource_map.keys())
        all_job_ids = {}
        index = 0
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            all_job_ids[index] = job_1['id']
            index = index + 1

            # validate that get_one(id=job_id) returns the matching job
            job_2 = self.controller.get_one(job_1['id'])['job']
            self.assertEqual(job_1['type'], job_2['type'])
            self.assertEqual(job_1['project_id'], job_2['project_id'])
            self.assertEqual("NEW", job_2['status'])

        jobs_1 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_1['jobs']))

        # move some jobs into the job log table to validate that
        # get_one(id="detail") also returns jobs from the job log
        amount_of_succ_jobs = int(len(all_job_ids) / 2)
        for i in range(amount_of_succ_jobs):
            db_api.finish_job(self.context, all_job_ids[i], True,
                              timeutils.utcnow())

        jobs_2 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_2['jobs']))

        job_status_filter_1 = {'status': 'success'}
        jobs_3 = self.controller.get_one("detail", **job_status_filter_1)
        self.assertEqual(amount_of_succ_jobs, len(jobs_3['jobs']))

        job_status_filter_2 = {'status': 'new'}
        jobs_4 = self.controller.get_one("detail", **job_status_filter_2)
        self.assertEqual(amount_of_all_jobs - amount_of_succ_jobs,
                         len(jobs_4['jobs']))
Example #12
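Exercises put (job redo): non-admins get 403, unknown jobs 404, running and successful jobs 400, while failed and new jobs can be redone.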
    def test_put(self, mock_context):
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])

            # failure case, only admin can redo the job
            job_1 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.put(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.put(-123)
            self._validate_error_code(res, 404)

            # failure case, redo a running job
            job_2 = db_api.register_job(self.context, job['project_id'],
                                        job_type, resource_id)
            res = self.controller.put(job_2['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # failure case, redo a successful job
            job_3 = self._prepare_job_element(job_type)

            resource_id_3 = '#'.join([
                job_3['resource'][resource_id_3] for resource_type_3,
                resource_id_3 in self.job_resource_map[job_type]
            ])

            job_4 = db_api.new_job(self.context, job_3['project_id'], job_type,
                                   resource_id_3)
            with self.context.session.begin():
                job_dict = {
                    'status': constants.JS_Success,
                    'timestamp': timeutils.utcnow(),
                    'extra_id': uuidutils.generate_uuid()
                }
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            res = self.controller.put(job_4['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_4['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            job_5 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_5['id'], False,
                              timeutils.utcnow())
            self.controller.put(job_5['id'])

            db_api.delete_job(self.context, job_5['id'])

            # successful case, redo a new job
            job_6 = db_api.new_job(self.context, job['project_id'], job_type,
                                   resource_id)
            self.controller.put(job_6['id'])

            db_api.delete_job(self.context, job_6['id'])
Example #13
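Adds pagination on top of the filter coverage of Example #8: limit and marker handling, timestamp ordering, and following jobs_links until the listing is exhausted.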
    def test_get_all_jobs_with_pagination(self, mock_context):
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job keeps a project ID
                # that differs from context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([
                job['resource'][resource_id] for resource_type, resource_id in
                self.job_resource_map[job_type]
            ])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context, job['project_id'], job_type,
                                    resource_id)
                # jobs are sorted by timestamp; without a delay they would
                # all be created at the same time and paginate_query could
                # not tell them apart
                time.sleep(1)
            else:
                db_api.new_job(self.context, job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # when the client passes a project ID filter that differs from
            # the context's project ID, the filter is ignored and the
            # context's project ID is used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # one job has a project ID different from context.project_id.
            # since the list operation only returns jobs whose project ID
            # equals context.project_id, that job won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(
                    list(job_status_filter_3.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(
                    list(job_status_filter_1.items()) +
                    list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(
                list(job_status_filter_2.items()) +
                list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failure case, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        pt = r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {
                'status': 'new',
                'limit': limit,
                'marker': marker
            }
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        for i in range(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])
Example #14
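Combines get_one and get_all: schema and detail listings, then list markers that land in either the job table or the job log.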
    def test_get_one_and_get_all(self, mock_context):
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # failure case, only admin can list the job's info
        self.context.is_admin = False
        res = self.controller.get_one("schemas")
        self._validate_error_code(res, 403)
        res = self.controller.get_one("detail")
        self._validate_error_code(res, 403)
        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 403)

        self.context.is_admin = True

        # failure case, parameter error
        res = self.controller.get_one("schemas_1")
        self._validate_error_code(res, 404)

        res = self.controller.get_one(uuidutils.generate_uuid())
        self._validate_error_code(res, 404)

        # successful case, set id="schemas" to get job schemas
        job_schemas_2 = self.controller.get_one("schemas")
        job_schemas_3 = []
        for job_type in self.job_resource_map.keys():
            job = {}
            resource = []
            for resource_type, resource_id in self.job_resource_map[job_type]:
                resource.append(resource_id)
            job['resource'] = resource
            job['type'] = job_type
            job_schemas_3.append(job)

        self.assertEqual(job_schemas_3, job_schemas_2['schemas'])

        # successful case, set id="detail" to get all jobs.
        # first, we need to create jobs in job table.
        amount_of_all_jobs = len(self.job_resource_map.keys())
        all_job_ids = {}
        index = 0
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            # for test convenience, all jobs share the same project ID
            job['project_id'] = self.context.project_id

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])
            job_1 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            all_job_ids[index] = job_1['id']
            index = index + 1
            time.sleep(1)

            # validate that get_one(id=job_id) returns the matching job
            job_2 = self.controller.get_one(job_1['id'])['job']
            self.assertEqual(job_1['type'], job_2['type'])
            self.assertEqual(job_1['project_id'], job_2['project_id'])
            self.assertEqual("NEW", job_2['status'])

        jobs_1 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_1['jobs']))

        # move some jobs into the job log table to validate that
        # get_one(id="detail") also returns jobs from the job log
        amount_of_succ_jobs = int(len(all_job_ids) / 2)
        for i in range(amount_of_succ_jobs):
            db_api.finish_job(self.context, all_job_ids[i], True,
                              timeutils.utcnow())
            time.sleep(1)

        jobs_2 = self.controller.get_one("detail")
        self.assertEqual(amount_of_all_jobs, len(jobs_2['jobs']))

        job_status_filter_1 = {'status': 'success'}
        jobs_3 = self.controller.get_one("detail", **job_status_filter_1)
        self.assertEqual(amount_of_succ_jobs, len(jobs_3['jobs']))

        # set marker in job log
        res = self.controller.get_all(marker=jobs_3['jobs'][0]['id'],
                                      limit=amount_of_succ_jobs)
        self.assertEqual(amount_of_succ_jobs - 1, len(res['jobs']))

        job_status_filter_2 = {'status': 'new'}
        amount_of_new_jobs = amount_of_all_jobs - amount_of_succ_jobs
        jobs_4 = self.controller.get_one("detail", **job_status_filter_2)
        self.assertEqual(amount_of_new_jobs, len(jobs_4['jobs']))

        # set marker in job
        res = self.controller.get_all(marker=jobs_4['jobs'][0]['id'],
                                      limit=amount_of_new_jobs)
        self.assertEqual(amount_of_new_jobs, len(res['jobs']))
Exemplo n.º 15
0
    def test_get_all_jobs_with_pagination(self, mock_context):
        self.context.project_id = uuidutils.generate_uuid()
        mock_context.return_value = self.context

        # map job type to project id for later project id filter validation.
        job_project_id_map = {}
        amount_of_all_jobs = len(self.job_resource_map.keys())
        amount_of_running_jobs = 3
        count = 1

        # cover all job types.
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)
            if count > 1:
                # for test convenience, the first job has a project ID
                # that is different from the context.project_id
                job['project_id'] = self.context.project_id

            job_project_id_map[job_type] = job['project_id']

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])
            if count <= amount_of_running_jobs:
                db_api.register_job(self.context,
                                    job['project_id'], job_type,
                                    resource_id)
                # because jobs are sorted by timestamp, without time delay then
                # all jobs are created at the same time, paginate_query can't
                # identify them
                time.sleep(1)
            else:
                db_api.new_job(self.context,
                               job['project_id'], job_type,
                               resource_id)
                time.sleep(1)
            count = count + 1

        # query the jobs with several kinds of filters.
        # supported filters: project id, job status, job type.
        job_status_filter_1 = {'status': 'new'}
        job_status_filter_2 = {'status': 'fail'}
        job_status_filter_3 = {'status': 'running'}
        invalid_filter = {'status': "new-x"}
        unsupported_filter = {'fake_filter': "fake_filter"}
        count = 1
        for job_type in self.job_resource_map.keys():
            job_type_filter_1 = {'type': job_type}
            job_type_filter_2 = {'type': job_type + '_1'}

            # failure case, only admin can list the jobs
            self.context.is_admin = False
            res = self.controller.get_all()
            self._validate_error_code(res, 403)

            self.context.is_admin = True

            # test when specify project ID filter from client, if this
            # project ID is different from the one from context, then
            # it will be ignored, project ID from context will be
            # used instead.
            filter1 = {'project_id': uuidutils.generate_uuid()}
            res1 = self.controller.get_all(**filter1)

            filter2 = {'project_id': self.context.project_id}
            res2 = self.controller.get_all(**filter2)
            self.assertEqual(len(res2['jobs']), len(res1['jobs']))

            res3 = self.controller.get_all()
            # there is one job whose project ID is different from
            # context.project_id. As the list operation only retrieves the
            # jobs whose project ID equals to context.project_id, so this
            # special job entry won't be retrieved.
            self.assertEqual(len(res3['jobs']), len(res2['jobs']))

            # successful case, filter by job type
            jobs_job_type_filter_1 = self.controller.get_all(
                **job_type_filter_1)
            if count == 1:
                self.assertEqual(0, len(jobs_job_type_filter_1['jobs']))
            else:
                self.assertEqual(1, len(jobs_job_type_filter_1['jobs']))

            jobs_job_type_filter_2 = self.controller.get_all(
                **job_type_filter_2)
            self.assertEqual(0, len(jobs_job_type_filter_2['jobs']))

            # successful case, filter by job status and job type
            if count <= amount_of_running_jobs:
                all_filters = dict(list(job_status_filter_3.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                if count == 1:
                    self.assertEqual(0, len(jobs_all_filters['jobs']))
                else:
                    self.assertEqual(1, len(jobs_all_filters['jobs']))
            else:
                all_filters = dict(list(job_status_filter_1.items()) +
                                   list(job_type_filter_1.items()))
                jobs_all_filters = self.controller.get_all(**all_filters)
                self.assertEqual(1, len(jobs_all_filters['jobs']))

            # successful case, contradictory filter
            contradict_filters = dict(list(job_status_filter_2.items()) +
                                      list((job_type_filter_2.items())))
            jobs_contradict_filters = self.controller.get_all(
                **contradict_filters)
            self.assertEqual(0, len(jobs_contradict_filters['jobs']))
            count = count + 1

        # failure case, unsupported filter
        res = self.controller.get_all(**unsupported_filter)
        self._validate_error_code(res, 400)

        # successful case, invalid filter
        jobs_invalid_filter = self.controller.get_all(**invalid_filter)
        self.assertEqual(0, len(jobs_invalid_filter['jobs']))

        # successful case, list jobs without filters
        jobs_empty_filters = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(jobs_empty_filters['jobs']))

        # successful case, filter by job status
        jobs_job_status_filter_1 = self.controller.get_all(
            **job_status_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_job_status_filter_1['jobs']))

        jobs_job_status_filter_2 = self.controller.get_all(
            **job_status_filter_2)
        self.assertEqual(0, len(jobs_job_status_filter_2['jobs']))

        jobs_job_status_filter_3 = self.controller.get_all(
            **job_status_filter_3)
        self.assertEqual(amount_of_running_jobs - 1,
                         len(jobs_job_status_filter_3['jobs']))

        # test for paginate query
        job_paginate_no_filter_1 = self.controller.get_all()
        self.assertEqual(amount_of_all_jobs - 1,
                         len(job_paginate_no_filter_1['jobs']))

        # no limit no marker
        job_paginate_filter_1 = {'status': 'new'}
        jobs_paginate_filter_1 = self.controller.get_all(
            **job_paginate_filter_1)
        self.assertEqual(amount_of_all_jobs - amount_of_running_jobs,
                         len(jobs_paginate_filter_1['jobs']))

        # failed cases, unsupported limit type
        job_paginate_filter_2 = {'limit': '2test'}
        res = self.controller.get_all(**job_paginate_filter_2)
        self._validate_error_code(res, 400)

        # successful cases
        job_paginate_filter_4 = {'status': 'new', 'limit': '2'}
        res = self.controller.get_all(**job_paginate_filter_4)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_5 = {'status': 'new', 'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_5)
        self.assertEqual(2, len(res['jobs']))

        job_paginate_filter_6 = {'status': 'running', 'limit': 1}
        res1 = self.controller.get_all(**job_paginate_filter_6)

        marker = res1['jobs'][0]['id']
        job_paginate_filter_7 = {'status': 'running', 'marker': marker}
        res2 = self.controller.get_all(**job_paginate_filter_7)
        self.assertEqual(amount_of_running_jobs - 1, len(res2['jobs']))

        job_paginate_filter_8 = {'status': 'new', 'limit': 3}
        res = self.controller.get_all(**job_paginate_filter_8)
        self.assertGreaterEqual(res['jobs'][0]['timestamp'],
                                res['jobs'][1]['timestamp'])
        self.assertGreaterEqual(res['jobs'][1]['timestamp'],
                                res['jobs'][2]['timestamp'])

        # unsupported marker type
        res = self.controller.get_all(marker=None)
        self.assertEqual(amount_of_all_jobs - 1, len(res['jobs']))

        res = self.controller.get_all(marker='-123')
        self._validate_error_code(res, 400)

        # marker not in job table and job log table
        job_paginate_filter_9 = {'marker': uuidutils.generate_uuid()}
        res = self.controller.get_all(**job_paginate_filter_9)
        self._validate_error_code(res, 400)

        # test marker and limit
        limit = 2
        pt = '/v1.0/jobs\?limit=\w+&marker=([\w-]+)'
        job_paginate_filter = {'status': 'new', 'limit': limit}
        res = self.controller.get_all(**job_paginate_filter)
        while 'jobs_links' in res:
            m = re.match(pt, res['jobs_links'][0]['href'])
            marker = m.group(1)
            self.assertEqual(limit, len(res['jobs']))
            job_paginate_filter = {'status': 'new', 'limit': limit,
                                   'marker': marker}
            res = self.controller.get_all(**job_paginate_filter)

        job_paginate_filter_10 = {'status': 'running'}
        res = self.controller.get_all(**job_paginate_filter_10)
        self.assertEqual(amount_of_running_jobs - 1, len(res['jobs']))
        # add some rows to job log table
        for i in xrange(amount_of_running_jobs - 1):
            db_api.finish_job(self.context, res['jobs'][i]['id'], True,
                              timeutils.utcnow())
            time.sleep(1)
        res_success_log = db_api.list_jobs_from_log(self.context, None)
        self.assertEqual(amount_of_running_jobs - 1, len(res_success_log))

        res_in_job = db_api.list_jobs(self.context, None)
        self.assertEqual(amount_of_all_jobs - (amount_of_running_jobs - 1),
                         len(res_in_job))

        job_paginate_filter_11 = {'limit': 2}
        res = self.controller.get_all(**job_paginate_filter_11)
        self.assertIsNotNone(res['jobs_links'][0]['href'])
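The loop above pages through results by re-matching each jobs_links href. The same client-side pattern can be written as a generator; a minimal sketch, assuming only the get_all interface exercised in this test (iterate_jobs itself is a hypothetical helper):

import re

def iterate_jobs(controller, status, limit):
    # Follow the pagination links until the controller stops
    # returning a 'jobs_links' entry.
    link_pt = re.compile(r'/v1.0/jobs\?limit=\w+&marker=([\w-]+)')
    params = {'status': status, 'limit': limit}
    while True:
        res = controller.get_all(**params)
        for job in res['jobs']:
            yield job
        if 'jobs_links' not in res:
            break
        marker = link_pt.match(res['jobs_links'][0]['href']).group(1)
        params = {'status': status, 'limit': limit, 'marker': marker}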
Example No. 16
 def invoke_method(self, ctxt, project_id, method, _type, id):
     db_api.new_job(ctxt, project_id, _type, id)
     self.client.prepare(exchange='openstack').cast(ctxt,
                                                    method,
                                                    payload={_type: id})
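This helper registers the job row before casting the RPC message, so a lost cast still leaves a redoable 'new' entry in the job table. A caller sketch under that assumption (trigger_route_configuration and the xjob_api argument are hypothetical names):

def trigger_route_configuration(xjob_api, ctx, project_id, router_id):
    # Enqueue an asynchronous route update for one router; a worker
    # later consumes payload={constants.JT_CONFIGURE_ROUTE: router_id}.
    xjob_api.invoke_method(ctx, project_id, 'configure_route',
                           constants.JT_CONFIGURE_ROUTE, router_id)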
Example No. 17
 def fake_invoke_method(self, context, project_id, method, type, id):
     db_api.new_job(context, project_id, type, id)
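Since fake_invoke_method only records the job row and skips the RPC cast, tests can patch it over the real helper to stay deterministic. A sketch, assuming the real client is XJobAPI.invoke_method in an xrpcapi module (the patch target path is an assumption):

from unittest import mock

def patch_xjob_api(test_case):
    # Hypothetical helper: route all job invocations through
    # fake_invoke_method for the duration of a test case.
    patcher = mock.patch(
        'tricircle.common.xrpcapi.XJobAPI.invoke_method',
        new=fake_invoke_method)
    patcher.start()
    test_case.addCleanup(patcher.stop)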
Example No. 18
    def test_setup_shadow_ports(self, mock_setup):
        project_id = uuidutils.generate_uuid()
        net1_id = uuidutils.generate_uuid()
        subnet1_id = uuidutils.generate_uuid()
        port1_id = uuidutils.generate_uuid()
        port2_id = uuidutils.generate_uuid()
        for i in (1, 2):
            pod_id = 'pod_id_%d' % i
            pod_dict = {
                'pod_id': pod_id,
                'region_name': 'pod_%d' % i,
                'az_name': 'az_name_%d' % i
            }
            db_api.create_pod(self.context, pod_dict)
            db_api.create_resource_mapping(self.context, net1_id, net1_id,
                                           pod_id, project_id,
                                           constants.RT_NETWORK)
        TOP_NETWORK.append({'id': net1_id, 'tenant_id': project_id})
        BOTTOM1_PORT.append({'id': port1_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host1',
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.3'}]})
        BOTTOM2_PORT.append({'id': port2_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host2',
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.4'}]})
        db_api.ensure_agent_exists(self.context, 'pod_id_1', 'host1',
                                   q_constants.AGENT_TYPE_OVS, '192.168.1.101')
        db_api.ensure_agent_exists(self.context, 'pod_id_2', 'host2',
                                   q_constants.AGENT_TYPE_OVS, '192.168.1.102')

        resource_id = 'pod_id_1#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port in pod1 is created and updated
        client1 = FakeClient('pod_1')
        sd_ports = client1.list_ports(
            self.context, [{
                'key': 'device_owner',
                'comparator': 'eq',
                'value': constants.DEVICE_OWNER_SHADOW
            }])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'], '10.0.1.4')
        self.assertIn(constants.PROFILE_FORCE_UP,
                      sd_ports[0]['binding:profile'])

        # check job to setup shadow ports for pod2 is registered
        mock_setup.assert_called_once_with(self.context, project_id,
                                           'pod_id_2', net1_id)

        # update shadow port to down and test again; this can happen when we
        # succeed in creating the shadow port but fail to update it to active
        profile = sd_ports[0]['binding:profile']
        profile.pop(constants.PROFILE_FORCE_UP)
        client1.update_ports(
            self.context, sd_ports[0]['id'], {
                'port': {
                    'status': q_constants.PORT_STATUS_DOWN,
                    'binding:profile': profile
                }
            })

        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port is updated to active again
        sd_port = client1.get_ports(self.context, sd_ports[0]['id'])
        self.assertIn(constants.PROFILE_FORCE_UP, sd_port['binding:profile'])

        # manually trigger shadow ports setup in pod2
        resource_id = 'pod_id_2#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        client2 = FakeClient('pod_2')
        sd_ports = client2.list_ports(
            self.context, [{
                'key': 'device_owner',
                'comparator': 'eq',
                'value': constants.DEVICE_OWNER_SHADOW
            }])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'], '10.0.1.3')
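The job's resource id packs the pod and the network into one '#'-joined string ('pod_id_1#' + net1_id above). A pair of hypothetical helpers for building and splitting that composite id:

def combine_resource_id(pod_id, net_id):
    # Mirrors the inline concatenation used in the test above.
    return '%s#%s' % (pod_id, net_id)

def split_resource_id(resource_id):
    # Split on the first '#' only; a UUID never contains '#', but a
    # future key format might.
    pod_id, net_id = resource_id.split('#', 1)
    return pod_id, net_id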
Example No. 19
    def test_configure_security_group_rules_duplicated_cidr(self, mock_create,
                                                            mock_delete):
        project_id = uuidutils.generate_uuid()
        sg_id = uuidutils.generate_uuid()
        sg_rule_id_1 = uuidutils.generate_uuid()
        sg_rule_id_2 = uuidutils.generate_uuid()

        sg = {'id': sg_id,
              'tenant_id': project_id,
              'name': 'default',
              'security_group_rules': [{
                  'id': sg_rule_id_1,
                  'remote_group_id': sg_id,
                  'direction': 'ingress',
                  'remote_ip_prefix': None,
                  'protocol': None,
                  'ethertype': 'IPv4',
                  'port_range_max': -1,
                  'port_range_min': -1,
                  'security_group_id': sg_id},
                  {'id': sg_rule_id_2,
                   'remote_group_id': None,
                   'direction': 'egress',
                   'remote_ip_prefix': None,
                   'protocol': None,
                   'ethertype': 'IPv4',
                   'port_range_max': -1,
                   'port_range_min': -1,
                   'security_group_id': sg_id}]}
        RES_MAP['top']['security_group'].append(sg)

        for i in range(1, 3):
            pod_dict = {'pod_id': 'pod_id_%d' % i,
                        'region_name': 'pod_%d' % i,
                        'az_name': 'az_name_%d' % i}
            db_api.create_pod(self.context, pod_dict)

            network = {'id': 'network_%d_id' % i,
                       'tenant_id': project_id}
            # we create two subnets with identical cidr but different
            # allocation pools
            subnet = {'id': 'subnet_%d_id' % i,
                      'network_id': network['id'],
                      'cidr': '10.0.1.0/24',
                      'gateway_ip': '10.0.1.%d' % i,
                      'tenant_id': project_id,
                      'allocation_pools': {'start': '10.0.1.%d' % (10 * i),
                                           'end': '10.0.1.%d' % (10 * i + 9)},
                      'ip_version': q_constants.IP_VERSION_4}
            RES_MAP['top']['network'].append(network)
            RES_MAP['top']['subnet'].append(subnet)

            region_name = 'pod_%d' % i
            RES_MAP[region_name]['security_group'].append(sg)
            route = {'top_id': sg_id, 'bottom_id': sg_id,
                     'pod_id': pod_dict['pod_id'],
                     'resource_type': 'security_group'}
            with self.context.session.begin():
                core.create_resource(self.context, models.ResourceRouting,
                                     route)

        db_api.new_job(self.context, project_id, constants.JT_SEG_RULE_SETUP,
                       project_id)
        self.xmanager.configure_security_group_rules(
            self.context, payload={constants.JT_SEG_RULE_SETUP: project_id})

        calls = [mock.call(self.context, sg_rule_id_1)]
        mock_delete.assert_has_calls(calls)
        call_rules_id = [
            call_arg[0][1] for call_arg in mock_delete.call_args_list]
        # bottom security group already has sg_rule_id_2, so this rule will
        # not be deleted
        self.assertNotIn(sg_rule_id_2, call_rules_id)

        calls = [mock.call(self.context,
                           {'security_group_rules': [
                               {'remote_group_id': None,
                                'direction': 'ingress',
                                'remote_ip_prefix': '10.0.1.0/24',
                                'protocol': None,
                                'ethertype': 'IPv4',
                                'port_range_max': -1,
                                'port_range_min': -1,
                                'security_group_id': sg_id}]})]
        mock_create.assert_has_calls(calls)
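The assertions encode the expected expansion: a rule whose remote_group_id points back at the group itself is replaced by one remote_ip_prefix rule per distinct subnet CIDR, which is why the two identical 10.0.1.0/24 subnets above yield a single create call. A sketch of that de-duplication (expand_remote_group_rule and its arguments are hypothetical):

def expand_remote_group_rule(rule, subnet_cidrs):
    expanded = []
    for cidr in sorted(set(subnet_cidrs)):  # identical CIDRs collapse
        new_rule = dict(rule, remote_group_id=None,
                        remote_ip_prefix=cidr)
        new_rule.pop('id', None)  # the bottom pod assigns its own id
        expanded.append(new_rule)
    return expanded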
Example No. 20
 def invoke_method(self, ctxt, project_id, method, _type, id):
     db_api.new_job(ctxt, project_id, _type, id)
     self.client.prepare(exchange='openstack').cast(
         ctxt, method, payload={_type: id})
Example No. 21
    def test_setup_shadow_ports(self, mock_setup):
        project_id = uuidutils.generate_uuid()
        net1_id = uuidutils.generate_uuid()
        subnet1_id = uuidutils.generate_uuid()
        port1_id = uuidutils.generate_uuid()
        port2_id = uuidutils.generate_uuid()
        for i in (1, 2):
            pod_id = 'pod_id_%d' % i
            pod_dict = {'pod_id': pod_id,
                        'region_name': 'pod_%d' % i,
                        'az_name': 'az_name_%d' % i}
            db_api.create_pod(self.context, pod_dict)
            db_api.create_resource_mapping(
                self.context, net1_id, net1_id, pod_id, project_id,
                constants.RT_NETWORK)
        TOP_NETWORK.append({'id': net1_id, 'tenant_id': project_id})
        BOTTOM1_PORT.append({'id': port1_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host1',
                             'device_id': None,
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.3'}]})
        BOTTOM2_PORT.append({'id': port2_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host2',
                             'device_id': None,
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.4'}]})
        db_api.ensure_agent_exists(
            self.context, 'pod_id_1', 'host1', q_constants.AGENT_TYPE_OVS,
            '192.168.1.101')
        db_api.ensure_agent_exists(
            self.context, 'pod_id_2', 'host2', q_constants.AGENT_TYPE_OVS,
            '192.168.1.102')

        resource_id = 'pod_id_1#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port in pod1 is created and updated
        client1 = FakeClient('pod_1')
        sd_ports = client1.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
                         '10.0.1.4')
        self.assertIn(constants.PROFILE_FORCE_UP,
                      sd_ports[0]['binding:profile'])

        # check job to setup shadow ports for pod2 is registered
        mock_setup.assert_called_once_with(self.context, project_id,
                                           'pod_id_2', net1_id)

        # update shadow port to down and test again; this can happen when we
        # succeed in creating the shadow port but fail to update it to active
        profile = sd_ports[0]['binding:profile']
        profile.pop(constants.PROFILE_FORCE_UP)
        client1.update_ports(self.context, sd_ports[0]['id'],
                             {'port': {'status': q_constants.PORT_STATUS_DOWN,
                                       'binding:profile': profile}})

        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port is updated to active again
        sd_port = client1.get_ports(self.context, sd_ports[0]['id'])
        self.assertIn(constants.PROFILE_FORCE_UP, sd_port['binding:profile'])

        # manually trigger shadow ports setup in pod2
        resource_id = 'pod_id_2#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        client2 = FakeClient('pod_2')
        sd_ports = client2.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
                         '10.0.1.3')
Example No. 22
        def handle_args(*args, **kwargs):
            if IN_TEST:
                # NOTE(zhiyuan) the job mechanism causes unpredictable
                # results in unit tests, so we bypass it here. However,
                # mocking a decorator that decorates member functions is
                # problematic, hence this flag; not elegant, but it works.
                func(*args, **kwargs)
                return
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handle is running for a long time
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # fail to obtain the lock, let other worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # there are two reasons that running_job is None. one
                        # is that the running job has just been finished, the
                        # other is that all workers fail to register the job
                        # due to deadlock exception. so we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # previous running job expires, we set its status to
                        # fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # previous running job is still valid, we just leave
                        # the job to the worker who holds the lock
                        break
                # successfully obtain the lock, start to execute handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
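handle_args above is the innermost wrapper of a decorator factory; reconstructed from the @xmanager._job_handle(job_type) usage in the tests, the enclosing skeleton presumably looks like the sketch below (not the verbatim implementation):

import functools

def _job_handle(job_type):
    def handle_func(func):
        @functools.wraps(func)
        def handle_args(*args, **kwargs):
            # the lock/retry/expire loop shown above goes here,
            # closing over both job_type and func
            func(*args, **kwargs)
        return handle_args
    return handle_func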
Example No. 23
 def invoke_method(self, ctxt, project_id, method, _type, id):
     db_api.new_job(ctxt, project_id, _type, id)
Example No. 24
    def test_configure_extra_routes_ew_gw(self, router_update, subnet_update):
        for i in (1, 2):
            pod_dict = {
                'pod_id': 'pod_id_%d' % i,
                'region_name': 'pod_%d' % i,
                'az_name': 'az_name_%d' % i
            }
            db_api.create_pod(self.context, pod_dict)
        for i in (1, 2, 3):
            router = {'id': 'top_router_%d_id' % i}
            TOP_ROUTER.append(router)

        # gateway in podX is attached to routerX
        gw_map = {
            'net1_pod1_gw': '10.0.1.1',
            'net2_pod2_gw': '10.0.2.1',
            'net3_pod1_gw': '10.0.3.3',
            'net3_pod2_gw': '10.0.3.4'
        }
        # interfaces are all attached to router3
        inf_map = {
            'net1_pod1_inf': '10.0.1.3',
            'net2_pod2_inf': '10.0.2.3',
            'net3_pod1_inf': '10.0.3.5',
            'net3_pod2_inf': '10.0.3.6'
        }
        get_gw_map = lambda n_idx, p_idx: gw_map['net%d_pod%d_gw' %
                                                 (n_idx, p_idx)]
        get_inf_map = lambda n_idx, p_idx: inf_map['net%d_pod%d_inf' %
                                                   (n_idx, p_idx)]
        bridge_infos = []

        for net_idx, router_idx, pod_idx in [(1, 1, 1), (3, 1, 1), (1, 3, 1),
                                             (3, 3, 1), (2, 2, 2), (3, 2, 2),
                                             (2, 3, 2), (3, 3, 2)]:
            region_name = 'pod_%d' % pod_idx
            pod_id = 'pod_id_%d' % pod_idx
            top_router_id = 'top_router_%d_id' % router_idx

            network = {'id': 'network_%d_id' % net_idx}
            router = {'id': 'router_%d_%d_id' % (pod_idx, router_idx)}
            subnet = {
                'id': 'subnet_%d_id' % net_idx,
                'network_id': network['id'],
                'cidr': '10.0.%d.0/24' % net_idx,
                'gateway_ip': get_gw_map(net_idx, pod_idx)
            }
            port = {
                'network_id': network['id'],
                'device_id': router['id'],
                'device_owner': 'network:router_interface',
                'fixed_ips': [{
                    'subnet_id': subnet['id']
                }]
            }
            if router_idx == 3:
                port['fixed_ips'][0]['ip_address'] = get_inf_map(
                    net_idx, pod_idx)
            else:
                port['fixed_ips'][0]['ip_address'] = get_gw_map(
                    net_idx, pod_idx)

            if net_idx == pod_idx and router_idx == 3:
                vm_idx = net_idx * 2 + pod_idx + 10
                vm_ip = '10.0.%d.%d' % (net_idx, vm_idx)
                vm_port = {
                    'id': 'vm_port_%d_id' % vm_idx,
                    'network_id': network['id'],
                    'device_id': 'vm%d_id' % vm_idx,
                    'device_owner': 'compute:None',
                    'fixed_ips': [{
                        'subnet_id': subnet['id'],
                        'ip_address': vm_ip
                    }]
                }
                bridge_network = {'id': 'bridge_network_%d_id' % net_idx}
                bridge_subnet = {
                    'id': 'bridge_subnet_%d_id' % net_idx,
                    'network_id': bridge_network['id'],
                    'cidr': '100.0.1.0/24',
                    'gateway_ip': '100.0.1.1'
                }
                bridge_cidr = bridge_subnet['cidr']
                bridge_port_ip = '%s.%d' % (
                    bridge_cidr[:bridge_cidr.rindex('.')], 2 + pod_idx)
                bridge_infos.append({
                    'router_id': router['id'],
                    'bridge_ip': bridge_port_ip,
                    'vm_ip': vm_ip
                })
                bridge_port = {
                    'network_id': bridge_network['id'],
                    'device_id': router['id'],
                    'device_owner': 'network:router_gateway',
                    'fixed_ips': [{
                        'subnet_id': bridge_subnet['id'],
                        'ip_address': bridge_port_ip
                    }]
                }
                RES_MAP[region_name]['port'].append(vm_port)
                RES_MAP[region_name]['network'].append(bridge_network)
                RES_MAP[region_name]['subnet'].append(bridge_subnet)
                RES_MAP[region_name]['port'].append(bridge_port)

            RES_MAP[region_name]['network'].append(network)
            RES_MAP[region_name]['subnet'].append(subnet)
            RES_MAP[region_name]['port'].append(port)
            RES_MAP[region_name]['router'].append(router)

            db_api.create_resource_mapping(self.context, top_router_id,
                                           router['id'], pod_id, 'project_id',
                                           constants.RT_ROUTER)
        # the above codes create this topology
        # pod1: net1 is attached to R1, default gateway is set on R1
        #       net1 is attached to R3
        #       net3 is attached to R1, default gateway is set on R1
        #       net3 is attached to R3
        # pod2: net2 is attached to R2, default gateway is set on R2
        #       net2 is attached to R3
        #       net3 is attached to R2, default gateway is set on R2
        #       net3 is attached to R3

        target_router_id = 'top_router_3_id'
        project_id = uuidutils.generate_uuid()
        db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
                       target_router_id)
        self.xmanager.configure_route(
            self.context,
            payload={constants.JT_CONFIGURE_ROUTE: target_router_id})

        # for the following paths, packets will go to R3 via the interface
        # which is attached to R3
        # net1 in pod1 -> net2 in pod2
        # net2 in pod2 -> net1 in pod1
        # net3 in pod1 -> net2 in pod2
        # net3 in pod2 -> net1 in pod1
        expect_calls = [
            mock.call(
                self.context, 'subnet_1_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(1, 1),
                            'destination': '10.0.2.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_2_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(2, 2),
                            'destination': '10.0.1.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_3_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(3, 1),
                            'destination': '10.0.2.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_3_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(3, 2),
                            'destination': '10.0.1.0/24'
                        }]
                    }
                })
        ]
        subnet_update.assert_has_calls(expect_calls, any_order=True)
        expect_calls = []
        for i in (0, 1):
            bridge_info = bridge_infos[i]
            expect_call = mock.call(
                self.context, bridge_infos[1 - i]['router_id'], {
                    'router': {
                        'routes': [{
                            'nexthop': bridge_info['bridge_ip'],
                            'destination': bridge_info['vm_ip'] + '/32'
                        }]
                    }
                })
            expect_calls.append(expect_call)
        router_update.assert_has_calls(expect_calls, any_order=True)
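Every expected subnet_update call follows one rule: a subnet attached both to its local router and to the east-west router R3 learns a host route toward the remote pod's CIDR via R3's interface IP on that subnet. A hedged sketch of that derivation (build_host_routes is a hypothetical helper):

def build_host_routes(r3_interface_ip, remote_cidrs):
    return [{'nexthop': r3_interface_ip, 'destination': cidr}
            for cidr in remote_cidrs]

# e.g. subnet_1 in pod1 reaches net2 (which only exists in pod2)
# through R3's interface 10.0.1.3:
assert build_host_routes('10.0.1.3', ['10.0.2.0/24']) == [
    {'nexthop': '10.0.1.3', 'destination': '10.0.2.0/24'}]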
Example No. 25
    def test_delete(self, mock_context):
        mock_context.return_value = self.context

        # cover all job types.
        # each iteration of the loop below adds one entry to the job log
        # table; count tracks the running total of entries in that table.
        count = 1
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])

            # failure case, only admin can delete the job
            job_1 = db_api.new_job(self.context, job['project_id'],
                                   job_type,
                                   resource_id)
            self.context.is_admin = False
            res = self.controller.delete(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.delete(-123)
            self._validate_error_code(res, 404)

            # failure case, delete a running job
            job_2 = db_api.register_job(self.context,
                                        job['project_id'],
                                        job_type, resource_id)
            job = db_api.get_job(self.context, job_2['id'])
            res = self.controller.delete(job_2['id'])
            self._validate_error_code(res, 400)

            # finish the job and delete it
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # successful case, delete a successful job. a successful job
            # that has moved to the job log can't be deleted; here the job
            # is still in the job table.
            job_3 = self._prepare_job_element(job_type)
            resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
                                      for resource_type_3, resource_id_3
                                      in self.job_resource_map[job_type]])

            job_4 = db_api.new_job(self.context,
                                   job_3['project_id'],
                                   job_type, resource_id_3)

            with self.context.session.begin():
                job_dict = {'status': constants.JS_Success,
                            'timestamp': timeutils.utcnow(),
                            'extra_id': uuidutils.generate_uuid()}
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            job_4_succ = db_api.get_job(self.context, job_4['id'])
            self.controller.delete(job_4['id'])

            filters_job_4 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_4_succ['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_4_succ['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_4_succ['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_4_succ['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_4)))
            self.assertEqual(count,
                             len(db_api.list_jobs_from_log(self.context)))
            count = count + 1

            # successful case, delete a new job
            job_5 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            self.controller.delete(job_5['id'])

            filters_job_5 = [
                {'key': 'type', 'comparator': 'eq', 'value': job_5['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_5['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_5['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_5['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_5)))

            # successful case, delete a failed job
            job_6 = db_api.new_job(self.context,
                                   job['project_id'], job_type,
                                   resource_id)
            db_api.finish_job(self.context, job_6['id'], False,
                              timeutils.utcnow())
            job_6_failed = db_api.get_job(self.context, job_6['id'])
            self.controller.delete(job_6['id'])
            filters_job_6 = [
                {'key': 'type', 'comparator': 'eq',
                 'value': job_6_failed['type']},
                {'key': 'status', 'comparator': 'eq',
                 'value': job_6_failed['status']},
                {'key': 'resource_id', 'comparator': 'eq',
                 'value': job_6_failed['resource_id']},
                {'key': 'extra_id', 'comparator': 'eq',
                 'value': job_6_failed['extra_id']}]
            self.assertEqual(0, len(db_api.list_jobs(self.context,
                                                     filters_job_6)))
Example No. 26
    def test_put(self, mock_context):
        mock_context.return_value = self.context

        # cover all job types
        for job_type in self.job_resource_map.keys():
            job = self._prepare_job_element(job_type)

            resource_id = '#'.join([job['resource'][resource_id]
                                    for resource_type, resource_id
                                    in self.job_resource_map[job_type]])

            # failure case, only admin can redo the job
            job_1 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            self.context.is_admin = False
            res = self.controller.put(job_1['id'])
            self._validate_error_code(res, 403)

            self.context.is_admin = True
            db_api.delete_job(self.context, job_1['id'])

            # failure case, job not found
            res = self.controller.put(-123)
            self._validate_error_code(res, 404)

            # failure case, redo a running job
            job_2 = db_api.register_job(self.context,
                                        job['project_id'],
                                        job_type, resource_id)
            res = self.controller.put(job_2['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_2['id'], False,
                              timeutils.utcnow())
            db_api.delete_job(self.context, job_2['id'])

            # failure case, redo a successful job
            job_3 = self._prepare_job_element(job_type)

            resource_id_3 = '#'.join([job_3['resource'][resource_id_3]
                                      for resource_type_3, resource_id_3
                                      in self.job_resource_map[job_type]])

            job_4 = db_api.new_job(self.context,
                                   job_3['project_id'],
                                   job_type, resource_id_3)
            with self.context.session.begin():
                job_dict = {'status': constants.JS_Success,
                            'timestamp': timeutils.utcnow(),
                            'extra_id': uuidutils.generate_uuid()}
                core.update_resource(self.context, models.AsyncJob,
                                     job_4['id'], job_dict)

            res = self.controller.put(job_4['id'])
            self._validate_error_code(res, 400)
            db_api.finish_job(self.context, job_4['id'], True,
                              timeutils.utcnow())

            # successful case, redo a failed job
            job_5 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            db_api.finish_job(self.context, job_5['id'], False,
                              timeutils.utcnow())
            self.controller.put(job_5['id'])

            db_api.delete_job(self.context, job_5['id'])

            # successful case, redo a new job
            job_6 = db_api.new_job(self.context,
                                   job['project_id'],
                                   job_type, resource_id)
            self.controller.put(job_6['id'])

            db_api.delete_job(self.context, job_6['id'])
Example No. 28
 def configure_security_group_rules(self, ctxt, project_id):
     db_api.new_job(ctxt, constants.JT_SEG_RULE_SETUP, project_id)
     self.client.prepare(exchange='openstack').cast(
         ctxt,
         'configure_security_group_rules',
         payload={constants.JT_SEG_RULE_SETUP: project_id})
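As in the invoke_method helpers above, the job row is written before the message is cast. A caller sketch (sync_tenant_sg_rules is a hypothetical wrapper):

def sync_tenant_sg_rules(xjob_api, ctxt, project_id):
    # Enqueue one security-group-rule sync per tenant; a worker later
    # runs configure_security_group_rules with
    # payload={constants.JT_SEG_RULE_SETUP: project_id}.
    xjob_api.configure_security_group_rules(ctxt, project_id)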