def test_get_failed_jobs(self):
    """get_latest_failed_jobs returns the newest Fail job per resource.

    For each (type, resource_id) pair only the job with the latest
    timestamp counts; res3/uuid3 ends with a Success job, so that pair
    must not appear in the result.
    """
    job_dict_list = [
        # res1/uuid1: latest failure is 12:05 -> job_uuid3
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 0, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'status': constants.JS_Fail},  # job_uuid1
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 5, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'status': constants.JS_Fail},  # job_uuid3
        # res2/uuid2: latest failure is 12:20 -> job_uuid5
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 20, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'status': constants.JS_Fail},  # job_uuid5
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 15, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'status': constants.JS_Fail},  # job_uuid7
        # res3/uuid3: latest job (12:30) succeeds, so nothing expected
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 25, 0),
         'resource_id': 'uuid3', 'type': 'res3',
         'status': constants.JS_Fail},  # job_uuid9
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 30, 0),
         'resource_id': 'uuid3', 'type': 'res3',
         'status': constants.JS_Success}]
    for i, job_dict in enumerate(job_dict_list, 1):
        # each dict is stored twice: first with an odd-numbered id and its
        # original status, then with an even-numbered id and status JS_New
        # (presumably to check non-Fail duplicates are ignored — confirm
        # against get_latest_failed_jobs)
        job_dict['id'] = 'job_uuid%d' % (2 * i - 1)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i - 1)
        core.create_resource(self.context, models.Job, job_dict)
        job_dict['id'] = 'job_uuid%d' % (2 * i)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i)
        job_dict['status'] = constants.JS_New
        core.create_resource(self.context, models.Job, job_dict)
    # for res3 + uuid3, the latest job's status is "Success", not returned
    expected_ids = ['job_uuid3', 'job_uuid5']
    returned_jobs = db_api.get_latest_failed_jobs(self.context)
    actual_ids = [job['id'] for job in returned_jobs]
    self.assertItemsEqual(expected_ids, actual_ids)
def test_job_run_expire(self):
    """A long-Running job is treated as expired and the handler re-runs it.

    After running the handler against a job stuck in Running for 120
    seconds (presumably beyond the expiry window — confirm in xmanager),
    three job records exist for the resource: New, Fail and Success.
    """
    @xmanager._job_handle('fake_resource')
    def fake_handle(self, ctx, payload):
        pass

    fake_id = uuidutils.generate_uuid()
    payload = {'fake_resource': fake_id}
    expired_job = {
        'id': uuidutils.generate_uuid(),
        'type': 'fake_resource',
        # 120 seconds in the past, still marked Running
        'timestamp': datetime.datetime.now() - datetime.timedelta(0, 120),
        'status': constants.JS_Running,
        'resource_id': fake_id,
        'extra_id': constants.SP_EXTRA_ID
    }
    core.create_resource(self.context, models.Job, expired_job)
    fake_handle(None, self.context, payload=payload)
    jobs = core.query_resource(self.context, models.Job, [], [])
    expected_status = ['New', 'Fail', 'Success']
    job_status = [job['status'] for job in jobs]
    self.assertItemsEqual(expected_status, job_status)
    for i in xrange(3):
        self.assertEqual(fake_id, jobs[i]['resource_id'])
        self.assertEqual('fake_resource', jobs[i]['type'])
def get_pod_by_az_tenant(context, az_name, tenant_id):
    """Return (pod, pod_az_name) serving the given AZ for the tenant.

    An existing binding of the tenant inside the requested availability
    zone is reused; otherwise the tenant is bound to the first pod of
    the AZ that has a non-empty pod_name (presumably excluding the top
    pod — confirm against pod creation code).

    :returns: (pod, pod_az_name), or (None, None) when no suitable pod
        exists or the binding cannot be created
    """
    pod_bindings = core.query_resource(context, models.PodBinding, [{
        'key': 'tenant_id', 'comparator': 'eq', 'value': tenant_id
    }], [])
    for pod_b in pod_bindings:
        pod = core.get_resource(context, models.Pod, pod_b['pod_id'])
        if pod['az_name'] == az_name:
            return pod, pod['pod_az_name']
    # TODO(joehuang): schedule one dynamically in the future
    filters = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]
    pods = db_api.list_pods(context, filters=filters)
    for pod in pods:
        if pod['pod_name'] != '':
            try:
                with context.session.begin():
                    core.create_resource(
                        context, models.PodBinding,
                        {
                            'id': uuidutils.generate_uuid(),
                            'tenant_id': tenant_id,
                            'pod_id': pod['pod_id']
                        })
                    return pod, pod['pod_az_name']
            except Exception as e:
                LOG.error(_LE('Fail to create pod binding: %(exception)s'),
                          {'exception': e})
                return None, None
    return None, None
def test_delete(self, mock_ctx, mock_delete, mock_delete_port):
    """Deleting a server removes the bottom server and its top port.

    A top->bottom server routing and a top port attached to the server
    are prepared; controller.delete must delete the bottom server, clean
    up the attached top port and answer 204.
    """
    t_pod, b_pod = self._prepare_pod()
    mock_ctx.return_value = self.context
    t_server_id = 't_server_id'
    b_server_id = 'b_server_id'
    # map the top server onto its bottom counterpart
    with self.context.session.begin():
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': t_server_id, 'bottom_id': b_server_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': constants.RT_SERVER})
    port_id = uuidutils.generate_uuid()
    # top port whose device_id ties it to the server being deleted
    server_port = {
        'id': port_id,
        'device_id': t_server_id
    }
    TOP_PORTS.append(server_port)
    mock_delete.return_value = ()
    res = self.controller.delete(t_server_id)
    mock_delete_port.assert_called_once_with(self.context, port_id)
    mock_delete.assert_called_once_with(self.context, b_server_id)
    self.assertEqual(204, res.status)
def test_job_run_expire(self):
    """An expired Running AsyncJob is re-run and logged on success.

    A job stuck in Running for 200 seconds (presumably beyond the expiry
    window — confirm in xmanager) is registered alongside a fresh job;
    after the handler runs, an AsyncJobLog entry for the resource exists.
    """
    job_type = 'fake_resource'

    @xmanager._job_handle(job_type)
    def fake_handle(self, ctx, payload):
        pass

    fake_id = uuidutils.generate_uuid()
    fake_project_id = uuidutils.generate_uuid()
    payload = {job_type: fake_id}
    db_api.new_job(self.context, fake_project_id, job_type, fake_id)
    expired_job = {
        'id': uuidutils.generate_uuid(),
        'type': job_type,
        # 200 seconds in the past, still marked Running
        'timestamp': datetime.datetime.now() - datetime.timedelta(0, 200),
        'status': constants.JS_Running,
        'resource_id': fake_id,
        'extra_id': constants.SP_EXTRA_ID
    }
    core.create_resource(self.context, models.AsyncJob, expired_job)
    fake_handle(None, self.context, payload=payload)
    logs = core.query_resource(self.context, models.AsyncJobLog, [], [])
    self.assertEqual(fake_id, logs[0]['resource_id'])
    self.assertEqual(job_type, logs[0]['type'])
def _basic_pod_route_setup(self):
    """Create two bottom pods, the top pod and one port routing per pod."""
    pods = [
        {'pod_id': 'pod_id_1',
         'region_name': 'pod_1',
         'az_name': 'az_name_1'},
        {'pod_id': 'pod_id_2',
         'region_name': 'pod_2',
         'az_name': 'az_name_2'},
        # top pod has no availability zone
        {'pod_id': 'pod_id_0',
         'region_name': 'top_pod',
         'az_name': ''},
    ]
    for pod in pods:
        db_api.create_pod(self.context, pod)
    routes = [
        {'top_id': 'top_id_1', 'pod_id': 'pod_id_1',
         'bottom_id': 'bottom_id_1', 'resource_type': 'port'},
        {'top_id': 'top_id_2', 'pod_id': 'pod_id_2',
         'bottom_id': 'bottom_id_2', 'resource_type': 'port'},
    ]
    with self.context.session.begin():
        for route in routes:
            core.create_resource(self.context, models.ResourceRouting, route)
def _test_handle_remote_group_invalid_input(self, plugin, q_ctx, t_ctx,
                                            pod_id, top_sgs, top_rules,
                                            bottom1_sgs):
    """Creating/deleting rules that use remote groups must be rejected.

    Both operations raise RemoteGroupNotSupported: creating t_rule2
    (remote group t_sg2) and deleting t_rule1 (remote group t_sg1).
    """
    t_sg1_id = uuidutils.generate_uuid()
    t_sg2_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    # last argument of _build_test_rule is the remote group id
    t_rule1 = self._build_test_rule(
        t_rule1_id, t_sg1_id, project_id, None, t_sg1_id)
    t_rule2 = self._build_test_rule(
        t_rule2_id, t_sg1_id, project_id, None, t_sg2_id)
    t_sg = {'id': t_sg1_id, 'name': 'test', 'description': '',
            'tenant_id': project_id, 'security_group_rules': []}
    # bottom sg is named after the top sg id
    b_sg = {'id': b_sg_id, 'name': t_sg1_id, 'description': '',
            'tenant_id': project_id, 'security_group_rules': []}
    top_sgs.append(t_sg)
    top_rules.append(t_rule1)
    bottom1_sgs.append(b_sg)
    route = {
        'top_id': t_sg1_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route)
    self.assertRaises(exceptions.RemoteGroupNotSupported,
                      plugin.create_security_group_rule, q_ctx,
                      {'security_group_rule': t_rule2})
    self.assertRaises(exceptions.RemoteGroupNotSupported,
                      plugin.delete_security_group_rule, q_ctx, t_rule1_id)
def get_pod_by_az_tenant(context, az_name, tenant_id):
    """Return (pod, pod_az_name) for the tenant in the given AZ.

    An existing binding inside the AZ is reused; otherwise the tenant is
    bound to the first pod of the AZ with a non-empty pod_name.

    :returns: (pod, pod_az_name), or (None, None) when no suitable pod
        exists or binding creation fails
    """
    tenant_filter = [{'key': 'tenant_id',
                      'comparator': 'eq',
                      'value': tenant_id}]
    for binding in core.query_resource(context, models.PodBinding,
                                       tenant_filter, []):
        bound_pod = core.get_resource(context, models.Pod,
                                      binding['pod_id'])
        if bound_pod['az_name'] == az_name:
            return bound_pod, bound_pod['pod_az_name']
    # TODO(joehuang): schedule one dynamically in the future
    az_filter = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]
    for candidate in db_api.list_pods(context, filters=az_filter):
        if candidate['pod_name'] == '':
            continue
        try:
            with context.session.begin():
                core.create_resource(
                    context, models.PodBinding,
                    {'id': uuidutils.generate_uuid(),
                     'tenant_id': tenant_id,
                     'pod_id': candidate['pod_id']})
                return candidate, candidate['pod_az_name']
        except Exception as e:
            LOG.error(_LE('Fail to create pod binding: %(exception)s'),
                      {'exception': e})
            return None, None
    return None, None
def _test_delete_security_group_rule_exception(self, plugin, q_ctx, t_ctx,
                                               pod_id, top_sgs, top_rules,
                                               bottom1_sgs):
    """A bottom-pod failure while deleting a rule surfaces as
    BottomPodOperationFailure.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule = self._build_test_rule(
        t_rule_id, t_sg_id, project_id, '10.0.1.0/24')
    # bottom rule reuses the top rule id but belongs to the bottom sg
    b_rule = self._build_test_rule(
        t_rule_id, b_sg_id, project_id, '10.0.1.0/24')
    t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
            'tenant_id': project_id, 'security_group_rules': [t_rule]}
    b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
            'tenant_id': project_id, 'security_group_rules': [b_rule]}
    top_sgs.append(t_sg)
    top_rules.append(t_rule)
    bottom1_sgs.append(b_sg)
    route = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route)
    self.assertRaises(exceptions.BottomPodOperationFailure,
                      plugin.delete_security_group_rule, q_ctx, t_rule_id)
def _prepare_port_test(self, tenant_id, ctx, pod_name, net_id):
    """Create a top port mirrored in the given bottom pod.

    Top and bottom ports deliberately share one id; a routing entry is
    recorded for the pair.

    :returns: the port id
    """
    port_id = uuidutils.generate_uuid()
    TOP_PORTS.append(DotDict({'id': port_id, 'network_id': net_id}))
    bottom_port = DotDict({'id': port_id, 'network_id': net_id})
    if pod_name == 'pod_1':
        BOTTOM1_PORTS.append(bottom_port)
        pod_id = 'pod_id_1'
    else:
        BOTTOM2_PORTS.append(bottom_port)
        pod_id = 'pod_id_2'
    core.create_resource(ctx, models.ResourceRouting,
                         {'top_id': port_id,
                          'bottom_id': port_id,
                          'pod_id': pod_id,
                          'project_id': tenant_id,
                          'resource_type': constants.RT_PORT})
    return port_id
def _prepare_flow_classifier_test(self, project_id, t_ctx, pod_name,
                                  index, src_port_id, create_bottom):
    """Build a top flow classifier and optionally its bottom copy.

    A ResourceRouting entry linking the top and bottom ids is created
    unconditionally, even when create_bottom is False.

    :returns: (top_fc_id, bottom_fc_id)
    """
    t_fc_id = uuidutils.generate_uuid()
    b_fc_id = uuidutils.generate_uuid()
    top_fc = {
        "source_port_range_min": None,
        "destination_ip_prefix": None,
        "protocol": None,
        "description": "",
        "l7_parameters": {},
        "source_port_range_max": None,
        "id": t_fc_id,
        "name": "t_fc_%s" % index,
        "ethertype": "IPv4",
        "tenant_id": project_id,
        "source_ip_prefix": "1.0.0.0/24",
        "logical_destination_port": None,
        "destination_port_range_min": None,
        "destination_port_range_max": None,
        "project_id": project_id,
        "logical_source_port": src_port_id
    }
    TOP_FLOWCLASSIFIERS.append(DotDict(top_fc))
    if create_bottom:
        # bottom copy differs only in id and name
        btm_fc = {
            "source_port_range_min": None,
            "destination_ip_prefix": None,
            "protocol": None,
            "description": "",
            "l7_parameters": {},
            "source_port_range_max": None,
            "id": b_fc_id,
            "name": "b_fc_%s" % index,
            "ethertype": "IPv4",
            "tenant_id": project_id,
            "source_ip_prefix": "1.0.0.0/24",
            "logical_destination_port": None,
            "destination_port_range_min": None,
            "destination_port_range_max": None,
            "project_id": project_id,
            "logical_source_port": src_port_id
        }
        if pod_name == 'pod_1':
            BOTTOM1_FLOWCLASSIFIERS.append(DotDict(btm_fc))
        else:
            BOTTOM2_FLOWCLASSIFIERS.append(DotDict(btm_fc))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(
        t_ctx, models.ResourceRouting,
        {
            'top_id': t_fc_id,
            'bottom_id': b_fc_id,
            'pod_id': pod_id,
            'project_id': project_id,
            'resource_type': constants.RT_FLOW_CLASSIFIER
        })
    return t_fc_id, b_fc_id
def ensure_agent_exists(context, pod_id, host, _type, tunnel_ip):
    """Create a shadow agent record for (host, type) if one is missing.

    Concurrent creation is tolerated: a racing insert raises
    DBDuplicateEntry, which is swallowed after rolling back.  The
    session is managed manually (begin/commit) instead of via a context
    manager so the duplicate case can roll back explicitly.
    """
    try:
        context.session.begin()
        agents = core.query_resource(
            context, models.ShadowAgent,
            [{'key': 'host', 'comparator': 'eq', 'value': host},
             {'key': 'type', 'comparator': 'eq', 'value': _type}], [])
        if agents:
            # already registered; nothing was written, so the open
            # transaction is simply discarded by the final close
            return
        core.create_resource(
            context, models.ShadowAgent,
            {
                'id': uuidutils.generate_uuid(),
                'pod_id': pod_id,
                'host': host,
                'type': _type,
                'tunnel_ip': tunnel_ip
            })
        context.session.commit()
    except db_exc.DBDuplicateEntry:
        # agent has already been created
        context.session.rollback()
    finally:
        context.session.close()
def test_get_bottom_mappings_by_top_id(self):
    """Only complete routings of the requested type are returned."""
    for i in xrange(3):
        pod = {'pod_id': 'test_pod_uuid_%d' % i,
               'pod_name': 'test_pod_%d' % i,
               'az_name': 'test_az_uuid_%d' % i}
        api.create_pod(self.context, pod)
    # no bottom_id: mapping still in progress, must be skipped
    route1 = {
        'top_id': 'top_uuid',
        'pod_id': 'test_pod_uuid_0',
        'resource_type': 'port'}
    route2 = {
        'top_id': 'top_uuid',
        'pod_id': 'test_pod_uuid_1',
        'bottom_id': 'bottom_uuid_1',
        'resource_type': 'port'}
    # different resource type, must be skipped
    route3 = {
        'top_id': 'top_uuid',
        'pod_id': 'test_pod_uuid_2',
        'bottom_id': 'bottom_uuid_2',
        'resource_type': 'neutron'}
    routes = [route1, route2, route3]
    with self.context.session.begin():
        for route in routes:
            core.create_resource(
                self.context, models.ResourceRouting, route)
    mappings = api.get_bottom_mappings_by_top_id(self.context,
                                                 'top_uuid', 'port')
    # only route2 qualifies: (pod, bottom_id) pairs are returned
    self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
    self.assertEqual('bottom_uuid_1', mappings[0][1])
def _test_create_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
                                     top_sgs, bottom1_sgs):
    """A rule created on the top sg is mirrored into the bottom sg."""
    t_sg_id = uuidutils.generate_uuid()
    t_rule_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
            'tenant_id': project_id, 'security_group_rules': []}
    # bottom sg is named after the top sg id
    b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
            'tenant_id': project_id, 'security_group_rules': []}
    top_sgs.append(t_sg)
    bottom1_sgs.append(b_sg)
    route = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route)
    rule = {
        'security_group_rule': self._build_test_rule(
            t_rule_id, t_sg_id, project_id, '10.0.0.0/24')}
    plugin.create_security_group_rule(q_ctx, rule)
    # the mirrored rule must land in the bottom sg and reference it
    self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
    b_rule = bottom1_sgs[0]['security_group_rules'][0]
    self.assertEqual(b_sg_id, b_rule['security_group_id'])
def _prepare_snat_test(self, top_router_id):
    """Set up bottom-pod-2 external network objects for SNAT tests.

    Attaches an external gateway to the given top router, creates the
    north-south router with a port on the pod-2 bridge subnet, and
    records the RT_NS_ROUTER routing entry.

    :returns: (bridge_subnet_gateway_ip, ns_router_id)
    """
    ext_network = {'id': 'ext_network_id',
                   'router:external': True}
    ext_subnet = {
        'id': 'ext_subnet_id',
        'network_id': ext_network['id'],
        'cidr': '162.3.124.0/24',
        'gateway_ip': '162.3.124.1'
    }
    for router in TOP_ROUTER:
        if router['id'] == top_router_id:
            router['external_gateway_info'] = {
                'network_id': ext_network['id']}
    router = {'id': 'ns_router_id'}
    # assumes BOTTOM2_SUBNET already holds a subnet whose id contains
    # 'bridge'; bridge_subnet would be unbound otherwise — TODO confirm
    # callers always create the bridge subnet first
    for subnet in BOTTOM2_SUBNET:
        if 'bridge' in subnet['id']:
            bridge_subnet = subnet
    # ns router's interface on the bridge network, using the subnet
    # gateway ip
    bridge_port = {
        'network_id': bridge_subnet['network_id'],
        'device_id': router['id'],
        'device_owner': 'network:router_interface',
        'fixed_ips': [{'subnet_id': bridge_subnet['id'],
                       'ip_address': bridge_subnet['gateway_ip']}]
    }
    BOTTOM2_NETWORK.append(ext_network)
    BOTTOM2_SUBNET.append(ext_subnet)
    BOTTOM2_PORT.append(bridge_port)
    BOTTOM2_ROUTER.append(router)
    route = {'top_id': top_router_id, 'bottom_id': router['id'],
             'pod_id': 'pod_id_2', 'resource_type': constants.RT_NS_ROUTER}
    with self.context.session.begin():
        core.create_resource(self.context, models.ResourceRouting, route)
    return bridge_subnet['gateway_ip'], router['id']
def test_job_run_expire(self):
    """An expired Running AsyncJob is re-run and logged on success.

    Same scenario as the project-scoped variant but new_job is called
    without a project id; a job stuck in Running for 200 seconds
    (presumably beyond the expiry window — confirm in xmanager) must end
    up logged in AsyncJobLog.
    """
    job_type = 'fake_resource'

    @xmanager._job_handle(job_type)
    def fake_handle(self, ctx, payload):
        pass

    fake_id = uuidutils.generate_uuid()
    payload = {job_type: fake_id}
    db_api.new_job(self.context, job_type, fake_id)
    expired_job = {
        'id': uuidutils.generate_uuid(),
        'type': job_type,
        # 200 seconds in the past, still marked Running
        'timestamp': datetime.datetime.now() - datetime.timedelta(0, 200),
        'status': constants.JS_Running,
        'resource_id': fake_id,
        'extra_id': constants.SP_EXTRA_ID
    }
    core.create_resource(self.context, models.AsyncJob, expired_job)
    fake_handle(None, self.context, payload=payload)
    logs = core.query_resource(self.context, models.AsyncJobLog, [], [])
    self.assertEqual(fake_id, logs[0]['resource_id'])
    self.assertEqual(job_type, logs[0]['type'])
def finish_job(context, job_id, successful, timestamp):
    """Mark a job finished and prune its redundant history.

    On success, every AsyncJob of the same type/resource up to and
    including *timestamp* (i.e. including the job just updated) is
    deleted and a single AsyncJobLog entry is written in its place.
    On failure, the job row itself is kept in Fail status and only
    strictly older duplicates are removed.

    :param successful: True -> JS_Success, False -> JS_Fail
    """
    status = constants.JS_Success if successful else constants.JS_Fail
    with context.session.begin():
        job_dict = {
            'status': status,
            'timestamp': timestamp,
            'extra_id': uuidutils.generate_uuid()
        }
        job = core.update_resource(context, models.AsyncJob, job_id,
                                   job_dict)
        if status == constants.JS_Success:
            log_dict = {
                'id': uuidutils.generate_uuid(),
                'type': job['type'],
                'project_id': job['project_id'],
                'timestamp': timestamp,
                'resource_id': job['resource_id']
            }
            context.session.query(models.AsyncJob).filter(
                sql.and_(models.AsyncJob.type == job['type'],
                         models.AsyncJob.resource_id == job['resource_id'],
                         models.AsyncJob.timestamp <= timestamp)).delete(
                synchronize_session=False)
            core.create_resource(context, models.AsyncJobLog, log_dict)
        else:
            # sqlite has problem handling "<" operator on timestamp, so we
            # slide the timestamp a bit and use "<="
            timestamp = timestamp - datetime.timedelta(microseconds=1)
            context.session.query(models.AsyncJob).filter(
                sql.and_(models.AsyncJob.type == job['type'],
                         models.AsyncJob.resource_id == job['resource_id'],
                         models.AsyncJob.timestamp <= timestamp)).delete(
                synchronize_session=False)
def _prepare_port_pair_group_test(self, project_id, t_ctx, pod_name, index,
                                  t_pp_ids, create_bottom, b_pp_ids):
    """Build a top port pair group and optionally its bottom copy.

    Port pairs are resolved through fake clients; a routing entry
    linking the top and bottom ids is created unconditionally.

    :returns: (top_ppg_id, bottom_ppg_id)
    """
    t_ppg_id = uuidutils.generate_uuid()
    b_ppg_id = uuidutils.generate_uuid()
    t_client = FakeClient()
    b_client = FakeClient(pod_name)
    t_pps = [
        t_client.get_resource('port_pair', t_ctx, e) for e in t_pp_ids
    ]
    if create_bottom:
        b_pps = [
            b_client.get_resource('port_pair', t_ctx, e) for e in b_pp_ids
        ]
    top_ppg = {
        "group_id": 1,
        "description": "",
        "tenant_id": project_id,
        "port_pair_group_parameters": {
            "lb_fields": []
        },
        "port_pairs": t_pps,
        "project_id": project_id,
        "id": t_ppg_id,
        "name": 'top_ppg_%d' % index,
        "tap_enabled": False
    }
    TOP_PORTPAIRGROUPS.append(DotDict(top_ppg))
    if create_bottom:
        # bottom copy differs only in id, name and resolved port pairs
        btm_ppg = {
            "group_id": 1,
            "description": "",
            "tenant_id": project_id,
            "port_pair_group_parameters": {
                "lb_fields": []
            },
            "port_pairs": b_pps,
            "project_id": project_id,
            "id": b_ppg_id,
            "name": 'btm_ppg_%d' % index,
            "tap_enabled": False
        }
        if pod_name == 'pod_1':
            BOTTOM1_PORTPAIRGROUPS.append(DotDict(btm_ppg))
        else:
            BOTTOM2_PORTPAIRGROUPS.append(DotDict(btm_ppg))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(
        t_ctx, models.ResourceRouting,
        {
            'top_id': t_ppg_id,
            'bottom_id': b_ppg_id,
            'pod_id': pod_id,
            'project_id': project_id,
            'resource_type': constants.RT_PORT_PAIR_GROUP
        })
    return t_ppg_id, b_ppg_id
def _test_get_security_group(self, plugin, q_ctx, t_ctx, pod_id,
                             top_sgs, bottom1_sgs):
    """get_security_group: normal lookup and deleting-resource handling.

    After the sg is registered in DeletingResources, a LOCAL request
    gets ResourceNotFound while a USER_AGENT request gets
    ResourceIsInDeleting.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(t_rule1_id, t_sg_id, project_id,
                                    '10.0.0.0/24')
    t_rule2 = self._build_test_rule(t_rule2_id, t_sg_id, project_id,
                                    '192.168.56.0/24')
    t_sg = {
        'id': t_sg_id,
        'name': 'top_sg',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1, t_rule2]
    }
    b_sg = {
        'id': b_sg_id,
        'name': 'bottom_sg',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1, t_rule2]
    }
    top_sgs.append(t_sg)
    bottom1_sgs.append(b_sg)
    route1 = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG
    }
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route1)
    # test get_sg for normal situation
    res = plugin.get_security_group(q_ctx, t_sg_id)
    self.assertTrue(res['id'] == t_sg_id and res['name'] == 'top_sg')
    # test get_sg when the top_sg is under deleting
    dict_para = {
        'resource_id': t_sg_id,
        'resource_type': t_constants.RT_SG
    }
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.DeletingResources,
                             dict_para)
    q_ctx.USER_AGENT = t_constants.LOCAL
    self.assertRaises(t_exceptions.ResourceNotFound,
                      plugin.get_security_group, q_ctx, t_sg_id)
    # test get_sg when the request is from user_agent
    q_ctx.USER_AGENT = t_constants.USER_AGENT
    self.assertRaises(t_exceptions.ResourceIsInDeleting,
                      plugin.get_security_group, q_ctx, t_sg_id)
def _prepare_flow_classifier_test(self, project_id, t_ctx, pod_name,
                                  index, src_port_id, create_bottom):
    """Build a top flow classifier and optionally its bottom copy.

    A ResourceRouting entry linking the top and bottom ids is created
    unconditionally, even when create_bottom is False.

    :returns: (top_fc_id, bottom_fc_id)
    """
    t_fc_id = uuidutils.generate_uuid()
    b_fc_id = uuidutils.generate_uuid()
    top_fc = {
        "source_port_range_min": None,
        "destination_ip_prefix": None,
        "protocol": None,
        "description": "",
        "l7_parameters": {},
        "source_port_range_max": None,
        "id": t_fc_id,
        "name": "t_fc_%s" % index,
        "ethertype": "IPv4",
        "tenant_id": project_id,
        "source_ip_prefix": "1.0.0.0/24",
        "logical_destination_port": None,
        "destination_port_range_min": None,
        "destination_port_range_max": None,
        "project_id": project_id,
        "logical_source_port": src_port_id}
    TOP_FLOWCLASSIFIERS.append(DotDict(top_fc))
    if create_bottom:
        # bottom copy differs only in id and name
        btm_fc = {
            "source_port_range_min": None,
            "destination_ip_prefix": None,
            "protocol": None,
            "description": "",
            "l7_parameters": {},
            "source_port_range_max": None,
            "id": b_fc_id,
            "name": "b_fc_%s" % index,
            "ethertype": "IPv4",
            "tenant_id": project_id,
            "source_ip_prefix": "1.0.0.0/24",
            "logical_destination_port": None,
            "destination_port_range_min": None,
            "destination_port_range_max": None,
            "project_id": project_id,
            "logical_source_port": src_port_id}
        if pod_name == 'pod_1':
            BOTTOM1_FLOWCLASSIFIERS.append(DotDict(btm_fc))
        else:
            BOTTOM2_FLOWCLASSIFIERS.append(DotDict(btm_fc))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(t_ctx, models.ResourceRouting,
                         {'top_id': t_fc_id,
                          'bottom_id': b_fc_id,
                          'pod_id': pod_id,
                          'project_id': project_id,
                          'resource_type': constants.RT_FLOW_CLASSIFIER})
    return t_fc_id, b_fc_id
def _test_update_default_sg(self, plugin, q_ctx, t_ctx, pod_id,
                            top_sgs, top_rules, bottom1_sgs):
    """Rules of the 'default' sg stay in sync between top and bottom.

    After creating a rule on the top default sg and after deleting the
    pre-existing rule, the bottom sg's rule list must match the top's
    rule by rule.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(t_rule1_id, t_sg_id, project_id,
                                    '10.0.0.0/24')
    t_sg = {
        'id': t_sg_id,
        'name': 'default',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1]
    }
    # bottom default sg starts out with no rules
    b_sg = {
        'id': b_sg_id,
        'name': 'default',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': []
    }
    top_sgs.append(t_sg)
    top_rules.append(t_rule1)
    bottom1_sgs.append(b_sg)
    route1 = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG
    }
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route1)
    t_rule2 = {
        'security_group_rule': self._build_test_rule(t_rule2_id, t_sg_id,
                                                     project_id,
                                                     '10.0.1.0/24')
    }
    plugin.create_security_group_rule(q_ctx, t_rule2)
    # creation must bring bottom in line with top
    self.assertEqual(len(top_sgs[0]['security_group_rules']),
                     len(bottom1_sgs[0]['security_group_rules']))
    for i in range(len(bottom1_sgs[0]['security_group_rules'])):
        self.assertTrue(
            self._compare_rule(bottom1_sgs[0]['security_group_rules'][i],
                               top_sgs[0]['security_group_rules'][i]))
    plugin.delete_security_group_rule(q_ctx, t_rule1_id)
    # deletion must keep bottom in line with top as well
    self.assertEqual(len(bottom1_sgs[0]['security_group_rules']),
                     len(top_sgs[0]['security_group_rules']))
    for i in range(len(bottom1_sgs[0]['security_group_rules'])):
        self.assertTrue(
            self._compare_rule(bottom1_sgs[0]['security_group_rules'][i],
                               top_sgs[0]['security_group_rules'][i]))
def change_pod_binding(context, pod_binding, pod_id):
    """Rebind a tenant to a new pod.

    Updates the existing binding row with the (caller-modified) values
    in *pod_binding* and inserts a fresh binding pointing at *pod_id*
    with is_binding set to True.

    NOTE(review): presumably the caller sets pod_binding['is_binding']
    to False before calling so only the new row stays active — confirm
    at the call sites.
    """
    with context.session.begin():
        core.update_resource(context, models.PodBinding,
                             pod_binding['id'], pod_binding)
        core.create_resource(context, models.PodBinding,
                             {'id': uuidutils.generate_uuid(),
                              'tenant_id': pod_binding['tenant_id'],
                              'pod_id': pod_id,
                              'is_binding': True})
def _prepare_port_pair_test(self, project_id, t_ctx, pod_name, index,
                            ingress, egress, create_bottom,
                            portpairgroup_id=None):
    """Build a top port pair and optionally its bottom copy.

    A routing entry linking top and bottom ids is created
    unconditionally.

    :returns: (top_pp_id, bottom_pp_id)
    """
    t_pp_id = uuidutils.generate_uuid()
    b_pp_id = uuidutils.generate_uuid()
    top_pp = {
        'id': t_pp_id,
        'project_id': project_id,
        'tenant_id': project_id,
        'ingress': ingress,
        'egress': egress,
        'name': 'top_pp_%d' % index,
        'service_function_parameters': {
            "weight": 1,
            "correlation": DotDict({'value': 'null'})
        },
        'description': "description",
        'portpairgroup_id': portpairgroup_id
    }
    TOP_PORTPAIRS.append(DotDict(top_pp))
    if create_bottom:
        # bottom copy differs only in id and name
        btm_pp = {
            'id': b_pp_id,
            'project_id': project_id,
            'tenant_id': project_id,
            'ingress': ingress,
            'egress': egress,
            'name': 'btm_pp_%d' % index,
            'service_function_parameters': {
                "weight": 1,
                "correlation": DotDict({'value': 'null'})
            },
            'description': "description",
            'portpairgroup_id': portpairgroup_id
        }
        if pod_name == 'pod_1':
            BOTTOM1_PORTPAIRS.append(DotDict(btm_pp))
        else:
            BOTTOM2_PORTPAIRS.append(DotDict(btm_pp))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(
        t_ctx, models.ResourceRouting,
        {
            'top_id': t_pp_id,
            'bottom_id': b_pp_id,
            'pod_id': pod_id,
            'project_id': project_id,
            'resource_type': constants.RT_PORT_PAIR
        })
    return t_pp_id, b_pp_id
def _prepare_net_test(self, project_id, ctx, pod_name):
    """Register a network routing entry; return the top network id."""
    net_id = uuidutils.generate_uuid()
    if pod_name == 'pod_1':
        pod_id = 'pod_id_1'
    else:
        pod_id = 'pod_id_2'
    routing = {'top_id': net_id,
               # top and bottom deliberately share the same id
               'bottom_id': net_id,
               'pod_id': pod_id,
               'project_id': project_id,
               'resource_type': constants.RT_NETWORK}
    core.create_resource(ctx, models.ResourceRouting, routing)
    return net_id
def _prepare_server(self, pod):
    """Create a server routing entry in *pod*; return the server id."""
    server_id = uuidutils.generate_uuid()
    routing = {'top_id': server_id,
               # bottom id equals the top id for simplicity
               'bottom_id': server_id,
               'pod_id': pod['pod_id'],
               'project_id': self.project_id,
               'resource_type': constants.RT_SERVER}
    with self.context.session.begin():
        core.create_resource(self.context, models.ResourceRouting, routing)
    return server_id
def _test_handle_network_dhcp_port(self, dhcp_ip):
    """_handle_network copes with a pre-existing bottom DHCP port.

    Network and subnet are already mapped top->bottom and the bottom
    side holds a reserved DHCP port at *dhcp_ip*; after the call the
    routing table must still be consistent (checked by _check_routes).
    """
    t_pod, b_pod = self._prepare_pod()
    top_net_id = 'top_net_id'
    bottom_net_id = 'bottom_net_id'
    top_subnet_id = 'top_subnet_id'
    bottom_subnet_id = 'bottom_subnet_id'
    t_net = {'id': top_net_id}
    b_net = {'id': bottom_net_id}
    t_subnet = {'id': top_subnet_id,
                'network_id': top_net_id,
                'ip_version': 4,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.1',
                'allocation_pools': {'start': '10.0.0.2',
                                     'end': '10.0.0.254'},
                'enable_dhcp': True}
    b_subnet = {'id': bottom_subnet_id,
                'network_id': bottom_net_id,
                'ip_version': 4,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.1',
                'allocation_pools': {'start': '10.0.0.2',
                                     'end': '10.0.0.254'},
                'enable_dhcp': True}
    # bottom DHCP port already occupying dhcp_ip on the bottom subnet
    b_dhcp_port = {'id': 'bottom_dhcp_port_id',
                   'network_id': bottom_net_id,
                   'fixed_ips': [
                       {'subnet_id': bottom_subnet_id,
                        'ip_address': dhcp_ip}
                   ],
                   'mac_address': 'fa:16:3e:96:41:0a',
                   'binding:profile': {},
                   'device_id': 'reserved_dhcp_port',
                   'device_owner': 'network:dhcp'}
    TOP_NETS.append(t_net)
    TOP_SUBNETS.append(t_subnet)
    BOTTOM_NETS.append(b_net)
    BOTTOM_SUBNETS.append(b_subnet)
    BOTTOM_PORTS.append(b_dhcp_port)
    with self.context.session.begin():
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': top_net_id, 'bottom_id': bottom_net_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': 'network'})
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': top_subnet_id, 'bottom_id': bottom_subnet_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': 'subnet'})
    self.controller._handle_network(self.context,
                                    b_pod, t_net, [t_subnet])
    self._check_routes()
def _test_handle_network_dhcp_port(self, dhcp_ip):
    """_handle_network copes with a pre-existing bottom DHCP port.

    Variant where the top network also carries a name.  Network and
    subnet are pre-mapped top->bottom and the bottom side holds a
    reserved DHCP port at *dhcp_ip*; the routing table must stay
    consistent (checked by _check_routes).
    """
    t_pod, b_pod = self._prepare_pod()
    top_net_id = 'top_net_id'
    bottom_net_id = 'bottom_net_id'
    top_subnet_id = 'top_subnet_id'
    bottom_subnet_id = 'bottom_subnet_id'
    t_net = {'id': top_net_id, 'name': 'net'}
    b_net = {'id': bottom_net_id}
    t_subnet = {'id': top_subnet_id,
                'network_id': top_net_id,
                'ip_version': 4,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.1',
                'allocation_pools': {'start': '10.0.0.2',
                                     'end': '10.0.0.254'},
                'enable_dhcp': True}
    b_subnet = {'id': bottom_subnet_id,
                'network_id': bottom_net_id,
                'ip_version': 4,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.1',
                'allocation_pools': {'start': '10.0.0.2',
                                     'end': '10.0.0.254'},
                'enable_dhcp': True}
    # bottom DHCP port already occupying dhcp_ip on the bottom subnet
    b_dhcp_port = {'id': 'bottom_dhcp_port_id',
                   'network_id': bottom_net_id,
                   'fixed_ips': [
                       {'subnet_id': bottom_subnet_id,
                        'ip_address': dhcp_ip}
                   ],
                   'mac_address': 'fa:16:3e:96:41:0a',
                   'binding:profile': {},
                   'device_id': 'reserved_dhcp_port',
                   'device_owner': 'network:dhcp'}
    TOP_NETS.append(t_net)
    TOP_SUBNETS.append(t_subnet)
    BOTTOM_NETS.append(b_net)
    BOTTOM_SUBNETS.append(b_subnet)
    BOTTOM_PORTS.append(b_dhcp_port)
    with self.context.session.begin():
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': top_net_id, 'bottom_id': bottom_net_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': 'network'})
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': top_subnet_id, 'bottom_id': bottom_subnet_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': 'subnet'})
    self.controller._handle_network(self.context,
                                    b_pod, t_net, [t_subnet])
    self._check_routes()
def _prepare_port_chain_test(self, project_id, t_ctx, pod_name, index,
                             create_bottom, ids):
    """Build a top port chain and optionally its bottom copy.

    *ids* supplies the port pair group and flow classifier id lists
    (keys t_ppg_id/t_fc_id for top, b_ppg_id/b_fc_id for bottom).  A
    routing entry linking top and bottom chain ids is created
    unconditionally.

    :returns: (top_pc_id, bottom_pc_id)
    """
    t_pc_id = uuidutils.generate_uuid()
    b_pc_id = uuidutils.generate_uuid()
    top_pc = {
        "tenant_id": project_id,
        "name": "t_pc_%s" % index,
        "chain_parameters": {
            "symmetric": False,
            "correlation": "mpls"
        },
        "port_pair_groups": ids['t_ppg_id'],
        "flow_classifiers": ids['t_fc_id'],
        "project_id": project_id,
        "chain_id": 1,
        "description": "",
        "id": t_pc_id
    }
    TOP_PORTCHAINS.append(DotDict(top_pc))
    if create_bottom:
        # bottom copy differs only in id, name and referenced ids
        btm_pc = {
            "tenant_id": project_id,
            "name": "b_pc_%s" % index,
            "chain_parameters": {
                "symmetric": False,
                "correlation": "mpls"
            },
            "port_pair_groups": ids['b_ppg_id'],
            "flow_classifiers": ids['b_fc_id'],
            "project_id": project_id,
            "chain_id": 1,
            "description": "",
            "id": b_pc_id
        }
        if pod_name == 'pod_1':
            BOTTOM1_PORTCHAINS.append(DotDict(btm_pc))
        else:
            BOTTOM2_PORTCHAINS.append(DotDict(btm_pc))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(
        t_ctx, models.ResourceRouting,
        {
            'top_id': t_pc_id,
            'bottom_id': b_pc_id,
            'pod_id': pod_id,
            'project_id': project_id,
            'resource_type': constants.RT_PORT_CHAIN
        })
    return t_pc_id, b_pc_id
def test_resource_routing_unique_key(self):
    """Duplicate (top_id, pod_id, resource_type) routings are rejected."""
    api.create_pod(self.context, {'pod_id': 'test_pod1_uuid',
                                  'pod_name': 'test_pod1',
                                  'az_name': 'test_az1_uuid'})
    routing = {'top_id': 'top_uuid',
               'pod_id': 'test_pod1_uuid',
               'resource_type': 'port'}
    with self.context.session.begin():
        core.create_resource(self.context, models.ResourceRouting, routing)
    # the second insert of the identical routing violates the unique key
    self.assertRaises(oslo_db.exception.DBDuplicateEntry,
                      core.create_resource,
                      self.context, models.ResourceRouting, routing)
def _test_delete_security_group(self, plugin, q_ctx, t_ctx, pod_id,
                                top_sgs, bottom1_sgs):
    """delete_security_group error paths: unknown id and deleting state.

    An unknown sg id raises SecurityGroupNotFound; a sg registered in
    DeletingResources raises ResourceNotFound for a LOCAL request.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(t_rule1_id, t_sg_id, project_id,
                                    '10.0.0.0/24')
    t_rule2 = self._build_test_rule(t_rule2_id, t_sg_id, project_id,
                                    '192.168.56.0/24')
    t_sg = {
        'id': t_sg_id,
        'name': 'top_sg',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1, t_rule2]
    }
    b_sg = {
        'id': b_sg_id,
        'name': 'bottom_sg',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1, t_rule2]
    }
    top_sgs.append(t_sg)
    bottom1_sgs.append(b_sg)
    route1 = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG
    }
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route1)
    # test delete_sg when sg is not exit
    rand_id = uuidutils.generate_uuid()
    self.assertRaises(ext_sg.SecurityGroupNotFound,
                      plugin.delete_security_group, q_ctx, rand_id)
    # when sg is under deleting from Local
    dict_para = {
        'resource_id': t_sg_id,
        'resource_type': t_constants.RT_SG
    }
    q_ctx.USER_AGENT = t_constants.LOCAL
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.DeletingResources,
                             dict_para)
    self.assertRaises(t_exceptions.ResourceNotFound,
                      plugin.delete_security_group, q_ctx, t_sg_id)
def create_ag_az(context, ag_name, az_name):
    """Create a host aggregate bound to an availability zone.

    An AggregateMetadata row records the AZ; the returned aggregate dict
    is augmented with 'availability_zone' and 'metadata' fields.
    """
    aggregate = core.create_resource(context, models.Aggregate,
                                     {'name': ag_name})
    metadata = {'key': 'availability_zone',
                'value': az_name,
                'aggregate_id': aggregate['id']}
    core.create_resource(context, models.AggregateMetadata, metadata)
    aggregate.update({
        'availability_zone': az_name,
        'metadata': {'availability_zone': az_name},
    })
    return aggregate
def test_get_res_routing_ref(self):
    """get_res_routing_ref builds a session context from routing data.

    Without a matching routing entry the call returns None; once pod,
    routing and cached endpoint exist, it returns the top/bottom
    version and url mapping.
    """
    t_url = 'http://127.0.0.1:9696/v2.0/networks'
    # no routing entry for this id yet -> None
    self.assertIsNone(
        hclient.get_res_routing_ref(self.context, 'fake_pod_id', t_url,
                                    s_type=cons.ST_NEUTRON))
    pod_dict = {
        'pod_id': 'fake_pod_id',
        'region_name': 'fake_region_name',
        'az_name': 'fake_az'
    }
    api.create_pod(self.context, pod_dict)
    routes = [
        {
            'top_id': 'top_id',
            'bottom_id': 'bottom_id',
            'pod_id': 'fake_pod_id',
            'project_id': 'test_project_id',
            'resource_type': 'network'
        },
    ]
    with self.context.session.begin():
        for route in routes:
            core.create_resource(self.context, models.ResourceRouting,
                                 route)
    # cache the bottom neutron endpoint for the pod
    config_dict = {
        'service_id': 'fake_service_id',
        'pod_id': 'fake_pod_id',
        'service_type': cons.ST_NEUTRON,
        'service_url': 'http://127.0.0.1:9696/v2.0/networks'
    }
    api.create_cached_endpoints(self.context, config_dict)
    s_ctx = {
        't_ver': 'v2.0',
        'b_ver': 'v2.0',
        't_url': t_url,
        'b_url': t_url
    }
    self.assertEqual(
        s_ctx,
        hclient.get_res_routing_ref(self.context, 'top_id', t_url,
                                    s_type=cons.ST_NEUTRON))
def _prepare_port_pair_group_test(self, project_id, t_ctx, pod_name,
                                  index, t_pp_ids, create_bottom,
                                  b_pp_ids):
    """Seed a top (and optionally bottom) port pair group with routing.

    The bottom group is only materialized when *create_bottom* is True,
    but the ResourceRouting entry is always created. Returns the
    (top_ppg_id, bottom_ppg_id) pair.
    """
    t_ppg_id = uuidutils.generate_uuid()
    b_ppg_id = uuidutils.generate_uuid()
    t_client = FakeClient()
    b_client = FakeClient(pod_name)
    # resolve the referenced port pairs from the fake backends
    t_pps = [t_client.get_resource(
        'port_pair', t_ctx, e) for e in t_pp_ids]
    if create_bottom:
        b_pps = [b_client.get_resource(
            'port_pair', t_ctx, e) for e in b_pp_ids]
    top_ppg = {
        "group_id": 1,
        "description": "",
        "tenant_id": project_id,
        "port_pair_group_parameters": {"lb_fields": []},
        "port_pairs": t_pps,
        "project_id": project_id,
        "id": t_ppg_id,
        "name": 'top_ppg_%d' % index,
        "tap_enabled": False}
    TOP_PORTPAIRGROUPS.append(DotDict(top_ppg))
    if create_bottom:
        btm_ppg = {
            "group_id": 1,
            "description": "",
            "tenant_id": project_id,
            "port_pair_group_parameters": {"lb_fields": []},
            "port_pairs": b_pps,
            "project_id": project_id,
            "id": b_ppg_id,
            "name": 'btm_ppg_%d' % index,
            "tap_enabled": False}
        if pod_name == 'pod_1':
            BOTTOM1_PORTPAIRGROUPS.append(DotDict(btm_ppg))
        else:
            BOTTOM2_PORTPAIRGROUPS.append(DotDict(btm_ppg))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(t_ctx, models.ResourceRouting,
                         {'top_id': t_ppg_id,
                          'bottom_id': b_ppg_id,
                          'pod_id': pod_id,
                          'project_id': project_id,
                          'resource_type': constants.RT_PORT_PAIR_GROUP})
    return t_ppg_id, b_ppg_id
def _test_delete_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
                                     top_sgs, top_rules, bottom1_sgs):
    """Verify a top rule deletion propagates to the bottom group.

    Seeds a top sg with two rules mirrored into a bottom sg, deletes one
    rule through the plugin and checks the bottom sg keeps only the other
    rule with the bottom sg id, content otherwise identical.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(t_rule1_id, t_sg_id, project_id,
                                    '10.0.1.0/24')
    t_rule2 = self._build_test_rule(t_rule2_id, t_sg_id, project_id,
                                    '10.0.2.0/24')
    # bottom rules share ids with the top ones but belong to the bottom sg
    b_rule1 = self._build_test_rule(t_rule1_id, b_sg_id, project_id,
                                    '10.0.1.0/24')
    b_rule2 = self._build_test_rule(t_rule2_id, b_sg_id, project_id,
                                    '10.0.2.0/24')
    t_sg = {
        'id': t_sg_id,
        'name': 'test',
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [t_rule1, t_rule2]
    }
    b_sg = {
        'id': b_sg_id,
        'name': t_sg_id,
        'description': '',
        'tenant_id': project_id,
        'security_group_rules': [b_rule1, b_rule2]
    }
    top_sgs.append(t_sg)
    top_rules.append(t_rule1)
    top_rules.append(t_rule2)
    bottom1_sgs.append(b_sg)
    route = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG
    }
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route)
    plugin.delete_security_group_rule(q_ctx, t_rule1_id)
    # only rule2 should survive on the bottom side
    self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
    b_rule = bottom1_sgs[0]['security_group_rules'][0]
    self.assertEqual(b_sg_id, b_rule['security_group_id'])
    # compare rule bodies ignoring the differing security_group_id
    t_rule2.pop('security_group_id', None)
    b_rule.pop('security_group_id', None)
    self.assertEqual(t_rule2, b_rule)
def _test_get_security_group(self, plugin, q_ctx, t_ctx, pod_id,
                             top_sgs, bottom1_sgs):
    """Exercise get_security_group for normal and deleting states.

    A routed top/bottom sg pair is readable normally; once the top sg is
    marked in DeletingResources, a LOCAL request gets ResourceNotFound
    while a USER_AGENT request gets ResourceIsInDeleting.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(
        t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
    t_rule2 = self._build_test_rule(
        t_rule2_id, t_sg_id, project_id, '192.168.56.0/24')
    t_sg = {'id': t_sg_id, 'name': 'top_sg', 'description': '',
            'tenant_id': project_id,
            'security_group_rules': [t_rule1, t_rule2]}
    b_sg = {'id': b_sg_id, 'name': 'bottom_sg', 'description': '',
            'tenant_id': project_id,
            'security_group_rules': [t_rule1, t_rule2]}
    top_sgs.append(t_sg)
    bottom1_sgs.append(b_sg)
    route1 = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route1)
    # test get_sg for normal situation
    res = plugin.get_security_group(q_ctx, t_sg_id)
    self.assertTrue(res['id'] == t_sg_id and res['name'] == 'top_sg')
    # test get_sg when the top_sg is under deleting
    dict_para = {'resource_id': t_sg_id,
                 'resource_type': t_constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.DeletingResources, dict_para)
    # a local (bottom) request sees the sg as already gone
    q_ctx.USER_AGENT = t_constants.LOCAL
    self.assertRaises(t_exceptions.ResourceNotFound,
                      plugin.get_security_group, q_ctx, t_sg_id)
    # a user-facing request is told the sg is being deleted
    q_ctx.USER_AGENT = t_constants.USER_AGENT
    self.assertRaises(t_exceptions.ResourceIsInDeleting,
                      plugin.get_security_group, q_ctx, t_sg_id)
def test_get_failed_or_new_jobs(self, mock_now):
    """get_latest_failed_or_new_jobs classifies per-resource latest jobs.

    For each (type, resource_id) pair only the newest job within the
    redo time span counts: latest 'Fail' lands in the failed list,
    latest 'New' in the new list; 'Success' and jobs older than the
    span are excluded.
    """
    mock_now.return_value = datetime.datetime(2000, 1, 2, 12, 0, 0)
    job_dict_list = [
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 0, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'project_id': "uuid1",
         'status': constants.JS_Fail},  # job_uuid1
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 5, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'project_id': "uuid1",
         'status': constants.JS_Fail},  # job_uuid3
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 20, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'project_id': "uuid1",
         'status': constants.JS_Fail},  # job_uuid5
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 15, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'project_id': "uuid1",
         'status': constants.JS_Fail},  # job_uuid7
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 25, 0),
         'resource_id': 'uuid3', 'type': 'res3',
         'project_id': "uuid1",
         'status': constants.JS_Success},  # job_uuid9
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 30, 0),
         'resource_id': 'uuid4', 'type': 'res4',
         'project_id': "uuid1",
         'status': constants.JS_New},  # job_uuid11
        {'timestamp': datetime.datetime(1999, 12, 31, 12, 0, 0),
         'resource_id': 'uuid5', 'type': 'res5',
         'project_id': "uuid1",
         'status': constants.JS_Fail},  # job_uuid13
        {'timestamp': datetime.datetime(1999, 12, 31, 11, 59, 59),
         'resource_id': 'uuid6', 'type': 'res6',
         'project_id': "uuid1",
         'status': constants.JS_Fail}]  # job_uuid15
    for i, job_dict in enumerate(job_dict_list, 1):
        # each dict is inserted twice: once with the listed status (odd
        # uuid) and once more with status 'New' (even uuid), to make sure
        # duplicate rows do not confuse the query
        job_dict['id'] = 'job_uuid%d' % (2 * i - 1)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i - 1)
        core.create_resource(self.context, models.AsyncJob, job_dict)
        job_dict['id'] = 'job_uuid%d' % (2 * i)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i)
        job_dict['status'] = constants.JS_New
        core.create_resource(self.context, models.AsyncJob, job_dict)
    # for res3 + uuid3, the latest job's status is "Success", not returned
    # for res6 + uuid6, the latest job is out of the redo time span
    expected_failed_jobs = [
        {'resource_id': 'uuid1', 'type': 'res1',
         'project_id': "uuid1"},
        {'resource_id': 'uuid2', 'type': 'res2',
         'project_id': "uuid1"},
        {'resource_id': 'uuid5', 'type': 'res5',
         'project_id': "uuid1"}]
    expected_new_jobs = [{'resource_id': 'uuid4', 'type': 'res4',
                          'project_id': "uuid1"}]
    (failed_jobs,
     new_jobs) = db_api.get_latest_failed_or_new_jobs(self.context)
    six.assertCountEqual(self, expected_failed_jobs, failed_jobs)
    six.assertCountEqual(self, expected_new_jobs, new_jobs)
def post(self, **kw):
    """Create a region record.

    Expects a 'region' object in the request body; returns the created
    record, 409 on a duplicate region name, 500 on any other failure.
    """
    context = t_context.extract_context_from_environ()
    if 'region' not in kw:
        pecan.abort(400, _('Request body region not found'))
        return
    # NOTE: a leftover debug "print 'MaXiao kw ...'" statement that dumped
    # the raw request data to stdout was removed here.
    region = request.context['request_data']['region']
    region_name = region.get('region_name', '').strip()
    _uuid = uuidutils.generate_uuid()
    try:
        with context.session.begin():
            new_region = core.create_resource(
                context, models.Region,
                {'id': _uuid, 'region_name': region_name})
    except db_exc.DBDuplicateEntry as e1:
        LOG.exception('Record already exists on %(region_name)s: '
                      '%(exception)s',
                      {'region_name': region_name, 'exception': e1})
        return Response(_('Record already exists'), 409)
    except Exception as e2:
        LOG.exception('Failed to create region: %(region_name)s,'
                      '%(exception)s ',
                      {'region_name': region_name, 'exception': e2})
        return Response(_('Failed to create region'), 500)
    return {'region': new_region}
def test_resources(self):
    """Create all the resources to test model definition"""
    try:
        # collect every model class, ordered so that foreign-key targets
        # are created before the rows that reference them
        candidates = [cls for _, cls in inspect.getmembers(models)
                      if inspect.isclass(cls)
                      and issubclass(cls, core.ModelBase)]
        for model_cls in _sort_model_by_foreign_key(candidates):
            fields = _construct_resource_dict(model_cls)
            with self.context.session.begin():
                core.create_resource(self.context, model_cls, fields)
    except Exception as e:
        self.fail('test_resources raised Exception unexpectedly %s'
                  % str(e))
def get_or_create_route(t_ctx, q_ctx, project_id, pod, ele, _type,
                        list_ele_method):
    """Fetch the routing entry mapping *ele* to *pod*, creating it if absent.

    Returns a (route, state) pair where state is one of:
      ALL_DONE  -- route exists and already points at a bottom resource
      RES_DONE  -- another worker created the bottom resource; the route
                   has just been updated to point at it
      NONE_DONE -- a fresh bottom-less route was created, or creation lost
                   a race to another worker (route is then None)
    """
    # use configuration option later
    route_expire_threshold = 30
    _id = ele['id']
    with t_ctx.session.begin():
        # also filter on resource_type, otherwise a routing entry of a
        # different type that shares the same top_id could be returned
        routes = core.query_resource(
            t_ctx, models.ResourceRouting,
            [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
             {'key': 'pod_id', 'comparator': 'eq',
              'value': pod['pod_id']},
             {'key': 'resource_type', 'comparator': 'eq',
              'value': _type}], [])
        if routes:
            route = routes[0]
            if route['bottom_id']:
                return route, ALL_DONE
            else:
                route_time = route['updated_at'] or route['created_at']
                current_time = datetime.datetime.utcnow()
                delta = current_time - route_time
                # total_seconds() instead of .seconds: the latter wraps at
                # one day and would treat day-old routes as fresh
                if delta.total_seconds() > route_expire_threshold:
                    # NOTE(zhiyuan) cannot directly remove the route, we have
                    # a race here that other worker is updating this route, we
                    # need to check if the corresponding element has been
                    # created by other worker
                    eles = list_ele_method(t_ctx, q_ctx, pod, ele, _type)
                    if eles:
                        route['bottom_id'] = eles[0]['id']
                        core.update_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'], route)
                        return route, RES_DONE
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        pass
    try:
        # NOTE(zhiyuan) try/except block inside a with block will cause
        # problem, so move them out of the block and manually handle the
        # session context
        t_ctx.session.begin()
        route = core.create_resource(
            t_ctx, models.ResourceRouting,
            {'top_id': _id,
             'pod_id': pod['pod_id'],
             'project_id': project_id,
             'resource_type': _type})
        t_ctx.session.commit()
        return route, NONE_DONE
    except db_exc.DBDuplicateEntry:
        t_ctx.session.rollback()
        return None, NONE_DONE
    finally:
        t_ctx.session.close()
def post(self, **kw):
    """Create a firewall gateway record.

    Requires the admin pods-create policy. Reads the 'firewall_gateway'
    object from the request body and persists it; returns a success
    message with the new record, a FirewallGatewayExists message on a
    duplicate (router_id, firewall_id) pair, or a generic failure
    message on any other error.
    """
    context = t_context.extract_context_from_environ()
    if not policy.enforce(context, policy.ADMIN_API_PODS_CREATE):
        pecan.abort(401, _('Unauthorized to create firewall_gateway'))
        return
    firewall_gateway = request.context['request_data']['firewall_gateway']
    fabric = firewall_gateway.get('fabric', '').strip()
    firewall_id = firewall_gateway.get('firewall_id', '').strip()
    router_id = firewall_gateway.get('router_id', '').strip()
    project_id = firewall_gateway.get('project_id', '').strip()
    admin_state_up = firewall_gateway.get('admin_state_up', True)
    status = firewall_gateway.get('status', "DOWN")
    description = firewall_gateway.get('description', '').strip()
    _uuid = uuidutils.generate_uuid()
    try:
        with context.session.begin():
            new_firewall_gateway = core.create_resource(
                context, models.FirewallGateway, {
                    'id': _uuid,
                    'fabric': fabric,
                    'firewall_id': firewall_id,
                    'router_id': router_id,
                    'project_id': project_id,
                    'admin_state_up': admin_state_up,
                    'status': status,
                    'description': description
                })
        return_object = m.SuccessMessage(
            result={'firewall_gateway': new_firewall_gateway})
        return return_object.to_dict()
    except db_exc.DBDuplicateEntry as e1:
        # unique constraint on (router_id, firewall_id) was violated
        LOG.exception(
            'Record firewall_gateway already exists for '
            'router_id %(router_id)s: '
            'firewall_id%(firewall_id)s: '
            '%(exception)s', {
                'router_id': router_id,
                'firewall_id': firewall_id,
                'exception': e1
            })
        return_object = m.FirewallGatewayExists(router_id=router_id,
                                                firewall_id=firewall_id)
        return return_object.to_dict()
    except Exception as e2:
        LOG.exception(
            'Failed to create firewall_gateway: '
            'router_id: %(router_id)s,'
            'firewall_id %(firewall_id)s: '
            '%(exception)s ', {
                'router_id': router_id,
                'firewall_id': firewall_id,
                'exception': e2
            })
        return_object = m.FailureMessage()
        return return_object.to_dict()
def create_pod_binding(context, tenant_id, pod_id):
    """Persist and return a binding between *tenant_id* and *pod_id*."""
    binding = {
        'id': uuidutils.generate_uuid(),
        'tenant_id': tenant_id,
        'pod_id': pod_id,
        'is_binding': True,
    }
    with context.session.begin():
        return core.create_resource(context, models.PodBinding, binding)
def get_pod_by_az_tenant(context, az_name, tenant_id):
    """Return (pod, pod_az_name) bound to *tenant_id*, binding one if needed.

    First looks for an existing pod binding of the tenant matching
    *az_name* (or any bound bottom pod when *az_name* is ''). Failing
    that, picks the first valid bottom pod, creates a new binding for it
    and returns it. Returns (None, None) when no suitable pod exists or
    binding creation fails.
    """
    pod_bindings = core.query_resource(context, models.PodBinding,
                                       [{'key': 'tenant_id',
                                         'comparator': 'eq',
                                         'value': tenant_id}], [])
    for pod_b in pod_bindings:
        pod = core.get_resource(context, models.Pod, pod_b['pod_id'])
        if az_name and pod['az_name'] == az_name:
            return pod, pod['pod_az_name']
        elif az_name == '' and pod['az_name'] != '':
            # if the az_name is not specified, a default bottom
            # pod will be selected
            return pod, pod['pod_az_name']
        else:
            pass
    # TODO(joehuang): schedule one dynamically in the future
    if az_name != '':
        filters = [{'key': 'az_name', 'comparator': 'eq',
                    'value': az_name}]
    else:
        filters = None
    # if az_name is valid, select a pod under this az_name
    # if az_name is '', select the first valid bottom pod.
    # change to dynamic scheduling in the future
    pods = db_api.list_pods(context, filters=filters)
    for pod in pods:
        if pod['pod_name'] != '' and pod['az_name'] != '':
            try:
                with context.session.begin():
                    core.create_resource(
                        context, models.PodBinding,
                        {'id': uuidutils.generate_uuid(),
                         'tenant_id': tenant_id,
                         'pod_id': pod['pod_id'],
                         'is_binding': True})
                return pod, pod['pod_az_name']
            except Exception as e:
                LOG.error(_LE('Fail to create pod binding: %(exception)s'),
                          {'exception': e})
                return None, None
    return None, None
def _prepare_port_pair_test(self, project_id, t_ctx, pod_name, index,
                            ingress, egress, create_bottom,
                            portpairgroup_id=None):
    """Seed a top (and optionally bottom) port pair with its routing.

    The bottom port pair is only materialized when *create_bottom* is
    True; the ResourceRouting entry is always created. Returns the
    (top_pp_id, bottom_pp_id) pair.
    """
    t_pp_id = uuidutils.generate_uuid()
    b_pp_id = uuidutils.generate_uuid()
    top_pp = {
        'id': t_pp_id,
        'project_id': project_id,
        'tenant_id': project_id,
        'ingress': ingress,
        'egress': egress,
        'name': 'top_pp_%d' % index,
        'service_function_parameters': {
            "weight": 1,
            "correlation": DotDict({'value': 'null'})},
        'description': "description",
        'portpairgroup_id': portpairgroup_id
    }
    TOP_PORTPAIRS.append(DotDict(top_pp))
    if create_bottom:
        btm_pp = {
            'id': b_pp_id,
            'project_id': project_id,
            'tenant_id': project_id,
            'ingress': ingress,
            'egress': egress,
            'name': 'btm_pp_%d' % index,
            'service_function_parameters': {
                "weight": 1,
                "correlation": DotDict({'value': 'null'})},
            'description': "description",
            'portpairgroup_id': portpairgroup_id
        }
        if pod_name == 'pod_1':
            BOTTOM1_PORTPAIRS.append(DotDict(btm_pp))
        else:
            BOTTOM2_PORTPAIRS.append(DotDict(btm_pp))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(t_ctx, models.ResourceRouting,
                         {'top_id': t_pp_id,
                          'bottom_id': b_pp_id,
                          'pod_id': pod_id,
                          'project_id': project_id,
                          'resource_type': constants.RT_PORT_PAIR})
    return t_pp_id, b_pp_id
def _test_update_default_sg(self, plugin, q_ctx, t_ctx, pod_id,
                            top_sgs, top_rules, bottom1_sgs):
    """Verify rule create/delete on the default sg syncs to bottom.

    Seeds a routed default top sg (one rule) and an empty bottom sg,
    adds a rule through the plugin and checks bottom matches top, then
    deletes a rule and checks the sides stay equal rule-for-rule.
    """
    t_sg_id = uuidutils.generate_uuid()
    t_rule1_id = uuidutils.generate_uuid()
    t_rule2_id = uuidutils.generate_uuid()
    b_sg_id = uuidutils.generate_uuid()
    project_id = 'test_prject_id'
    t_rule1 = self._build_test_rule(
        t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
    t_sg = {'id': t_sg_id, 'name': 'default', 'description': '',
            'tenant_id': project_id,
            'security_group_rules': [t_rule1]}
    # bottom sg starts empty; rule sync should populate it
    b_sg = {'id': b_sg_id, 'name': 'default', 'description': '',
            'tenant_id': project_id,
            'security_group_rules': []}
    top_sgs.append(t_sg)
    top_rules.append(t_rule1)
    bottom1_sgs.append(b_sg)
    route1 = {
        'top_id': t_sg_id,
        'pod_id': pod_id,
        'bottom_id': b_sg_id,
        'resource_type': constants.RT_SG}
    with t_ctx.session.begin():
        core.create_resource(t_ctx, models.ResourceRouting, route1)
    t_rule2 = {
        'security_group_rule': self._build_test_rule(
            t_rule2_id, t_sg_id, project_id, '10.0.1.0/24')}
    plugin.create_security_group_rule(q_ctx, t_rule2)
    # after creation both sides must hold equivalent rule lists
    self.assertEqual(len(top_sgs[0]['security_group_rules']),
                     len(bottom1_sgs[0]['security_group_rules']))
    for i in range(len(bottom1_sgs[0]['security_group_rules'])):
        self.assertTrue(self._compare_rule(
            bottom1_sgs[0]['security_group_rules'][i],
            top_sgs[0]['security_group_rules'][i]))
    plugin.delete_security_group_rule(q_ctx, t_rule1_id)
    # after deletion both sides must still match
    self.assertEqual(len(bottom1_sgs[0]['security_group_rules']),
                     len(top_sgs[0]['security_group_rules']))
    for i in range(len(bottom1_sgs[0]['security_group_rules'])):
        self.assertTrue(self._compare_rule(
            bottom1_sgs[0]['security_group_rules'][i],
            top_sgs[0]['security_group_rules'][i]))
def test_get_failed_or_new_jobs(self, mock_now):
    """get_latest_failed_or_new_jobs classifies per-resource latest jobs.

    Variant without project_id fields: per (type, resource_id) pair only
    the newest job within the redo time span counts — latest 'Fail' goes
    to the failed list, latest 'New' to the new list, while 'Success'
    and out-of-span jobs are excluded.
    """
    mock_now.return_value = datetime.datetime(2000, 1, 2, 12, 0, 0)
    job_dict_list = [
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 0, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'status': constants.JS_Fail},  # job_uuid1
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 5, 0),
         'resource_id': 'uuid1', 'type': 'res1',
         'status': constants.JS_Fail},  # job_uuid3
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 20, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'status': constants.JS_Fail},  # job_uuid5
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 15, 0),
         'resource_id': 'uuid2', 'type': 'res2',
         'status': constants.JS_Fail},  # job_uuid7
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 25, 0),
         'resource_id': 'uuid3', 'type': 'res3',
         'status': constants.JS_Success},  # job_uuid9
        {'timestamp': datetime.datetime(2000, 1, 1, 12, 30, 0),
         'resource_id': 'uuid4', 'type': 'res4',
         'status': constants.JS_New},  # job_uuid11
        {'timestamp': datetime.datetime(1999, 12, 31, 12, 0, 0),
         'resource_id': 'uuid5', 'type': 'res5',
         'status': constants.JS_Fail},  # job_uuid13
        {'timestamp': datetime.datetime(1999, 12, 31, 11, 59, 59),
         'resource_id': 'uuid6', 'type': 'res6',
         'status': constants.JS_Fail}]  # job_uuid15
    for i, job_dict in enumerate(job_dict_list, 1):
        # each dict is inserted twice: once with the listed status (odd
        # uuid) and once more with status 'New' (even uuid)
        job_dict['id'] = 'job_uuid%d' % (2 * i - 1)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i - 1)
        core.create_resource(self.context, models.AsyncJob, job_dict)
        job_dict['id'] = 'job_uuid%d' % (2 * i)
        job_dict['extra_id'] = 'extra_uuid%d' % (2 * i)
        job_dict['status'] = constants.JS_New
        core.create_resource(self.context, models.AsyncJob, job_dict)
    # for res3 + uuid3, the latest job's status is "Success", not returned
    # for res6 + uuid6, the latest job is out of the redo time span
    expected_failed_jobs = [
        {'resource_id': 'uuid1', 'type': 'res1'},
        {'resource_id': 'uuid2', 'type': 'res2'},
        {'resource_id': 'uuid5', 'type': 'res5'}]
    expected_new_jobs = [{'resource_id': 'uuid4', 'type': 'res4'}]
    (failed_jobs,
     new_jobs) = db_api.get_latest_failed_or_new_jobs(self.context)
    six.assertCountEqual(self, expected_failed_jobs, failed_jobs)
    six.assertCountEqual(self, expected_new_jobs, new_jobs)
def post(self, **kw):
    """Create a pod binding (admin only).

    Validates that the request carries a 'pod_binding' body with
    non-empty tenant_id and pod_id, that the pod exists and is a bottom
    pod (az_name set), then persists the binding. Returns 4xx Responses
    for validation/conflict errors and aborts with 500 on unexpected
    failures.
    """
    context = t_context.extract_context_from_environ()
    if not t_context.is_admin_context(context):
        pecan.abort(400, _('Admin role required to create bindings'))
        return
    if 'pod_binding' not in kw:
        pecan.abort(400, _('Request body not found'))
        return
    pod_b = kw['pod_binding']
    tenant_id = pod_b.get('tenant_id', '').strip()
    pod_id = pod_b.get('pod_id', '').strip()
    _uuid = uuidutils.generate_uuid()
    if tenant_id == '' or pod_id == '':
        return Response(_('Tenant_id and pod_id can not be empty'), 422)
    # the az_pod_map_id should be exist for in the pod map table
    try:
        with context.session.begin():
            pod = core.get_resource(context, models.Pod, pod_id)
            # a pod with empty az_name is the top region, which cannot
            # be bound to a tenant
            if pod.get('az_name') == '':
                return Response(_('Top region can not be bound'), 422)
    except t_exc.ResourceNotFound:
        return Response(_('pod_id not found in pod'), 422)
    except Exception as e:
        LOG.exception(
            _LE('Failed to get_resource for pod_id: '
                '%(pod_id)s ,'
                '%(exception)s '), {
                'pod_id': pod_id,
                'exception': e
            })
        pecan.abort(500, _('Failed to create pod binding'))
        return
    try:
        with context.session.begin():
            pod_binding = core.create_resource(context,
                                               models.PodBinding, {
                                                   'id': _uuid,
                                                   'tenant_id': tenant_id,
                                                   'pod_id': pod_id
                                               })
    except db_exc.DBDuplicateEntry:
        return Response(_('Pod binding already exists'), 409)
    except db_exc.DBConstraintError:
        return Response(_('pod_id not exists in pod'), 422)
    except db_exc.DBReferenceError:
        return Response(_('DB reference not exists in pod'), 422)
    except Exception as e:
        LOG.exception(_LE('Failed to create pod binding: %(exception)s '),
                      {'exception': e})
        pecan.abort(500, _('Failed to create pod binding'))
        return
    return {'pod_binding': pod_binding}
def test_get_pod_by_top_id(self):
    """get_pod_by_top_id resolves a unique, complete routing to its pod.

    The lookup returns the pod for a single complete routing entry, and
    None when a top id maps to multiple pods or its bottom_id is empty.
    """
    self._create_pod(1, 'test_az_uuid1')
    self._create_pod(2, 'test_az_uuid2')
    routes = [
        {
            'top_id': 'top_uuid_1',
            'bottom_id': 'bottom_uuid_1',
            'pod_id': 'test_pod_uuid_1',
            'project_id': 'test_project_uuid_1',
            'resource_type': 'port'
        },
        {
            'top_id': 'top_uuid_2',
            'bottom_id': 'bottom_uuid_2-1',
            'pod_id': 'test_pod_uuid_1',
            'project_id': 'test_project_uuid_1',
            'resource_type': 'network'
        },
        {
            'top_id': 'top_uuid_2',
            'bottom_id': 'bottom_uuid_2-2',
            'pod_id': 'test_pod_uuid_2',
            'project_id': 'test_project_uuid_1',
            'resource_type': 'network'
        },
        {
            'top_id': 'top_uuid_3',
            'bottom_id': '',
            'pod_id': 'test_pod_uuid_1',
            'project_id': 'test_project_uuid_1',
            'resource_type': 'port'
        }
    ]
    with self.context.session.begin():
        for route in routes:
            core.create_resource(
                self.context, models.ResourceRouting, route)
    pod = api.get_pod_by_top_id(self.context, 'top_uuid_1')
    self.assertEqual(pod['pod_id'], 'test_pod_uuid_1')
    pod = api.get_pod_by_top_id(self.context, 'top_uuid_2')
    # more than one routing entries found, method returns None
    self.assertIsNone(pod)
    pod = api.get_pod_by_top_id(self.context, 'top_uuid_3')
    # bottom_id is empty, method returns None
    self.assertIsNone(pod)
def get_or_create_route(t_ctx, q_ctx, project_id, pod, ele, _type,
                        list_ele_method):
    """Fetch the routing entry mapping *ele* to *pod*, creating it if absent.

    Returns a (route, state) pair where state is one of:
      ALL_DONE  -- route exists and already points at a bottom resource
      RES_DONE  -- another worker created the bottom resource; the route
                   has just been updated to point at it
      NONE_DONE -- a fresh bottom-less route was created, or creation lost
                   a race to another worker (route is then None)
    """
    # use configuration option later
    route_expire_threshold = 30
    _id = ele['id']
    with t_ctx.session.begin():
        routes = core.query_resource(
            t_ctx, models.ResourceRouting,
            [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
             {'key': 'pod_id', 'comparator': 'eq',
              'value': pod['pod_id']},
             {'key': 'resource_type', 'comparator': 'eq',
              'value': _type}], [])
        if routes:
            route = routes[0]
            if route['bottom_id']:
                return route, ALL_DONE
            else:
                route_time = route['updated_at'] or route['created_at']
                current_time = datetime.datetime.utcnow()
                delta = current_time - route_time
                # total_seconds() instead of .seconds: the latter wraps at
                # one day and would treat day-old routes as fresh
                if delta.total_seconds() > route_expire_threshold:
                    # NOTE(zhiyuan) cannot directly remove the route, we have
                    # a race here that other worker is updating this route, we
                    # need to check if the corresponding element has been
                    # created by other worker
                    eles = list_ele_method(t_ctx, q_ctx, pod, ele, _type)
                    if eles:
                        route['bottom_id'] = eles[0]['id']
                        core.update_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'], route)
                        return route, RES_DONE
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        pass
    try:
        # NOTE(zhiyuan) try/except block inside a with block will cause
        # problem, so move them out of the block and manually handle the
        # session context
        t_ctx.session.begin()
        route = core.create_resource(t_ctx, models.ResourceRouting,
                                     {'top_id': _id,
                                      'pod_id': pod['pod_id'],
                                      'project_id': project_id,
                                      'resource_type': _type})
        t_ctx.session.commit()
        return route, NONE_DONE
    except db_exc.DBDuplicateEntry:
        t_ctx.session.rollback()
        return None, NONE_DONE
    finally:
        t_ctx.session.close()
def new_job(context, _type, resource_id):
    """Insert and return a job record in the "New" state."""
    with context.session.begin():
        return core.create_resource(
            context, models.Job,
            {'id': uuidutils.generate_uuid(),
             'type': _type,
             'status': constants.JS_New,
             'resource_id': resource_id,
             'extra_id': uuidutils.generate_uuid()})
def _prepare_port_chain_test(self, project_id, t_ctx, pod_name,
                             index, create_bottom, ids):
    """Seed a top (and optionally bottom) port chain with its routing.

    *ids* supplies the port pair group and flow classifier ids for both
    sides. The bottom chain is only materialized when *create_bottom* is
    True; the ResourceRouting entry is always created. Returns the
    (top_pc_id, bottom_pc_id) pair.
    """
    t_pc_id = uuidutils.generate_uuid()
    b_pc_id = uuidutils.generate_uuid()
    top_pc = {
        "tenant_id": project_id,
        "name": "t_pc_%s" % index,
        "chain_parameters": {
            "symmetric": False, "correlation": "mpls"},
        "port_pair_groups": ids['t_ppg_id'],
        "flow_classifiers": ids['t_fc_id'],
        "project_id": project_id,
        "chain_id": 1,
        "description": "",
        "id": t_pc_id}
    TOP_PORTCHAINS.append(DotDict(top_pc))
    if create_bottom:
        btm_pc = {
            "tenant_id": project_id,
            "name": "b_pc_%s" % index,
            "chain_parameters": {
                "symmetric": False, "correlation": "mpls"},
            "port_pair_groups": ids['b_ppg_id'],
            "flow_classifiers": ids['b_fc_id'],
            "project_id": project_id,
            "chain_id": 1,
            "description": "",
            "id": b_pc_id}
        if pod_name == 'pod_1':
            BOTTOM1_PORTCHAINS.append(DotDict(btm_pc))
        else:
            BOTTOM2_PORTCHAINS.append(DotDict(btm_pc))
    pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
    core.create_resource(t_ctx, models.ResourceRouting,
                         {'top_id': t_pc_id,
                          'bottom_id': b_pc_id,
                          'pod_id': pod_id,
                          'project_id': project_id,
                          'resource_type': constants.RT_PORT_CHAIN})
    return t_pc_id, b_pc_id