def get_one(self, _id):
    """Return a single flavor, or all flavors for the special id 'detail'.

    NOTE(zhiyuan) this function handles two kinds of requests
    GET /flavors/flavor_id
    GET /flavors/detail
    """
    context = t_context.extract_context_from_environ()

    def _expose_id(record):
        # API consumers expect 'id'; the table column is 'flavorid'
        record['id'] = record.pop('flavorid')
        return record

    if _id == 'detail':
        with context.session.begin():
            all_flavors = core.query_resource(context,
                                              models.InstanceTypes, [], [])
            return {'flavors': [_expose_id(f) for f in all_flavors]}
    else:
        with context.session.begin():
            matches = core.query_resource(
                context, models.InstanceTypes,
                [{'key': 'flavorid', 'comparator': 'eq',
                  'value': _id}], [])
            if not matches:
                pecan.abort(404, 'Flavor not found')
                return
            return {'flavor': _expose_id(matches[0])}
def get_all_ag(context, filters=None, sorts=None):
    """List aggregates with availability-zone metadata folded in.

    Each returned aggregate gains an 'availability_zone' field and a
    'metadata' dict (both empty when no AZ metadata exists for it).
    """
    aggregates = core.query_resource(context, models.Aggregate,
                                     filters or [], sorts or [])
    metadatas = core.query_resource(
        context, models.AggregateMetadata,
        [{'key': 'key', 'comparator': 'eq',
          'value': 'availability_zone'}], [])
    # index AZ metadata rows by their aggregate id for O(1) lookup
    agg_meta_map = {meta['aggregate_id']: meta for meta in metadatas}

    for aggregate in aggregates:
        meta = agg_meta_map.get(aggregate['id'])
        if meta is not None:
            az = meta['value']
            aggregate.update({'availability_zone': az,
                              'metadata': {'availability_zone': az}})
        else:
            aggregate.update({'availability_zone': '', 'metadata': {}})
    return aggregates
def test_post_bottom_pod(self, mock_context):
    """A posted bottom pod gets a matching aggregate and AZ metadata."""
    mock_context.return_value = self.context
    body = {'pod': {'pod_name': 'BottomPod', 'az_name': 'TopAZ'}}
    pod_id = self.controller.post(**body)['pod']['pod_id']
    with self.context.session.begin():
        pod = core.get_resource(self.context, models.Pod, pod_id)
        self.assertEqual(pod['pod_name'], 'BottomPod')
        self.assertEqual(pod['az_name'], 'TopAZ')

        name_filter = [{'key': 'pod_name', 'comparator': 'eq',
                        'value': 'BottomPod'}]
        pods = core.query_resource(self.context, models.Pod,
                                   name_filter, [])
        self.assertEqual(len(pods), 1)

        # an aggregate named after the pod must have been created
        ag_name = utils.get_ag_name('BottomPod')
        ag_filter = [{'key': 'name', 'comparator': 'eq',
                      'value': ag_name}]
        aggregates = core.query_resource(self.context, models.Aggregate,
                                         ag_filter, [])
        self.assertEqual(len(aggregates), 1)

        # and the aggregate carries the availability zone as metadata
        meta_filter = [
            {'key': 'key', 'comparator': 'eq',
             'value': 'availability_zone'},
            {'key': 'aggregate_id', 'comparator': 'eq',
             'value': aggregates[0]['id']}]
        metadatas = core.query_resource(
            self.context, models.AggregateMetadata, meta_filter, [])
        self.assertEqual(len(metadatas), 1)
        self.assertEqual(metadatas[0]['value'], 'TopAZ')
def _check_routes(self):
    """Verify routing rows pair every top resource with its bottom copy."""
    # exactly one network and one subnet on each side
    for res in (TOP_NETS, TOP_SUBNETS, BOTTOM_NETS, BOTTOM_SUBNETS):
        self.assertEqual(1, len(res))
    self.assertEqual(2, len(TOP_PORTS))
    self.assertEqual(2, len(BOTTOM_PORTS))

    with self.context.session.begin():
        routes = core.query_resource(self.context,
                                     models.ResourceRouting, [], [])
        self.assertEqual(4, len(routes))

    # collect [top_id, bottom_id] pairs in the order: network, subnet,
    # interface port (has 'device_id'), other port
    actual = [[], [], [], []]
    for region in ('t_region', 'b_region'):
        actual[0].append(self.controller._get_client(
            region).list_resources('network', self.context, [])[0]['id'])
        actual[1].append(self.controller._get_client(
            region).list_resources('subnet', self.context, [])[0]['id'])
        region_ports = self.controller._get_client(
            region).list_resources('port', self.context, [])
        if 'device_id' in region_ports[0]:
            actual[2].append(region_ports[0]['id'])
            actual[3].append(region_ports[1]['id'])
        else:
            actual[2].append(region_ports[1]['id'])
            actual[3].append(region_ports[0]['id'])

    expect = [[route['top_id'], route['bottom_id']] for route in routes]
    self.assertItemsEqual(expect, actual)
def get_pod_by_az_tenant(context, az_name, tenant_id):
    """Return (pod, pod_az_name) bound to the tenant in the given AZ.

    An existing PodBinding for the tenant is preferred; otherwise the
    first pod in the AZ with a non-empty pod_name is bound to the tenant
    and returned. Returns (None, None) when no pod is available or the
    binding cannot be created.
    """
    pod_bindings = core.query_resource(context, models.PodBinding,
                                       [{'key': 'tenant_id',
                                         'comparator': 'eq',
                                         'value': tenant_id}], [])
    for pod_b in pod_bindings:
        pod = core.get_resource(context, models.Pod, pod_b['pod_id'])
        if pod['az_name'] == az_name:
            return pod, pod['pod_az_name']

    # TODO(joehuang): schedule one dynamically in the future
    filters = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]
    pods = db_api.list_pods(context, filters=filters)
    for pod in pods:
        # skip entries without a pod name (e.g. the top region record)
        if pod['pod_name'] != '':
            try:
                with context.session.begin():
                    core.create_resource(
                        context, models.PodBinding,
                        {'id': uuidutils.generate_uuid(),
                         'tenant_id': tenant_id,
                         'pod_id': pod['pod_id']})
                    return pod, pod['pod_az_name']
            except Exception as e:
                # NOTE(review): gives up on the first failed binding
                # instead of trying the next pod — presumably intended,
                # confirm before changing
                LOG.error(_LE('Fail to create pod binding: %(exception)s'),
                          {'exception': e})
                return None, None
    return None, None
def get_bottom_mappings_by_tenant_pod(context, tenant_id, pod_id,
                                      resource_type):
    """Get resource routing for specific tenant and pod

    :param context: context object
    :param tenant_id: tenant id to look up
    :param pod_id: pod to look up
    :param resource_type: specific resource
    :return: a dic {top_id : route}
    """
    route_filters = [
        {'key': 'pod_id', 'comparator': 'eq', 'value': pod_id},
        {'key': 'project_id', 'comparator': 'eq', 'value': tenant_id},
        {'key': 'resource_type', 'comparator': 'eq',
         'value': resource_type}]
    with context.session.begin():
        routes = core.query_resource(
            context, models.ResourceRouting, route_filters, [])
    # routes without a bottom id are incomplete and are skipped
    return {route['top_id']: route
            for route in routes if route['bottom_id']}
def test_job_run_expire(self): job_type = 'fake_resource' @xmanager._job_handle(job_type) def fake_handle(self, ctx, payload): pass fake_id = uuidutils.generate_uuid() fake_project_id = uuidutils.generate_uuid() payload = {job_type: fake_id} db_api.new_job(self.context, fake_project_id, job_type, fake_id) expired_job = { 'id': uuidutils.generate_uuid(), 'type': job_type, 'timestamp': datetime.datetime.now() - datetime.timedelta(0, 200), 'status': constants.JS_Running, 'resource_id': fake_id, 'extra_id': constants.SP_EXTRA_ID } core.create_resource(self.context, models.AsyncJob, expired_job) fake_handle(None, self.context, payload=payload) logs = core.query_resource(self.context, models.AsyncJobLog, [], []) self.assertEqual(fake_id, logs[0]['resource_id']) self.assertEqual(job_type, logs[0]['type'])
def test_job_run_expire(self):
    """An expired running job is picked up again.

    After the handler runs, the job table holds three records for the
    resource: the expired 'Running' one marked 'Fail', plus 'New' and
    'Success' entries from the rerun.
    """
    # hoist the repeated literal, consistent with the sibling expire test
    job_type = 'fake_resource'

    @xmanager._job_handle(job_type)
    def fake_handle(self, ctx, payload):
        pass

    fake_id = uuidutils.generate_uuid()
    payload = {job_type: fake_id}
    # plant a running job whose timestamp is 120s in the past
    expired_job = {
        'id': uuidutils.generate_uuid(),
        'type': job_type,
        'timestamp': datetime.datetime.now() - datetime.timedelta(0, 120),
        'status': constants.JS_Running,
        'resource_id': fake_id,
        'extra_id': constants.SP_EXTRA_ID
    }
    core.create_resource(self.context, models.Job, expired_job)

    fake_handle(None, self.context, payload=payload)

    jobs = core.query_resource(self.context, models.Job, [], [])
    expected_status = ['New', 'Fail', 'Success']
    job_status = [job['status'] for job in jobs]
    self.assertItemsEqual(expected_status, job_status)
    # range instead of xrange: identical here and Python-3 compatible
    for i in range(3):
        self.assertEqual(fake_id, jobs[i]['resource_id'])
        self.assertEqual(job_type, jobs[i]['type'])
def get_all(self):
    """List every flavor as an (id, name) pair."""
    context = t_context.extract_context_from_environ()
    with context.session.begin():
        flavors = core.query_resource(context, models.InstanceTypes,
                                      [], [])
        # expose the 'flavorid' column as 'id' in the API payload
        return {'flavors': [{'id': flavor['flavorid'],
                             'name': flavor['name']}
                            for flavor in flavors]}
def get_or_create_route(t_ctx, q_ctx, project_id, pod, ele, _type,
                        list_ele_method):
    """Get the routing entry for *ele* in *pod*, creating one if needed.

    Returns a (route, status) pair: ALL_DONE when the route already has a
    bottom id, RES_DONE when the bottom element was found and the stale
    route updated, NONE_DONE when a fresh route was created (route is
    None if another worker created it first).
    """
    # use configuration option later
    route_expire_threshold = 30

    _id = ele['id']
    with t_ctx.session.begin():
        routes = core.query_resource(
            t_ctx, models.ResourceRouting,
            [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
             {'key': 'pod_id', 'comparator': 'eq',
              'value': pod['pod_id']},
             {'key': 'resource_type', 'comparator': 'eq',
              'value': _type}], [])
        if routes:
            route = routes[0]
            if route['bottom_id']:
                return route, ALL_DONE
            else:
                route_time = route['updated_at'] or route['created_at']
                current_time = datetime.datetime.utcnow()
                delta = current_time - route_time
                # NOTE(review): delta.seconds ignores the days component
                # of the timedelta — confirm the threshold semantics
                if delta.seconds > route_expire_threshold:
                    # NOTE(zhiyuan) cannot directly remove the route, we have
                    # a race here that other worker is updating this route, we
                    # need to check if the corresponding element has been
                    # created by other worker
                    eles = list_ele_method(t_ctx, q_ctx, pod, ele, _type)
                    if eles:
                        route['bottom_id'] = eles[0]['id']
                        core.update_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'], route)
                        return route, RES_DONE
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        pass
    try:
        # NOTE(zhiyuan) try/except block inside a with block will cause
        # problem, so move them out of the block and manually handle the
        # session context
        t_ctx.session.begin()
        route = core.create_resource(t_ctx, models.ResourceRouting,
                                     {'top_id': _id,
                                      'pod_id': pod['pod_id'],
                                      'project_id': project_id,
                                      'resource_type': _type})
        t_ctx.session.commit()
        return route, NONE_DONE
    except db_exc.DBDuplicateEntry:
        # another worker won the race to create the route
        t_ctx.session.rollback()
        return None, NONE_DONE
    finally:
        t_ctx.session.close()
def get_running_job(context, _type, resource_id):
    """Return the running job for the given type/resource, or None."""
    job_filters = [
        {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},
        {'key': 'status', 'comparator': 'eq',
         'value': constants.JS_Running},
        {'key': 'type', 'comparator': 'eq', 'value': _type}]
    jobs = core.query_resource(context, models.AsyncJob, job_filters, [])
    return jobs[0] if jobs else None
def get_latest_job(context, status, _type, resource_id):
    """Return the latest job matching status/type/resource, or None."""
    job_filters = [
        {'key': 'status', 'comparator': 'eq', 'value': status},
        {'key': 'type', 'comparator': 'eq', 'value': _type},
        {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}]
    # sorted on timestamp so the first row is the most recent match
    jobs = core.query_resource(context, models.AsyncJob, job_filters,
                               [('timestamp', False)])
    return jobs[0] if jobs else None
def test_delete(self, mock_context):
    """Deleting a pod removes its record from the pod table."""
    mock_context.return_value = self.context
    body = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
    pod_id = self.controller.post(**body)['pod']['pod_id']

    self.controller.delete(pod_id)

    region_filter = [{'key': 'region_name', 'comparator': 'eq',
                      'value': 'BottomPod'}]
    with self.context.session.begin():
        pods = core.query_resource(self.context, models.Pod,
                                   region_filter, [])
        self.assertEqual(0, len(pods))
def test_delete_mappings_by_bottom_id(self):
    """delete_mappings_by_bottom_id removes every matching routing row."""
    for index in range(3):
        self._create_pod(index, 'test_az_uuid_%d' % index)
    self._create_resource_mappings()

    bottom_id = 'bottom_uuid_1'
    api.delete_mappings_by_bottom_id(self.context, bottom_id)

    remaining = core.query_resource(
        self.context, models.ResourceRouting,
        [{'key': 'bottom_id', 'comparator': 'eq',
          'value': bottom_id}], [])
    self.assertEqual(len(remaining), 0)
def delete_pre_created_resource_mapping(context, name):
    """Delete the routing entries of a pre-created resource by name.

    The resource is located via the routing entry whose top_id equals
    *name*. Its companion entry (keyed on the bottom id) is removed
    first, then the entry found by name itself.
    """
    with context.session.begin():
        entries = core.query_resource(
            context, models.ResourceRouting,
            filters=[{'key': 'top_id', 'comparator': 'eq',
                      'value': name}], sorts=[])
        if entries:
            # companion entry: its top_id is this entry's bottom_id
            core.delete_resources(
                context, models.ResourceRouting,
                filters=[{'key': 'top_id', 'comparator': 'eq',
                          'value': entries[0]['bottom_id']}])
            core.delete_resource(context, models.ResourceRouting,
                                 entries[0]['id'])
def _get_top_region(self, ctx):
    """Return the name of the top region, or '' if none can be found.

    The top region is the pod with an empty az_name and a non-empty
    pod_name.
    """
    top_region_name = ''
    try:
        with ctx.session.begin():
            pods = core.query_resource(ctx, models.Pod, [], [])
            for pod in pods:
                if pod['az_name'] == '' and pod['pod_name'] != '':
                    return pod['pod_name']
    except Exception as e:
        # log instead of silently swallowing the failure, consistent
        # with the logging variant of this helper elsewhere in the code
        LOG.exception(_LE('Failed to get top region: %(exception)s '),
                      {'exception': e})
        return top_region_name
    return top_region_name
def test_post_top_pod(self, mock_context):
    """A pod posted with an empty az_name is stored as the top pod."""
    mock_context.return_value = self.context
    body = {'pod': {'pod_name': 'TopPod', 'az_name': ''}}
    pod_id = self.controller.post(**body)['pod']['pod_id']
    with self.context.session.begin():
        pod = core.get_resource(self.context, models.Pod, pod_id)
        self.assertEqual(pod['pod_name'], 'TopPod')
        self.assertEqual(pod['az_name'], '')

        name_filter = [{'key': 'pod_name', 'comparator': 'eq',
                        'value': 'TopPod'}]
        pods = core.query_resource(self.context, models.Pod,
                                   name_filter, [])
        self.assertEqual(len(pods), 1)
def delete(self, _id):
    """Remove the flavor with the given flavorid; 404 when absent,
    202 on success."""
    context = t_context.extract_context_from_environ()
    with context.session.begin():
        matches = core.query_resource(
            context, models.InstanceTypes,
            [{'key': 'flavorid', 'comparator': 'eq', 'value': _id}], [])
        if not matches:
            pecan.abort(404, 'Flavor not found')
            return
        core.delete_resource(context, models.InstanceTypes,
                             matches[0]['id'])
        pecan.response.status = 202
        return
def _get_top_region(self, ctx):
    """Return the name of the top region, or '' when it cannot be found.

    The top region is the pod with an empty az_name and a non-empty
    pod_name; lookup failures are logged and yield ''.
    """
    try:
        with ctx.session.begin():
            for pod in core.query_resource(ctx, models.Pod, [], []):
                if pod['az_name'] == '' and pod['pod_name'] != '':
                    return pod['pod_name']
    except Exception as e:
        LOG.exception(_LE('Failed to get top region: %(exception)s '),
                      {'exception': e})
    return ''
def get_one_ag(context, aggregate_id):
    """Fetch one aggregate and attach its availability-zone metadata."""
    aggregate = core.get_resource(context, models.Aggregate, aggregate_id)
    metadatas = core.query_resource(
        context, models.AggregateMetadata,
        [{'key': 'key', 'comparator': 'eq',
          'value': 'availability_zone'},
         {'key': 'aggregate_id', 'comparator': 'eq',
          'value': aggregate['id']}], [])
    az = metadatas[0]['value'] if metadatas else ''
    aggregate['availability_zone'] = az
    # metadata dict is empty (not {'availability_zone': ''}) when no row
    aggregate['metadata'] = {'availability_zone': az} if metadatas else {}
    return aggregate
def get_pod_by_az_tenant(context, az_name, tenant_id):
    """Return (pod, pod_az_name) serving *tenant_id* in *az_name*.

    An existing binding is preferred. Otherwise the first pod with both
    a pod name and an AZ name is bound to the tenant and returned.
    Returns (None, None) when no pod is found or binding creation fails.
    """
    pod_bindings = core.query_resource(context, models.PodBinding,
                                       [{'key': 'tenant_id',
                                         'comparator': 'eq',
                                         'value': tenant_id}], [])
    for pod_b in pod_bindings:
        pod = core.get_resource(context, models.Pod, pod_b['pod_id'])
        if az_name and pod['az_name'] == az_name:
            return pod, pod['pod_az_name']
        elif az_name == '' and pod['az_name'] != '':
            # if the az_name is not specified, a default bottom
            # pod will be selected
            return pod, pod['pod_az_name']
        else:
            pass

    # TODO(joehuang): schedule one dynamically in the future
    if az_name != '':
        filters = [{'key': 'az_name', 'comparator': 'eq',
                    'value': az_name}]
    else:
        filters = None

    # if az_name is valid, select a pod under this az_name
    # if az_name is '', select the first valid bottom pod.
    # change to dynamic scheduling in the future
    pods = db_api.list_pods(context, filters=filters)
    for pod in pods:
        if pod['pod_name'] != '' and pod['az_name'] != '':
            try:
                with context.session.begin():
                    core.create_resource(
                        context, models.PodBinding,
                        {'id': uuidutils.generate_uuid(),
                         'tenant_id': tenant_id,
                         'pod_id': pod['pod_id'],
                         'is_binding': True})
                    return pod, pod['pod_az_name']
            except Exception as e:
                # NOTE(review): returns on the first failed binding
                # instead of trying the next pod — confirm intended
                LOG.error(_LE('Fail to create pod binding: %(exception)s'),
                          {'exception': e})
                return None, None
    return None, None
def get_all(self):
    """Return every tenant/pod binding record (admin only)."""
    context = t_context.extract_context_from_environ()
    if not t_context.is_admin_context(context):
        pecan.abort(400, _('Admin role required to list bindings'))
        return

    bindings = None
    try:
        with context.session.begin():
            bindings = core.query_resource(context,
                                           models.PodBinding, [], [])
    except Exception:
        pecan.abort(500, _('Fail to list tenant pod bindings'))
        return
    return {'pod_bindings': bindings}
def get_pod_by_top_id(context, _id):
    """Get pod resource from pod table by top id of resource

    :param context: context object
    :param _id: the top id of resource
    :returns: pod resource, or None when no unique complete route exists
    """
    with context.session.begin():
        matches = core.query_resource(
            context, models.ResourceRouting,
            [{'key': 'top_id', 'comparator': 'eq', 'value': _id}], [])
        # require exactly one routing entry that already has a bottom id
        if len(matches) != 1 or not matches[0]['bottom_id']:
            return None
        return core.get_resource(context, models.Pod,
                                 matches[0]['pod_id'])
def get_all(self):
    """Return every tenant/pod binding record (requires list policy)."""
    context = t_context.extract_context_from_environ()
    if not policy.enforce(context, policy.ADMIN_API_BINDINGS_LIST):
        pecan.abort(401, _('Unauthorized to list bindings'))
        return

    bindings = None
    try:
        with context.session.begin():
            bindings = core.query_resource(context,
                                           models.PodBinding, [], [])
    except Exception:
        pecan.abort(500, _('Fail to list tenant pod bindings'))
        return
    return {'pod_bindings': bindings}
def test_job_handle(self):
    """A handled job leaves a log entry with its resource id and type."""
    job_type = 'fake_resource'

    @xmanager._job_handle(job_type)
    def fake_handle(self, ctx, payload):
        pass

    fake_id = 'fake_id'
    fake_project_id = uuidutils.generate_uuid()
    db_api.new_job(self.context, fake_project_id, job_type, fake_id)

    fake_handle(None, self.context, payload={job_type: fake_id})

    logs = core.query_resource(self.context, models.AsyncJobLog, [], [])
    self.assertEqual(fake_id, logs[0]['resource_id'])
    self.assertEqual(job_type, logs[0]['type'])
def list_pods_by_tenant(context, tenant_id):
    """Return the pods bound to *tenant_id* (empty list when none)."""
    bindings = core.query_resource(context, models.PodBinding,
                                   [{'key': 'tenant_id',
                                     'comparator': 'eq',
                                     'value': tenant_id}], [])
    return [core.get_resource(context, models.Pod, binding['pod_id'])
            for binding in bindings]
def test_job_handle_exception(self):
    """A handler that raises leaves one New and one Fail job record."""
    @xmanager._job_handle('fake_resource')
    def fake_handle(self, ctx, payload):
        raise Exception()

    fake_id = 'fake_id'
    fake_handle(None, self.context, payload={'fake_resource': fake_id})

    jobs = core.query_resource(self.context, models.Job, [], [])
    # exactly one 'New' and one 'Fail' record, both for our resource
    self.assertItemsEqual([constants.JS_New, constants.JS_Fail],
                          [job['status'] for job in jobs])
    for job in jobs:
        self.assertEqual(fake_id, job['resource_id'])
        self.assertEqual('fake_resource', job['type'])
def _handle_sg_rule_for_new_group(self, context, pod, top_sgs,
                                  bottom_sg_ids):
    """Sync rules of newly created bottom security groups.

    For each top security group, build the bottom rule set (skipping
    remote-group rules), wipe the rules the bottom group currently has
    and create the new ones. On failure the routing entry is expired so
    the rule operations can be redone later.
    """
    client = self._get_client(pod['pod_name'])
    # top_sgs and bottom_sg_ids are parallel lists
    for i, t_sg in enumerate(top_sgs):
        b_sg_id = bottom_sg_ids[i]
        new_b_rules = []
        for t_rule in t_sg['security_group_rules']:
            if t_rule['remote_group_id']:
                # we do not handle remote group rule for non-default
                # security group, actually tricircle plugin in neutron
                # will reject such rule
                # default security group is not passed with top_sgs so
                # t_rule will not belong to default security group
                continue
            new_b_rules.append(
                self._construct_bottom_rule(t_rule, b_sg_id))
        try:
            b_sg = client.get_security_groups(context, b_sg_id)
            for b_rule in b_sg['security_group_rules']:
                self._safe_delete_security_group_rule(
                    context, client, b_rule['id'])
            if new_b_rules:
                rule_body = {'security_group_rules': new_b_rules}
                self._safe_create_security_group_rule(context, client,
                                                      rule_body)
        except Exception:
            # if we fail when operating bottom security group rules, we
            # update the security group mapping to set bottom_id to None
            # and expire the mapping, so next time the security group
            # rule operations can be redone
            with context.session.begin():
                routes = core.query_resource(
                    context, models.ResourceRouting,
                    [{'key': 'top_id', 'comparator': 'eq',
                      'value': t_sg['id']},
                     {'key': 'bottom_id', 'comparator': 'eq',
                      'value': b_sg_id}], [])
                update_dict = {'bottom_id': None,
                               'created_at': constants.expire_time,
                               'updated_at': constants.expire_time}
                core.update_resource(context, models.ResourceRouting,
                                     routes[0]['id'], update_dict)
            raise
def test_delete_trunk(self):
    """Deleting a trunk removes top/bottom trunks and the routing row."""
    project_id = TEST_TENANT_ID
    q_ctx = FakeNeutronContext()
    t_ctx = context.get_db_context()
    self._basic_pod_setup()
    fake_plugin = FakePlugin()

    t_trunk, b_trunk = self._prepare_trunk_test(project_id, t_ctx,
                                                'pod_1', 1, True)
    fake_plugin.delete_trunk(q_ctx, t_trunk['id'])

    self.assertEqual(len(TOP_TRUNKS), 0)
    self.assertEqual(len(BOTTOM1_TRUNKS), 0)

    remaining = core.query_resource(
        t_ctx, models.ResourceRouting,
        [{'key': 'top_id', 'comparator': 'eq',
          'value': t_trunk['id']}], [])
    self.assertEqual(len(remaining), 0)
def test_delete_error(self, mock_ctx, mock_delete):
    """Server delete error paths.

    Covers: unknown server id, a stale routing entry (bottom server
    already gone), and an exception raised while deleting.
    """
    t_pod, b_pod = self._prepare_pod()
    mock_ctx.return_value = self.context

    # pass invalid id
    res = self.controller.delete('fake_id')
    self.assertEqual('Server not found', res['Error']['message'])
    self.assertEqual(404, res['Error']['code'])

    t_server_id = 't_server_id'
    b_server_id = 'b_server_id'
    with self.context.session.begin():
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': t_server_id, 'bottom_id': b_server_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': constants.RT_SERVER})

    mock_delete.return_value = None
    # pass stale server id
    res = self.controller.delete(t_server_id)
    self.assertEqual('Server not found', res['Error']['message'])
    self.assertEqual(404, res['Error']['code'])

    routes = core.query_resource(
        self.context, models.ResourceRouting,
        [{'key': 'top_id', 'comparator': 'eq',
          'value': t_server_id}], [])
    # check the stale mapping is deleted
    self.assertEqual(0, len(routes))

    # recreate the mapping for the exception scenario
    with self.context.session.begin():
        core.create_resource(
            self.context, models.ResourceRouting,
            {'top_id': t_server_id, 'bottom_id': b_server_id,
             'pod_id': b_pod['pod_id'], 'project_id': self.project_id,
             'resource_type': constants.RT_SERVER})

    # exception occurs when deleting server
    mock_delete.side_effect = t_exceptions.PodNotFound('pod2')
    res = self.controller.delete(t_server_id)
    self.assertEqual('Pod pod2 could not be found.',
                     res['Error']['message'])
    self.assertEqual(404, res['Error']['code'])
def get_or_create_route(t_ctx, q_ctx, project_id, pod, ele, _type,
                        list_ele_method):
    """Get the routing entry for *ele* in *pod*, creating one if needed.

    Returns a (route, status) pair: ALL_DONE when the route already has
    a bottom id, RES_DONE when the bottom element was found and the
    stale route updated, NONE_DONE when a fresh route was created (route
    is None if another worker created it first).
    """
    # use configuration option later
    route_expire_threshold = 30

    _id = ele['id']
    with t_ctx.session.begin():
        routes = core.query_resource(t_ctx, models.ResourceRouting,
                                     [{
                                         'key': 'top_id',
                                         'comparator': 'eq',
                                         'value': _id
                                     }, {
                                         'key': 'pod_id',
                                         'comparator': 'eq',
                                         'value': pod['pod_id']
                                     }, {
                                         'key': 'resource_type',
                                         'comparator': 'eq',
                                         'value': _type
                                     }], [])
        if routes:
            route = routes[0]
            if route['bottom_id']:
                return route, ALL_DONE
            else:
                route_time = route['updated_at'] or route['created_at']
                current_time = datetime.datetime.utcnow()
                delta = current_time - route_time
                # NOTE(review): delta.seconds ignores the days component
                # of the timedelta — confirm the threshold semantics
                if delta.seconds > route_expire_threshold:
                    # NOTE(zhiyuan) cannot directly remove the route, we have
                    # a race here that other worker is updating this route, we
                    # need to check if the corresponding element has been
                    # created by other worker
                    eles = list_ele_method(t_ctx, q_ctx, pod, ele, _type)
                    if eles:
                        route['bottom_id'] = eles[0]['id']
                        core.update_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'], route)
                        return route, RES_DONE
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        pass
    try:
        # NOTE(zhiyuan) try/except block inside a with block will cause
        # problem, so move them out of the block and manually handle the
        # session context
        t_ctx.session.begin()
        route = core.create_resource(
            t_ctx, models.ResourceRouting, {
                'top_id': _id,
                'pod_id': pod['pod_id'],
                'project_id': project_id,
                'resource_type': _type
            })
        t_ctx.session.commit()
        return route, NONE_DONE
    except db_exc.DBDuplicateEntry:
        # another worker won the race to create the route
        t_ctx.session.rollback()
        return None, NONE_DONE
    finally:
        t_ctx.session.close()
def list_pods(context, filters=None, sorts=None):
    """Query pod records, optionally filtered and sorted."""
    return core.query_resource(context, models.Pod,
                               filters if filters else [],
                               sorts if sorts else [])
def list_cached_endpoints(context, filters=None, sorts=None):
    """Query cached endpoint records, optionally filtered and sorted."""
    return core.query_resource(context, models.CachedEndpoint,
                               filters if filters else [],
                               sorts if sorts else [])
def list_pods(context, filters=None, sorts=None):
    """Query pod records inside a session, filtered and sorted."""
    pod_filters = filters or []
    pod_sorts = sorts or []
    with context.session.begin():
        return core.query_resource(context, models.Pod,
                                   pod_filters, pod_sorts)
def list_pod_service_configurations(context, filters=None, sorts=None):
    """Query pod service configuration records inside a session."""
    config_filters = filters or []
    config_sorts = sorts or []
    with context.session.begin():
        return core.query_resource(context,
                                   models.PodServiceConfiguration,
                                   config_filters, config_sorts)
def list_resource_routings(context, filters=None, sorts=None):
    """Query resource routing records inside a session."""
    routing_filters = filters or []
    routing_sorts = sorts or []
    with context.session.begin():
        return core.query_resource(context, models.ResourceRouting,
                                   routing_filters, routing_sorts)
def list_jobs(context, filters=None, sorts=None):
    """Return all matching rows from the async job table."""
    with context.session.begin():
        return core.query_resource(context, models.AsyncJob,
                                   filters or [], sorts or [])
def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
                          t_router, t_ew_bridge_net, t_ew_bridge_subnet,
                          need_ns_bridge):
    """Mirror a top router into one bottom pod and wire its networking.

    Creates the bottom router, attaches the E-W bridge port, optionally
    sets up the N-S bridge network/gateway, syncs the router's internal
    interface for *t_net*, and reconciles floating IPs (creating missing
    ones, disassociating removed ones).
    """
    b_client = self._get_client(b_pod['pod_name'])
    router_body = {'router': {'name': t_router['id'],
                              'distributed': False}}
    project_id = t_router['tenant_id']

    # create bottom router in target bottom pod
    _, b_router_id = self.helper.prepare_bottom_element(
        ctx, project_id, b_pod, t_router, 'router', router_body)

    # handle E-W networking
    # create top E-W bridge port
    q_ctx = None  # no need to pass neutron context when using client
    t_ew_bridge_port_id = self.helper.get_bridge_interface(
        ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'],
        b_router_id, None, True)
    # create bottom E-W bridge port
    t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id)
    (is_new, b_ew_bridge_port_id,
     _, _) = self.helper.get_bottom_bridge_elements(
        ctx, project_id, b_pod, t_ew_bridge_net, False,
        t_ew_bridge_subnet, t_ew_bridge_port)
    # attach bottom E-W bridge port to bottom router
    if is_new:
        # only attach bridge port the first time
        b_client.action_routers(ctx, 'add_interface', b_router_id,
                                {'port_id': b_ew_bridge_port_id})
    else:
        # still need to check if the bridge port is bound
        port = b_client.get_ports(ctx, b_ew_bridge_port_id)
        if not port.get('device_id'):
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'port_id': b_ew_bridge_port_id})

    # handle N-S networking
    if need_ns_bridge:
        t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id
        t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % (
            project_id)
        t_ns_bridge_net = self._get_resource_by_name(
            t_client, ctx, 'network', t_ns_bridge_net_name)
        t_ns_bridge_subnet = self._get_resource_by_name(
            t_client, ctx, 'subnet', t_ns_bridge_subnet_name)
        # create bottom N-S bridge network and subnet
        (_, _, b_ns_bridge_subnet_id,
         b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements(
            ctx, project_id, b_pod, t_ns_bridge_net, True,
            t_ns_bridge_subnet, None)
        # create top N-S bridge gateway port
        t_ns_bridge_gateway_id = self.helper.get_bridge_interface(
            ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
            b_router_id, None, False)
        t_ns_bridge_gateway = t_client.get_ports(ctx,
                                                 t_ns_bridge_gateway_id)
        # add external gateway for bottom router
        # add gateway is update operation, can run multiple times
        gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address']
        b_client.action_routers(
            ctx, 'add_gateway', b_router_id,
            {'network_id': b_ns_bridge_net_id,
             'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id,
                                     'ip_address': gateway_ip}]})

    # attach internal port to bottom router
    t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
                                          t_net['id'])
    b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
        ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
    if b_net_id:
        b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
                                              b_net_id)
    else:
        b_ports = []
    if not t_ports and b_ports:
        # remove redundant bottom interface
        b_port = b_ports[0]
        request_body = {'port_id': b_port['id']}
        b_client.action_routers(ctx, 'remove_interface', b_router_id,
                                request_body)
    elif t_ports and not b_ports:
        # create new bottom interface
        t_port = t_ports[0]
        # only consider ipv4 address currently
        t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
        t_subnet = t_client.get_subnets(ctx, t_subnet_id)
        (b_net_id,
         subnet_map) = self.helper.prepare_bottom_network_subnets(
            ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
        # the gateway ip of bottom subnet is set to the ip of t_port, so
        # we just attach the bottom subnet to the bottom router and neutron
        # server in the bottom pod will create the interface for us, using
        # the gateway ip.
        b_client.action_routers(ctx, 'add_interface', b_router_id,
                                {'subnet_id': subnet_map[t_subnet_id]})

    if not t_router['external_gateway_info']:
        return

    # handle floatingip
    t_ext_net_id = t_router['external_gateway_info']['network_id']
    t_fips = t_client.list_floatingips(
        ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
               'value': t_ext_net_id}])
    # skip unbound top floatingip
    t_ip_fip_map = dict([(fip['floating_ip_address'], fip)
                         for fip in t_fips if fip['port_id']])
    mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
                                                    constants.RT_NETWORK)
    # bottom external network should exist
    b_ext_pod, b_ext_net_id = mappings[0]
    b_ext_client = self._get_client(b_ext_pod['pod_name'])
    b_fips = b_ext_client.list_floatingips(
        ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
               'value': b_ext_net_id}])
    # skip unbound bottom floatingip
    b_ip_fip_map = dict([(fip['floating_ip_address'], fip)
                         for fip in b_fips if fip['port_id']])
    # diff top vs bottom floating-ip sets by address
    add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
    del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]

    for add_fip in add_fips:
        fip = t_ip_fip_map[add_fip]
        t_int_port_id = fip['port_id']
        b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
            ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
        if not b_int_port_id:
            LOG.warning(_LW('Port %(port_id)s associated with floating ip '
                            '%(fip)s is not mapped to bottom pod'),
                        {'port_id': t_int_port_id, 'fip': add_fip})
            continue
        t_int_port = t_client.get_ports(ctx, t_int_port_id)
        if t_int_port['network_id'] != t_net['id']:
            # only handle floating ip association for the given top network
            continue
        if need_ns_bridge:
            # create top N-S bridge interface port
            t_ns_bridge_port_id = self.helper.get_bridge_interface(
                ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
                None, b_int_port_id, False)
            t_ns_bridge_port = t_client.get_ports(ctx,
                                                  t_ns_bridge_port_id)
            b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
                ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
                constants.RT_NETWORK)
            port_body = {
                'port': {
                    'tenant_id': project_id,
                    'admin_state_up': True,
                    'name': 'ns_bridge_port',
                    'network_id': b_ext_bridge_net_id,
                    'fixed_ips': [{'ip_address': t_ns_bridge_port[
                        'fixed_ips'][0]['ip_address']}]
                }
            }
            _, b_ns_bridge_port_id = self.helper.prepare_bottom_element(
                ctx, project_id, b_ext_pod, t_ns_bridge_port,
                constants.RT_PORT, port_body)
            # chain the fip through the N-S bridge: external-net fip to
            # the bridge port, bridge-net fip to the internal port
            self._safe_create_bottom_floatingip(
                ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip,
                b_ns_bridge_port_id)
            self._safe_create_bottom_floatingip(
                ctx, b_pod, b_client, b_ns_bridge_net_id,
                t_ns_bridge_port['fixed_ips'][0]['ip_address'],
                b_int_port_id)
        else:
            self._safe_create_bottom_floatingip(
                ctx, b_pod, b_client, b_ext_net_id, add_fip,
                b_int_port_id)

    for del_fip in del_fips:
        fip = b_ip_fip_map[del_fip]
        if need_ns_bridge:
            b_ns_bridge_port = b_ext_client.get_ports(ctx,
                                                      fip['port_id'])
            entries = core.query_resource(
                ctx, models.ResourceRouting,
                [{'key': 'bottom_id', 'comparator': 'eq',
                  'value': b_ns_bridge_port['id']},
                 {'key': 'pod_id', 'comparator': 'eq',
                  'value': b_ext_pod['pod_id']}], [])
            t_ns_bridge_port_id = entries[0]['top_id']
            # remove the bridge-net fip pointing at the internal port
            b_int_fips = b_client.list_floatingips(
                ctx,
                [{'key': 'floating_ip_address', 'comparator': 'eq',
                  'value': b_ns_bridge_port['fixed_ips'][0]
                  ['ip_address']},
                 {'key': 'floating_network_id', 'comparator': 'eq',
                  'value': b_ns_bridge_net_id}])
            if b_int_fips:
                b_client.delete_floatingips(ctx, b_int_fips[0]['id'])
            b_ext_client.update_floatingips(
                ctx, fip['id'], {'floatingip': {'port_id': None}})
            # for bridge port, we have two resource routing entries, one
            # for bridge port in top pod, another for bridge port in bottom
            # pod. calling t_client.delete_ports will delete bridge port in
            # bottom pod as well as routing entry for it, but we also need
            # to remove routing entry for bridge port in top pod, bridge
            # network will be deleted when deleting router
            # first we update the routing entry to set bottom_id to None
            # and expire the entry, so if we succeed to delete the bridge
            # port next, this expired entry will be deleted; otherwise, we
            # fail to delete the bridge port, when the port is accessed via
            # lock_handle module, that module will find the port and update
            # the entry
            with ctx.session.begin():
                core.update_resources(
                    ctx, models.ResourceRouting,
                    [{'key': 'bottom_id', 'comparator': 'eq',
                      'value': t_ns_bridge_port_id}],
                    {'bottom_id': None,
                     'created_at': constants.expire_time,
                     'updated_at': constants.expire_time})
            # delete bridge port
            t_client.delete_ports(ctx, t_ns_bridge_port_id)
            # delete the expired entry, even if this deletion fails, we
            # still have a chance that lock_handle module will delete it
            with ctx.session.begin():
                core.delete_resources(ctx, models.ResourceRouting,
                                      [{'key': 'bottom_id',
                                        'comparator': 'eq',
                                        'value': t_ns_bridge_port_id}])
        else:
            b_client.update_floatingips(ctx, fip['id'],
                                        {'floatingip':
                                         {'port_id': None}})