def test_get_items_by_ids(self):
    """Check get_items_by_ids with various id collections and contexts."""
    self._setup_items()
    fake_kind_items = db_api.get_items(self.context, 'fake')
    fake1_kind_items = db_api.get_items(self.context, 'fake1')
    item_id = fake_kind_items[0]['id']
    other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id']

    # Empty id collections must yield no items.
    items = db_api.get_items_by_ids(self.context, [])
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context, set([]))
    self.assertEqual(0, len(items))

    items = db_api.get_items_by_ids(self.context,
                                    [i['id'] for i in fake_kind_items])
    self.assertEqual(2, len(items))
    items = db_api.get_items_by_ids(
        self.context, (fake_kind_items[0]['id'], fake1_kind_items[0]['id']))
    self.assertEqual(2, len(items))
    items = db_api.get_items_by_ids(self.context, (item_id,))
    self.assertEqual(1, len(items))
    self.assertEqual(item_id, items[0]['id'])

    # Items belonging to another project must not be visible.
    items = db_api.get_items_by_ids(self.context, (other_item_id,))
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context, (item_id, other_item_id))
    self.assertEqual(1, len(items))

    # BUG FIX: previously written as '(fakes.random_ec2_id('fake')),' with
    # the comma outside the parentheses, which passed a bare string (then
    # iterated character by character) instead of a one-element tuple.
    items = db_api.get_items_by_ids(self.context,
                                    (fakes.random_ec2_id('fake'),))
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context,
                                    (item_id, fakes.random_ec2_id('fake')))
    self.assertEqual(1, len(items))
def test_get_items(self):
    """get_items returns all items of the requested kind and only those."""
    self._setup_items()
    self.assertEqual(2, len(db_api.get_items(self.context, 'fake')))
    self.assertEqual(0, len(db_api.get_items(self.context, 'fake0')))
def test_get_items_ids(self):
    """Check get_items_ids filtering by OS ids and by EC2 ids."""
    self._setup_items()
    item = db_api.get_items(self.context, 'fake1')[0]
    other_item = db_api.get_items(self.other_context, 'fake1')[0]
    expected_pairs = [(item['id'], item['os_id']),
                      (other_item['id'], other_item['os_id'])]

    # Lookup by OS ids spans projects for the matching kind...
    items_ids = db_api.get_items_ids(
        self.context, 'fake1',
        item_os_ids=[item['os_id'], other_item['os_id']])
    self.assertThat(items_ids,
                    matchers.ListMatches(expected_pairs,
                                         orderless_lists=True))
    # ...but yields nothing for a mismatched kind.
    items_ids = db_api.get_items_ids(self.context, 'fake',
                                     item_os_ids=[item['os_id']])
    self.assertEqual(0, len(items_ids))

    # The same holds for lookup by EC2 ids.
    items_ids = db_api.get_items_ids(
        self.context, 'fake1', item_ids=[item['id'], other_item['id']])
    self.assertThat(items_ids,
                    matchers.ListMatches(expected_pairs,
                                         orderless_lists=True))
    items_ids = db_api.get_items_ids(self.context, 'fake',
                                     item_ids=[item['id']])
    self.assertEqual(0, len(items_ids))
def test_get_items_ids(self):
    """Verify get_items_ids honors kind together with the id filters."""
    self._setup_items()
    item = db_api.get_items(self.context, 'fake1')[0]
    other_item = db_api.get_items(self.other_context, 'fake1')[0]

    pairs = db_api.get_items_ids(
        self.context, 'fake1',
        item_os_ids=[item['os_id'], other_item['os_id']])
    self.assertThat(pairs, matchers.ListMatches(
        [(item['id'], item['os_id']),
         (other_item['id'], other_item['os_id'])],
        orderless_lists=True))

    # A wrong kind filters everything out, even for known OS ids.
    self.assertEqual(
        0, len(db_api.get_items_ids(self.context, 'fake',
                                    item_os_ids=[item['os_id']])))

    pairs = db_api.get_items_ids(
        self.context, 'fake1', item_ids=[item['id'], other_item['id']])
    self.assertThat(pairs, matchers.ListMatches(
        [(item['id'], item['os_id']),
         (other_item['id'], other_item['os_id'])],
        orderless_lists=True))

    # A wrong kind filters everything out for EC2 ids as well.
    self.assertEqual(
        0, len(db_api.get_items_ids(self.context, 'fake',
                                    item_ids=[item['id']])))
def test_get_items_by_ids(self):
    """Exercise get_items_by_ids across kinds, projects and id sets."""
    self._setup_items()
    fake_kind_items = db_api.get_items(self.context, 'fake')
    fake1_kind_items = db_api.get_items(self.context, 'fake1')
    item_id = fake_kind_items[0]['id']
    other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id']

    items = db_api.get_items_by_ids(self.context, [])
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context, set([]))
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context,
                                    [i['id'] for i in fake_kind_items])
    self.assertEqual(2, len(items))
    items = db_api.get_items_by_ids(
        self.context, (fake_kind_items[0]['id'], fake1_kind_items[0]['id']))
    self.assertEqual(2, len(items))
    items = db_api.get_items_by_ids(self.context, (item_id, ))
    self.assertEqual(1, len(items))
    self.assertEqual(item_id, items[0]['id'])
    items = db_api.get_items_by_ids(self.context, (other_item_id, ))
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context, (item_id, other_item_id))
    self.assertEqual(1, len(items))
    # BUG FIX: '(fakes.random_ec2_id('fake')), ' had the comma outside the
    # parentheses, so a plain string -- not a one-element tuple -- was
    # passed and the function iterated over the id's characters.
    items = db_api.get_items_by_ids(self.context,
                                    (fakes.random_ec2_id('fake'), ))
    self.assertEqual(0, len(items))
    items = db_api.get_items_by_ids(self.context,
                                    (item_id, fakes.random_ec2_id('fake')))
    self.assertEqual(1, len(items))
def get_db_items(self):
    """Cache instances and snapshots by OS id before describing volumes."""
    instances = db_api.get_items(self.context, 'i')
    self.instances = {instance['os_id']: instance
                      for instance in instances}
    snapshots = db_api.get_items(self.context, 'snap')
    self.snapshots = {snapshot['os_id']: snapshot
                      for snapshot in snapshots}
    return super(VolumeDescriber, self).get_db_items()
def get_os_items(self):
    """Return OS security groups, repairing missing default groups first."""
    if self.all_db_items is None:
        self.all_db_items = db_api.get_items(self.context, 'sg')
    os_groups = security_group_engine.get_os_groups(self.context)
    repaired = self.check_and_repair_default_groups(os_groups,
                                                    self.all_db_items)
    if repaired:
        # Default groups were (re)created: refresh both DB and OS views.
        self.all_db_items = db_api.get_items(self.context, 'sg')
        self.items = self.get_db_items()
        os_groups = security_group_engine.get_os_groups(self.context)
    for os_group in os_groups:
        os_group['name'] = _translate_group_name(self.context, os_group,
                                                 self.all_db_items)
    return os_groups
def test_get_item_by_id(self):
    """get_item_by_id returns own items only, None otherwise."""
    self._setup_items()
    item_id = db_api.get_items(self.context, 'fake')[0]['id']
    other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id']

    item = db_api.get_item_by_id(self.context, item_id)
    self.assertThat(item, matchers.DictMatches(
        {'id': item_id, 'os_id': None, 'vpc_id': None}))

    # Another project's item and a nonexistent id both yield None.
    self.assertIsNone(db_api.get_item_by_id(self.context, other_item_id))
    self.assertIsNone(
        db_api.get_item_by_id(self.context, fakes.random_ec2_id('fake')))
def _get_vpn_gateways_external_ips(context, neutron):
    """Map attached VPN gateway ids to their routers' external IPv4s."""
    vpcs = {vpc['id']: vpc for vpc in db_api.get_items(context, 'vpc')}
    external_ips = {}
    routers = neutron.list_routers(tenant_id=context.project_id)['routers']
    for router in routers:
        gateway_info = router['external_gateway_info']
        if not gateway_info:
            continue
        for fixed_ip in gateway_info['external_fixed_ips']:
            # Only IPv4 addresses are kept; the last one found wins.
            if netaddr.valid_ipv4(fixed_ip['ip_address']):
                external_ips[router['id']] = fixed_ip['ip_address']
    # external_ips is keyed by router id and looked up below with the
    # VPC's os_id.
    return {vgw['id']: external_ips.get(vpcs[vgw['vpc_id']]['os_id'])
            for vgw in db_api.get_items(context, 'vgw')
            if vgw['vpc_id']}
def _get_vpn_gateways_external_ips(context, neutron):
    """Collect external IPv4 addresses for attached VPN gateways."""
    vpcs = {vpc['id']: vpc for vpc in db_api.get_items(context, 'vpc')}
    routers = neutron.list_routers(
        tenant_id=context.project_id)['routers']
    external_ips = {}
    for router in routers:
        info = router['external_gateway_info']
        if info:
            ipv4_addresses = [ip['ip_address']
                              for ip in info['external_fixed_ips']
                              if netaddr.valid_ipv4(ip['ip_address'])]
            if ipv4_addresses:
                # Keep the last valid address, matching iteration order.
                external_ips[router['id']] = ipv4_addresses[-1]
    result = {}
    for vgw in db_api.get_items(context, 'vgw'):
        if vgw['vpc_id']:
            os_router_id = vpcs[vgw['vpc_id']]['os_id']
            result[vgw['id']] = external_ips.get(os_router_id)
    return result
def create_customer_gateway(context, ip_address, type, bgp_asn=None):
    """Create (or return the existing) customer gateway for ip_address."""
    # Only the single default ASN is supported.
    if bgp_asn and bgp_asn != DEFAULT_BGP_ASN:
        raise exception.Unsupported("BGP dynamic routing is unsupported")
    existing = (cgw for cgw in db_api.get_items(context, "cgw")
                if cgw["ip_address"] == ip_address)
    customer_gateway = next(existing, None)
    if not customer_gateway:
        customer_gateway = db_api.add_item(context, "cgw",
                                           {"ip_address": ip_address})
    return {"customerGateway": _format_customer_gateway(customer_gateway)}
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    """Disassociate an elastic IP from the instance/port it is mapped to.

    Handles both EC2 Classic addresses (delegating the actual unmapping
    to nova) and VPC addresses (clearing the floating IP's port binding).
    """
    LOG.info('Disassociating %s', association_id)
    neutron = clients.neutron(context)
    floatingips = neutron.list_floatingips(
        tenant_id=context.project_id)['floatingips']
    LOG.info('Existing floating ips: %s', floatingips)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr
                        in db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)

        if not CONF.disable_ec2_classic:
            if address and _is_address_valid(context, neutron, address):
                msg = _('You must specify an association id when '
                        'unmapping an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            # NOTE(tikitavi): check the public IP exists to raise AWS
            # exception otherwise
            os_floating_ip = self.get_os_floating_ip_by_public_ip(
                context, public_ip)
            os_ports = self.get_os_ports(context)
            os_instance_id = _get_os_instance_id(context, os_floating_ip,
                                                 os_ports)
            if os_instance_id:
                nova = clients.nova(context)
                nova.servers.remove_floating_ip(os_instance_id, public_ip)
            return None

        if not address:
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})
        if 'network_interface_id' not in address:
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                     'eipassoc')

    address = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
    LOG.info('DB address: %s', address)
    if address is None or not _is_address_valid(context, neutron, address):
        raise exception.InvalidAssociationIDNotFound(id=association_id)
    if 'network_interface_id' in address:
        with common.OnCrashCleaner() as cleaner:
            network_interface_id = address['network_interface_id']
            private_ip_address = address['private_ip_address']
            # BUG FIX: this log message previously contained a raw newline
            # inside a single-quoted string literal (a syntax error); it is
            # now split with implicit string concatenation.
            LOG.info('Disassociating %(private_ip_address)s from interface '
                     '%(network_interface_id)s',
                     {'private_ip_address': private_ip_address,
                      'network_interface_id': network_interface_id})
            _disassociate_address_item(context, address)
            cleaner.addCleanup(_associate_address_item, context, address,
                               network_interface_id, private_ip_address)
            update = neutron.update_floatingip(
                address['os_id'], {'floatingip': {'port_id': None}})
            LOG.info('Neutron.update result is %s', update)
def delete_customer_gateway(context, customer_gateway_id):
    """Delete a customer gateway unless a VPN connection references it."""
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    in_use = any(vpn["customer_gateway_id"] == customer_gateway["id"]
                 for vpn in db_api.get_items(context, "vpn"))
    if in_use:
        raise exception.IncorrectState(
            reason=_("The customer gateway is in use."))
    db_api.delete_item(context, customer_gateway["id"])
    return True
def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    """Detach a volume from the server it is attached to.

    instance_id, device and force are accepted for EC2 API compatibility
    but are not used to select the attachment.
    """
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume_id)
    attachment = next(iter(os_volume.attachments), {})
    os_instance_id = attachment.get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(
            reason=reason % {'vol_id': volume_id})
    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    # Refresh the volume to pick up the new attachment state.
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    # [varun]: Sending delete on termination as false (last param below)
    # when volume is detached delete on termination flag does not make sense
    # therefore sending false to make consistent with AWS
    return _format_attachment(context, os_volume, instance_id=instance_id,
                              delete_on_termination_flag=False)
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
    """Associate a DHCP options set (or the default one) with a VPC."""
    vpc = ec2utils.get_db_item(context, vpc_id)
    rollback_dhcp_options_id = vpc.get('dhcp_options_id')
    if dhcp_options_id == 'default':
        dhcp_options_id = None
        dhcp_options = None
    else:
        dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
        dhcp_options_id = dhcp_options['id']
    neutron = clients.neutron(context)
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    network_interfaces = db_api.get_items(context, 'eni')
    # NOTE(review): the guard checks dhcp_options_id, not
    # rollback_dhcp_options_id, although the object fetched here is the
    # rollback one -- confirm this is intended.
    rollback_dhcp_options_object = (
        db_api.get_item_by_id(context, rollback_dhcp_options_id)
        if dhcp_options_id is not None else
        None)
    with common.OnCrashCleaner() as cleaner:
        _associate_vpc_item(context, vpc, dhcp_options_id)
        cleaner.addCleanup(_associate_vpc_item, context, vpc,
                           rollback_dhcp_options_id)
        for network_interface in network_interfaces:
            port = next((p for p in os_ports
                         if p['id'] == network_interface['os_id']), None)
            if not port:
                continue
            _add_dhcp_opts_to_port(context, dhcp_options,
                                   network_interface, port, neutron)
            cleaner.addCleanup(_add_dhcp_opts_to_port, context,
                               rollback_dhcp_options_object,
                               network_interface, port, neutron)
    return True
def _update_routes_in_associated_subnets(context, route_table, cleaner,
                                         rollabck_route_table_object,
                                         is_main=None):
    """Push route table changes to every subnet associated with it.

    NOTE: the misspelled parameter name 'rollabck_route_table_object' is
    kept as-is for keyword-argument compatibility with callers.
    """
    if is_main is None:
        vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
        is_main = vpc['route_table_id'] == route_table['id']
    # The main route table also serves subnets without an explicit
    # association (route_table_id of None).
    if is_main:
        appropriate_rtb_ids = (route_table['id'], None)
    else:
        appropriate_rtb_ids = (route_table['id'],)
    router_objects = _get_router_objects(context, route_table)
    neutron = clients.neutron(context)
    for subnet in db_api.get_items(context, 'subnet'):
        in_vpc = subnet['vpc_id'] == route_table['vpc_id']
        if in_vpc and subnet.get('route_table_id') in appropriate_rtb_ids:
            _update_subnet_host_routes(
                context, subnet, route_table, cleaner=cleaner,
                rollback_route_table_object=rollabck_route_table_object,
                router_objects=router_objects, neutron=neutron)
def delete_vpn_gateway(context, vpn_gateway_id):
    """Delete a VPN gateway if it is neither attached nor referenced."""
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    referenced = any(vpn["vpn_gateway_id"] == vpn_gateway["id"]
                     for vpn in db_api.get_items(context, "vpn"))
    if vpn_gateway["vpc_id"] or referenced:
        raise exception.IncorrectState(
            reason=_("The VPN gateway is in use."))
    db_api.delete_item(context, vpn_gateway["id"])
    return True
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
    """Bind a DHCP options set to a VPC and refresh its ports."""
    vpc = ec2utils.get_db_item(context, vpc_id)
    rollback_dhcp_options_id = vpc.get('dhcp_options_id')
    if dhcp_options_id == 'default':
        dhcp_options_id = None
        dhcp_options = None
    else:
        dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
        dhcp_options_id = dhcp_options['id']
    neutron = clients.neutron(context)
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    ports_by_id = {p['id']: p for p in os_ports}
    network_interfaces = db_api.get_items(context, 'eni')
    # NOTE(review): the guard uses dhcp_options_id although the fetched
    # item is the rollback one -- confirm rollback_dhcp_options_id was not
    # intended here.
    if dhcp_options_id is not None:
        rollback_dhcp_options_object = db_api.get_item_by_id(
            context, rollback_dhcp_options_id)
    else:
        rollback_dhcp_options_object = None
    with common.OnCrashCleaner() as cleaner:
        _associate_vpc_item(context, vpc, dhcp_options_id)
        cleaner.addCleanup(_associate_vpc_item, context, vpc,
                           rollback_dhcp_options_id)
        for network_interface in network_interfaces:
            os_port = ports_by_id.get(network_interface['os_id'])
            if not os_port:
                continue
            _add_dhcp_opts_to_port(context, dhcp_options,
                                   network_interface, os_port, neutron)
            cleaner.addCleanup(_add_dhcp_opts_to_port, context,
                               rollback_dhcp_options_object,
                               network_interface, os_port, neutron)
    return True
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    """Unmap an elastic IP; EC2 Classic addresses are delegated to nova."""
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr
                        in db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        # NOTE(ft): association_id is unused in EC2 Classic mode, but it's
        # passed there to validate its emptiness in one place
        return AddressEngineNova().disassociate_address(
            context, public_ip=public_ip, association_id=association_id)

    address = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
    if address is None or not _is_address_valid(context, neutron, address):
        raise exception.InvalidAssociationIDNotFound(id=association_id)
    if 'network_interface_id' not in address:
        return
    with common.OnCrashCleaner() as cleaner:
        network_interface_id = address['network_interface_id']
        private_ip_address = address['private_ip_address']
        _disassociate_address_item(context, address)
        cleaner.addCleanup(_associate_address_item, context, address,
                           network_interface_id, private_ip_address)
        neutron.update_floatingip(address['os_id'],
                                  {'floatingip': {'port_id': None}})
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    """Disassociate an elastic IP address from its network interface."""
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        found = [addr for addr in db_api.get_items(context, 'eipalloc')
                 if addr['public_ip'] == public_ip]
        address = found[0] if found else None
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        # NOTE(ft): association_id is unused in EC2 Classic mode, but it's
        # passed there to validate its emptiness in one place
        return AddressEngineNova().disassociate_address(
            context, public_ip=public_ip, association_id=association_id)
    allocation_id = ec2utils.change_ec2_id_kind(association_id, 'eipalloc')
    address = db_api.get_item_by_id(context, allocation_id)
    if address is None or not _is_address_valid(context, neutron, address):
        raise exception.InvalidAssociationIDNotFound(id=association_id)
    if 'network_interface_id' in address:
        with common.OnCrashCleaner() as cleaner:
            interface_id = address['network_interface_id']
            private_ip = address['private_ip_address']
            _disassociate_address_item(context, address)
            cleaner.addCleanup(_associate_address_item, context, address,
                               interface_id, private_ip)
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': {'port_id': None}})
def attach_network_interface(context, network_interface_id, instance_id,
                             device_index):
    """Attach an existing network interface to an instance."""
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        raise exception.InvalidParameterValue(
            _("Network interface '%(id)s' is currently in use.") %
            {'id': network_interface_id})
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # TODO(Alex) Check that the instance is not yet attached to another VPC
    # TODO(Alex) Check that the instance is "our", not created via nova
    # (which means that it doesn't belong to any VPC and can't be attached)
    device_index_taken = any(
        eni['device_index'] == device_index
        for eni in db_api.get_items(context, 'eni')
        if eni.get('instance_id') == instance_id)
    if device_index_taken:
        raise exception.InvalidParameterValue(
            _("Instance '%(id)s' already has an interface attached at "
              "device index '%(index)s'.") % {'id': instance_id,
                                              'index': device_index})
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        # TODO(Alex) nova inserts compute:%availability_zone into
        # device_owner
        # 'device_owner': 'compute:None'}})
        _attach_network_interface_item(context, network_interface,
                                       instance_id, device_index)
        cleaner.addCleanup(_detach_network_interface_item, context,
                           network_interface)
        nova.servers.interface_attach(os_instance_id, os_port['id'],
                                      None, None)
    return {'attachmentId': ec2utils.change_ec2_id_kind(
        network_interface['id'], 'eni-attach')}
def release_address(self, context, public_ip, allocation_id):
    """Release an elastic IP; Classic addresses go through nova."""
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr
                        in db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an allocation id when releasing a '
                    'VPC elastic IP address')
            raise exception.InvalidParameterValue(msg)
        return AddressEngineNova().release_address(context, public_ip,
                                                   None)

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(id=allocation_id)
    if 'network_interface_id' in address:
        raise exception.InvalidIPAddressInUse(
            ip_address=address['public_ip'])

    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, address['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                           address)
        try:
            neutron.delete_floatingip(address['os_id'])
        except neutron_exception.NotFound:
            # The floating IP is already gone; the goal is reached.
            pass
def describe_addresses(context, public_ip=None, allocation_id=None,
                       filter=None):
    """Describe elastic IP addresses, optionally filtered."""
    describer = AddressDescriber(address_engine.get_os_ports(context),
                                 db_api.get_items(context, 'i'))
    formatted_addresses = describer.describe(context, allocation_id,
                                             public_ip, filter)
    return {'addressesSet': formatted_addresses}
def _update_routes_in_associated_subnets(context, cleaner, route_table,
                                         default_associations_only=None,
                                         update_target=None):
    """Propagate route table changes to its associated subnets."""
    if default_associations_only:
        appropriate_rtb_ids = (None,)
    else:
        vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
        if vpc['route_table_id'] == route_table['id']:
            # The main table also covers implicitly associated subnets.
            appropriate_rtb_ids = (route_table['id'], None)
        else:
            appropriate_rtb_ids = (route_table['id'],)
    neutron = clients.neutron(context)
    subnets = []
    for subnet in db_api.get_items(context, 'subnet'):
        if (subnet['vpc_id'] == route_table['vpc_id'] and
                subnet.get('route_table_id') in appropriate_rtb_ids):
            subnets.append(subnet)
    # NOTE(ft): we need to update host routes for both host and vpn target
    # because vpn-related routes are present in host routes as well
    _update_host_routes(context, neutron, cleaner, route_table, subnets)
    if not update_target or update_target == VPN_TARGET:
        vpn_connection_api._update_vpn_routes(context, neutron, cleaner,
                                              route_table, subnets)
def get_db_item_by_os_id(context, kind, os_id, items_by_os_id=None,
                         **extension_kwargs):
    """Find a DB item by its OS id, creating it if it does not exist.

    Args:
        context (RequestContext): The request context.
        kind (str): The kind of item.
        os_id (str): OS id of an object.
        items_by_os_id (dict of items): Optional cache of known DB items
            keyed by OS id; consulted first and updated on creation.
        extension_kwargs (dict): Extra parameters forwarded to a registered
            extension when a new item is created.

    Returns:
        The found or newly created item, or None when os_id is None.
    """
    if os_id is None:
        return None
    if items_by_os_id is not None:
        cached = items_by_os_id.get(os_id)
        if cached:
            return cached
        item = None
    else:
        item = next((i for i in db_api.get_items(context, kind)
                     if i['os_id'] == os_id), None)
    if not item:
        item = auto_create_db_item(context, kind, os_id,
                                   **extension_kwargs)
    if items_by_os_id is not None:
        items_by_os_id[os_id] = item
    return item
def attach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    """Attach a VPN gateway to a VPC and wire up its VPN services."""
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if vpn_gateway["vpc_id"] and vpn_gateway["vpc_id"] != vpc["id"]:
        raise exception.VpnGatewayAttachmentLimitExceeded()
    attached_vgw = ec2utils.get_attached_gateway(context, vpc["id"], "vgw")
    if attached_vgw and attached_vgw["id"] != vpn_gateway["id"]:
        raise exception.InvalidVpcState(vpc_id=vpc["id"],
                                        vgw_id=attached_vgw["id"])
    subnets = [subnet for subnet in db_api.get_items(context, "subnet")
               if subnet["vpc_id"] == vpc["id"]]
    if vpn_gateway["vpc_id"]:
        # Already attached to this VPC; nothing to change.
        return {"attachment": _format_attachment(vpn_gateway)}
    external_network_id = None
    if not ec2utils.get_attached_gateway(context, vpc["id"], "igw"):
        # Without an internet gateway the router needs its own external
        # gateway for VPN traffic.
        external_network_id = ec2utils.get_os_public_network(context)["id"]
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        _attach_vpn_gateway_item(context, vpn_gateway, vpc["id"])
        cleaner.addCleanup(_detach_vpn_gateway_item, context, vpn_gateway)
        if external_network_id:
            neutron.add_gateway_router(
                vpc["os_id"], {"network_id": external_network_id})
            cleaner.addCleanup(neutron.remove_gateway_router, vpc["os_id"])
        for subnet in subnets:
            _create_subnet_vpnservice(context, neutron, cleaner, subnet,
                                      vpc)
        vpn_connection_api._reset_vpn_connections(context, neutron,
                                                  cleaner, vpn_gateway,
                                                  subnets=subnets)
    return {"attachment": _format_attachment(vpn_gateway)}
def _stop_vpn_in_subnet(context, neutron, cleaner, subnet):
    """Tear down every VPN connection and the VPN service of a subnet."""
    os_vpnservice_id = subnet.get("os_vpnservice_id")
    if not os_vpnservice_id:
        # The subnet never had a VPN service; nothing to stop.
        return
    for vpn_connection in db_api.get_items(context, "vpn"):
        vpn_connection_api._delete_subnet_vpn(context, neutron, cleaner,
                                              subnet, vpn_connection)
    _safe_delete_vpnservice(neutron, os_vpnservice_id, subnet["id"])
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
    """Attach an internet gateway to a VPC."""
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    if igw.get('vpc_id'):
        msg_params = {'igw_id': igw['id'], 'vpc_id': igw['vpc_id']}
        msg = _("resource %(igw_id)s is already attached to "
                "network %(vpc_id)s") % msg_params
        raise exception.ResourceAlreadyAssociated(msg)
    vpc = ec2utils.get_db_item(context, vpc_id)
    # TODO(ft): move search by vpc_id to DB api
    if any(gw.get('vpc_id') == vpc['id']
           for gw in db_api.get_items(context, 'igw')):
        msg = _("Network %(vpc_id)s already has an internet gateway "
                "attached") % {'vpc_id': vpc['id']}
        raise exception.InvalidParameterValue(msg)
    os_public_network = ec2utils.get_os_public_network(context)
    neutron = clients.neutron(context)
    # TODO(ft): set attaching state into db
    with common.OnCrashCleaner() as cleaner:
        _attach_internet_gateway_item(context, igw, vpc['id'])
        cleaner.addCleanup(_detach_internet_gateway_item, context, igw)
        neutron.add_gateway_router(
            vpc['os_id'], {'network_id': os_public_network['id']})
    return True
def release_address(self, context, public_ip, allocation_id):
    """Release an elastic IP allocation back to the pool."""
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        matching = (addr for addr in db_api.get_items(context, 'eipalloc')
                    if addr['public_ip'] == public_ip)
        address = next(matching, None)
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an allocation id when releasing a '
                    'VPC elastic IP address')
            raise exception.InvalidParameterValue(msg)
        return AddressEngineNova().release_address(context, public_ip,
                                                   None)
    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(id=allocation_id)
    if 'network_interface_id' in address:
        # A mapped address cannot be released.
        raise exception.InvalidIPAddressInUse(
            ip_address=address['public_ip'])
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, address['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                           address)
        try:
            neutron.delete_floatingip(address['os_id'])
        except neutron_exception.NotFound:
            pass
def detach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    """Detach a VPN gateway from a VPC and dismantle its VPN services."""
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    if vpn_gateway['vpc_id'] != vpc_id:
        raise exception.InvalidVpnGatewayAttachmentNotFound(
            vgw_id=vpn_gateway_id, vpc_id=vpc_id)
    vpc = db_api.get_item_by_id(context, vpc_id)
    neutron = clients.neutron(context)
    # Keep the router's external gateway when an internet gateway uses it.
    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, 'igw') is None)
    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    with common.OnCrashCleaner() as cleaner:
        _detach_vpn_gateway_item(context, vpn_gateway)
        cleaner.addCleanup(_attach_vpn_gateway_item, context, vpn_gateway,
                           vpc_id)
        vpn_connection_api._stop_gateway_vpn_connections(
            context, neutron, cleaner, vpn_gateway)
        for subnet in subnets:
            _delete_subnet_vpnservice(context, neutron, cleaner, subnet)
        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc['os_id'])
            except neutron_exception.NotFound:
                pass
    return True
def attach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    """Attach a VPN gateway to a VPC, creating per-subnet VPN services."""
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if vpn_gateway['vpc_id'] and vpn_gateway['vpc_id'] != vpc['id']:
        # Already attached elsewhere: only one attachment is allowed.
        raise exception.VpnGatewayAttachmentLimitExceeded()
    attached_vgw = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw')
    if attached_vgw and attached_vgw['id'] != vpn_gateway['id']:
        raise exception.InvalidVpcState(vpc_id=vpc['id'],
                                        vgw_id=attached_vgw['id'])
    subnets = [s for s in db_api.get_items(context, 'subnet')
               if s['vpc_id'] == vpc['id']]
    if not vpn_gateway['vpc_id']:
        external_network_id = None
        if not ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
            external_network_id = (
                ec2utils.get_os_public_network(context)['id'])
        neutron = clients.neutron(context)
        with common.OnCrashCleaner() as cleaner:
            _attach_vpn_gateway_item(context, vpn_gateway, vpc['id'])
            cleaner.addCleanup(_detach_vpn_gateway_item, context,
                               vpn_gateway)
            if external_network_id:
                neutron.add_gateway_router(
                    vpc['os_id'], {'network_id': external_network_id})
                cleaner.addCleanup(neutron.remove_gateway_router,
                                   vpc['os_id'])
            for subnet in subnets:
                _create_subnet_vpnservice(context, neutron, cleaner,
                                          subnet, vpc)
            vpn_connection_api._reset_vpn_connections(
                context, neutron, cleaner, vpn_gateway, subnets=subnets)
    return {'attachment': _format_attachment(vpn_gateway)}
def _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                           subnets=None, route_tables=None,
                           vpn_connections=None):
    """Rebuild subnet VPN wiring for every connection of a VPN gateway.

    subnets, route_tables and vpn_connections may be passed by callers
    that already fetched them; otherwise they are loaded from the DB.
    """
    # A detached gateway has no VPC, hence nothing to reset.
    if not vpn_gateway['vpc_id']:
        return
    # TODO(ft): implement search filters in DB api
    vpn_connections = (vpn_connections or
                       [vpn for vpn in db_api.get_items(context, 'vpn')
                        if vpn['vpn_gateway_id'] == vpn_gateway['id']])
    if not vpn_connections:
        return
    subnets = (subnets or
               [subnet for subnet in db_api.get_items(context, 'subnet')
                if subnet['vpc_id'] == vpn_gateway['vpc_id']])
    if not subnets:
        return
    vpc = db_api.get_item_by_id(context, vpn_gateway['vpc_id'])
    customer_gateways = {cgw['id']: cgw
                         for cgw in db_api.get_items(context, 'cgw')}
    route_tables = route_tables or db_api.get_items(context, 'rtb')
    # Narrow to this VPC's route tables, keyed by id.
    route_tables = {rtb['id']: rtb
                    for rtb in route_tables
                    if rtb['vpc_id'] == vpc['id']}
    # Cache the per-route-table VPN cidrs so each table is computed once
    # even when several subnets share it.
    route_tables_cidrs = {}
    for subnet in subnets:
        # Subnets without an explicit association use the VPC main table.
        route_table_id = subnet.get('route_table_id',
                                    vpc['route_table_id'])
        if route_table_id not in route_tables_cidrs:
            route_tables_cidrs[route_table_id] = (
                _get_route_table_vpn_cidrs(
                    route_tables[route_table_id], vpn_gateway,
                    vpn_connections))
        cidrs = route_tables_cidrs[route_table_id]
        for vpn_conn in vpn_connections:
            # Create/update the subnet VPN when the table routes to this
            # connection; otherwise remove any stale one.
            if vpn_conn['id'] in cidrs:
                _set_subnet_vpn(
                    context, neutron, cleaner, subnet, vpn_conn,
                    customer_gateways[vpn_conn['customer_gateway_id']],
                    cidrs[vpn_conn['id']])
            else:
                _delete_subnet_vpn(context, neutron, cleaner, subnet,
                                   vpn_conn)
def _stop_vpn_in_subnet(context, neutron, cleaner, subnet):
    """Remove the subnet's VPN connections, then its VPN service."""
    service_id = subnet.get('os_vpnservice_id')
    if not service_id:
        # No VPN service was ever created for this subnet.
        return
    vpn_connections = db_api.get_items(context, 'vpn')
    for vpn in vpn_connections:
        vpn_connection_api._delete_subnet_vpn(context, neutron, cleaner,
                                              subnet, vpn)
    _safe_delete_vpnservice(neutron, service_id, subnet['id'])
def delete_vpn_gateway(context, vpn_gateway_id):
    """Remove a VPN gateway that is detached and unreferenced."""
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connections = db_api.get_items(context, 'vpn')
    if vpn_gateway['vpc_id'] or any(
            vpn['vpn_gateway_id'] == vpn_gateway['id']
            for vpn in vpn_connections):
        raise exception.IncorrectState(
            reason=_('The VPN gateway is in use.'))
    db_api.delete_item(context, vpn_gateway['id'])
    return True
def test_get_item_by_id(self):
    """Single-item lookup respects project ownership."""
    self._setup_items()
    item_id = db_api.get_items(self.context, 'fake')[0]['id']
    other_item_id = db_api.get_items(self.other_context, 'fake')[0]['id']
    expected = {'id': item_id, 'os_id': None, 'vpc_id': None}
    self.assertThat(db_api.get_item_by_id(self.context, item_id),
                    matchers.DictMatches(expected))
    item = db_api.get_item_by_id(self.context, other_item_id)
    self.assertIsNone(item)
    item = db_api.get_item_by_id(self.context,
                                 fakes.random_ec2_id('fake'))
    self.assertIsNone(item)
def delete_customer_gateway(context, customer_gateway_id):
    """Remove a customer gateway not referenced by any VPN connection."""
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_connections = db_api.get_items(context, 'vpn')
    for vpn in vpn_connections:
        if vpn['customer_gateway_id'] == customer_gateway['id']:
            raise exception.IncorrectState(
                reason=_('The customer gateway is in use.'))
    db_api.delete_item(context, customer_gateway['id'])
    return True
def get_db_items(self):
    """Preload associated DB objects needed to format route tables."""
    # Map each route table id to the subnets explicitly associated
    # with it.
    associations = collections.defaultdict(list)
    for subnet in db_api.get_items(self.context, 'subnet'):
        if 'route_table_id' in subnet:
            associations[subnet['route_table_id']].append(subnet['id'])
    self.associations = associations
    self.vpcs = {vpc['id']: vpc
                 for vpc in db_api.get_items(self.context, 'vpc')}
    self.gateways = {igw['id']: igw
                     for igw in db_api.get_items(self.context, 'igw')}
    # TODO(ft): scan route tables to get only used instances and
    # network interfaces to reduce DB and Nova throughput
    self.network_interfaces = {
        eni['id']: eni
        for eni in db_api.get_items(self.context, 'eni')}
    return super(RouteTableDescriber, self).get_db_items()
def describe_addresses(context, public_ip=None, allocation_id=None,
                       filter=None):
    """Describe elastic IP addresses, optionally filtered by ip or id."""
    LOG.info('Describe_addresses with allocation_id %s', allocation_id)
    describer = AddressDescriber(address_engine.get_os_ports(context),
                                 db_api.get_items(context, 'i'))
    formatted_addresses = describer.describe(context, allocation_id,
                                             public_ip, filter)
    LOG.info('Formatted addresses: %s', formatted_addresses)
    return {'addressesSet': formatted_addresses}
def create_customer_gateway(context, ip_address, type, bgp_asn=None):
    """Create (or reuse) a customer gateway for the given IP address.

    Only the default BGP ASN is accepted; dynamic routing is unsupported.
    """
    if bgp_asn and bgp_asn != DEFAULT_BGP_ASN:
        raise exception.Unsupported("BGP dynamic routing is unsupported")
    # Reuse an existing gateway that already has this address, if any.
    customer_gateway = None
    for cgw in db_api.get_items(context, 'cgw'):
        if cgw['ip_address'] == ip_address:
            customer_gateway = cgw
            break
    if not customer_gateway:
        customer_gateway = db_api.add_item(context, 'cgw',
                                           {'ip_address': ip_address})
    return {'customerGateway': _format_customer_gateway(customer_gateway)}
def get_db_items(self):
    """Collect the image items visible to the current user.

    Merges bare id/os_id mappings, the user's own image items and public
    image items.  Raises InvalidAMIIDNotFound when an explicitly
    requested id cannot be resolved.
    """
    # TODO(ft): we can't get all images from DB per one request due
    # different kinds. It's need to refactor DB API and ec2utils functions
    # to work with kind smarter
    if self.ids:
        local_images = db_api.get_items_by_ids(self.context, self.ids)
    else:
        # No explicit ids requested: gather every image kind the user owns.
        local_images = sum((db_api.get_items(self.context, kind)
                            for kind in ('ami', 'ari', 'aki')), [])
    public_images = sum(
        (db_api.get_public_items(self.context, kind, self.ids)
         for kind in ('ami', 'ari', 'aki')), [])
    mapped_ids = []
    if self.ids:
        # Bare id -> os_id mappings for the requested ids; these carry no
        # item body data.
        mapped_ids = [{
            'id': item_id,
            'os_id': os_id
        } for kind in ('ami', 'ari', 'aki')
            for item_id, os_id in db_api.get_items_ids(
                self.context, kind, item_ids=self.ids)]
    # NOTE(ft): mapped_ids must be the first to let complete items from
    # next lists to override mappings, which do not have item body data
    images = sum((mapped_ids, local_images, public_images), [])
    if self.ids:
        # NOTE(ft): public images, owned by a current user, appear in both
        # local and public lists of images. Therefore it's not enough to
        # just compare length of requested and retrieved lists to make sure
        # that all requested images are retrieved.
        images_ids = set(i['id'] for i in images)
        if len(images_ids) < len(self.ids):
            missed_ids = self.ids - images_ids
            raise exception.InvalidAMIIDNotFound(id=next(iter(missed_ids)))
    # Items without a backend counterpart yet (empty os_id).
    self.pending_images = {
        i['id']: i
        for i in local_images if not i['os_id']
    }
    # os snapshot id -> ec2 snapshot id; presumably consumed while
    # formatting image properties — verify against the describer's caller.
    self.snapshot_ids = dict(
        (s['os_id'], s['id'])
        for s in db_api.get_items(self.context, 'snap'))
    self.local_images_os_ids = set(i['os_id'] for i in local_images)
    self.ids_dict = {}
    return images
def _check():
    """Return the default VPC, creating it on demand; None on failure."""
    for vpc in db_api.get_items(context, 'vpc'):
        if vpc.get('is_default'):
            return vpc
    try:
        return _create_vpc(context, DEFAULT_VPC_CIDR_BLOCK,
                           is_default=True)
    except Exception:
        # Best effort: callers treat None as "no default vpc available".
        LOG.exception('Failed to create default vpc')
        return None
def get_db_items(self):
    """Gather image items for the describe operation.

    The result combines plain id/os_id mappings, the caller's own image
    items and public image items; a requested id that resolves to
    nothing raises InvalidAMIIDNotFound.
    """
    # TODO(ft): we can't get all images from DB per one request due
    # different kinds. It's need to refactor DB API and ec2utils functions
    # to work with kind smarter
    if self.ids:
        local_images = db_api.get_items_by_ids(self.context, self.ids)
    else:
        # Without explicit ids, fetch all three image kinds.
        local_images = sum((db_api.get_items(self.context, kind)
                            for kind in ('ami', 'ari', 'aki')), [])
    public_images = sum((db_api.get_public_items(self.context, kind,
                                                 self.ids)
                         for kind in ('ami', 'ari', 'aki')), [])
    mapped_ids = []
    if self.ids:
        # id -> os_id stubs for the requested ids (no body data).
        mapped_ids = [{'id': item_id, 'os_id': os_id}
                      for kind in ('ami', 'ari', 'aki')
                      for item_id, os_id in db_api.get_items_ids(
                          self.context, kind, item_ids=self.ids)]
    # NOTE(ft): mapped_ids must be the first to let complete items from
    # next lists to override mappings, which do not have item body data
    images = sum((mapped_ids, local_images, public_images), [])
    if self.ids:
        # NOTE(ft): public images, owned by a current user, appear in both
        # local and public lists of images. Therefore it's not enough to
        # just compare length of requested and retrieved lists to make sure
        # that all requested images are retrieved.
        images_ids = set(i['id'] for i in images)
        if len(images_ids) < len(self.ids):
            missed_ids = self.ids - images_ids
            raise exception.InvalidAMIIDNotFound(id=next(iter(missed_ids)))
    # Local items with no backend os_id yet.
    self.pending_images = {i['id']: i
                           for i in local_images if not i['os_id']}
    # os snapshot id -> ec2 snapshot id lookup; NOTE(review): looks like
    # it serves later image formatting — confirm against callers.
    self.snapshot_ids = dict(
        (s['os_id'], s['id'])
        for s in db_api.get_items(self.context, 'snap'))
    self.local_images_os_ids = set(i['os_id'] for i in local_images)
    self.ids_dict = {}
    return images
def delete_dhcp_options(context, dhcp_options_id):
    """Delete a DHCP options set unless some VPC still uses it."""
    if not dhcp_options_id:
        raise exception.MissingParameter(
            _('DHCP options ID must be specified'))
    dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
    for vpc in db_api.get_items(context, 'vpc'):
        if vpc.get('dhcp_options_id') == dhcp_options['id']:
            raise exception.DependencyViolation(obj1_id=dhcp_options['id'],
                                                obj2_id=vpc['id'])
    db_api.delete_item(context, dhcp_options['id'])
    return True
def release_address(self, context, public_ip, allocation_id):
    """Release an elastic IP by public ip (classic) or allocation id (VPC).

    Classic addresses are deleted straight from neutron; VPC addresses
    are removed from the DB with crash-rollback protection before the
    neutron floating IP is deleted.
    """
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in
                        db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            # A VPC address must be released by allocation id, not by ip.
            msg = _('You must specify an allocation id when releasing a '
                    'VPC elastic IP address')
            raise exception.InvalidParameterValue(msg)

        os_floating_ip = self.get_os_floating_ip_by_public_ip(context,
                                                              public_ip)
        try:
            neutron.delete_floatingip(os_floating_ip['id'])
        except neutron_exception.NotFound:
            # Already gone; treat the release as idempotent.
            pass
        return

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(
            id=allocation_id)

    if 'network_interface_id' in address:
        if CONF.disable_ec2_classic:
            # In "no classic" mode an address attached to a default-VPC
            # interface is implicitly disassociated before release.
            network_interface_id = address['network_interface_id']
            network_interface = db_api.get_item_by_id(context,
                                                      network_interface_id)
            default_vpc = ec2utils.check_and_create_default_vpc(context)
            if default_vpc:
                default_vpc_id = default_vpc['id']
            # NOTE(review): if default_vpc is falsy, default_vpc_id is
            # never bound; the comparison below would raise NameError
            # instead of InvalidIPAddressInUse — confirm intent.
            if (network_interface
                    and network_interface['vpc_id'] == default_vpc_id):
                association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                             'eipassoc')
                self.disassociate_address(
                    context, association_id=association_id)
            else:
                raise exception.InvalidIPAddressInUse(
                    ip_address=address['public_ip'])
        else:
            raise exception.InvalidIPAddressInUse(
                ip_address=address['public_ip'])

    with common.OnCrashCleaner() as cleaner:
        # Delete the DB item first; restore it if the neutron call fails.
        db_api.delete_item(context, address['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                           address)
        try:
            neutron.delete_floatingip(address['os_id'])
        except neutron_exception.NotFound:
            pass
def get_db_items(context, kind, ec2_ids):
    """Fetch items of a kind; when ids are given, every id must resolve.

    Raises the kind-specific not-found exception for the first id that
    has no matching item.
    """
    if not ec2_ids:
        return db_api.get_items(context, kind)
    ec2_ids = ec2_ids if isinstance(ec2_ids, set) else set(ec2_ids)
    items = db_api.get_items_by_ids(context, ec2_ids)
    if len(items) < len(ec2_ids):
        found_ids = set(item['id'] for item in items)
        missed_ids = ec2_ids - found_ids
        params = {'id': next(iter(missed_ids))}
        raise NOT_FOUND_EXCEPTION_MAP[kind](**params)
    return items
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    """Unmap an elastic IP from an instance or network interface.

    For classic mode a public_ip is removed from the nova server; for
    VPC addresses the association is found (directly via association_id,
    or derived from public_ip) and the neutron port binding is cleared
    with crash-rollback of the DB state.
    """
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in
                        db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)

        if not CONF.disable_ec2_classic:
            if address and _is_address_valid(context, neutron, address):
                # VPC addresses must be unmapped by association id.
                msg = _('You must specify an association id when '
                        'unmapping an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            # NOTE(tikitavi): check the public IP exists to raise AWS
            # exception otherwise
            os_floating_ip = self.get_os_floating_ip_by_public_ip(
                context, public_ip)
            os_ports = self.get_os_ports(context)
            os_instance_id = _get_os_instance_id(context, os_floating_ip,
                                                 os_ports)
            if os_instance_id:
                nova = clients.nova(context)
                nova.servers.remove_floating_ip(os_instance_id,
                                                public_ip)
            return None

        # Classic networking disabled: translate public_ip into a VPC
        # association id and fall through to the VPC path below.
        if not address:
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})
        if 'network_interface_id' not in address:
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                     'eipassoc')

    address = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
    if address is None or not _is_address_valid(context, neutron,
                                                address):
        raise exception.InvalidAssociationIDNotFound(
            id=association_id)
    if 'network_interface_id' in address:
        with common.OnCrashCleaner() as cleaner:
            # Clear the DB association first, re-associating on crash.
            network_interface_id = address['network_interface_id']
            private_ip_address = address['private_ip_address']
            _disassociate_address_item(context, address)
            cleaner.addCleanup(_associate_address_item, context, address,
                               network_interface_id, private_ip_address)
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': {'port_id': None}})
def _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                           subnets=None, route_tables=None,
                           vpn_connections=None):
    """Recompute subnet VPN state for every connection of a VPN gateway.

    Optional subnets/route_tables/vpn_connections parameters let callers
    pass pre-fetched lists; otherwise they are loaded from the DB.
    Cleanup actions are registered on the supplied cleaner.
    """
    if not vpn_gateway['vpc_id']:
        return
    # TODO(ft): implement search filters in DB api
    vpn_connections = (vpn_connections or
                       [vpn for vpn in db_api.get_items(context, 'vpn')
                        if vpn['vpn_gateway_id'] == vpn_gateway['id']])
    if not vpn_connections:
        return
    subnets = (subnets or
               [subnet for subnet in db_api.get_items(context, 'subnet')
                if subnet['vpc_id'] == vpn_gateway['vpc_id']])
    if not subnets:
        return
    vpc = db_api.get_item_by_id(context, vpn_gateway['vpc_id'])
    customer_gateways = {cgw['id']: cgw
                         for cgw in db_api.get_items(context, 'cgw')}
    # Only route tables of the gateway's VPC matter here.
    route_tables = route_tables or db_api.get_items(context, 'rtb')
    route_tables = {rtb['id']: rtb
                    for rtb in route_tables
                    if rtb['vpc_id'] == vpc['id']}
    # Cache per-route-table cidr computation: several subnets may share
    # the same route table.
    route_tables_cidrs = {}
    for subnet in subnets:
        # A subnet without its own table uses the VPC's main table.
        route_table_id = subnet.get('route_table_id',
                                    vpc['route_table_id'])
        if route_table_id not in route_tables_cidrs:
            route_tables_cidrs[route_table_id] = (
                _get_route_table_vpn_cidrs(route_tables[route_table_id],
                                           vpn_gateway, vpn_connections))
        cidrs = route_tables_cidrs[route_table_id]
        for vpn_conn in vpn_connections:
            if vpn_conn['id'] in cidrs:
                # Route table exposes cidrs for this connection: (re)set.
                _set_subnet_vpn(
                    context, neutron, cleaner, subnet, vpn_conn,
                    customer_gateways[vpn_conn['customer_gateway_id']],
                    cidrs[vpn_conn['id']])
            else:
                _delete_subnet_vpn(context, neutron, cleaner, subnet,
                                   vpn_conn)
def delete_dhcp_options(context, dhcp_options_id):
    """Remove a DHCP options set after checking no VPC depends on it."""
    if not dhcp_options_id:
        raise exception.MissingParameter(
            _('DHCP options ID must be specified'))
    dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
    options_id = dhcp_options['id']
    user_vpc = next((vpc for vpc in db_api.get_items(context, 'vpc')
                     if vpc.get('dhcp_options_id') == options_id), None)
    if user_vpc is not None:
        raise exception.DependencyViolation(
            obj1_id=options_id, obj2_id=user_vpc['id'])
    db_api.delete_item(context, options_id)
    return True
def _stop_gateway_vpn_connections(context, neutron, cleaner, vpn_gateway):
    """Stop every VPN connection of the gateway, with crash rollback.

    Each affected connection's os ipsec site connection mapping is
    cleared in the DB; the previous mapping is saved and a cleanup
    callback restoring it is registered on the cleaner.
    """
    def undo_vpn_connection(context, vpn_connection, connections_ids):
        # Rollback helper: put the saved connection ids back.
        vpn_connection['os_ipsec_site_connections'] = connections_ids
        db_api.update_item(context, vpn_connection)

    for vpn_connection in db_api.get_items(context, 'vpn'):
        if vpn_connection['vpn_gateway_id'] == vpn_gateway['id']:
            _stop_vpn_connection(neutron, vpn_connection)
            # Remember the old mapping before clearing it so the cleanup
            # above can restore it if a later step crashes.
            connection_ids = vpn_connection['os_ipsec_site_connections']
            vpn_connection['os_ipsec_site_connections'] = {}
            db_api.update_item(context, vpn_connection)
            cleaner.addCleanup(undo_vpn_connection, context,
                               vpn_connection, connection_ids)
def _get_active_route_destinations(context, route_table):
    """Map destination ids of the table's routes to their DB items.

    A vgw destination is included only when a VPN connection points at
    it; matching VPN connections are attached to the destination item
    under the 'vpn_connection' key.
    """
    vpn_connections = {}
    for vpn in db_api.get_items(context, 'vpn'):
        vpn_connections[vpn['vpn_gateway_id']] = vpn

    dst_ids = []
    for route in route_table['routes']:
        for id_key in ('gateway_id', 'network_interface_id'):
            if route.get(id_key) is not None:
                dst_ids.append(route[id_key])
    dst_ids.extend(route_table.get('propagating_gateways', []))

    destinations = {}
    for item in db_api.get_items_by_ids(context, dst_ids):
        if item['vpc_id'] != route_table['vpc_id']:
            continue
        if (ec2utils.get_ec2_id_kind(item['id']) == 'vgw'
                and item['id'] not in vpn_connections):
            continue
        destinations[item['id']] = item

    for vpn in six.itervalues(vpn_connections):
        if vpn['vpn_gateway_id'] in destinations:
            destinations[vpn['vpn_gateway_id']]['vpn_connection'] = vpn
    return destinations
def _format_security_groups_ids_names(context):
    """Build {os sg id: {'groupId', 'groupName'}} for the project."""
    neutron = clients.neutron(context)
    os_security_groups = neutron.list_security_groups(
        tenant_id=context.project_id)['security_groups']
    security_groups = db_api.get_items(context, 'sg')
    ec2_security_groups = {}
    for os_security_group in os_security_groups:
        # First DB item whose os_id matches this neutron group, if any.
        matched = [g for g in security_groups
                   if g['os_id'] == os_security_group['id']]
        if not matched:
            continue
        security_group = matched[0]
        ec2_security_groups[os_security_group['id']] = {
            'groupId': security_group['id'],
            'groupName': _translate_group_name(context, os_security_group,
                                               security_groups)}
    return ec2_security_groups
def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    """Detach a volume from its server and report the attachment state."""
    volume = ec2utils.get_db_item(context, volume_id)
    os_volume = clients.cinder(context).volumes.get(volume['os_id'])

    # Take the first attachment's server, if any.
    os_instance_id = None
    for attachment in os_volume.attachments:
        os_instance_id = attachment.get('server_id')
        break
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason % {'vol_id': volume_id})

    clients.nova(context).volumes.delete_server_volume(os_instance_id,
                                                       os_volume.id)
    # Refresh the volume object to pick up its post-detach state.
    os_volume.get()

    instance_id = None
    for instance in db_api.get_items(context, 'i'):
        if instance['os_id'] == os_instance_id:
            instance_id = instance['id']
            break
    return _format_attachment(context, volume, os_volume,
                              instance_id=instance_id)