def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            secgroup_body = (
                {'security_group': {'name': group_name,
                                    'description': group_description}})
            os_security_group = neutron.create_security_group(
                secgroup_body)['security_group']
        except neutron_exception.OverQuotaClient:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(neutron.delete_security_group,
                           os_security_group['id'])
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group['id']}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
    return {'return': 'true',
            'groupId': security_group['id']}
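# NOTE: the functions in this listing lean on common.OnCrashCleaner for
# rollback. The real class lives in ec2api.api.common; the sketch below only
# illustrates the semantics assumed here (cleanups registered via addCleanup
# run in reverse order, and only if the block raises) and is not the actual
# implementation.
import contextlib


class OnCrashCleanerSketch(contextlib.ExitStack):
    """Illustrative stand-in: run registered cleanups only on exception."""

    def addCleanup(self, func, *args, **kwargs):
        # ExitStack.callback registers func to run at unwinding time.
        self.callback(func, *args, **kwargs)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            # Success: discard the cleanups without running them.
            self.pop_all()
            return False
        # Failure: let ExitStack run the cleanups in reverse order.
        return super().__exit__(exc_type, exc_val, exc_tb)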
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    if igw.get('vpc_id'):
        msg_params = {'igw_id': igw['id'],
                      'vpc_id': igw['vpc_id']}
        msg = _('resource %(igw_id)s is already attached to '
                'network %(vpc_id)s') % msg_params
        raise exception.ResourceAlreadyAssociated(msg)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
        msg = _('Network %(vpc_id)s already has an internet gateway '
                'attached') % {'vpc_id': vpc['id']}
        raise exception.InvalidParameterValue(msg)

    external_network_id = None
    if not ec2utils.get_attached_gateway(context, vpc['id'], 'vgw'):
        external_network_id = ec2utils.get_os_public_network(context)['id']
    neutron = clients.neutron(context)

    # TODO(ft): set attaching state into db
    with common.OnCrashCleaner() as cleaner:
        _attach_internet_gateway_item(context, igw, vpc['id'])
        cleaner.addCleanup(_detach_internet_gateway_item, context, igw)
        if external_network_id:
            neutron.add_gateway_router(vpc['os_id'],
                                       {'network_id': external_network_id})
    return True
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    rollback_dhcp_options_id = vpc.get('dhcp_options_id')
    if dhcp_options_id == 'default':
        dhcp_options_id = None
        dhcp_options = None
    else:
        dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
        dhcp_options_id = dhcp_options['id']
    neutron = clients.neutron(context)
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    network_interfaces = db_api.get_items(context, 'eni')
    rollback_dhcp_options_object = (
        db_api.get_item_by_id(context, rollback_dhcp_options_id)
        if dhcp_options_id is not None
        else None)
    with common.OnCrashCleaner() as cleaner:
        _associate_vpc_item(context, vpc, dhcp_options_id)
        cleaner.addCleanup(_associate_vpc_item, context, vpc,
                           rollback_dhcp_options_id)
        for network_interface in network_interfaces:
            os_port = next((p for p in os_ports
                            if p['id'] == network_interface['os_id']), None)
            if not os_port:
                continue
            _add_dhcp_opts_to_port(context, dhcp_options, network_interface,
                                   os_port, neutron)
            cleaner.addCleanup(_add_dhcp_opts_to_port, context,
                               rollback_dhcp_options_object,
                               network_interface, os_port, neutron)
    return True
def attach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if vpn_gateway["vpc_id"] and vpn_gateway["vpc_id"] != vpc["id"]:
        raise exception.VpnGatewayAttachmentLimitExceeded()
    attached_vgw = ec2utils.get_attached_gateway(context, vpc["id"], "vgw")
    if attached_vgw and attached_vgw["id"] != vpn_gateway["id"]:
        raise exception.InvalidVpcState(vpc_id=vpc["id"],
                                        vgw_id=attached_vgw["id"])

    subnets = [subnet for subnet in db_api.get_items(context, "subnet")
               if subnet["vpc_id"] == vpc["id"]]
    if not vpn_gateway["vpc_id"]:
        external_network_id = None
        if not ec2utils.get_attached_gateway(context, vpc["id"], "igw"):
            external_network_id = ec2utils.get_os_public_network(
                context)["id"]
        neutron = clients.neutron(context)
        with common.OnCrashCleaner() as cleaner:
            _attach_vpn_gateway_item(context, vpn_gateway, vpc["id"])
            cleaner.addCleanup(_detach_vpn_gateway_item, context,
                               vpn_gateway)
            if external_network_id:
                neutron.add_gateway_router(
                    vpc["os_id"], {"network_id": external_network_id})
                cleaner.addCleanup(neutron.remove_gateway_router,
                                   vpc["os_id"])
            for subnet in subnets:
                _create_subnet_vpnservice(context, neutron, cleaner,
                                          subnet, vpc)
            vpn_connection_api._reset_vpn_connections(
                context, neutron, cleaner, vpn_gateway, subnets=subnets)
    return {"attachment": _format_attachment(vpn_gateway)}
def attach_network_interface(context, network_interface_id,
                             instance_id, device_index):
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        raise exception.InvalidParameterValue(
            _("Network interface '%(id)s' is currently in use.") %
            {'id': network_interface_id})
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # TODO(Alex) Check that the instance is not yet attached to another VPC
    # TODO(Alex) Check that the instance is "our", not created via nova
    # (which means that it doesn't belong to any VPC and can't be attached)
    if any(eni['device_index'] == device_index
           for eni in db_api.get_items(context, 'eni')
           if eni.get('instance_id') == instance_id):
        raise exception.InvalidParameterValue(
            _("Instance '%(id)s' already has an interface attached at "
              "device index '%(index)s'.") % {'id': instance_id,
                                              'index': device_index})
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        # TODO(Alex) nova inserts compute:%availability_zone into
        # device_owner
        #            'device_owner': 'compute:None'}})
        _attach_network_interface_item(context, network_interface,
                                       instance_id, device_index)
        cleaner.addCleanup(_detach_network_interface_item, context,
                           network_interface)
        nova.servers.interface_attach(os_instance_id, os_port['id'],
                                      None, None)
    return {'attachmentId': ec2utils.change_ec2_id_kind(
        network_interface['id'], 'eni-attach')}
def delete_internet_gateway(context, internet_gateway_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    if igw.get("vpc_id"):
        msg = _("The internetGateway '%(igw_id)s' has dependencies and "
                "cannot be deleted.") % {"igw_id": igw["id"]}
        raise exception.DependencyViolation(msg)
    db_api.delete_item(context, igw["id"])
    return True
def delete_route(context, route_table_id, destination_cidr_block):
    route_table = ec2utils.get_db_item(context, route_table_id)
    for route_index, route in enumerate(route_table['routes']):
        if route['destination_cidr_block'] != destination_cidr_block:
            continue
        if route.get('gateway_id', 0) is None:
            msg = _('cannot remove local route %(destination_cidr_block)s '
                    'in route table %(route_table_id)s')
            msg = msg % {'route_table_id': route_table_id,
                         'destination_cidr_block': destination_cidr_block}
            raise exception.InvalidParameterValue(msg)
        break
    else:
        raise exception.InvalidRouteNotFound(
            route_table_id=route_table_id,
            destination_cidr_block=destination_cidr_block)
    update_target = _get_route_target(route)
    if update_target == VPN_TARGET:
        vpn_gateway = db_api.get_item_by_id(context, route['gateway_id'])
        if (not vpn_gateway or
                vpn_gateway['vpc_id'] != route_table['vpc_id']):
            update_target = None
    rollback_route_table_state = copy.deepcopy(route_table)
    del route_table['routes'][route_index]
    with common.OnCrashCleaner() as cleaner:
        db_api.update_item(context, route_table)
        cleaner.addCleanup(db_api.update_item, context,
                           rollback_route_table_state)
        if update_target:
            _update_routes_in_associated_subnets(
                context, cleaner, route_table, update_target=update_target)
    return True
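# NOTE: a minimal runnable sketch (with made-up data) of the for/else idiom
# used by delete_route above: the else arm executes only when the loop ends
# without hitting 'break', i.e. when no route matched.
def _find_route_index(routes, destination_cidr_block):
    for route_index, route in enumerate(routes):
        if route['destination_cidr_block'] == destination_cidr_block:
            break
    else:
        raise LookupError('no route for %s' % destination_cidr_block)
    return route_index


assert _find_route_index([{'destination_cidr_block': '0.0.0.0/0'}],
                         '0.0.0.0/0') == 0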
def delete_vpn_connection(context, vpn_connection_id):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpn_connection['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpn',
                           vpn_connection)
        neutron = clients.neutron(context)
        _stop_vpn_connection(neutron, vpn_connection)
        try:
            neutron.delete_ipsecpolicy(vpn_connection['os_ipsecpolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                _('Failed to delete ipsecpolicy %(os_id)s during deleting '
                  'VPN connection %(id)s. Reason: %(reason)s'),
                {'id': vpn_connection['id'],
                 'os_id': vpn_connection['os_ipsecpolicy_id'],
                 'reason': ex.message})
        except neutron_exception.NotFound:
            pass
        try:
            neutron.delete_ikepolicy(vpn_connection['os_ikepolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                _('Failed to delete ikepolicy %(os_id)s during deleting '
                  'VPN connection %(id)s. Reason: %(reason)s'),
                {'id': vpn_connection['id'],
                 'os_id': vpn_connection['os_ikepolicy_id'],
                 'reason': ex.message})
        except neutron_exception.NotFound:
            pass
    return True
def delete_vpn_gateway(context, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connections = db_api.get_items(context, "vpn")
    if vpn_gateway["vpc_id"] or any(vpn["vpn_gateway_id"] == vpn_gateway["id"]
                                    for vpn in vpn_connections):
        raise exception.IncorrectState(reason=_("The VPN gateway is in use."))
    db_api.delete_item(context, vpn_gateway["id"])
    return True
def check_normal_flow(kind, ec2_id):
    # NOTE: nested test helper; 'item', 'self' and the mocked 'db_api'
    # come from the enclosing unit test's scope.
    item['id'] = ec2_id
    res = ec2utils.get_db_item('fake_context', ec2_id)
    self.assertThat(res, matchers.DictMatches(item))
    db_api.get_item_by_id.assert_called_once_with('fake_context', ec2_id)
    db_api.reset_mock()
def detach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    if vpn_gateway['vpc_id'] != vpc_id:
        raise exception.InvalidVpnGatewayAttachmentNotFound(
            vgw_id=vpn_gateway_id, vpc_id=vpc_id)

    vpc = db_api.get_item_by_id(context, vpc_id)
    neutron = clients.neutron(context)
    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, 'igw') is None)
    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    with common.OnCrashCleaner() as cleaner:
        _detach_vpn_gateway_item(context, vpn_gateway)
        cleaner.addCleanup(_attach_vpn_gateway_item, context, vpn_gateway,
                           vpc_id)
        vpn_connection_api._stop_gateway_vpn_connections(
            context, neutron, cleaner, vpn_gateway)
        for subnet in subnets:
            _delete_subnet_vpnservice(context, neutron, cleaner, subnet)

        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc['os_id'])
            except neutron_exception.NotFound:
                pass
    return True
def delete_customer_gateway(context, customer_gateway_id):
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_connections = db_api.get_items(context, "vpn")
    if any(vpn["customer_gateway_id"] == customer_gateway["id"]
           for vpn in vpn_connections):
        raise exception.IncorrectState(
            reason=_("The customer gateway is in use."))
    db_api.delete_item(context, customer_gateway["id"])
    return True
def delete_group(self, context, group_name=None, group_id=None,
                 delete_default=False):
    neutron = clients.neutron(context)
    if group_id is None or not group_id.startswith('sg-'):
        return SecurityGroupEngineNova().delete_group(context,
                                                      group_name,
                                                      group_id)
    security_group = ec2utils.get_db_item(context, group_id)
    try:
        if not delete_default:
            os_security_group = neutron.show_security_group(
                security_group['os_id'])
            if (os_security_group and
                    os_security_group['security_group']['name'] ==
                    security_group['vpc_id']):
                raise exception.CannotDelete()
        neutron.delete_security_group(security_group['os_id'])
    except neutron_exception.Conflict as ex:
        # TODO(Alex): Instance ID is unknown here, report exception message
        # in its place - looks readable.
        raise exception.DependencyViolation(obj1_id=group_id,
                                            obj2_id=ex.message)
    except neutron_exception.NeutronClientException as ex:
        # TODO(Alex): do log error
        # TODO(Alex): adjust caught exception classes to catch:
        # the port doesn't exist
        pass
    db_api.delete_item(context, group_id)
def revoke_security_group_egress(context, group_id, ip_permissions=None):
    security_group = ec2utils.get_db_item(context, group_id)
    if not security_group.get('vpc_id'):
        raise exception.InvalidParameterValue(message=_(
            'Only Amazon VPC security groups may be used '
            'with this operation.'))
    return _revoke_security_group(context, group_id, None,
                                  ip_permissions, 'egress')
def get_group_os_id(self, context, group_id, group_name,
                    nova_security_groups=None):
    if group_id:
        return ec2utils.get_db_item(context, group_id, 'sg')['os_id']
    nova_group = self.get_nova_group_by_name(context, group_name,
                                             nova_security_groups)
    return str(nova_group.id)
def describe_image_attribute(context, image_id, attribute):
    def _block_device_mapping_attribute(os_image, image, result):
        properties = ec2utils.deserialize_os_image_properties(os_image)
        mappings = _format_mappings(context, properties)
        if mappings:
            result['blockDeviceMapping'] = mappings

    def _description_attribute(os_image, image, result):
        result['description'] = {'value': image.get('description')}

    def _launch_permission_attribute(os_image, image, result):
        result['launchPermission'] = []
        if os_image.is_public:
            result['launchPermission'].append({'group': 'all'})

    def _kernel_attribute(os_image, image, result):
        kernel_id = os_image.properties.get('kernel_id')
        if kernel_id:
            result['kernel'] = {
                'value': ec2utils.os_id_to_ec2_id(context, 'aki', kernel_id)
            }

    def _ramdisk_attribute(os_image, image, result):
        ramdisk_id = os_image.properties.get('ramdisk_id')
        if ramdisk_id:
            result['ramdisk'] = {
                'value': ec2utils.os_id_to_ec2_id(context, 'ari', ramdisk_id)
            }

    # NOTE(ft): Openstack extension, AWS-incompatibility
    def _root_device_name_attribute(os_image, image, result):
        properties = ec2utils.deserialize_os_image_properties(os_image)
        result['rootDeviceName'] = (
            ec2utils.block_device_properties_root_device_name(properties))

    supported_attributes = {
        'blockDeviceMapping': _block_device_mapping_attribute,
        'description': _description_attribute,
        'launchPermission': _launch_permission_attribute,
        'kernel': _kernel_attribute,
        'ramdisk': _ramdisk_attribute,
        # NOTE(ft): Openstack extension, AWS-incompatibility
        'rootDeviceName': _root_device_name_attribute,
    }

    fn = supported_attributes.get(attribute)
    if fn is None:
        raise exception.InvalidRequest()

    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        # TODO(ft): figure out corresponding AWS error
        raise exception.IncorrectState(
            reason='Image is still being created or failed')
    _check_owner(context, os_image)
    image = ec2utils.get_db_item(context, image_id)

    result = {'imageId': image_id}
    fn(os_image, image, result)
    return result
def release_address(self, context, public_ip, allocation_id):
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in db_api.get_items(context,
                                                          'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an allocation id when releasing a '
                    'VPC elastic IP address')
            raise exception.InvalidParameterValue(msg)
        return AddressEngineNova().release_address(context,
                                                   public_ip, None)

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(
            id=allocation_id)
    if 'network_interface_id' in address:
        raise exception.InvalidIPAddressInUse(
            ip_address=address['public_ip'])

    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, address['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                           address)
        try:
            neutron.delete_floatingip(address['os_id'])
        except neutron_exception.NotFound:
            pass
def associate_address(self, context, public_ip=None, instance_id=None,
                      allocation_id=None, network_interface_id=None,
                      private_ip_address=None, allow_reassociation=False):
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # NOTE(ft): check the public IP exists to raise AWS exception otherwise
    self.get_nova_ip_by_public_ip(context, public_ip)
    nova = clients.nova(context)
    nova.servers.add_floating_ip(os_instance_id, public_ip)
    return None
def detach_internet_gateway(context, internet_gateway_id, vpc_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if igw.get("vpc_id") != vpc["id"]:
        raise exception.GatewayNotAttached(gw_id=igw["id"],
                                           vpc_id=vpc["id"])

    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, "vgw") is None)
    neutron = clients.neutron(context)
    # TODO(ft): set detaching state into db
    with common.OnCrashCleaner() as cleaner:
        _detach_internet_gateway_item(context, igw)
        cleaner.addCleanup(_attach_internet_gateway_item, context,
                           igw, vpc["id"])
        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc["os_id"])
            except neutron_exception.NotFound:
                pass
    return True
def attach_volume(context, volume_id, instance_id, device):
    volume = ec2utils.get_db_item(context, volume_id)
    instance = ec2utils.get_db_item(context, instance_id)

    nova = clients.nova(context)
    try:
        nova.volumes.create_server_volume(instance['os_id'],
                                          volume['os_id'], device)
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(andrey-mp): raise correct errors for different cases
        LOG.exception('Attach has failed.')
        raise exception.UnsupportedOperation()
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    attachment = _format_attachment(context, volume, os_volume,
                                    instance_id=instance_id)
    # NOTE(andrey-mp): nova sets deleteOnTermination=False for attached
    # volume
    attachment['deleteOnTermination'] = False
    return attachment
def _create_security_group(context, group_name, group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg',
                                         {'vpc_id': vpc_id,
                                          'os_id': os_security_group.id})
    return {'return': 'true',
            'groupId': security_group['id']}
def enable_vgw_route_propagation(context, route_table_id, gateway_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    # NOTE(ft): AWS returns GatewayNotAttached for all invalid cases of
    # gateway_id value
    vpn_gateway = ec2utils.get_db_item(context, gateway_id)
    if vpn_gateway['vpc_id'] != route_table['vpc_id']:
        raise exception.GatewayNotAttached(gw_id=vpn_gateway['id'],
                                           vpc_id=route_table['vpc_id'])
    if vpn_gateway['id'] in route_table.setdefault('propagating_gateways',
                                                   []):
        return True
    with common.OnCrashCleaner() as cleaner:
        _append_propagation_to_route_table_item(context, route_table,
                                                vpn_gateway['id'])
        cleaner.addCleanup(_remove_propagation_from_route_table_item,
                           context, route_table, vpn_gateway['id'])
        _update_routes_in_associated_subnets(context, cleaner, route_table,
                                             update_target=VPN_TARGET)
    return True
def delete_snapshot(context, snapshot_id):
    snapshot = ec2utils.get_db_item(context, snapshot_id)
    cinder = clients.cinder(context)
    try:
        cinder.volume_snapshots.delete(snapshot['os_id'])
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from
    # Cloud. It will be deleted by describer in the future
    return True
def create_subnet(context, vpc_id, cidr_block, availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
        context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        # NOTE(andrey-mp): set fake name to filter networks in instance api
        os_network_body = {'network': {'name': 'subnet-0'}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS takes 4 first addresses (.1 - .4) but for
            # OpenStack we decided not to support this as compatibility.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}
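# NOTE: a runnable sketch of the netaddr containment check used by
# create_subnet above (sample CIDRs are made up; requires the 'netaddr'
# package):
import netaddr

assert netaddr.IPNetwork('10.0.1.0/28') in netaddr.IPNetwork('10.0.0.0/16')
assert netaddr.IPNetwork('192.168.0.0/24') not in netaddr.IPNetwork(
    '10.0.0.0/16')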
def release_address(self, context, public_ip, allocation_id):
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in db_api.get_items(context,
                                                          'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            msg = _('You must specify an allocation id when releasing a '
                    'VPC elastic IP address')
            raise exception.InvalidParameterValue(msg)
        os_floating_ip = self.get_os_floating_ip_by_public_ip(context,
                                                              public_ip)
        try:
            neutron.delete_floatingip(os_floating_ip['id'])
        except neutron_exception.NotFound:
            pass
        return

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(
            id=allocation_id)

    if 'network_interface_id' in address:
        if CONF.disable_ec2_classic:
            network_interface_id = address['network_interface_id']
            network_interface = db_api.get_item_by_id(context,
                                                      network_interface_id)
            default_vpc = ec2utils.check_and_create_default_vpc(context)
            if default_vpc:
                default_vpc_id = default_vpc['id']
            if (network_interface and
                    network_interface['vpc_id'] == default_vpc_id):
                association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                             'eipassoc')
                self.disassociate_address(
                    context, association_id=association_id)
            else:
                raise exception.InvalidIPAddressInUse(
                    ip_address=address['public_ip'])
        else:
            raise exception.InvalidIPAddressInUse(
                ip_address=address['public_ip'])

    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, address['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                           address)
        try:
            neutron.delete_floatingip(address['os_id'])
        except neutron_exception.NotFound:
            pass
def delete_volume(context, volume_id):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    try:
        cinder.volumes.delete(volume['os_id'])
    except cinder_exception.BadRequest:
        # TODO(andrey-mp): raise correct errors for different cases
        raise exception.UnsupportedOperation()
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from
    # Cloud. It will be deleted by describer in the future
    return True
def delete_dhcp_options(context, dhcp_options_id):
    if not dhcp_options_id:
        raise exception.MissingParameter(
            _('DHCP options ID must be specified'))
    dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
    vpcs = db_api.get_items(context, 'vpc')
    for vpc in vpcs:
        if dhcp_options['id'] == vpc.get('dhcp_options_id'):
            raise exception.DependencyViolation(
                obj1_id=dhcp_options['id'],
                obj2_id=vpc['id'])
    db_api.delete_item(context, dhcp_options['id'])
    return True
def associate_route_table(context, route_table_id, subnet_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    subnet = ec2utils.get_db_item(context, subnet_id)
    if route_table['vpc_id'] != subnet['vpc_id']:
        msg = _('Route table %(rtb_id)s and subnet %(subnet_id)s belong to '
                'different networks')
        msg = msg % {'rtb_id': route_table_id,
                     'subnet_id': subnet_id}
        raise exception.InvalidParameterValue(msg)
    if 'route_table_id' in subnet:
        msg = _('The specified association for route table %(rtb_id)s '
                'conflicts with an existing association')
        msg = msg % {'rtb_id': route_table_id}
        raise exception.ResourceAlreadyAssociated(msg)

    with common.OnCrashCleaner() as cleaner:
        _associate_subnet_item(context, subnet, route_table['id'])
        cleaner.addCleanup(_disassociate_subnet_item, context, subnet)
        _update_subnet_routes(context, cleaner, subnet, route_table)

    return {'associationId': ec2utils.change_ec2_id_kind(subnet['id'],
                                                         'rtbassoc')}
def create_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block in vpn_connection['cidrs']:
        return True
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _add_cidr_to_vpn_connection_item(context, vpn_connection,
                                         destination_cidr_block)
        cleaner.addCleanup(_remove_cidr_from_vpn_connection_item,
                           context, vpn_connection, destination_cidr_block)
        _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                               vpn_connections=[vpn_connection])
    return True
def assign_private_ip_addresses(context, network_interface_id,
                                private_ip_address=None,
                                secondary_private_ip_address_count=None,
                                allow_reassignment=False):
    # TODO(Alex): allow_reassignment is not supported at the moment
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    subnet = db_api.get_item_by_id(context, network_interface['subnet_id'])
    neutron = clients.neutron(context)
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    os_port = neutron.show_port(network_interface['os_id'])['port']
    subnet_ipnet = netaddr.IPNetwork(os_subnet['cidr'])
    fixed_ips = os_port['fixed_ips'] or []
    if private_ip_address is not None:
        for ip_address in private_ip_address:
            if netaddr.IPAddress(ip_address) not in subnet_ipnet:
                raise exception.InvalidParameterValue(
                    value=str(ip_address),
                    parameter='PrivateIpAddress',
                    reason='IP address is out of the subnet range')
            fixed_ips.append({'ip_address': str(ip_address)})
    elif secondary_private_ip_address_count > 0:
        for _i in range(secondary_private_ip_address_count):
            fixed_ips.append({'subnet_id': os_subnet['id']})
    try:
        neutron.update_port(os_port['id'],
                            {'port': {'fixed_ips': fixed_ips}})
    except neutron_exception.IpAddressGenerationFailureClient:
        raise exception.NetworkInterfaceLimitExceeded(
            subnet_id=subnet['id'])
    except neutron_exception.IpAddressInUseClient:
        msg = _('Some of %(addresses)s is assigned, but move is not '
                'allowed.') % {'addresses': private_ip_address}
        raise exception.InvalidParameterValue(msg)
    except neutron_exception.BadRequest as ex:
        # NOTE(ft): AWS returns PrivateIpAddressLimitExceeded, but Neutron
        # does general InvalidInput (converted to BadRequest) in the same
        # case.
        msg = _('Specified network interface parameters are invalid. '
                'Reason: %(reason)s') % {'reason': ex.message}
        raise exception.InvalidParameterValue(msg)
    return True
def replace_route_table_association(context, association_id, route_table_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    if route_table['vpc_id'] == ec2utils.change_ec2_id_kind(association_id,
                                                            'vpc'):
        vpc = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'vpc'))
        if vpc is None:
            raise exception.InvalidAssociationIDNotFound(id=association_id)

        rollback_route_table_id = vpc['route_table_id']
        with common.OnCrashCleaner() as cleaner:
            _associate_vpc_item(context, vpc, route_table['id'])
            cleaner.addCleanup(_associate_vpc_item, context, vpc,
                               rollback_route_table_id)
            _update_routes_in_associated_subnets(
                context, cleaner, route_table,
                default_associations_only=True)
    else:
        subnet = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'subnet'))
        if subnet is None or 'route_table_id' not in subnet:
            raise exception.InvalidAssociationIDNotFound(id=association_id)
        if subnet['vpc_id'] != route_table['vpc_id']:
            msg = _('Route table association %(rtbassoc_id)s and route '
                    'table %(rtb_id)s belong to different networks')
            msg = msg % {'rtbassoc_id': association_id,
                         'rtb_id': route_table_id}
            raise exception.InvalidParameterValue(msg)

        rollback_route_table_id = subnet['route_table_id']
        with common.OnCrashCleaner() as cleaner:
            _associate_subnet_item(context, subnet, route_table['id'])
            cleaner.addCleanup(_associate_subnet_item, context, subnet,
                               rollback_route_table_id)
            _update_subnet_routes(context, cleaner, subnet, route_table)

    return {'newAssociationId': association_id}
def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in ['available', 'in-use',
                                'attaching', 'detaching']:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(os_volume.id, True)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap',
                                   {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'],
                           display_description=description)

    return _format_snapshot(context, snapshot, os_snapshot,
                            volume_id=volume_id)
def delete_subnet(context, subnet_id):
    subnet = ec2utils.get_db_item(context, subnet_id)
    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    network_interfaces = network_interface_api.describe_network_interfaces(
        context,
        filter=[{'name': 'subnet-id',
                 'value': [subnet_id]}])['networkInterfaceSet']
    if network_interfaces:
        msg = _("The subnet '%(subnet_id)s' has dependencies and "
                "cannot be deleted.") % {'subnet_id': subnet_id}
        raise exception.DependencyViolation(msg)

    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, subnet['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'subnet', subnet)
        vpn_gateway_api._stop_vpn_in_subnet(context, neutron, cleaner,
                                            subnet)
        try:
            neutron.remove_interface_router(vpc['os_id'],
                                            {'subnet_id': subnet['os_id']})
        except neutron_exception.NotFound:
            pass
        cleaner.addCleanup(neutron.add_interface_router,
                           vpc['os_id'],
                           {'subnet_id': subnet['os_id']})
        try:
            os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
        except neutron_exception.NotFound:
            pass
        else:
            try:
                neutron.delete_network(os_subnet['network_id'])
            except neutron_exception.NetworkInUseClient as ex:
                LOG.warning(_('Failed to delete network %(os_id)s during '
                              'deleting Subnet %(id)s. Reason: %(reason)s'),
                            {'id': subnet['id'],
                             'os_id': os_subnet['network_id'],
                             'reason': ex.message})

    return True
def _revoke_security_group(context, group_id, group_name, ip_permissions,
                           direction):
    rules_bodies = _build_rules(context, group_id, group_name,
                                ip_permissions, direction)
    if not rules_bodies:
        return True
    os_rules = security_group_engine.get_os_group_rules(
        context, rules_bodies[0]['security_group_id'])

    os_rules_to_delete = []
    for rule_body in rules_bodies:
        for os_rule in os_rules:
            if _are_identical_rules(rule_body, os_rule):
                os_rules_to_delete.append(os_rule['id'])

    if len(os_rules_to_delete) != len(rules_bodies):
        security_group = ec2utils.get_db_item(context, group_id)
        if security_group.get('vpc_id'):
            raise exception.InvalidPermissionNotFound()
        return True
    for os_rule_id in os_rules_to_delete:
        security_group_engine.delete_os_group_rule(context, os_rule_id)
    return True
def delete_vpn_connection_route(context, vpn_connection_id,
                                destination_cidr_block):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    if destination_cidr_block not in vpn_connection['cidrs']:
        raise exception.InvalidRouteNotFound(
            _('The specified route %(destination_cidr_block)s does not '
              'exist') % {'destination_cidr_block': destination_cidr_block})
    neutron = clients.neutron(context)
    vpn_gateway = db_api.get_item_by_id(context,
                                        vpn_connection['vpn_gateway_id'])
    with common.OnCrashCleaner() as cleaner:
        _remove_cidr_from_vpn_connection_item(context, vpn_connection,
                                              destination_cidr_block)
        cleaner.addCleanup(_add_cidr_to_vpn_connection_item, context,
                           vpn_connection, destination_cidr_block)
        _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                               vpn_connections=[vpn_connection])
    return True
def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    volume = ec2utils.get_db_item(context, volume_id)

    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    os_instance_id = next(iter(os_volume.attachments), {}).get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason %
                                       {'vol_id': volume_id})

    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    return _format_attachment(context, volume, os_volume,
                              instance_id=instance_id)
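# NOTE: a runnable sketch (with fake attachment data) of the
# first-attachment lookup in detach_volume above: next() with a {} default
# keeps the chained .get() safe when the volume has no attachments.
assert next(iter([]), {}).get('server_id') is None
assert next(iter([{'server_id': 'fake-server'}]),
            {}).get('server_id') == 'fake-server'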
def create_route_table(context, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    route_table = _create_route_table(context, vpc)
    return {'routeTable': _format_route_table(context, route_table,
                                              is_main=False)}
def associate_address(self, context, public_ip=None, instance_id=None,
                      allocation_id=None, network_interface_id=None,
                      private_ip_address=None, allow_reassociation=False):
    instance_network_interfaces = []
    if instance_id:
        # TODO(ft): implement search in DB layer
        for eni in db_api.get_items(context, 'eni'):
            if instance_id and eni.get('instance_id') == instance_id:
                instance_network_interfaces.append(eni)

    neutron = clients.neutron(context)
    if public_ip:
        if instance_network_interfaces:
            msg = _('You must specify an allocation id when mapping '
                    'an address to a VPC instance')
            raise exception.InvalidParameterCombination(msg)
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in db_api.get_items(context,
                                                          'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if address and _is_address_valid(context, neutron, address):
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})

        # NOTE(ft): in fact only the first two parameters are used to
        # associate an address in EC2 Classic mode. Other parameters are
        # sent to validate their emptiness in one place
        return AddressEngineNova().associate_address(
            context, public_ip=public_ip, instance_id=instance_id,
            allocation_id=allocation_id,
            network_interface_id=network_interface_id,
            private_ip_address=private_ip_address,
            allow_reassociation=allow_reassociation)

    if instance_id:
        if not instance_network_interfaces:
            # NOTE(ft): check the instance exists
            ec2utils.get_db_item(context, instance_id)
            msg = _('You must specify an IP address when mapping '
                    'to a non-VPC instance')
            raise exception.InvalidParameterCombination(msg)
        if len(instance_network_interfaces) > 1:
            raise exception.InvalidInstanceId(instance_id=instance_id)
        network_interface = instance_network_interfaces[0]
    else:
        network_interface = ec2utils.get_db_item(context,
                                                 network_interface_id)
    if not private_ip_address:
        private_ip_address = network_interface['private_ip_address']

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(id=allocation_id)

    if address.get('network_interface_id') == network_interface['id']:
        # NOTE(ft): idempotent call
        pass
    elif address.get('network_interface_id') and not allow_reassociation:
        msg = _('resource %(eipalloc_id)s is already associated with '
                'associate-id %(eipassoc_id)s')
        msg = msg % {'eipalloc_id': allocation_id,
                     'eipassoc_id': ec2utils.change_ec2_id_kind(
                         address['id'], 'eipassoc')}
        raise exception.ResourceAlreadyAssociated(msg)
    else:
        internet_gateways = (
            internet_gateway_api.describe_internet_gateways(
                context,
                filter=[{'name': 'attachment.vpc-id',
                         'value': [network_interface['vpc_id']]}])
            ['internetGatewaySet'])
        if len(internet_gateways) == 0:
            msg = _('Network %(vpc_id)s is not attached to any internet '
                    'gateway') % {'vpc_id': network_interface['vpc_id']}
            raise exception.GatewayNotAttached(msg)

    with common.OnCrashCleaner() as cleaner:
        _associate_address_item(context, address,
                                network_interface['id'],
                                private_ip_address)
        cleaner.addCleanup(_disassociate_address_item, context, address)

        os_floating_ip = {'port_id': network_interface['os_id'],
                          'fixed_ip_address': private_ip_address}
        neutron.update_floatingip(address['os_id'],
                                  {'floatingip': os_floating_ip})
    # TODO(ft): generate unique association id for each act of association
    return ec2utils.change_ec2_id_kind(address['id'], 'eipassoc')
def modify_image_attribute(context, image_id, attribute=None,
                           user_group=None, operation_type=None,
                           description=None, launch_permission=None,
                           product_code=None, user_id=None, value=None):
    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        # TODO(ft): figure out corresponding AWS error
        raise exception.IncorrectState(
            reason='Image is still being created or failed')

    attributes = set()

    # NOTE(andrey-mp): launchPermission structure is converted here
    # to plain parameters: attribute, user_group, operation_type, user_id
    if launch_permission is not None:
        attributes.add('launchPermission')
        user_group = list()
        user_id = list()
        if len(launch_permission) == 0:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(launch_permission) > 1:
            msg = _('Only one operation can be specified.')
            raise exception.InvalidParameterCombination(msg)
        operation_type, permissions = launch_permission.popitem()
        for index_key in permissions:
            permission = permissions[index_key]
            if 'group' in permission:
                user_group.append(permission['group'])
            if 'user_id' in permission:
                user_id.append(permission['user_id'])
    if attribute == 'launchPermission':
        attributes.add('launchPermission')

    if description is not None:
        attributes.add('description')
        value = description
    if attribute == 'description':
        attributes.add('description')

    # check attributes
    if len(attributes) == 0:
        if product_code is not None:
            attribute = 'productCodes'
        if attribute in ['kernel', 'ramdisk', 'productCodes',
                         'blockDeviceMapping']:
            raise exception.InvalidParameter(
                _('Parameter %s is invalid. '
                  'The attribute is not supported.') % attribute)
        raise exception.InvalidParameterCombination(
            'No attributes specified.')
    if len(attributes) > 1:
        raise exception.InvalidParameterCombination(
            _('Fields for multiple attribute types specified: %s')
            % str(attributes))

    if 'launchPermission' in attributes:
        if not user_group:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(user_group) != 1 or user_group[0] != 'all':
            msg = _('only group "all" is supported')
            raise exception.InvalidParameterValue(parameter='UserGroup',
                                                  value=user_group,
                                                  reason=msg)
        if operation_type not in ['add', 'remove']:
            msg = _('operation_type must be add or remove')
            raise exception.InvalidParameterValue(parameter='OperationType',
                                                  value=operation_type,
                                                  reason=msg)

        _check_owner(context, os_image)
        os_image.update(is_public=(operation_type == 'add'))
        return True

    if 'description' in attributes:
        if not value:
            raise exception.MissingParameter(
                'The request must contain the parameter description')

        _check_owner(context, os_image)
        image = ec2utils.get_db_item(context, image_id)
        image['description'] = value
        db_api.update_item(context, image)
        return True
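# NOTE: a runnable sketch of the launchPermission flattening performed in
# modify_image_attribute above, using a hypothetical parsed request body
# (the '1' index key mimics EC2 query-list parsing):
launch_permission = {'add': {'1': {'group': 'all'}}}
operation_type, permissions = launch_permission.popitem()
user_group = [p['group'] for p in permissions.values() if 'group' in p]
assert (operation_type, user_group) == ('add', ['all'])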
def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to be really stopped
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for
                # safety. Is it too short/long? Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec")
                           % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other
            # code link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image "
                                "%s"), image['id'], exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'),
                        {'i_id': instance['id'],
                         'image_id': image['id']},
                        exc_info=True)

    image = {'is_public': False,
             'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container
        # format for snapshots of volume backed instances, so that it is
        # 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in
            # case of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
def get_group_os_id(self, context, group_id, group_name):
    if group_name and not group_id:
        os_group = self.get_os_group_by_name(context, group_name)
        return str(os_group['id'])
    return ec2utils.get_db_item(context, group_id, 'sg')['os_id']
def delete_vpc(context, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    subnets = subnet_api.describe_subnets(
        context,
        filter=[{'name': 'vpc-id', 'value': [vpc_id]}])['subnetSet']
    internet_gateways = internet_gateway_api.describe_internet_gateways(
        context,
        filter=[{'name': 'attachment.vpc-id',
                 'value': [vpc['id']]}])['internetGatewaySet']
    route_tables = route_table_api.describe_route_tables(
        context,
        filter=[{'name': 'vpc-id', 'value': [vpc['id']]}])['routeTableSet']
    security_groups = security_group_api.describe_security_groups(
        context,
        filter=[{'name': 'vpc-id',
                 'value': [vpc['id']]}])['securityGroupInfo']
    vpn_gateways = vpn_gateway_api.describe_vpn_gateways(
        context,
        filter=[{'name': 'attachment.vpc-id',
                 'value': [vpc['id']]}])['vpnGatewaySet']
    if (subnets or internet_gateways or len(route_tables) > 1 or
            len(security_groups) > 1 or vpn_gateways):
        msg = _("The vpc '%(vpc_id)s' has dependencies and "
                "cannot be deleted.")
        msg = msg % {'vpc_id': vpc['id']}
        raise exception.DependencyViolation(msg)

    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpc['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpc', vpc)
        route_table_api._delete_route_table(context, vpc['route_table_id'],
                                            cleaner=cleaner)
        if len(security_groups) > 0:
            security_group_api.delete_security_group(
                context, group_id=security_groups[0]['groupId'],
                delete_default=True)
        try:
            neutron.delete_router(vpc['os_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning('Failed to delete router %(os_id)s during deleting '
                        'VPC %(id)s. Reason: %(reason)s',
                        {'id': vpc['id'],
                         'os_id': vpc['os_id'],
                         'reason': ex.message})
        except neutron_exception.NotFound:
            pass

    return True
def delete_route_table(context, route_table_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
    _delete_route_table(context, route_table['id'], vpc)
    return True
def _set_route(context, route_table_id, destination_cidr_block,
               gateway_id, instance_id, network_interface_id,
               vpc_peering_connection_id, do_replace):
    route_table = ec2utils.get_db_item(context, route_table_id)
    vpc = db_api.get_item_by_id(context, route_table['vpc_id'])
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    route_ipnet = netaddr.IPNetwork(destination_cidr_block)
    if route_ipnet in vpc_ipnet:
        msg = _('Cannot create a more specific route for '
                '%(destination_cidr_block)s than local route '
                '%(vpc_cidr_block)s in route table %(rtb_id)s')
        msg = msg % {'rtb_id': route_table_id,
                     'destination_cidr_block': destination_cidr_block,
                     'vpc_cidr_block': vpc['cidr_block']}
        raise exception.InvalidParameterValue(msg)

    obj_param_count = len([p for p in (gateway_id, network_interface_id,
                                       instance_id,
                                       vpc_peering_connection_id)
                           if p is not None])
    if obj_param_count != 1:
        msg = _('The request must contain exactly one of gatewayId, '
                'networkInterfaceId, vpcPeeringConnectionId or instanceId')
        if obj_param_count == 0:
            raise exception.MissingParameter(msg)
        else:
            raise exception.InvalidParameterCombination(msg)

    rollback_route_table_state = copy.deepcopy(route_table)
    if do_replace:
        route_index, old_route = next(
            ((i, r) for i, r in enumerate(route_table['routes'])
             if r['destination_cidr_block'] == destination_cidr_block),
            (None, None))
        if route_index is None:
            msg = _("There is no route defined for "
                    "'%(destination_cidr_block)s' in the route table. "
                    "Use CreateRoute instead.")
            msg = msg % {'destination_cidr_block': destination_cidr_block}
            raise exception.InvalidParameterValue(msg)
        else:
            del route_table['routes'][route_index]

    if gateway_id:
        gateway = ec2utils.get_db_item(context, gateway_id)
        if gateway.get('vpc_id') != route_table['vpc_id']:
            if ec2utils.get_ec2_id_kind(gateway_id) == 'vgw':
                raise exception.InvalidGatewayIDNotFound(id=gateway['id'])
            else:  # igw
                raise exception.InvalidParameterValue(
                    _('Route table %(rtb_id)s and network gateway '
                      '%(igw_id)s belong to different networks')
                    % {'rtb_id': route_table_id,
                       'igw_id': gateway_id})
        route = {'gateway_id': gateway['id']}
    elif network_interface_id:
        network_interface = ec2utils.get_db_item(context,
                                                 network_interface_id)
        if network_interface['vpc_id'] != route_table['vpc_id']:
            msg = _('Route table %(rtb_id)s and interface %(eni_id)s '
                    'belong to different networks')
            msg = msg % {'rtb_id': route_table_id,
                         'eni_id': network_interface_id}
            raise exception.InvalidParameterValue(msg)
        route = {'network_interface_id': network_interface['id']}
    elif instance_id:
        # TODO(ft): implement search in DB layer
        network_interfaces = [eni for eni in db_api.get_items(context, 'eni')
                              if eni.get('instance_id') == instance_id]
        if len(network_interfaces) == 0:
            msg = _("Invalid value '%(i_id)s' for instance ID. "
                    "Instance is not in a VPC.")
            msg = msg % {'i_id': instance_id}
            raise exception.InvalidParameterValue(msg)
        elif len(network_interfaces) > 1:
            raise exception.InvalidInstanceId(instance_id=instance_id)
        network_interface = network_interfaces[0]
        if network_interface['vpc_id'] != route_table['vpc_id']:
            msg = _('Route table %(rtb_id)s and interface %(eni_id)s '
                    'belong to different networks')
            msg = msg % {'rtb_id': route_table_id,
                         'eni_id': network_interface['id']}
            raise exception.InvalidParameterValue(msg)
        route = {'network_interface_id': network_interface['id']}
    else:
        raise exception.InvalidRequest('Parameter VpcPeeringConnectionId is '
                                       'not supported by this '
                                       'implementation')

    route['destination_cidr_block'] = destination_cidr_block
    update_target = _get_route_target(route)

    if do_replace:
        idempotent_call = False
        old_target = _get_route_target(old_route)
        if old_target != update_target:
            update_target = None
    else:
        old_route = next((r for r in route_table['routes']
                          if r['destination_cidr_block'] ==
                          destination_cidr_block), None)
        idempotent_call = old_route == route
        if old_route and not idempotent_call:
            raise exception.RouteAlreadyExists(
                destination_cidr_block=destination_cidr_block)

    if not idempotent_call:
        route_table['routes'].append(route)

    with common.OnCrashCleaner() as cleaner:
        db_api.update_item(context, route_table)
        cleaner.addCleanup(db_api.update_item, context,
                           rollback_route_table_state)
        _update_routes_in_associated_subnets(context, cleaner, route_table,
                                             update_target=update_target)

    return True
def create_network_interface(context, subnet_id,
                             private_ip_address=None,
                             private_ip_addresses=None,
                             secondary_private_ip_address_count=None,
                             description=None,
                             security_group_id=None):
    subnet = ec2utils.get_db_item(context, subnet_id)
    if subnet is None:
        raise exception.InvalidSubnetIDNotFound(id=subnet_id)
    neutron = clients.neutron(context)
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    # NOTE(Alex): Combine and check ip addresses. Neutron will accept
    # ip_address as a parameter for specified address and subnet_id for
    # address to auto-allocate.
    # TODO(Alex): Implement better diagnostics.
    subnet_ipnet = netaddr.IPNetwork(os_subnet['cidr'])
    if not private_ip_addresses:
        private_ip_addresses = []
    if private_ip_address is not None:
        private_ip_addresses.insert(0,
                                    {'private_ip_address': private_ip_address,
                                     'primary': True})
    primary_ip = None
    fixed_ips = []
    for ip in private_ip_addresses:
        ip_address = netaddr.IPAddress(ip['private_ip_address'])
        if ip_address not in subnet_ipnet:
            raise exception.InvalidParameterValue(
                value=str(ip_address),
                parameter='PrivateIpAddresses',
                reason='IP address is out of the subnet range')
        if ip.get('primary', False):
            if primary_ip is not None:
                raise exception.InvalidParameterValue(
                    value=str(ip_address),
                    parameter='PrivateIpAddresses',
                    reason='More than one primary ip is supplied')
            else:
                primary_ip = str(ip_address)
                fixed_ips.insert(0, {'ip_address': primary_ip})
        else:
            fixed_ips.append({'ip_address': str(ip_address)})

    if not fixed_ips and not secondary_private_ip_address_count:
        secondary_private_ip_address_count = 1
    if secondary_private_ip_address_count is None:
        secondary_private_ip_address_count = 0
    if secondary_private_ip_address_count > 0:
        for _i in range(secondary_private_ip_address_count):
            fixed_ips.append({'subnet_id': os_subnet['id']})

    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    vpc_id = vpc['id']
    dhcp_options_id = vpc.get('dhcp_options_id', None)

    if not security_group_id:
        default_groups = security_group_api.describe_security_groups(
            context, filter=[{'name': 'vpc-id', 'value': [vpc_id]},
                             {'name': 'group-name', 'value': ['default']}]
        )['securityGroupInfo']
        security_group_id = [default_group['groupId']
                             for default_group in default_groups]
    security_groups = db_api.get_items_by_ids(context, security_group_id)
    if any(security_group['vpc_id'] != vpc['id']
           for security_group in security_groups):
        msg = _('You have specified two resources that belong to '
                'different networks.')
        raise exception.InvalidGroupNotFound(msg)
    os_groups = [security_group['os_id']
                 for security_group in security_groups]

    with common.OnCrashCleaner() as cleaner:
        os_port_body = {'port': {'network_id': os_subnet['network_id'],
                                 'security_groups': os_groups}}
        os_port_body['port']['fixed_ips'] = fixed_ips
        try:
            os_port = neutron.create_port(os_port_body)['port']
        except (neutron_exception.IpAddressGenerationFailureClient,
                neutron_exception.OverQuotaClient):
            raise exception.InsufficientFreeAddressesInSubnet()
        except (neutron_exception.IpAddressInUseClient,
                neutron_exception.BadRequest) as ex:
            # NOTE(ft): AWS returns InvalidIPAddress.InUse for a primary IP
            # address, but InvalidParameterValue for secondary one.
            # AWS returns PrivateIpAddressLimitExceeded, but Neutron does
            # general InvalidInput (converted to BadRequest) in the same case.
            msg = _('Specified network interface parameters are invalid. '
                    'Reason: %(reason)s') % {'reason': ex.message}
            raise exception.InvalidParameterValue(msg)
        cleaner.addCleanup(neutron.delete_port, os_port['id'])

        if primary_ip is None:
            primary_ip = os_port['fixed_ips'][0]['ip_address']
        network_interface = db_api.add_item(
            context, 'eni',
            {'os_id': os_port['id'],
             'vpc_id': subnet['vpc_id'],
             'subnet_id': subnet['id'],
             'description': description,
             'private_ip_address': primary_ip})
        cleaner.addCleanup(db_api.delete_item,
                           context, network_interface['id'])

        network_interface_id = network_interface['id']
        neutron.update_port(os_port['id'],
                            {'port': {'name': network_interface_id}})
        if dhcp_options_id:
            dhcp_options._add_dhcp_opts_to_port(
                context,
                db_api.get_item_by_id(context, dhcp_options_id),
                network_interface,
                os_port)

    security_groups = security_group_api._format_security_groups_ids_names(
        context)
    return {'networkInterface':
            _format_network_interface(
                context, network_interface, os_port,
                security_groups=security_groups)}
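# An illustrative sketch (not part of the original module) of the port body
# that create_network_interface() sends to Neutron: explicitly requested
# addresses become 'ip_address' entries (the primary one kept first), while
# each auto-allocated secondary address is requested with a 'subnet_id'
# entry. All uuids and addresses below are made-up examples.
def _port_body_sketch():
    fixed_ips = [{'ip_address': '10.0.0.10'},      # primary, inserted first
                 {'ip_address': '10.0.0.11'},      # explicit secondary
                 {'subnet_id': 'os-subnet-uuid'}]  # auto-allocated by Neutron
    return {'port': {'network_id': 'os-network-uuid',
                     'security_groups': ['os-sg-uuid'],
                     'fixed_ips': fixed_ips}}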
def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True
        os_instance.stop()

        # wait for the instance to be really stopped
        start_time = time.time()
        while os_instance.status != 'SHUTOFF':
            time.sleep(1)
            os_instance.get()
            # NOTE(yamahata): timeout and error. 1 hour for now for safety.
            #                 Is it too short/long?
            #                 Or is there any better way?
            timeout = 1 * 60 * 60
            if time.time() > start_time + timeout:
                err = _("Couldn't stop instance within %d sec") % timeout
                raise exception.EC2Exception(message=err)

    # build a meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    glance = clients.glance(context)
    with common.OnCrashCleaner() as cleaner:
        os_image_id = os_instance.create_image(name)
        cleaner.addCleanup(glance.images.delete, os_image_id)
        # TODO(andrey-mp): snapshot and volume also must be deleted in case
        # of error
        os_image = glance.images.get(os_image_id)
        image = db_api.add_item(context, _get_os_image_kind(os_image),
                                {'os_id': os_image_id,
                                 'is_public': False,
                                 'description': description})

    if restart_instance:
        os_instance.start()

    return {'imageId': image['id']}
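# A generic sketch (not part of the original module) of the poll-with-timeout
# loop create_image() uses while waiting for the instance to stop. The
# predicate and refresh callables are hypothetical placeholders; the module
# itself polls os_instance.status after os_instance.get().
import time  # already imported by the module; repeated so the sketch stands alone

def _wait_until(predicate, refresh, timeout=60 * 60, interval=1):
    start_time = time.time()
    while not predicate():
        time.sleep(interval)
        refresh()
        if time.time() > start_time + timeout:
            raise RuntimeError("condition not met within %d sec" % timeout)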
def get_group_os_id(self, context, group_id, group_name):
    if group_name:
        return SecurityGroupEngineNova().get_group_os_id(context, group_id,
                                                         group_name)
    return ec2utils.get_db_item(context, group_id, 'sg')['os_id']
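# Usage note (illustrative only; the engine instance is assumed): a group
# referenced by name falls back to the Nova-based engine, while a group
# referenced by id is resolved to its OpenStack uuid through the local 'sg'
# DB item, e.g.:
#
#     engine.get_group_os_id(context, None, 'default')      # Nova path
#     engine.get_group_os_id(context, 'sg-12345678', None)  # DB mapping path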
def create_vpn_connection(context, customer_gateway_id, vpn_gateway_id,
                          type, options=None):
    if not options or options.get('static_routes_only') is not True:
        raise exception.Unsupported('BGP dynamic routing is unsupported')
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connection = next(
        (vpn for vpn in db_api.get_items(context, 'vpn')
         if vpn['customer_gateway_id'] == customer_gateway_id),
        None)
    if vpn_connection:
        if vpn_connection['vpn_gateway_id'] == vpn_gateway_id:
            ec2_vpn_connections = describe_vpn_connections(
                context, vpn_connection_id=[vpn_connection['id']])
            return {
                'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
        else:
            raise exception.InvalidCustomerGatewayDuplicateIpAddress()
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_ikepolicy = {'ike_version': 'v1',
                        'auth_algorithm': 'sha1',
                        'encryption_algorithm': 'aes-128',
                        'pfs': 'group2',
                        'phase1_negotiation_mode': 'main',
                        'lifetime': {'units': 'seconds',
                                     'value': 28800}}
        os_ikepolicy = neutron.create_ikepolicy(
            {'ikepolicy': os_ikepolicy})['ikepolicy']
        cleaner.addCleanup(neutron.delete_ikepolicy, os_ikepolicy['id'])

        os_ipsecpolicy = {'transform_protocol': 'esp',
                          'auth_algorithm': 'sha1',
                          'encryption_algorithm': 'aes-128',
                          'pfs': 'group2',
                          'encapsulation_mode': 'tunnel',
                          'lifetime': {'units': 'seconds',
                                       'value': 3600}}
        os_ipsecpolicy = neutron.create_ipsecpolicy(
            {'ipsecpolicy': os_ipsecpolicy})['ipsecpolicy']
        cleaner.addCleanup(neutron.delete_ipsecpolicy, os_ipsecpolicy['id'])

        psk = ''.join(random.choice(SHARED_KEY_CHARS) for _x in range(32))
        vpn_connection = db_api.add_item(
            context, 'vpn',
            {'customer_gateway_id': customer_gateway['id'],
             'vpn_gateway_id': vpn_gateway['id'],
             'pre_shared_key': psk,
             'os_ikepolicy_id': os_ikepolicy['id'],
             'os_ipsecpolicy_id': os_ipsecpolicy['id'],
             'cidrs': [],
             'os_ipsec_site_connections': {}})
        cleaner.addCleanup(db_api.delete_item, context, vpn_connection['id'])

        neutron.update_ikepolicy(
            os_ikepolicy['id'], {'ikepolicy': {'name': vpn_connection['id']}})
        neutron.update_ipsecpolicy(
            os_ipsecpolicy['id'],
            {'ipsecpolicy': {'name': vpn_connection['id']}})

        _reset_vpn_connections(context, neutron, cleaner, vpn_gateway,
                               vpn_connections=[vpn_connection])

    ec2_vpn_connections = describe_vpn_connections(
        context, vpn_connection_id=[vpn_connection['id']])
    return {'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
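# A standalone sketch (not part of the original module) of the pre-shared-key
# generation used above. SHARED_KEY_CHARS is defined elsewhere in the module;
# the alphabet below is only an assumed stand-in for the example.
import random
import string

def _psk_sketch(length=32):
    shared_key_chars = string.ascii_letters + string.digits  # assumed alphabet
    # a hardened variant could draw from random.SystemRandom() instead
    return ''.join(random.choice(shared_key_chars) for _x in range(length))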