def get_os_items(self):
    nova = clients.nova(ec2_context.get_os_admin_context())
    os_instances = nova.servers.list(
        search_opts={'all_tenants': True,
                     'project_id': self.context.project_id})
    self.os_instances = {i.id: i for i in os_instances}
    return clients.cinder(self.context).volumes.list()


def attach_network_interface(context, network_interface_id,
                             instance_id, device_index):
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        raise exception.InvalidParameterValue(
            _("Network interface '%(id)s' is currently in use.") %
            {'id': network_interface_id})
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # TODO(Alex) Check that the instance is not yet attached to another VPC
    # TODO(Alex) Check that the instance is "our", not created via nova
    # (which means that it doesn't belong to any VPC and can't be attached)
    if any(eni['device_index'] == device_index
           for eni in db_api.get_items(context, 'eni')
           if eni.get('instance_id') == instance_id):
        raise exception.InvalidParameterValue(
            _("Instance '%(id)s' already has an interface attached at "
              "device index '%(index)s'.") % {'id': instance_id,
                                              'index': device_index})
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        # TODO(Alex) nova inserts compute:%availability_zone into device_owner
        #            'device_owner': 'compute:None'}})
        _attach_network_interface_item(context, network_interface,
                                       instance_id, device_index)
        cleaner.addCleanup(_detach_network_interface_item, context,
                           network_interface)
        nova.servers.interface_attach(os_instance_id, os_port['id'],
                                      None, None)
    return {'attachmentId': ec2utils.change_ec2_id_kind(
        network_interface['id'], 'eni-attach')}


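# The attachment id above is derived from the interface id via
# ec2utils.change_ec2_id_kind. A minimal sketch of that conversion, assuming
# EC2-style '<kind>-<hex>' identifiers; the helper name below and the exact
# behavior of the real ec2utils function are this sketch's own assumptions:
def _change_ec2_id_kind_sketch(obj_id, new_kind):
    # 'eni-1234abcd' + 'eni-attach' -> 'eni-attach-1234abcd'
    return '%s-%s' % (new_kind, obj_id.split('-')[-1])

assert _change_ec2_id_kind_sketch('eni-1234abcd', 'eni-attach') == \
    'eni-attach-1234abcd'

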
def get_os_items(self):
    nova = clients.nova(self.context)
    zones = nova.availability_zones.list(detailed=False)
    return [zone for zone in zones
            if zone.zoneName != CONF.internal_service_availability_zone]


def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group.id}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true', 'groupId': security_group['id']}


def delete_key_pair(context, key_name):
    nova = clients.nova(context)
    try:
        nova.keypairs.delete(key_name)
    except nova_exception.NotFound:
        # AWS returns true even if the key doesn't exist
        pass
    return True


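# Hedged usage sketch: deletion mirrors AWS DeleteKeyPair semantics and is
# idempotent from the caller's view; `ctx` is a hypothetical request context.
# delete_key_pair(ctx, 'my-key')   # -> True
# delete_key_pair(ctx, 'my-key')   # -> True again; NotFound is swallowed

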
def allocate_address(self, context, domain=None):
    nova = clients.nova(context)
    try:
        nova_floating_ip = nova.floating_ips.create()
    except nova_exception.Forbidden:
        raise exception.AddressLimitExceeded()
    return None, self.convert_ips_to_neutron_format(
        context, [nova_floating_ip])[0]


def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    os_instance_id = self.get_nova_ip_by_public_ip(
        context, public_ip).instance_id
    if os_instance_id:
        nova = clients.nova(context)
        nova.servers.remove_floating_ip(os_instance_id, public_ip)
    return None


def get_os_items(self):
    # Original EC2 in nova filters out vpn keys for the admin user.
    # We're not filtering out the vpn keys for now.
    # In order to implement this we'd have to configure vpn_key_suffix
    # in our config, which we consider overkill.
    # suffix = CONF.vpn_key_suffix
    # if context.is_admin or not key_pair['name'].endswith(suffix):
    nova = clients.nova(self.context)
    return nova.keypairs.list()


def associate_address(self, context, public_ip=None, instance_id=None,
                      allocation_id=None, network_interface_id=None,
                      private_ip_address=None, allow_reassociation=False):
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # NOTE(ft): check the public IP exists to raise AWS exception otherwise
    self.get_nova_ip_by_public_ip(context, public_ip)
    nova = clients.nova(context)
    nova.servers.add_floating_ip(os_instance_id, public_ip)
    return None


def delete_group(self, context, group_name=None, group_id=None,
                 delete_default=False):
    nova = clients.nova(context)
    os_id = self.get_group_os_id(context, group_id, group_name)
    try:
        nova.security_groups.delete(os_id)
    except Exception:
        # TODO(Alex): do log error
        # nova doesn't differentiate Conflict exception like neutron does
        pass


def get_nova_group_by_name(self, context, group_name,
                           nova_security_groups=None):
    if nova_security_groups is None:
        nova = clients.nova(context)
        nova_security_groups = nova.security_groups.list()
    nova_group = next((g for g in nova_security_groups
                       if g.name == group_name), None)
    if nova_group is None:
        raise exception.InvalidGroupNotFound(id=group_name)
    return nova_group


def get_os_group_rules(self, context, os_id):
    nova = clients.nova(context)
    os_security_group = nova.security_groups.get(os_id)
    os_rules = os_security_group.rules
    # Fetch the group list once, rather than once per rule.
    nova_security_groups = nova.security_groups.list()
    neutron_rules = []
    for os_rule in os_rules:
        neutron_rules.append(
            self.convert_rule_to_neutron(context, os_rule,
                                         nova_security_groups))
    return neutron_rules


def get_nova_ip_by_public_ip(self, context, public_ip,
                             nova_floating_ips=None):
    if nova_floating_ips is None:
        nova = clients.nova(context)
        nova_floating_ips = nova.floating_ips.list()
    nova_ip = next((ip for ip in nova_floating_ips
                    if ip.ip == public_ip), None)
    if nova_ip is None:
        msg = _("The address '%(public_ip)s' does not belong to you.")
        raise exception.AuthFailure(msg % {'public_ip': public_ip})
    return nova_ip


def create_key_pair(context, key_name):
    _validate_name(key_name)
    nova = clients.nova(context)
    try:
        key_pair = nova.keypairs.create(key_name)
    except nova_exception.OverLimit:
        raise exception.ResourceLimitExceeded(resource='keypairs')
    except nova_exception.Conflict:
        raise exception.InvalidKeyPairDuplicate(key_name=key_name)
    formatted_key_pair = _format_key_pair(key_pair)
    formatted_key_pair['keyMaterial'] = key_pair.private_key
    return formatted_key_pair


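# create_key_pair relies on a _format_key_pair helper that is not shown here.
# A minimal sketch of what it plausibly returns, inferred from how the result
# is used above (keyMaterial is added separately); field names follow the EC2
# CreateKeyPair response and are an assumption of this sketch:
def _format_key_pair_sketch(key_pair):
    return {'keyName': key_pair.name,
            'keyFingerprint': key_pair.fingerprint}

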
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in
                        db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)

        if not CONF.disable_ec2_classic:
            if address and _is_address_valid(context, neutron, address):
                msg = _('You must specify an association id when '
                        'unmapping an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            # NOTE(tikitavi): check the public IP exists to raise AWS
            # exception otherwise
            os_floating_ip = self.get_os_floating_ip_by_public_ip(
                context, public_ip)
            os_ports = self.get_os_ports(context)
            os_instance_id = _get_os_instance_id(context, os_floating_ip,
                                                 os_ports)
            if os_instance_id:
                nova = clients.nova(context)
                nova.servers.remove_floating_ip(os_instance_id, public_ip)
            return None

        if not address:
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})
        if 'network_interface_id' not in address:
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                     'eipassoc')

    address = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
    if address is None or not _is_address_valid(context, neutron, address):
        raise exception.InvalidAssociationIDNotFound(id=association_id)
    if 'network_interface_id' in address:
        with common.OnCrashCleaner() as cleaner:
            network_interface_id = address['network_interface_id']
            private_ip_address = address['private_ip_address']
            _disassociate_address_item(context, address)
            cleaner.addCleanup(_associate_address_item, context, address,
                               network_interface_id, private_ip_address)
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': {'port_id': None}})


def authorize_security_group(self, context, rule_body):
    nova = clients.nova(context)
    try:
        nova.security_group_rules.create(
            rule_body['security_group_id'], rule_body.get('protocol'),
            rule_body.get('port_range_min', -1),
            rule_body.get('port_range_max', -1),
            rule_body.get('remote_ip_prefix'),
            rule_body.get('remote_group_id'))
    except nova_exception.Conflict:
        raise exception.InvalidPermissionDuplicate()
    except nova_exception.OverLimit:
        raise exception.RulesPerSecurityGroupLimitExceeded()


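# A hedged example of the rule_body mapping consumed above; the keys follow
# the Neutron-style names the function reads with .get(), and the values are
# illustrative only (SSH from a single CIDR):
example_rule_body = {
    'security_group_id': 'os-group-id',   # OS-level group id (hypothetical)
    'protocol': 'tcp',
    'port_range_min': 22,
    'port_range_max': 22,
    'remote_ip_prefix': '203.0.113.0/24',
    'remote_group_id': None,
}

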
def import_key_pair(context, key_name, public_key_material):
    _validate_name(key_name)
    if not public_key_material:
        raise exception.MissingParameter(
            _('The request must contain the parameter PublicKeyMaterial'))
    nova = clients.nova(context)
    public_key = base64.b64decode(public_key_material).decode('utf-8')
    try:
        key_pair = nova.keypairs.create(key_name, public_key)
    except nova_exception.OverLimit:
        raise exception.ResourceLimitExceeded(resource='keypairs')
    except nova_exception.Conflict:
        raise exception.InvalidKeyPairDuplicate(key_name=key_name)
    return _format_key_pair(key_pair)


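# Hedged usage sketch: callers must base64-encode the OpenSSH public key,
# since import_key_pair decodes the material before handing it to Nova.
# `ctx` and the key text are hypothetical.
import base64

ssh_public_key = 'ssh-rsa AAAAB3NzaC1yc2E... user@host'
material = base64.b64encode(ssh_public_key.encode('utf-8')).decode('utf-8')
# import_key_pair(ctx, 'imported-key', material)

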
def attach_volume(context, volume_id, instance_id, device):
    volume = ec2utils.get_db_item(context, volume_id)
    instance = ec2utils.get_db_item(context, instance_id)
    nova = clients.nova(context)
    try:
        nova.volumes.create_server_volume(instance['os_id'],
                                          volume['os_id'], device)
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(andrey-mp): raise correct errors for different cases
        LOG.exception('Attach has failed.')
        raise exception.UnsupportedOperation()
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    attachment = _format_attachment(context, volume, os_volume,
                                    instance_id=instance_id)
    # NOTE(andrey-mp): nova sets deleteOnTermination=False for attached volume
    attachment['deleteOnTermination'] = False
    return attachment


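# A hedged sketch of the attachment mapping returned above; the field names
# follow the EC2 AttachVolume response, and the values are illustrative:
example_attachment = {
    'volumeId': 'vol-0123abcd',
    'instanceId': 'i-0123abcd',
    'device': '/dev/vdb',
    'status': 'attaching',
    'deleteOnTermination': False,
}

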
def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    os_instance_id = next(iter(os_volume.attachments), {}).get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason % {'vol_id': volume_id})
    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    return _format_attachment(context, volume, os_volume,
                              instance_id=instance_id)


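# The `next(iter(...), {})` idiom above picks the first attachment, or an
# empty dict when the volume is unattached, without raising StopIteration:
assert next(iter([]), {}).get('server_id') is None
assert next(iter([{'server_id': 'abc'}]), {}).get('server_id') == 'abc'

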
def _create_security_group(context, group_name, group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg',
                                         {'vpc_id': vpc_id,
                                          'os_id': os_security_group.id})
        return {'return': 'true', 'groupId': security_group['id']}


def _describe_verbose(context):
    nova = clients.nova(context)
    availability_zones = nova.availability_zones.list()

    formatted_availability_zones = []
    for availability_zone in availability_zones:
        formatted_availability_zones.append(
            _format_availability_zone(availability_zone))
        for host, services in availability_zone.hosts.items():
            formatted_availability_zones.append(
                {'zoneName': '|- %s' % host,
                 'zoneState': ''})
            for service, values in services.items():
                active = ':-)' if values['active'] else 'XXX'
                enabled = 'enabled' if values['available'] else 'disabled'
                formatted_availability_zones.append(
                    {'zoneName': '| |- %s' % service,
                     'zoneState': ('%s %s %s' % (enabled, active,
                                                 values['updated_at']))})

    return {'availabilityZoneInfo': formatted_availability_zones}


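# Hedged illustration of the verbose listing built above: the '|-' prefixes
# render a host/service tree inside zoneName, e.g. (values invented):
# {'zoneName': 'nova', 'zoneState': 'available'}
# {'zoneName': '|- compute1', 'zoneState': ''}
# {'zoneName': '| |- nova-compute',
#  'zoneState': 'enabled :-) 2023-01-01T00:00:00.000000'}

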
def get_max_instances(context):
    nova = clients.nova(context)
    quotas = nova.quotas.get(context.project_id, context.user_id)
    return quotas.instances


def _format_route_table(context, route_table, is_main=False,
                        associated_subnet_ids=[],
                        gateways={},
                        network_interfaces={},
                        vpn_connections_by_gateway_id={}):
    vpc_id = route_table['vpc_id']
    ec2_route_table = {
        'routeTableId': route_table['id'],
        'vpcId': vpc_id,
        'routeSet': [],
        'propagatingVgwSet': [
            {'gatewayId': vgw_id}
            for vgw_id in route_table.get('propagating_gateways', [])],
        # NOTE(ft): AWS returns empty tag set for a route table
        # if no tag exists
        'tagSet': [],
    }
    # TODO(ft): refactor to get Nova instances outside of this function
    nova = clients.nova(context)
    for route in route_table['routes']:
        origin = ('CreateRouteTable'
                  if route.get('gateway_id', 0) is None else
                  'CreateRoute')
        ec2_route = {'destinationCidrBlock': route['destination_cidr_block'],
                     'origin': origin}
        if 'gateway_id' in route:
            gateway_id = route['gateway_id']
            if gateway_id is None:
                state = 'active'
                ec2_gateway_id = 'local'
            else:
                gateway = gateways.get(gateway_id)
                state = ('active'
                         if gateway and gateway.get('vpc_id') == vpc_id else
                         'blackhole')
                ec2_gateway_id = gateway_id
            ec2_route.update({'gatewayId': ec2_gateway_id,
                              'state': state})
        else:
            network_interface_id = route['network_interface_id']
            network_interface = network_interfaces.get(network_interface_id)
            instance_id = (network_interface.get('instance_id')
                           if network_interface else
                           None)
            state = 'blackhole'
            if instance_id:
                instance = db_api.get_item_by_id(context, instance_id)
                if instance:
                    try:
                        os_instance = nova.servers.get(instance['os_id'])
                        if os_instance and os_instance.status == 'ACTIVE':
                            state = 'active'
                    except nova_exception.NotFound:
                        pass
                ec2_route.update({'instanceId': instance_id,
                                  'instanceOwnerId': context.project_id})
            ec2_route.update({'networkInterfaceId': network_interface_id,
                              'state': state})
        ec2_route_table['routeSet'].append(ec2_route)

    for vgw_id in route_table.get('propagating_gateways', []):
        vgw = gateways.get(vgw_id)
        if vgw and vgw_id in vpn_connections_by_gateway_id:
            cidrs = set()
            vpn_connections = vpn_connections_by_gateway_id[vgw_id]
            for vpn_connection in vpn_connections:
                cidrs.update(vpn_connection['cidrs'])
            state = 'active' if vgw['vpc_id'] == vpc_id else 'blackhole'
            for cidr in cidrs:
                ec2_route = {'gatewayId': vgw_id,
                             'destinationCidrBlock': cidr,
                             'state': state,
                             'origin': 'EnableVgwRoutePropagation'}
                ec2_route_table['routeSet'].append(ec2_route)

    associations = []
    if is_main:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(
                vpc_id, 'rtbassoc'),
            'routeTableId': route_table['id'],
            'main': True})
    for subnet_id in associated_subnet_ids:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(
                subnet_id, 'rtbassoc'),
            'routeTableId': route_table['id'],
            'subnetId': subnet_id,
            'main': False})
    if associations:
        ec2_route_table['associationSet'] = associations

    return ec2_route_table


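# Hedged examples of routeSet entries emitted above, one per branch; the
# identifier values are illustrative only:
example_local_route = {
    'destinationCidrBlock': '10.0.0.0/16',
    'origin': 'CreateRouteTable',   # gateway_id present and None
    'gatewayId': 'local',
    'state': 'active',
}
example_instance_route = {
    'destinationCidrBlock': '0.0.0.0/0',
    'origin': 'CreateRoute',
    'instanceId': 'i-0123abcd',
    'instanceOwnerId': 'project-id',
    'networkInterfaceId': 'eni-0123abcd',
    'state': 'active',
}

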
def associate_address(self, context, public_ip=None, instance_id=None,
                      allocation_id=None, network_interface_id=None,
                      private_ip_address=None, allow_reassociation=False):
    instance_network_interfaces = []
    if instance_id:
        # TODO(ft): implement search in DB layer
        for eni in db_api.get_items(context, 'eni'):
            if eni.get('instance_id') == instance_id:
                instance_network_interfaces.append(eni)

    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in
                        db_api.get_items(context, 'eipalloc')
                        if addr['public_ip'] == public_ip), None)

        if not CONF.disable_ec2_classic:
            if instance_network_interfaces:
                msg = _('You must specify an allocation id when mapping '
                        'an address to a VPC instance')
                raise exception.InvalidParameterCombination(msg)
            if address and _is_address_valid(context, neutron, address):
                msg = _(
                    "The address '%(public_ip)s' does not belong to you.")
                raise exception.AuthFailure(msg % {'public_ip': public_ip})

            os_instance_id = ec2utils.get_db_item(context,
                                                  instance_id)['os_id']
            # NOTE(ft): check the public IP exists to raise AWS exception
            # otherwise
            self.get_os_floating_ip_by_public_ip(context, public_ip)
            nova = clients.nova(context)
            nova.servers.add_floating_ip(os_instance_id, public_ip)
            return None

        if not address:
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})
        allocation_id = address['id']

    if instance_id:
        if not instance_network_interfaces:
            # NOTE(ft): check the instance exists
            ec2utils.get_db_item(context, instance_id)
            msg = _('You must specify an IP address when mapping '
                    'to a non-VPC instance')
            raise exception.InvalidParameterCombination(msg)
        if len(instance_network_interfaces) > 1:
            raise exception.InvalidInstanceId(instance_id=instance_id)
        network_interface = instance_network_interfaces[0]
    else:
        network_interface = ec2utils.get_db_item(context,
                                                 network_interface_id)
    if not private_ip_address:
        private_ip_address = network_interface['private_ip_address']

    address = ec2utils.get_db_item(context, allocation_id)
    if not _is_address_valid(context, neutron, address):
        raise exception.InvalidAllocationIDNotFound(id=allocation_id)

    if address.get('network_interface_id') == network_interface['id']:
        # NOTE(ft): idempotent call
        pass
    elif address.get('network_interface_id') and not allow_reassociation:
        msg = _('resource %(eipalloc_id)s is already associated with '
                'associate-id %(eipassoc_id)s')
        msg = msg % {'eipalloc_id': allocation_id,
                     'eipassoc_id': ec2utils.change_ec2_id_kind(
                         address['id'], 'eipassoc')}
        raise exception.ResourceAlreadyAssociated(msg)
    else:
        internet_gateways = (
            internet_gateway_api.describe_internet_gateways(
                context,
                filter=[{'name': 'attachment.vpc-id',
                         'value': [network_interface['vpc_id']]}])
            ['internetGatewaySet'])
        if len(internet_gateways) == 0:
            msg = _('Network %(vpc_id)s is not attached to any internet '
                    'gateway') % {'vpc_id': network_interface['vpc_id']}
            raise exception.GatewayNotAttached(msg)

        with common.OnCrashCleaner() as cleaner:
            _associate_address_item(context, address,
                                    network_interface['id'],
                                    private_ip_address)
            cleaner.addCleanup(_disassociate_address_item, context, address)
            os_floating_ip = {'port_id': network_interface['os_id'],
                              'fixed_ip_address': private_ip_address}
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': os_floating_ip})
    # TODO(ft): generate unique association id for each act of association
    return ec2utils.change_ec2_id_kind(address['id'], 'eipassoc')


def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)
        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to really stop
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for
                # safety. Is it too short/long? Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec")
                           % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image "
                                "%s"), image['id'], exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'),
                        {'i_id': instance['id'],
                         'image_id': image['id']},
                        exc_info=True)

    image = {'is_public': False, 'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container
        # format for snapshots of volume backed instances, so that it is
        # 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in
            # case of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}


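# A minimal, hedged sketch of the stop-and-wait pattern used in
# delayed_create above, factored as a standalone helper; the one-second poll
# and one-hour timeout mirror the inline code, while the helper name and the
# RuntimeError are this sketch's own choices:
import time

def _wait_for_shutoff_sketch(os_instance, timeout=60 * 60):
    start_time = time.time()
    while os_instance.status != 'SHUTOFF':
        if time.time() > start_time + timeout:
            raise RuntimeError("couldn't stop instance within %d sec"
                               % timeout)
        time.sleep(1)
        os_instance.get()   # refresh status from Nova

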
def get_os_floating_ips(self, context):
    nova = clients.nova(context)
    return self.convert_ips_to_neutron_format(context,
                                              nova.floating_ips.list())


def release_address(self, context, public_ip, allocation_id):
    nova = clients.nova(context)
    nova.floating_ips.delete(
        self.get_nova_ip_by_public_ip(context, public_ip).id)


def delete_os_group_rule(self, context, os_id):
    nova = clients.nova(context)
    nova.security_group_rules.delete(os_id)


def get_os_groups(self, context):
    nova = clients.nova(context)
    return self.convert_groups_to_neutron_format(
        context, nova.security_groups.list())