Example #1
    def test_nova(self, nova):
        reload(clients)

        # test normal flow
        context = mock.Mock(
            auth_token='fake_token',
            service_catalog=[{'type': 'computev21',
                              'endpoints': [{'publicURL': 'novav21_url'}]}])
        with fixtures.LoggerFixture() as logs:
            res = clients.nova(context)
        self.assertEqual(nova.return_value, res)
        nova.assert_called_with(
            '2.3', bypass_url='novav21_url', cacert=None, insecure=False,
            auth_url='keystone_url', auth_token='fake_token',
            username=None, api_key=None, project_id=None)
        self.assertEqual(0, len(logs.output))

        # test switching to v2 client
        nova.side_effect = [nova_exception.UnsupportedVersion(), 'v2_client']
        with fixtures.LoggerFixture() as logs:
            res = clients.nova(context)
        self.assertEqual('v2_client', res)
        nova.assert_called_with(
            '2', bypass_url='novav21_url', cacert=None, insecure=False,
            auth_url='keystone_url', auth_token='fake_token',
            username=None, api_key=None, project_id=None)
        self.assertNotEqual(0, len(logs.output))

        # test raising of an exception if v2 client is not supported as well
        nova.side_effect = nova_exception.UnsupportedVersion()
        self.assertRaises(nova_exception.UnsupportedVersion,
                          clients.nova, context)

        nova.side_effect = None
        reload(clients)

        # test switching to 'compute' service type
        context.service_catalog = [{'type': 'compute',
                                    'endpoints': [{'publicURL': 'nova_url'}]}]
        with fixtures.LoggerFixture() as logs:
            res = clients.nova(context)
        nova.assert_called_with(
            '2.3', bypass_url='nova_url', cacert=None, insecure=False,
            auth_url='keystone_url', auth_token='fake_token',
            username=None, api_key=None, project_id=None)
        self.assertNotEqual(0, len(logs.output))

        # test behavior if 'compute' service type is not found as well
        context.service_catalog = [{'type': 'fake'}]
        clients.nova(context)
        nova.assert_called_with(
            '2.3', bypass_url=None, cacert=None, insecure=False,
            auth_url='keystone_url', auth_token='fake_token',
            username=None, api_key=None, project_id=None)
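
The assertions in Example #1 pin down the legacy, catalog-based behaviour: try the Nova client with microversion '2.3' first, fall back to a plain '2' client with a warning when novaclient raises UnsupportedVersion, and prefer the 'computev21' service catalog entry over the legacy 'compute' one. Below is only a minimal sketch consistent with those assertions, not the project's actual clients module (which, judging by the reload(clients) calls, presumably also caches the negotiated version at module level); the keystone URL constant is a stand-in for whatever configuration option the real code reads, and the keyword arguments are the older novaclient ones shown in the assertions.

import logging

from novaclient import client as nova_client
from novaclient import exceptions as nova_exception

LOG = logging.getLogger(__name__)

_KEYSTONE_URL = 'keystone_url'  # assumption: read from configuration in real code


def _nova_public_url(context):
    # Prefer the microversion-aware 'computev21' catalog entry; fall back to
    # the legacy 'compute' entry with a warning, as the test expects.
    for service_type in ('computev21', 'compute'):
        for service in context.service_catalog:
            if service.get('type') == service_type:
                if service_type == 'compute':
                    LOG.warning("Using legacy 'compute' service type")
                return service['endpoints'][0]['publicURL']
    LOG.warning('No Nova endpoint found in the service catalog')
    return None


def nova(context):
    kwargs = dict(bypass_url=_nova_public_url(context), cacert=None,
                  insecure=False, auth_url=_KEYSTONE_URL,
                  auth_token=context.auth_token, username=None,
                  api_key=None, project_id=None)
    try:
        return nova_client.Client('2.3', **kwargs)
    except nova_exception.UnsupportedVersion:
        LOG.warning('Nova microversion 2.3 is not supported, '
                    'falling back to the v2 client')
        return nova_client.Client('2', **kwargs)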
Example #2
    def test_nova(self, nova, get_api_version):
        context = mock.NonCallableMock(session=mock.sentinel.session)

        # test normal flow with get_api_version call
        res = clients.nova(context)
        self.assertEqual(nova.return_value, res)
        nova.assert_called_with("2.3", service_type="compute", session=mock.sentinel.session)
        get_api_version.assert_called_once_with(context)

        # test CONF.nova_service_type is used
        self.configure(nova_service_type="compute_legacy")
        clients.nova(context)
        nova.assert_called_with("2.3", service_type="compute_legacy", session=mock.sentinel.session)
Example #3
    def test_nova(self, nova, get_api_version):
        context = mock.NonCallableMock(session=mock.sentinel.session)

        # test normal flow with get_api_version call
        res = clients.nova(context)
        self.assertEqual(nova.return_value, res)
        nova.assert_called_with('2.3', service_type='compute',
                                session=mock.sentinel.session)
        get_api_version.assert_called_once_with(context)

        # test CONF.nova_service_type is used
        self.configure(nova_service_type='compute_legacy')
        clients.nova(context)
        nova.assert_called_with('2.3', service_type='compute_legacy',
                                session=mock.sentinel.session)
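
Examples #2 and #3 exercise a newer, session-based variant of the same helper. The following is a minimal sketch consistent with those assertions only; it is not the project's module, and _get_api_version() plus the default service type are hypothetical stand-ins for the version-probing helper and the CONF.nova_service_type option that the tests mock or configure.

from novaclient import client as nova_client

REQUIRED_NOVA_API_MICROVERSION = '2.3'


def _get_api_version(context):
    # Stand-in: the real helper presumably probes the Nova endpoint for the
    # highest supported microversion; the tests simply mock its return value.
    return REQUIRED_NOVA_API_MICROVERSION


def nova(context, service_type='compute'):
    # service_type would normally come from CONF.nova_service_type, as the
    # second half of the test shows ('compute_legacy').
    return nova_client.Client(_get_api_version(context),
                              service_type=service_type,
                              session=context.session)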
Example #4
def attach_network_interface(context, network_interface_id,
                             instance_id, device_index):
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        raise exception.InvalidParameterValue(
            _("Network interface '%(id)s' is currently in use.") %
            {'id': network_interface_id})
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # TODO(Alex) Check that the instance is not yet attached to another VPC
    # TODO(Alex) Check that the instance is "our", not created via nova
    # (which means that it doesn't belong to any VPC and can't be attached)
    if any(eni['device_index'] == device_index
           for eni in db_api.get_items(context, 'eni')
           if eni.get('instance_id') == instance_id):
        raise exception.InvalidParameterValue(
            _("Instance '%(id)s' already has an interface attached at "
              "device index '%(index)s'.") % {'id': instance_id,
                                              'index': device_index})
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        # TODO(Alex) nova inserts compute:%availability_zone into device_owner
        #                              'device_owner': 'compute:None'}})
        _attach_network_interface_item(context, network_interface,
                                       instance_id, device_index)
        cleaner.addCleanup(_detach_network_interface_item, context,
                           network_interface)
        nova.servers.interface_attach(os_instance_id, os_port['id'],
                                      None, None)
    return {'attachmentId': ec2utils.change_ec2_id_kind(
                    network_interface['id'], 'eni-attach')}
Example #5
def create_security_group(context, group_name, group_description, vpc_id=None):
    nova = clients.nova(context)
    if vpc_id and group_name != vpc_id:
        security_groups = describe_security_groups(
            context,
            filter=[{'name': 'vpc-id', 'value': [vpc_id]},
                    {'name': 'group-name', 'value': [group_name]}]
        )['securityGroupInfo']
        if security_groups:
            raise exception.InvalidGroupDuplicate(name=group_name)
    with common.OnCrashCleaner() as cleaner:
        try:
            # TODO(Alex): Shouldn't allow creation of groups with existing
            # name if in the same VPC or in EC2-Classic.
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
            security_group = db_api.add_item(context, 'sg', {
                'vpc_id': vpc_id,
                'os_id': os_security_group.id
            })
            return {'return': 'true', 'groupId': security_group['id']}
    return {'return': 'true'}
Example #6
 def get_os_items(self):
     nova = clients.nova(self.context)
     zones = nova.availability_zones.list(detailed=False)
     # Filter out the internal service availability zone without mutating
     # the list while iterating over it.
     return [zone for zone in zones
             if zone.zoneName != CONF.internal_service_availability_zone]
Example #8
def detach_volume(context,
                  volume_id,
                  instance_id=None,
                  device=None,
                  force=None):
    #volume = ec2utils.get_db_item(context, volume_id)

    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume_id)
    os_instance_id = next(iter(os_volume.attachments), {}).get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message with the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason % {'vol_id': volume_id})

    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    # [varun]: Sending delete on termination as false (last param below)
    # when volume is detached delete on termination flag does not make sense
    # therefore sending false to make consistent with AWS
    return _format_attachment(context,
                              os_volume,
                              instance_id=instance_id,
                              delete_on_termination_flag=False)
Example #9
 def disassociate_address(self, context, public_ip=None,
                          association_id=None):
     os_instance_id = self.get_nova_ip_by_public_ip(context,
                                                    public_ip).instance_id
     if os_instance_id:
         nova = clients.nova(context)
         nova.servers.remove_floating_ip(os_instance_id, public_ip)
     return None
Example #10
def delete_key_pair(context, key_name):
    nova = clients.nova(context)
    try:
        nova.keypairs.delete(key_name)
    except nova_exception.NotFound:
        # aws returns true even if the key doesn't exist
        pass
    return True
Example #11
 def allocate_address(self, context, domain=None):
     nova = clients.nova(context)
     try:
         nova_floating_ip = nova.floating_ips.create()
     except nova_exception.Forbidden:
         raise exception.AddressLimitExceeded()
     return None, self.convert_ips_to_neutron_format(context,
                                                     [nova_floating_ip])[0]
Example #13
 def allocate_address(self, context, domain=None):
     nova = clients.nova(context)
     try:
         nova_floating_ip = nova.floating_ips.create()
     except nova_exception.Forbidden:
         raise exception.AddressLimitExceeded()
     return None, self.convert_ips_to_neutron_format(
         context, [nova_floating_ip])[0]
Example #14
 def get_os_items(self):
     # Original EC2 in nova filters out vpn keys for admin user.
     # We're not filtering out the vpn keys for now.
     # In order to implement this we'd have to configure vpn_key_suffix
     # in our config which we consider an overkill.
     # suffix = CONF.vpn_key_suffix
     # if context.is_admin or not key_pair['name'].endswith(suffix):
     nova = clients.nova(self.context)
     return nova.keypairs.list()
Example #15
 def associate_address(self, context, public_ip=None, instance_id=None,
                       allocation_id=None, network_interface_id=None,
                       private_ip_address=None, allow_reassociation=False):
     os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
     # NOTE(ft): check the public IP exists to raise AWS exception otherwise
     self.get_nova_ip_by_public_ip(context, public_ip)
     nova = clients.nova(context)
     nova.servers.add_floating_ip(os_instance_id, public_ip)
     return None
Example #17
def delete_key_pair(context, key_name):
    nova = clients.nova(context)
    try:
        nova.keypairs.delete(key_name)
    except nova_exception.NotFound:
        # aws returns true even if the key doesn't exist
        # AK - but we are not aws
        raise exception.InvalidKeypairNotFound(id=key_name)
    return True
Example #18
 def get_nova_group_by_name(self, context, group_name,
                            nova_security_groups=None):
     if nova_security_groups is None:
         nova = clients.nova(context)
         nova_security_groups = nova.security_groups.list()
     nova_group = next((g for g in nova_security_groups
                        if g.name == group_name), None)
     if nova_group is None:
         raise exception.InvalidGroupNotFound(sg_id=group_name)
     return nova_group
Example #19
 def get_os_group_rules(self, context, os_id):
     nova = clients.nova(context)
     os_security_group = nova.security_groups.get(os_id)
     os_rules = os_security_group.rules
     neutron_rules = []
     for os_rule in os_rules:
         neutron_rules.append(
             self.convert_rule_to_neutron(context, os_rule,
                                          nova.security_groups.list()))
     return neutron_rules
Example #20
 def disassociate_address(self,
                          context,
                          public_ip=None,
                          association_id=None):
     os_instance_id = self.get_nova_ip_by_public_ip(context,
                                                    public_ip).instance_id
     if os_instance_id:
         nova = clients.nova(context)
         nova.servers.remove_floating_ip(os_instance_id, public_ip)
     return None
Example #21
 def delete_group(self, context, group_name=None, group_id=None,
                  delete_default=False):
     nova = clients.nova(context)
     os_id = self.get_group_os_id(context, group_id, group_name)
     try:
         nova.security_groups.delete(os_id)
     except Exception as ex:
         # TODO(Alex): do log error
         # nova doesn't differentiate Conflict exception like neutron does
         pass
Example #22
 def get_os_group_rules(self, context, os_id):
     nova = clients.nova(context)
     os_security_group = nova.security_groups.get(os_id)
     os_rules = os_security_group.rules
     neutron_rules = []
     for os_rule in os_rules:
         neutron_rules.append(
             self.convert_rule_to_neutron(context,
                                          os_rule,
                                          nova.security_groups.list()))
     return neutron_rules
Example #23
 def get_nova_ip_by_public_ip(self, context, public_ip,
                              nova_floating_ips=None):
     if nova_floating_ips is None:
         nova = clients.nova(context)
         nova_floating_ips = nova.floating_ips.list()
     nova_ip = next((ip for ip in nova_floating_ips
                     if ip.ip == public_ip), None)
     if nova_ip is None:
         msg = _("The address '%(public_ip)s' does not belong to you.")
         raise exception.AuthFailure(msg % {'public_ip': public_ip})
     return nova_ip
Example #24
 def get_nova_group_by_name(self,
                            context,
                            group_name,
                            nova_security_groups=None):
     if nova_security_groups is None:
         nova = clients.nova(context)
         nova_security_groups = nova.security_groups.list()
     nova_group = next(
         (g for g in nova_security_groups if g.name == group_name), None)
     if nova_group is None:
         raise exception.InvalidGroupNotFound(sg_id=group_name)
     return nova_group
Example #25
def create_key_pair(context, key_name):
    _validate_name(key_name)
    nova = clients.nova(context)
    try:
        key_pair = nova.keypairs.create(key_name)
    except nova_exception.OverLimit:
        raise exception.ResourceLimitExceeded(resource='keypairs')
    except nova_exception.Conflict:
        raise exception.InvalidKeyPairDuplicate(key_name=key_name)
    formatted_key_pair = _format_key_pair(key_pair)
    formatted_key_pair['keyMaterial'] = key_pair.private_key
    return formatted_key_pair
Example #27
 def get_nova_ip_by_public_ip(self,
                              context,
                              public_ip,
                              nova_floating_ips=None):
     if nova_floating_ips is None:
         nova = clients.nova(context)
         nova_floating_ips = nova.floating_ips.list()
     nova_ip = next((ip for ip in nova_floating_ips if ip.ip == public_ip),
                    None)
     if nova_ip is None:
         msg = _("The address '%(public_ip)s' does not belong to you.")
         raise exception.AuthFailure(msg % {'public_ip': public_ip})
     return nova_ip
Example #28
 def delete_group(self,
                  context,
                  group_name=None,
                  group_id=None,
                  delete_default=False):
     nova = clients.nova(context)
     os_id = self.get_group_os_id(context, group_id, group_name)
     try:
         nova.security_groups.delete(os_id)
     except Exception as ex:
         # TODO(Alex): do log error
         # nova doesn't differentiate Conflict exception like neutron does
         pass
Example #29
 def authorize_security_group(self, context, rule_body):
     nova = clients.nova(context)
     try:
         os_security_group_rule = nova.security_group_rules.create(
             rule_body['security_group_id'], rule_body.get('protocol'),
             rule_body.get('port_range_min', -1),
             rule_body.get('port_range_max', -1),
             rule_body.get('remote_ip_prefix'),
             rule_body.get('remote_group_id'))
     except nova_exception.Conflict:
         raise exception.InvalidPermissionDuplicate()
     except nova_exception.OverLimit:
         raise exception.RulesPerSecurityGroupLimitExceeded()
Example #30
 def authorize_security_group(self, context, rule_body):
     nova = clients.nova(context)
     try:
         os_security_group_rule = nova.security_group_rules.create(
             rule_body['security_group_id'],
             rule_body.get('protocol'),
             rule_body.get('port_range_min', -1),
             rule_body.get('port_range_max', -1),
             rule_body.get('remote_ip_prefix'),
             rule_body.get('remote_group_id'))
     except nova_exception.Conflict:
         raise exception.InvalidPermissionDuplicate()
     except nova_exception.OverLimit:
         raise exception.RulesPerSecurityGroupLimitExceeded()
Example #31
def import_key_pair(context, key_name, public_key_material):
    _validate_name(key_name)
    if not public_key_material:
        raise exception.MissingParameter(
            _('The request must contain the parameter PublicKeyMaterial'))
    nova = clients.nova(context)
    public_key = base64.b64decode(public_key_material).decode("utf-8")
    try:
        key_pair = nova.keypairs.create(key_name, public_key)
    except nova_exception.OverLimit:
        raise exception.ResourceLimitExceeded(resource='keypairs')
    except nova_exception.Conflict:
        raise exception.InvalidKeyPairDuplicate(key_name=key_name)
    return _format_key_pair(key_pair)
Example #33
 def associate_address(self,
                       context,
                       public_ip=None,
                       instance_id=None,
                       allocation_id=None,
                       network_interface_id=None,
                       private_ip_address=None,
                       allow_reassociation=False):
     os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
     # NOTE(ft): check the public IP exists to raise AWS exception otherwise
     self.get_nova_ip_by_public_ip(context, public_ip)
     nova = clients.nova(context)
     nova.servers.add_floating_ip(os_instance_id, public_ip)
     return None
Example #34
def show_delete_on_termination_flag(context, volume_id):
    #volume = ec2utils.get_db_item(context, volume_id)
    #if not volume:
    #    _msg = ("No volume found corresponding to volume_id=" + volume_id)
    #    raise exception.InvalidRequest(_msg)
    #volume_id = volume['os_id']
    nova = clients.nova(context)
    try:
        response = nova.volumes.show_delete_on_termination_flag(volume_id)
        format_delete_on_termination_response(response, context)

        return {"volume": response._info}
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(anant): raise correct errors for different cases
        raise exception.UnsupportedOperation()
Example #35
def get_os_instance_and_project_id(context, fixed_ip):
    try:
        nova = clients.nova(context)
        os_address = nova.fixed_ips.get(fixed_ip)
        os_instances = nova.servers.list(
                search_opts={'hostname': os_address.hostname,
                             'all_tenants': True})
        return next((os_instance.id, os_instance.tenant_id)
                    for os_instance in os_instances
                    if any((addr['addr'] == fixed_ip and
                            addr['OS-EXT-IPS:type'] == 'fixed')
                           for addr in itertools.chain(
                                 *os_instance.addresses.values())))
    except (nova_exception.NotFound, StopIteration):
        raise exception.EC2MetadataNotFound()
Example #36
def _create_security_group(context, group_name, group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(group_name,
                                                            group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg',
                                         {'vpc_id': vpc_id,
                                          'os_id': os_security_group.id})
        return {'return': 'true',
                'groupId': security_group['id']}
Example #37
def attach_volume(context, volume_id, instance_id, device):
    #volume = ec2utils.get_db_item(context, volume_id)
    ec2utils.validate_device_name(str(device).lower())
    instance = ec2utils.get_db_item(context, instance_id)

    nova = clients.nova(context)
    try:
        nova.volumes.create_server_volume(instance['os_id'], volume_id, device)
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(andrey-mp): raise correct errors for different cases
        raise exception.UnsupportedOperation()
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume_id)
    # [varun]: Sending delete on termination as false (last param below)
    # when volume is attached delete on termination flag will be false by
    # default therefore sending false to make consistent with AWS
    return _format_attachment(context,
                              os_volume,
                              instance_id=instance_id,
                              delete_on_termination_flag=False)
Example #38
def _create_security_group(context,
                           group_name,
                           group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg', {
            'vpc_id': vpc_id,
            'os_id': os_security_group.id
        })
        return {'return': 'true', 'groupId': security_group['id']}
Example #39
def _describe_verbose(context):
    nova = clients.nova(context)
    availability_zones = nova.availability_zones.list()

    formatted_availability_zones = []
    for availability_zone in availability_zones:
        formatted_availability_zones.append(
            _format_availability_zone(availability_zone))
        for host, services in availability_zone.hosts.items():
            formatted_availability_zones.append(
                {'zoneName': '|- %s' % host,
                 'zoneState': ''})
            for service, values in services.items():
                active = ":-)" if values['active'] else "XXX"
                enabled = 'enabled' if values['available'] else 'disabled'
                formatted_availability_zones.append(
                    {'zoneName': '| |- %s' % service,
                     'zoneState': ('%s %s %s' % (enabled, active,
                                                 values['updated_at']))})

    return {'availabilityZoneInfo': formatted_availability_zones}
Example #40
def _describe_verbose(context):
    nova = clients.nova(context)
    availability_zones = nova.availability_zones.list()

    formatted_availability_zones = []
    for availability_zone in availability_zones:
        formatted_availability_zones.append(
            _format_availability_zone(availability_zone))
        for host, services in availability_zone.hosts.items():
            formatted_availability_zones.append({
                'zoneName': '|- %s' % host,
                'zoneState': ''
            })
            for service, values in services.items():
                active = ":-)" if values['active'] else "XXX"
                enabled = 'enabled' if values['available'] else 'disabled'
                formatted_availability_zones.append({
                    'zoneName': '| |- %s' % service,
                    'zoneState': '%s %s %s' % (enabled, active,
                                               values['updated_at'])
                })

    return {'availabilityZoneInfo': formatted_availability_zones}
Example #41
File: api.py  Project: openstack/ec2-api
def _build_metadata(context, ec2_instance, ec2_reservation,
                    os_instance_id, remote_ip):
    metadata = {
        'ami-id': ec2_instance['imageId'],
        'ami-launch-index': ec2_instance['amiLaunchIndex'],
        # NOTE (ft): the fake value as it is in Nova EC2 metadata
        'ami-manifest-path': 'FIXME',
        # NOTE (ft): empty value as it is in Nova EC2 metadata
        'ancestor-ami-ids': [],
        'block-device-mapping': _build_block_device_mappings(context,
                                                             ec2_instance,
                                                             os_instance_id),
        # NOTE(ft): Nova EC2 metadata returns instance's hostname with
        # dhcp_domain suffix if it's set in config.
        # But I don't see any reason to return a hostname that differs from
        # the one in the EC2 describe output. If we need to consider the
        # dhcp_domain suffix, we should do it in the describe operation.
        'hostname': ec2_instance['privateDnsName'],
        # NOTE (ft): the fake value as it is in Nova EC2 metadata
        'instance-action': 'none',
        'instance-id': ec2_instance['instanceId'],
        'instance-type': ec2_instance['instanceType'],
        'local-hostname': ec2_instance['privateDnsName'],
        'local-ipv4': ec2_instance['privateIpAddress'] or remote_ip,
        'placement': {
            'availability-zone': ec2_instance['placement']['availabilityZone']
        },
        # NOTE (ft): empty value as it is in Nova EC2 metadata
        'product-codes': [],
        'public-hostname': ec2_instance['dnsName'],
        'public-ipv4': ec2_instance.get('ipAddress', ''),
        'reservation-id': ec2_reservation['reservationId'],
        'security-groups': [sg['groupName']
                            for sg in ec2_reservation.get('groupSet', [])],
    }
    if 'kernelId' in ec2_instance:
        metadata['kernel-id'] = ec2_instance['kernelId']
    if 'ramdiskId' in ec2_instance:
        metadata['ramdisk-id'] = ec2_instance['ramdiskId']
    # public keys are strangely rendered in ec2 metadata service
    #  meta-data/public-keys/ returns '0=keyname' (with no trailing /)
    # and only if there is a public key given.
    # '0=keyname' means there is a normally rendered dict at
    #  meta-data/public-keys/0
    #
    # meta-data/public-keys/ : '0=%s' % keyname
    # meta-data/public-keys/0/ : 'openssh-key'
    # meta-data/public-keys/0/openssh-key : '%s' % publickey
    if ec2_instance['keyName']:
        metadata['public-keys'] = {
            '0': {'_name': "0=" + ec2_instance['keyName']}}
        nova = clients.nova(context)
        os_instance = nova.servers.get(os_instance_id)
        try:
            keypair = nova.keypairs._get(
                '/%s/%s?user_id=%s' % (nova.keypairs.keypair_prefix,
                                       ec2_instance['keyName'],
                                       os_instance.user_id),
                'keypair')
        except nova_exception.NotFound:
            pass
        else:
            metadata['public-keys']['0']['openssh-key'] = keypair.public_key

    full_metadata = {'meta-data': metadata}

    userdata = instance_api.describe_instance_attribute(
                    context, ec2_instance['instanceId'], 'userData')
    if 'userData' in userdata:
        userdata = userdata['userData']['value']
        userdata = base64.b64decode(userdata)
        userdata = userdata.decode("utf-8")
        full_metadata['user-data'] = userdata

    return full_metadata
Example #42
 def get_os_groups(self, context):
     nova = clients.nova(context)
     return self.convert_groups_to_neutron_format(
                     context,
                     nova.security_groups.list())
Example #43
 def delete_os_group_rule(self, context, os_id):
     nova = clients.nova(context)
     nova.security_group_rules.delete(os_id)
Example #44
 def get_os_floating_ips(self, context):
     nova = clients.nova(context)
     return self.convert_ips_to_neutron_format(context,
                                               nova.floating_ips.list())
Example #45
def create_image(context,
                 instance_id,
                 name=None,
                 description=None,
                 no_reboot=False,
                 block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait instance for really stopped
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image['id'],
                            exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'), {
                                'i_id': instance['id'],
                                'image_id': image['id']
                            },
                        exc_info=True)

    image = {'is_public': False, 'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container format
        # for snapshots of volume backed instances, so that it is 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
Example #46
def _format_route_table(context, route_table, is_main=False,
                        associated_subnet_ids=[],
                        gateways={},
                        network_interfaces={},
                        vpn_connections_by_gateway_id={}):
    vpc_id = route_table['vpc_id']
    ec2_route_table = {
        'routeTableId': route_table['id'],
        'vpcId': vpc_id,
        'routeSet': [],
        'propagatingVgwSet': [
            {'gatewayId': vgw_id}
            for vgw_id in route_table.get('propagating_gateways', [])],
        # NOTE(ft): AWS returns empty tag set for a route table
        # if no tag exists
        'tagSet': [],
    }
    # TODO(ft): refactor to get Nova instances outside of this function
    nova = clients.nova(context)
    for route in route_table['routes']:
        origin = ('CreateRouteTable'
                  if route.get('gateway_id', 0) is None else
                  'CreateRoute')
        ec2_route = {'destinationCidrBlock': route['destination_cidr_block'],
                     'origin': origin}
        if 'gateway_id' in route:
            gateway_id = route['gateway_id']
            if gateway_id is None:
                state = 'active'
                ec2_gateway_id = 'local'
            else:
                gateway = gateways.get(gateway_id)
                state = ('active'
                         if gateway and gateway.get('vpc_id') == vpc_id else
                         'blackhole')
                ec2_gateway_id = gateway_id
            ec2_route.update({'gatewayId': ec2_gateway_id,
                              'state': state})
        else:
            network_interface_id = route['network_interface_id']
            network_interface = network_interfaces.get(network_interface_id)
            instance_id = (network_interface.get('instance_id')
                           if network_interface else
                           None)
            state = 'blackhole'
            if instance_id:
                instance = db_api.get_item_by_id(context, instance_id)
                if instance:
                    try:
                        os_instance = nova.servers.get(instance['os_id'])
                        if os_instance and os_instance.status == 'ACTIVE':
                            state = 'active'
                    except nova_exception.NotFound:
                        pass
                ec2_route.update({'instanceId': instance_id,
                                  'instanceOwnerId': context.project_id})
            ec2_route.update({'networkInterfaceId': network_interface_id,
                              'state': state})
        ec2_route_table['routeSet'].append(ec2_route)

    for vgw_id in route_table.get('propagating_gateways', []):
        vgw = gateways.get(vgw_id)
        if vgw and vgw_id in vpn_connections_by_gateway_id:
            cidrs = set()
            vpn_connections = vpn_connections_by_gateway_id[vgw_id]
            for vpn_connection in vpn_connections:
                cidrs.update(vpn_connection['cidrs'])
            state = 'active' if vgw['vpc_id'] == vpc_id else 'blackhole'
            for cidr in cidrs:
                ec2_route = {'gatewayId': vgw_id,
                             'destinationCidrBlock': cidr,
                             'state': state,
                             'origin': 'EnableVgwRoutePropagation'}
                ec2_route_table['routeSet'].append(ec2_route)

    associations = []
    if is_main:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(vpc_id,
                                                                   'rtbassoc'),
            'routeTableId': route_table['id'],
            'main': True})
    for subnet_id in associated_subnet_ids:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(subnet_id,
                                                                   'rtbassoc'),
            'routeTableId': route_table['id'],
            'subnetId': subnet_id,
            'main': False})
    if associations:
        ec2_route_table['associationSet'] = associations

    return ec2_route_table
Example #47
 def release_address(self, context, public_ip, allocation_id):
     nova = clients.nova(context)
     nova.floating_ips.delete(
         self.get_nova_ip_by_public_ip(context, public_ip).id)
Example #48
def _format_route_table(context,
                        route_table,
                        is_main=False,
                        associated_subnet_ids=[],
                        gateways={},
                        network_interfaces={},
                        vpn_connections_by_gateway_id={}):
    vpc_id = route_table['vpc_id']
    ec2_route_table = {
        'routeTableId': route_table['id'],
        'vpcId': vpc_id,
        'routeSet': [],
        'propagatingVgwSet': [
            {'gatewayId': vgw_id}
            for vgw_id in route_table.get('propagating_gateways', [])],
        # NOTE(ft): AWS returns empty tag set for a route table
        # if no tag exists
        'tagSet': [],
    }
    # TODO(ft): refactor to get Nova instances outside of this function
    nova = clients.nova(context)
    for route in route_table['routes']:
        origin = ('CreateRouteTable'
                  if route.get('gateway_id', 0) is None else 'CreateRoute')
        ec2_route = {
            'destinationCidrBlock': route['destination_cidr_block'],
            'origin': origin
        }
        if 'gateway_id' in route:
            gateway_id = route['gateway_id']
            if gateway_id is None:
                state = 'active'
                ec2_gateway_id = 'local'
            else:
                gateway = gateways.get(gateway_id)
                state = ('active' if gateway
                         and gateway.get('vpc_id') == vpc_id else 'blackhole')
                ec2_gateway_id = gateway_id
            ec2_route.update({'gatewayId': ec2_gateway_id, 'state': state})
        else:
            network_interface_id = route['network_interface_id']
            network_interface = network_interfaces.get(network_interface_id)
            instance_id = (network_interface.get('instance_id')
                           if network_interface else None)
            state = 'blackhole'
            if instance_id:
                instance = db_api.get_item_by_id(context, instance_id)
                if instance:
                    try:
                        os_instance = nova.servers.get(instance['os_id'])
                        if os_instance and os_instance.status == 'ACTIVE':
                            state = 'active'
                    except nova_exception.NotFound:
                        pass
                ec2_route.update({
                    'instanceId': instance_id,
                    'instanceOwnerId': context.project_id
                })
            ec2_route.update({
                'networkInterfaceId': network_interface_id,
                'state': state
            })
        ec2_route_table['routeSet'].append(ec2_route)

    for vgw_id in route_table.get('propagating_gateways', []):
        vgw = gateways.get(vgw_id)
        if vgw and vgw_id in vpn_connections_by_gateway_id:
            cidrs = set()
            vpn_connections = vpn_connections_by_gateway_id[vgw_id]
            for vpn_connection in vpn_connections:
                cidrs.update(vpn_connection['cidrs'])
            state = 'active' if vgw['vpc_id'] == vpc_id else 'blackhole'
            for cidr in cidrs:
                ec2_route = {
                    'gatewayId': vgw_id,
                    'destinationCidrBlock': cidr,
                    'state': state,
                    'origin': 'EnableVgwRoutePropagation'
                }
                ec2_route_table['routeSet'].append(ec2_route)

    associations = []
    if is_main:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(vpc_id,
                                                                   'rtbassoc'),
            'routeTableId': route_table['id'],
            'main': True
        })
    for subnet_id in associated_subnet_ids:
        associations.append({
            'routeTableAssociationId': ec2utils.change_ec2_id_kind(subnet_id,
                                                                   'rtbassoc'),
            'routeTableId': route_table['id'],
            'subnetId': subnet_id,
            'main': False
        })
    if associations:
        ec2_route_table['associationSet'] = associations

    return ec2_route_table
Example #49
 def release_address(self, context, public_ip, allocation_id):
     nova = clients.nova(context)
     nova.floating_ips.delete(self.get_nova_ip_by_public_ip(context,
                                                            public_ip).id)
Example #50
def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait instance for really stopped
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image['id'], exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'),
                        {'i_id': instance['id'],
                         'image_id': image['id']},
                        exc_info=True)

    image = {'is_public': False,
             'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container format
        # for snapshots of volume backed instances, so that it is 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
Example #51
 def get_max_instances():
     nova = clients.nova(context)
     quotas = nova.quotas.get(context.project_id, context.user_id)
     return quotas.instances
Example #52
def create_image(context,
                 instance_id,
                 name=None,
                 description=None,
                 no_reboot=False,
                 block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True
        os_instance.stop()

        # wait instance for really stopped
        start_time = time.time()
        while os_instance.status != 'SHUTOFF':
            time.sleep(1)
            os_instance.get()
            # NOTE(yamahata): timeout and error. 1 hour for now for safety.
            #                 Is it too short/long?
            #                 Or is there any better way?
            timeout = 1 * 60 * 60
            if time.time() > start_time + timeout:
                err = _("Couldn't stop instance within %d sec") % timeout
                raise exception.EC2Exception(message=err)

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    glance = clients.glance(context)
    with common.OnCrashCleaner() as cleaner:
        os_image_id = os_instance.create_image(name)
        cleaner.addCleanup(glance.images.delete, os_image_id)
        # TODO(andrey-mp): snapshot and volume also must be deleted in case
        # of error
        os_image = glance.images.get(os_image_id)
        image = db_api.add_item(context, _get_os_image_kind(os_image), {
            'os_id': os_image_id,
            'is_public': False,
            'description': description
        })

    if restart_instance:
        os_instance.start()

    return {'imageId': image['id']}