Code example #1
 def test_add_item_isolation(self):
     os_id = fakes.random_os_id()
     db_api.add_item(self.context, 'fake', {'os_id': os_id})
     self.assertRaises(orm_exception.NoResultFound, db_api.add_item,
                       self.context, 'fake1', {'os_id': os_id})
     self.assertRaises(orm_exception.NoResultFound, db_api.add_item,
                       self.other_context, 'fake', {'os_id': os_id})
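This test suggests that add_item scopes its os_id lookup by both kind and project: once the os_id is registered under kind 'fake' for self.context, adding it under another kind ('fake1') or from another project's context makes the underlying single-item query fail with NoResultFound instead of silently updating the first item.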
Code example #2
 def auto_update_db(self, image, os_image):
     if not image:
         kind = _get_os_image_kind(os_image)
         if self.context.project_id == os_image.owner:
             image = ec2utils.get_db_item_by_os_id(self.context,
                                                   kind,
                                                   os_image.id,
                                                   self.items_dict,
                                                   os_image=os_image)
         else:
             image_id = ec2utils.os_id_to_ec2_id(
                 self.context,
                 kind,
                 os_image.id,
                 items_by_os_id=self.items_dict,
                 ids_by_os_id=self.ids_dict)
             image = {'id': image_id, 'os_id': os_image.id}
     elif (self.context.project_id == os_image.owner
           and image.get('is_public') != os_image.is_public):
         image['is_public'] = os_image.is_public
         if image['id'] in self.local_images_os_ids:
             db_api.update_item(self.context, image)
         else:
             # TODO(ft): currently update_item cannot update the id mapping,
             # because its project_id is None. Instead of upgrading db_api,
             # we use add_item. But its execution leads to a needless
             # DB call. This should be reworked in the future.
             kind = ec2utils.get_ec2_id_kind(image['id'])
             db_api.add_item(self.context, kind, image)
     return image
Code example #3
File: image.py Project: jpoley/ec2-api
 def auto_update_db(self, image, os_image):
     if not image:
         kind = _get_os_image_kind(os_image)
         if self.context.project_id == os_image.owner:
             if os_image.properties.get('ec2_id') in self.pending_images:
                 # NOTE(ft): the image is being created; Glance has created
                 # the image, but the creating thread hasn't updated the db
                 # item yet
                 image = self.pending_images[os_image.properties['ec2_id']]
                 image['os_id'] = os_image.id
                 image['is_public'] = os_image.is_public
                 db_api.update_item(self.context, image)
             else:
                 image = ec2utils.get_db_item_by_os_id(
                     self.context, kind, os_image.id, self.items_dict,
                     os_image=os_image)
         else:
             image_id = ec2utils.os_id_to_ec2_id(
                 self.context, kind, os_image.id,
                 items_by_os_id=self.items_dict, ids_by_os_id=self.ids_dict)
             image = {'id': image_id,
                      'os_id': os_image.id}
     elif (self.context.project_id == os_image.owner and
             image.get('is_public') != os_image.is_public):
         image['is_public'] = os_image.is_public
         if image['id'] in self.local_images_os_ids:
             db_api.update_item(self.context, image)
         else:
             # TODO(ft): currently update_item cannot update the id mapping,
             # because its project_id is None. Instead of upgrading db_api,
             # we use add_item. But its execution leads to a needless
             # DB call. This should be reworked in the future.
             kind = ec2utils.get_ec2_id_kind(image['id'])
             db_api.add_item(self.context, kind, image)
     return image
Code example #4
 def test_add_item_isolation(self):
     os_id = fakes.random_os_id()
     db_api.add_item(self.context, 'fake', {'os_id': os_id})
     self.assertRaises(
             orm_exception.NoResultFound,
             db_api.add_item, self.context, 'fake1', {'os_id': os_id})
     self.assertRaises(
             orm_exception.NoResultFound,
             db_api.add_item, self.other_context, 'fake', {'os_id': os_id})
Code example #5
    def test_delete_item(self):
        item = db_api.add_item(self.context, 'fake', {})
        db_api.delete_item(self.context, item['id'])
        item = db_api.get_item_by_id(self.context, item['id'])
        self.assertIsNone(item)

        # NOTE(ft): deleting a non-existent item should pass quietly
        db_api.delete_item(self.context, fakes.random_ec2_id('fake'))

        item = db_api.add_item(self.context, 'fake', {})
        db_api.delete_item(self.other_context, item['id'])
        item = db_api.get_item_by_id(self.context, item['id'])
        self.assertIsNotNone(item)
Code example #6
def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in [
            'available', 'in-use', 'attaching', 'detaching'
    ]:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(
            os_volume.id, force=True, display_description=description)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'])

    return _format_snapshot(context,
                            snapshot,
                            os_snapshot,
                            volume_id=volume_id)
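Most creation paths in these examples pair db_api.add_item with common.OnCrashCleaner. A minimal sketch of the assumed contract (not the project's actual implementation): cleanups registered with addCleanup run in reverse order only when the with block exits with an exception, so half-created OpenStack resources and their DB items are rolled back together.

# Sketch only; assumes cleanups run solely on exceptions. The real
# ec2-api class may offer more than this.
class OnCrashCleaner(object):
    def __init__(self):
        self._cleanups = []

    def __enter__(self):
        return self

    def addCleanup(self, func, *args, **kwargs):
        # Record an undo action for a resource created inside the block.
        self._cleanups.append((func, args, kwargs))

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Crash: undo in reverse creation order.
            for func, args, kwargs in reversed(self._cleanups):
                func(*args, **kwargs)
        return False  # always propagate the original exception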
Code example #7
def create_customer_gateway(context, ip_address, type, bgp_asn=None):
    if bgp_asn and bgp_asn != DEFAULT_BGP_ASN:
        raise exception.Unsupported("BGP dynamic routing is unsupported")
    customer_gateway = next((cgw for cgw in db_api.get_items(context, "cgw")
                             if cgw["ip_address"] == ip_address), None)
    if not customer_gateway:
        customer_gateway = db_api.add_item(context, "cgw",
                                           {"ip_address": ip_address})
    return {"customerGateway": _format_customer_gateway(customer_gateway)}
Code example #8
def create_volume(context,
                  availability_zone=None,
                  size=None,
                  snapshot_id=None,
                  volume_type=None,
                  iops=None,
                  encrypted=None,
                  kms_key_id=None):
    if snapshot_id is not None:
        snapshot = ec2utils.get_db_item(context, snapshot_id)
        os_snapshot_id = snapshot['os_id']
    else:
        os_snapshot_id = None

    cinder = clients.cinder(context)
    with common.OnCrashCleaner() as cleaner:
        os_volume = cinder.volumes.create(size,
                                          snapshot_id=os_snapshot_id,
                                          volume_type=volume_type,
                                          availability_zone=availability_zone)
        cleaner.addCleanup(os_volume.delete)

        volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
        cleaner.addCleanup(db_api.delete_item, context, volume['id'])
        os_volume.update(display_name=volume['id'])

    return _format_volume(context, volume, os_volume, snapshot_id=snapshot_id)
Code example #9
def create_security_group(context, group_name, group_description, vpc_id=None):
    nova = clients.nova(context)
    if vpc_id and group_name != vpc_id:
        security_groups = describe_security_groups(
            context,
            filter=[{'name': 'vpc-id', 'value': [vpc_id]},
                    {'name': 'group-name',
                     'value': [group_name]}])['securityGroupInfo']
        if security_groups:
            raise exception.InvalidGroupDuplicate(name=group_name)
    with common.OnCrashCleaner() as cleaner:
        try:
            # TODO(Alex): Shouldn't allow creation of groups with existing
            # name if in the same VPC or in EC2-Classic.
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
            security_group = db_api.add_item(context, 'sg', {
                'vpc_id': vpc_id,
                'os_id': os_security_group.id
            })
            return {'return': 'true', 'groupId': security_group['id']}
    return {'return': 'true'}
Code example #10
def create_dhcp_options(context, dhcp_configuration):
    dhcp_options = {}
    for dhcp_option in dhcp_configuration:
        key = dhcp_option['key']
        values = dhcp_option['value']
        if key not in DHCP_OPTIONS_MAP:
            raise exception.InvalidParameterValue(
                value=values,
                parameter=key,
                reason='Unrecognized key is specified')
        if not isinstance(values, list):
            raise exception.InvalidParameterValue(
                value=values,
                parameter=key,
                reason='List of values is expected')
        if key not in ['domain-name', 'netbios-node-type']:
            ips = []
            for ip in values:
                # netaddr raises AddrFormatError for malformed addresses
                try:
                    netaddr.IPAddress(ip)
                except netaddr.AddrFormatError:
                    raise exception.InvalidParameterValue(
                        value=ip,
                        parameter=key,
                        reason='Invalid list of IPs is specified')
                ips.append(ip)
            dhcp_options[key] = ips
        else:
            dhcp_options[key] = values
    dhcp_options = db_api.add_item(context, 'dopt',
                                   {'dhcp_configuration': dhcp_options})
    return {'dhcpOptions': _format_dhcp_options(context, dhcp_options)}
Code example #11
File: security_group.py Project: openstack/ec2-api
def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            secgroup_body = (
                {'security_group': {'name': group_name,
                                    'description': group_description}})
            os_security_group = neutron.create_security_group(
                secgroup_body)['security_group']
        except neutron_exception.OverQuotaClient:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(neutron.delete_security_group,
                           os_security_group['id'])
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group['id']}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true',
                'groupId': security_group['id']}
Code example #12
def auto_create_db_item(context, kind, os_id, project_id=None,
                        **extension_kwargs):
    item = {'os_id': os_id}
    extension = _auto_create_db_item_extensions.get(kind)
    if extension:
        extension(context, item, **extension_kwargs)
    return db_api.add_item(context, kind, item, project_id=project_id)
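The per-kind hooks in _auto_create_db_item_extensions can enrich the item before it is persisted, with extension_kwargs forwarded untouched. A hypothetical registration, purely illustrative (neither _extend_security_group nor the 'sg' entry below is taken from the project):

# Hypothetical hook: add extra attributes to the item dict before
# db_api.add_item persists it; whatever is set here gets stored.
def _extend_security_group(context, item, vpc_id=None):
    item['vpc_id'] = vpc_id

_auto_create_db_item_extensions = {'sg': _extend_security_group}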
Code example #13
File: security_group.py Project: Pansanel/ec2-api
def _create_security_group(context,
                           group_name,
                           group_description,
                           vpc_id=None,
                           default=False):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group.id}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true', 'groupId': security_group['id']}
Code example #14
File: dhcp_options.py Project: vichusharma/ec2-api
def create_dhcp_options(context, dhcp_configuration):
    dhcp_options = {}
    for dhcp_option in dhcp_configuration:
        key = dhcp_option['key']
        values = dhcp_option['value']
        if key not in DHCP_OPTIONS_MAP:
            raise exception.InvalidParameterValue(
                value=values,
                parameter=key,
                reason='Unrecognized key is specified')
        if not isinstance(values, list):
            raise exception.InvalidParameterValue(
                value=values,
                parameter=key,
                reason='List of values is expected')
        if key not in ['domain-name', 'netbios-node-type']:
            ips = []
            for ip in values:
                # netaddr raises AddrFormatError for malformed addresses
                try:
                    netaddr.IPAddress(ip)
                except netaddr.AddrFormatError:
                    raise exception.InvalidParameterValue(
                        value=ip,
                        parameter=key,
                        reason='Invalid list of IPs is specified')
                ips.append(ip)
            dhcp_options[key] = ips
        else:
            dhcp_options[key] = values
    dhcp_options = db_api.add_item(context, 'dopt',
                                   {'dhcp_configuration': dhcp_options})
    return {'dhcpOptions': _format_dhcp_options(context, dhcp_options)}
Code example #15
    def test_add_item(self):
        new_item = {
            'os_id': fakes.random_os_id(),
            'vpc_id': fakes.random_ec2_id('fake_vpc'),
            'str_attr': 'fake_str',
            'int_attr': 1234,
            'bool_attr': True,
            'dict_attr': {
                'key1': 'val1',
                'key2': 'val2'
            },
            'list_attr': ['fake_str', 1234, True, {
                'key': 'val'
            }, []]
        }
        item = db_api.add_item(self.context, 'fake', new_item)
        self.assertIn('id', item)
        self.assertIsNotNone(item['id'])
        item_id = item.pop('id')
        self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake']))
        self.assertThat(item,
                        matchers.DictMatches(new_item, orderless_lists=True))

        item = db_api.get_item_by_id(self.context, item_id)
        new_item['id'] = item_id
        self.assertThat(item,
                        matchers.DictMatches(new_item, orderless_lists=True))
Code example #16
File: test_db_api.py Project: zeus911/ec2-api
 def test_restore_item(self):
     os_id = fakes.random_os_id()
     item = {'os_id': os_id, 'key': 'val1'}
     new_item = db_api.add_item(self.context, 'fake', item)
     item['id'] = new_item['id']
     self.assertRaises(exception.EC2DBDuplicateEntry, db_api.restore_item,
                       self.context, 'fake', item)
Code example #17
File: test_db_api.py Project: JioCloudVPC/ec2-api
 def test_restore_item(self):
     os_id = fakes.random_os_id()
     item = {'os_id': os_id, 'key': 'val1'}
     new_item = db_api.add_item(self.context, 'fake', item)
     item['id'] = new_item['id']
     self.assertRaises(
         exception.EC2DBDuplicateEntry,
         db_api.restore_item, self.context, 'fake', item)
Code example #18
File: customer_gateway.py Project: zeus911/ec2-api
def create_customer_gateway(context, ip_address, type, bgp_asn=None):
    if bgp_asn and bgp_asn != DEFAULT_BGP_ASN:
        raise exception.Unsupported("BGP dynamic routing is unsupported")
    customer_gateway = next((cgw for cgw in db_api.get_items(context, 'cgw')
                             if cgw['ip_address'] == ip_address), None)
    if not customer_gateway:
        customer_gateway = db_api.add_item(context, 'cgw',
                                           {'ip_address': ip_address})
    return {'customerGateway': _format_customer_gateway(customer_gateway)}
Code example #19
def create_subnet(context, vpc_id, cidr_block, availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    gateway_ip = str(netaddr.IPAddress(subnet_ipnet.first + 1))
    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    host_routes = route_table_api._get_subnet_host_routes(
        context, main_route_table, gateway_ip)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_network_body = {'network': {}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS reserves the first 4 addresses (.1 - .4), but
            # for OpenStack we decided not to support this for compatibility.
            os_subnet_body = {
                'subnet': {
                    'network_id': os_network['id'],
                    'ip_version': '4',
                    'cidr': cidr_block,
                    'host_routes': host_routes
                }
            }
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router, vpc['os_id'],
                           {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet', {
            'os_id': os_subnet['id'],
            'vpc_id': vpc['id']
        })
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        neutron.update_network(os_network['id'],
                               {'network': {
                                   'name': subnet['id']
                               }})
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {
                                  'name': subnet['id']
                              }})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {
        'subnet': _format_subnet(context, subnet, os_subnet, os_network,
                                 os_ports)
    }
Code example #20
 def test_add_item_with_same_os_id(self):
     # NOTE(ft): check normal update item on add
     os_id = fakes.random_os_id()
     item1 = db_api.add_item(self.context, 'fake',
                             {'os_id': os_id,
                              'key': 'val1',
                              'key1': 'val'})
     item_id = item1['id']
     item2 = db_api.add_item(self.context, 'fake',
                             {'os_id': os_id,
                              'key': 'val2',
                              'key2': 'val'})
     expected_item = {'id': item_id,
                      'os_id': os_id,
                      'vpc_id': None,
                      'key': 'val2',
                      'key1': 'val',
                      'key2': 'val'}
     self.assertThat(item2, matchers.DictMatches(expected_item))
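The expected result documents the merge semantics of add_item when an item with the same kind, project, and os_id already exists: attributes from the new call win on conflict ('key' becomes 'val2'), attributes unique to either side ('key1', 'key2') are both kept, and the original id is preserved.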
Code example #21
 def do_check(new_item):
     item = db_api.add_item(self.context, 'fake', new_item)
     item_id = item.pop('id')
     if 'id' in new_item:
         new_item_id = new_item.pop('id')
         self.assertNotEqual(new_item_id, item_id)
     new_item.setdefault('os_id', None)
     new_item.setdefault('vpc_id', None)
     self.assertThat(item, matchers.DictMatches(new_item,
                                                orderless_lists=True))
Code example #22
 def do_check(new_item):
     item = db_api.add_item(self.context, 'fake', new_item)
     item_id = item.pop('id')
     if 'id' in new_item:
         new_item_id = new_item.pop('id')
         self.assertNotEqual(new_item_id, item_id)
     new_item.setdefault('os_id', None)
     new_item.setdefault('vpc_id', None)
     self.assertThat(
         item, matchers.DictMatches(new_item, orderless_lists=True))
Code example #23
def _create_route_table(context, vpc):
    route_table = {'vpc_id': vpc['id'],
                   'routes': [{'destination_cidr_block': vpc['cidr_block'],
                               'gateway_id': None}]}
    route_table = db_api.add_item(context, 'rtb', route_table)
    return route_table
Code example #24
File: subnet.py Project: JioCloudVPC/ec2-api
def create_subnet(context, vpc_id, cidr_block,
                  availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
            context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        # NOTE(andrey-mp): set fake name to filter networks in instance api
        os_network_body = {'network': {'name': 'subnet-0'}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS reserves the first 4 addresses (.1 - .4), but
            # for OpenStack we decided not to support this for compatibility.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which the subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}
Code example #25
def create_subnet(context, vpc_id, cidr_block,
                  availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
            context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_network_body = {'network': {}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS reserves the first 4 addresses (.1 - .4), but
            # for OpenStack we decided not to support this for compatibility.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which the subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}
Code example #26
File: test_db_api.py Project: zeus911/ec2-api
 def test_update_item_os_id(self):
     item = db_api.add_item(self.context, 'fake', {})
     item['os_id'] = 'fake_os_id'
     db_api.update_item(self.context, item)
     item = db_api.get_item_by_id(self.context, item['id'])
     self.assertThat({'os_id': 'fake_os_id'}, matchers.IsSubDictOf(item))
     item['os_id'] = 'other_fake_os_id'
     self.assertRaises(exception.EC2DBInvalidOsIdUpdate, db_api.update_item,
                       self.context, item)
     item['os_id'] = None
     self.assertRaises(exception.EC2DBInvalidOsIdUpdate, db_api.update_item,
                       self.context, item)
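In other words, update_item treats os_id as write-once: it may be set on an item that has none, but changing it to another value, or clearing it back to None, raises EC2DBInvalidOsIdUpdate.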
Code example #27
 def auto_update_db(self, image, os_image):
     if not image:
         kind = _get_os_image_kind(os_image)
         if self.context.project_id == os_image.owner:
             if getattr(os_image, 'ec2_id', None) in self.pending_images:
                 # NOTE(ft): the image is being created; Glance has created
                 # the image, but the creating thread hasn't updated the db
                 # item yet
                 image = self.pending_images[os_image.ec2_id]
                 image['os_id'] = os_image.id
                 image['is_public'] = os_image.visibility == 'public'
                 db_api.update_item(self.context, image)
             else:
                 image = ec2utils.get_db_item_by_os_id(self.context,
                                                       kind,
                                                       os_image.id,
                                                       self.items_dict,
                                                       os_image=os_image)
         else:
             image_id = ec2utils.os_id_to_ec2_id(
                 self.context,
                 kind,
                 os_image.id,
                 items_by_os_id=self.items_dict,
                 ids_by_os_id=self.ids_dict)
             image = {'id': image_id, 'os_id': os_image.id}
     elif (self.context.project_id == os_image.owner
           and image.get('is_public') != (os_image.visibility == 'public')):
         image['is_public'] = os_image.visibility == 'public'
         if image['id'] in self.local_images_os_ids:
             db_api.update_item(self.context, image)
         else:
             # TODO(ft): currently update_item cannot update the id mapping,
             # because its project_id is None. Instead of upgrading db_api,
             # we use add_item. But its execution leads to a needless
             # DB call. This should be reworked in the future.
             kind = ec2utils.get_ec2_id_kind(image['id'])
             db_api.add_item(self.context, kind, image)
     return image
Code example #28
 def test_update_item(self):
     item = db_api.add_item(self.context, 'fake', {'key': 'val1',
                                                   'key1': 'val'})
     item['key'] = 'val2'
     item.pop('key1')
     item['key2'] = 'val'
     item_id = item['id']
     db_api.update_item(self.context, item)
     item = db_api.get_item_by_id(self.context, item_id)
     self.assertThat(item, matchers.DictMatches({'id': item_id,
                                                 'os_id': None,
                                                 'vpc_id': None,
                                                 'key': 'val2',
                                                 'key2': 'val'}))
Code example #29
 def test_add_item_with_same_os_id(self):
     # NOTE(ft): check normal update item on add
     os_id = fakes.random_os_id()
     item1 = db_api.add_item(self.context, 'fake', {
         'os_id': os_id,
         'key': 'val1',
         'key1': 'val'
     })
     item_id = item1['id']
     item2 = db_api.add_item(self.context, 'fake', {
         'os_id': os_id,
         'key': 'val2',
         'key2': 'val'
     })
     expected_item = {
         'id': item_id,
         'os_id': os_id,
         'vpc_id': None,
         'key': 'val2',
         'key1': 'val',
         'key2': 'val'
     }
     self.assertThat(item2, matchers.DictMatches(expected_item))
Code example #30
 def test_update_item_os_id(self):
     item = db_api.add_item(self.context, 'fake', {})
     item['os_id'] = 'fake_os_id'
     db_api.update_item(self.context, item)
     item = db_api.get_item_by_id(self.context, item['id'])
     self.assertThat({'os_id': 'fake_os_id'},
                     matchers.IsSubDictOf(item))
     item['os_id'] = 'other_fake_os_id'
     self.assertRaises(exception.EC2DBInvalidOsIdUpdate,
                       db_api.update_item,
                       self.context, item)
     item['os_id'] = None
     self.assertRaises(exception.EC2DBInvalidOsIdUpdate,
                       db_api.update_item,
                       self.context, item)
Code example #31
def _create_vpc(context, cidr_block, is_default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_router_body = {'router': {}}
        try:
            os_router = neutron.create_router(os_router_body)['router']
        except neutron_exception.OverQuotaClient:
            raise exception.VpcLimitExceeded()
        cleaner.addCleanup(neutron.delete_router, os_router['id'])
        vpc = db_api.add_item(
            context, 'vpc', {
                'os_id': os_router['id'],
                'cidr_block': cidr_block,
                'is_default': is_default
            })
        cleaner.addCleanup(db_api.delete_item, context, vpc['id'])
        route_table = route_table_api._create_route_table(context, vpc)
        cleaner.addCleanup(route_table_api._delete_route_table, context,
                           route_table['id'])
        vpc['route_table_id'] = route_table['id']
        db_api.update_item(context, vpc)
        neutron.update_router(os_router['id'], {'router': {'name': vpc['id']}})
        sg_id = security_group_api._create_default_security_group(context, vpc)
        cleaner.addCleanup(security_group_api.delete_security_group,
                           context,
                           group_id=sg_id,
                           delete_default=True)
        if is_default:
            igw_id = internet_gateway_api.create_internet_gateway(
                context)['internetGateway']['internetGatewayId']
            cleaner.addCleanup(internet_gateway_api.delete_internet_gateway,
                               context, igw_id)
            internet_gateway_api.attach_internet_gateway(
                context, igw_id, vpc['id'])
            cleaner.addCleanup(internet_gateway_api.detach_internet_gateway,
                               context, igw_id, vpc['id'])
            subnet = subnet_api.create_subnet(
                context, vpc['id'], DEFAULT_SUBNET_CIDR_BLOCK)['subnet']
            cleaner.addCleanup(subnet_api.delete_subnet, context,
                               subnet['subnetId'])
            route_table_api.create_route(context,
                                         route_table['id'],
                                         '0.0.0.0/0',
                                         gateway_id=igw_id)
    return vpc
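Note the two-step write: the vpc item is inserted first so that its generated EC2 id can be passed to _create_route_table, and only then is route_table_id written back with update_item.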
Code example #32
    def test_add_item_id(self):
        os_id = fakes.random_os_id()
        item_id = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake']))
        item = db_api.get_item_by_id(self.context, item_id)
        self.assertIsNone(item)
        item = db_api.add_item(self.context, 'fake', {'os_id': os_id})
        self.assertThat(item, matchers.DictMatches({'id': item_id,
                                                    'os_id': os_id,
                                                    'vpc_id': None}))
        # NOTE(ft): add os_id when item exists
        item_id = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertEqual(item_id, item['id'])

        # NOTE(ft): add os_id when id exists
        os_id = fakes.random_os_id()
        item_id1 = db_api.add_item_id(self.context, 'fake', os_id)
        item_id2 = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertEqual(item_id1, item_id2)
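As the test shows, add_item_id reserves an EC2 id for an os_id before any item exists; a later add_item with the same os_id picks up the reserved id, and repeated add_item_id calls for one os_id are idempotent.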
Code example #33
def _create_security_group(context, group_name, group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(group_name,
                                                            group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg',
                                         {'vpc_id': vpc_id,
                                          'os_id': os_security_group.id})
        return {'return': 'true',
                'groupId': security_group['id']}
Code example #34
 def test_update_item(self):
     item = db_api.add_item(self.context, 'fake', {
         'key': 'val1',
         'key1': 'val'
     })
     item['key'] = 'val2'
     item.pop('key1')
     item['key2'] = 'val'
     item_id = item['id']
     db_api.update_item(self.context, item)
     item = db_api.get_item_by_id(self.context, item_id)
     self.assertThat(
         item,
         matchers.DictMatches({
             'id': item_id,
             'os_id': None,
             'vpc_id': None,
             'key': 'val2',
             'key2': 'val'
         }))
Code example #35
File: security_group.py Project: varunarya10/ec2-api
def _create_security_group(context,
                           group_name,
                           group_description,
                           vpc_id=None):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        security_group = db_api.add_item(context, 'sg', {
            'vpc_id': vpc_id,
            'os_id': os_security_group.id
        })
        return {'return': 'true', 'groupId': security_group['id']}
Code example #36
    def allocate_address(self, context, domain=None):
        if not domain or domain == 'standard':
            return AddressEngineNova().allocate_address(context)
        os_public_network = ec2utils.get_os_public_network(context)
        neutron = clients.neutron(context)

        with common.OnCrashCleaner() as cleaner:
            os_floating_ip = {'floating_network_id': os_public_network['id']}
            try:
                os_floating_ip = neutron.create_floatingip(
                        {'floatingip': os_floating_ip})
            except neutron_exception.OverQuotaClient:
                raise exception.AddressLimitExceeded()
            os_floating_ip = os_floating_ip['floatingip']
            cleaner.addCleanup(neutron.delete_floatingip, os_floating_ip['id'])

            address = {'os_id': os_floating_ip['id'],
                       'public_ip': os_floating_ip['floating_ip_address']}
            address = db_api.add_item(context, 'eipalloc', address)
        return address, os_floating_ip
Code example #37
def create_vpc(context, cidr_block, instance_tenancy='default'):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_router_body = {'router': {}}
        try:
            os_router = neutron.create_router(os_router_body)['router']
        except neutron_exception.OverQuotaClient:
            raise exception.VpcLimitExceeded()
        cleaner.addCleanup(neutron.delete_router, os_router['id'])
        vpc = db_api.add_item(context, 'vpc',
                              {'os_id': os_router['id'],
                               'cidr_block': cidr_block})
        cleaner.addCleanup(db_api.delete_item, context, vpc['id'])
        route_table = route_table_api._create_route_table(context, vpc)
        cleaner.addCleanup(route_table_api._delete_route_table,
                           context, route_table['id'])
        vpc['route_table_id'] = route_table['id']
        db_api.update_item(context, vpc)
        neutron.update_router(os_router['id'], {'router': {'name': vpc['id']}})
        security_group_api._create_default_security_group(context, vpc)
    return {'vpc': _format_vpc(vpc)}
Code example #38
File: volume.py Project: openstack/ec2-api
def create_volume(context, availability_zone=None, size=None,
                  snapshot_id=None, volume_type=None, iops=None,
                  encrypted=None, kms_key_id=None):
    if snapshot_id is not None:
        snapshot = ec2utils.get_db_item(context, snapshot_id)
        os_snapshot_id = snapshot['os_id']
    else:
        os_snapshot_id = None

    cinder = clients.cinder(context)
    with common.OnCrashCleaner() as cleaner:
        os_volume = cinder.volumes.create(
                size, snapshot_id=os_snapshot_id, volume_type=volume_type,
                availability_zone=availability_zone)
        cleaner.addCleanup(os_volume.delete)

        volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
        cleaner.addCleanup(db_api.delete_item, context, volume['id'])
        os_volume.update(display_name=volume['id'])

    return _format_volume(context, volume, os_volume, snapshot_id=snapshot_id)
Code example #39
    def test_add_item(self):
        new_item = {'os_id': fakes.random_os_id(),
                    'vpc_id': fakes.random_ec2_id('fake_vpc'),
                    'str_attr': 'fake_str',
                    'int_attr': 1234,
                    'bool_attr': True,
                    'dict_attr': {'key1': 'val1',
                                  'key2': 'val2'},
                    'list_attr': ['fake_str', 1234, True, {'key': 'val'}, []]}
        item = db_api.add_item(self.context, 'fake', new_item)
        self.assertIn('id', item)
        self.assertIsNotNone(item['id'])
        item_id = item.pop('id')
        self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake']))
        self.assertThat(item, matchers.DictMatches(new_item,
                                                   orderless_lists=True))

        item = db_api.get_item_by_id(self.context, item_id)
        new_item['id'] = item_id
        self.assertThat(item, matchers.DictMatches(new_item,
                                                   orderless_lists=True))
Code example #40
File: vpc.py Project: openstack/ec2-api
def _create_vpc(context, cidr_block, is_default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_router_body = {'router': {}}
        try:
            os_router = neutron.create_router(os_router_body)['router']
        except neutron_exception.OverQuotaClient:
            raise exception.VpcLimitExceeded()
        cleaner.addCleanup(neutron.delete_router, os_router['id'])
        vpc = db_api.add_item(context, 'vpc',
                              {'os_id': os_router['id'],
                               'cidr_block': cidr_block,
                               'is_default': is_default})
        cleaner.addCleanup(db_api.delete_item, context, vpc['id'])
        route_table = route_table_api._create_route_table(context, vpc)
        cleaner.addCleanup(route_table_api._delete_route_table,
                           context, route_table['id'])
        vpc['route_table_id'] = route_table['id']
        db_api.update_item(context, vpc)
        neutron.update_router(os_router['id'], {'router': {'name': vpc['id']}})
        sg_id = security_group_api._create_default_security_group(context, vpc)
        cleaner.addCleanup(security_group_api.delete_security_group, context,
                           group_id=sg_id, delete_default=True)
        if is_default:
            igw_id = internet_gateway_api.create_internet_gateway(
                context)['internetGateway']['internetGatewayId']
            cleaner.addCleanup(internet_gateway_api.delete_internet_gateway,
                               context, igw_id)
            internet_gateway_api.attach_internet_gateway(context, igw_id,
                                                         vpc['id'])
            cleaner.addCleanup(internet_gateway_api.detach_internet_gateway,
                               context, igw_id, vpc['id'])
            subnet = subnet_api.create_subnet(
                context, vpc['id'],
                DEFAULT_SUBNET_CIDR_BLOCK)['subnet']
            cleaner.addCleanup(subnet_api.delete_subnet, context,
                               subnet['subnetId'])
            route_table_api.create_route(context, route_table['id'],
                                         '0.0.0.0/0', gateway_id=igw_id)
    return vpc
Code example #41
File: snapshot.py Project: JioCloudVPC/ec2-api
def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in ['available', 'in-use',
                                'attaching', 'detaching']:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(os_volume.id, True)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'],
                           display_description=description)

    return _format_snapshot(context, snapshot, os_snapshot,
                            volume_id=volume_id)
Code example #42
    def test_add_item_id(self):
        os_id = fakes.random_os_id()
        item_id = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertTrue(validator.validate_ec2_id(item_id, '', ['fake']))
        item = db_api.get_item_by_id(self.context, item_id)
        self.assertIsNone(item)
        item = db_api.add_item(self.context, 'fake', {'os_id': os_id})
        self.assertThat(
            item,
            matchers.DictMatches({
                'id': item_id,
                'os_id': os_id,
                'vpc_id': None
            }))
        # NOTE(ft): add os_id when item exists
        item_id = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertEqual(item_id, item['id'])

        # NOTE(ft): add os_id when id exists
        os_id = fakes.random_os_id()
        item_id1 = db_api.add_item_id(self.context, 'fake', os_id)
        item_id2 = db_api.add_item_id(self.context, 'fake', os_id)
        self.assertEqual(item_id1, item_id2)
Code example #43
File: security_group.py Project: jpoley/ec2-api
def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(group_name,
                                                            group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete,
                           os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group.id}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true',
                'groupId': security_group['id']}
Code example #44
File: vpn_connection.py Project: pombredanne/ec2-api
def create_vpn_connection(context, customer_gateway_id, vpn_gateway_id,
                          type, options=None):
    if not options or options.get('static_routes_only') is not True:
        raise exception.Unsupported('BGP dynamic routing is unsupported')
    customer_gateway = ec2utils.get_db_item(context, customer_gateway_id)
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpn_connection = next(
        (vpn for vpn in db_api.get_items(context, 'vpn')
         if vpn['customer_gateway_id'] == customer_gateway_id),
        None)
    if vpn_connection:
        if vpn_connection['vpn_gateway_id'] == vpn_gateway_id:
            ec2_vpn_connections = describe_vpn_connections(
                context, vpn_connection_id=[vpn_connection['id']])
            return {
                'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
        else:
            raise exception.InvalidCustomerGatewayDuplicateIpAddress()
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_ikepolicy = {'ike_version': 'v1',
                        'auth_algorithm': 'sha1',
                        'encryption_algorithm': 'aes-128',
                        'pfs': 'group2',
                        'phase1_negotiation_mode': 'main',
                        'lifetime': {'units': 'seconds',
                                     'value': 28800}}
        os_ikepolicy = neutron.create_ikepolicy(
            {'ikepolicy': os_ikepolicy})['ikepolicy']
        cleaner.addCleanup(neutron.delete_ikepolicy, os_ikepolicy['id'])

        os_ipsecpolicy = {'transform_protocol': 'esp',
                          'auth_algorithm': 'sha1',
                          'encryption_algorithm': 'aes-128',
                          'pfs': 'group2',
                          'encapsulation_mode': 'tunnel',
                          'lifetime': {'units': 'seconds',
                                       'value': 3600}}
        os_ipsecpolicy = neutron.create_ipsecpolicy(
            {'ipsecpolicy': os_ipsecpolicy})['ipsecpolicy']
        cleaner.addCleanup(neutron.delete_ipsecpolicy, os_ipsecpolicy['id'])

        psk = ''.join(random.choice(SHARED_KEY_CHARS) for _x in range(32))
        vpn_connection = db_api.add_item(
             context, 'vpn',
             {'customer_gateway_id': customer_gateway['id'],
              'vpn_gateway_id': vpn_gateway['id'],
              'pre_shared_key': psk,
              'os_ikepolicy_id': os_ikepolicy['id'],
              'os_ipsecpolicy_id': os_ipsecpolicy['id'],
              'cidrs': [],
              'os_ipsec_site_connections': {}})
        cleaner.addCleanup(db_api.delete_item, context, vpn_connection['id'])

        neutron.update_ikepolicy(
            os_ikepolicy['id'], {'ikepolicy': {'name': vpn_connection['id']}})
        neutron.update_ipsecpolicy(
            os_ipsecpolicy['id'],
            {'ipsecpolicy': {'name': vpn_connection['id']}})

        _reset_vpn_connections(context, neutron, cleaner,
                               vpn_gateway, vpn_connections=[vpn_connection])

    ec2_vpn_connections = describe_vpn_connections(
        context, vpn_connection_id=[vpn_connection['id']])
    return {
        'vpnConnection': ec2_vpn_connections['vpnConnectionSet'][0]}
Code example #45
def create_network_interface(context, subnet_id,
                             private_ip_address=None,
                             private_ip_addresses=None,
                             secondary_private_ip_address_count=None,
                             description=None,
                             security_group_id=None):
    subnet = ec2utils.get_db_item(context, subnet_id)
    if subnet is None:
        raise exception.InvalidSubnetIDNotFound(id=subnet_id)
    neutron = clients.neutron(context)
    os_subnet = neutron.show_subnet(subnet['os_id'])['subnet']
    # NOTE(Alex): Combine and check ip addresses. Neutron will accept
    # ip_address as a parameter for specified address and subnet_id for
    # address to auto-allocate.
    # TODO(Alex): Implement better diagnostics.
    subnet_ipnet = netaddr.IPNetwork(os_subnet['cidr'])
    if not private_ip_addresses:
        private_ip_addresses = []
    if private_ip_address is not None:
        private_ip_addresses.insert(0,
                                    {'private_ip_address': private_ip_address,
                                     'primary': True})
    primary_ip = None
    fixed_ips = []
    for ip in private_ip_addresses:
        ip_address = netaddr.IPAddress(ip['private_ip_address'])
        if ip_address not in subnet_ipnet:
            raise exception.InvalidParameterValue(
                value=str(ip_address),
                parameter='PrivateIpAddresses',
                reason='IP address is out of the subnet range')
        if ip.get('primary', False):
            if primary_ip is not None:
                raise exception.InvalidParameterValue(
                    value=str(ip_address),
                    parameter='PrivateIpAddresses',
                    reason='More than one primary ip is supplied')
            else:
                primary_ip = str(ip_address)
                fixed_ips.insert(0, {'ip_address': primary_ip})
        else:
            fixed_ips.append({'ip_address': str(ip_address)})
    if not fixed_ips and not secondary_private_ip_address_count:
        secondary_private_ip_address_count = 1
    if secondary_private_ip_address_count is None:
        secondary_private_ip_address_count = 0
    if secondary_private_ip_address_count > 0:
        for _i in range(secondary_private_ip_address_count):
            fixed_ips.append({'subnet_id': os_subnet['id']})
    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    vpc_id = vpc['id']
    dhcp_options_id = vpc.get('dhcp_options_id', None)
    if not security_group_id:
        default_groups = security_group_api.describe_security_groups(
            context,
            filter=[{'name': 'vpc-id', 'value': [vpc_id]},
                    {'name': 'group-name', 'value': ['default']}]
        )['securityGroupInfo']
        security_group_id = [default_group['groupId']
                             for default_group in default_groups]
    security_groups = db_api.get_items_by_ids(context, security_group_id)
    if any(security_group['vpc_id'] != vpc['id']
           for security_group in security_groups):
        msg = _('You have specified two resources that belong to '
                'different networks.')
        raise exception.InvalidGroupNotFound(msg)
    os_groups = [security_group['os_id'] for security_group in security_groups]
    with common.OnCrashCleaner() as cleaner:
        os_port_body = {'port': {'network_id': os_subnet['network_id'],
                                 'security_groups': os_groups}}
        os_port_body['port']['fixed_ips'] = fixed_ips
        try:
            os_port = neutron.create_port(os_port_body)['port']
        except (neutron_exception.IpAddressGenerationFailureClient,
                neutron_exception.OverQuotaClient):
            raise exception.InsufficientFreeAddressesInSubnet()
        except (neutron_exception.IpAddressInUseClient,
                neutron_exception.BadRequest) as ex:
            # NOTE(ft): AWS returns InvalidIPAddress.InUse for a primary IP
            # address, but InvalidParameterValue for secondary one.
            # AWS returns PrivateIpAddressLimitExceeded, but Neutron does
            # general InvalidInput (converted to BadRequest) in the same case.
            msg = _('Specified network interface parameters are invalid. '
                    'Reason: %(reason)s') % {'reason': ex.message}
            raise exception.InvalidParameterValue(msg)
        cleaner.addCleanup(neutron.delete_port, os_port['id'])
        if primary_ip is None:
            primary_ip = os_port['fixed_ips'][0]['ip_address']
        network_interface = db_api.add_item(context, 'eni',
                                            {'os_id': os_port['id'],
                                             'vpc_id': subnet['vpc_id'],
                                             'subnet_id': subnet['id'],
                                             'description': description,
                                             'private_ip_address': primary_ip})
        cleaner.addCleanup(db_api.delete_item,
                           context, network_interface['id'])

        network_interface_id = network_interface['id']
        neutron.update_port(os_port['id'],
                            {'port': {'name': network_interface_id}})
        if dhcp_options_id:
            dhcp_options._add_dhcp_opts_to_port(
                context,
                db_api.get_item_by_id(context, dhcp_options_id),
                network_interface,
                os_port)
    security_groups = security_group_api._format_security_groups_ids_names(
        context)
    return {'networkInterface':
            _format_network_interface(context,
                                      network_interface,
                                      os_port,
                                      security_groups=security_groups)}
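The address-combination logic in create_network_interface is the subtle part: the primary address is forced to the front of fixed_ips, and secondary addresses without an explicit IP are requested by subnet_id so Neutron auto-allocates them. A self-contained sketch of just that step, assuming the same input shape as the function above (ValueError stands in for the API's InvalidParameterValue):

import netaddr

def build_fixed_ips(cidr, private_ip_addresses, os_subnet_id,
                    secondary_count=0):
    # Validate each address against the subnet and order the primary first.
    subnet_ipnet = netaddr.IPNetwork(cidr)
    primary_ip = None
    fixed_ips = []
    for ip in private_ip_addresses:
        addr = netaddr.IPAddress(ip['private_ip_address'])
        if addr not in subnet_ipnet:
            raise ValueError('%s is out of the subnet range' % addr)
        if ip.get('primary', False):
            if primary_ip is not None:
                raise ValueError('More than one primary ip is supplied')
            primary_ip = str(addr)
            fixed_ips.insert(0, {'ip_address': primary_ip})
        else:
            fixed_ips.append({'ip_address': str(addr)})
    # Entries carrying only subnet_id ask Neutron to auto-allocate.
    fixed_ips.extend({'subnet_id': os_subnet_id}
                     for _ in range(secondary_count))
    return primary_ip, fixed_ips

primary, ips = build_fixed_ips(
    '10.0.0.0/24',
    [{'private_ip_address': '10.0.0.5', 'primary': True}],
    'os-subnet-id', secondary_count=1)
# primary == '10.0.0.5'
# ips == [{'ip_address': '10.0.0.5'}, {'subnet_id': 'os-subnet-id'}]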
Code example #48
def register_image(context,
                   name=None,
                   image_location=None,
                   description=None,
                   architecture=None,
                   root_device_name=None,
                   block_device_mapping=None,
                   virtualization_type=None,
                   kernel_id=None,
                   ramdisk_id=None,
                   sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with hypothetical code
        # that uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0, 'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {
            'os_id': os_image.id,
            'is_public': False,
            'description': description
        })
    return {'imageId': image['id']}
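The three guard clauses at the top of register_image interact: name can stand in for imageLocation, but only when rootDeviceName is absent. A pure-Python sketch of that decision order (the return codes are illustrative stand-ins for the raised exceptions):

def check_register_args(name, image_location, root_device_name):
    # Mirrors the validation order of register_image above.
    if not image_location and not root_device_name:
        image_location = name  # backward-compatibility fallback
    if not image_location and not root_device_name:
        return 'InvalidParameterCombination'
    if not image_location and not name:
        return 'MissingParameter'
    return 'ok'

assert check_register_args(None, None, None) == 'InvalidParameterCombination'
assert check_register_args(None, None, '/dev/vda') == 'MissingParameter'
assert check_register_args('my-ami', None, None) == 'ok'  # name doubles as location
assert check_register_args('my-ami', None, '/dev/vda') == 'ok'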
Code example #49
def create_image(context,
                 instance_id,
                 name=None,
                 description=None,
                 no_reboot=False,
                 block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be running or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to really stop
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            # NOTE: image is a plain dict from db_api, so use key access,
            # not attribute access.
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image['id'],
                            exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'), {
                                'i_id': instance['id'],
                                'image_id': image['id']
                            },
                        exc_info=True)

    image = {'is_public': False, 'description': description}
    if restart_instance:
        # NOTE(ft): the image type is hardcoded because we don't know it now
        # and cannot change it later. But Nova doesn't specify a container
        # format for snapshots of volume-backed instances, so in fact it
        # is 'ami'.
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
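delayed_create's stop-and-poll loop recurs in the variants below; extracted as a generic helper it looks like this (a sketch: RuntimeError stands in for exception.EC2Exception, and the one-hour timeout is taken from the code above):

import time

def wait_for_shutoff(os_instance, timeout=60 * 60, poll_interval=1):
    # Ask Nova to stop the server, then poll until it reports SHUTOFF
    # or the timeout expires.
    os_instance.stop()
    start_time = time.time()
    while os_instance.status != 'SHUTOFF':
        time.sleep(poll_interval)
        os_instance.get()  # refresh the status from the API
        if time.time() > start_time + timeout:
            raise RuntimeError("Couldn't stop instance within %d sec"
                               % timeout)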
Code example #50
def _create_route_table(context, vpc):
    route_table = {'vpc_id': vpc['id'],
                   'routes': [{'destination_cidr_block': vpc['cidr_block'],
                               'gateway_id': None}]}
    route_table = db_api.add_item(context, 'rtb', route_table)
    return route_table
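A hedged usage sketch: the initial table for a new VPC holds only the implicit local route, where a gateway_id of None denotes 'local' (the values below are illustrative):

vpc = {'id': 'vpc-00000001', 'cidr_block': '10.0.0.0/16'}
# _create_route_table(context, vpc) would store and return roughly:
# {'id': 'rtb-...', 'vpc_id': 'vpc-00000001',
#  'routes': [{'destination_cidr_block': '10.0.0.0/16',
#              'gateway_id': None}]}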
Code example #51
File: image.py Project: openstack/ec2-api
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):

    # Setup default flags
    is_s3_import = False
    is_url_import = False

    # Process the input arguments
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with hypothetical code
        # that uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    metadata = {}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:

        # Resolve the import type
        metadata['image_location'] = image_location
        parsed_url = six.moves.urllib.parse.urlparse(image_location)
        is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
        is_url_import = not is_s3_import

        # Check if the name is in the metadata
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        metadata['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        metadata['bdm_v2'] = 'True'
        metadata['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        metadata['architecture'] = architecture
    if kernel_id:
        metadata['kernel_id'] = ec2utils.get_os_image(context,
                                                        kernel_id).id
    if ramdisk_id:
        metadata['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    # Begin the import/registration process
    with common.OnCrashCleaner() as cleaner:

        # Setup the glance client
        glance = clients.glance(context)

        # Check if this is an S3 import
        if is_s3_import:
            os_image = _s3_create(context, metadata)

        # Condition for all non-S3 imports
        else:

            # Create the image in glance
            metadata.update({'visibility': 'private',
                             'container_format': 'bare',
                             'disk_format': 'raw'})
            os_image = glance.images.create(**metadata)

            # Kick-off the URL image import if from URL
            if is_url_import:
                glance.images.image_import(os_image.id, method='web-download',
                                           uri=metadata['image_location'])

            # Otherwise, use the default method
            else:
                glance.images.upload(os_image.id, '', image_size=0)

        # Add cleanups and complete the registration process
        cleaner.addCleanup(glance.images.delete, os_image.id)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})

    # Return the image ID for the registration process
    return {'imageId': image['id']}
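The import-type resolution above hinges entirely on the URL scheme: an empty scheme or s3 means an S3 manifest import, anything else goes through Glance's web-download flow. A tiny standalone check (six is used only because the original does; on Python 3, urllib.parse works directly):

import six

def resolve_import_type(image_location):
    scheme = six.moves.urllib.parse.urlparse(image_location).scheme
    # Empty scheme or 's3' -> S3 manifest import; otherwise URL import.
    return 's3' if scheme in ('', 's3') else 'url'

assert resolve_import_type('my-bucket/image.manifest.xml') == 's3'
assert resolve_import_type('s3://my-bucket/image.manifest.xml') == 's3'
assert resolve_import_type('http://example.com/disk.raw') == 'url'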
Code example #52
File: ec2utils.py Project: JioCloudCompute/ec2-api
def auto_create_db_item(context, kind, os_id, **extension_kwargs):
    item = {'os_id': os_id}
    extension = _auto_create_db_item_extensions.get(kind)
    if extension:
        extension(context, item, **extension_kwargs)
    return db_api.add_item(context, kind, item)
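auto_create_db_item dispatches through the module-level _auto_create_db_item_extensions mapping before inserting the item. A hedged sketch of what such a hook could look like (the 'sg' extension below is hypothetical, for illustration only):

# Hypothetical hook: enrich auto-created security group items with
# their VPC before the DB insert.
def _auto_create_sg_extension(context, item, vpc_id=None):
    if vpc_id is not None:
        item['vpc_id'] = vpc_id

# The mapping associates item kinds with such callables:
_auto_create_db_item_extensions = {'sg': _auto_create_sg_extension}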
Code example #53
def create_internet_gateway(context):
    igw = db_api.add_item(context, 'igw', {})
    return {'internetGateway': _format_internet_gateway(igw)}
Code example #54
def create_image(context,
                 instance_id,
                 name=None,
                 description=None,
                 no_reboot=False,
                 block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be running or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True
        os_instance.stop()

        # wait for the instance to really stop
        start_time = time.time()
        while os_instance.status != 'SHUTOFF':
            time.sleep(1)
            os_instance.get()
            # NOTE(yamahata): timeout and error. 1 hour for now for safety.
            #                 Is it too short/long?
            #                 Or is there any better way?
            timeout = 1 * 60 * 60
            if time.time() > start_time + timeout:
                err = _("Couldn't stop instance within %d sec") % timeout
                raise exception.EC2Exception(message=err)

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    glance = clients.glance(context)
    with common.OnCrashCleaner() as cleaner:
        os_image_id = os_instance.create_image(name)
        cleaner.addCleanup(glance.images.delete, os_image_id)
        # TODO(andrey-mp): snapshot and volume also must be deleted in case
        # of error
        os_image = glance.images.get(os_image_id)
        image = db_api.add_item(context, _get_os_image_kind(os_image), {
            'os_id': os_image_id,
            'is_public': False,
            'description': description
        })

    if restart_instance:
        os_instance.start()

    return {'imageId': image['id']}
Code example #55
File: image.py Project: jpoley/ec2-api
def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be running or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to really stop
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            # NOTE: image is a plain dict from db_api, so use key access,
            # not attribute access.
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image['id'])
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image['id'], exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'),
                        {'i_id': instance['id'],
                         'image_id': image['id']},
                        exc_info=True)

    image = {'is_public': False,
             'description': description}
    if restart_instance:
        # NOTE(ft): the image type is hardcoded because we don't know it now
        # and cannot change it later. But Nova doesn't specify a container
        # format for snapshots of volume-backed instances, so in fact it
        # is 'ami'.
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
Code example #56
def register_image(context,
                   name=None,
                   image_location=None,
                   description=None,
                   architecture=None,
                   root_device_name=None,
                   block_device_mapping=None,
                   virtualization_type=None,
                   kernel_id=None,
                   ramdisk_id=None,
                   sriov_net_support=None):

    # Setup default flags
    is_s3_import = False
    is_url_import = False

    # Process the input arguments
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with hypothetical code
        # that uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    metadata = {}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:

        # Resolve the import type
        metadata['image_location'] = image_location
        parsed_url = six.moves.urllib.parse.urlparse(image_location)
        is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
        is_url_import = not is_s3_import

        # Check if the name is in the metadata
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        metadata['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        metadata['bdm_v2'] = 'True'
        metadata['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        metadata['architecture'] = architecture
    if kernel_id:
        metadata['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        metadata['ramdisk_id'] = ec2utils.get_os_image(context, ramdisk_id).id

    # Begin the import/registration process
    with common.OnCrashCleaner() as cleaner:

        # Setup the glance client
        glance = clients.glance(context)

        # Check if this is an S3 import
        if is_s3_import:
            os_image = _s3_create(context, metadata)

        # Condition for all non-S3 imports
        else:

            # Create the image in glance
            metadata.update({
                'visibility': 'private',
                'container_format': 'bare',
                'disk_format': 'raw'
            })
            os_image = glance.images.create(**metadata)

            # Kick-off the URL image import if from URL
            if is_url_import:
                glance.images.image_import(os_image.id,
                                           method='web-download',
                                           uri=metadata['image_location'])

            # Otherwise, use the default method
            else:
                glance.images.upload(os_image.id, '', image_size=0)

        # Add cleanups and complete the registration process
        cleaner.addCleanup(glance.images.delete, os_image.id)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {
            'os_id': os_image.id,
            'is_public': False,
            'description': description
        })

    # Return the image ID for the registration process
    return {'imageId': image['id']}
Code example #57
File: image.py Project: jpoley/ec2-api
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with hypothetical code
        # that uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context,
                                                        kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0,
                             'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})
    return {'imageId': image['id']}
Code example #58
def create_vpn_gateway(context, type, availability_zone=None):
    vpn_gateway = db_api.add_item(context, "vgw", {})
    return {"vpnGateway": _format_vpn_gateway(vpn_gateway)}