Example #1
    def test_create_subnet_vpnservice(self):
        self.neutron.create_vpnservice.side_effect = tools.get_neutron_create(
            'vpnservice', fakes.ID_OS_VPNSERVICE_1)
        context = base.create_context()
        cleaner = common.OnCrashCleaner()

        vpn_gateway_api._create_subnet_vpnservice(
            context, self.neutron, cleaner,
            copy.deepcopy(self.DB_SUBNET_1_NO_VPN), fakes.DB_VPC_1)

        self.neutron.create_vpnservice.assert_called_once_with(
            {'vpnservice': tools.purge_dict(fakes.OS_VPNSERVICE_1,
                                            ('id',))})
        self.db_api.update_item.assert_called_once_with(
            mock.ANY, fakes.DB_SUBNET_1)

        try:
            with common.OnCrashCleaner() as cleaner:
                vpn_gateway_api._create_subnet_vpnservice(
                    context, self.neutron, cleaner,
                    copy.deepcopy(self.DB_SUBNET_1_NO_VPN), fakes.DB_VPC_1)
                raise Exception('fake-exception')
        except Exception as ex:
            if str(ex) != 'fake-exception':
                raise
        self.db_api.update_item.assert_called_with(
            mock.ANY, self.DB_SUBNET_1_NO_VPN)
        self.neutron.delete_vpnservice.assert_called_once_with(
            fakes.ID_OS_VPNSERVICE_1)
Example #2
def replace_route_table_association(context, association_id, route_table_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    if route_table['vpc_id'] == ec2utils.change_ec2_id_kind(
            association_id, 'vpc'):
        vpc = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'vpc'))
        if vpc is None:
            raise exception.InvalidAssociationIDNotFound(id=association_id)

        rollback_route_table_object = db_api.get_item_by_id(
            context, vpc['route_table_id'])
        with common.OnCrashCleaner() as cleaner:
            _associate_vpc_item(context, vpc, route_table['id'])
            cleaner.addCleanup(_associate_vpc_item, context, vpc,
                               rollback_route_table_object['id'])

            # NOTE(ft): this can cause unnecessary update of subnets, which are
            # associated with the route table
            _update_routes_in_associated_subnets(context,
                                                 route_table,
                                                 cleaner,
                                                 rollback_route_table_object,
                                                 is_main=True)
    else:
        subnet = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'subnet'))
        if subnet is None or 'route_table_id' not in subnet:
            raise exception.InvalidAssociationIDNotFound(id=association_id)
        if subnet['vpc_id'] != route_table['vpc_id']:
            msg = _('Route table association %(rtbassoc_id)s and route table '
                    '%(rtb_id)s belong to different networks')
            msg = msg % {
                'rtbassoc_id': association_id,
                'rtb_id': route_table_id
            }
            raise exception.InvalidParameterValue(msg)

        rollback_route_table_object = db_api.get_item_by_id(
            context, subnet['route_table_id'])
        with common.OnCrashCleaner() as cleaner:
            _associate_subnet_item(context, subnet, route_table['id'])
            cleaner.addCleanup(_associate_subnet_item, context, subnet,
                               rollback_route_table_object['id'])

            _update_subnet_host_routes(
                context,
                subnet,
                route_table,
                cleaner=cleaner,
                rollback_route_table_object=rollback_route_table_object)

    return {'newAssociationId': association_id}
Example #3
    def test_delete_subnet_vpn(self):
        context = base.create_context()
        cleaner = common.OnCrashCleaner()

        # subnet is not connected to the vpn
        vpn_connection_api._delete_subnet_vpn(context, self.neutron, cleaner,
                                              fakes.DB_SUBNET_1,
                                              fakes.DB_VPN_CONNECTION_1)
        self.assertFalse(self.db_api.update_item.called)
        self.assertFalse(self.neutron.delete_ipsec_site_connection.called)

        # delete subnet vpn connection
        vpn_connection_api._delete_subnet_vpn(
            context, self.neutron, cleaner, fakes.DB_SUBNET_2,
            copy.deepcopy(fakes.DB_VPN_CONNECTION_1))
        self.db_api.update_item.assert_called_once_with(
            mock.ANY,
            tools.update_dict(fakes.DB_VPN_CONNECTION_1,
                              {'os_ipsec_site_connections': {}}))
        self.neutron.delete_ipsec_site_connection.assert_called_once_with(
            fakes.ID_OS_IPSEC_SITE_CONNECTION_2)

        # delete subnet vpn connection, leave connections of other subnets
        self.db_api.reset_mock()
        self.neutron.reset_mock()
        id_os_connection = fakes.random_os_id()
        vpn_connection_1 = copy.deepcopy(fakes.DB_VPN_CONNECTION_1)
        (vpn_connection_1['os_ipsec_site_connections'][fakes.ID_EC2_SUBNET_1]
         ) = id_os_connection
        vpn_connection_api._delete_subnet_vpn(context, self.neutron, cleaner,
                                              fakes.DB_SUBNET_1,
                                              vpn_connection_1)
        self.db_api.update_item.assert_called_once_with(
            mock.ANY, fakes.DB_VPN_CONNECTION_1)
        self.neutron.delete_ipsec_site_connection.assert_called_once_with(
            id_os_connection)

        # rollback of deleting subnet vpn connection
        self.db_api.reset_mock()
        self.neutron.reset_mock()
        try:
            with common.OnCrashCleaner() as cleaner:
                vpn_connection_api._delete_subnet_vpn(
                    context, self.neutron, cleaner, fakes.DB_SUBNET_2,
                    copy.deepcopy(fakes.DB_VPN_CONNECTION_1))
                raise Exception('fake-exception')
        except Exception as ex:
            if str(ex) != 'fake-exception':
                raise
        self.db_api.update_item.assert_called_with(mock.ANY,
                                                   fakes.DB_VPN_CONNECTION_1)
        self.assertFalse(self.neutron.create_ipsec_site_connection.called)
Example #4
def detach_internet_gateway(context, internet_gateway_id, vpc_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    LOG.info('Detaching %(igw)s internet-gateway from %(vpc)s.',
             {'igw': str(igw), 'vpc': str(vpc)})

    if igw.get('vpc_id') != vpc['id']:
        raise exception.GatewayNotAttached(gw_id=igw['id'],
                                           vpc_id=vpc['id'])

    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, 'vgw') is None)
    neutron = clients.neutron(context)
    # TODO(ft): set detaching state into db
    with common.OnCrashCleaner() as cleaner:
        _detach_internet_gateway_item(context, igw)
        cleaner.addCleanup(_attach_internet_gateway_item,
                           context, igw, vpc['id'])
        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc['os_id'])
            except neutron_exception.NotFound:
                pass
            except Exception as ex:
                floatingips = neutron.list_floatingips(
                    tenant_id=context.project_id)['floatingips']
                LOG.info('Existing floating ips: %(floatingips)s. '
                         'Exception: %(ex)s.',
                         {'floatingips': floatingips, 'ex': ex})

    return True
Example #5
def delete_route(context, route_table_id, destination_cidr_block):
    route_table = ec2utils.get_db_item(context, route_table_id)
    for route_index, route in enumerate(route_table['routes']):
        if route['destination_cidr_block'] != destination_cidr_block:
            continue
        if route.get('gateway_id', 0) is None:
            msg = _('cannot remove local route %(destination_cidr_block)s '
                    'in route table %(route_table_id)s')
            msg = msg % {'route_table_id': route_table_id,
                         'destination_cidr_block': destination_cidr_block}
            raise exception.InvalidParameterValue(msg)
        break
    else:
        raise exception.InvalidRouteNotFound(
            route_table_id=route_table_id,
            destination_cidr_block=destination_cidr_block)
    update_target = _get_route_target(route)
    if update_target == VPN_TARGET:
        vpn_gateway = db_api.get_item_by_id(context, route['gateway_id'])
        if (not vpn_gateway or
                vpn_gateway['vpc_id'] != route_table['vpc_id']):
            update_target = None
    rollback_route_table_state = copy.deepcopy(route_table)
    del route_table['routes'][route_index]
    with common.OnCrashCleaner() as cleaner:
        db_api.update_item(context, route_table)
        cleaner.addCleanup(db_api.update_item, context,
                           rollback_route_table_state)

        if update_target:
            _update_routes_in_associated_subnets(
                context, cleaner, route_table, update_target=update_target)

    return True
Example #6
def disassociate_route_table(context, association_id):
    subnet = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'subnet'))
    if not subnet:
        vpc = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'vpc'))
        if vpc is None:
            raise exception.InvalidAssociationIDNotFound(id=association_id)
        msg = _('Cannot disassociate the main route table association '
                '%(rtbassoc_id)s') % {'rtbassoc_id': association_id}
        raise exception.InvalidParameterValue(msg)
    if 'route_table_id' not in subnet:
        raise exception.InvalidAssociationIDNotFound(id=association_id)

    rollback_route_table_id = subnet['route_table_id']
    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    with common.OnCrashCleaner() as cleaner:
        _disassociate_subnet_item(context, subnet)
        cleaner.addCleanup(_associate_subnet_item, context, subnet,
                           rollback_route_table_id)

        _update_subnet_routes(context, cleaner, subnet, main_route_table)

    return True
Example #7
def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in [
            'available', 'in-use', 'attaching', 'detaching'
    ]:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(
            os_volume.id, force=True, display_description=description)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap', {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'])

    return _format_snapshot(context,
                            snapshot,
                            os_snapshot,
                            volume_id=volume_id)
Example #8
    def disassociate_address(self, context, public_ip=None,
                             association_id=None):
        LOG.info('Disassociating %s', association_id)
        neutron = clients.neutron(context)

        floatingips = neutron.list_floatingips(
            tenant_id=context.project_id)['floatingips']
        LOG.info('Existing floating ips: %s', floatingips)

        if public_ip:
            # TODO(ft): implement search in DB layer
            address = next((addr for addr in db_api.get_items(context,
                                                              'eipalloc')
                            if addr['public_ip'] == public_ip), None)

            if not CONF.disable_ec2_classic:
                if address and _is_address_valid(context, neutron, address):
                    msg = _('You must specify an association id when '
                            'unmapping an address from a VPC instance')
                    raise exception.InvalidParameterValue(msg)
                # NOTE(tikitavi): check the public IP exists to raise AWS
                # exception otherwise
                os_floating_ip = self.get_os_floating_ip_by_public_ip(
                    context, public_ip)
                os_ports = self.get_os_ports(context)
                os_instance_id = _get_os_instance_id(context, os_floating_ip,
                                                     os_ports)
                if os_instance_id:
                    nova = clients.nova(context)
                    nova.servers.remove_floating_ip(os_instance_id, public_ip)
                return None

            if not address:
                msg = _("The address '%(public_ip)s' does not belong to you.")
                raise exception.AuthFailure(msg % {'public_ip': public_ip})
            if 'network_interface_id' not in address:
                msg = _('You must specify an association id when unmapping '
                        'an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                         'eipassoc')

        address = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
        LOG.info('DB address: %s', address)
        if address is None or not _is_address_valid(context, neutron, address):
            raise exception.InvalidAssociationIDNotFound(
                    id=association_id)
        if 'network_interface_id' in address:
            with common.OnCrashCleaner() as cleaner:
                network_interface_id = address['network_interface_id']
                private_ip_address = address['private_ip_address']
                LOG.info('Disassociating %(private_ip_address)s from '
                         'interface %(network_interface_id)s',
                         {'private_ip_address': private_ip_address,
                          'network_interface_id': network_interface_id})
                _disassociate_address_item(context, address)
                cleaner.addCleanup(_associate_address_item, context, address,
                                   network_interface_id, private_ip_address)

                update = neutron.update_floatingip(address['os_id'],
                                                   {'floatingip': {'port_id': None}})
                LOG.info('Neutron.update result is %s', update)
Example #9
def create_volume(context,
                  availability_zone=None,
                  size=None,
                  snapshot_id=None,
                  volume_type=None,
                  iops=None,
                  encrypted=None,
                  kms_key_id=None):
    if snapshot_id is not None:
        snapshot = ec2utils.get_db_item(context, snapshot_id)
        os_snapshot_id = snapshot['os_id']
    else:
        os_snapshot_id = None

    cinder = clients.cinder(context)
    with common.OnCrashCleaner() as cleaner:
        os_volume = cinder.volumes.create(size,
                                          snapshot_id=os_snapshot_id,
                                          volume_type=volume_type,
                                          availability_zone=availability_zone)
        cleaner.addCleanup(os_volume.delete)

        volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
        cleaner.addCleanup(db_api.delete_item, context, volume['id'])
        os_volume.update(display_name=volume['id'])

    return _format_volume(context, volume, os_volume, snapshot_id=snapshot_id)
Example #10
def create_security_group(context, group_name, group_description, vpc_id=None):
    nova = clients.nova(context)
    if vpc_id and group_name != vpc_id:
        security_groups = describe_security_groups(context,
                                                   filter=[{
                                                       'name': 'vpc-id',
                                                       'value': [vpc_id]
                                                   }, {
                                                       'name':
                                                       'group-name',
                                                       'value': [group_name]
                                                   }])['securityGroupInfo']
        if security_groups:
            raise exception.InvalidGroupDuplicate(name=group_name)
    with common.OnCrashCleaner() as cleaner:
        try:
            # TODO(Alex): Shouldn't allow creation of groups with existing
            # name if in the same VPC or in EC2-Classic.
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
            security_group = db_api.add_item(context, 'sg', {
                'vpc_id': vpc_id,
                'os_id': os_security_group.id
            })
            return {'return': 'true', 'groupId': security_group['id']}
    return {'return': 'true'}
Example #11
def associate_route_table(context, route_table_id, subnet_id):
    route_table = ec2utils.get_db_item(context, route_table_id)
    subnet = ec2utils.get_db_item(context, subnet_id)
    if route_table['vpc_id'] != subnet['vpc_id']:
        msg = _('Route table %(rtb_id)s and subnet %(subnet_id)s belong to '
                'different networks')
        msg = msg % {'rtb_id': route_table_id, 'subnet_id': subnet_id}
        raise exception.InvalidParameterValue(msg)
    if 'route_table_id' in subnet:
        msg = _('The specified association for route table %(rtb_id)s '
                'conflicts with an existing association')
        msg = msg % {'rtb_id': route_table_id}
        raise exception.ResourceAlreadyAssociated(msg)

    vpc = db_api.get_item_by_id(context, subnet['vpc_id'])
    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    with common.OnCrashCleaner() as cleaner:
        _associate_subnet_item(context, subnet, route_table['id'])
        cleaner.addCleanup(_disassociate_subnet_item, context, subnet)

        _update_subnet_host_routes(
            context,
            subnet,
            route_table,
            cleaner=cleaner,
            rollback_route_table_object=main_route_table)

    return {
        'associationId': ec2utils.change_ec2_id_kind(subnet['id'], 'rtbassoc')
    }
Example #12
def run():
    with common.OnCrashCleaner() as cleaner:
        cleaner.addCleanup(obj.fake_clean_method,
                           555, 'arg', {'k': 'v'})
        cleaner.addCleanup(obj.fake_clean_method,
                           666, 'param', {'key': 'value'})
        raise self.FakeException()
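The tests on this page all exercise one contract: callbacks registered with addCleanup act as rollback steps and are invoked only when an exception escapes the with block (compare the rollback branches in Example #1 and Example #3 with the normal flow in Example #28 below). A minimal stand-in illustrating that contract is sketched here; it is not the ec2api implementation, RollbackSketch is a made-up name, and the newest-first ordering is an assumption based on each cleanup being registered right after the change it undoes.

import functools


class RollbackSketch(object):
    """Runs registered callbacks only if the block exits with an exception."""

    def __init__(self):
        self._cleanups = []

    def __enter__(self):
        return self

    def addCleanup(self, func, *args, **kwargs):
        # Record a rollback step for the change performed just before.
        self._cleanups.append(functools.partial(func, *args, **kwargs))

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            # Undo the recorded changes, newest first (assumed ordering).
            for cleanup in reversed(self._cleanups):
                cleanup()
        return False  # never swallow the original exception


# Usage mirrors the examples on this page:
#     with RollbackSketch() as cleaner:
#         do_change()
#         cleaner.addCleanup(undo_change)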
Example #13
    def test_start_vpn_in_subnet(self, create_subnet_vpnservice,
                                 reset_vpn_connection):
        context = base.create_context()
        cleaner = common.OnCrashCleaner()
        mock_manager = mock.Mock()
        mock_manager.attach_mock(create_subnet_vpnservice,
                                 'create_subnet_vpnservice')
        mock_manager.attach_mock(reset_vpn_connection, 'reset_vpn_connection')

        self.set_mock_db_items(fakes.DB_VPN_GATEWAY_1, fakes.DB_VPN_GATEWAY_2)
        vpn_gateway_api._start_vpn_in_subnet(
            context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1),
            fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1)
        mock_manager.assert_has_calls([
            mock.call.create_subnet_vpnservice(
                context, self.neutron, cleaner,
                fakes.DB_SUBNET_1, fakes.DB_VPC_1),
            mock.call.reset_vpn_connection(
                context, self.neutron, cleaner, fakes.DB_VPN_GATEWAY_1,
                subnets=[fakes.DB_SUBNET_1],
                route_tables=[fakes.DB_ROUTE_TABLE_1])])

        create_subnet_vpnservice.reset_mock()
        reset_vpn_connection.reset_mock()
        self.add_mock_db_items(self.DB_VPN_GATEWAY_1_DETACHED)
        vpn_gateway_api._start_vpn_in_subnet(
            context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1),
            fakes.DB_VPC_1, fakes.DB_ROUTE_TABLE_1)
        self.assertFalse(create_subnet_vpnservice.called)
        self.assertFalse(reset_vpn_connection.called)
Example #14
def associate_dhcp_options(context, dhcp_options_id, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    rollback_dhcp_options_id = vpc.get('dhcp_options_id')
    if dhcp_options_id == 'default':
        dhcp_options_id = None
        dhcp_options = None
    else:
        dhcp_options = ec2utils.get_db_item(context, dhcp_options_id)
        dhcp_options_id = dhcp_options['id']
    neutron = clients.neutron(context)
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    network_interfaces = db_api.get_items(context, 'eni')
    rollback_dhcp_options_object = (
        db_api.get_item_by_id(context, rollback_dhcp_options_id)
        if dhcp_options_id is not None else None)
    with common.OnCrashCleaner() as cleaner:
        _associate_vpc_item(context, vpc, dhcp_options_id)
        cleaner.addCleanup(_associate_vpc_item, context, vpc,
                           rollback_dhcp_options_id)
        for network_interface in network_interfaces:
            os_port = next(
                (p for p in os_ports if p['id'] == network_interface['os_id']),
                None)
            if not os_port:
                continue
            _add_dhcp_opts_to_port(context, dhcp_options, network_interface,
                                   os_port, neutron)
            cleaner.addCleanup(_add_dhcp_opts_to_port, context,
                               rollback_dhcp_options_object, network_interface,
                               os_port, neutron)
    return True
Example #15
def attach_network_interface(context, network_interface_id,
                             instance_id, device_index):
    network_interface = ec2utils.get_db_item(context, network_interface_id)
    if 'instance_id' in network_interface:
        raise exception.InvalidParameterValue(
            _("Network interface '%(id)s' is currently in use.") %
            {'id': network_interface_id})
    os_instance_id = ec2utils.get_db_item(context, instance_id)['os_id']
    # TODO(Alex) Check that the instance is not yet attached to another VPC
    # TODO(Alex) Check that the instance is "our", not created via nova
    # (which means that it doesn't belong to any VPC and can't be attached)
    if any(eni['device_index'] == device_index
           for eni in db_api.get_items(context, 'eni')
           if eni.get('instance_id') == instance_id):
        raise exception.InvalidParameterValue(
            _("Instance '%(id)s' already has an interface attached at "
              "device index '%(index)s'.") % {'id': instance_id,
                                              'index': device_index})
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        # TODO(Alex) nova inserts compute:%availability_zone into device_owner
        #                              'device_owner': 'compute:None'}})
        _attach_network_interface_item(context, network_interface,
                                       instance_id, device_index)
        cleaner.addCleanup(_detach_network_interface_item, context,
                           network_interface)
        nova.servers.interface_attach(os_instance_id, os_port['id'],
                                      None, None)
    return {'attachmentId': ec2utils.change_ec2_id_kind(
                    network_interface['id'], 'eni-attach')}
Example #16
    def release_address(self, context, public_ip, allocation_id):
        neutron = clients.neutron(context)
        if public_ip:
            # TODO(ft): implement search in DB layer
            address = next((addr
                            for addr in db_api.get_items(context, 'eipalloc')
                            if addr['public_ip'] == public_ip), None)
            if address and _is_address_valid(context, neutron, address):
                msg = _('You must specify an allocation id when releasing a '
                        'VPC elastic IP address')
                raise exception.InvalidParameterValue(msg)
            return AddressEngineNova().release_address(context, public_ip,
                                                       None)

        address = ec2utils.get_db_item(context, allocation_id)
        if not _is_address_valid(context, neutron, address):
            raise exception.InvalidAllocationIDNotFound(id=allocation_id)
        if 'network_interface_id' in address:
            raise exception.InvalidIPAddressInUse(
                ip_address=address['public_ip'])

        with common.OnCrashCleaner() as cleaner:
            db_api.delete_item(context, address['id'])
            cleaner.addCleanup(db_api.restore_item, context, 'eipalloc',
                               address)
            try:
                neutron.delete_floatingip(address['os_id'])
            except neutron_exception.NotFound:
                pass
Example #17
    def disassociate_address(self,
                             context,
                             public_ip=None,
                             association_id=None):
        neutron = clients.neutron(context)
        if public_ip:
            # TODO(ft): implement search in DB layer
            address = next((addr
                            for addr in db_api.get_items(context, 'eipalloc')
                            if addr['public_ip'] == public_ip), None)
            if address and _is_address_valid(context, neutron, address):
                msg = _('You must specify an association id when unmapping '
                        'an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            # NOTE(ft): association_id is unused in EC2 Classic mode, but it's
            # passed there to validate its emptiness in one place
            return AddressEngineNova().disassociate_address(
                context, public_ip=public_ip, association_id=association_id)

        address = db_api.get_item_by_id(
            context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
        if address is None or not _is_address_valid(context, neutron, address):
            raise exception.InvalidAssociationIDNotFound(id=association_id)
        if 'network_interface_id' in address:
            with common.OnCrashCleaner() as cleaner:
                network_interface_id = address['network_interface_id']
                private_ip_address = address['private_ip_address']
                _disassociate_address_item(context, address)
                cleaner.addCleanup(_associate_address_item, context, address,
                                   network_interface_id, private_ip_address)

                neutron.update_floatingip(address['os_id'],
                                          {'floatingip': {
                                              'port_id': None
                                          }})
Example #18
def _create_security_group(context, group_name, group_description,
                           vpc_id=None, default=False):
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            secgroup_body = (
                {'security_group': {'name': group_name,
                                    'description': group_description}})
            os_security_group = neutron.create_security_group(
                secgroup_body)['security_group']
        except neutron_exception.OverQuotaClient:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(neutron.delete_security_group,
                           os_security_group['id'])
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group['id']}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true',
                'groupId': security_group['id']}
Example #19
def detach_network_interface(context, attachment_id, force=None):
    network_interface = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(attachment_id, 'eni'))
    if not network_interface or 'instance_id' not in network_interface:
        raise exception.InvalidAttachmentIDNotFound(id=attachment_id)
    if network_interface['device_index'] == 0:
        raise exception.OperationNotPermitted(
            _('The network interface at device index 0 cannot be detached.'))
    neutron = clients.neutron(context)
    os_port = neutron.show_port(network_interface['os_id'])['port']
    with common.OnCrashCleaner() as cleaner:
        instance_id = network_interface['instance_id']
        device_index = network_interface['device_index']
        attach_time = network_interface['attach_time']
        delete_on_termination = network_interface['delete_on_termination']
        _detach_network_interface_item(context, network_interface)
        cleaner.addCleanup(_attach_network_interface_item, context,
                           network_interface, instance_id, device_index,
                           attach_time, delete_on_termination)
        neutron.update_port(os_port['id'],
                            {'port': {
                                'device_id': '',
                                'device_owner': ''
                            }})
    return True
Example #20
def _create_security_group(context,
                           group_name,
                           group_description,
                           vpc_id=None,
                           default=False):
    nova = clients.nova(context)
    with common.OnCrashCleaner() as cleaner:
        try:
            os_security_group = nova.security_groups.create(
                group_name, group_description)
        except nova_exception.OverLimit:
            raise exception.ResourceLimitExceeded(resource='security groups')
        cleaner.addCleanup(nova.security_groups.delete, os_security_group.id)
        if vpc_id:
            # NOTE(Alex) Check if such vpc exists
            ec2utils.get_db_item(context, vpc_id)
        item = {'vpc_id': vpc_id, 'os_id': os_security_group.id}
        if not default:
            security_group = db_api.add_item(context, 'sg', item)
        else:
            item['id'] = ec2utils.change_ec2_id_kind(vpc_id, 'sg')
            # NOTE(andrey-mp): try to add item with specific id
            # and catch exception if it exists
            security_group = db_api.restore_item(context, 'sg', item)
        return {'return': 'true', 'groupId': security_group['id']}
Example #21
def attach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if vpn_gateway['vpc_id'] and vpn_gateway['vpc_id'] != vpc['id']:
        raise exception.VpnGatewayAttachmentLimitExceeded()
    attached_vgw = ec2utils.get_attached_gateway(context, vpc['id'], 'vgw')
    if attached_vgw and attached_vgw['id'] != vpn_gateway['id']:
        raise exception.InvalidVpcState(vpc_id=vpc['id'],
                                        vgw_id=attached_vgw['id'])

    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    if not vpn_gateway['vpc_id']:
        external_network_id = None
        if not ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
            external_network_id = ec2utils.get_os_public_network(context)['id']
        neutron = clients.neutron(context)

        with common.OnCrashCleaner() as cleaner:
            _attach_vpn_gateway_item(context, vpn_gateway, vpc['id'])
            cleaner.addCleanup(_detach_vpn_gateway_item, context, vpn_gateway)

            if external_network_id:
                neutron.add_gateway_router(vpc['os_id'],
                                           {'network_id': external_network_id})
                cleaner.addCleanup(neutron.remove_gateway_router, vpc['os_id'])

            for subnet in subnets:
                _create_subnet_vpnservice(context, neutron, cleaner,
                                          subnet, vpc)
            vpn_connection_api._reset_vpn_connections(
                context, neutron, cleaner, vpn_gateway, subnets=subnets)

    return {'attachment': _format_attachment(vpn_gateway)}
Example #22
def detach_vpn_gateway(context, vpc_id, vpn_gateway_id):
    vpn_gateway = ec2utils.get_db_item(context, vpn_gateway_id)
    if vpn_gateway['vpc_id'] != vpc_id:
        raise exception.InvalidVpnGatewayAttachmentNotFound(
            vgw_id=vpn_gateway_id, vpc_id=vpc_id)

    vpc = db_api.get_item_by_id(context, vpc_id)
    neutron = clients.neutron(context)
    remove_os_gateway_router = (
        ec2utils.get_attached_gateway(context, vpc_id, 'igw') is None)
    subnets = [subnet for subnet in db_api.get_items(context, 'subnet')
               if subnet['vpc_id'] == vpc['id']]
    with common.OnCrashCleaner() as cleaner:
        _detach_vpn_gateway_item(context, vpn_gateway)
        cleaner.addCleanup(_attach_vpn_gateway_item, context, vpn_gateway,
                           vpc_id)
        vpn_connection_api._stop_gateway_vpn_connections(
            context, neutron, cleaner, vpn_gateway)
        for subnet in subnets:
            _delete_subnet_vpnservice(context, neutron, cleaner, subnet)

        if remove_os_gateway_router:
            try:
                neutron.remove_gateway_router(vpc['os_id'])
            except neutron_exception.NotFound:
                pass

    return True
Example #23
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    if igw.get('vpc_id'):
        msg_params = {'igw_id': igw['id'], 'vpc_id': igw['vpc_id']}
        msg = _('resource %(igw_id)s is already attached to '
                'network %(vpc_id)s') % msg_params
        raise exception.ResourceAlreadyAssociated(msg)
    vpc = ec2utils.get_db_item(context, vpc_id)
    if ec2utils.get_attached_gateway(context, vpc['id'], 'igw'):
        msg = _('Network %(vpc_id)s already has an internet gateway '
                'attached') % {
                    'vpc_id': vpc['id']
                }
        raise exception.InvalidParameterValue(msg)

    external_network_id = None
    if not ec2utils.get_attached_gateway(context, vpc['id'], 'vgw'):
        external_network_id = ec2utils.get_os_public_network(context)['id']
    neutron = clients.neutron(context)

    # TODO(ft): set attaching state into db
    with common.OnCrashCleaner() as cleaner:
        _attach_internet_gateway_item(context, igw, vpc['id'])
        cleaner.addCleanup(_detach_internet_gateway_item, context, igw)
        if external_network_id:
            neutron.add_gateway_router(vpc['os_id'],
                                       {'network_id': external_network_id})
    return True
Example #24
def delete_vpn_connection(context, vpn_connection_id):
    vpn_connection = ec2utils.get_db_item(context, vpn_connection_id)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpn_connection['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpn', vpn_connection)
        neutron = clients.neutron(context)
        _stop_vpn_connection(neutron, vpn_connection)
        try:
            neutron.delete_ipsecpolicy(vpn_connection['os_ipsecpolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                _('Failed to delete ipsecpolicy %(os_id)s during deleting '
                  'VPN connection %(id)s. Reason: %(reason)s'), {
                      'id': vpn_connection['id'],
                      'os_id': vpn_connection['os_ipsecpolicy_id'],
                      'reason': str(ex)
                  })
        except neutron_exception.NotFound:
            pass
        try:
            neutron.delete_ikepolicy(vpn_connection['os_ikepolicy_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                _('Failed to delete ikepolicy %(os_id)s during deleting '
                  'VPN connection %(id)s. Reason: %(reason)s'), {
                      'id': vpn_connection['id'],
                      'os_id': vpn_connection['os_ikepolicy_id'],
                      'reason': str(ex)
                  })
        except neutron_exception.NotFound:
            pass
    return True
Example #25
def attach_internet_gateway(context, internet_gateway_id, vpc_id):
    igw = ec2utils.get_db_item(context, internet_gateway_id)
    if igw.get('vpc_id'):
        msg_params = {'igw_id': igw['id'], 'vpc_id': igw['vpc_id']}
        msg = _("resource %(igw_id)s is already attached to "
                "network %(vpc_id)s") % msg_params
        raise exception.ResourceAlreadyAssociated(msg)
    vpc = ec2utils.get_db_item(context, vpc_id)
    # TODO(ft): move search by vpc_id to DB api
    for gw in db_api.get_items(context, 'igw'):
        if gw.get('vpc_id') == vpc['id']:
            msg = _("Network %(vpc_id)s already has an internet gateway "
                    "attached") % {
                        'vpc_id': vpc['id']
                    }
            raise exception.InvalidParameterValue(msg)

    os_public_network = ec2utils.get_os_public_network(context)
    neutron = clients.neutron(context)

    # TODO(ft): set attaching state into db
    with common.OnCrashCleaner() as cleaner:
        _attach_internet_gateway_item(context, igw, vpc['id'])
        cleaner.addCleanup(_detach_internet_gateway_item, context, igw)
        neutron.add_gateway_router(vpc['os_id'],
                                   {'network_id': os_public_network['id']})
    return True
Example #26
    def test_stop_vpn_in_subnet(self, delete_vpnservice, delete_subnet_vpn):
        context = base.create_context()
        cleaner = common.OnCrashCleaner()
        mock_manager = mock.Mock()
        mock_manager.attach_mock(delete_vpnservice, 'delete_vpnservice')
        mock_manager.attach_mock(delete_subnet_vpn, 'delete_subnet_vpn')

        self.set_mock_db_items(fakes.DB_VPN_CONNECTION_1,
                               fakes.DB_VPN_CONNECTION_2)
        vpn_gateway_api._stop_vpn_in_subnet(
            context, self.neutron, cleaner, copy.deepcopy(fakes.DB_SUBNET_1))
        mock_manager.assert_has_calls([
            mock.call.delete_subnet_vpn(
                context, self.neutron, cleaner, fakes.DB_SUBNET_1,
                fakes.DB_VPN_CONNECTION_1),
            mock.call.delete_subnet_vpn(
                context, self.neutron, cleaner, fakes.DB_SUBNET_1,
                fakes.DB_VPN_CONNECTION_2),
            mock.call.delete_vpnservice(
                self.neutron, fakes.ID_OS_VPNSERVICE_1,
                fakes.ID_EC2_SUBNET_1)])

        delete_subnet_vpn.reset_mock()
        delete_vpnservice.reset_mock()
        vpn_gateway_api._stop_vpn_in_subnet(
            context, self.neutron, cleaner, self.DB_SUBNET_1_NO_VPN)
        self.assertFalse(delete_subnet_vpn.called)
        self.assertFalse(delete_vpnservice.called)
Example #27
def delete_vpc(context, vpc_id):
    vpc = ec2utils.get_db_item(context, vpc_id)
    subnets = subnet_api.describe_subnets(context,
                                          filter=[{
                                              'name': 'vpc-id',
                                              'value': [vpc_id]
                                          }])['subnetSet']
    internet_gateways = internet_gateway_api.describe_internet_gateways(
        context, filter=[{
            'name': 'attachment.vpc-id',
            'value': [vpc['id']]
        }])['internetGatewaySet']
    route_tables = route_table_api.describe_route_tables(context,
                                                         filter=[{
                                                             'name':
                                                             'vpc-id',
                                                             'value':
                                                             [vpc['id']]
                                                         }])['routeTableSet']
    security_groups = security_group_api.describe_security_groups(
        context, filter=[{
            'name': 'vpc-id',
            'value': [vpc['id']]
        }])['securityGroupInfo']
    if (subnets or internet_gateways or len(route_tables) > 1
            or len(security_groups) > 1):
        msg = _("The vpc '%(vpc_id)s' has dependencies and "
                "cannot be deleted.")
        msg = msg % {'vpc_id': vpc['id']}
        raise exception.DependencyViolation(msg)

    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        db_api.delete_item(context, vpc['id'])
        cleaner.addCleanup(db_api.restore_item, context, 'vpc', vpc)
        route_table_api._delete_route_table(context,
                                            vpc['route_table_id'],
                                            cleaner=cleaner)
        if len(security_groups) > 0:
            security_group_api.delete_security_group(
                context,
                group_id=security_groups[0]['groupId'],
                delete_default=True)
        try:
            neutron.delete_router(vpc['os_id'])
        except neutron_exception.Conflict as ex:
            LOG.warning(
                _('Failed to delete router %(os_id)s during deleting '
                  'VPC %(id)s. Reason: %(reason)s'), {
                      'id': vpc['id'],
                      'os_id': vpc['os_id'],
                      'reason': str(ex)
                  })
        except neutron_exception.NotFound:
            pass

    return True
Example #28
    def test_normal_flow(self):
        obj = mock.MagicMock()

        with common.OnCrashCleaner() as cleaner:
            cleaner.addCleanup(obj.fake_clean_method)
            cleaner.addCleanup(obj.fake_clean_method_25)

        self.assertFalse(obj.fake_clean_method.called)
        self.assertFalse(obj.fake_clean_method_25.called)
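test_normal_flow above checks that the cleanups are not called when the block exits normally. A hypothetical companion test for the crash path could look like the sketch below; it is not part of the ec2api test suite, FakeError is a made-up name, and it assumes, consistent with the rollback branches in Example #1 and Example #3, that registered cleanups run with their recorded arguments once an exception escapes the block.

    def test_crash_flow(self):
        obj = mock.MagicMock()

        class FakeError(Exception):
            pass

        try:
            with common.OnCrashCleaner() as cleaner:
                cleaner.addCleanup(obj.fake_clean_method, 555, 'arg')
                raise FakeError()
        except FakeError:
            pass

        obj.fake_clean_method.assert_called_once_with(555, 'arg')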
Example #29
def create_subnet(context, vpc_id, cidr_block, availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    gateway_ip = str(netaddr.IPAddress(subnet_ipnet.first + 1))
    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    host_routes = route_table_api._get_subnet_host_routes(
        context, main_route_table, gateway_ip)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_network_body = {'network': {}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS reserves the first 4 addresses (.1 - .4), but we
            # decided not to replicate this behavior in OpenStack.
            os_subnet_body = {
                'subnet': {
                    'network_id': os_network['id'],
                    'ip_version': '4',
                    'cidr': cidr_block,
                    'host_routes': host_routes
                }
            }
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router, vpc['os_id'],
                           {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet', {
            'os_id': os_subnet['id'],
            'vpc_id': vpc['id']
        })
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        neutron.update_network(os_network['id'],
                               {'network': {
                                   'name': subnet['id']
                               }})
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {
                                  'name': subnet['id']
                              }})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {
        'subnet': _format_subnet(context, subnet, os_subnet, os_network,
                                 os_ports)
    }
Example #30
def create_subnet(context, vpc_id, cidr_block,
                  availability_zone=None):
    vpc = ec2utils.get_db_item(context, vpc_id)
    vpc_ipnet = netaddr.IPNetwork(vpc['cidr_block'])
    subnet_ipnet = netaddr.IPNetwork(cidr_block)
    if subnet_ipnet not in vpc_ipnet:
        raise exception.InvalidSubnetRange(cidr_block=cidr_block)

    main_route_table = db_api.get_item_by_id(context, vpc['route_table_id'])
    (host_routes,
     gateway_ip) = route_table_api._get_subnet_host_routes_and_gateway_ip(
            context, main_route_table, cidr_block)
    neutron = clients.neutron(context)
    with common.OnCrashCleaner() as cleaner:
        os_network_body = {'network': {}}
        try:
            os_network = neutron.create_network(os_network_body)['network']
            cleaner.addCleanup(neutron.delete_network, os_network['id'])
            # NOTE(Alex): AWS reserves the first 4 addresses (.1 - .4), but we
            # decided not to replicate this behavior in OpenStack.
            os_subnet_body = {'subnet': {'network_id': os_network['id'],
                                         'ip_version': '4',
                                         'cidr': cidr_block,
                                         'host_routes': host_routes}}
            os_subnet = neutron.create_subnet(os_subnet_body)['subnet']
            cleaner.addCleanup(neutron.delete_subnet, os_subnet['id'])
        except neutron_exception.OverQuotaClient:
            raise exception.SubnetLimitExceeded()
        try:
            neutron.add_interface_router(vpc['os_id'],
                                         {'subnet_id': os_subnet['id']})
        except neutron_exception.BadRequest:
            raise exception.InvalidSubnetConflict(cidr_block=cidr_block)
        cleaner.addCleanup(neutron.remove_interface_router,
                           vpc['os_id'], {'subnet_id': os_subnet['id']})
        subnet = db_api.add_item(context, 'subnet',
                                 {'os_id': os_subnet['id'],
                                  'vpc_id': vpc['id']})
        cleaner.addCleanup(db_api.delete_item, context, subnet['id'])
        vpn_gateway_api._start_vpn_in_subnet(context, neutron, cleaner,
                                             subnet, vpc, main_route_table)
        neutron.update_network(os_network['id'],
                               {'network': {'name': subnet['id']}})
        # NOTE(ft): In some cases we need gateway_ip to be None (see
        # _get_subnet_host_routes_and_gateway_ip). It's not set during subnet
        # creation to allow automatic configuration of the default port by
        # which the subnet is attached to the router.
        neutron.update_subnet(os_subnet['id'],
                              {'subnet': {'name': subnet['id'],
                                          'gateway_ip': gateway_ip}})
    os_ports = neutron.list_ports(tenant_id=context.project_id)['ports']
    return {'subnet': _format_subnet(context, subnet, os_subnet,
                                     os_network, os_ports)}