Code example #1
File: floating_ips.py Project: HybridF5/jacket
    def disassociate_floating_ip(self, context, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating IP from its fixed IP.

        Makes sure everything makes sense then calls _disassociate_floating_ip,
        rpc'ing to correct host if i'm not it.
        """
        floating_ip = objects.FloatingIP.get_by_address(context, address)

        # handle auto assigned
        if not affect_auto_assigned and floating_ip.auto_assigned:
            raise exception.CannotDisassociateAutoAssignedFloatingIP()

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # make sure floating ip is associated
        if not floating_ip.fixed_ip_id:
            floating_address = floating_ip.address
            raise exception.FloatingIpNotAssociated(address=floating_address)

        fixed_ip = objects.FixedIP.get_by_id(context, floating_ip.fixed_ip_id)

        # send to correct host, unless i'm the correct host
        network = objects.Network.get_by_id(context.elevated(),
                                            fixed_ip.network_id)
        interface = floating_ip.interface
        if network.multi_host:
            instance = objects.Instance.get_by_uuid(
                context, fixed_ip.instance_uuid)
            service = objects.Service.get_by_host_and_binary(
                context.elevated(), instance.host, 'compute-network')
            if service and self.servicegroup_api.service_is_up(service):
                host = instance.host
            else:
                # NOTE(vish): if the service is down just deallocate the data
                #             locally. Set the host to local so the call will
                #             not go over rpc and set interface to None so the
                #             teardown in the driver does not happen.
                host = self.host
                interface = None
        else:
            host = network.host

        if host == self.host:
            # i'm the correct host
            self._disassociate_floating_ip(context, address, interface,
                                           fixed_ip.instance_uuid)
        else:
            # send to correct host
            self.network_rpcapi._disassociate_floating_ip(context, address,
                    interface, host, fixed_ip.instance_uuid)
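
The subtle part above is choosing where the actual teardown runs. On a multi-host network the work should happen on the instance's compute host, but only while that host's compute-network service is reported up; if it is down, the data is deallocated locally and interface is set to None so the driver teardown is skipped. On a single-host network the network's own host is used. Below is a minimal standalone sketch of that decision, with the Nova objects and the service-group check replaced by plain parameters (every name in the sketch is illustrative, not part of this project's API):

def pick_disassociation_target(local_host, network_host, multi_host,
                               instance_host, service_is_up, interface):
    """Return the (host, interface) pair for the disassociation call."""
    if multi_host:
        if service_is_up:
            return instance_host, interface
        # Service is down: deallocate the data locally and drop the
        # interface so the driver-level teardown does not run.
        return local_host, None
    return network_host, interface


# A result equal to the local host would mean calling
# _disassociate_floating_ip directly; any other host would mean sending
# the request over RPC to that host instead.
host, interface = pick_disassociation_target(
    local_host='net1', network_host='net1', multi_host=True,
    instance_host='compute7', service_is_up=False, interface='eth0')
assert (host, interface) == ('net1', None)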
Code example #2
File: volume_types.py Project: HybridF5/jacket
def destroy(context, id):
    """Marks volume types as deleted."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    else:
        elevated = context if context.is_admin else context.elevated()
        db.volume_type_destroy(elevated, id)
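
Every volume_types helper in this listing starts the same way: reuse the caller's context if it is already an admin context, otherwise work on an elevated copy so the database call is permitted. A tiny self-contained sketch of that idiom (DummyContext and admin_context_for are illustrative names, not part of the project):

class DummyContext(object):
    """Stand-in for the real request context used in these examples."""

    def __init__(self, is_admin=False):
        self.is_admin = is_admin

    def elevated(self):
        # The real context returns an admin-capable copy of itself.
        return DummyContext(is_admin=True)


def admin_context_for(context):
    """Reuse an admin context, otherwise elevate a copy of it."""
    return context if context.is_admin else context.elevated()


assert admin_context_for(DummyContext(is_admin=True)).is_admin
assert admin_context_for(DummyContext(is_admin=False)).is_admin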
Code example #3
File: floating_ips.py Project: HybridF5/jacket
    def associate_floating_ip(self, context, floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating IP with a fixed IP.

        Makes sure everything makes sense then calls _associate_floating_ip,
        rpc'ing to correct host if i'm not it.

        Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that the calling
        side has already verified that the fixed_address is legal by
        checking access to the instance.
        """
        floating_ip = objects.FloatingIP.get_by_address(context,
                                                        floating_address)
        # handle auto_assigned
        if not affect_auto_assigned and floating_ip.auto_assigned:
            return

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # disassociate any already associated
        orig_instance_uuid = None
        if floating_ip.fixed_ip_id:
            # find previously associated instance
            fixed_ip = floating_ip.fixed_ip
            if str(fixed_ip.address) == fixed_address:
                # NOTE(vish): already associated to this address
                return
            orig_instance_uuid = fixed_ip.instance_uuid

            self.disassociate_floating_ip(context, floating_address)

        fixed_ip = objects.FixedIP.get_by_address(context, fixed_address)

        # send to correct host, unless i'm the correct host
        network = objects.Network.get_by_id(context.elevated(),
                                            fixed_ip.network_id)
        if network.multi_host:
            instance = objects.Instance.get_by_uuid(
                context, fixed_ip.instance_uuid)
            host = instance.host
        else:
            host = network.host

        interface = floating_ip.interface
        if host == self.host:
            # i'm the correct host
            self._associate_floating_ip(context, floating_address,
                                        fixed_address, interface,
                                        fixed_ip.instance_uuid)
        else:
            # send to correct host
            self.network_rpcapi._associate_floating_ip(context,
                    floating_address, fixed_address, interface, host,
                    fixed_ip.instance_uuid)

        return orig_instance_uuid
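
Re-association follows a small protocol of its own: if the floating IP already points at the requested fixed address the call returns immediately; otherwise any existing association is torn down first, and the UUID of the previously associated instance is returned so the caller can refresh that instance's network info. A compact sketch of just that decision, with plain values in place of the FloatingIP/FixedIP objects (names are illustrative):

def plan_reassociation(current_fixed_address, current_instance_uuid,
                       requested_fixed_address):
    """Return (disassociate_first, orig_instance_uuid, proceed)."""
    if current_fixed_address is None:
        # Nothing associated yet; just associate.
        return False, None, True
    if current_fixed_address == requested_fixed_address:
        # Already associated to this address; nothing to do.
        return False, None, False
    # Associated elsewhere: tear that down and remember the old instance.
    return True, current_instance_uuid, True


assert plan_reassociation(None, None, '10.0.0.5') == (False, None, True)
assert plan_reassociation('10.0.0.5', 'uuid-a', '10.0.0.5') == (False, None, False)
assert plan_reassociation('10.0.0.9', 'uuid-a', '10.0.0.5') == (True, 'uuid-a', True)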
Code example #4
File: volume_types.py Project: HybridF5/jacket
def remove_volume_type_access(context, volume_type_id, project_id):
    """Remove access to volume type for project_id."""
    if volume_type_id is None:
        msg = _("volume_type_id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
    if is_public_volume_type(elevated, volume_type_id):
        msg = _("Type access modification is not applicable to public volume "
                "type.")
        raise exception.InvalidVolumeType(reason=msg)
    return db.volume_type_access_remove(elevated, volume_type_id, project_id)
Code example #5
File: floating_ips.py Project: HybridF5/jacket
    def deallocate_for_instance(self, context, **kwargs):
        """Handles deallocating floating IP resources for an instance.

        calls super class deallocate_for_instance() as well.

        rpc.called by network_api
        """
        if 'instance' in kwargs:
            instance_uuid = kwargs['instance'].uuid
        else:
            instance_uuid = kwargs['instance_id']
            if not uuidutils.is_uuid_like(instance_uuid):
                # NOTE(francois.charlier): in some cases the instance might be
                # deleted before the IPs are released, so we need to get
                # deleted instances too
                instance = objects.Instance.get_by_id(
                    context.elevated(read_deleted='yes'), instance_uuid)
                instance_uuid = instance.uuid

        try:
            fixed_ips = objects.FixedIPList.get_by_instance_uuid(
                context, instance_uuid)
        except exception.FixedIpNotFoundForInstance:
            fixed_ips = []
        # add to kwargs so we can pass to super to save a db lookup there
        kwargs['fixed_ips'] = fixed_ips
        for fixed_ip in fixed_ips:
            fixed_id = fixed_ip.id
            floating_ips = objects.FloatingIPList.get_by_fixed_ip_id(context,
                                                                     fixed_id)
            # disassociate floating ips related to fixed_ip
            for floating_ip in floating_ips:
                address = str(floating_ip.address)
                try:
                    self.disassociate_floating_ip(context,
                                                  address,
                                                  affect_auto_assigned=True)
                except exception.FloatingIpNotAssociated:
                    LOG.info(_LI("Floating IP %s is not associated. Ignore."),
                             address)
                # deallocate if auto_assigned
                if floating_ip.auto_assigned:
                    self.deallocate_floating_ip(context, address,
                                                affect_auto_assigned=True)

        # call the next inherited class's deallocate_for_instance()
        # which is currently the NetworkManager version
        # call this after so floating IPs are handled first
        super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
Code example #6
File: volume_types.py Project: HybridF5/jacket
def create(context,
           name,
           extra_specs=None,
           is_public=True,
           projects=None,
           description=None):
    """Creates volume types."""
    extra_specs = extra_specs or {}
    projects = projects or []
    elevated = context if context.is_admin else context.elevated()
    try:
        type_ref = db.volume_type_create(elevated,
                                         dict(name=name,
                                              extra_specs=extra_specs,
                                              is_public=is_public,
                                              description=description),
                                         projects=projects)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        raise exception.VolumeTypeCreateFailed(name=name,
                                               extra_specs=extra_specs)
    return type_ref
Code example #7
File: floating_ips.py Project: HybridF5/jacket
    def allocate_for_instance(self, context, **kwargs):
        """Handles allocating the floating IP resources for an instance.

        calls super class allocate_for_instance() as well

        rpc.called by network_api
        """
        instance_uuid = kwargs.get('instance_id')
        if not uuidutils.is_uuid_like(instance_uuid):
            instance_uuid = kwargs.get('instance_uuid')
        project_id = kwargs.get('project_id')
        # call the next inherited class's allocate_for_instance()
        # which is currently the NetworkManager version
        # do this first so fixed ip is already allocated
        nw_info = super(FloatingIP, self).allocate_for_instance(context,
                                                                **kwargs)
        if CONF.auto_assign_floating_ip:
            context = context.elevated()
            # allocate a floating ip
            floating_address = self.allocate_floating_ip(context, project_id,
                True)
            LOG.debug("floating IP allocation for instance "
                      "|%s|", floating_address,
                      instance_uuid=instance_uuid, context=context)

            # get the first fixed address belonging to the instance
            fixed_ips = nw_info.fixed_ips()
            fixed_address = fixed_ips[0]['address']

            # associate the floating ip to fixed_ip
            self.associate_floating_ip(context,
                                       floating_address,
                                       fixed_address,
                                       affect_auto_assigned=True)

            # create a fresh set of network info that contains the floating ip
            nw_info = self.get_instance_nw_info(context, **kwargs)

        return nw_info
Code example #8
File: volume_types.py Project: HybridF5/jacket
def update(context, id, name, description, is_public=None):
    """Update volume type by id."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
    old_volume_type = get_volume_type(elevated, id)
    try:
        type_updated = db.volume_type_update(elevated,
                                             id,
                                             dict(name=name,
                                                  description=description,
                                                  is_public=is_public))
        # Rename resource in quota if volume type name is changed.
        if name:
            old_type_name = old_volume_type.get('name')
            if old_type_name != name:
                QUOTAS.update_quota_resource(elevated,
                                             old_type_name,
                                             name)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        raise exception.VolumeTypeUpdateFailed(id=id)
    return type_updated
Code example #9
File: api.py Project: HybridF5/jacket
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False, snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.')
                       % {'vol1': volume_id,
                          'vol2': snapshot.volume_id})
                raise exception.InvalidVolume(reason=msg)
        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".')
                   % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        host = self._get_available_backup_service_host(
            None, volume.availability_zone,
            volume_utils.extract_host(volume.host, 'host'))

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = storage.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp']
                                         < snapshot['created_at']))
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = storage.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
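
The parent selection for an incremental backup above is compressed into a single max() call with a conditional key: with no snapshot involved, the backup with the newest data_timestamp wins; when backing up from a snapshot, only backups whose data_timestamp is earlier than the snapshot's created_at are eligible, and everything else is mapped to a sentinel datetime so it can never be chosen. A standalone sketch of the same rule, using plain dicts in place of Backup objects and the 8:00/8:10/8:20 example from the comment (all names in the sketch are illustrative):

from datetime import datetime, timezone

_NEVER = datetime(1, 1, 1, tzinfo=timezone.utc)  # sentinel: cannot be the latest


def pick_parent_backup(backups, snapshot_created_at=None):
    """Return the backup to use as the incremental parent, or None."""
    if not backups:
        return None

    def eligibility_key(backup):
        ts = backup['data_timestamp']
        if snapshot_created_at is None or ts < snapshot_created_at:
            return ts
        return _NEVER  # newer than the snapshot, so not a valid parent

    return max(backups, key=eligibility_key)


utc = timezone.utc
backups = [
    {'id': 'b-0800', 'data_timestamp': datetime(2016, 1, 1, 8, 0, tzinfo=utc)},
    {'id': 'b-0820', 'data_timestamp': datetime(2016, 1, 1, 8, 20, tzinfo=utc)},
]
snapshot_at = datetime(2016, 1, 1, 8, 10, tzinfo=utc)
assert pick_parent_backup(backups)['id'] == 'b-0820'               # no snapshot
assert pick_parent_backup(backups, snapshot_at)['id'] == 'b-0800'  # snapshot at 8:10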
Code example #10
File: xvp.py Project: bopopescu/jacket
    def teardown_console(self, context, console):
        """Tears down actual proxies."""
        self._rebuild_xvp_conf(context.elevated())
Code example #11
File: xvp.py Project: bopopescu/jacket
    def setup_console(self, context, console):
        """Sets up actual proxies."""
        self._rebuild_xvp_conf(context.elevated())