Example #1
    def rescue(self, instance, callback):
        """Rescue the specified instance
            - shutdown the instance VM
            - set 'bootlock' to prevent the instance from starting in rescue
            - spawn a rescue VM (the vm name-label will be instance-N-rescue)

        """
        rescue_vm_ref = VMHelper.lookup(self._session,
                                        instance.name + "-rescue")
        if rescue_vm_ref:
            raise RuntimeError(
                _("Instance is already in Rescue Mode: %s") % instance.name)

        vm_ref = self._get_vm_opaque_ref(instance)
        self._shutdown(instance, vm_ref)
        self._acquire_bootlock(vm_ref)

        instance._rescue = True
        self.spawn(instance)
        rescue_vm_ref = self._get_vm_opaque_ref(instance)

        vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
        vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
        rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
                                             vdi_ref, 1, False)

        self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
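For orientation, this pairs with unrescue() (Example #39). A hypothetical usage sketch, where vmops stands in for an instantiated VMOps object:

    # Hypothetical sketch: enter and leave rescue mode.
    vmops.rescue(instance, callback=None)    # spawns the "<name>-rescue" VM
    # ... operator repairs the guest through the rescue VM ...
    vmops.unrescue(instance, callback=None)  # tears down rescue VM, restarts instance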
Example #3
File: vmops.py Project: yosh/nova
    def create_vifs(self, instance, networks=None):
        """
        Creates vifs for an instance

        """
        vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
        logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
        if networks is None:
            admin_context = context.get_admin_context()
            networks = db.network_get_all_by_instance(admin_context,
                                                      instance['id'])
        # TODO(tr3buchet) - remove comment in multi-nic
        # this bit here about creating the vifs will be updated
        # in multi-nic to handle multiple IPs on the same network
        # and multiple networks
        # for now it works as there is only one of each
        for network in networks:
            bridge = network['bridge']
            network_ref = \
                NetworkHelper.find_network_with_bridge(self._session, bridge)

            if network_ref:
                try:
                    device = "1" if instance._rescue else "0"
                except AttributeError:
                    device = "0"

                VMHelper.create_vif(
                    self._session,
                    vm_opaque_ref,
                    network_ref,
                    instance.mac_address,
                    device)
Example #4
    def snapshot(self, instance, image_id):
        """Create snapshot from a running VM instance

        :param instance: instance to be snapshotted
        :param image_id: id of image to upload to

        Steps involved in a XenServer snapshot:

        1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
            creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
            Snapshot VHD

        2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
            a 'base-copy' VDI.  The base_copy is immutable and may be chained
            with other base_copies.  If chained, the base_copies
            coalesce together, so, we must wait for this coalescing to occur to
            get a stable representation of the data on disk.

        3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
            that will bundle the VHDs together and then push the bundle into
            Glance.
        """
        template_vm_ref = None
        try:
            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
            # call plugin to ship snapshot off to glance
            VMHelper.upload_image(
                    self._session, instance, template_vdi_uuids, image_id)
        finally:
            if template_vm_ref:
                self._destroy(instance, template_vm_ref,
                        shutdown=False, destroy_kernel_ramdisk=False)

        logging.debug(_("Finished snapshot and upload for VM %s"), instance)
Example #5
 def _create_disk(self, instance):
     user = AuthManager().get_user(instance.user_id)
     project = AuthManager().get_project(instance.project_id)
     disk_image_type = VMHelper.determine_disk_image_type(instance)
     vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
             instance.image_id, user, project, disk_image_type)
     return vdi_uuid
Example #8
 def _destroy_rescue_vbds(self, rescue_vm_ref):
     """Destroys all VBDs tied to a rescue VM."""
     vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
     for vbd_ref in vbd_refs:
         vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
          if vbd_rec.get("userdevice", None) == "1":  # instance disk is device 1
             VMHelper.unplug_vbd(self._session, vbd_ref)
             VMHelper.destroy_vbd(self._session, vbd_ref)
Example #10
    def migrate_disk_and_power_off(self, instance, dest):
        """Copies a VHD from one host machine to another

        :param instance: the instance that owns the VHD in question
        :param dest: the destination host machine
        :param disk_type: values are 'primary' or 'cow'
        """
        vm_ref = VMHelper.lookup(self._session, instance.name)

        # The primary VDI becomes the COW after the snapshot, and we can
        # identify it via the VBD. The base copy is the parent_uuid returned
        # from the snapshot creation

        base_copy_uuid = cow_uuid = None
        template_vdi_uuids = template_vm_ref = None
        try:
            # transfer the base copy
            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
            base_copy_uuid = template_vdi_uuids['image']
            vdi_ref, vm_vdi_rec = \
                    VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
            cow_uuid = vm_vdi_rec['uuid']

            params = {
                'host': dest,
                'vdi_uuid': base_copy_uuid,
                'instance_id': instance.id,
                'sr_path': VMHelper.get_sr_path(self._session)
            }

            task = self._session.async_call_plugin(
                'migration', 'transfer_vhd', {'params': pickle.dumps(params)})
            self._session.wait_for_task(task, instance.id)

            # Now power down the instance and transfer the COW VHD
            self._shutdown(instance, vm_ref, hard=False)

            params = {
                'host': dest,
                'vdi_uuid': cow_uuid,
                'instance_id': instance.id,
                'sr_path': VMHelper.get_sr_path(self._session),
            }

            task = self._session.async_call_plugin(
                'migration', 'transfer_vhd', {'params': pickle.dumps(params)})
            self._session.wait_for_task(task, instance.id)

        finally:
            if template_vm_ref:
                self._destroy(instance,
                              template_vm_ref,
                              shutdown=False,
                              destroy_kernel_ramdisk=False)

        # TODO(mdietz): we could also consider renaming these to something
        # sensible so we don't need to blindly pass around dictionaries
        return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
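The returned dictionary is consumed on the destination host: attach_disk (Example #42) reads the same 'base_copy' and 'cow' keys to move the transferred VHDs into the destination SR. A rough two-host sketch, with src_vmops and dest_vmops as hypothetical VMOps instances on the source and destination:

    # Hypothetical migrate flow across two hosts.
    disk_info = src_vmops.migrate_disk_and_power_off(instance, dest_host)
    # disk_info == {'base_copy': <vhd uuid>, 'cow': <vhd uuid>}
    new_cow_uuid = dest_vmops.attach_disk(instance, disk_info)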
Example #12
    def _create_vm(self, instance, vdi_uuid, network_info=None):
        """Create VM instance"""
        instance_name = instance.name
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is not None:
            raise exception.Duplicate(
                _('Attempted to create'
                  ' non-unique name %s') % instance_name)

        #ensure enough free memory is available
        if not VMHelper.ensure_free_mem(self._session, instance):
            LOG.exception(
                _('instance %(instance_name)s: not enough free '
                  'memory') % locals())
            db.instance_set_state(context.get_admin_context(), instance['id'],
                                  power_state.SHUTDOWN)
            return

        user = AuthManager().get_user(instance.user_id)
        project = AuthManager().get_project(instance.project_id)

        # Are we building from a pre-existing disk?
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

        disk_image_type = VMHelper.determine_disk_image_type(instance)

        kernel = None
        if instance.kernel_id:
            kernel = VMHelper.fetch_image(self._session, instance.id,
                                          instance.kernel_id, user, project,
                                          ImageType.KERNEL_RAMDISK)

        ramdisk = None
        if instance.ramdisk_id:
            ramdisk = VMHelper.fetch_image(self._session, instance.id,
                                           instance.ramdisk_id, user, project,
                                           ImageType.KERNEL_RAMDISK)

        use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
                                                 vdi_ref, disk_image_type,
                                                 instance.os_type)
        vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
                                    use_pv_kernel)

        VMHelper.create_vbd(session=self._session,
                            vm_ref=vm_ref,
                            vdi_ref=vdi_ref,
                            userdevice=0,
                            bootable=True)

        # TODO(tr3buchet) - check to make sure we have network info, otherwise
        # create it now. This goes away once nova-multi-nic hits.
        if network_info is None:
            network_info = self._get_network_info(instance)
        self.create_vifs(vm_ref, network_info)
        self.inject_network_info(instance, vm_ref, network_info)
        return vm_ref
Example #13
 def detach_volume(self, instance_name, mountpoint):
     """Detach volume storage to VM instance"""
     # Before we start, check that the VM exists
     vm_ref = VMHelper.lookup(self._session, instance_name)
     if vm_ref is None:
         raise exception.InstanceNotFound(instance_id=instance_name)
     # Detach VBD from VM
     LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s") % locals())
     device_number = VolumeHelper.mountpoint_to_number(mountpoint)
     try:
         vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref, device_number)
     except StorageError, exc:
         LOG.exception(exc)
         raise Exception(_("Unable to locate volume %s") % mountpoint)
Example #14
    def _get_snapshot(self, instance):
        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added

        logging.debug(_("Starting snapshot for VM %s"), instance)
        vm_ref = VMHelper.lookup(self._session, instance.name)

        label = "%s-snapshot" % instance.name
        try:
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                self._session, instance.id, vm_ref, label)
            return template_vm_ref, template_vdi_uuids
        except self.XenAPI.Failure, exc:
            logging.error(
                _("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals())
            return
Example #15
    def _inject_network_info(self, instance, network_info, vm_ref=None):
        """
        Generate the network info and make calls to place it into the
        xenstore and the xenstore param list.
        vm_ref can be passed in because it will sometimes be different than
        what VMHelper.lookup(session, instance.name) will find (ex: rescue)
        """
        logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)

        if vm_ref:
            # this function raises if vm_ref is not a vm_opaque_ref
            self._session.get_xenapi().VM.get_record(vm_ref)
        else:
            vm_ref = VMHelper.lookup(self._session, instance.name)

        for (network, info) in network_info:
            location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
            self.write_to_param_xenstore(vm_ref, {location: info})
            try:
                # TODO(tr3buchet): fix function call after refactor
                #self.write_to_xenstore(vm_ref, location, info)
                self._make_plugin_call('xenstore.py', 'write_record', instance,
                                       location, {'value': json.dumps(info)},
                                       vm_ref)
            except KeyError:
                # catch KeyError for domid if instance isn't running
                pass
Example #16
 def attach_volume(self, instance_name, device_path, mountpoint):
     """Attach volume storage to VM instance"""
     # Before we start, check that the VM exists
     vm_ref = VMHelper.lookup(self._session, instance_name)
     if vm_ref is None:
         raise exception.NotFound(
             _('Instance %s not found') % instance_name)
     # NOTE: No Resource Pool concept so far
     LOG.debug(
         _("Attach_volume: %(instance_name)s, %(device_path)s,"
           " %(mountpoint)s") % locals())
     # Create the iSCSI SR, and the PDB through which hosts access SRs.
     # But first, retrieve target info, like Host, IQN, LUN and SCSIID
     vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
     label = 'SR-%s' % vol_rec['volumeId']
     description = 'Disk-for:%s' % instance_name
     # Create SR
     sr_ref = VolumeHelper.create_iscsi_storage(self._session, vol_rec,
                                                label, description)
     # Introduce VDI  and attach VBD to VM
     try:
         vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
     except StorageError, exc:
         LOG.exception(exc)
         VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
         raise Exception(
             _('Unable to create VDI on SR %(sr_ref)s for'
               ' instance %(instance_name)s') % locals())
Example #17
    def plug(self, instance, network, mapping, vm_ref=None, device=None):
        if not vm_ref:
            vm_ref = VMHelper.lookup(self._session, instance.name)
        if not device:
            device = 0

        if mapping.get('should_create_vlan'):
            network_ref = self._ensure_vlan_bridge(network)
        else:
            network_ref = NetworkHelper.find_network_with_bridge(
                                        self._session, network['bridge'])
        vif_rec = {}
        vif_rec['device'] = str(device)
        vif_rec['network'] = network_ref
        vif_rec['VM'] = vm_ref
        vif_rec['MAC'] = mapping['mac']
        vif_rec['MTU'] = '1500'
        vif_rec['other_config'] = {}
        if "rxtx_cap" in mapping:
            vif_rec['qos_algorithm_type'] = "ratelimit"
            vif_rec['qos_algorithm_params'] = \
                {"kbps": str(mapping['rxtx_cap'] * 1024)}
        else:
            vif_rec['qos_algorithm_type'] = ""
            vif_rec['qos_algorithm_params'] = {}
        return vif_rec
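plug() only reads a few keys from its inputs; an illustrative network/mapping pair (values made up, vif_driver hypothetical) would be:

    # Illustrative inputs; the real dicts come from nova's network layer.
    network = {'bridge': 'xenbr0'}
    mapping = {'mac': '02:16:3e:aa:bb:cc',
               'rxtx_cap': 10}  # optional; becomes the kbps rate limit (* 1024)
    vif_rec = vif_driver.plug(instance, network, mapping, device=0)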
Example #19
    def create_vifs(self, vm_ref, network_info):
        """Creates vifs for an instance"""
        logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)

        # this function raises if vm_ref is not a vm_opaque_ref
        self._session.get_xenapi().VM.get_record(vm_ref)

        for device, (network, info) in enumerate(network_info):
            mac_address = info['mac']
            bridge = network['bridge']
            rxtx_cap = info.pop('rxtx_cap')
            network_ref = \
                NetworkHelper.find_network_with_bridge(self._session, bridge)

            VMHelper.create_vif(self._session, vm_ref, network_ref,
                                mac_address, device, rxtx_cap)
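Here network_info is an ordered sequence of (network, info) pairs, and the enumerate() index doubles as the VIF device number. A minimal illustrative value, with made-up data:

    # Illustrative structure; real entries come from _get_network_info().
    network_info = [
        ({'bridge': 'xenbr0'},                           # network dict
         {'mac': '02:16:3e:aa:bb:cc', 'rxtx_cap': 10}),  # info dict
    ]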
Example #20
    def _get_vm_opaque_ref(self, instance_or_vm):
        """
        Refactored out the common code of many methods that receive either
        a vm name or a vm instance, and want a vm instance in return.
        """
        # if instance_or_vm is a string it must be opaque ref or instance name
        if isinstance(instance_or_vm, basestring):
            obj = None
            try:
                # check for opaque ref
                obj = self._session.get_xenapi().VM.get_uuid(instance_or_vm)
                return instance_or_vm
            except self.XenAPI.Failure:
                # wasn't an opaque ref, can be an instance name
                instance_name = instance_or_vm

        # if instance_or_vm is an int/long it must be instance id
        elif isinstance(instance_or_vm, (int, long)):
            ctx = context.get_admin_context()
            instance_obj = db.instance_get(ctx, instance_or_vm)
            instance_name = instance_obj.name
        else:
            instance_name = instance_or_vm.name
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        return vm_ref
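The helper thus accepts several input shapes; a hedged usage sketch (values made up):

    # All of these resolve to the same VM opaque ref.
    self._get_vm_opaque_ref('OpaqueRef:aaaa-bbbb')  # already an opaque ref
    self._get_vm_opaque_ref('instance-00000001')    # VM name-label
    self._get_vm_opaque_ref(1)                      # nova instance id
    self._get_vm_opaque_ref(instance)               # instance object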
Example #21
 def _get_vm_opaque_ref(self, instance_or_vm):
     """Refactored out the common code of many methods that receive either
     a vm name or a vm instance, and want a vm instance in return.
     """
     vm = None
     try:
         if instance_or_vm.startswith("OpaqueRef:"):
             # Got passed an opaque ref; return it
             return instance_or_vm
         else:
             # Must be the instance name
             instance_name = instance_or_vm
     except (AttributeError, KeyError):
          # Note that the KeyError will only happen with fakes.py
         # Not a string; must be an ID or a vm instance
         if isinstance(instance_or_vm, (int, long)):
             ctx = context.get_admin_context()
             try:
                 instance_obj = db.instance_get(ctx, instance_or_vm)
                 instance_name = instance_obj.name
             except exception.NotFound:
                 # The unit tests screw this up, as they use an integer for
                 # the vm name. I'd fix that up, but that's a matter for
                 # another bug report. So for now, just try with the passed
                 # value
                 instance_name = instance_or_vm
         else:
             instance_name = instance_or_vm.name
     vm = VMHelper.lookup(self._session, instance_name)
     if vm is None:
         raise exception.NotFound(
                         _('Instance not present %s') % instance_name)
     return vm
Example #22
class VolumeOps(object):
    """
    Management class for Volume-related tasks
    """
    def __init__(self, session):
        self.XenAPI = session.get_imported_xenapi()
        self._session = session
        # Load XenAPI module in the helper classes respectively
        VolumeHelper.XenAPI = self.XenAPI
        VMHelper.XenAPI = self.XenAPI

    def create_volume_for_sm(self, volume, sr_uuid):
        LOG.debug("Creating volume for Storage Manager")

        sm_vol_rec = {}
        try:
            sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
        except self.XenAPI.Failure, exc:
            LOG.exception(exc)
            raise StorageError(_('Unable to get SR using uuid'))
        #Create VDI
        label = 'vol-' + hex(volume['id'])[:-1]
        # size presented to xenapi is in bytes, while euca api is in GB
        vdi_size = volume['size'] * 1024 * 1024 * 1024
        vdi_ref = VMHelper.create_vdi(self._session, sr_ref, label, vdi_size,
                                      False)
        vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
        sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
        return sm_vol_rec
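A hedged usage sketch: volume is a dict-like record with at least 'id' and 'size' (in GB, converted to bytes for XenAPI), and sr_uuid names an existing Storage Repository:

    # Hypothetical call: creates a 10 GB VDI on the given SR.
    volume_ops = VolumeOps(session)
    rec = volume_ops.create_volume_for_sm({'id': 1, 'size': 10}, sr_uuid)
    # rec == {'vdi_uuid': <uuid of the new VDI>}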
Example #25
 def detach_volume(self, connection_info, instance_name, mountpoint):
     """Detach volume storage to VM instance"""
     # Before we start, check that the VM exists
     vm_ref = VMHelper.lookup(self._session, instance_name)
     if vm_ref is None:
         raise exception.InstanceNotFound(instance_id=instance_name)
     # Detach VBD from VM
     LOG.debug(
         _("Detach_volume: %(instance_name)s, %(mountpoint)s") % locals())
     device_number = VolumeHelper.mountpoint_to_number(mountpoint)
     try:
         vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref,
                                               device_number)
     except StorageError, exc:
         LOG.exception(exc)
         raise Exception(_('Unable to locate volume %s') % mountpoint)
Example #29
 def _destroy_rescue_vdis(self, rescue_vm_ref):
     """Destroys all VDIs associated with a rescued VM."""
     vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
     for vdi_ref in vdi_refs:
         try:
             self._session.call_xenapi("Async.VDI.destroy", vdi_ref)
         except self.XenAPI.Failure:
             continue
Example #32
 def _start(self, instance, vm_ref=None):
     """Power on a VM instance"""
     if not vm_ref:
         vm_ref = VMHelper.lookup(self._session, instance.name)
     if vm_ref is None:
          raise exception.InstanceNotFound(instance_id=instance.id)
     LOG.debug(_("Starting instance %s"), instance.name)
     self._session.call_xenapi('VM.start', vm_ref, False, False)
Example #33
 def reset_network(self, instance, vm_ref=None):
     """Creates uuid arg to pass to make_agent_call and calls it."""
     if not vm_ref:
         vm_ref = VMHelper.lookup(self._session, instance.name)
     args = {'id': str(uuid.uuid4())}
     # TODO(tr3buchet): fix function call after refactor
     #resp = self._make_agent_call('resetnetwork', instance, '', args)
     resp = self._make_plugin_call('agent', 'resetnetwork', instance, '',
                                                            args, vm_ref)
Example #34
    def destroy(self, instance):
        """
        Destroy VM instance

        This is the method exposed by xenapi_conn.destroy(). The rest of the
        destroy_* methods are internal.
        """
        vm = VMHelper.lookup(self._session, instance.name)
        return self._destroy(instance, vm, shutdown=True)
Example #37
    def link_disks(self, instance, base_copy_uuid, cow_uuid):
        """Links the base copy VHD to the COW via the XAPI plugin."""
        new_base_copy_uuid = str(uuid.uuid4())
        new_cow_uuid = str(uuid.uuid4())
        params = {'instance_id': instance.id,
                  'old_base_copy_uuid': base_copy_uuid,
                  'old_cow_uuid': cow_uuid,
                  'new_base_copy_uuid': new_base_copy_uuid,
                  'new_cow_uuid': new_cow_uuid,
                  'sr_path': VMHelper.get_sr_path(self._session), }

        task = self._session.async_call_plugin('migration',
                'move_vhds_into_sr', {'params': pickle.dumps(params)})
        self._session.wait_for_task(task, instance.id)

        # Now we rescan the SR so we find the VHDs
        VMHelper.scan_default_sr(self._session)

        return new_cow_uuid
Example #39
    def unrescue(self, instance, callback):
        """Unrescue the specified instance.

            - unplug the instance VM's disk from the rescue VM.
            - teardown the rescue VM.
            - release the bootlock to allow the instance VM to start.

        """
        rescue_vm_ref = VMHelper.lookup(self._session,
                                        "%s-rescue" % instance.name)

        if not rescue_vm_ref:
            raise exception.InstanceNotInRescueMode(instance_id=instance.id)

        original_vm_ref = VMHelper.lookup(self._session, instance.name)
        instance._rescue = False

        self._destroy_rescue_instance(rescue_vm_ref)
        self._release_bootlock(original_vm_ref)
        self._start(instance, original_vm_ref)
Example #41
    def destroy(self, instance):
        """
        Destroy VM instance

        This is the method exposed by xenapi_conn.destroy(). The rest of the
        destroy_* methods are internal.
        """
        instance_id = instance.id
        LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
        vm_ref = VMHelper.lookup(self._session, instance.name)
        return self._destroy(instance, vm_ref, shutdown=True)
Example #42
File: vmops.py Project: yosh/nova
    def attach_disk(self, instance, disk_info):
        """Links the base copy VHD to the COW via the XAPI plugin"""
        vm_ref = VMHelper.lookup(self._session, instance.name)
        new_base_copy_uuid = str(uuid.uuid4())
        new_cow_uuid = str(uuid.uuid4())
        params = {'instance_id': instance.id,
                  'old_base_copy_uuid': disk_info['base_copy'],
                  'old_cow_uuid': disk_info['cow'],
                  'new_base_copy_uuid': new_base_copy_uuid,
                  'new_cow_uuid': new_cow_uuid,
                  'sr_path': VMHelper.get_sr_path(self._session), }

        task = self._session.async_call_plugin('migration',
                'move_vhds_into_sr', {'params': pickle.dumps(params)})
        self._session.wait_for_task(task, instance.id)

        # Now we rescan the SR so we find the VHDs
        VMHelper.scan_default_sr(self._session)

        return new_cow_uuid
Example #44
    def _destroy_vdis(self, instance, vm):
        """Destroys all VDIs associated with a VM """
        vdis = VMHelper.lookup_vm_vdis(self._session, vm)

        if not vdis:
            return

        for vdi in vdis:
            try:
                task = self._session.call_xenapi('Async.VDI.destroy', vdi)
                self._session.wait_for_task(instance.id, task)
            except self.XenAPI.Failure, exc:
                LOG.exception(exc)
Example #45
    def poll_rescued_instances(self, timeout):
        """Look for expirable rescued instances.

            - forcibly exit rescue mode for any instances that have been
              in rescue mode for >= the provided timeout

        """
        last_ran = self.poll_rescue_last_ran
        if not last_ran:
            # We need a base time to start tracking.
            self.poll_rescue_last_ran = utils.utcnow()
            return

        if not utils.is_older_than(last_ran, timeout):
            # Do not run. Let's bail.
            return

        # Update the time tracker and proceed.
        self.poll_rescue_last_ran = utils.utcnow()

        rescue_vms = []
        for instance in self.list_instances():
            if instance.endswith("-rescue"):
                rescue_vms.append(dict(name=instance,
                                       vm_ref=VMHelper.lookup(self._session,
                                                              instance)))

        for vm in rescue_vms:
            rescue_vm_ref = vm["vm_ref"]

            self._destroy_rescue_instance(rescue_vm_ref)

            original_name = vm["name"].split("-rescue", 1)[0]
            original_vm_ref = VMHelper.lookup(self._session, original_name)

            self._release_bootlock(original_vm_ref)
            self._session.call_xenapi("VM.start", original_vm_ref, False,
                                      False)
Example #47
    def snapshot(self, instance, image_id):
        """ Create snapshot from a running VM instance

        :param instance: instance to be snapshotted
        :param image_id: id of image to upload to

        Steps involved in a XenServer snapshot:

        1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
            creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
            Snapshot VHD

        2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
            a 'base-copy' VDI.  The base_copy is immutable and may be chained
            with other base_copies.  If chained, the base_copies
            coalesce together, so, we must wait for this coalescing to occur to
            get a stable representation of the data on disk.

        3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
            that will bundle the VHDs together and then push the bundle into
            Glance.
        """

        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added

        logging.debug(_("Starting snapshot for VM %s"), instance)
        vm_ref = VMHelper.lookup(self._session, instance.name)

        label = "%s-snapshot" % instance.name
        try:
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                self._session, instance.id, vm_ref, label)
        except self.XenAPI.Failure, exc:
            logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
                    % locals())
            return
Example #48
    def list_instances_detail(self):
        """List VM instances, returning InstanceInfo objects."""
        instance_infos = []
        for vm_ref in self._session.get_xenapi().VM.get_all():
            vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
            if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
                name = vm_rec["name_label"]

                # TODO(justinsb): This a roundabout way to map the state
                openstack_format = VMHelper.compile_info(vm_rec)
                state = openstack_format['state']

                instance_info = driver.InstanceInfo(name, state)
                instance_infos.append(instance_info)
        return instance_infos
Example #49
    def _destroy_vdis(self, instance, vm_ref):
        """Destroys all VDIs associated with a VM"""
        instance_id = instance.id
        LOG.debug(_("Destroying VDIs for Instance %(instance_id)s") % locals())
        vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)

        if not vdi_refs:
            return

        for vdi_ref in vdi_refs:
            try:
                task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
                self._session.wait_for_task(task, instance.id)
            except self.XenAPI.Failure, exc:
                LOG.exception(exc)
Example #52
    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach volume storage to VM instance"""
        # Before we start, check that the VM exists
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # NOTE: No Resource Pool concept so far
        LOG.debug(
            _("Attach_volume: %(connection_info)s, %(instance_name)s,"
              " %(mountpoint)s") % locals())
        driver_type = connection_info['driver_volume_type']
        if driver_type not in ['iscsi', 'xensm']:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)

        data = connection_info['data']
        if 'name_label' not in data:
            label = 'tempSR-%s' % data['volume_id']
        else:
            label = data['name_label']
            del data['name_label']

        if 'name_description' not in data:
            desc = 'Disk-for:%s' % instance_name
        else:
            desc = data['name_description']

        LOG.debug(connection_info)
        sr_params = {}
        if u'sr_uuid' not in data:
            sr_params = VolumeHelper.parse_volume_info(connection_info,
                                                       mountpoint)
            uuid = "FA15E-D15C-" + str(sr_params['id'])
            sr_params['sr_type'] = 'iscsi'
        else:
            uuid = data['sr_uuid']
            for k in data['introduce_sr_keys']:
                sr_params[k] = data[k]

        sr_params['name_description'] = desc

        # Introduce SR
        try:
            sr_ref = self.introduce_sr(uuid, label, sr_params)
            LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
        except self.XenAPI.Failure, exc:
            LOG.exception(exc)
            raise StorageError(_('Unable to introduce Storage Repository'))
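An illustrative connection_info for the iSCSI path, with made-up values; the exact 'data' keys that VolumeHelper.parse_volume_info consumes vary by nova version, so the target_* names below are assumptions:

    # Illustrative only; the real dict comes from the volume driver.
    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            'volume_id': 1,
            'target_portal': '192.168.1.10:3260',  # assumed key name
            'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
            'target_lun': 1,
        },
    }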
Example #54
 def _destroy_vm(self, instance, vm):
     """Destroys a VM record """
     try:
         kernel = None
         ramdisk = None
         if instance.kernel_id or instance.ramdisk_id:
             (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
                                 self._session, vm)
         task1 = self._session.call_xenapi('Async.VM.destroy', vm)
         LOG.debug(_("Removing kernel/ramdisk files"))
         fn = "remove_kernel_ramdisk"
         args = {}
         if kernel:
             args['kernel-file'] = kernel
         if ramdisk:
             args['ramdisk-file'] = ramdisk
         task2 = self._session.async_call_plugin('glance', fn, args)
         self._session.wait_for_task(instance.id, task1)
         self._session.wait_for_task(instance.id, task2)
         LOG.debug(_("kernel/ramdisk files removed"))
     except self.XenAPI.Failure, exc:
         LOG.exception(exc)
Example #55
    def _destroy_kernel_ramdisk(self, instance, vm_ref):
        """
        Three situations can occur:

            1. We have neither a ramdisk nor a kernel, in which case we are a
               RAW image and can omit this step

            2. We have one or the other, in which case, we should flag as an
               error

            3. We have both, in which case we safely remove both the kernel
               and the ramdisk.
        """
        instance_id = instance.id
        if not instance.kernel_id and not instance.ramdisk_id:
            # 1. No kernel or ramdisk
            LOG.debug(
                _("Instance %(instance_id)s using RAW or VHD, "
                  "skipping kernel and ramdisk deletion") % locals())
            return

        if not (instance.kernel_id and instance.ramdisk_id):
            # 2. We only have kernel xor ramdisk
            raise exception.NotFound(
                _("Instance %(instance_id)s has a kernel or ramdisk but not "
                  "both") % locals())

        # 3. We have both kernel and ramdisk
        (kernel,
         ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session, vm_ref)

        LOG.debug(_("Removing kernel/ramdisk files"))

        args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
        task = self._session.async_call_plugin('glance',
                                               'remove_kernel_ramdisk', args)
        self._session.wait_for_task(task, instance.id)

        LOG.debug(_("kernel/ramdisk files removed"))
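
VMHelper.lookup_kernel_ramdisk itself isn't among these snippets. A minimal sketch of what it plausibly does, assuming the file paths live in the VM record's PV_kernel/PV_ramdisk fields (standard XenAPI fields, where an empty string means unset):

    def lookup_kernel_ramdisk(session, vm_ref):
        # Empty strings mean "not set" in XenAPI, hence the or-None
        vm_rec = session.get_xenapi().VM.get_record(vm_ref)
        return (vm_rec.get('PV_kernel') or None,
                vm_rec.get('PV_ramdisk') or None)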
Example #56
0
    def plug(self, instance, network, mapping, vm_ref=None, device=None):
        if not vm_ref:
            vm_ref = VMHelper.lookup(self._session, instance.name)

        if not device:
            device = 0

        # with OVS model, always plug into an OVS integration bridge
        # that is already created
        network_ref = NetworkHelper.find_network_with_bridge(self._session,
                                       FLAGS.xenapi_ovs_integration_bridge)
        vif_rec = {}
        vif_rec['device'] = str(device)
        vif_rec['network'] = network_ref
        vif_rec['VM'] = vm_ref
        vif_rec['MAC'] = mapping['mac']
        vif_rec['MTU'] = '1500'
        vif_rec['qos_algorithm_type'] = ""
        vif_rec['qos_algorithm_params'] = {}
        # OVS on the hypervisor monitors this key and uses it to
        # set the iface-id attribute
        vif_rec['other_config'] = {"nicira-iface-id": mapping['vif_uuid']}
        return vif_rec
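
Note that plug only builds the VIF record; creating the actual VIF is left to the caller. A hedged usage sketch (VIF.create is standard XenAPI and takes a full record; the self._vif_driver attribute name is assumed):

    vif_rec = self._vif_driver.plug(instance, network, mapping,
                                    vm_ref=vm_ref, device=device)
    # VIF.create returns the new VIF's opaque ref
    vif_ref = self._session.get_xenapi().VIF.create(vif_rec)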
Example #57
0
    def _get_vm_opaque_ref(self, instance_or_vm):
        """Refactored out the common code of many methods that receive either
        a vm name or a vm instance, and want a vm instance in return.
        """
        # if instance_or_vm is a string it must be opaque ref or instance name
        if isinstance(instance_or_vm, basestring):
            try:
                # check for opaque ref; get_record only succeeds for refs
                self._session.get_xenapi().VM.get_record(instance_or_vm)
                return instance_or_vm
            except self.XenAPI.Failure:
                # wasn't an opaque ref, must be an instance name
                instance_name = instance_or_vm

        # if instance_or_vm is an int/long it must be instance id
        elif isinstance(instance_or_vm, (int, long)):
            ctx = context.get_admin_context()
            try:
                instance_obj = db.instance_get(ctx, instance_or_vm)
                instance_name = instance_obj.name
            except exception.NotFound:
                # The unit tests screw this up, as they use an integer for
                # the vm name. I'd fix that up, but that's a matter for
                # another bug report. So for now, just try with the passed
                # value
                instance_name = instance_or_vm

        # otherwise instance_or_vm is an instance object
        else:
            instance_name = instance_or_vm.name
        vm_ref = VMHelper.lookup(self._session, instance_name)
        if vm_ref is None:
            raise exception.NotFound(
                _('Instance not present %s') % instance_name)
        return vm_ref
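
All three accepted argument types resolve to the same opaque reference; for example (the name and id below are illustrative):

    vm_ref = self._get_vm_opaque_ref('instance-00000001')  # instance name
    vm_ref = self._get_vm_opaque_ref(1)                    # instance id
    vm_ref = self._get_vm_opaque_ref(instance)             # instance object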
Example #58
0
    def unrescue(self, instance, callback):
        """Unrescue the specified instance
            - unplug the instance VM's disk from the rescue VM
            - teardown the rescue VM
            - release the bootlock to allow the instance VM to start

        """
        rescue_vm_ref = VMHelper.lookup(self._session,
                                        instance.name + "-rescue")

        if not rescue_vm_ref:
            raise exception.NotFound(
                _("Instance is not in Rescue Mode: %s") % instance.name)

        original_vm_ref = self._get_vm_opaque_ref(instance)
        vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)

        instance._rescue = False

        for vbd_ref in vbd_refs:
            vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
            if vbd_rec["userdevice"] == "1":
                # userdevice 1 is the rescued instance's disk; unplug and
                # destroy only that VBD, leaving the rescue VM's own disk
                VMHelper.unplug_vbd(self._session, vbd_ref)
                VMHelper.destroy_vbd(self._session, vbd_ref)

        task1 = self._session.call_xenapi("Async.VM.hard_shutdown",
                                          rescue_vm_ref)
        self._session.wait_for_task(task1, instance.id)

        vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
        for vdi_ref in vdi_refs:
            try:
                task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
                self._session.wait_for_task(task, instance.id)
            except self.XenAPI.Failure:
                continue

        task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref)
        self._session.wait_for_task(task2, instance.id)

        self._release_bootlock(original_vm_ref)
        self._start(instance, original_vm_ref)
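
_release_bootlock is not shown in these snippets; since rescue blocks the original VM from starting, a plausible implementation clears the 'start' entry from XenAPI's blocked_operations field (VM.remove_from_blocked_operations is a standard XenAPI call):

    def _release_bootlock(self, vm_ref):
        # Allow the VM to boot again by unblocking the 'start' operation
        self._session.call_xenapi('VM.remove_from_blocked_operations',
                                  vm_ref, 'start')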
Example #59
0
        description = 'Disk-for:%s' % instance_name
        # Create SR
        sr_ref = VolumeHelper.create_iscsi_storage(self._session, vol_rec,
                                                   label, description)
        # Introduce VDI and attach VBD to VM
        try:
            vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
        except StorageError, exc:
            LOG.exception(exc)
            VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
            raise Exception(
                _('Unable to create VDI on SR %(sr_ref)s for'
                  ' instance %(instance_name)s') % locals())
        else:
            try:
                vbd_ref = VMHelper.create_vbd(self._session, vm_ref, vdi_ref,
                                              vol_rec['deviceNumber'], False)
            except self.XenAPI.Failure, exc:
                LOG.exception(exc)
                VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
                raise Exception(
                    _('Unable to use SR %(sr_ref)s for'
                      ' instance %(instance_name)s') % locals())
            else:
                try:
                    task = self._session.call_xenapi('Async.VBD.plug',
                                                     vbd_ref)
                    self._session.wait_for_task(task, vol_rec['deviceNumber'])
                except self.XenAPI.Failure, exc:
                    LOG.exception(exc)
                    VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
                    raise Exception(
                        _('Unable to attach volume to instance %s')
                        % instance_name)
Example #60
0
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # Detach VBD from VM
        LOG.debug(
            _("Detach_volume: %(instance_name)s, %(mountpoint)s") % locals())
        device_number = VolumeHelper.mountpoint_to_number(mountpoint)
        try:
            vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref,
                                                  device_number)
        except StorageError, exc:
            LOG.exception(exc)
            raise Exception(_('Unable to locate volume %s') % mountpoint)

        try:
            sr_ref = VolumeHelper.find_sr_from_vbd(self._session, vbd_ref)
            VMHelper.unplug_vbd(self._session, vbd_ref)
        except StorageError, exc:
            LOG.exception(exc)
            raise Exception(_('Unable to detach volume %s') % mountpoint)
        try:
            VMHelper.destroy_vbd(self._session, vbd_ref)
        except StorageError, exc:
            LOG.exception(exc)
            raise Exception(_('Unable to destroy vbd %s') % mountpoint)

        # Forget SR only if no other volumes on this host are using it
        try:
            VolumeHelper.purge_sr(self._session, sr_ref)
        except StorageError, exc:
            LOG.exception(exc)
            raise Exception(_('Error purging SR %s') % sr_ref)
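
VolumeHelper.mountpoint_to_number maps the guest device path to the XenAPI userdevice number used above. A simplified sketch, assuming unpartitioned /dev/sdX or /dev/xvdX names (the real helper handles more formats):

    def mountpoint_to_number(mountpoint):
        # '/dev/sdc' and '/dev/xvdc' both map to userdevice 2
        dev = mountpoint.split('/')[-1]
        return ord(dev[-1]) - ord('a')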