def spawn(self, instance):
    """
    Create a new virtual environment on the container platform.

    The given parameter is an instance of nova.compute.service.Instance.
    This function should use the data there to guide the creation of the
    new instance.

    The work will be done asynchronously.  This function returns a task
    that allows the caller to detect when it is complete.

    Once this successfully completes, the instance should be running
    (power_state.RUNNING).

    If this fails, any partial instance should be completely cleaned up,
    and the container platform should be in the state that it was before
    this call began.
    """
    # Update state to inform the nova stack that the VE is launching
    db.instance_set_state(context.get_admin_context(),
                          instance["id"],
                          power_state.NOSTATE,
                          "launching")
    LOG.debug("instance %s: is launching" % instance["name"])

    # Go through the steps of creating a container
    # TODO(imsplitbit): Need to add conditionals around this stuff to make
    # it more durable during failure. And roll back changes made leading
    # up to the error.
    self._cache_image(instance)
    self._create_vz(instance)
    self._set_vz_os_hint(instance)
    self._configure_vz(instance)
    self._set_name(instance)
    self._add_netif(instance)
    self._add_ip(instance)
    self._set_hostname(instance)
    self._set_nameserver(instance)
    self._start(instance)
    self._initial_secure_host(instance)

    # Begin making our looping async call
    timer = utils.LoopingCall(f=None)

    # I stole this from the libvirt driver but it is appropriate to
    # have this looping timer call so that if a VE doesn't start right
    # away we can defer all of this.
    def _wait_for_boot():
        try:
            state = self.get_info(instance["name"])["state"]
            db.instance_set_state(context.get_admin_context(),
                                  instance["id"], state)
            if state == power_state.RUNNING:
                LOG.debug("instance %s: booted" % instance["name"])
                timer.stop()
        except Exception:
            # NOTE: narrowed from a bare ``except:`` so the polling loop
            # cannot swallow KeyboardInterrupt/SystemExit.
            LOG.exception("instance %s: failed to boot" % instance["name"])
            db.instance_set_state(context.get_admin_context(),
                                  instance["id"],
                                  power_state.SHUTDOWN)
            timer.stop()

    timer.f = _wait_for_boot
    return timer.start(interval=0.5, now=True)
def init_host(self, host=None):
    """
    Initialize anything that is necessary for the driver to function,
    including catching up with currently running VE's on the given host.

    :param host: hostname whose instances should be reconciled; defaults
                 to the local hostname.  Resolved at call time — the old
                 default of ``socket.gethostname()`` in the signature was
                 frozen once at module import time.
    """
    if host is None:
        host = socket.gethostname()
    ctxt = context.get_admin_context()
    # Fetch the instance list once instead of once for the debug log and
    # again for the loop.
    instances = db.instance_get_all_by_host(ctxt, host)
    LOG.debug("Hostname: %s" % (host,))
    LOG.debug("Instances: %s" % (instances,))
    for instance in instances:
        try:
            LOG.debug("Checking state of %s" % instance["name"])
            state = self.get_info(instance["name"])["state"]
        except exception.NotFound:
            # The VE no longer exists on the platform; record it as off.
            state = power_state.SHUTOFF
        LOG.debug("Current state of %s was %s." % (instance["name"],
                                                   state))
        db.instance_set_state(ctxt, instance["id"], state)
        if state == power_state.SHUTOFF:
            db.instance_destroy(ctxt, instance["id"])
        if state != power_state.RUNNING:
            continue
def _create_vm(self, instance, vdi_uuid, network_info=None):
    """Create a (not yet started) VM on the XenAPI host.

    Looks up the VM by name to guarantee uniqueness, verifies free
    memory, fetches kernel/ramdisk images if the instance has them,
    creates the VM record and its boot VBD, then wires up VIFs and
    injects network info.

    :param instance: nova instance object (attribute access)
    :param vdi_uuid: UUID of the root disk VDI to boot from
    :param network_info: pre-built network info; fetched if None
    :returns: the XenAPI VM reference, or None if memory was short
    :raises exception.Duplicate: if a VM with this name already exists
    """
    instance_name = instance.name
    vm_ref = VMHelper.lookup(self._session, instance_name)
    if vm_ref is not None:
        raise exception.Duplicate(_('Attempted to create'
                                    ' non-unique name %s') % instance_name)

    # Ensure enough free memory is available; on shortfall the instance
    # is marked SHUTDOWN in the DB and we bail out returning None.
    if not VMHelper.ensure_free_mem(self._session, instance):
        LOG.exception(_('instance %(instance_name)s: not enough free '
                        'memory') % locals())
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        return

    user = AuthManager().get_user(instance.user_id)
    project = AuthManager().get_project(instance.project_id)

    # Are we building from a pre-existing disk?
    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    disk_image_type = VMHelper.determine_disk_image_type(instance)

    # Kernel/ramdisk are only fetched when the instance references them.
    kernel = None
    if instance.kernel_id:
        kernel = VMHelper.fetch_image(self._session, instance.id,
            instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)

    ramdisk = None
    if instance.ramdisk_id:
        ramdisk = VMHelper.fetch_image(self._session, instance.id,
            instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)

    use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
        vdi_ref, disk_image_type, instance.os_type)
    vm_ref = VMHelper.create_vm(self._session, instance, kernel,
                                ramdisk, use_pv_kernel)

    # Attach the root disk as the bootable device 0.
    VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                        vdi_ref=vdi_ref, userdevice=0, bootable=True)

    # TODO(tr3buchet) - check to make sure we have network info, otherwise
    # create it now. This goes away once nova-multi-nic hits.
    if network_info is None:
        network_info = self._get_network_info(instance)
    self.create_vifs(vm_ref, network_info)
    # NOTE(review): argument order here is (instance, vm_ref,
    # network_info); another _create_vm variant in this file passes
    # (instance, network_info, vm_ref) — confirm against the actual
    # inject_network_info signature.
    self.inject_network_info(instance, vm_ref, network_info)
    return vm_ref
def _create_vm(self, instance, vdi_uuid, network_info=None):
    """Create a (not yet started) VM on the XenAPI host.

    Looks up the VM by name to guarantee uniqueness, verifies free
    memory, fetches kernel/ramdisk images if referenced, creates the VM
    record and its boot VBD, optionally preconfigures the image (e.g.
    network injection) before first boot, then wires VIFs and injects
    network info.

    :param instance: nova instance object (attribute access)
    :param vdi_uuid: UUID of the root disk VDI to boot from
    :param network_info: pre-built network info; fetched if None
    :returns: the XenAPI VM reference, or None if memory was short
    :raises exception.InstanceExists: if a VM with this name exists
    """
    instance_name = instance.name
    vm_ref = VMHelper.lookup(self._session, instance_name)
    if vm_ref is not None:
        raise exception.InstanceExists(name=instance_name)

    # Ensure enough free memory is available; on shortfall the instance
    # is marked SHUTDOWN in the DB and we bail out returning None.
    if not VMHelper.ensure_free_mem(self._session, instance):
        LOG.exception(_('instance %(instance_name)s: not enough free '
                        'memory') % locals())
        db.instance_set_state(context.get_admin_context(),
                              instance['id'], power_state.SHUTDOWN)
        return

    user = AuthManager().get_user(instance.user_id)
    project = AuthManager().get_project(instance.project_id)

    # Are we building from a pre-existing disk?
    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    disk_image_type = VMHelper.determine_disk_image_type(instance)

    # Kernel/ramdisk are only fetched when the instance references them.
    kernel = None
    if instance.kernel_id:
        kernel = VMHelper.fetch_image(self._session, instance.id,
            instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)

    ramdisk = None
    if instance.ramdisk_id:
        ramdisk = VMHelper.fetch_image(self._session, instance.id,
            instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)

    use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
        vdi_ref, disk_image_type, instance.os_type)
    vm_ref = VMHelper.create_vm(self._session, instance, kernel,
                                ramdisk, use_pv_kernel)

    # Attach the root disk as the bootable device 0.
    VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                        vdi_ref=vdi_ref, userdevice=0, bootable=True)

    # TODO(tr3buchet) - check to make sure we have network info, otherwise
    # create it now. This goes away once nova-multi-nic hits.
    if network_info is None:
        network_info = self._get_network_info(instance)

    # Alter the image before VM start for, e.g. network injection
    if FLAGS.xenapi_inject_image:
        VMHelper.preconfigure_instance(self._session, instance,
                                       vdi_ref, network_info)

    self.create_vifs(vm_ref, network_info)
    self.inject_network_info(instance, network_info, vm_ref)
    return vm_ref
def _wait_for_boot():
    """Poll callback: record the VE's state until it is RUNNING.

    Closure over ``self``, ``instance`` and ``timer`` from the enclosing
    spawn.  Stops the timer once the instance reports RUNNING, or marks
    it SHUTDOWN and stops polling on any error.
    """
    try:
        state = self.get_info(instance["name"])["state"]
        db.instance_set_state(context.get_admin_context(),
                              instance["id"], state)
        if state == power_state.RUNNING:
            LOG.debug("instance %s: booted" % instance["name"])
            timer.stop()
    except Exception:
        # NOTE: narrowed from a bare ``except:`` so the polling loop
        # cannot swallow KeyboardInterrupt/SystemExit.
        LOG.exception("instance %s: failed to boot" % instance["name"])
        db.instance_set_state(context.get_admin_context(),
                              instance["id"], power_state.SHUTDOWN)
        timer.stop()
def _wait_for_boot():
    """Poll callback: record instance state until it is RUNNING.

    Closure over ``self``, ``instance``, ``instance_name``, ``timer``
    and ``_inject_files`` from the enclosing spawn.  On RUNNING, stops
    the timer, injects any pending files and returns True; on error,
    marks the instance SHUTDOWN, stops polling and returns False.
    """
    try:
        state = self.get_info(instance_name)['state']
        db.instance_set_state(context.get_admin_context(),
                              instance['id'], state)
        if state == power_state.RUNNING:
            LOG.debug(_('Instance %s: booted'), instance_name)
            timer.stop()
            _inject_files()
            return True
    except Exception as exc:
        # NOTE: ``except X as e`` replaces the Python-2-only
        # ``except X, e`` comma form (removed in Python 3).
        LOG.warn(exc)
        LOG.exception(_('instance %s: failed to boot'), instance_name)
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        timer.stop()
        return False
def _wait_for_boot():
    """Poll callback: record instance state until it is RUNNING.

    Closure over ``self``, ``instance``, ``timer`` and
    ``_inject_onset_files`` from the enclosing spawn.  On RUNNING, stops
    the timer, injects any onset files and returns True; on error, marks
    the instance SHUTDOWN, stops polling and returns False.
    """
    try:
        state = self.get_info(instance['name'])['state']
        db.instance_set_state(context.get_admin_context(),
                              instance['id'], state)
        if state == power_state.RUNNING:
            LOG.debug(_('Instance %s: booted'), instance['name'])
            timer.stop()
            _inject_onset_files()
            return True
    except Exception as exc:
        # NOTE: ``except X as e`` replaces the Python-2-only
        # ``except X, e`` comma form (removed in Python 3).
        LOG.warn(exc)
        LOG.exception(_('instance %s: failed to boot'), instance['name'])
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        timer.stop()
        return False
def _stop(self, instance):
    """Stop a running VE via ``vzctl stop``.

    Stopping is not a generic nova driver operation, so it lives in the
    private namespace and is invoked from the public methods that need
    it (e.g. pause).  On success the instance is marked SHUTDOWN in the
    database.

    :param instance: instance dict; only ``instance["id"]`` is used
    :returns: True on success
    :raises exception.Error: if vzctl fails or the DB update fails
    """
    try:
        out, stderr = utils.execute("sudo", "vzctl", "stop",
                                    instance["id"])
        # vzctl can emit warnings on stderr without actually failing;
        # surface them in the log but keep going.
        if stderr:
            LOG.error(stderr)
    except ProcessExecutionError:
        raise exception.Error("Failed to stop %s" % instance["id"])

    # Record the new power state so the nova stack sees the VE as down.
    try:
        db.instance_set_state(context.get_admin_context(),
                              instance["id"],
                              power_state.SHUTDOWN)
    except exception.DBError as db_err:
        LOG.error(db_err)
        raise exception.Error("Failed to update db for %s" %
                              instance["id"])

    return True
def _start(self, instance):
    """Start a VE via ``vzctl start``.

    Starting is not a generic nova driver operation, so it is kept in
    the private namespace and called from the public methods that need
    it (e.g. resume).  On success the instance is marked RUNNING in the
    database.

    :param instance: instance dict; only ``instance["id"]`` is used
    :returns: True on success
    :raises exception.Error: if vzctl fails to start the VE
    """
    try:
        # Attempt to start the VE.
        # NOTE: The VE will throw a warning that the hostname is invalid
        # if it isn't valid.  This is logged in LOG.error and is not
        # an indication of failure.
        _, err = utils.execute("sudo", "vzctl", "start", instance["id"])
        if err:
            LOG.error(err)
    except ProcessExecutionError as err:
        LOG.error(err)
        # Use %s (not %d): the id may not be an int, and %d would raise
        # TypeError here and mask the real failure.  Also matches the
        # message style used by _stop.
        raise exception.Error("Failed to start %s" % instance["id"])

    # Set instance state as RUNNING
    db.instance_set_state(context.get_admin_context(),
                          instance["id"],
                          power_state.RUNNING)
    return True
def schedule_live_migration(self, context, instance_id, dest):
    """Schedule a live migration and pick its source host.

    Runs the pre-migration validation (source, destination, and the
    checks common to both ends), flips the instance and its attached
    volumes into a migrating state, and returns the host the instance
    currently runs on so the scheduler can send the request there.

    :param context: request context
    :param instance_id: id of the instance to migrate
    :param dest: destination host
    :returns: the host currently running the instance
    """
    instance = db.instance_get(context, instance_id)

    # Validation happens before any state change; any failure raises
    # here and leaves instance/volume state untouched.
    self._live_migration_src_check(context, instance)
    self._live_migration_dest_check(context, instance, dest)
    self._live_migration_common_check(context, instance, dest)

    # Flag the instance itself as migrating.
    db.instance_set_state(context,
                          instance_id,
                          power_state.PAUSED,
                          'migrating')

    # Flag every attached volume as migrating as well.
    for volume in instance['volumes']:
        db.volume_update(context, volume['id'], {'status': 'migrating'})

    # The caller sends the migration request to the current host.
    return instance['host']
def spawn(self, instance):
    """Create and boot a VM instance on the XenAPI host.

    Fetches the root disk (and kernel/ramdisk when referenced), creates
    the VM record and boot VBD, writes per-network info into the
    xenstore and creates VIFs, starts the VM, then polls for boot in an
    async LoopingCall callback which also injects any onset files.

    :param instance: nova instance object (attribute access)
    :raises exception.Duplicate: if a VM with this name already exists
    """
    vm = VMHelper.lookup(self._session, instance.name)
    if vm is not None:
        raise exception.Duplicate(_('Attempted to create'
                                    ' non-unique name %s') % instance.name)

    # Ensure enough free memory is available; on shortfall mark the
    # instance SHUTDOWN in the DB and bail out.
    if not VMHelper.ensure_free_mem(self._session, instance):
        name = instance['name']
        LOG.exception(_('instance %(name)s: not enough free memory')
                      % locals())
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        return

    user = AuthManager().get_user(instance.user_id)
    project = AuthManager().get_project(instance.project_id)

    # If kernel is not present we must download a raw disk.
    if instance.kernel_id:
        disk_image_type = ImageType.DISK
    else:
        disk_image_type = ImageType.DISK_RAW
    vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
        instance.image_id, user, project, disk_image_type)
    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    # Have a look at the VDI and see if it has a PV kernel.
    pv_kernel = False
    if not instance.kernel_id:
        pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                          vdi_ref)
    kernel = None
    if instance.kernel_id:
        kernel = VMHelper.fetch_image(self._session, instance.id,
            instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
    ramdisk = None
    if instance.ramdisk_id:
        ramdisk = VMHelper.fetch_image(self._session, instance.id,
            instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
    vm_ref = VMHelper.create_vm(self._session, instance, kernel,
                                ramdisk, pv_kernel)
    VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)

    # write network info
    admin_context = context.get_admin_context()

    # TODO(tr3buchet) - remove comment in multi-nic
    # I've decided to go ahead and consider multiple IPs and networks
    # at this stage even though they aren't implemented because these
    # will be needed for multi-nic and there was no sense writing it for
    # single network/single IP and then having to turn around and
    # re-write it
    IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
    for network in db.network_get_all_by_instance(admin_context,
                                                  instance['id']):
        network_IPs = [ip for ip in IPs if ip.network_id == network.id]

        def ip_dict(ip):
            # Per-IP xenstore record; netmask comes from the enclosing
            # network of the current loop iteration.
            return {'netmask': network['netmask'],
                    'enabled': '1',
                    'ip': ip.address}

        mac_id = instance.mac_address.replace(':', '')
        location = 'vm-data/networking/%s' % mac_id
        mapping = {'label': network['label'],
                   'gateway': network['gateway'],
                   'mac': instance.mac_address,
                   'dns': [network['dns']],
                   'ips': [ip_dict(ip) for ip in network_IPs]}
        self.write_to_param_xenstore(vm_ref, {location: mapping})

        # TODO(tr3buchet) - remove comment in multi-nic
        # this bit here about creating the vifs will be updated
        # in multi-nic to handle multiple IPs on the same network
        # and multiple networks
        # for now it works as there is only one of each
        bridge = network['bridge']
        network_ref = \
            NetworkHelper.find_network_with_bridge(self._session, bridge)

        if network_ref:
            VMHelper.create_vif(self._session, vm_ref,
                                network_ref, instance.mac_address)
    LOG.debug(_('Starting VM %s...'), vm_ref)
    self._session.call_xenapi('VM.start', vm_ref, False, False)
    instance_name = instance.name
    LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
             % locals())

    def _inject_onset_files():
        # Push instance.onset_files (possibly a JSON-encoded string)
        # into the guest once it has booted.
        onset_files = instance.onset_files
        if onset_files:
            # Check if this is a JSON-encoded string and convert if
            # needed.
            if isinstance(onset_files, basestring):
                try:
                    onset_files = json.loads(onset_files)
                except ValueError:
                    LOG.exception(_("Invalid value for onset_files: "
                                    "'%s'") % onset_files)
                    onset_files = []
            # Inject any files, if specified.
            # NOTE(review): this iterates instance.onset_files rather
            # than the locally parsed/sanitized ``onset_files`` above,
            # which looks unintended (the JSON decode and the []
            # fallback are discarded) — confirm before changing.
            for path, contents in instance.onset_files:
                LOG.debug(_("Injecting file path: '%s'") % path)
                self.inject_file(instance, path, contents)
    # NOTE(armando): Do we really need to do this in virt?
    # NOTE(tr3buchet): not sure but wherever we do it, we need to call
    # reset_network afterwards
    timer = utils.LoopingCall(f=None)

    def _wait_for_boot():
        # Poll callback: record state until RUNNING, then inject files.
        try:
            state = self.get_info(instance['name'])['state']
            db.instance_set_state(context.get_admin_context(),
                                  instance['id'], state)
            if state == power_state.RUNNING:
                LOG.debug(_('Instance %s: booted'), instance['name'])
                timer.stop()
                _inject_onset_files()
                return True
        except Exception, exc:
            LOG.warn(exc)
            LOG.exception(_('instance %s: failed to boot'),
                          instance['name'])
            db.instance_set_state(context.get_admin_context(),
                                  instance['id'],
                                  power_state.SHUTDOWN)
            timer.stop()
            return False
    # NOTE(review): ``timer`` is created with f=None and _wait_for_boot
    # is defined, but no ``timer.f = _wait_for_boot`` / ``timer.start``
    # is visible here — the tail of this function may be truncated from
    # this view; confirm against the full file.
def spawn(self, instance, disk):
    """Create and boot a VM instance, optionally from an existing disk.

    When ``disk`` is falsy, fetches the root disk image; otherwise boots
    from the VDI identified by ``disk``.  Creates the VM record and boot
    VBD, injects network info and creates VIFs, starts the VM, then
    polls for boot in an async LoopingCall callback which also injects
    any onset files.

    :param instance: nova instance object (attribute access)
    :param disk: UUID of a pre-existing VDI to boot from, or falsy
    :raises exception.Duplicate: if a VM with this name already exists
    """
    instance_name = instance.name
    vm = VMHelper.lookup(self._session, instance_name)
    if vm is not None:
        raise exception.Duplicate(_('Attempted to create'
                                    ' non-unique name %s') % instance_name)

    # Ensure enough free memory is available; on shortfall mark the
    # instance SHUTDOWN in the DB and bail out.
    if not VMHelper.ensure_free_mem(self._session, instance):
        LOG.exception(_('instance %(instance_name)s: not enough free '
                        'memory') % locals())
        db.instance_set_state(context.get_admin_context(),
                              instance['id'],
                              power_state.SHUTDOWN)
        return

    user = AuthManager().get_user(instance.user_id)
    project = AuthManager().get_project(instance.project_id)

    vdi_ref = kernel = ramdisk = pv_kernel = None

    # Are we building from a pre-existing disk?
    if not disk:
        # If kernel is not present we must download a raw disk.
        disk_image_type = VMHelper.determine_disk_image_type(instance)
        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
            instance.image_id, user, project, disk_image_type)
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
    else:
        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk)

    # NOTE(review): ``disk_image_type`` is only assigned in the
    # ``if not disk`` branch above, yet it is read unconditionally here
    # — when ``disk`` is provided this looks like a NameError waiting to
    # happen; confirm and fix upstream.
    if disk_image_type == ImageType.DISK_RAW:
        # Have a look at the VDI and see if it has a PV kernel
        pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                          vdi_ref)
    elif disk_image_type == ImageType.DISK_VHD:
        # TODO(sirp): Assuming PV for now; this will need to be
        # configurable as Windows will use HVM.
        pv_kernel = True

    if instance.kernel_id:
        kernel = VMHelper.fetch_image(self._session, instance.id,
            instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)

    if instance.ramdisk_id:
        ramdisk = VMHelper.fetch_image(self._session, instance.id,
            instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)

    vm_ref = VMHelper.create_vm(self._session, instance, kernel,
                                ramdisk, pv_kernel)
    VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                        vdi_ref=vdi_ref, userdevice=0, bootable=True)

    # inject_network_info and create vifs
    networks = self.inject_network_info(instance)
    self.create_vifs(instance, networks)

    LOG.debug(_('Starting VM %s...'), vm_ref)
    self._start(instance, vm_ref)
    LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
             % locals())

    def _inject_onset_files():
        # Push instance.onset_files (possibly a JSON-encoded string)
        # into the guest once it has booted.
        onset_files = instance.onset_files
        if onset_files:
            # Check if this is a JSON-encoded string and convert if
            # needed.
            if isinstance(onset_files, basestring):
                try:
                    onset_files = json.loads(onset_files)
                except ValueError:
                    LOG.exception(_("Invalid value for onset_files: "
                                    "'%s'") % onset_files)
                    onset_files = []
            # Inject any files, if specified.
            # NOTE(review): this iterates instance.onset_files rather
            # than the locally parsed/sanitized ``onset_files`` above,
            # which looks unintended (the JSON decode and the []
            # fallback are discarded) — confirm before changing.
            for path, contents in instance.onset_files:
                LOG.debug(_("Injecting file path: '%s'") % path)
                self.inject_file(instance, path, contents)

    # NOTE(armando): Do we really need to do this in virt?
    # NOTE(tr3buchet): not sure but wherever we do it, we need to call
    # reset_network afterwards
    timer = utils.LoopingCall(f=None)

    def _wait_for_boot():
        # Poll callback: record state until RUNNING, then inject files.
        try:
            state = self.get_info(instance_name)['state']
            db.instance_set_state(context.get_admin_context(),
                                  instance['id'], state)
            if state == power_state.RUNNING:
                LOG.debug(_('Instance %s: booted'), instance_name)
                timer.stop()
                _inject_onset_files()
                return True
        except Exception, exc:
            LOG.warn(exc)
            LOG.exception(_('instance %s: failed to boot'),
                          instance_name)
            db.instance_set_state(context.get_admin_context(),
                                  instance['id'],
                                  power_state.SHUTDOWN)
            timer.stop()
            return False
    # NOTE(review): ``timer`` is created with f=None and _wait_for_boot
    # is defined, but no ``timer.f = _wait_for_boot`` / ``timer.start``
    # is visible here — the tail of this function may be truncated from
    # this view; confirm against the full file.