def configure_container_configdrive(self, container_config, instance, injected_files):
    """Attach a config drive to an LXD container configuration.

    Builds a config drive containing the instance metadata plus any
    injected files, writes it into the container's config-drive
    directory, and registers the drive as a disk in the container
    config.

    :param container_config: the LXD container config to extend.
    :param instance: the nova instance the drive is built for.
    :param injected_files: files to embed in the config drive.
    :raises: exception.InstancePowerOnFailure when the configured
             config-drive format is unsupported.
    :return: the updated container config.
    """
    LOG.debug('Creating LXD config drive')

    # Only the filesystem format (or the unset default) is accepted.
    if CONF.config_drive_format not in ('fs', None):
        msg = (_('Invalid config drive format: %s')
               % CONF.config_drive_format)
        raise exception.InstancePowerOnFailure(reason=msg)

    LOG.info(_LI('Using config drive for instance'), instance=instance)
    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files, extra_md={})

    try:
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            drive_path = self.container_dir.get_container_configdrive(
                instance.name)
            cdb.make_drive(drive_path)
            container_config = self.configure_disk_path(
                container_config, 'configdrive', instance)
    except Exception as e:
        # Log, then let save_and_reraise_exception propagate the error.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Creating config drive failed with error: %s'),
                      e, instance=instance)

    return container_config
def power_on(self, instance, node=None):
    """Power on the specified instance.

    Resolves the baremetal node for the instance when one is not
    supplied, activates it through its power manager, and starts the
    serial console.

    :raises: exception.InstancePowerOnFailure when the node does not
             reach the ACTIVE power state after activation.
    """
    uuid = instance['uuid']
    if not node:
        node = _get_baremetal_node_by_instance_uuid(uuid)

    manager = get_power_manager(node=node, instance=instance)
    manager.activate_node()

    if manager.state != baremetal_states.ACTIVE:
        raise exception.InstancePowerOnFailure(
            _("Baremetal power manager failed to start node "
              "for instance %r") % uuid)

    manager.start_console()
def power_on(self, context, instance, network_info, block_device_info=None):
    """Power on the Azure VM backing *instance* and wait for completion.

    Issues an asynchronous start request against the Azure compute API
    and blocks until it finishes (bounded by the configured timeout).

    :raises: nova_ex.InstancePowerOnFailure wrapping any error raised
             by the Azure SDK call.
    """
    try:
        operation = self.compute.virtual_machines.start(
            CONF.azure.resource_group, instance.uuid)
        operation.wait(CONF.azure.async_timeout)
        LOG.info(_LI("Power On Instance in Azure Finish."),
                 instance=instance)
    except Exception as e:
        msg = six.text_type(e)
        LOG.exception(msg)
        raise nova_ex.InstancePowerOnFailure(reason=msg)
def power_on(adapter, instance):
    """Powers on a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to power on.
    :raises: InstancePowerOnFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        lpar_wrap = get_instance_wrapper(adapter, instance)

        # Power-on is only attempted from a startable state; any other
        # state is a silent no-op, matching the power-off semantics.
        if lpar_wrap.state not in _POWERVM_STARTABLE_STATE:
            return

        try:
            power.power_on(lpar_wrap, None)
        except pvm_exc.Error as e:
            LOG.exception("PowerVM error during power_on.",
                          instance=instance)
            raise exc.InstancePowerOnFailure(reason=six.text_type(e))
def start_vm(self, instance):
    """Start a VM on FusionCompute.

    Starts the VM when it is stopped or suspended, does nothing when
    it is already running, and raises for any other state.

    :param instance: nova.objects.instance.Instance
    :raises: exception.InstancePowerOnFailure when the VM is in a
             state from which it cannot be started.
    """
    LOG.info(_("trying to start vm: %s.") % instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    status = fc_vm.status

    if status == constant.VM_STATUS.RUNNING:
        LOG.info(_("vm has already running."))
        return

    if status not in (constant.VM_STATUS.STOPPED,
                      constant.VM_STATUS.SUSPENDED):
        raise exception.InstancePowerOnFailure(
            reason=_("vm status is %s and cannot be powered on.") % status)

    # Adjust the boot option if required before issuing the start request.
    self._modify_boot_option_if_needed(instance, fc_vm)
    self.post(fc_vm.get_vm_action_uri('start'),
              excp=exception.InstancePowerOnFailure)
    LOG.info(_("start vm %s success"), fc_vm.name)
def create_vm(self, context, instance, network_info, block_device_info,
              image_meta, injected_files, admin_password, extra_specs):
    """Create and boot a VM on FusionCompute.

    Prepares volume QoS and networking, creates the VM, then polls
    until it boots.  On a failed create or a failed boot the VM is
    deleted again and InstancePowerOnFailure is raised.  Finally the
    FC VM id is recorded in the instance system metadata (best
    effort).

    :param instance: nova instance to create.
    :param network_info: list of network items to wire up.
    :param image_meta: image the VM boots from.
    :param injected_files: files to inject into the guest.
    :param admin_password: admin password to set in the guest.
    :param block_device_info: block device mapping for the VM.
    :param extra_specs: flavor extra specs.
    :raises: exception.InstancePowerOnFailure on create or boot failure.
    """
    customization, filtered_injected_files = (
        self._split_injected_files(injected_files))

    # Apply QoS I/O settings to the backing volumes.
    self._volume_ops.set_qos_specs_to_volume(block_device_info)

    # Prepare networking on FC: one vif record per network item.
    LOG.debug(_('prepare network'))
    vif_list = []
    for seq, net_item in enumerate(network_info):
        vif_list.append({
            'sequence_num': seq,
            'pg_urn': self._network_ops.ensure_network(
                net_item['network']),
            'enable_dhcp': self._network_ops.is_enable_dhcp(
                context, net_item['id']),
            'network_info': net_item,
        })

    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])

    # Build and boot the VM; roll back (delete) on any failure.
    try:
        LOG.debug(_('begin create vm in fc.'))
        creator = vmcreation.get_vm_create(self.fc_client, self.task_ops,
                                           instance, image_meta)
        creator(context, self._volume_ops, location, vif_list,
                block_device_info, image_meta, filtered_injected_files,
                admin_password, extra_specs, customization)
        creator.create_and_boot_vm()
    except Exception as error:
        utils.log_exception(error)
        msg = _("create and boot vm %s failed.") % instance['name']
        self.delete_vm(context, instance, block_device_info)
        raise exception.InstancePowerOnFailure(msg)

    boot_result = {'result': False}

    def _wait_for_boot():
        """Called at an interval until the VM is running."""
        status = FC_MGR.get_vm_by_uuid(instance).status
        if status == constant.VM_STATUS.RUNNING:
            LOG.debug(_("vm %s create success."), instance['name'])
            boot_result['result'] = True
            raise loopingcall.LoopingCallDone()
        if status == constant.VM_STATUS.STOPPED:
            # Created but failed to start; stop polling, caller cleans up.
            LOG.debug(_("create vm %s success, but start failed."),
                      instance['name'])
            raise loopingcall.LoopingCallDone()
        LOG.debug(_("vm %s is still in creating state."),
                  instance['name'])

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
    timer.start(interval=1).wait()

    if not boot_result['result']:
        self.delete_vm(context, instance, block_device_info)
        msg = _("create vm %s success, but start failed.") % \
            instance['name']
        raise exception.InstancePowerOnFailure(msg)

    # Best effort: remember the FC VM id in the system metadata.
    try:
        urn = FC_MGR.get_vm_by_uuid(instance).urn
        instance.system_metadata.update({'fc_vm_id': urn.split(':')[-1]})
        instance.save()
    except Exception as error:
        utils.log_exception(error)
        LOG.warn(_("update sys metadata for %s failed."),
                 instance['name'])