def delete_vm(self, context, instance, block_device_info=None, destroy_disks=True): """Delete VM on FC :param context: :param instance: :param block_device_info: :param destroy_disks: :return: """ # if revert resize, only stop vm. when resize operation # task state will be resize_reverting or resize_confirming if instance and (instance.get('task_state') == 'resize_reverting' or instance.get('task_state') == 'resize_confirming'): LOG.info(_('revert resize now, here only stop vm.')) try: self.stop_vm(instance) except Exception as e: LOG.warn(_('stop vm failed, trigger rollback')) raise exception.InstanceFaultRollback(inner_exception=e) return try: fc_vm = FC_MGR.get_vm_by_uuid(instance) except exception.InstanceNotFound: LOG.warn(_('instance exist no more. ignore this deleting.')) return # detach volume created by cinder if block_device_info: LOG.info(_('now will stop vm before detach cinder volumes.')) self.stop_vm(instance) for vol in block_device_info['block_device_mapping']: self.detach_volume(vol['connection_info'], instance) # if vm is in fault-resuming or unknown status, stop it before delete if fc_vm.status == constant.VM_STATUS.UNKNOWN \ or fc_vm.status == constant.VM_STATUS.FAULTRESUMING: LOG.debug( _("vm %s status is fault-resuming or unknown, " "stop it before delete."), fc_vm.uri) self.stop_vm(instance) self._delete_vm_with_fc_vm(fc_vm, destroy_disks) # update affinity group info if needed try: self._update_drs_rules(instance) self._update_affinity_groups(context, instance) #ignore pylint:disable=W0703 except Exception as excp: utils.log_exception(excp) LOG.error(_('update affinity group info failed !'))
def delete_vm(self, context, instance, block_device_info=None, destroy_disks=True):
    """Remove a VM from FC.

    During a resize revert/confirm the VM is only stopped, never
    deleted. Otherwise: detach any cinder volumes (stopping the VM
    first), stop the VM if it is in an abnormal state, delete it, and
    finally refresh DRS/affinity bookkeeping on a best-effort basis.

    :param context: nova request context
    :param instance: nova instance identifying the VM
    :param block_device_info: cinder volume mapping, may be None
    :param destroy_disks: forwarded to the FC delete call
    :return: None
    """
    # A resize in flight means the VM must survive: stop it and bail out.
    resize_states = ('resize_reverting', 'resize_confirming')
    if instance and instance.get('task_state') in resize_states:
        LOG.info(_('revert resize now, here only stop vm.'))
        try:
            self.stop_vm(instance)
        except Exception as err:
            LOG.warn(_('stop vm failed, trigger rollback'))
            raise exception.InstanceFaultRollback(inner_exception=err)
        return

    # Nothing to do if FC no longer knows the VM.
    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
    except exception.InstanceNotFound:
        LOG.warn(_('instance exist no more. ignore this deleting.'))
        return

    # Cinder volumes are detached before deletion, with the VM stopped.
    if block_device_info:
        LOG.info(_('now will stop vm before detach cinder volumes.'))
        self.stop_vm(instance)
        for mapping in block_device_info['block_device_mapping']:
            self.detach_volume(mapping['connection_info'], instance)

    # An unknown/fault-resuming VM is stopped before the delete call.
    abnormal = (constant.VM_STATUS.UNKNOWN,
                constant.VM_STATUS.FAULTRESUMING)
    if fc_vm.status in abnormal:
        LOG.debug(
            _("vm %s status is fault-resuming or unknown, "
              "stop it before delete."), fc_vm.uri)
        self.stop_vm(instance)

    self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

    # Affinity bookkeeping is best-effort: log and swallow failures.
    try:
        self._update_drs_rules(instance)
        self._update_affinity_groups(context, instance)
    # ignore pylint:disable=W0703
    except Exception as excp:
        utils.log_exception(excp)
        LOG.error(_('update affinity group info failed !'))
def _split_injected_files(self, injected_files): """ FC plug in use injected_files impress custom info, split this :return: """ customization = {} filtered_injected_files = [] try: for (path, contents) in injected_files: if path == 'fc_customization': for (key, values) in \ ast.literal_eval(contents).items(): customization[key] = values else: filtered_injected_files.append([path, contents]) except Exception as exc: utils.log_exception(exc) msg = _("Error dict object !") raise fc_exc.InvalidCustomizationInfo(reason=msg) return customization, filtered_injected_files
def _split_injected_files(self, injected_files):
    """
    FC plug in use injected_files impress custom info, split this.

    Entries whose path is the reserved name 'fc_customization' carry a
    Python dict literal of customization data; everything else is a
    regular injected file and is passed through.

    :param injected_files: iterable of (path, contents) pairs
    :return: (customization dict, remaining files as [path, contents])
    :raises fc_exc.InvalidCustomizationInfo: when the customization
        contents cannot be parsed as a dict literal
    """
    customization = {}
    filtered_injected_files = []
    try:
        for (path, contents) in injected_files:
            if path == 'fc_customization':
                # literal_eval safely parses the dict literal; a
                # non-dict value fails on .items() and is rejected below
                for (key, values) in \
                        ast.literal_eval(contents).items():
                    customization[key] = values
            else:
                filtered_injected_files.append([path, contents])
    except Exception as exc:
        utils.log_exception(exc)
        msg = _("Error dict object !")
        raise fc_exc.InvalidCustomizationInfo(reason=msg)
    return customization, filtered_injected_files
def create_vm(self, context, instance, network_info, block_device_info,
              image_meta, injected_files, admin_password, extra_specs):
    """
    Create VM on FC.

    :param context: nova request context
    :param instance: nova instance to create
    :param network_info: list of networks/ports to attach
    :param image_meta: glance image metadata
    :param injected_files: (path, contents) pairs; the reserved path
        'fc_customization' carries FC customization info
    :param admin_password: guest admin password
    :param block_device_info: cinder volume mapping
    :param extra_specs: flavor extra specs
    :return: None
    :raises exception.InstancePowerOnFailure: creation failed, or the
        VM was created but never reached RUNNING
    """
    # Pull FC customization data out of the injected files.
    customization, filtered_injected_files = \
        self._split_injected_files(injected_files)

    # set qos io
    self._volume_ops.set_qos_specs_to_volume(block_device_info)

    # prepare network on FC: one vif entry per network item
    LOG.debug(_('prepare network'))
    vifs = []
    for idx, network_item in enumerate(network_info):
        pg_urn = self._network_ops.ensure_network(network_item['network'])
        enable_dhcp = self._network_ops.\
            is_enable_dhcp(context, network_item['id'])
        vifs.append({
            'sequence_num': idx,
            'pg_urn': pg_urn,
            'enable_dhcp': enable_dhcp,
            'network_info': network_item
        })
    location = self._cluster_ops.\
        get_cluster_urn_by_nodename(instance['node'])

    # initial obj and create vm
    try:
        LOG.debug(_('begin create vm in fc.'))
        vm_create = vmcreation.get_vm_create(self.fc_client, self.task_ops,
                                             instance, image_meta)
        vm_create(context, self._volume_ops, location, vifs,
                  block_device_info, image_meta, filtered_injected_files,
                  admin_password, extra_specs, customization)
        vm_create.create_and_boot_vm()
    except Exception as exc:
        utils.log_exception(exc)
        msg = _("create and boot vm %s failed.") % instance['name']
        # best-effort cleanup of the partially created VM before failing
        self.delete_vm(context, instance, block_device_info)
        raise exception.InstancePowerOnFailure(msg)

    # mutable cell so the nested poller can report success
    boot_result = {'result': False}

    def _wait_for_boot():
        """Called at an interval until the VM is running."""
        statue = FC_MGR.get_vm_by_uuid(instance).status
        if statue == constant.VM_STATUS.RUNNING:
            LOG.debug(_("vm %s create success."), instance['name'])
            boot_result['result'] = True
            raise loopingcall.LoopingCallDone()
        elif statue == constant.VM_STATUS.STOPPED:
            # created but failed to start: stop polling, leave
            # boot_result False so the caller raises below
            LOG.debug(_("create vm %s success, but start failed."),
                      instance['name'])
            raise loopingcall.LoopingCallDone()
        else:
            LOG.debug(_("vm %s is still in creating state."),
                      instance['name'])

    # poll FC every second until the VM settles in RUNNING or STOPPED
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
    timer.start(interval=1).wait()

    if not boot_result['result']:
        # VM exists but never started: remove it and surface the failure
        self.delete_vm(context, instance, block_device_info)
        msg = _("create vm %s success, but start failed.") % \
            instance['name']
        raise exception.InstancePowerOnFailure(msg)

    # record the FC-side VM id (last URN segment) in system metadata;
    # failures here are logged but do not fail the create
    try:
        urn = FC_MGR.get_vm_by_uuid(instance).urn
        instance.system_metadata.update({'fc_vm_id': urn.split(':')[-1]})
        instance.save()
    except Exception as exc:
        utils.log_exception(exc)
        LOG.warn(_("update sys metadata for %s failed."), instance['name'])
def create_vm(self, context, instance, network_info, block_device_info,
              image_meta, injected_files, admin_password, extra_specs):
    """Create and boot a VM on FC.

    Splits FC customization out of the injected files, applies volume
    QoS, prepares the FC port groups, creates the VM, then polls until
    it settles in RUNNING or STOPPED. On any failure the half-created
    VM is deleted and InstancePowerOnFailure is raised.

    :param context: nova request context
    :param instance: nova instance to create
    :param network_info: list of networks/ports to attach
    :param image_meta: glance image metadata
    :param injected_files: (path, contents) pairs, possibly carrying
        'fc_customization'
    :param admin_password: guest admin password
    :param block_device_info: cinder volume mapping
    :param extra_specs: flavor extra specs
    :return: None
    :raises exception.InstancePowerOnFailure: create or boot failed
    """
    customization, plain_files = \
        self._split_injected_files(injected_files)

    # Volume QoS first, then networks, then the VM itself.
    self._volume_ops.set_qos_specs_to_volume(block_device_info)

    LOG.debug(_('prepare network'))
    vifs = [
        {
            'sequence_num': seq,
            'pg_urn': self._network_ops.ensure_network(item['network']),
            'enable_dhcp': self._network_ops.is_enable_dhcp(
                context, item['id']),
            'network_info': item,
        }
        for seq, item in enumerate(network_info)
    ]
    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])

    # Build the creator object and kick off creation on FC.
    try:
        LOG.debug(_('begin create vm in fc.'))
        creator = vmcreation.get_vm_create(self.fc_client, self.task_ops,
                                           instance, image_meta)
        creator(context, self._volume_ops, location, vifs,
                block_device_info, image_meta, plain_files,
                admin_password, extra_specs, customization)
        creator.create_and_boot_vm()
    except Exception as err:
        utils.log_exception(err)
        msg = _("create and boot vm %s failed.") % instance['name']
        # Clean up whatever got created before surfacing the failure.
        self.delete_vm(context, instance, block_device_info)
        raise exception.InstancePowerOnFailure(msg)

    # Mutable cell lets the nested poller report success outward.
    boot_state = {'result': False}

    def _check_boot_status():
        """Poll once; end the loop when the VM settles."""
        current = FC_MGR.get_vm_by_uuid(instance).status
        if current == constant.VM_STATUS.RUNNING:
            LOG.debug(_("vm %s create success."), instance['name'])
            boot_state['result'] = True
            raise loopingcall.LoopingCallDone()
        if current == constant.VM_STATUS.STOPPED:
            LOG.debug(_("create vm %s success, but start failed."),
                      instance['name'])
            raise loopingcall.LoopingCallDone()
        LOG.debug(_("vm %s is still in creating state."),
                  instance['name'])

    poller = loopingcall.FixedIntervalLoopingCall(_check_boot_status)
    poller.start(interval=1).wait()

    if not boot_state['result']:
        # Created but never reached RUNNING: delete it and fail loudly.
        self.delete_vm(context, instance, block_device_info)
        msg = _("create vm %s success, but start failed.") % \
            instance['name']
        raise exception.InstancePowerOnFailure(msg)

    # Stash the FC-side VM id (last URN segment); best-effort only.
    try:
        fc_urn = FC_MGR.get_vm_by_uuid(instance).urn
        instance.system_metadata.update({'fc_vm_id': fc_urn.split(':')[-1]})
        instance.save()
    except Exception as err:
        utils.log_exception(err)
        LOG.warn(_("update sys metadata for %s failed."), instance['name'])