def reboot(adapter, instance, hard):
    """Reboots a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to reboot.
    :param hard: Boolean True if hard reboot, False otherwise.
    :raises: InstanceRebootFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        try:
            lpar_wrap = get_instance_wrapper(adapter, instance)
            if lpar_wrap.state == pvm_bp.LPARState.NOT_ACTIVATED:
                # pypowervm does NOT throw an exception if "already down".
                # Any other exception from pypowervm is a legitimate
                # failure; let it raise up.
                # If we get here, pypowervm thinks the instance is down.
                power.power_on(lpar_wrap, None)
            elif hard:
                # Hard reboot: immediate VSP stop with restart requested.
                power.PowerOp.stop(
                    lpar_wrap,
                    opts=popts.PowerOffOpts().vsp_hard().restart())
            else:
                # Soft reboot: escalate shutdown methods gracefully.
                power.power_off_progressive(lpar_wrap, restart=True)
        except pvm_exc.Error as e:
            LOG.exception("PowerVM error during reboot.", instance=instance)
            raise exc.InstanceRebootFailure(reason=six.text_type(e))
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the baremetal node backing this instance.

    Power-cycles the node via its power manager and records the
    resulting state, raising InstanceRebootFailure if the node did
    not come back to ACTIVE.
    """
    node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
    admin_ctx = nova_context.get_admin_context()
    pm = get_power_manager(node=node, instance=instance)
    state = pm.reboot_node()
    if pm.state == baremetal_states.ACTIVE:
        # Node restarted successfully; persist the new state.
        _update_state(admin_ctx, node, instance, state)
        return
    raise exception.InstanceRebootFailure(_(
        "Baremetal power manager failed to restart node "
        "for instance %r") % instance['uuid'])
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Restart the Azure VM backing this instance.

    Any Azure-side failure is logged and re-raised as
    InstanceRebootFailure so the compute manager handles it uniformly.
    """
    try:
        restart_op = self.compute.virtual_machines.restart(
            CONF.azure.resource_group, instance.uuid)
        # Block until Azure reports the restart finished (or times out).
        restart_op.wait(CONF.azure.async_timeout)
        LOG.info(_LI("Restart Instance in Azure Finish."),
                 instance=instance)
    except Exception as e:
        # Boundary handler: translate any Azure SDK error into the
        # nova exception callers expect.
        msg = six.text_type(e)
        LOG.exception(msg)
        raise nova_ex.InstanceRebootFailure(reason=msg)
def reboot(self, instance, network_info):
    """Reboot a VM instance."""
    vm_ref = self._get_vm_ref_from_the_name(instance.name)
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=instance.id)

    self.plug_vifs(instance, network_info)

    # Fetch power state and guest-tools status in one properties call.
    wanted_props = ["summary.guest.toolsStatus", "runtime.powerState",
                    "summary.guest.toolsRunningStatus"]
    props = self._session._call_method(vim_util, "get_object_properties",
                                       None, vm_ref, "VirtualMachine",
                                       wanted_props)
    # Flatten the propSet entries; a later occurrence of the same
    # property name overrides an earlier one, matching per-prop scans.
    prop_map = {}
    for elem in props:
        for prop in elem.propSet:
            prop_map[prop.name] = prop.val
    pwr_state = prop_map.get("runtime.powerState")
    tools_status = prop_map.get("summary.guest.toolsStatus")
    tools_running_status = prop_map.get(
        "summary.guest.toolsRunningStatus", False)

    # Raise an exception if the VM is not powered On.
    if pwr_state != "poweredOn":
        reason = _("instance is not powered on")
        raise exception.InstanceRebootFailure(reason=reason)

    # If latest vmware tools are installed in the VM, and that the tools
    # are running, then only do a guest reboot. Otherwise do a hard reset.
    if (tools_status == "toolsOk" and
            tools_running_status == "guestToolsRunning"):
        LOG.debug(_("Rebooting guest OS of VM %s") % instance.name)
        self._session._call_method(self._session._get_vim(),
                                   "RebootGuest", vm_ref)
        LOG.debug(_("Rebooted guest OS of VM %s") % instance.name)
    else:
        LOG.debug(_("Doing hard reboot of VM %s") % instance.name)
        reset_task = self._session._call_method(self._session._get_vim(),
                                                "ResetVM_Task", vm_ref)
        self._session._wait_for_task(instance.id, reset_task)
        LOG.debug(_("Did hard reboot of VM %s") % instance.name)
def test_instance_action_event_details_with_nova_exception(self):
    """Creates a server using the non-admin user, then reboot it which
    will generate a nova exception fault and put the instance into
    ERROR status. Then checks that fault details are visible.
    """
    # Create the server with the non-admin user.
    server = self._build_server(
        networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}])
    server = self.api.post_server({'server': server})
    server = self._wait_for_state_change(server, 'ACTIVE')

    # Stop the server before rebooting it so that after the driver.reboot
    # method raises an exception, the fake driver does not report the
    # instance power state as running - that will make the compute manager
    # set the instance vm_state to error.
    self.api.post_server_action(server['id'], {'os-stop': None})
    server = self._wait_for_state_change(server, 'SHUTOFF')

    # Stub out the compute driver reboot method to raise a nova
    # exception 'InstanceRebootFailure' to simulate some error.
    exc_reason = 'reboot failure'
    with mock.patch.object(
            self.compute.manager.driver, 'reboot',
            side_effect=exception.InstanceRebootFailure(
                reason=exc_reason)) as mock_reboot:
        reboot_request = {'reboot': {'type': 'HARD'}}
        self.api.post_server_action(server['id'], reboot_request)
        # In this case we wait for the status to change to ERROR using
        # the non-admin user so we can assert the fault details. We also
        # wait for the task_state to be None since the wrap_instance_fault
        # decorator runs before the reverts_task_state decorator so we
        # will be sure the fault is set on the server.
        server = self._wait_for_server_parameter(
            server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None},
            api=self.api)
        mock_reboot.assert_called_once()

    self._set_policy_rules(overwrite=False)

    server_id = server['id']
    # Calls GET on the server actions and verifies that the reboot
    # action expected in the response.
    response = self.api.api_get('/servers/%s/os-instance-actions' %
                                server_id)
    server_actions = response.body['instanceActions']
    # Find the request id of the (failed) reboot action.
    for actions in server_actions:
        if actions['action'] == 'reboot':
            reboot_request_id = actions['request_id']

    # non admin shows instance actions details and verifies the 'details'
    # in the action events via 'request_id', since microversion 2.51 that
    # we can show events, but in microversion 2.84 that we can show
    # 'details' for non-admin.
    self.api.microversion = '2.84'
    action_events_response = self.api.api_get(
        '/servers/%s/os-instance-actions/%s' % (server_id,
                                                reboot_request_id))
    reboot_action = action_events_response.body['instanceAction']
    # Since reboot action failed, the 'message' property in reboot action
    # should be 'Error', otherwise it's None.
    self.assertEqual('Error', reboot_action['message'])
    reboot_action_events = reboot_action['events']
    # The instance action events from the non-admin user API response
    # should not have 'traceback' in it.
    self.assertNotIn('traceback', reboot_action_events[0])
    # The nova exception format message should be in the details.
    self.assertIn('details', reboot_action_events[0])
    self.assertIn(exc_reason, reboot_action_events[0]['details'])

    # Get the server fault details for the admin user.
    self.admin_api.microversion = '2.84'
    action_events_response = self.admin_api.api_get(
        '/servers/%s/os-instance-actions/%s' % (server_id,
                                                reboot_request_id))
    reboot_action = action_events_response.body['instanceAction']
    self.assertEqual('Error', reboot_action['message'])
    reboot_action_events = reboot_action['events']
    # The admin can see the fault details which includes the traceback,
    # and make sure the traceback is there by looking for part of it.
    self.assertIn('traceback', reboot_action_events[0])
    self.assertIn('in reboot_instance',
                  reboot_action_events[0]['traceback'])
    # The nova exception format message should be in the details.
    self.assertIn(exc_reason, reboot_action_events[0]['details'])