def test_gauge_reflection(self):
    # The check decorator must preserve the wrapped function's metadata
    # (via six.wraps) so that signature inspection still reports the
    # original argument names of the decorated function.
    sig = reflection.get_signature(gauge_check)
    self.assertEqual(['run', 'gauge'], list(sig.parameters))
def extract_flow_kwargs(flow_factory, factory_args=None, store=None):
    """Extract required flow_factory arguments from a job store.

    Given a flow_factory function or class, a list of positional arguments,
    and a store dict, extract the remaining arguments for the flow_factory
    from the store.

    :param flow_factory: flow factory function or FlowFactory subclass.
    :param factory_args: positional arguments already supplied to the
        factory; consumed from the left as var-positional parameters are
        encountered. Accepts any mutable sequence (list,
        collections.deque, ...).
    :param store: mapping of parameter name -> value used to fill in the
        factory's remaining parameters.
    :returns: dict of keyword arguments for the flow_factory.
    :raises KeyError: if a required argument is not provided.
    """
    if factory_args is None:
        factory_args = []
    if store is None:
        store = {}
    kwargs = {}
    _name, factory_fun = fetch_validate_factory(flow_factory)
    if reflection.is_subclass(factory_fun, ff_type.FlowFactory):
        # FlowFactory classes expose the real factory as 'generate'.
        factory_fun = factory_fun.generate
    sig = reflection.get_signature(factory_fun)
    for param_name, p in six.iteritems(sig.parameters):
        if param_name == 'self':
            # Method receiver is never filled from the store.
            continue
        if p.kind is reflection.Parameter.VAR_POSITIONAL and factory_args:
            # Consume one already-supplied positional argument from the
            # left. NOTE: 'del seq[0]' works for both list and deque,
            # whereas the previous 'popleft()' raised AttributeError
            # whenever callers (or the default above) provided a plain
            # list instead of a deque.
            del factory_args[0]
        value = store.get(param_name, p.default)
        if value is reflection.Parameter.empty:
            # No value in the store and no default on the parameter.
            msg_tmpl = ('The flow {flow_factory} is missing a required '
                        'parameter: {param_name}')
            msg = msg_tmpl.format(flow_factory=flow_factory.__name__,
                                  param_name=param_name)
            raise KeyError(msg)
        kwargs[param_name] = value
    return kwargs
def get_resource(resource, resource_id, eager=False):
    """Get the resource from the uuid, id or logical name.

    :param resource: the resource type (name of a class on ``objects``).
    :param resource_id: the UUID, ID or logical name of the resource.
    :param eager: forwarded to the getter as ``eager=`` only when the
        getter's signature accepts that parameter.
    :returns: The resource.
    """
    resource_cls = getattr(objects, resource)

    # Pick the lookup method from the shape of the identifier.
    if utils.is_int_like(resource_id):
        resource_id = int(resource_id)
        getter = resource_cls.get
    elif uuidutils.is_uuid_like(resource_id):
        getter = resource_cls.get_by_uuid
    else:
        getter = resource_cls.get_by_name

    # Not every getter supports 'eager'; only pass it when it does.
    if 'eager' in reflection.get_signature(getter).parameters:
        return getter(pecan.request.context, resource_id, eager=eager)
    return getter(pecan.request.context, resource_id)
def node_power_action(task, new_state, timeout=None):
    """Change power state or reset for a node.

    Perform the requested power action if the transition is required.

    :param task: a TaskManager instance containing the node to act on.
    :param new_state: Any power state from ironic.common.states.
    :param timeout: timeout (in seconds) positive integer (> 0) for any
      power state. ``None`` indicates to use default timeout.
    :raises: InvalidParameterValue when the wrong state is specified
             or the wrong driver info is specified.
    :raises: StorageError when a failure occurs updating the node's storage
             interface upon setting power on.
    :raises: other exceptions by the node's power driver if something
             wrong occurred during the power action.
    """
    # NOTE(review): this module contains more than one definition of
    # node_power_action; in Python the last definition shadows the
    # earlier ones, so this version may not be the one actually bound.
    # Announce that a power-state change has started.
    notify_utils.emit_power_set_notification(task,
                                             fields.NotificationLevel.INFO,
                                             fields.NotificationStatus.START,
                                             new_state)
    node = task.node
    # Map the requested action onto the steady state it should end in;
    # None means the request is not one of the recognized power states.
    if new_state in (states.POWER_ON, states.REBOOT, states.SOFT_REBOOT):
        target_state = states.POWER_ON
    elif new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
        target_state = states.POWER_OFF
    else:
        target_state = None

    def _not_going_to_change():
        # Neither the ironic service nor the hardware has erred. The
        # node is, for some reason, already in the requested state,
        # though we don't know why. eg, perhaps the user previously
        # requested the node POWER_ON, the network delayed those IPMI
        # packets, and they are trying again -- but the node finally
        # responds to the first request, and so the second request
        # gets to this check and stops.
        # This isn't an error, so we'll clear last_error field
        # (from previous operation), log a warning, and return.
        node['last_error'] = None
        # NOTE(dtantsur): under rare conditions we can get out of sync here
        node['power_state'] = curr_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(task,
                                                 fields.NotificationLevel.INFO,
                                                 fields.NotificationStatus.END,
                                                 new_state)
        LOG.warning(
            "Not going to change node %(node)s power state because "
            "current state = requested state = '%(state)s'.", {
                'node': node.uuid,
                'state': curr_state
            })

    try:
        curr_state = task.driver.power.get_power_state(task)
    except Exception as e:
        # save_and_reraise_exception re-raises on exit, so recording the
        # failure and emitting the ERROR notification is the last thing
        # this function does on this path.
        with excutils.save_and_reraise_exception():
            node['last_error'] = _(
                "Failed to change power state to '%(target)s'. "
                "Error: %(error)s") % {
                    'target': new_state,
                    'error': e
                }
            node['target_power_state'] = states.NOSTATE
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)

    # If the node is already in the requested steady state, do nothing.
    if curr_state == states.POWER_ON:
        if new_state == states.POWER_ON:
            _not_going_to_change()
            return
    elif curr_state == states.POWER_OFF:
        if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
            _not_going_to_change()
            return
    else:
        # if curr_state == states.ERROR:
        # be optimistic and continue action
        LOG.warning("Driver returns ERROR power state for node %s.",
                    node.uuid)

    # Set the target_power_state and clear any last_error, if we're
    # starting a new operation. This will expose to other processes
    # and clients that work is in progress.
    if node['target_power_state'] != target_state:
        node['target_power_state'] = target_state
        node['last_error'] = None
        node.save()

    # take power action
    try:
        if (target_state == states.POWER_ON
                and node.provision_state == states.ACTIVE):
            task.driver.storage.attach_volumes(task)
        if new_state != states.REBOOT:
            # Only forward 'timeout' to drivers whose set_power_state
            # signature actually declares it.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.set_power_state).parameters):
                task.driver.power.set_power_state(task, new_state,
                                                  timeout=timeout)
            else:
                # FIXME(naohirot):
                # After driver composition, we should print power interface
                # name here instead of driver.
                LOG.warning(
                    "The set_power_state method of %(driver_name)s "
                    "doesn't support 'timeout' parameter.",
                    {'driver_name': node.driver})
                task.driver.power.set_power_state(task, new_state)
        else:
            # TODO(TheJulia): We likely ought to consider toggling
            # volume attachments, although we have no mechanism to
            # really verify what cinder has connector wise.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.reboot).parameters):
                task.driver.power.reboot(task, timeout=timeout)
            else:
                LOG.warning(
                    "The reboot method of %(driver_name)s "
                    "doesn't support 'timeout' parameter.",
                    {'driver_name': node.driver})
                task.driver.power.reboot(task)
    except Exception as e:
        # Record the failure on the node, then re-raise.
        with excutils.save_and_reraise_exception():
            node['target_power_state'] = states.NOSTATE
            node['last_error'] = _(
                "Failed to change power state to '%(target_state)s' "
                "by '%(new_state)s'. Error: %(error)s") % {
                    'target_state': target_state,
                    'new_state': new_state,
                    'error': e
                }
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)
    else:
        # success!  (try/else: runs only when no exception was raised)
        node['power_state'] = target_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(task,
                                                 fields.NotificationLevel.INFO,
                                                 fields.NotificationStatus.END,
                                                 new_state)
        LOG.info(
            'Successfully set node %(node)s power state to '
            '%(target_state)s by %(new_state)s.', {
                'node': node.uuid,
                'target_state': target_state,
                'new_state': new_state
            })
        # NOTE(TheJulia): Similarly to power-on, when we power-off
        # a node, we should detach any volume attachments.
        if (target_state == states.POWER_OFF
                and node.provision_state == states.ACTIVE):
            try:
                task.driver.storage.detach_volumes(task)
            except exception.StorageError as e:
                # Best-effort: a detach failure is logged, not raised.
                LOG.warning(
                    "Volume detachment for node %(node)s "
                    "failed. Error: %(error)s", {
                        'node': node.uuid,
                        'error': e
                    })
def node_power_action(task, new_state, timeout=None):
    """Change power state or reset for a node.

    Perform the requested power action if the transition is required.

    :param task: a TaskManager instance containing the node to act on.
    :param new_state: Any power state from ironic.common.states.
    :param timeout: timeout (in seconds) positive integer (> 0) for any
      power state. ``None`` indicates to use default timeout.
    :raises: InvalidParameterValue when the wrong state is specified
             or the wrong driver info is specified.
    :raises: StorageError when a failure occurs updating the node's storage
             interface upon setting power on.
    :raises: other exceptions by the node's power driver if something
             wrong occurred during the power action.
    """
    # Announce that a power-state change has started.
    notify_utils.emit_power_set_notification(
        task, fields.NotificationLevel.INFO, fields.NotificationStatus.START,
        new_state)
    node = task.node

    # NOTE(review): _can_skip_state_change and _calculate_target_state are
    # helpers defined elsewhere in the module (not visible in this chunk);
    # presumably they encapsulate the "already in requested state" check
    # and the new_state -> steady-state mapping -- confirm against the
    # full file.
    if _can_skip_state_change(task, new_state):
        return
    target_state = _calculate_target_state(new_state)

    # Set the target_power_state and clear any last_error, if we're
    # starting a new operation. This will expose to other processes
    # and clients that work is in progress.
    if node['target_power_state'] != target_state:
        node['target_power_state'] = target_state
        node['last_error'] = None
        node.save()

    # take power action
    try:
        if (target_state == states.POWER_ON
                and node.provision_state == states.ACTIVE):
            task.driver.storage.attach_volumes(task)
        if new_state != states.REBOOT:
            # Only forward 'timeout' to drivers whose set_power_state
            # signature actually declares it.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.set_power_state).parameters):
                task.driver.power.set_power_state(task, new_state,
                                                  timeout=timeout)
            else:
                # FIXME(naohirot):
                # After driver composition, we should print power interface
                # name here instead of driver.
                LOG.warning(
                    "The set_power_state method of %(driver_name)s "
                    "doesn't support 'timeout' parameter.",
                    {'driver_name': node.driver})
                task.driver.power.set_power_state(task, new_state)
        else:
            # TODO(TheJulia): We likely ought to consider toggling
            # volume attachments, although we have no mechanism to
            # really verify what cinder has connector wise.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.reboot).parameters):
                task.driver.power.reboot(task, timeout=timeout)
            else:
                LOG.warning("The reboot method of %(driver_name)s "
                            "doesn't support 'timeout' parameter.",
                            {'driver_name': node.driver})
                task.driver.power.reboot(task)
    except Exception as e:
        # Record the failure on the node, emit the ERROR notification,
        # then save_and_reraise_exception re-raises on exit.
        with excutils.save_and_reraise_exception():
            node['target_power_state'] = states.NOSTATE
            node['last_error'] = _(
                "Failed to change power state to '%(target_state)s' "
                "by '%(new_state)s'. Error: %(error)s") % {
                    'target_state': target_state,
                    'new_state': new_state,
                    'error': e}
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)
    else:
        # success!  (try/else: runs only when no exception was raised)
        node['power_state'] = target_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(
            task, fields.NotificationLevel.INFO,
            fields.NotificationStatus.END, new_state)
        LOG.info('Successfully set node %(node)s power state to '
                 '%(target_state)s by %(new_state)s.',
                 {'node': node.uuid,
                  'target_state': target_state,
                  'new_state': new_state})
        # NOTE(TheJulia): Similarly to power-on, when we power-off
        # a node, we should detach any volume attachments.
        if (target_state == states.POWER_OFF
                and node.provision_state == states.ACTIVE):
            try:
                task.driver.storage.detach_volumes(task)
            except exception.StorageError as e:
                # Best-effort: a detach failure is logged, not raised.
                LOG.warning("Volume detachment for node %(node)s "
                            "failed. Error: %(error)s",
                            {'node': node.uuid, 'error': e})
def node_power_action(task, new_state, timeout=None):
    """Change power state or reset for a node.

    Perform the requested power action if the transition is required.

    :param task: a TaskManager instance containing the node to act on.
    :param new_state: Any power state from ironic.common.states.
    :param timeout: timeout (in seconds) positive integer (> 0) for any
      power state. ``None`` indicates to use default timeout.
    :raises: InvalidParameterValue when the wrong state is specified
             or the wrong driver info is specified.
    :raises: StorageError when a failure occurs updating the node's storage
             interface upon setting power on.
    :raises: other exceptions by the node's power driver if something
             wrong occurred during the power action.
    """
    # Announce that a power-state change has started.
    notify_utils.emit_power_set_notification(
        task, fields.NotificationLevel.INFO, fields.NotificationStatus.START,
        new_state)
    node = task.node

    # Map the requested action onto the steady state it should end in;
    # None means the request is not one of the recognized power states.
    if new_state in (states.POWER_ON, states.REBOOT, states.SOFT_REBOOT):
        target_state = states.POWER_ON
    elif new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
        target_state = states.POWER_OFF
    else:
        target_state = None

    def _not_going_to_change():
        # Neither the ironic service nor the hardware has erred. The
        # node is, for some reason, already in the requested state,
        # though we don't know why. eg, perhaps the user previously
        # requested the node POWER_ON, the network delayed those IPMI
        # packets, and they are trying again -- but the node finally
        # responds to the first request, and so the second request
        # gets to this check and stops.
        # This isn't an error, so we'll clear last_error field
        # (from previous operation), log a warning, and return.
        node['last_error'] = None
        # NOTE(dtantsur): under rare conditions we can get out of sync here
        node['power_state'] = curr_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(
            task, fields.NotificationLevel.INFO,
            fields.NotificationStatus.END, new_state)
        LOG.warning("Not going to change node %(node)s power state because "
                    "current state = requested state = '%(state)s'.",
                    {'node': node.uuid, 'state': curr_state})

    try:
        curr_state = task.driver.power.get_power_state(task)
    except Exception as e:
        # save_and_reraise_exception re-raises on exit, so recording the
        # failure and emitting the ERROR notification is the last thing
        # this function does on this path.
        with excutils.save_and_reraise_exception():
            node['last_error'] = _(
                "Failed to change power state to '%(target)s'. "
                "Error: %(error)s") % {'target': new_state, 'error': e}
            node['target_power_state'] = states.NOSTATE
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)

    # If the node is already in the requested steady state, do nothing.
    if curr_state == states.POWER_ON:
        if new_state == states.POWER_ON:
            _not_going_to_change()
            return
    elif curr_state == states.POWER_OFF:
        if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
            _not_going_to_change()
            return
    else:
        # if curr_state == states.ERROR:
        # be optimistic and continue action
        LOG.warning("Driver returns ERROR power state for node %s.",
                    node.uuid)

    # Set the target_power_state and clear any last_error, if we're
    # starting a new operation. This will expose to other processes
    # and clients that work is in progress.
    if node['target_power_state'] != target_state:
        node['target_power_state'] = target_state
        node['last_error'] = None
        node.save()

    # take power action
    try:
        if (target_state == states.POWER_ON
                and node.provision_state == states.ACTIVE):
            task.driver.storage.attach_volumes(task)
        if new_state != states.REBOOT:
            # Only forward 'timeout' to drivers whose set_power_state
            # signature actually declares it.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.set_power_state).parameters):
                task.driver.power.set_power_state(task, new_state,
                                                  timeout=timeout)
            else:
                # FIXME(naohirot):
                # After driver composition, we should print power interface
                # name here instead of driver.
                LOG.warning(
                    "The set_power_state method of %(driver_name)s "
                    "doesn't support 'timeout' parameter.",
                    {'driver_name': node.driver})
                task.driver.power.set_power_state(task, new_state)
        else:
            # TODO(TheJulia): We likely ought to consider toggling
            # volume attachments, although we have no mechanism to
            # really verify what cinder has connector wise.
            if ('timeout' in reflection.get_signature(
                    task.driver.power.reboot).parameters):
                task.driver.power.reboot(task, timeout=timeout)
            else:
                LOG.warning("The reboot method of %(driver_name)s "
                            "doesn't support 'timeout' parameter.",
                            {'driver_name': node.driver})
                task.driver.power.reboot(task)
    except Exception as e:
        # Record the failure on the node, emit the ERROR notification,
        # then save_and_reraise_exception re-raises on exit.
        with excutils.save_and_reraise_exception():
            node['target_power_state'] = states.NOSTATE
            node['last_error'] = _(
                "Failed to change power state to '%(target_state)s' "
                "by '%(new_state)s'. Error: %(error)s") % {
                    'target_state': target_state,
                    'new_state': new_state,
                    'error': e}
            node.save()
            notify_utils.emit_power_set_notification(
                task, fields.NotificationLevel.ERROR,
                fields.NotificationStatus.ERROR, new_state)
    else:
        # success!  (try/else: runs only when no exception was raised)
        node['power_state'] = target_state
        node['target_power_state'] = states.NOSTATE
        node.save()
        notify_utils.emit_power_set_notification(
            task, fields.NotificationLevel.INFO,
            fields.NotificationStatus.END, new_state)
        LOG.info('Successfully set node %(node)s power state to '
                 '%(target_state)s by %(new_state)s.',
                 {'node': node.uuid,
                  'target_state': target_state,
                  'new_state': new_state})
        # NOTE(TheJulia): Similarly to power-on, when we power-off
        # a node, we should detach any volume attachments.
        if (target_state == states.POWER_OFF
                and node.provision_state == states.ACTIVE):
            try:
                task.driver.storage.detach_volumes(task)
            except exception.StorageError as e:
                # Best-effort: a detach failure is logged, not raised.
                LOG.warning("Volume detachment for node %(node)s "
                            "failed. Error: %(error)s",
                            {'node': node.uuid, 'error': e})