Exemplo n.º 1
0
    def update_firmware(self, task, firmware_images):
        """Updates the firmware on the node.

        Applies the given images out-of-band, flags the step as
        asynchronous, reboots the node and returns the async step state.

        :param task: a TaskManager instance containing the node to act on.
        :param firmware_images: a list of firmware images to apply.
        :returns: the async step return state (e.g. CLEANWAIT/DEPLOYWAIT);
            the update always completes asynchronously after the reboot.
        :raises: RedfishError on an error from the Sushy library.
        """
        node = task.node

        LOG.debug(
            'Updating firmware on node %(node_uuid)s with firmware '
            '%(firmware_images)s', {
                'node_uuid': node.uuid,
                'firmware_images': firmware_images
            })

        update_service = redfish_utils.get_update_service(task.node)

        # The cleaning infrastructure has an exclusive lock on the node, so
        # there is no need to get one here.
        self._apply_firmware_update(node, update_service, firmware_images)

        # set_async_step_flags calls node.save()
        deploy_utils.set_async_step_flags(node,
                                          reboot=True,
                                          skip_current_step=True,
                                          polling=True)

        manager_utils.node_power_action(task, states.REBOOT)

        return deploy_utils.get_async_step_return_state(task.node)
Exemplo n.º 2
0
    def delete_configuration(self, task):
        """Delete the RAID configuration.

        First invocation triggers an asynchronous out-of-band delete via
        iLO and returns the async step state; the follow-up invocation
        (flagged by 'ilo_raid_delete_in_progress') verifies the result and
        updates node.raid_config.

        :param task: a TaskManager instance containing the node to act on.
        :raises: NodeCleaningFailure, on failure to execute clean step.
        :raises: InstanceDeployFailure, on failure to execute deploy step.
        """
        node = task.node
        LOG.debug("OOB RAID delete_configuration invoked for node %s.",
                  node.uuid)
        driver_internal_info = node.driver_internal_info
        ilo_object = ilo_common.get_ilo_object(node)

        try:
            # Raid configuration in progress, checking status
            if not driver_internal_info.get('ilo_raid_delete_in_progress'):
                ilo_object.delete_raid_configuration()
                self._prepare_for_read_raid(task, 'delete_raid')
                return deploy_utils.get_async_step_return_state(node)
            else:
                # Raid configuration is done, updating raid_config
                raid_conf = ilo_object.read_raid_configuration()
                fields = ['ilo_raid_delete_in_progress']
                if node.clean_step:
                    fields.append('skip_current_clean_step')
                else:
                    fields.append('skip_current_deploy_step')
                self._pop_driver_internal_values(task, *fields)
                if not len(raid_conf['logical_disks']):
                    node.raid_config = {}
                    LOG.debug("Node %(uuid)s raid delete clean step is done.",
                              {'uuid': node.uuid})
                else:
                    # Raid configuration failed
                    err_msg = (_("Step delete_configuration failed "
                                 "on node %(node)s with error: "
                                 "Unable to delete these logical disks: "
                                 "%(disks)s")
                               % {'node': node.uuid,
                                  'disks': raid_conf['logical_disks']})
                    if node.clean_step:
                        raise exception.NodeCleaningFailure(err_msg)
                    else:
                        raise exception.InstanceDeployFailure(reason=err_msg)
        except ilo_error.IloLogicalDriveNotFoundError:
            LOG.info("No logical drive found to delete on node %(node)s",
                     {'node': node.uuid})
        except ilo_error.IloError as ilo_exception:
            operation = (_("Failed to delete raid configuration on node %s")
                         % node.uuid)
            # BUGFIX: this handler previously made an extra, unconditional
            # _pop_driver_internal_values() call that popped
            # 'skip_current_clean_step' even when the node was running a
            # deploy step; pop only the fields matching the step type.
            fields = ['ilo_raid_delete_in_progress']
            if node.clean_step:
                fields.append('skip_current_clean_step')
            else:
                fields.append('skip_current_deploy_step')
            self._pop_driver_internal_values(task, *fields)
            self._set_step_failed(task, operation, ilo_exception)
Exemplo n.º 3
0
    def apply_configuration(self, task, settings):
        """Apply the BIOS settings to the node.

        Runs in two passes: the first applies the attributes and schedules
        a reboot; the second (flagged by 'post_config_reboot_requested')
        verifies the settings took effect.

        :param task: a TaskManager instance containing the node to act on.
        :param settings: a list of BIOS settings to be updated.
        :raises: RedfishConnectionError when it fails to connect to Redfish
        :raises: RedfishError on an error from the Sushy library
        """

        system = redfish_utils.get_system(task.node)
        try:
            bios = system.bios
        except sushy.exceptions.MissingAttributeError:
            # BUGFIX: message previously said "factory reset failed" even
            # though this is the apply-configuration step.
            error_msg = (_('Redfish BIOS apply configuration failed for '
                           'node %s, because BIOS settings are not '
                           'supported.') % task.node.uuid)
            LOG.error(error_msg)
            raise exception.RedfishError(error=error_msg)

        # Convert Ironic BIOS settings to Redfish BIOS attributes
        attributes = {s['name']: s['value'] for s in settings}

        info = task.node.driver_internal_info
        reboot_requested = info.get('post_config_reboot_requested')

        if not reboot_requested:
            # Step 1: Apply settings and issue a reboot
            LOG.debug('Apply BIOS configuration for node %(node_uuid)s: '
                      '%(settings)r', {'node_uuid': task.node.uuid,
                                       'settings': settings})

            # Prefer applying on reset when the BMC advertises support.
            if bios.supported_apply_times and (
                    sushy.APPLY_TIME_ON_RESET in bios.supported_apply_times):
                apply_time = sushy.APPLY_TIME_ON_RESET
            else:
                apply_time = None

            try:
                bios.set_attributes(attributes, apply_time=apply_time)
            except sushy.exceptions.SushyError as e:
                error_msg = (_('Redfish BIOS apply configuration failed for '
                               'node %(node)s. Error: %(error)s') %
                             {'node': task.node.uuid, 'error': e})
                LOG.error(error_msg)
                raise exception.RedfishError(error=error_msg)

            self.post_configuration(task, settings)
            self._set_reboot_requested(task, attributes)
            return deploy_utils.get_async_step_return_state(task.node)
        else:
            # Step 2: Verify requested BIOS settings applied
            requested_attrs = info.get('requested_bios_attrs')
            current_attrs = bios.attributes
            LOG.debug('Verify BIOS configuration for node %(node_uuid)s: '
                      '%(attrs)r', {'node_uuid': task.node.uuid,
                                    'attrs': requested_attrs})
            self._clear_reboot_requested(task)
            self._check_bios_attrs(task, current_attrs, requested_attrs)
Exemplo n.º 4
0
    def _execute_pre_boot_bios_step(self, task, step, data=None):
        """Perform operations required prior to the reboot.

        Depending on the step, it executes the operations required
        and moves the node to CLEANWAIT or DEPLOYWAIT state prior to reboot
        based on the operation being performed.

        :param task: a task from TaskManager.
        :param step: name of the clean step to be performed
        :param data: if the clean step is apply_configuration it holds
                     the settings data.
        :returns: the async step return state for the node.
        :raises: NodeCleaningFailure, on failure to execute of clean step.
        :raises: InstanceDeployFailure, on failure to execute of deploy step.
        """
        node = task.node

        if step not in ('apply_configuration', 'factory_reset'):
            errmsg = (_('Could not find the step %(step)s for the '
                        'node %(node)s.') % {
                            'step': step,
                            'node': node.uuid
                        })
            if node.clean_step:
                raise exception.NodeCleaningFailure(errmsg)
            raise exception.InstanceDeployFailure(reason=errmsg)

        try:
            ilo_object = ilo_common.get_ilo_object(node)
            # Use a plain if/else: the original conditional expression was
            # evaluated purely for its side effects, which is un-idiomatic
            # and hard to read.
            if step == 'apply_configuration':
                ilo_object.set_bios_settings(data)
            else:
                ilo_object.reset_bios_to_default()
        except (exception.MissingParameterValue,
                exception.InvalidParameterValue, ilo_error.IloError,
                ilo_error.IloCommandNotSupportedError) as ir_exception:
            errmsg = (_('Step %(step)s failed '
                        'on the node %(node)s with error: %(err)s') % {
                            'step': step,
                            'node': node.uuid,
                            'err': ir_exception
                        })
            if node.clean_step:
                raise exception.NodeCleaningFailure(errmsg)
            raise exception.InstanceDeployFailure(reason=errmsg)

        deploy_opts = deploy_utils.build_agent_options(node)
        task.driver.boot.prepare_ramdisk(task, deploy_opts)
        manager_utils.node_power_action(task, states.REBOOT)

        # Mark the step as asynchronous; the ramdisk callback completes it.
        deploy_utils.set_async_step_flags(node,
                                          reboot=True,
                                          skip_current_step=False)
        driver_internal_info = node.driver_internal_info
        if step == 'apply_configuration':
            driver_internal_info['apply_bios'] = True
        else:
            driver_internal_info['reset_bios'] = True

        node.driver_internal_info = driver_internal_info
        node.save()
        return deploy_utils.get_async_step_return_state(node)
Exemplo n.º 5
0
    def factory_reset(self, task):
        """Reset the BIOS settings of the node to the factory default.

        First pass issues the reset and schedules a reboot; the second
        pass (flagged by 'post_factory_reset_reboot_requested') logs the
        resulting attributes and clears the flag.

        :param task: a TaskManager instance containing the node to act on.
        :raises: RedfishConnectionError when it fails to connect to Redfish
        :raises: RedfishError on an error from the Sushy library
        """
        node = task.node
        system = redfish_utils.get_system(node)
        try:
            bios = system.bios
        except sushy.exceptions.MissingAttributeError:
            error_msg = (_('Redfish BIOS factory reset failed for node '
                           '%s, because BIOS settings are not supported.') %
                         node.uuid)
            LOG.error(error_msg)
            raise exception.RedfishError(error=error_msg)

        info = node.driver_internal_info
        if info.get('post_factory_reset_reboot_requested'):
            # Second pass, after the reboot: report the post-reset BIOS
            # attributes and clear the reboot-requested flag.
            current_attrs = bios.attributes
            LOG.debug(
                'Post factory reset, BIOS configuration for node '
                '%(node_uuid)s: %(attrs)r', {
                    'node_uuid': node.uuid,
                    'attrs': current_attrs
                })
            self._clear_reboot_requested(task)
            return

        # First pass: issue the reset and hand off as an async step.
        LOG.debug('Factory reset BIOS configuration for node %(node)s',
                  {'node': node.uuid})
        try:
            bios.reset_bios()
        except sushy.exceptions.SushyError as e:
            error_msg = (_('Redfish BIOS factory reset failed for node '
                           '%(node)s. Error: %(error)s') % {
                               'node': node.uuid,
                               'error': e
                           })
            LOG.error(error_msg)
            raise exception.RedfishError(error=error_msg)

        self.post_reset(task)
        self._set_reboot(task)
        return deploy_utils.get_async_step_return_state(node)
Exemplo n.º 6
0
    def apply_configuration(self, task, settings):
        """Apply the BIOS configuration to the node

        :param task: a TaskManager instance containing the node to act on
        :param settings: List of BIOS settings to apply
        :raises: DRACOperationError upon an error from python-dracclient
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT
                  (deployment) if configuration is in progress asynchronously
                  or None if it is completed.
        """
        node = task.node
        LOG.debug(
            "Configuring node %(node_uuid)s with BIOS settings:"
            " %(settings)s", {
                "node_uuid": node.uuid,
                "settings": settings
            })
        # Translate the ironic settings list into the kwargs mapping that
        # python-dracclient expects.
        attr_map = {setting['name']: setting['value'] for setting in settings}
        drac_job.validate_job_queue(node)
        client = drac_common.get_drac_client(node)
        try:
            # Argument validation is delegated to the dracclient method
            # set_bios_settings; no need to duplicate it here.
            set_result = client.set_bios_settings(attr_map)
        except drac_exceptions.BaseClientException as exc:
            LOG.error(
                "Failed to apply BIOS config on node %(node_uuid)s."
                " Error %(error)s", {
                    "node_uuid": node.uuid,
                    "error": exc
                })
            raise exception.DracOperationError(error=exc)

        if not set_result['is_commit_required']:
            # Nothing to commit -- the configuration is already in effect.
            LOG.info(
                "Completed BIOS configuration on node %(node_uuid)s"
                " with BIOS settings: %(settings)s", {
                    "node_uuid": node.uuid,
                    "settings": settings
                })
            return

        # A commit (and reboot) is needed to make the settings effective.
        LOG.debug("Rebooting node %(node_uuid)s to apply BIOS settings",
                  {"node_uuid": node.uuid})
        reboot_needed = set_result['is_reboot_required']
        try:
            commit_result = client.commit_pending_bios_changes(
                reboot=reboot_needed)
        except drac_exceptions.BaseClientException as exc:
            LOG.error(
                "Failed to commit BIOS changes on node %(node_uuid)s"
                ". Error %(error)s", {
                    "node_uuid": node.uuid,
                    "error": exc
                })
            raise exception.DracOperationError(error=exc)

        # Record the commit job id so the async handler
        # _check_node_bios_jobs can poll it later.
        internal_info = node.driver_internal_info
        internal_info.setdefault('bios_config_job_ids',
                                 []).append(commit_result)
        node.driver_internal_info = internal_info

        # set_async_step_flags() calls node.save(), which also persists
        # bios_config_job_ids; these flags let the conductor manage the
        # asynchronous job initiated above.
        deploy_utils.set_async_step_flags(node,
                                          reboot=reboot_needed,
                                          skip_current_step=True,
                                          polling=True)
        # Return the clean/deploy state string
        return deploy_utils.get_async_step_return_state(node)
Exemplo n.º 7
0
    def factory_reset(self, task):
        """Reset the BIOS settings of the node to the factory default.

        This uses the Lifecycle Controller configuration to perform
        BIOS configuration reset. Leveraging the python-dracclient
        methods already available.

        :param task: a TaskManager instance containing the node to act on
        :raises: DracOperationError on an error from python-dracclient
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT
                  (deployment) if reset is in progress asynchronously or None
                  if it is completed.
        """
        node = task.node
        drac_job.validate_job_queue(node)
        client = drac_common.get_drac_client(node)
        lc_bios_reset_attrib = {"BIOS Reset To Defaults Requested": "True"}
        try:
            set_result = client.set_lifecycle_settings(lc_bios_reset_attrib)
        except drac_exceptions.BaseClientException as exc:
            LOG.error(
                'Failed to reset BIOS on the node %(node_uuid)s.'
                ' Reason: %(error)s.', {
                    'node_uuid': node.uuid,
                    'error': exc
                })
            raise exception.DracOperationError(error=exc)
        if not set_result['is_commit_required']:
            # No pending change to commit: the reset is already effective.
            LOG.info("BIOS reset successful on the node "
                     "%(node_uuid)s", {"node_uuid": node.uuid})
            return
        else:
            # Rebooting the Node is compulsory, LC call returns
            # reboot_required=False/Optional, which is not desired
            reboot_needed = True
            try:
                # Capture the inventory timestamp BEFORE committing: the
                # async handler compares it against the post-reboot value
                # to detect that the factory reset actually completed.
                factory_reset_time_before_reboot =\
                    client.get_system().last_system_inventory_time

                LOG.debug(
                    "Factory resetting node %(node_uuid)s "
                    "last inventory reboot time before factory reset "
                    "%(factory_reset_time_before_reboot)s", {
                        "node_uuid":
                        task.node.uuid,
                        "factory_reset_time_before_reboot":
                        factory_reset_time_before_reboot
                    })

                commit_job_id = client.commit_pending_lifecycle_changes(
                    reboot=reboot_needed)
                LOG.info(
                    "Commit job id of a node %(node_uuid)s."
                    "%(commit_job_id)s", {
                        'node_uuid': node.uuid,
                        "commit_job_id": commit_job_id
                    })
            except drac_exceptions.BaseClientException as exc:
                LOG.error(
                    'Failed to commit BIOS reset on node '
                    '%(node_uuid)s. Reason: %(error)s.', {
                        'node_uuid': node.uuid,
                        'error': exc
                    })
                raise exception.DracOperationError(error=exc)
            # Store the last inventory time on reboot for async job handler
            # _check_last_system_inventory_changed
            driver_internal_info = node.driver_internal_info
            driver_internal_info['factory_reset_time_before_reboot'] = \
                factory_reset_time_before_reboot
            # Store the current time to later check if factory reset times out
            driver_internal_info['factory_reset_time'] = str(
                timeutils.utcnow(with_timezone=True))

            node.driver_internal_info = driver_internal_info
            # rebooting the server to apply factory reset value
            client.set_power_state('REBOOT')

            # This method calls node.save(), bios_config_job_id will be
            # saved automatically
            # These flags are for the conductor to manage the asynchronous
            # jobs that have been initiated by this method
            deploy_utils.set_async_step_flags(node,
                                              reboot=reboot_needed,
                                              skip_current_step=True,
                                              polling=True)

            return deploy_utils.get_async_step_return_state(task.node)
Exemplo n.º 8
0
    def create_configuration(self,
                             task,
                             create_root_volume=True,
                             create_nonroot_volumes=True):
        """Create a RAID configuration on a bare metal using agent ramdisk.

        This method creates a RAID configuration on the given node.
        First invocation triggers the asynchronous OOB create via iLO and
        returns the async step state; the follow-up invocation (flagged by
        'ilo_raid_create_in_progress') verifies the result.

        :param task: a TaskManager instance.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :raises: MissingParameterValue, if node.target_raid_config is missing
            or was found to be empty after skipping root volume and/or non-root
            volumes.
        :raises: NodeCleaningFailure, on failure to execute clean step.
        :raises: InstanceDeployFailure, on failure to execute deploy step.
        """
        node = task.node
        target_raid_config = raid.filter_target_raid_config(
            node,
            create_root_volume=create_root_volume,
            create_nonroot_volumes=create_nonroot_volumes)
        # Persist the filtered target config so the follow-up invocation
        # (after the async create) can validate against the same target.
        driver_internal_info = node.driver_internal_info
        driver_internal_info['target_raid_config'] = target_raid_config
        node.driver_internal_info = driver_internal_info
        node.save()
        LOG.debug(
            "Calling OOB RAID create_configuration for node %(node)s "
            "with the following target RAID configuration: %(target)s", {
                'node': node.uuid,
                'target': target_raid_config
            })
        ilo_object = ilo_common.get_ilo_object(node)

        try:
            # Raid configuration in progress, checking status
            if not driver_internal_info.get('ilo_raid_create_in_progress'):
                ilo_object.create_raid_configuration(target_raid_config)
                self._prepare_for_read_raid(task, 'create_raid')
                return deploy_utils.get_async_step_return_state(node)
            else:
                # Raid configuration is done, updating raid_config
                raid_conf = (ilo_object.read_raid_configuration(
                    raid_config=target_raid_config))
                # Pop the in-progress marker plus the skip flag matching
                # the current step type (clean vs deploy).
                fields = ['ilo_raid_create_in_progress']
                if node.clean_step:
                    fields.append('skip_current_clean_step')
                else:
                    fields.append('skip_current_deploy_step')
                self._pop_driver_internal_values(task, *fields)
                if len(raid_conf['logical_disks']):
                    raid.update_raid_info(node, raid_conf)
                    LOG.debug("Node %(uuid)s raid create clean step is done.",
                              {'uuid': node.uuid})
                else:
                    # Raid configuration failed
                    msg = (_("Step create_configuration failed "
                             "on node %(node)s with error: "
                             "Unable to create raid") % {
                                 'node': node.uuid
                             })
                    if node.clean_step:
                        raise exception.NodeCleaningFailure(msg)
                    else:
                        raise exception.InstanceDeployFailure(reason=msg)
        except ilo_error.IloError as ilo_exception:
            operation = (_("Failed to create raid configuration on node %s") %
                         node.uuid)
            fields = ['ilo_raid_create_in_progress']
            if node.clean_step:
                fields.append('skip_current_clean_step')
            else:
                fields.append('skip_current_deploy_step')
            self._pop_driver_internal_values(task, *fields)
            self._set_step_failed(task, operation, ilo_exception)
Exemplo n.º 9
0
    def delete_configuration(self, task):
        """Delete RAID configuration on the node.

        :param task: TaskManager object containing the node.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if deletion is in progress asynchronously or None if it is
            complete.
        :raises: RedfishError if the volumes to delete cannot be enumerated.
        """
        node = task.node
        system = redfish_utils.get_system(node)
        vols_to_delete = []
        try:
            for storage in system.storage.get_members():
                controller = (storage.storage_controllers[0]
                              if storage.storage_controllers else None)
                controller_name = None
                if controller and controller.identifiers:
                    controller_name = controller.identifiers[0].durable_name
                for volume in storage.volumes.get_members():
                    # Skip raw devices; anything with a RAID/volume type
                    # is a configured volume to be removed.
                    if (volume.raid_type or volume.volume_type not in
                            [None, sushy.VOLUME_TYPE_RAW_DEVICE]):
                        vols_to_delete.append((storage.volumes, volume,
                                               controller_name))
        except sushy.exceptions.SushyError as exc:
            # BUGFIX: formatting must happen AFTER _() so the untranslated
            # message is used for the catalog lookup; '%' was previously
            # applied inside the _() call.
            error_msg = (_('Cannot get the list of volumes to delete for node '
                           '%(node_uuid)s. Reason: %(error)s.')
                         % {'node_uuid': node.uuid, 'error': exc})
            LOG.error(error_msg)
            raise exception.RedfishError(error=exc)

        self.pre_delete_configuration(task, vols_to_delete)

        reboot_required = False
        raid_configs = list()
        for vol_coll, volume, controller_name in vols_to_delete:
            raid_config = dict()
            # Prefer immediate application; fall back to on-reset.
            apply_time = None
            apply_time_support = vol_coll.operation_apply_time_support
            if (apply_time_support
                    and apply_time_support.mapped_supported_values):
                supported_values = apply_time_support.mapped_supported_values
                if sushy.APPLY_TIME_IMMEDIATE in supported_values:
                    apply_time = sushy.APPLY_TIME_IMMEDIATE
                elif sushy.APPLY_TIME_ON_RESET in supported_values:
                    apply_time = sushy.APPLY_TIME_ON_RESET
            response = volume.delete(apply_time=apply_time)
            # only save the async tasks (task_monitors) in raid_config
            if (response is not None
                    and hasattr(response, 'task_monitor_uri')):
                raid_config['operation'] = 'delete'
                raid_config['raid_controller'] = controller_name
                raid_config['task_monitor_uri'] = response.task_monitor_uri
                reboot_required = True
                raid_configs.append(raid_config)

        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_configs'] = raid_configs
        node.driver_internal_info = driver_internal_info

        return_state = None
        # set_async_step_flags calls node.save(), persisting raid_configs.
        deploy_utils.set_async_step_flags(
            node,
            reboot=reboot_required,
            skip_current_step=True,
            polling=True)
        if reboot_required:
            return_state = deploy_utils.get_async_step_return_state(task.node)
            deploy_opts = deploy_utils.build_agent_options(task.node)
            task.driver.boot.prepare_ramdisk(task, deploy_opts)
            manager_utils.node_power_action(task, states.REBOOT)

        return self.post_delete_configuration(
            task, raid_configs, return_state=return_state)
Exemplo n.º 10
0
    def create_configuration(self, task, create_root_volume=True,
                             create_nonroot_volumes=True,
                             delete_existing=False):
        """Create RAID configuration on the node.

        This method creates the RAID configuration as read from
        node.target_raid_config.  This method
        by default will create all logical disks.

        :param task: TaskManager object containing the node.
        :param create_root_volume: Setting this to False indicates
            not to create root volume that is specified in the node's
            target_raid_config. Default value is True.
        :param create_nonroot_volumes: Setting this to False indicates
            not to create non-root volumes (all except the root volume) in
            the node's target_raid_config.  Default value is True.
        :param delete_existing: Setting this to True indicates to delete RAID
            configuration prior to creating the new configuration. Default is
            False.
        :returns: states.CLEANWAIT if RAID configuration is in progress
            asynchronously or None if it is complete.
        :raises: RedfishError if there is an error creating the configuration
        """
        node = task.node

        disks = node.target_raid_config['logical_disks']
        convert_drive_units(disks, node)
        physical_disks, disk_to_controller = get_physical_disks(node)
        # TODO(billdodd): filter out physical disks that are already in use?
        #                 filter out disks with HotSpareType != "None"?
        disks = _find_configuration(disks, physical_disks,
                                    disk_to_controller)

        disks_to_create = _filter_logical_disks(
            disks, create_root_volume, create_nonroot_volumes)

        self.pre_create_configuration(task, disks_to_create)

        needs_reboot = False
        pending_configs = []
        for disk in disks_to_create:
            result = create_virtual_disk(
                task,
                raid_controller=disk.get('controller'),
                physical_disks=disk['physical_disks'],
                raid_level=disk['raid_level'],
                size_bytes=disk['size_bytes'],
                disk_name=disk.get('name'),
                span_length=disk.get('span_length'),
                span_depth=disk.get('span_depth'),
                error_handler=self.volume_create_error_handler)
            # Track only asynchronous results -- those that expose a task
            # monitor URI for later polling.
            if result is not None and hasattr(result, 'task_monitor_uri'):
                pending_configs.append({
                    'operation': 'create',
                    'raid_controller': disk.get('controller'),
                    'task_monitor_uri': result.task_monitor_uri,
                })
                needs_reboot = True

        internal_info = node.driver_internal_info
        internal_info['raid_configs'] = pending_configs
        node.driver_internal_info = internal_info

        return_state = None
        # set_async_step_flags() calls node.save(), persisting raid_configs.
        deploy_utils.set_async_step_flags(
            node,
            reboot=needs_reboot,
            skip_current_step=True,
            polling=True)
        if needs_reboot:
            return_state = deploy_utils.get_async_step_return_state(node)
            deploy_opts = deploy_utils.build_agent_options(node)
            task.driver.boot.prepare_ramdisk(task, deploy_opts)
            manager_utils.node_power_action(task, states.REBOOT)

        return self.post_create_configuration(
            task, pending_configs, return_state=return_state)
Exemplo n.º 11
0
    def factory_reset(self, task):
        """Restore the node's BIOS settings to their factory defaults.

        Requests a BIOS reset through the iDRAC Lifecycle Controller
        (via python-dracclient) and, when the controller requires a
        commit, schedules the pending change as an LC job and reboots
        the node so the job can run.

        :param task: a TaskManager instance containing the node to act on
        :raises: DracOperationError on an error from python-dracclient
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT
                  (deployment) if reset is in progress asynchronously or None
                  if it is completed.
        """
        node = task.node
        # Refuse to start while earlier Lifecycle Controller jobs are
        # still queued on this node.
        drac_job.validate_job_queue(node)
        client = drac_common.get_drac_client(node)

        reset_request = {"BIOS Reset To Defaults Requested": "True"}
        try:
            result = client.set_lifecycle_settings(reset_request)
        except drac_exceptions.BaseClientException as err:
            LOG.error(
                'Failed to reset BIOS on the node %(node_uuid)s.'
                ' Reason: %(error)s.', {
                    'node_uuid': node.uuid,
                    'error': err
                })
            raise exception.DracOperationError(error=err)

        # When no commit is required the reset already took effect and
        # the step is done synchronously.
        if not result['is_commit_required']:
            LOG.info("BIOS reset successful on the node "
                     "%(node_uuid)s", {"node_uuid": node.uuid})
            return

        # Rebooting the Node is compulsory, LC call returns
        # reboot_required=False/Optional, which is not desired
        reboot_needed = True
        try:
            commit_job_id = client.commit_pending_lifecycle_changes(
                reboot=reboot_needed)
        except drac_exceptions.BaseClientException as err:
            LOG.error(
                'Failed to commit BIOS reset on node '
                '%(node_uuid)s. Reason: %(error)s.', {
                    'node_uuid': node.uuid,
                    'error': err
                })
            raise exception.DracOperationError(error=err)

        # Record the JobID so the async job handler
        # _check_node_bios_jobs can poll it later.
        info = node.driver_internal_info
        info.setdefault('bios_config_job_ids', []).append(commit_job_id)
        node.driver_internal_info = info

        # set_async_step_flags() calls node.save(), which also persists
        # the job id recorded above; the flags tell the conductor to
        # poll the asynchronous job and skip this step on re-entry.
        deploy_utils.set_async_step_flags(node,
                                          reboot=reboot_needed,
                                          skip_current_step=True,
                                          polling=True)

        return deploy_utils.get_async_step_return_state(task.node)
Exemplo n.º 12
0
    def import_configuration(self, task, import_configuration_location):
        """Import and apply the configuration to the server.

        Gets pre-created configuration from storage by given location and
        imports that into given server. Uses Dell's Server Configuration
        Profile (SCP). Each manager of the node's system is tried in turn
        until one accepts the import; the node is then rebooted and the
        step completes asynchronously.

        :param task: A task from TaskManager.
        :param import_configuration_location: URL of location to fetch desired
            configuration from.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT
            (deployment) while the import is processed asynchronously.
        :raises: MissingParameterValue if missing configuration name of a file
            to fetch the configuration from
        :raises: DracOperationError if no configuration is found at the given
            location, the configuration targets an interface other than
            idrac-redfish, the system exposes no managers, or every manager
            fails to accept the import.
        :raises: RedfishError if the 'sushy-oem-idrac' extension package is
            not installed.
        """
        if not import_configuration_location:
            raise exception.MissingParameterValue(
                _('import_configuration_location missing'))

        configuration = molds.get_configuration(task,
                                                import_configuration_location)
        if not configuration:
            raise exception.DracOperationError(
                error=(_("No configuration found for node %(node)s by name "
                         "%(configuration_name)s") %
                       {
                           'node': task.node.uuid,
                           'configuration_name': import_configuration_location
                       }))

        # Only configurations produced for the idrac-redfish interface can
        # be applied through the Dell OEM SCP import below.
        interface = configuration["oem"]["interface"]
        if interface != "idrac-redfish":
            raise exception.DracOperationError(
                error=(_("Invalid configuration for node %(node)s "
                         "in %(configuration_name)s. Supports only "
                         "idrac-redfish, but found %(interface)s") %
                       {
                           'node': task.node.uuid,
                           'configuration_name': import_configuration_location,
                           'interface': interface
                       }))

        system = redfish_utils.get_system(task.node)

        if not system.managers:
            raise exception.DracOperationError(
                error=(_("No managers found for %(node)s") % {
                    'node': task.node.uuid
                }))

        for manager in system.managers:
            try:
                manager_oem = manager.get_oem_extension('Dell')
            except sushy.exceptions.OEMExtensionNotFoundError as e:
                # A missing extension package affects every manager alike,
                # so fail fast instead of trying the remaining managers.
                error_msg = (_("Search for Sushy OEM extension Python package "
                               "'sushy-oem-idrac' failed for node %(node)s. "
                               "Ensure it is installed. Error: %(error)s") % {
                                   'node': task.node.uuid,
                                   'error': e
                               })
                LOG.error(error_msg)
                raise exception.RedfishError(error=error_msg)

            try:
                task_monitor = manager_oem.import_system_configuration(
                    json.dumps(configuration["oem"]["data"]))

                # Persist the task monitor URL so the periodic async-step
                # handler can poll the import job after the reboot.
                info = task.node.driver_internal_info
                info['import_task_monitor_url'] = task_monitor.task_monitor_uri
                task.node.driver_internal_info = info

                # set_async_step_flags calls node.save(); the flags tell the
                # conductor to poll the job and skip this step on re-entry.
                deploy_utils.set_async_step_flags(task.node,
                                                  reboot=True,
                                                  skip_current_step=True,
                                                  polling=True)
                deploy_opts = deploy_utils.build_agent_options(task.node)
                task.driver.boot.prepare_ramdisk(task, deploy_opts)
                manager_utils.node_power_action(task, states.REBOOT)

                return deploy_utils.get_async_step_return_state(task.node)
            except sushy.exceptions.SushyError as e:
                LOG.debug(
                    "Sushy OEM extension Python package "
                    "'sushy-oem-idrac' failed to import system "
                    "configuration for node %(node)s. Will try next "
                    "manager, if available. Error: %(error)s", {
                        'node': task.node.uuid,
                        'error': e
                    })
                continue

        raise exception.DracOperationError(
            error=(_("Failed to import configuration for node %(node)s") % {
                'node': task.node.uuid
            }))