Example 1
def node_remove_guest(
    env: LibraryEnvironment,
    node_identifier,
    skip_offline_nodes=False,
    allow_remove_multiple_nodes=False,
    allow_pacemaker_remote_service_fail=False,
    wait: WaitType = False,
):
    """
    Remove a resource representing a guest node and destroy the guest node

    LibraryEnvironment env -- provides all for communication with externals
    string node_identifier -- node name, hostname or resource id
    bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
    bool allow_remove_multiple_nodes -- a flag allowing removal of unexpected
        multiple occurrences of the node matching node_identifier
    bool allow_pacemaker_remote_service_fail -- a flag allowing this command
        to finish successfully even if stopping/disabling pacemaker_remote
        did not succeed
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)
    cib = env.get_cib()

    resource_element_list = _find_resources_to_remove(
        cib,
        env.report_processor,
        "guest",
        node_identifier,
        allow_remove_multiple_nodes,
        guest_node.find_node_resources,
    )

    node_names_list = sorted(
        {
            guest_node.get_node_name_from_resource(node_element)
            for node_element in resource_element_list
        }
    )

    if not env.is_cib_live:
        env.report_processor.report_list(
            _report_skip_live_parts_in_remove(node_names_list)
        )
    else:
        _destroy_pcmk_remote_env(
            env,
            node_names_list,
            skip_offline_nodes,
            allow_pacemaker_remote_service_fail,
        )

    for resource_element in resource_element_list:
        guest_node.unset_guest(resource_element)

    env.push_cib(wait_timeout=wait_timeout)

    # remove node from pcmk caches
    if env.is_cib_live:
        for node_name in node_names_list:
            remove_node(env.cmd_runner(), node_name)
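All of the library commands collected on this page share the same shape: resolve the wait timeout, load the CIB from the LibraryEnvironment, validate and modify it in memory, then call push_cib() (optionally with the timeout). The sketch below makes that control flow explicit with hypothetical stand-in classes; FakeEnv and its methods are illustrative only and do not reproduce the real pcs LibraryEnvironment API.

# A minimal sketch of the shared load-modify-push flow, under the assumption
# described above. FakeEnv is a stand-in, not the real pcs API.
from typing import List


class FakeEnv:
    def __init__(self) -> None:
        self.log: List[str] = []
        self._cib = {"resources": ["guest-node-vm1", "dummy"]}

    def ensure_wait_satisfiable(self, wait) -> int:
        # Return a timeout in seconds, or -1 when no waiting was requested.
        return int(wait) if wait is not False else -1

    def get_cib(self) -> dict:
        return self._cib

    def push_cib(self, wait_timeout: int = -1) -> None:
        self.log.append(f"pushed CIB, wait_timeout={wait_timeout}")


def remove_guest_like_command(env: FakeEnv, node_identifier: str, wait=False):
    wait_timeout = env.ensure_wait_satisfiable(wait)  # 1. resolve waiting
    cib = env.get_cib()                               # 2. load the CIB
    cib["resources"] = [                              # 3. modify it in memory
        rid for rid in cib["resources"] if rid != node_identifier
    ]
    env.push_cib(wait_timeout=wait_timeout)           # 4. push the result


env = FakeEnv()
remove_guest_like_command(env, "guest-node-vm1", wait=30)
print(env.log)  # ['pushed CIB, wait_timeout=30']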
Example 2
def test_push_cib_not_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner", '<cib/>', False
    )
    self.assertEqual([], env.report_processor.report_item_list)
Example 3
def create_alert(
    lib_env: LibraryEnvironment,
    alert_id,
    path,
    instance_attribute_dict,
    meta_attribute_dict,
    description=None,
):
    """
    Create new alert.
    Raises LibraryError if path is not specified or on any other failure.

    lib_env -- LibraryEnvironment
    alert_id -- id of alert to be created, if None it will be generated
    path -- path to script for alert
    instance_attribute_dict -- dictionary of instance attributes
    meta_attribute_dict -- dictionary of meta attributes
    description -- alert description
    """
    if not path:
        raise LibraryError(
            ReportItem.error(
                reports.messages.RequiredOptionsAreMissing(["path"])))

    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    id_provider = IdProvider(cib)
    alert_el = alert.create_alert(cib, alert_id, path, description)
    arrange_first_instance_attributes(alert_el, instance_attribute_dict,
                                      id_provider)
    arrange_first_meta_attributes(alert_el, meta_attribute_dict, id_provider)

    lib_env.push_cib()
Example 4
def add_level(lib_env: LibraryEnvironment,
              level,
              target_type,
              target_value,
              devices,
              force_device=False,
              force_node=False):
    """
    Validate and add a new fencing level

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the new fencing level
    constant target_type -- the new fencing level target value type
    mixed target_value -- the new fencing level target value
    Iterable devices -- list of stonith devices for the new fencing level
    bool force_device -- continue even if a stonith device does not exist
    bool force_node -- continue even if a node (target) does not exist
    """
    version_check = None
    if target_type == TARGET_TYPE_REGEXP:
        version_check = Version(2, 3, 0)
    elif target_type == TARGET_TYPE_ATTRIBUTE:
        version_check = Version(2, 4, 0)

    cib = lib_env.get_cib(version_check)
    cib_fencing_topology.add_level(
        lib_env.report_processor, get_fencing_topology(cib),
        get_resources(cib), level, target_type, target_value, devices,
        ClusterState(get_cluster_status_xml(
            lib_env.cmd_runner())).node_section.nodes, force_device,
        force_node)
    if lib_env.report_processor.has_errors:
        raise LibraryError()
    lib_env.push_cib()
Example 5
def update_alert(
    lib_env: LibraryEnvironment,
    alert_id,
    path,
    instance_attribute_dict,
    meta_attribute_dict,
    description=None,
):
    """
    Update existing alert with specified id.

    lib_env -- LibraryEnvironment
    alert_id -- id of alert to be updated
    path -- new path, if None old value will stay unchanged
    instance_attribute_dict -- dictionary of instance attributes to update
    meta_attribute_dict -- dictionary of meta attributes to update
    description -- new description, if empty string, old description will be
        deleted, if None old value will stay unchanged
    """

    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    id_provider = IdProvider(cib)
    alert_el = alert.update_alert(cib, alert_id, path, description)
    arrange_first_instance_attributes(alert_el, instance_attribute_dict,
                                      id_provider)
    arrange_first_meta_attributes(alert_el, meta_attribute_dict, id_provider)

    lib_env.push_cib()
Example 6
def _defaults_create(
    env: LibraryEnvironment,
    cib_section_name: str,
    validator_options: Mapping[str, Any],
    nvpairs: Mapping[str, str],
    nvset_options: Mapping[str, str],
    nvset_rule: Optional[str] = None,
    force_flags: Optional[Container] = None,
) -> None:
    if force_flags is None:
        force_flags = set()
    force = (reports.codes.FORCE
             in force_flags) or (reports.codes.FORCE_OPTIONS in force_flags)

    required_cib_version = None
    nice_to_have_cib_version = None
    if nvset_rule:
        # Parse the rule to see if we need to upgrade CIB schema. All errors
        # would be properly reported by a validator called below, so we can
        # safely ignore them here.
        try:
            rule_tree = parse_rule(nvset_rule)
            if has_rsc_or_op_expression(rule_tree):
                required_cib_version = Version(3, 4, 0)
            if has_node_attr_expr_with_type_integer(rule_tree):
                nice_to_have_cib_version = Version(3, 5, 0)
        except RuleParseError:
            pass

    cib = env.get_cib(
        minimal_version=required_cib_version,
        nice_to_have_version=nice_to_have_cib_version,
    )
    id_provider = IdProvider(cib)

    validator = nvpair_multi.ValidateNvsetAppendNew(
        id_provider,
        nvpairs,
        nvset_options,
        nvset_rule=nvset_rule,
        **validator_options,
    )
    if env.report_processor.report_list(
            validator.validate(force_options=force)).has_errors:
        raise LibraryError()

    nvpair_multi.nvset_append_new(
        sections.get(cib, cib_section_name),
        id_provider,
        get_pacemaker_version_by_which_cib_was_validated(cib),
        nvpair_multi.NVSET_META,
        nvpairs,
        nvset_options,
        nvset_rule=validator.get_parsed_rule(),
    )

    env.report_processor.report(
        ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))
    env.push_cib()
Example 7
def cib_runner_nodes(lib_env: LibraryEnvironment, wait: WaitType):
    wait_timeout = lib_env.ensure_wait_satisfiable(wait)
    yield (
        lib_env.get_cib(),
        lib_env.cmd_runner(),
        ClusterState(lib_env.get_cluster_state()).node_section.nodes,
    )
    lib_env.push_cib(wait_timeout=wait_timeout)
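cib_runner_nodes is a generator: it yields the CIB, the command runner and the node list, and only calls push_cib() after the caller's block has finished without raising. In the surrounding module this shape is presumably wrapped with contextlib.contextmanager; the sketch below shows that usage pattern with simplified stand-ins (DemoEnv and cib_context are illustrative names, not pcs API).

# Sketch of the generator-as-context-manager pattern, assuming the real
# function is decorated with @contextmanager in its module.
from contextlib import contextmanager


class DemoEnv:
    def get_cib(self):
        return "<cib/>"

    def push_cib(self, wait_timeout=-1):
        print(f"push_cib(wait_timeout={wait_timeout})")


@contextmanager
def cib_context(env: DemoEnv, wait=False):
    wait_timeout = 10 if wait else -1
    # Everything yielded here is bound by the caller's "with ... as" target.
    yield env.get_cib()
    # Runs only after the with-block exits without an exception, mirroring
    # the push_cib() call at the end of cib_runner_nodes.
    env.push_cib(wait_timeout=wait_timeout)


with cib_context(DemoEnv(), wait=True) as cib:
    print("working with", cib)
# Output:
# working with <cib/>
# push_cib(wait_timeout=10)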
Example 8
def remove_all_levels(lib_env: LibraryEnvironment):
    """
    Remove all fencing levels
    LibraryEnvironment lib_env -- environment
    """
    cib_fencing_topology.remove_all_levels(
        get_fencing_topology(lib_env.get_cib()))
    lib_env.push_cib()
Example 9
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner",
        '<cib/>'
    )
    self.assertFalse(env.cib_upgraded)
Example 10
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with("mock cmd runner", '<cib/>',
                                             True)
    assert_report_item_list_equal(
        env.report_processor.report_item_list,
        [(severity.INFO, report_codes.CIB_UPGRADE_SUCCESSFUL, {})])
Example 11
def _defaults_remove(env: LibraryEnvironment, cib_section_name: str,
                     nvset_id_list: Iterable[str]) -> None:
    if not nvset_id_list:
        return
    nvset_elements, report_list = nvpair_multi.find_nvsets_by_ids(
        sections.get(env.get_cib(), cib_section_name), nvset_id_list)
    if env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    nvpair_multi.nvset_remove(nvset_elements)
    env.push_cib()
Example 12
class CibPushProxy(TestCase):
    def setUp(self):
        self.env = LibraryEnvironment(mock.MagicMock(logging.Logger),
                                      MockLibraryReportProcessor())
        get_cib_patcher = patch_env_object("get_cib", lambda self: "<cib />")
        self.addCleanup(get_cib_patcher.stop)
        get_cib_patcher.start()

    def test_push_loaded(self, mock_push_full, mock_push_diff):
        self.env.get_cib()
        self.env.push_cib()
        mock_push_full.assert_not_called()
        mock_push_diff.assert_called_once_with(False)

    def test_push_loaded_wait(self, mock_push_full, mock_push_diff):
        self.env.get_cib()
        self.env.push_cib(wait=10)
        mock_push_full.assert_not_called()
        mock_push_diff.assert_called_once_with(10)

    def test_push_custom(self, mock_push_full, mock_push_diff):
        self.env.get_cib()
        self.env.push_cib(custom_cib="<cib />")
        mock_push_full.assert_called_once_with("<cib />", False)
        mock_push_diff.assert_not_called()

    def test_push_custom_wait(self, mock_push_full, mock_push_diff):
        self.env.get_cib()
        self.env.push_cib(custom_cib="<cib />", wait=10)
        mock_push_full.assert_called_once_with("<cib />", 10)
        mock_push_diff.assert_not_called()
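The decorators that inject mock_push_full and mock_push_diff into these test methods are not shown in the snippet. A plausible setup is sketched below; the patched attribute names ("_push_cib_full", "_push_cib_diff") and the import path are assumptions made for illustration, not taken from the snippet.

# Hypothetical test scaffolding; the patched attribute names are assumed.
# With stacked patch decorators the bottom one is applied first, so each test
# method receives mock_push_full before mock_push_diff.
from unittest import TestCase, mock

from pcs.lib.env import LibraryEnvironment  # assumed import path


@mock.patch.object(LibraryEnvironment, "_push_cib_diff")
@mock.patch.object(LibraryEnvironment, "_push_cib_full")
class CibPushProxy(TestCase):
    def test_push_loaded(self, mock_push_full, mock_push_diff):
        ...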
Example 13
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner", '<cib/>', True
    )
    assert_report_item_list_equal(
        env.report_processor.report_item_list,
        [(
            severity.INFO,
            report_codes.CIB_UPGRADE_SUCCESSFUL,
            {}
        )]
    )
Example 14
class PushCib(TestCase):
    def setUp(self):
        self.env = LibraryEnvironment(mock.MagicMock(logging.Logger),
                                      MockLibraryReportProcessor())

    def test_run_only_push_when_without_wait(self, wait_for_idle,
                                             push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"))
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_not_called()

    def test_run_wait_when_wait_specified(self, wait_for_idle, push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"), 10)
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_called_once_with(self.env.cmd_runner(), 10)
Example 15
class PushCib(TestCase):
    def setUp(self):
        self.env = LibraryEnvironment(
            mock.MagicMock(logging.Logger),
            MockLibraryReportProcessor()
        )

    def test_run_only_push_when_without_wait(self, wait_for_idle, push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"))
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_not_called()

    def test_run_wait_when_wait_specified(self, wait_for_idle, push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"), 10)
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_called_once_with(self.env.cmd_runner(), 10)
Example 16
def update_scsi_devices_add_remove(
        env: LibraryEnvironment,
        stonith_id: str,
        add_device_list: Iterable[str],
        remove_device_list: Iterable[str],
        force_flags: Container[reports.types.ForceCode] = (),
) -> None:
    """
    Update scsi fencing devices without a restart and without affecting
    other resources.

    env -- provides all for communication with externals
    stonith_id -- id of stonith resource
    add_device_list -- paths to the scsi devices that would be added to the
        stonith resource
    remove_device_list -- paths to the scsi devices that would be removed from
        the stonith resource
    force_flags -- list of force flag codes
    """
    runner = env.cmd_runner()
    (
        stonith_el,
        current_device_list,
    ) = _update_scsi_devices_get_element_and_devices(runner,
                                                     env.report_processor,
                                                     env.get_cib(), stonith_id)
    if env.report_processor.report_list(
            validate_add_remove_items(
                add_device_list,
                remove_device_list,
                current_device_list,
                reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
                reports.const.ADD_REMOVE_ITEM_TYPE_DEVICE,
                stonith_el.get("id", ""),
            )).has_errors:
        raise LibraryError()
    updated_device_set = (set(current_device_list).union(
        add_device_list).difference(remove_device_list))
    resource.stonith.update_scsi_devices_without_restart(
        env.cmd_runner(),
        env.get_cluster_state(),
        stonith_el,
        IdProvider(stonith_el),
        updated_device_set,
    )
    _unfencing_scsi_devices(env, stonith_el, current_device_list,
                            updated_device_set, force_flags)
    env.push_cib()
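The updated_device_set expression above is plain set arithmetic: start from the devices currently configured, add the requested additions, then drop the requested removals. A quick standalone check of that expression (the device paths are made up for illustration):

# Standalone check of the set arithmetic used for updated_device_set above;
# the device paths are illustrative only.
current_device_list = ["/dev/sda", "/dev/sdb"]
add_device_list = ["/dev/sdc"]
remove_device_list = ["/dev/sda"]

updated_device_set = (set(current_device_list).union(
    add_device_list).difference(remove_device_list))
print(sorted(updated_device_set))  # ['/dev/sdb', '/dev/sdc']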
Example 17
def add_recipient(
    lib_env: LibraryEnvironment,
    alert_id,
    recipient_value,
    instance_attribute_dict,
    meta_attribute_dict,
    recipient_id=None,
    description=None,
    allow_same_value=False
):
    """
    Add a new recipient to the alert with id alert_id.

    lib_env -- LibraryEnvironment
    alert_id -- id of alert to which new recipient should be added
    recipient_value -- value of new recipient
    instance_attribute_dict -- dictionary of instance attributes to update
    meta_attribute_dict -- dictionary of meta attributes to update
    recipient_id -- id of new recipient, if None it will be generated
    description -- recipient description
    allow_same_value -- if True unique recipient value is not required
    """
    if not recipient_value:
        raise LibraryError(
            reports.required_options_are_missing(["value"])
        )

    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    id_provider = IdProvider(cib)
    recipient = alert.add_recipient(
        lib_env.report_processor,
        cib,
        alert_id,
        recipient_value,
        recipient_id=recipient_id,
        description=description,
        allow_same_value=allow_same_value
    )
    arrange_first_instance_attributes(
        recipient, instance_attribute_dict, id_provider
    )
    arrange_first_meta_attributes(
        recipient, meta_attribute_dict, id_provider
    )

    lib_env.push_cib()
Example 18
def remove_recipient(lib_env: LibraryEnvironment, recipient_id_list):
    """
    Remove specified recipients.

    lib_env -- LibraryEnvironment
    recipient_id_list -- list of recipient ids to be removed
    """
    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    report_list: ReportItemList = []
    for recipient_id in recipient_id_list:
        try:
            alert.remove_recipient(cib, recipient_id)
        except LibraryError as e:
            report_list += e.args
    if lib_env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    lib_env.push_cib()
Example 19
def remove_alert(lib_env: LibraryEnvironment, alert_id_list):
    """
    Remove alerts with specified ids.

    lib_env -- LibraryEnvironment
    alert_id_list -- list of alert ids which should be removed
    """
    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    report_list: ReportItemList = []
    for alert_id in alert_id_list:
        try:
            alert.remove_alert(cib, alert_id)
        except LibraryError as e:
            report_list += e.args

    if lib_env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    lib_env.push_cib()
Example 20
def update_recipient(
    lib_env: LibraryEnvironment,
    recipient_id,
    instance_attribute_dict,
    meta_attribute_dict,
    recipient_value=None,
    description=None,
    allow_same_value=False
):
    """
    Update existing recipient.

    lib_env -- LibraryEnvironment
    recipient_id -- id of recipient to be updated
    instance_attribute_dict -- dictionary of instance attributes to update
    meta_attribute_dict -- dictionary of meta attributes to update
    recipient_value -- new recipient value, if None old value will stay
        unchanged
    description -- new description, if empty string, old description will be
        deleted, if None old value will stay unchanged
    allow_same_value -- if True unique recipient value is not required
    """
    if not recipient_value and recipient_value is not None:
        raise LibraryError(
            reports.cib_alert_recipient_invalid_value(recipient_value)
        )
    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    id_provider = IdProvider(cib)
    recipient = alert.update_recipient(
        lib_env.report_processor,
        cib,
        recipient_id,
        recipient_value=recipient_value,
        description=description,
        allow_same_value=allow_same_value
    )
    arrange_first_instance_attributes(
        recipient, instance_attribute_dict, id_provider
    )
    arrange_first_meta_attributes(
        recipient, meta_attribute_dict, id_provider
    )

    lib_env.push_cib()
Example 21
def _defaults_create(
    env: LibraryEnvironment,
    cib_section_name: str,
    validator_options: Mapping[str, Any],
    nvpairs: Mapping[str, str],
    nvset_options: Mapping[str, str],
    nvset_rule: Optional[str] = None,
    force_flags: Optional[Container] = None,
) -> None:
    if force_flags is None:
        force_flags = set()
    force = (reports.codes.FORCE
             in force_flags) or (reports.codes.FORCE_OPTIONS in force_flags)

    required_cib_version = None
    if nvset_rule:
        required_cib_version = Version(3, 4, 0)
    cib = env.get_cib(required_cib_version)
    id_provider = IdProvider(cib)

    validator = nvpair_multi.ValidateNvsetAppendNew(
        id_provider,
        nvpairs,
        nvset_options,
        nvset_rule=nvset_rule,
        **validator_options,
    )
    if env.report_processor.report_list(
            validator.validate(force_options=force)).has_errors:
        raise LibraryError()

    nvpair_multi.nvset_append_new(
        sections.get(cib, cib_section_name),
        id_provider,
        nvpair_multi.NVSET_META,
        nvpairs,
        nvset_options,
        nvset_rule=validator.get_parsed_rule(),
    )

    env.report_processor.report(
        ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))
    env.push_cib()
Example 22
def update_scsi_devices(
        env: LibraryEnvironment,
        stonith_id: str,
        set_device_list: Iterable[str],
        force_flags: Container[reports.types.ForceCode] = (),
) -> None:
    """
    Update scsi fencing devices without a restart and without affecting
    other resources.

    env -- provides all for communication with externals
    stonith_id -- id of stonith resource
    set_device_list -- paths to the scsi devices that would be set for stonith
        resource
    force_flags -- list of force flag codes
    """
    if not set_device_list:
        env.report_processor.report(
            ReportItem.error(
                reports.messages.InvalidOptionValue("devices",
                                                    "",
                                                    None,
                                                    cannot_be_empty=True)))
    runner = env.cmd_runner()
    (
        stonith_el,
        current_device_list,
    ) = _update_scsi_devices_get_element_and_devices(runner,
                                                     env.report_processor,
                                                     env.get_cib(), stonith_id)
    if env.report_processor.has_errors:
        raise LibraryError()
    resource.stonith.update_scsi_devices_without_restart(
        runner,
        env.get_cluster_state(),
        stonith_el,
        IdProvider(stonith_el),
        set_device_list,
    )
    _unfencing_scsi_devices(env, stonith_el, current_device_list,
                            set_device_list, force_flags)
    env.push_cib()
Example 23
def _set_any_defaults(section_name, env: LibraryEnvironment, options):
    """
    string section_name -- determines the section of defaults
    env -- provides access to outside environment
    dict options -- desired options with their values; when a value is empty
        the option is to be removed
    """
    # Do not ever remove the nvset element, even if it is empty. There may be
    # ACLs set in pacemaker which allow "write" for nvpairs (adding, changing
    # and removing) but not nvsets. In such a case, removing the nvset would
    # cause the whole change to be rejected by pacemaker with a "permission
    # denied" message.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1642514
    env.report_processor.report(
        ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))

    if not options:
        return

    cib = env.get_cib()

    # Do not create new defaults element if we are only removing values from it.
    only_removing = True
    for value in options.values():
        if value != "":
            only_removing = False
            break
    if only_removing and not sections.exists(cib, section_name):
        return

    defaults_section = sections.get(cib, section_name)
    arrange_first_meta_attributes(
        defaults_section,
        options,
        IdProvider(cib),
        new_id="{0}-options".format(section_name),
    )

    env.push_cib()
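The loop that computes only_removing above (and the identical loop in the _defaults_update example further down) can be written more compactly with all(); the snippet below is a sketch of an equivalent formulation with the same result for any dict of string values.

# Equivalent to the explicit only_removing loop above: True exactly when
# every requested value is an empty string, i.e. only removals were asked for.
options = {"resource-stickiness": "", "migration-threshold": ""}
only_removing = all(value == "" for value in options.values())
print(only_removing)  # True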
Example 24
def add_level(
    lib_env: LibraryEnvironment,
    level,
    target_type,
    target_value,
    devices,
    force_device=False,
    force_node=False,
):
    """
    Validate and add a new fencing level

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the new fencing level
    constant target_type -- the new fencing level target value type
    mixed target_value -- the new fencing level target value
    Iterable devices -- list of stonith devices for the new fencing level
    bool force_device -- continue even if a stonith device does not exist
    bool force_node -- continue even if a node (target) does not exist
    """
    cib = lib_env.get_cib()
    cib_fencing_topology.add_level(
        lib_env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        level,
        target_type,
        target_value,
        devices,
        ClusterState(lib_env.get_cluster_state()).node_section.nodes,
        force_device,
        force_node,
    )
    if lib_env.report_processor.has_errors:
        raise LibraryError()
    lib_env.push_cib()
Example 25
def test_push_cib_not_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with("mock cmd runner", '<cib/>',
                                             False)
    self.assertEqual([], env.report_processor.report_item_list)
Example 26
def update_scsi_devices(
    env: LibraryEnvironment,
    stonith_id: str,
    set_device_list: Iterable[str],
    force_flags: Container[reports.types.ForceCode] = (),
) -> None:
    """
    Update scsi fencing devices without a restart and without affecting
    other resources.

    env -- provides all for communication with externals
    stonith_id -- id of stonith resource
    set_device_list -- paths to the scsi devices that would be set for stonith
        resource
    force_flags -- list of force flag codes
    """
    if not is_getting_resource_digest_supported(env.cmd_runner()):
        raise LibraryError(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported()
            )
        )
    cib = env.get_cib()
    if not set_device_list:
        env.report_processor.report(
            ReportItem.error(
                reports.messages.InvalidOptionValue(
                    "devices", "", None, cannot_be_empty=True
                )
            )
        )
    (
        stonith_el,
        report_list,
    ) = stonith.validate_stonith_restartless_update(cib, stonith_id)
    if env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # for mypy, this should not happen because an exception would be raised
    if stonith_el is None:
        raise AssertionError("stonith element is None")

    stonith.update_scsi_devices_without_restart(
        env.cmd_runner(),
        env.get_cluster_state(),
        stonith_el,
        IdProvider(cib),
        set_device_list,
    )

    # Unfencing
    cluster_nodes_names, nodes_report_list = get_existing_nodes_names(
        env.get_corosync_conf(),
        error_on_missing_name=True,
    )
    env.report_processor.report_list(nodes_report_list)
    (
        target_report_list,
        cluster_nodes_target_list,
    ) = env.get_node_target_factory().get_target_list_with_reports(
        cluster_nodes_names,
        allow_skip=False,
    )
    env.report_processor.report_list(target_report_list)
    if env.report_processor.has_errors:
        raise LibraryError()
    com_cmd: AllSameDataMixin = GetCorosyncOnlineTargets(
        env.report_processor,
        skip_offline_targets=reports.codes.SKIP_OFFLINE_NODES in force_flags,
    )
    com_cmd.set_targets(cluster_nodes_target_list)
    online_corosync_target_list = run_and_raise(
        env.get_node_communicator(), com_cmd
    )
    com_cmd = Unfence(env.report_processor, sorted(set_device_list))
    com_cmd.set_targets(online_corosync_target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    env.push_cib()
Example 27
def create_in_cluster(
    env: LibraryEnvironment,
    ip,
    instance_name=None,
    allow_absent_resource_agent=False,
):
    """
    Create group with ip resource and booth resource

    env -- provides all for communication with externals
    string ip -- floating IP address for the operation of the booth
    string instance_name -- booth instance name
    bool allow_absent_resource_agent -- allow creating the booth resource even
        if its agent is not installed
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    # Booth config path goes to CIB. Working with mocked booth configs would
    # not work correctly as the path would point to a mock file (the path to a
    # mock file is unknown to us in the lib anyway).
    # It makes sense to work with a mocked CIB, though. Users can do other
    # changes to the CIB and push them to the cluster at once.
    _ensure_live_booth_env(booth_env)
    resources_section = get_resources(env.get_cib())
    id_provider = IdProvider(resources_section)
    instance_name = booth_env.instance_name

    # validate
    if resource.find_for_config(resources_section, booth_env.config_path):
        report_processor.report(
            ReportItem.error(reports.messages.BoothAlreadyInCib(instance_name))
        )
    # verify the config exists and is readable
    try:
        booth_env.config.raw_file.read()
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
    # validation done

    create_id = partial(
        resource.create_resource_id, resources_section, instance_name
    )
    get_agent = partial(
        find_valid_resource_agent_by_name,
        env.report_processor,
        env.cmd_runner(),
        allowed_absent=allow_absent_resource_agent,
    )
    create_primitive = partial(
        primitive.create, env.report_processor, resources_section, id_provider
    )
    into_booth_group = partial(
        group.place_resource,
        group.provide_group(resources_section, create_id("group")),
    )

    into_booth_group(
        create_primitive(
            create_id("ip"),
            get_agent("ocf:heartbeat:IPaddr2"),
            instance_attributes={"ip": ip},
        )
    )
    into_booth_group(
        create_primitive(
            create_id("service"),
            get_agent("ocf:pacemaker:booth-site"),
            instance_attributes={"config": booth_env.config_path},
        )
    )

    env.push_cib()
Example 28
def remove_levels_by_params(
    lib_env: LibraryEnvironment,
    level=None,
    # TODO create a special type, so that it cannot accept any string
    target_type: Optional[str] = None,
    target_value=None,
    devices: Optional[Iterable[str]] = None,
    # TODO remove, deprecated backward compatibility layer
    ignore_if_missing: bool = False,
    # TODO remove, deprecated backward compatibility layer
    target_may_be_a_device: bool = False,
):
    """
    Remove specified fencing level(s).

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the fencing level to remove
    target_type -- the removed fencing level target value type
    mixed target_value -- the removed fencing level target value
    devices -- list of stonith devices of the removed fencing level
    ignore_if_missing -- when True, do not report if level not found
    target_may_be_a_device -- enables backward compatibility mode for old CLI
    """
    topology_el = get_fencing_topology(lib_env.get_cib())
    report_list = cib_fencing_topology.remove_levels_by_params(
        topology_el,
        level,
        target_type,
        target_value,
        devices,
        ignore_if_missing,
        validate_device_ids=(not target_may_be_a_device),
    )

    if not target_may_be_a_device or target_type != TARGET_TYPE_NODE:
        if lib_env.report_processor.report_list(report_list).has_errors:
            raise LibraryError()
        lib_env.push_cib()
        return

    # TODO remove, deprecated backward compatibility mode
    # CLI command parameters are: level, node, stonith, stonith... Both the
    # node and the stonith list are optional. If the node is omitted and the
    # stonith list is present, there is no way to figure it out, since there is
    # no specification of what the parameter is. Hence the pre-lib code tried
    # both. First it assumed the first parameter is a node. If that fence level
    # didn't exist, it assumed the first parameter is a device. Since it was
    # only possible to specify node as a target back then, this is enabled only
    # in that case.
    # CLI has no way to figure out what the first parameter is. Therefore, the
    # lib must try both cases if asked to do so.
    if not report.has_errors(report_list):
        lib_env.report_processor.report_list(report_list)
        lib_env.push_cib()
        return

    level_not_found = False
    for report_item in report_list:
        if (report_item.message.code ==
                report.codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST):
            level_not_found = True
            break
    if not level_not_found:
        lib_env.report_processor.report_list(report_list)
        raise LibraryError()

    target_and_devices = [target_value]
    if devices:
        target_and_devices.extend(devices)
    report_list_second = cib_fencing_topology.remove_levels_by_params(
        topology_el,
        level,
        None,
        None,
        target_and_devices,
        ignore_if_missing,
        validate_device_ids=(not target_may_be_a_device),
    )
    if not report.has_errors(report_list_second):
        lib_env.report_processor.report_list(report_list_second)
        lib_env.push_cib()
        return

    lib_env.report_processor.report_list(report_list)
    lib_env.report_processor.report_list(report_list_second)
    raise LibraryError()
Example 29
File: booth.py Project: vvidic/pcs
def create_in_cluster(
    env: LibraryEnvironment,
    ip: str,
    instance_name: Optional[str] = None,
    allow_absent_resource_agent: bool = False,
):
    """
    Create group with ip resource and booth resource

    env -- provides all for communication with externals
    ip -- floating IP address for the operation of the booth
    instance_name -- booth instance name
    allow_absent_resource_agent -- allow creating the booth resource even
        if its agent is not installed
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    # Booth config path goes to CIB. Working with mocked booth configs would
    # not work correctly as the path would point to a mock file (the path to a
    # mock file is unknown to us in the lib anyway).
    # It makes sense to work with a mocked CIB, though. Users can do other
    # changes to the CIB and push them to the cluster at once.
    _ensure_live_booth_env(booth_env)
    resources_section = get_resources(env.get_cib())
    id_provider = IdProvider(resources_section)
    instance_name = booth_env.instance_name

    # validate
    if resource.find_for_config(resources_section, booth_env.config_path):
        report_processor.report(
            ReportItem.error(reports.messages.BoothAlreadyInCib(instance_name))
        )
    # verify the config exists and is readable
    try:
        booth_env.config.raw_file.read()
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
    # validation done

    create_id = partial(
        resource.create_resource_id, resources_section, instance_name
    )
    create_primitive = partial(
        primitive.create, env.report_processor, resources_section, id_provider
    )
    agent_factory = ResourceAgentFacadeFactory(
        env.cmd_runner(), report_processor
    )

    # Group id validation is not needed since create_id creates a new unique
    # booth group identifier
    hierarchy.move_resources_to_group(
        group.append_new(resources_section, create_id("group")),
        [
            create_primitive(
                create_id("ip"),
                _get_agent_facade(
                    env.report_processor,
                    agent_factory,
                    allow_absent_resource_agent,
                    ResourceAgentName("ocf", "heartbeat", "IPaddr2"),
                ),
                instance_attributes={"ip": ip},
            ),
            create_primitive(
                create_id("service"),
                _get_agent_facade(
                    env.report_processor,
                    agent_factory,
                    allow_absent_resource_agent,
                    ResourceAgentName("ocf", "pacemaker", "booth-site"),
                ),
                instance_attributes={"config": booth_env.config_path},
            ),
        ],
    )

    env.push_cib()
Example 30
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with("mock cmd runner", '<cib/>')
    self.assertFalse(env.cib_upgraded)
Example 31
def _defaults_update(
    env: LibraryEnvironment,
    cib_section_name: str,
    nvset_id: Optional[str],
    nvpairs: Mapping[str, str],
    pcs_command: reports.types.PcsCommand,
) -> None:
    cib = env.get_cib()
    id_provider = IdProvider(cib)

    if nvset_id is None:
        # Backward compatibility code to support an old use case where no id
        # was requested and provided and the first meta_attributes nvset was
        # created / updated. However, we check that there is only one nvset
        # present in the CIB to prevent breaking the configuration with
        # multiple nvsets in place.

        # This is to be supported as it provides means of easily managing
        # defaults if only one set of defaults is needed.

        # TODO move this to a separate lib command.

        if not nvpairs:
            return

        # Do not create new defaults element if we are only removing values
        # from it.
        only_removing = True
        for value in nvpairs.values():
            if value != "":
                only_removing = False
                break
        if only_removing and not sections.exists(cib, cib_section_name):
            env.report_processor.report(
                ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))
            return

        nvset_elements = nvpair_multi.find_nvsets(
            sections.get(cib, cib_section_name))
        if len(nvset_elements) > 1:
            env.report_processor.report(
                reports.item.ReportItem.error(
                    reports.messages.CibNvsetAmbiguousProvideNvsetId(
                        pcs_command)))
            raise LibraryError()
        env.report_processor.report(
            ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))
        if len(nvset_elements) == 1:
            nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
        elif only_removing:
            # do not create new nvset if there is none and we are only removing
            # nvpairs
            return
        else:
            nvpair_multi.nvset_append_new(
                sections.get(cib, cib_section_name),
                id_provider,
                nvpair_multi.NVSET_META,
                nvpairs,
                {},
            )
        env.push_cib()
        return

    nvset_elements, report_list = nvpair_multi.find_nvsets_by_ids(
        sections.get(cib, cib_section_name), [nvset_id])
    if env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
    env.report_processor.report(
        ReportItem.warning(reports.messages.DefaultsCanBeOverriden()))
    env.push_cib()
Example 32
File: tag.py Project: zht750808/pcs
def cib_tags_section(env: LibraryEnvironment) -> Iterator[_Element]:
    yield get_tags(env.get_cib(REQUIRED_CIB_VERSION))
    env.push_cib()
Example 33
def node_add_guest(
    env: LibraryEnvironment,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait: WaitType = False,
):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    corosync_conf: Optional[CorosyncConfigFacade]
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB, )))
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            if new_target:
                new_addr = new_target.first_addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source)))
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                new_addr = known_hosts[0].dest.addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source)))

    # validate inputs
    report_list = guest_node.validate_set_as_guest(cib, existing_nodes_names,
                                                   existing_nodes_addrs,
                                                   node_name, options)
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib"))

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, resource_id)
Example 34
def node_add_remote(
    env: LibraryEnvironment,
    node_name: str,
    node_addr: Optional[str],
    operations: Iterable[Mapping[str, str]],
    meta_attributes: Mapping[str, str],
    instance_attributes: Mapping[str, str],
    skip_offline_nodes: bool = False,
    allow_incomplete_distribution: bool = False,
    allow_pacemaker_remote_service_fail: bool = False,
    allow_invalid_operation: bool = False,
    allow_invalid_instance_attributes: bool = False,
    use_default_operations: bool = True,
    wait: WaitType = False,
):
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    env -- provides all for communication with externals
    node_name -- the name of the new node
    node_addr -- the address of the new node or None for default
    operations -- attributes for each entered operation
    meta_attributes -- attributes for primitive/meta_attributes
    instance_attributes -- attributes for primitive/instance_attributes
    skip_offline_nodes -- if True, ignore when some nodes are offline
    allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    allow_invalid_operation -- if True, allow using operations that
        are not listed in the resource agent metadata
    allow_invalid_instance_attributes -- if True, allow using instance
        attributes that are not listed in the resource agent metadata and
        allow omitting required instance_attributes
    use_default_operations -- if True, add operations specified in
        a resource agent metadata to the resource
    wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)

    report_processor = env.report_processor
    cib = env.get_cib(
        minimal_version=get_required_cib_version_for_primitive(operations))
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf: Optional[CorosyncConfigFacade] = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB, )))
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    try:
        resource_agent_facade = ResourceAgentFacadeFactory(
            env.cmd_runner(),
            report_processor).facade_from_parsed_name(remote_node.AGENT_NAME)
    except ResourceAgentError as e:
        report_processor.report(resource_agent_error_to_report_item(e))
        raise LibraryError() from e

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            if new_target:
                node_addr = new_target.first_addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME)
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source)))
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                node_addr = known_hosts[0].dest.addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS)
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME)
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source)))

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent_facade.metadata,
        node_name,
        node_addr,
        instance_attributes,
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent_facade,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check for id conflicts against nodes as well. Until validation is
        # separated from resource creation, the reports have to be
        # deduplicated after the fact.
        already_exists = []
        unified_report_list = []
        for report_item in report_list + list(e.args):
            # pylint: disable=no-member
            dto_obj = report_item.message.to_dto()
            if dto_obj.code not in (
                    reports.codes.ID_ALREADY_EXISTS,
                    reports.codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report_item)
            elif ("id" in dto_obj.payload
                  and dto_obj.payload["id"] not in already_exists):
                unified_report_list.append(report_item)
                already_exists.append(dto_obj.payload["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib"))

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
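The except branch above deduplicates report items: reports whose code is not one of the two "already exists" codes are always kept, while "already exists" reports are kept only once per offending id. The standalone sketch below reproduces that filter over plain dicts; the report structure is simplified for illustration.

# Simplified stand-in for the deduplication in the except branch above.
reports_in = [
    {"code": "ID_ALREADY_EXISTS", "id": "node-1"},
    {"code": "ID_ALREADY_EXISTS", "id": "node-1"},  # duplicate id, dropped
    {"code": "RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE", "id": "node-2"},
    {"code": "SOME_OTHER_ERROR"},                   # always kept
]
dedup_codes = {"ID_ALREADY_EXISTS", "RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE"}

already_exists = []
unified_report_list = []
for item in reports_in:
    if item["code"] not in dedup_codes:
        unified_report_list.append(item)
    elif "id" in item and item["id"] not in already_exists:
        unified_report_list.append(item)
        already_exists.append(item["id"])

print([item["code"] for item in unified_report_list])
# ['ID_ALREADY_EXISTS', 'RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE',
#  'SOME_OTHER_ERROR']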