Example #1
def send_config_to_all_nodes(
    communicator, reporter, node_list, name, config_data, authfile=None,
    authfile_data=None, skip_offline=False
):
    """
    Send config_data of specified booth instance from local node to all nodes in
    node_list.

    communicator -- NodeCommunicator
    reporter -- report processor
    node_list -- NodeAddressesList
    name -- name of booth instance
    config_data -- config_data content as string
    authfile -- path to authfile
    authfile_data -- content of authfile as bytes
    skip_offline -- if True offline nodes will be skipped
    """
    reporter.process(reports.booth_distributing_config(name))
    parallel_nodes_communication_helper(
        _set_config_on_node,
        [
            (
                [
                    communicator, reporter, node, name, config_data,
                    authfile, authfile_data
                ],
                {}
            )
            for node in node_list
        ],
        reporter,
        skip_offline
    )
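Each entry handed to parallel_nodes_communication_helper above is an ([args], {kwargs}) pair, one per node. A minimal, self-contained sketch of that shape, assuming nothing about pcs internals (the stub function and the string stand-ins below are illustrative only):

def _set_config_stub(communicator, reporter, node, name, config_data,
                     authfile, authfile_data):
    # stand-in for _set_config_on_node
    print("would send booth config %r to node %r" % (name, node))

# same ([positional args], {keyword args}) shape as the list built above
request_parameters = [
    (
        ["communicator", "reporter", node, "booth", "config text", None, None],
        {}
    )
    for node in ["node-1", "node-2", "node-3"]
]
for args, kwargs in request_parameters:
    _set_config_stub(*args, **kwargs)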
Example #2
def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
    """
    remove configuration used by qdevice model net
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    reporter = lib_env.report_processor
    communicator = lib_env.node_communicator()

    reporter.process(
        reports.qdevice_certificate_removal_started()
    )
    def do_and_report(reporter, communicator, node):
        qdevice_net.remote_client_destroy(communicator, node)
        reporter.process(
            reports.qdevice_certificate_removed_from_node(node.label)
        )
    parallel_nodes_communication_helper(
        do_and_report,
        [
            [(reporter, communicator, node), {}]
            for node in cluster_nodes
        ],
        lib_env.report_processor,
        skip_offline_nodes
    )
Example #3
def qdevice_reload_on_nodes(
    node_communicator, reporter, node_addr_list, skip_offline_nodes=False
):
    """
    Reload corosync-qdevice configuration on cluster nodes
    NodeAddressesList node_addr_list nodes to reload config on
    bool skip_offline_nodes don't raise an error on node communication errors
    """
    reporter.process(reports.qdevice_client_reload_started())
    parallel_params = [
        [(reporter, node_communicator, node), {}]
        for node in node_addr_list
    ]
    # catch exceptions from the stop phase so we still try to start qdevice on
    # nodes where we stopped it
    report_items = []
    try:
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_stop,
            parallel_params,
            reporter,
            skip_offline_nodes
        )
    except LibraryError as e:
        report_items.extend(e.args)
    try:
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_start,
            parallel_params,
            reporter,
            skip_offline_nodes
        )
    except LibraryError as e:
        report_items.extend(e.args)
    reporter.process_list(report_items)
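The try/except blocks above intentionally swallow LibraryError from each phase and only call process_list at the end, so the start phase still runs on nodes where the stop phase failed and all errors surface together. A generic, self-contained sketch of that accumulate-then-report pattern (PhaseError and run_phase are stand-ins, not pcs code):

class PhaseError(Exception):
    pass

def run_phase(name, fail=False):
    # stand-in for one parallel_nodes_communication_helper call
    if fail:
        raise PhaseError("%s failed" % name)

collected = []
for phase, fail in (("stop", True), ("start", False)):
    try:
        run_phase(phase, fail)
    except PhaseError as e:
        collected.extend(e.args)
# both phases ran; the failures are reported together at the end
print(collected)  # ['stop failed']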
Example #4
def send_config_to_all_nodes(
    communicator, reporter, node_list, name, config_data, authfile=None,
    authfile_data=None, skip_offline=False
):
    """
    Send config_data of specified booth instance from local node to all nodes in
    node_list.

    communicator -- NodeCommunicator
    reporter -- report processor
    node_list -- NodeAddressesList
    name -- name of booth instance
    config_data -- config_data content as string
    authfile -- path to authfile
    authfile_data -- content of authfile as bytes
    skip_offline -- if True offline nodes will be skipped
    """
    reporter.process(reports.booth_config_distribution_started())
    parallel_nodes_communication_helper(
        _set_config_on_node,
        [
            (
                [
                    communicator, reporter, node, name, config_data,
                    authfile, authfile_data
                ],
                {}
            )
            for node in node_list
        ],
        reporter,
        skip_offline
    )
Example #5
def remove_device(lib_env, skip_offline_nodes=False):
    """
    Stop using quorum device, distribute and reload configs if live
    skip_offline_nodes continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
    cfg.remove_quorum_device()

    if lib_env.is_corosync_conf_live:
        communicator = lib_env.node_communicator()
        # fix quorum options for SBD to work properly
        if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
            lib_env.report_processor.process(reports.sbd_requires_atb())
            cfg.set_quorum_options(
                lib_env.report_processor, {"auto_tie_breaker": "1"}
            )

        # disable qdevice
        lib_env.report_processor.process(
            reports.service_disable_started("corosync-qdevice")
        )
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_disable,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )
        # stop qdevice
        lib_env.report_processor.process(
            reports.service_stop_started("corosync-qdevice")
        )
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_stop,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )
        # handle model specific configuration
        if model == "net":
            _remove_device_model_net(
                lib_env,
                cfg.get_nodes(),
                skip_offline_nodes
            )

    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
Example #6
def _add_device_model_net(lib_env, qnetd_host, cluster_name, cluster_nodes,
                          skip_offline_nodes):
    """
    setup cluster nodes for using qdevice model net
    string qnetd_host address of qdevice provider (qnetd host)
    string cluster_name name of the cluster to which qdevice is being added
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    communicator = lib_env.node_communicator()
    runner = lib_env.cmd_runner()
    reporter = lib_env.report_processor

    reporter.process(reports.qdevice_certificate_distribution_started())
    # get qnetd CA certificate
    try:
        qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
            communicator, qnetd_host)
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    # init certificate storage on all nodes
    parallel_nodes_communication_helper(
        qdevice_net.remote_client_setup,
        [((communicator, node, qnetd_ca_cert), {})
         for node in cluster_nodes], reporter, skip_offline_nodes)
    # create client certificate request
    cert_request = qdevice_net.client_generate_certificate_request(
        runner, cluster_name)
    # sign the request on qnetd host
    try:
        signed_certificate = qdevice_net.remote_sign_certificate_request(
            communicator, qnetd_host, cert_request, cluster_name)
    except NodeCommunicationException as e:
        raise LibraryError(node_communicator_exception_to_report_item(e))
    # transform the signed certificate to pk12 format which can be sent to nodes
    pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)

    # distribute final certificate to nodes
    def do_and_report(reporter, communicator, node, pk12):
        qdevice_net.remote_client_import_certificate_and_key(
            communicator, node, pk12)
        reporter.process(
            reports.qdevice_certificate_accepted_by_node(node.label))

    parallel_nodes_communication_helper(
        do_and_report, [((reporter, communicator, node, pk12), {})
                        for node in cluster_nodes], reporter,
        skip_offline_nodes)
Example #7
def test_success(self):
    func = mock.MagicMock()
    lib.parallel_nodes_communication_helper(
        func,
        [([x], {"a": x*2,}) for x in range(3)],
        self.mock_reporter,
        skip_offline_nodes=False
    )
    expected_calls = [
        mock.call(0, a=0),
        mock.call(1, a=2),
        mock.call(2, a=4),
    ]
    self.assertEqual(len(expected_calls), len(func.mock_calls))
    func.assert_has_calls(expected_calls)
    self.assertEqual(self.mock_reporter.report_item_list, [])
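Spelled out, the test above asserts that every ([x], {"a": x*2}) entry expands into a call func(x, a=x*2). The standard-library snippet below reproduces those three calls directly, bypassing the helper (illustrative only):

from unittest import mock

func = mock.MagicMock()
for args, kwargs in [([x], {"a": x * 2}) for x in range(3)]:
    func(*args, **kwargs)

assert func.mock_calls == [mock.call(0, a=0), mock.call(1, a=2), mock.call(2, a=4)]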
Example #8
def remove_device(lib_env, skip_offline_nodes=False):
    """
    Stop using quorum device, distribute and reload configs if live
    skip_offline_nodes continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
    cfg.remove_quorum_device()

    if lib_env.is_corosync_conf_live:
        communicator = lib_env.node_communicator()
        # fix quorum options for SBD to work properly
        if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
            lib_env.report_processor.process(reports.sbd_requires_atb())
            cfg.set_quorum_options(lib_env.report_processor,
                                   {"auto_tie_breaker": "1"})

        # disable qdevice
        lib_env.report_processor.process(
            reports.service_disable_started("corosync-qdevice"))
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_disable,
            [[(lib_env.report_processor, communicator, node), {}]
             for node in cfg.get_nodes()], lib_env.report_processor,
            skip_offline_nodes)
        # stop qdevice
        lib_env.report_processor.process(
            reports.service_stop_started("corosync-qdevice"))
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_stop,
            [[(lib_env.report_processor, communicator, node), {}]
             for node in cfg.get_nodes()], lib_env.report_processor,
            skip_offline_nodes)
        # handle model specific configuration
        if model == "net":
            _remove_device_model_net(lib_env, cfg.get_nodes(),
                                     skip_offline_nodes)

    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
Example #9
def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
    """
    remove configuration used by qdevice model net
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    reporter = lib_env.report_processor
    communicator = lib_env.node_communicator()

    reporter.process(reports.qdevice_certificate_removal_started())

    def do_and_report(reporter, communicator, node):
        qdevice_net.remote_client_destroy(communicator, node)
        reporter.process(
            reports.qdevice_certificate_removed_from_node(node.label))

    parallel_nodes_communication_helper(do_and_report,
                                        [[(reporter, communicator, node), {}]
                                         for node in cluster_nodes],
                                        lib_env.report_processor,
                                        skip_offline_nodes)
Example #10
def test_errors(self):
    func = self.fixture_raiser()
    assert_raise_library_error(
        lambda: lib.parallel_nodes_communication_helper(
            func,
            [([x], {"a": x*2,}) for x in range(4)],
            self.mock_reporter,
            skip_offline_nodes=False
        ),
        (
            severity.ERROR,
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            {
                "node": "node",
                "reason": "reason",
                "command": "command",
            },
            report_codes.SKIP_OFFLINE_NODES
        ),
        (
            severity.ERROR,
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            {
                "node": "node",
            }
        )
    )
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.ERROR,
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                {
                    "node": "node",
                    "reason": "reason",
                    "command": "command",
                },
                report_codes.SKIP_OFFLINE_NODES
            ),
            (
                severity.ERROR,
                report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
                {
                    "node": "node",
                }
            )
        ]
    )
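Taken together, the two tests describe the helper's contract: call func(*args, **kwargs) once per (args, kwargs) entry, turn node communication failures into report items forceable via SKIP_OFFLINE_NODES, merge report items carried by a LibraryError raised from the worker, and hand everything to the reporter at the end. The sketch below is a minimal, self-contained approximation of that contract, not the pcs implementation; the exception classes and the report tuple are simplified stand-ins:

from concurrent.futures import ThreadPoolExecutor

class NodeCommunicationException(Exception):
    # stand-in for pcs' node communication errors
    pass

class LibraryError(Exception):
    # stand-in; in pcs its args are report items
    pass

def parallel_nodes_communication_helper_sketch(
    func, func_args_kwargs, reporter, skip_offline_nodes=False
):
    report_items = []

    def safe_call(args, kwargs):
        try:
            func(*args, **kwargs)
        except NodeCommunicationException as e:
            # in pcs this becomes a report item; with skip_offline_nodes the
            # failure is downgraded so processing can continue
            report_items.append(
                ("node communication error", e, skip_offline_nodes)
            )
        except LibraryError as e:
            report_items.extend(e.args)

    with ThreadPoolExecutor() as pool:
        # one worker call per (args, kwargs) entry, run in parallel
        list(pool.map(lambda pair: safe_call(*pair), func_args_kwargs))

    # the reporter decides whether the collected items are fatal
    reporter.process_list(report_items)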
Example #11
def _add_device_model_net(
    lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
):
    """
    setup cluster nodes for using qdevice model net
    string qnetd_host address of qdevice provider (qnetd host)
    string cluster_name name of the cluster to which qdevice is being added
    NodeAddressesList cluster_nodes list of cluster nodes addresses
    bool skip_offline_nodes continue even if not all nodes are accessible
    """
    communicator = lib_env.node_communicator()
    runner = lib_env.cmd_runner()
    reporter = lib_env.report_processor

    reporter.process(
        reports.qdevice_certificate_distribution_started()
    )
    # get qnetd CA certificate
    try:
        qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
            communicator,
            qnetd_host
        )
    except NodeCommunicationException as e:
        raise LibraryError(
            node_communicator_exception_to_report_item(e)
        )
    # init certificate storage on all nodes
    parallel_nodes_communication_helper(
        qdevice_net.remote_client_setup,
        [
            ((communicator, node, qnetd_ca_cert), {})
            for node in cluster_nodes
        ],
        reporter,
        skip_offline_nodes
    )
    # create client certificate request
    cert_request = qdevice_net.client_generate_certificate_request(
        runner,
        cluster_name
    )
    # sign the request on qnetd host
    try:
        signed_certificate = qdevice_net.remote_sign_certificate_request(
            communicator,
            qnetd_host,
            cert_request,
            cluster_name
        )
    except NodeCommunicationException as e:
        raise LibraryError(
            node_communicator_exception_to_report_item(e)
        )
    # transform the signed certificate to pk12 format which can be sent to nodes
    pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)
    # distribute final certificate to nodes
    def do_and_report(reporter, communicator, node, pk12):
        qdevice_net.remote_client_import_certificate_and_key(
            communicator, node, pk12
        )
        reporter.process(
            reports.qdevice_certificate_accepted_by_node(node.label)
        )
    parallel_nodes_communication_helper(
        do_and_report,
        [
            ((reporter, communicator, node, pk12), {})
            for node in cluster_nodes
        ],
        reporter,
        skip_offline_nodes
    )
Example #12
def add_device(
    lib_env, model, model_options, generic_options, force_model=False,
    force_options=False, skip_offline_nodes=False
):
    """
    Add quorum device to cluster, distribute and reload configs if live
    model quorum device model
    model_options model specific options dict
    generic_options generic quorum device options dict
    force_model continue even if the model is not valid
    force_options continue even if options are not valid
    skip_offline_nodes continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    # Try adding qdevice to corosync.conf. This validates all the options and
    # makes sure qdevice is not defined in corosync.conf yet.
    cfg.add_quorum_device(
        lib_env.report_processor,
        model,
        model_options,
        generic_options,
        force_model,
        force_options
    )

    # First setup certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        # do model specific configuration
        # if model is not known to pcs and was forced, do not configure anything
        # else but corosync.conf, as we do not know what to do anyway
        if model == "net":
            _add_device_model_net(
                lib_env,
                # we are sure it's there, it was validated in add_quorum_device
                model_options["host"],
                cfg.get_cluster_name(),
                cfg.get_nodes(),
                skip_offline_nodes
            )

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice")
        )
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_enable,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now that corosync.conf has been reloaded, we can start the qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice")
        )
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_start,
            [
                [(lib_env.report_processor, communicator, node), {}]
                for node in cfg.get_nodes()
            ],
            lib_env.report_processor,
            skip_offline_nodes
        )
Example #13
def add_device(lib_env,
               model,
               model_options,
               generic_options,
               force_model=False,
               force_options=False,
               skip_offline_nodes=False):
    """
    Add quorum device to cluster, distribute and reload configs if live
    model quorum device model
    model_options model specific options dict
    generic_options generic quorum device options dict
    force_model continue even if the model is not valid
    force_options continue even if options are not valid
    skip_offline_nodes continue even if not all nodes are accessible
    """
    __ensure_not_cman(lib_env)

    cfg = lib_env.get_corosync_conf()
    # Try adding qdevice to corosync.conf. This validates all the options and
    # makes sure qdevice is not defined in corosync.conf yet.
    cfg.add_quorum_device(lib_env.report_processor, model, model_options,
                          generic_options, force_model, force_options)

    # First setup certificates for qdevice, then send corosync.conf to nodes.
    # If anything fails, nodes will not have corosync.conf with qdevice in it,
    # so there is no effect on the cluster.
    if lib_env.is_corosync_conf_live:
        # do model specific configuration
        # if model is not known to pcs and was forced, do not configure anything
        # else but corosync.conf, as we do not know what to do anyway
        if model == "net":
            _add_device_model_net(
                lib_env,
                # we are sure it's there, it was validated in add_quorum_device
                model_options["host"],
                cfg.get_cluster_name(),
                cfg.get_nodes(),
                skip_offline_nodes)

        lib_env.report_processor.process(
            reports.service_enable_started("corosync-qdevice"))
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_enable,
            [[(lib_env.report_processor, communicator, node), {}]
             for node in cfg.get_nodes()], lib_env.report_processor,
            skip_offline_nodes)

    # everything set up, it's safe to tell the nodes to use qdevice
    lib_env.push_corosync_conf(cfg, skip_offline_nodes)

    # Now that corosync.conf has been reloaded, we can start the qdevice service.
    if lib_env.is_corosync_conf_live:
        lib_env.report_processor.process(
            reports.service_start_started("corosync-qdevice"))
        communicator = lib_env.node_communicator()
        parallel_nodes_communication_helper(
            qdevice_client.remote_client_start,
            [[(lib_env.report_processor, communicator, node), {}]
             for node in cfg.get_nodes()], lib_env.report_processor,
            skip_offline_nodes)