Code example #1
def config_setup(
    env: LibraryEnvironment,
    site_list,
    arbitrator_list,
    instance_name=None,
    overwrite_existing=False,
):
    """
    create booth configuration

    env
    list site_list -- site addresses of multisite
    list arbitrator_list -- arbitrator addresses of multisite
    string instance_name -- booth instance name
    bool overwrite_existing -- allow overwriting existing files
    """
    instance_name = instance_name or constants.DEFAULT_INSTANCE_NAME
    report_processor = env.report_processor

    report_processor.report_list(
        config_validators.check_instance_name(instance_name)
    )
    report_processor.report_list(
        config_validators.create(site_list, arbitrator_list)
    )
    if report_processor.has_errors:
        raise LibraryError()

    booth_env = env.get_booth_env(instance_name)

    booth_conf = booth_env.create_facade(site_list, arbitrator_list)
    booth_conf.set_authfile(booth_env.key_path)

    try:
        booth_env.key.write_raw(
            tools.generate_binary_key(
                random_bytes_count=settings.booth_authkey_bytes
            ),
            can_overwrite=overwrite_existing,
        )
        booth_env.config.write_facade(
            booth_conf, can_overwrite=overwrite_existing
        )
    except FileAlreadyExists as e:
        report_processor.report(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    overwrite_existing,
                ),
                message=reports.messages.FileAlreadyExists(
                    e.metadata.file_type_code,
                    e.metadata.path,
                ),
            )
        )
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
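
A minimal usage sketch for this variant. The call below is hypothetical: the LibraryEnvironment instance (env) is normally constructed by the pcs command layer, and the addresses are placeholders.

# Hypothetical call site; `env` is assumed to be an already-built
# LibraryEnvironment provided by the pcs command layer.
config_setup(
    env,
    site_list=["192.168.1.10", "192.168.1.11"],
    arbitrator_list=["192.168.1.12"],
    instance_name="booth",      # optional, defaults to the pcs default name
    overwrite_existing=False,   # existing files produce a forceable error
)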
Code example #2
File: booth.py  Project: thulyacloud/pcs
def config_setup(env, booth_configuration, overwrite_existing=False):
    """
    create booth configuration
    list site_list contains site addresses of multisite
    list arbitrator_list contains arbitrator addresses of multisite
    """

    config_content = config_exchange.from_exchange_format(booth_configuration)
    config_structure.validate_peers(
        *config_structure.take_peers(config_content))

    env.booth.create_key(
        tools.generate_binary_key(
            random_bytes_count=settings.booth_authkey_bytes),
        overwrite_existing)
    config_content = config_structure.set_authfile(config_content,
                                                   env.booth.key_path)
    env.booth.create_config(build(config_content), overwrite_existing)
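
A hedged call sketch for this older API. The exact shape of booth_configuration is defined by config_exchange.from_exchange_format; the list of dicts below is only an illustrative assumption, not the documented pcs exchange format.

# Hypothetical input; the key/value/details shape is an assumption made
# for illustration only.
booth_configuration = [
    {"key": "site", "value": "192.168.1.10", "details": []},
    {"key": "site", "value": "192.168.1.11", "details": []},
    {"key": "arbitrator", "value": "192.168.1.12", "details": []},
]
config_setup(env, booth_configuration, overwrite_existing=True)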
Code example #3
File: booth.py  Project: tomjelinek/pcs
def config_setup(env, booth_configuration, overwrite_existing=False):
    """
    create booth configuration
    list site_list contains site addresses of multisite
    list arbitrator_list contains arbitrator addresses of multisite
    """

    config_content = config_exchange.from_exchange_format(booth_configuration)
    config_structure.validate_peers(
        *config_structure.take_peers(config_content)
    )

    env.booth.create_key(
        tools.generate_binary_key(
            random_bytes_count=settings.booth_authkey_bytes
        ),
        overwrite_existing
    )
    config_content = config_structure.set_authfile(
        config_content,
        env.booth.key_path
    )
    env.booth.create_config(build(config_content), overwrite_existing)
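
This is the same function as code example #2, taken from a different fork (tomjelinek/pcs); only the continuation-line formatting differs, the behavior is identical.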
Code example #4
File: remote_node.py  Project: kmalyjur/pcs
def _prepare_pacemaker_remote_environment(
    env,
    report_processor,
    existing_nodes_target_list,
    new_node_target,
    new_node_name,
    skip_offline_nodes,
    allow_incomplete_distribution,
    allow_fails,
):
    if new_node_target:
        com_cmd = GetOnlineTargets(
            report_processor,
            ignore_offline_targets=skip_offline_nodes,
        )
        com_cmd.set_targets([new_node_target])
        online_new_target_list = run_com(env.get_node_communicator(), com_cmd)
        if not online_new_target_list and not skip_offline_nodes:
            raise LibraryError()
    else:
        online_new_target_list = []

    # check new nodes
    if online_new_target_list:
        com_cmd = GetHostInfo(report_processor)
        com_cmd.set_targets(online_new_target_list)
        report_processor.report_list(
            _host_check_remote_node(
                run_com(env.get_node_communicator(), com_cmd)))
        if report_processor.has_errors:
            raise LibraryError()
    else:
        report_processor.report_list(
            _reports_skip_new_node(new_node_name, "unreachable"))

    # share pacemaker authkey
    authkey_file = FileInstance.for_pacemaker_key()
    try:
        if authkey_file.raw_file.exists():
            authkey_content = authkey_file.read_raw()
            authkey_targets = online_new_target_list
        else:
            authkey_content = generate_binary_key(
                random_bytes_count=settings.pacemaker_authkey_bytes)
            authkey_targets = (existing_nodes_target_list +
                               online_new_target_list)
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()

    if authkey_targets:
        com_cmd = DistributeFiles(
            report_processor,
            node_communication_format.pcmk_authkey_file(authkey_content),
            skip_offline_targets=skip_offline_nodes,
            allow_fails=allow_incomplete_distribution,
        )
        com_cmd.set_targets(authkey_targets)
        run_and_raise(env.get_node_communicator(), com_cmd)

    # start and enable pacemaker_remote
    if online_new_target_list:
        com_cmd = ServiceAction(
            report_processor,
            node_communication_format.create_pcmk_remote_actions([
                "start",
                "enable",
            ]),
            allow_fails=allow_fails,
        )
        com_cmd.set_targets(online_new_target_list)
        run_and_raise(env.get_node_communicator(), com_cmd)
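
A rough sketch of how this internal helper might be driven by a node-add command. The target objects are assumed to come from the environment's target factory; all names below are placeholders.

# Hypothetical call site; existing_targets and new_target are assumed to
# be request-target objects resolved from pcs known hosts.
_prepare_pacemaker_remote_environment(
    env,
    report_processor,
    existing_nodes_target_list=existing_targets,  # targets of current nodes
    new_node_target=new_target,                   # target of the node to add
    new_node_name="node3",
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_fails=False,
)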
Code example #5
File: cluster.py  Project: bashims/pcs
def setup(env,
          cluster_name,
          nodes,
          transport_type=None,
          transport_options=None,
          link_list=None,
          compression_options=None,
          crypto_options=None,
          totem_options=None,
          quorum_options=None,
          wait=False,
          start=False,
          enable=False,
          force=False,
          force_unresolvable=False):
    """
    Set up cluster on specified nodes.
    Validation of the inputs is done here. Possible existing clusters are
    destroyed (when using force). Authkey files for corosync and pacemaker,
    known hosts and newly generated corosync.conf are distributed to all
    nodes.
    Raise LibraryError on any error.

    env LibraryEnvironment
    cluster_name string -- name of a cluster to set up
    nodes list -- list of dicts, each representing a node.
        Supported keys are: name (required), addrs
    transport_type string -- transport type of a cluster
    transport_options dict -- transport specific options
    link_list list of dict -- list of links, depends on transport_type
    compression_options dict -- only available for transport_type == 'knet'. In
        corosync.conf they are prefixed 'knet_compression_'
    crypto_options dict -- only available for transport_type == 'knet'. In
        corosync.conf they are prefixed 'crypto_'
    totem_options dict -- options of section 'totem' in corosync.conf
    quorum_options dict -- options of section 'quorum' in corosync.conf
    wait -- specifies if the command should try to wait for the cluster to
        start up. Has no effect if start is False. If set to False, the
        command will not wait for the cluster to start. If None, the command
        will wait for a default timeout. If an int, it sets the timeout to
        that many seconds.
    start bool -- if True start cluster when it is set up
    enable bool -- if True enable cluster when it is set up
    force bool -- if True, some validation errors are treated as warnings
    force_unresolvable bool -- if True, unresolvable node addresses are
        treated as warnings
    """
    _ensure_live_env(env)  # raises if env is not live

    transport_options = transport_options or {}
    link_list = link_list or []
    compression_options = compression_options or {}
    crypto_options = crypto_options or {}
    totem_options = totem_options or {}
    quorum_options = quorum_options or {}
    nodes = [_normalize_dict(node, {"addrs"}) for node in nodes]

    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()

    # Get targets for all nodes and report unknown (== not-authorized) nodes.
    # If a node doesn't contain the 'name' key, validation of inputs reports it.
    # That means we don't report missing names but cannot rely on them being
    # present either.
    target_report_list, target_list = (
        target_factory.get_target_list_with_reports(
            [node["name"] for node in nodes if "name" in node],
            allow_skip=False,
        ))
    report_processor.report_list(target_report_list)

    # Use an address defined in known-hosts for each node with no addresses
    # specified. This allows users not to specify node addresses at all which
    # simplifies the whole cluster setup command / form significantly.
    addrs_defaulter = _get_addrs_defaulter(
        report_processor, {target.label: target
                           for target in target_list})
    nodes = [
        _set_defaults_in_dict(node, {"addrs": addrs_defaulter})
        for node in nodes
    ]

    # Validate inputs.
    report_processor.report_list(
        config_validators.create(cluster_name,
                                 nodes,
                                 transport_type,
                                 force_unresolvable=force_unresolvable))
    if transport_type in corosync_constants.TRANSPORTS_KNET:
        max_link_number = max([len(node["addrs"]) for node in nodes],
                              default=0)
        report_processor.report_list(
            config_validators.create_transport_knet(
                transport_options, compression_options, crypto_options) +
            config_validators.create_link_list_knet(link_list, max_link_number)
        )
    elif transport_type in corosync_constants.TRANSPORTS_UDP:
        report_processor.report_list(
            config_validators.create_transport_udp(
                transport_options, compression_options, crypto_options) +
            config_validators.create_link_list_udp(link_list))
    report_processor.report_list(
        config_validators.create_totem(totem_options) +
        # We are creating the config and we know there is no qdevice in it.
        config_validators.create_quorum_options(quorum_options, False))

    # Validate flags
    wait_timeout = _get_validated_wait_timeout(report_processor, wait, start)

    # Validate the nodes
    com_cmd = GetHostInfo(report_processor)
    com_cmd.set_targets(target_list)
    report_processor.report_list(
        _host_check_cluster_setup(
            run_com(env.get_node_communicator(), com_cmd), force))

    if report_processor.has_errors:
        raise LibraryError()

    # Validation done. If errors occurred, an exception has been raised and we
    # don't get below this line.

    # Destroy cluster on all nodes.
    com_cmd = cluster.Destroy(env.report_processor)
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    # Distribute auth tokens.
    com_cmd = UpdateKnownHosts(
        env.report_processor,
        known_hosts_to_add=env.get_known_hosts(
            [target.label for target in target_list]),
        known_hosts_to_remove=[],
    )
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    # Distribute configuration files except corosync.conf. Sending
    # corosync.conf serves as a "commit" as its presence on a node marks the
    # node as a part of a cluster.
    corosync_authkey = generate_binary_key(random_bytes_count=128)
    pcmk_authkey = generate_binary_key(random_bytes_count=128)
    actions = {}
    actions.update(
        node_communication_format.corosync_authkey_file(corosync_authkey))
    actions.update(node_communication_format.pcmk_authkey_file(pcmk_authkey))
    com_cmd = DistributeFilesWithoutForces(env.report_processor, actions)
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)
    # TODO This should be in the previous call but so far we don't have a call
    # which allows to save and delete files at the same time.
    com_cmd = RemoveFilesWithoutForces(
        env.report_processor,
        {"pcsd settings": {
            "type": "pcsd_settings"
        }},
    )
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    # Distribute and reload pcsd SSL certificate
    report_processor.report(
        reports.pcsd_ssl_cert_and_key_distribution_started(
            [target.label for target in target_list]))
    ssl_key_raw = ssl.generate_key()
    ssl_key = ssl.dump_key(ssl_key_raw)
    ssl_cert = ssl.dump_cert(
        ssl.generate_cert(ssl_key_raw, target_list[0].label))
    com_cmd = SendPcsdSslCertAndKey(env.report_processor, ssl_cert, ssl_key)
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    # Create and distribute corosync.conf. Once a node saves corosync.conf it
    # is considered to be in a cluster.
    corosync_conf = config_facade.ConfigFacade.create(cluster_name, nodes,
                                                      transport_type)
    corosync_conf.set_totem_options(totem_options)
    corosync_conf.set_quorum_options(quorum_options)
    corosync_conf.create_link_list(link_list)
    if transport_type in corosync_constants.TRANSPORTS_KNET:
        corosync_conf.set_transport_knet_options(transport_options,
                                                 compression_options,
                                                 crypto_options)
    elif transport_type in corosync_constants.TRANSPORTS_UDP:
        corosync_conf.set_transport_udp_options(transport_options)

    com_cmd = DistributeFilesWithoutForces(
        env.report_processor,
        node_communication_format.corosync_conf_file(
            corosync_conf.config.export()),
    )
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)

    env.report_processor.process(reports.cluster_setup_success())

    # Optionally enable and start cluster services.
    if enable:
        com_cmd = EnableCluster(env.report_processor)
        com_cmd.set_targets(target_list)
        run_and_raise(env.get_node_communicator(), com_cmd)
    if start:
        _start_cluster(
            env.communicator_factory,
            env.report_processor,
            target_list,
            wait_timeout=wait_timeout,
        )
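
A hedged example invocation for a two-node cluster over knet; the cluster name and addresses are placeholders, and the node names are assumed to already be authorized in pcs known hosts.

# Hypothetical invocation; unresolvable addresses would be reported as
# errors unless force_unresolvable=True.
setup(
    env,
    cluster_name="mycluster",
    nodes=[
        {"name": "node1", "addrs": ["192.168.1.11"]},
        {"name": "node2", "addrs": ["192.168.1.12"]},
    ],
    transport_type="knet",
    enable=True,   # enable cluster services on boot
    start=True,    # start the cluster once it is set up
    wait=60,       # wait up to 60 seconds for startup
)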
Code example #6
File: remote_node.py  Project: tomjelinek/pcs
def _prepare_pacemaker_remote_environment(
    env, report_processor, existing_nodes_target_list, new_node_target,
    new_node_name, skip_offline_nodes, allow_incomplete_distribution,
    allow_fails
):
    if new_node_target:
        com_cmd = GetOnlineTargets(
            report_processor,
            ignore_offline_targets=skip_offline_nodes,
        )
        com_cmd.set_targets([new_node_target])
        online_new_target_list = run_com(env.get_node_communicator(), com_cmd)
        if not online_new_target_list and not skip_offline_nodes:
            raise LibraryError()
    else:
        online_new_target_list = []

    # check new nodes
    if online_new_target_list:
        com_cmd = GetHostInfo(report_processor)
        com_cmd.set_targets(online_new_target_list)
        report_processor.report_list(
            _host_check_remote_node(
                run_com(env.get_node_communicator(), com_cmd)
            )
        )
        if report_processor.has_errors:
            raise LibraryError()
    else:
        report_processor.report_list(
            _reports_skip_new_node(new_node_name, "unreachable")
        )

    # share pacemaker authkey
    if env.pacemaker.has_authkey:
        authkey_content = env.pacemaker.get_authkey_content()
        authkey_targets = online_new_target_list
    else:
        authkey_content = generate_binary_key(
            random_bytes_count=settings.pacemaker_authkey_bytes
        )
        authkey_targets = existing_nodes_target_list + online_new_target_list
    if authkey_targets:
        com_cmd = DistributeFiles(
            report_processor,
            node_communication_format.pcmk_authkey_file(authkey_content),
            skip_offline_targets=skip_offline_nodes,
            allow_fails=allow_incomplete_distribution,
        )
        com_cmd.set_targets(authkey_targets)
        run_and_raise(env.get_node_communicator(), com_cmd)

    # start and enable pacemaker_remote
    if online_new_target_list:
        com_cmd = ServiceAction(
            report_processor,
            node_communication_format.create_pcmk_remote_actions([
                "start",
                "enable",
            ]),
            allow_fails=allow_fails,
        )
        com_cmd.set_targets(online_new_target_list)
        run_and_raise(env.get_node_communicator(), com_cmd)
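
Compared with code example #4, this variant reads the pacemaker authkey through the env.pacemaker facade (has_authkey / get_authkey_content) instead of FileInstance.for_pacemaker_key(), so it needs no RawFileError handling of its own; the distribution and service-start flow is otherwise the same.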