Example #1
def get_replicaset_leaders_by_aliases(specified_replicaset_leaders,
                                      control_console):
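    """Map replicaset aliases to leader instance UUIDs.

    Returns (replicaset_leaders, None) on success, or (None, error_message)
    when an alias can't be resolved against the current cluster state.
    """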
    if not specified_replicaset_leaders:
        return {}, None

    cluster_instances = helpers.get_cluster_instances(control_console)
    cluster_replicasets = helpers.get_cluster_replicasets(control_console)

    replicaset_leaders = {}
    for replicaset_alias, leader_alias in specified_replicaset_leaders.items():
        cluster_replicaset = cluster_replicasets.get(replicaset_alias)
        if cluster_replicaset is None:
            return None, "Replicaset '%s' isn't found in cluster" % replicaset_alias

        if leader_alias not in cluster_replicaset['instances']:
            return None, "Instance '%s' isn't found in replicaset '%s'" % (
                leader_alias, replicaset_alias)

        leader_instance = cluster_instances.get(leader_alias)
        if leader_instance is None:
            return None, "Instance '%s' isn't found in cluster" % leader_alias

        leader_instance_uuid = leader_instance.get('uuid')
        if not leader_instance_uuid:
            return None, "Instance '%s' has no UUID" % leader_alias

        replicaset_leaders[cluster_replicaset['uuid']] = leader_instance_uuid

    return replicaset_leaders, None
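
A minimal usage sketch, assuming the surrounding module's `helpers` import is in scope: the cluster state and aliases below are invented for illustration, and `helpers` is patched with `unittest.mock` so no real control console is needed.

from unittest import mock

# Hypothetical cluster state: one replicaset with a single instance.
fake_instances = {'storage-1': {'uuid': 'bbbb-0001'}}
fake_replicasets = {
    'storage': {'uuid': 'aaaa-0001', 'instances': ['storage-1']},
}

with mock.patch.object(helpers, 'get_cluster_instances',
                       return_value=fake_instances), \
     mock.patch.object(helpers, 'get_cluster_replicasets',
                       return_value=fake_replicasets):
    leaders, err = get_replicaset_leaders_by_aliases(
        {'storage': 'storage-1'}, control_console=None)

assert err is None
assert leaders == {'aaaa-0001': 'bbbb-0001'}

Note that the returned mapping is keyed by replicaset UUID, not alias.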


def edit_topology(params):
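    """Apply the configured topology in at most two `edit_topology` calls,
    waiting for the cluster to become healthy after each call.
    """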
    console_sock = params['console_sock']
    module_hostvars = params['module_hostvars']
    play_hosts = params['play_hosts']
    healthy_timeout = params['healthy_timeout']
    allow_missed_instances = params['allow_missed_instances']

    replicasets = get_configured_replicasets(module_hostvars, play_hosts)
    instances = get_instances_to_configure(module_hostvars, play_hosts)

    if not replicasets and not instances:
        return helpers.ModuleRes(changed=False)

    control_console = helpers.get_control_console(console_sock)

    helpers.set_twophase_options_from_params(control_console, params)
    set_enabled_roles(replicasets, control_console)

    cluster_instances = helpers.get_cluster_instances(control_console)
    cluster_replicasets = helpers.get_cluster_replicasets(control_console)

    # Configure replicasets and instances:
    # * Create new replicasets.
    # * Edit existing replicasets and join new instances to them.
    #   In this case failover_priority isn't changed, since
    #   new instances don't have UUIDs before joining.
    # * Expel instances.
    # * Configure instances that are already joined.
    #   New instances aren't configured here since they don't have
    #   UUIDs before joining.
    topology_params, err = get_topology_params(replicasets,
                                               cluster_replicasets, instances,
                                               cluster_instances,
                                               allow_missed_instances)
    if err is not None:
        return helpers.ModuleRes(
            failed=True,
            msg="Failed to collect edit topology params: %s" % err)

    topology_changed = False

    if topology_params:
        res, err = control_console.eval_res_err(edit_topology_func_body,
                                                topology_params)
        if err is not None:
            return helpers.ModuleRes(failed=True,
                                     msg="Failed to edit topology: %s" % err)

        topology_changed = True

        # Without this, a `Peer closed` error is returned on the second
        # `edit_topology` call in some cases (e.g. when a new instance is
        # joined on the first call and then configured on the second).
        # See https://github.com/tarantool/cartridge/issues/1320
        # The simplest workaround is to add a little delay between these
        # calls, so we just perform an `is_healthy` call here.
        # If everything is OK, this call doesn't take long, but it
        # guarantees that the next `edit_topology` call won't fail.
        # If the cluster isn't healthy, then it's good to show an error.
        if not wait_for_cluster_is_healthy(control_console, healthy_timeout):
            return helpers.ModuleRes(
                failed=True,
                msg="Cluster isn't healthy after editing topology")

        # Now we need the updated instances and replicasets configuration
        # to check whether one more call is needed.
        # `edit_topology` returns a summary of updated instances,
        # so let's use it to update cluster_instances and cluster_replicasets.
        update_cluster_instances_and_replicasets(res, instances,
                                                 cluster_instances,
                                                 cluster_replicasets)

    # Configure failover_priority and instances that were joined on the previous call:
    # * Edit failover_priority of replicasets if needed.
    # * Configure instances that weren't configured on the first `edit_topology` call.
    topology_params, err = get_replicasets_failover_priority_and_instances_params(
        replicasets, cluster_replicasets, instances, cluster_instances,
        allow_missed_instances)
    if err is not None:
        return helpers.ModuleRes(
            failed=True,
            msg="Failed to collect edit topology params for changing "
                "failover_priority and configuring new instances: %s" % err)

    if topology_params:
        res, err = control_console.eval_res_err(edit_topology_func_body,
                                                topology_params)
        if err is not None:
            return helpers.ModuleRes(
                failed=True,
                msg="Failed to edit failover priority and configure "
                    "instances: %s" % err)

        topology_changed = True

        if not wait_for_cluster_is_healthy(control_console, healthy_timeout):
            return helpers.ModuleRes(
                failed=True,
                msg="Cluster isn't healthy after editing failover priority "
                    "and configuring instances")

    return helpers.ModuleRes(changed=topology_changed)
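
For reference, a hypothetical shape of the `params` dict, limited to the keys `edit_topology` reads directly; the socket path, host name, and hostvars contents are illustrative assumptions, and any two-phase options consumed by `helpers.set_twophase_options_from_params` are omitted.

# Hypothetical invocation: every value below is illustrative only.
params = {
    'console_sock': '/var/run/tarantool/instance-1.control',  # assumed path
    'module_hostvars': {                                      # assumed shape
        'storage-1': {'replicaset_alias': 'storage'},
    },
    'play_hosts': ['storage-1'],
    'healthy_timeout': 120,
    'allow_missed_instances': False,
}

res = edit_topology(params)  # returns a helpers.ModuleRes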