def test_list_nodes(self, mock_check_output):
    """Verify list_nodes() parses 'crm node status' into sorted node names."""
    expected_nodes = [
        'juju-982848-zaza-ce47c58f6c88-10',
        'juju-982848-zaza-ce47c58f6c88-11',
        'juju-982848-zaza-ce47c58f6c88-9',
    ]
    self.assertSequenceEqual(pcmk.list_nodes(), expected_nodes)
    # Exactly one call to the crm CLI should have been made.
    mock_check_output.assert_called_once_with(['crm', 'node', 'status'])
def configure_legacy_stonith():
    """Enable or disable STONITH according to the 'stonith_enabled' option.

    When disabled, STONITH is switched off cluster-wide unless pacemaker
    remote STONITH resources are present. When enabled, a MAAS-backed
    STONITH primitive (plus any constraint) is created for every node in
    the cluster, which requires 'maas_url' and 'maas_credentials' in config.

    :raises Exception: if the MAAS settings are missing, the MAAS node
        inventory cannot be fetched, or a node's STONITH primitive cannot
        be determined.
    """
    if config('stonith_enabled') in ['true', 'True', True]:
        log('Enabling STONITH for all nodes in cluster.', level=INFO)
        # Configure STONITH resources for all nodes in the cluster.
        # NOTE: this is totally provider dependent and requires access to
        # the MAAS API endpoint, using endpoint and credentials from config.
        maas_url = config('maas_url')
        maas_creds = config('maas_credentials')
        if maas_url is None or maas_creds is None:
            msg = ('maas_url and maas_credentials must be set '
                   'in config to enable STONITH.')
            status_set('blocked', msg)
            raise Exception(msg)

        inventory = maas.MAASHelper(maas_url, maas_creds).list_nodes()
        if not inventory:
            msg = 'Could not obtain node inventory from MAAS @ %s.' % maas_url
            status_set('blocked', msg)
            raise Exception(msg)

        for member in pcmk.list_nodes():
            primitive, constraint = pcmk.maas_stonith_primitive(
                inventory, member)
            if not primitive:
                msg = ('Failed to determine STONITH primitive for '
                       'node %s' % member)
                status_set('blocked', msg)
                raise Exception(msg)

            # The primitive definition is 'primitive <name> ...'; take name.
            primitive_name = str(primitive).split(' ')[1]
            if pcmk.is_resource_present(primitive_name):
                log('STONITH primitive already exists for node.', level=DEBUG)
            else:
                log('Creating new STONITH primitive %s.' % primitive_name,
                    level=DEBUG)
                pcmk.commit('crm -F configure %s' % primitive)
                if constraint:
                    pcmk.commit('crm -F configure %s' % constraint)

        pcmk.commit("crm configure property stonith-enabled=true")
    elif configure_pacemaker_remote_stonith_resource():
        log('Not disabling STONITH as pacemaker remotes are present',
            level=INFO)
    else:
        log('Disabling STONITH', level=INFO)
        pcmk.commit("crm configure property stonith-enabled=false")
# Exemple #3
# 0
def configure_stonith():
    """Apply the cluster-wide STONITH configuration from charm config.

    If 'stonith_enabled' is not set, disable STONITH (unless pacemaker
    remote STONITH resources exist). Otherwise create a MAAS-driven
    STONITH primitive for each cluster node and enable STONITH globally.

    :raises Exception: when MAAS settings are absent, the MAAS inventory
        is empty, or a STONITH primitive cannot be built for a node.
    """
    if config('stonith_enabled') not in ['true', 'True', True]:
        if configure_pacemaker_remote_stonith_resource():
            log('Not disabling STONITH as pacemaker remotes are present',
                level=INFO)
        else:
            log('Disabling STONITH', level=INFO)
            pcmk.commit("crm configure property stonith-enabled=false")
        return

    log('Enabling STONITH for all nodes in cluster.', level=INFO)
    # Configuring STONITH resources is provider specific: it needs the
    # MAAS API endpoint and credentials supplied through charm config.
    api_url = config('maas_url')
    api_creds = config('maas_credentials')
    if api_url is None or api_creds is None:
        blocked_msg = ('maas_url and maas_credentials must be set '
                       'in config to enable STONITH.')
        status_set('blocked', blocked_msg)
        raise Exception(blocked_msg)

    maas_nodes = maas.MAASHelper(api_url, api_creds).list_nodes()
    if not maas_nodes:
        blocked_msg = 'Could not obtain node inventory from MAAS @ %s.' % (
            api_url)
        status_set('blocked', blocked_msg)
        raise Exception(blocked_msg)

    for cluster_node in pcmk.list_nodes():
        rsc, constraint = pcmk.maas_stonith_primitive(maas_nodes,
                                                      cluster_node)
        if not rsc:
            blocked_msg = ('Failed to determine STONITH primitive for '
                           'node %s' % cluster_node)
            status_set('blocked', blocked_msg)
            raise Exception(blocked_msg)

        # Second token of the primitive definition is the resource name.
        rsc_name = str(rsc).split(' ')[1]
        if not pcmk.is_resource_present(rsc_name):
            log('Creating new STONITH primitive %s.' % rsc_name, level=DEBUG)
            pcmk.commit('crm -F configure %s' % rsc)
            if constraint:
                pcmk.commit('crm -F configure %s' % constraint)
        else:
            log('STONITH primitive already exists for node.', level=DEBUG)

    pcmk.commit("crm configure property stonith-enabled=true")
def add_location_rules_for_local_nodes(res_name):
    """Add location rules for running resource on local nodes.

    Create a zero-score location constraint named 'loc-<resource>-<node>'
    for every local (ie. non-remote) node that does not already have one,
    allowing the given resource to run there.

    :param res_name: Resource name to create location rules for.
    :type res_name: str
    """
    for member in pcmk.list_nodes():
        constraint_id = 'loc-{}-{}'.format(res_name, member)
        if pcmk.crm_opt_exists(constraint_id):
            continue
        crm_cmd = 'crm -w -F configure location {} {} 0: {}'.format(
            constraint_id, res_name, member)
        pcmk.commit(crm_cmd, failure_is_fatal=True)
        log('%s' % crm_cmd, level=DEBUG)
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
    """Add location rules as needed for resources, clones and groups.

    If remote nodes should not run resources, add location rules pinning
    each resource, clone and group to the local nodes, and spread the
    pacemaker-remote resources themselves across those nodes.

    :param resources: Resource definitions
    :type resources: dict
    :param clones: Clone definitions
    :type clones: dict
    :param groups: Group definitions
    :type groups: dict
    """
    # Normalize all three None defaults (the original only normalized
    # clones/groups, so resources=None crashed on .items()).
    resources = resources or {}
    clones = clones or {}
    groups = groups or {}
    try:
        resources_on_remote = need_resources_on_remotes()
    except ValueError:
        msg = 'Unable to calculate whether resources should run on remotes'
        log(msg, level=WARNING)
        return
    if resources_on_remote:
        msg = ('Resources are permitted to run on remotes, no need to create '
               'location constraints')
        log(msg, level=WARNING)
        return
    # Resources that are members of a clone or group are constrained via
    # their parent, so skip them here.
    managed_elsewhere = list(clones.values()) + list(groups.values())
    pacemaker_remotes = []
    for res_name, res_type in resources.items():
        if res_name not in managed_elsewhere:
            if res_type == 'ocf:pacemaker:remote':
                pacemaker_remotes.append(res_name)
            else:
                add_location_rules_for_local_nodes(res_name)
    add_location_rules_for_pacemaker_remotes(pacemaker_remotes)
    if clones:
        # Hoisted: the node inventory is loop-invariant and
        # pcmk.list_nodes() shells out to the cluster on every call.
        node_count = len(pcmk.list_nodes())
    for cl_name in clones:
        add_location_rules_for_local_nodes(cl_name)
        # Limit clone resources to only running on X number of nodes where X
        # is the number of local nodes. Otherwise they will show as offline
        # on the remote nodes.
        cmd = ('crm_resource --resource {} --set-parameter clone-max '
               '--meta --parameter-value {}').format(cl_name, node_count)
        pcmk.commit(cmd, failure_is_fatal=True)
        log('%s' % cmd, level=DEBUG)
    for grp_name in groups:
        add_location_rules_for_local_nodes(grp_name)
def add_location_rules_for_pacemaker_remotes(res_names):
    """Add location rules for pacemaker remote resources on local nodes.

    Every remote resource gets a location rule for every local node; the
    resource's preferred node (assigned round-robin over the sorted node
    list) scores 200 while the rest score 0, spreading the resources out.

    :param res_names: Pacemaker remote resource names.
    :type res_names: List[str]
    """
    ordered_resources = sorted(res_names)
    ordered_nodes = sorted(pcmk.list_nodes())
    # One preferred (resource, node) pairing per resource, cycling through
    # the nodes so placements are spread evenly.
    preferred = set(zip(ordered_resources, itertools.cycle(ordered_nodes)))
    for resource in ordered_resources:
        for node in ordered_nodes:
            score = 200 if (resource, node) in preferred else 0
            add_score_location_rule(resource, node, score)
# Exemple #7
# 0
def add_location_rules_for_local_nodes(res_name):
    """Add location rules for running resource on local nodes.

    For each local (non-remote) node, install a score-0 location
    constraint called 'loc-<resource>-<node>' so the resource may run
    there; nodes that already have the constraint are left untouched.

    :param res_name: Resource name to create location rules for.
    :type res_name: str
    """
    cmd_template = 'crm -w -F configure location {} {} 0: {}'
    for node_name in pcmk.list_nodes():
        rule_name = 'loc-{}-{}'.format(res_name, node_name)
        if not pcmk.crm_opt_exists(rule_name):
            crm_cmd = cmd_template.format(rule_name, res_name, node_name)
            pcmk.commit(crm_cmd, failure_is_fatal=True)
            log('%s' % crm_cmd, level=DEBUG)
# Exemple #8
# 0
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
    """Add location rules as needed for resources, clones and groups.

    If remote nodes should not run resources, add location rules
    restricting every standalone resource, clone and group to the local
    nodes.

    :param resources: Resource definitions
    :type resources: dict
    :param clones: Clone definitions
    :type clones: dict
    :param groups: Group definitions
    :type groups: dict
    """
    # Normalize all three None defaults (the original only normalized
    # clones/groups, so resources=None crashed when iterated).
    resources = resources or {}
    clones = clones or {}
    groups = groups or {}
    try:
        resources_on_remote = need_resources_on_remotes()
    except ValueError:
        msg = 'Unable to calculate whether resources should run on remotes'
        log(msg, level=WARNING)
        return
    if resources_on_remote:
        msg = ('Resources are permitted to run on remotes, no need to create '
               'location constraints')
        log(msg, level=WARNING)
        return
    # Resources owned by a clone or group are constrained via their parent,
    # so only standalone resources get direct rules. Iterate keys only; the
    # resource type is not used here.
    managed_elsewhere = list(clones.values()) + list(groups.values())
    for res_name in resources:
        if res_name not in managed_elsewhere:
            add_location_rules_for_local_nodes(res_name)
    if clones:
        # Hoisted: the node inventory is loop-invariant and
        # pcmk.list_nodes() shells out to the cluster on every call.
        node_count = len(pcmk.list_nodes())
    for cl_name in clones:
        add_location_rules_for_local_nodes(cl_name)
        # Limit clone resources to only running on X number of nodes where X
        # is the number of local nodes. Otherwise they will show as offline
        # on the remote nodes.
        cmd = ('crm_resource --resource {} --set-parameter clone-max '
               '--meta --parameter-value {}').format(cl_name, node_count)
        pcmk.commit(cmd, failure_is_fatal=True)
        log('%s' % cmd, level=DEBUG)
    for grp_name in groups:
        add_location_rules_for_local_nodes(grp_name)