Beispiel #1
0
    def local_network_split_addresses(self):
        """Build a per-address-type map of this unit's addresses.

           NOTE: private-address is excluded; only the configured
           address types (admin/internal/public) are considered.

           @return dict keyed by the local address for each address
               type, each value holding 'network' (address/netmask)
               and a 'backends' map of {local unit name: address}.
        """
        charm_config = hookenv.config()
        hosts = {}
        for address_type in ADDRESS_TYPES:
            config_option = os_ip.ADDRESS_MAP[address_type]['config']
            address = ch_ip.get_address_in_network(
                charm_config.get(config_option))
            if not address:
                # No local address on this network type; skip it.
                continue
            cidr = "{}/{}".format(
                address, ch_ip.get_netmask_for_address(address))
            hosts[address] = {
                'network': cidr,
                'backends': {self.local_unit_name: address},
            }
        return hosts
Beispiel #2
0
    def test_get_netmask_for_address(self, _interfaces, _ifaddresses):
        """get_netmask_for_address resolves netmasks from local interfaces.

        IPv4 addresses map to dotted-quad netmasks, IPv6 addresses to
        prefix lengths, and addresses on no known network yield None.
        """
        def mock_ifaddresses(iface):
            return DUMMY_ADDRESSES[iface]

        _interfaces.return_value = ['eth0', 'eth1']
        _ifaddresses.side_effect = mock_ifaddresses
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual instead.
        self.assertEqual(net_ip.get_netmask_for_address('192.168.1.220'),
                         '255.255.255.0')
        self.assertEqual(net_ip.get_netmask_for_address('10.5.20.4'),
                         '255.255.0.0')
        # Address on no configured network resolves to None.
        self.assertEqual(net_ip.get_netmask_for_address('172.4.5.5'), None)
        self.assertEqual(
            net_ip.get_netmask_for_address(
                '2a01:348:2f4:0:685e:5748:ae62:210f'), '64')
        self.assertEqual(net_ip.get_netmask_for_address('2001:db8:1::'),
                         '128')
Beispiel #3
0
    def local_network_split_addresses(self):
        """Build a per-address-type map of this unit's addresses.

           NOTE: private-address is excluded; only the configured
           address types (admin/internal/public) are considered.

           @return dict keyed by the local address for each address
               type, each value holding 'network' (address/netmask)
               and an ordered 'backends' map of
               {local unit name: address}.
        """
        charm_config = hookenv.config()
        hosts = {}
        for address_type in ADDRESS_TYPES:
            config_option = os_ip.ADDRESS_MAP[address_type]['config']
            address = ch_ip.get_relation_ip(
                os_ip.ADDRESS_MAP[address_type]['binding'],
                charm_config.get(config_option))
            if not address:
                # No local address bound for this network type.
                continue
            cidr = "{}/{}".format(
                address, ch_ip.get_netmask_for_address(address))
            hosts[address] = {
                'network': cidr,
                'backends': collections.OrderedDict(
                    [(self.local_unit_name, address)]),
            }
        return hosts
Beispiel #4
0
def ha_relation_joined(relation_id=None):
    """Publish swift HA resource configuration on the hacluster relation.

    Configures either DNS-HA entries or one VIP resource per configured
    vip, then sends the haproxy resource/clone definitions to hacluster.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    cluster_config = get_hacluster_config()

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            # IPv6 vips need a different OCF resource agent and param key.
            if is_ipv6(vip):
                res_swift_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_swift_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_swift_{}_vip'.format(iface)
                if vip_key in vip_group:
                    # Multiple vips on one nic: disambiguate the key unless
                    # this exact vip is already registered on it.
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log(
                            "Resource '{}' (vip='{}') already exists in "
                            "vip group - skipping".format(vip_key, vip),
                            WARNING)
                        continue

                resources[vip_key] = res_swift_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'
                    ''.format(ip=vip_params,
                              vip=vip,
                              iface=iface,
                              netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined():
    """Publish nova HA resource configuration on the hacluster relation.

    Builds one VIP resource per configured vip plus the haproxy clone,
    optionally colocating nova-consoleauth when a console is configured.
    """
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        # IPv6 vips need a different OCF resource agent and param key.
        if is_ipv6(vip):
            res_nova_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_nova_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        # Fall back to charm config when interface/netmask discovery fails.
        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_nova_{}_vip'.format(iface)
            resources[vip_key] = res_nova_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

    init_services = {
        'res_nova_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_nova_haproxy'
    }
    colocations = {}

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        # Single consoleauth instance must follow the vip.
        colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
        init_services['res_nova_consoleauth'] = 'nova-consoleauth'
        resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
Beispiel #6
0
def ha_joined(relation_id=None):
    """Publish cinder HA resource configuration on the hacluster relation.

    Configures either DNS-HA entries or one VIP resource per configured
    vip, then sends the haproxy resource/clone definitions to hacluster.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()

    resources = {
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    resource_params = {
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            # IPv6 vips need a different OCF resource agent and param key.
            if is_ipv6(vip):
                res_cinder_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_cinder_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            # Fall back to charm config when discovery fails.
            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_cinder_{}_vip'.format(iface)
                resources[vip_key] = res_cinder_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_cinder_vips': ' '.join(vip_group)})

    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Beispiel #7
0
def ha_joined(relation_id=None):
    """Publish keystone HA resource configuration on the hacluster relation.

    Builds one VIP resource per configured vip (skipping duplicates on
    the same nic) plus the haproxy resource/clone definitions.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()
    resources = {
        'res_ks_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        # IPv6 vips need a different OCF resource agent and param key.
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        # Fall back to charm config when discovery fails.
        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                # Only one vip resource per nic is supported here.
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip),
                    WARNING)
                continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined(relation_id=None):
    """Publish nova HA resource configuration on the hacluster relation.

    Configures either DNS-HA entries or one VIP resource per configured
    vip, optionally colocating nova-consoleauth, then sends the haproxy
    resource/clone definitions to hacluster.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
    init_services = {'res_nova_haproxy': 'haproxy'}
    clones = {'cl_nova_haproxy': 'res_nova_haproxy'}
    colocations = {}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            # IPv6 vips need a different OCF resource agent and param key.
            if is_ipv6(vip):
                res_nova_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_nova_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            # Fall back to charm config when discovery fails.
            iface = (get_iface_for_address(vip) or config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_nova_{}_vip'.format(iface)
                resources[vip_key] = res_nova_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask))
                vip_group.append(vip_key)

        # BUG FIX: this was previously nested inside the for loop above,
        # firing a redundant relation_set for every configured vip; set
        # the group once after all vips have been processed (matching the
        # sibling ha_joined implementations).
        if len(vip_group) >= 1:
            relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

        if (config('single-nova-consoleauth')
                and console_attributes('protocol')):
            # Single consoleauth instance must follow the vip.
            colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
            init_services['res_nova_consoleauth'] = 'nova-consoleauth'
            resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
            resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
def ha_joined(relation_id=None):
    """Publish keystone HA resource configuration on the hacluster relation.

    Builds one VIP resource per configured vip (skipping duplicates on
    the same nic) plus the haproxy resource/clone definitions.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()
    resources = {
        'res_ks_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        # IPv6 vips need a different OCF resource agent and param key.
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        # Fall back to charm config when discovery fails.
        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                # Only one vip resource per nic is supported here.
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip),
                    WARNING)
                continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined():
    """Publish ceilometer HA resource configuration on the hacluster relation.

    Builds one VIP resource per configured vip plus haproxy and the
    ceilometer central agent resources.
    """
    cluster_config = get_hacluster_config()

    resources = {
        'res_ceilometer_haproxy': 'lsb:haproxy',
        'res_ceilometer_agent_central': ('ocf:openstack:'
                                         'ceilometer-agent-central')
    }

    resource_params = {
        'res_ceilometer_haproxy': 'op monitor interval="5s"',
        'res_ceilometer_agent_central': 'op monitor interval="30s"'
    }

    # NOTE(review): if multiple amqp units publish ssl_port, the last one
    # iterated wins here — presumably they all agree; confirm.
    amqp_ssl_port = None
    for rel_id in relation_ids('amqp'):
        for unit in related_units(rel_id):
            amqp_ssl_port = relation_get('ssl_port', unit, rel_id)

    if amqp_ssl_port:
        # Point the central agent at the SSL AMQP port.
        params = ('params amqp_server_port="%s" op monitor interval="30s"' %
                  (amqp_ssl_port))
        resource_params['res_ceilometer_agent_central'] = params

    vip_group = []
    for vip in cluster_config['vip'].split():
        # NOTE: unlike sibling implementations, this variant has no IPv6
        # branch and always uses IPaddr2.
        res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'ip'

        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_ceilometer_{}_vip'.format(iface)
            resources[vip_key] = res_ceilometer_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    init_services = {
        'res_ceilometer_haproxy': 'haproxy'
    }
    clones = {
        'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Beispiel #11
0
def _resolve_network_cidr(ip_address):
    '''
    Return the network CIDR that ip_address belongs to, resolved from
    the configured local network interfaces.

    This is in charmhelpers trunk but not in pypi. Please revert to using
    charmhelpers version when pypi has been updated
    '''
    network = netaddr.IPNetwork(
        "%s/%s" % (ip_address, net_ip.get_netmask_for_address(ip_address)))
    return str(network.cidr)
Beispiel #12
0
def _resolve_network_cidr(ip_address):
    '''
    Resolve the full network CIDR for ip_address using the netmask
    discovered from the configured local network interfaces.

    This is in charmhelpers trunk but not in pypi. Please revert to using
    charmhelpers version when pypi has been updated
    '''
    mask = net_ip.get_netmask_for_address(ip_address)
    cidr = netaddr.IPNetwork("%s/%s" % (ip_address, mask)).cidr
    return str(cidr)
def ha_relation_joined(relation_id=None):
    """Publish percona-cluster HA resource configuration to hacluster.

    Configures either DNS-HA hostnames or a single mysql VIP resource,
    plus the mysql_monitor clone/colocation/location constraints, and
    sets them on every 'ha' relation.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()
    sstpsswd = config('sst-password')
    resources = {'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
    resource_params = {'res_mysql_monitor':
                       RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}

    else:
        # Fall back to charm config when discovery fails.
        vip_iface = (get_iface_for_address(cluster_config['vip']) or
                     config('vip_iface'))
        vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
                    config('vip_cidr'))

        if config('prefer-ipv6'):
            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)
        else:
            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)

        resources['res_mysql_vip'] = res_mysql_vip

        resource_params['res_mysql_vip'] = vip_params

        group_name = 'grp_percona_cluster'
        groups = {group_name: 'res_mysql_vip'}

    clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}

    # VIP/hostname group must run with the monitor clone...
    colocations = {'colo_percona_cluster': 'inf: {} cl_mysql_monitor'
                                           ''.format(group_name)}

    # ...and only on a writable node.
    locations = {'loc_percona_cluster':
                 '{} rule inf: writable eq 1'
                 ''.format(group_name)}

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     groups=groups,
                     clones=clones,
                     colocations=colocations,
                     locations=locations)
Beispiel #14
0
    def _add_ha_vips_config(self, hacluster):
        """Register a VirtualIP resource for every user-configured vip.

        @param hacluster instance of interface class HAClusterRequires
        """
        for vip in self.config.get(VIP_KEY, '').split():
            # Discover nic/netmask from local interfaces, falling back
            # to the charm-supplied config values.
            nic = (ch_ip.get_iface_for_address(vip) or
                   self.config.get(IFACE_KEY))
            mask = (ch_ip.get_netmask_for_address(vip) or
                    self.config.get(CIDR_KEY))
            if nic is None:
                continue
            hacluster.add_vip(self.name, vip, nic, mask)
Beispiel #15
0
    def _add_ha_vips_config(self, hacluster):
        """Register a VirtualIP resource for each configured vip.

        @param hacluster instance of interface class HAClusterRequires
        """
        vips = self.config.get(VIP_KEY, '').split()
        for vip in vips:
            # Discovery first; charm config is the fallback.
            nic = (ch_ip.get_iface_for_address(vip)
                   or self.config.get(IFACE_KEY))
            mask = (ch_ip.get_netmask_for_address(vip)
                    or self.config.get(CIDR_KEY))
            if nic is not None:
                hacluster.add_vip(self.name, vip, nic, mask)
def ha_relation_joined():
    """Publish swift HA resource configuration on the hacluster relation.

    Requires a 'vip' config option; raises SwiftProxyCharmException when
    it is missing.
    """
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')
    vip = config('vip')
    if not vip:
        msg = 'Unable to configure hacluster as vip not provided'
        raise SwiftProxyCharmException(msg)

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    vip_group = []
    # NOTE(review): the loop variable shadows the 'vip' config string
    # above; intentional but easy to misread.
    for vip in vip.split():
        # IPv6 vips need a different OCF resource agent and param key.
        if is_ipv6(vip):
            res_swift_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_swift_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_swift_{}_vip'.format(iface)
            resources[vip_key] = res_swift_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}

    relation_set(init_services=init_services,
                 corosync_bindiface=corosync_bindiface,
                 corosync_mcastport=corosync_mcastport,
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Beispiel #17
0
    def local_default_addresses(self):
        """Map this unit's private address to its backend/network info.

           @return dict of private address info local unit e.g.
               {'this_unit_private_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_private_addr'},
                    'network': 'this_unit_private_addr/private_netmask'}}

        """
        address = self.local_address
        cidr = "{}/{}".format(
            address, ch_ip.get_netmask_for_address(address))
        return {
            address: {
                'network': cidr,
                'backends': {self.local_unit_name: address},
            },
        }
def ha_relation_joined():
    """Publish percona-cluster HA resource configuration to hacluster.

    Builds the mysql VIP and mysql_monitor resources with their group,
    clone, colocation and location constraints, and sets them on every
    'ha' relation. Exits the hook if VIP information is incomplete.
    """
    vip = config('vip')
    # Fall back to charm config when nic/netmask discovery fails.
    vip_iface = get_iface_for_address(vip) or config('vip_iface')
    vip_cidr = get_netmask_for_address(vip) or config('vip_cidr')
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')

    if None in [vip, vip_cidr, vip_iface]:
        log('Insufficient VIP information to configure cluster')
        sys.exit(1)

    # IPv6 vips need a different OCF resource agent and param key.
    if config('prefer-ipv6'):
        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                     (vip, vip_cidr, vip_iface)
    else:
        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                     (vip, vip_cidr, vip_iface)

    resources = {'res_mysql_vip': res_mysql_vip,
                 'res_mysql_monitor': 'ocf:percona:mysql_monitor'}

    sstpsswd = config('sst-password')
    resource_params = {'res_mysql_vip': vip_params,
                       'res_mysql_monitor':
                       RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}
    groups = {'grp_percona_cluster': 'res_mysql_vip'}

    clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}

    # VIP group must run with the monitor clone, and only on a writable
    # node.
    colocations = {'vip_mysqld': 'inf: grp_percona_cluster cl_mysql_monitor'}

    locations = {'loc_percona_cluster':
                 'grp_percona_cluster rule inf: writable eq 1'}

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=corosync_bindiface,
                     corosync_mcastport=corosync_mcastport,
                     resources=resources,
                     resource_params=resource_params,
                     groups=groups,
                     clones=clones,
                     colocations=colocations,
                     locations=locations)
    def local_default_addresses(self):
        """Map this unit's private address to its backend/network info.

           @return dict of private address info local unit e.g.
               {'this_unit_private_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_private_addr'},
                    'network': 'this_unit_private_addr/private_netmask'}}

        """
        address = self.local_address
        mask = ch_ip.get_netmask_for_address(address)
        # Ordered so that rendered backend lists are deterministic.
        backends = collections.OrderedDict(
            [(self.local_unit_name, address)])
        return {
            address: {
                'network': "{}/{}".format(address, mask),
                'backends': backends,
            },
        }
Beispiel #20
0
def ha_joined(relation_id=None):
    """Publish keystone HA resource configuration on the hacluster relation.

    Configures either DNS-HA entries or one VIP resource per configured
    vip (skipping duplicates on the same nic), then sends the haproxy
    resource/clone definitions to hacluster.

    :param relation_id: specific relation id to set data on (defaults to
        the current relation context).
    """
    cluster_config = get_hacluster_config()
    resources = {"res_ks_haproxy": "lsb:haproxy"}
    resource_params = {"res_ks_haproxy": 'op monitor interval="5s"'}

    if config("dns-ha"):
        update_dns_ha_resource_params(relation_id=relation_id, resources=resources, resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config["vip"].split():
            # IPv6 vips need a different OCF resource agent and param key.
            if is_ipv6(vip):
                res_ks_vip = "ocf:heartbeat:IPv6addr"
                vip_params = "ipv6addr"
            else:
                res_ks_vip = "ocf:heartbeat:IPaddr2"
                vip_params = "ip"

            # Fall back to charm config when discovery fails.
            iface = get_iface_for_address(vip) or config("vip_iface")
            netmask = get_netmask_for_address(vip) or config("vip_cidr")

            if iface is not None:
                vip_key = "res_ks_{}_vip".format(iface)
                if vip_key in vip_group:
                    # Only one vip resource per nic is supported here.
                    log("Resource '%s' (vip='%s') already exists in " "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

                vip_group.append(vip_key)
                resources[vip_key] = res_ks_vip
                resource_params[vip_key] = 'params {ip}="{vip}" cidr_netmask="{netmask}"' ' nic="{iface}"'.format(
                    ip=vip_params, vip=vip, iface=iface, netmask=netmask
                )

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id, groups={CLUSTER_RES: " ".join(vip_group)})

    init_services = {"res_ks_haproxy": "haproxy"}
    clones = {"cl_ks_haproxy": "res_ks_haproxy"}
    relation_set(
        relation_id=relation_id,
        init_services=init_services,
        corosync_bindiface=cluster_config["ha-bindiface"],
        corosync_mcastport=cluster_config["ha-mcastport"],
        resources=resources,
        resource_params=resource_params,
        clones=clones,
    )
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    Adds one VIP resource per configured vip to relation_data (mutated
    in place), disambiguating or skipping vips that share a nic, and
    records the resulting resource group.

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    for vip in cluster_config['vip'].split():
        # IPv6 vips need a different OCF resource agent and param key.
        if is_ipv6(vip):
            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        # Fall back to charm config when discovery fails.
        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vip_group:
                # Multiple vips on one nic: disambiguate the key unless
                # this exact vip is already registered on it.
                if vip not in relation_data['resource_params'][vip_key]:
                    vip_key = '{}_{}'.format(vip_key, vip_params)
                else:
                    log("Resource '%s' (vip='%s') already exists in "
                        "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

            relation_data['resources'][vip_key] = res_neutron_vip
            relation_data['resource_params'][vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}" '
                'nic="{iface}"'.format(ip=vip_params,
                                       vip=vip,
                                       iface=iface,
                                       netmask=netmask)
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_data['groups'] = {
            'grp_{}_vips'.format(service): ' '.join(vip_group)
        }
Beispiel #22
0
def get_vip_settings(vip):
    """Resolve which nic and netmask to use for the given vip.

    Runtime discovery is attempted first; if either lookup fails the
    charm-supplied ``vip_iface`` / ``vip_cidr`` options are used and
    that is signalled via the returned boolean.

    @param vip: VIP to lookup nic and cidr for.
    @returns (str, str, bool): eg (iface, netmask, fallback)
    """
    iface = get_iface_for_address(vip)
    netmask = get_netmask_for_address(vip)
    # Fallback is flagged if either value had to come from config.
    fallback = iface is None or netmask is None
    if iface is None:
        iface = config('vip_iface')
    if netmask is None:
        netmask = config('vip_cidr')
    return iface, netmask, fallback
Beispiel #23
0
def get_vip_settings(vip):
    """Determine the nic/netmask serving the supplied VIP.

    If nic or netmask discovery fail then fallback to using charm
    supplied config; use of the fallback is reported via the third
    element of the returned tuple.

    @param vip: VIP to lookup nic and cidr for.
    @returns (str, str, bool): eg (iface, netmask, fallback)
    """
    discovered_iface = get_iface_for_address(vip)
    discovered_netmask = get_netmask_for_address(vip)
    used_fallback = False

    iface = discovered_iface
    if discovered_iface is None:
        iface = config('vip_iface')
        used_fallback = True

    netmask = discovered_netmask
    if discovered_netmask is None:
        netmask = config('vip_cidr')
        used_fallback = True

    return iface, netmask, used_fallback
Beispiel #24
0
def update_hacluster_vip(service, relation_data):
    """Add VIP resource definitions for *service* to ``relation_data``.

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    group = []
    for vip in cluster_config['vip'].split():
        # Resource agent and parameter name depend on address family.
        if is_ipv6(vip):
            agent, param_name = 'ocf:heartbeat:IPv6addr', 'ipv6addr'
        else:
            agent, param_name = 'ocf:heartbeat:IPaddr2', 'ip'

        iface = get_iface_for_address(vip) or config('vip_iface')
        netmask = get_netmask_for_address(vip) or config('vip_cidr')

        if iface is None:
            continue

        vip_key = 'res_{}_{}_vip'.format(service, iface)
        if vip_key in group:
            if vip in relation_data['resource_params'][vip_key]:
                log(
                    "Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip), WARNING)
                continue
            # Second VIP on the same interface: qualify the key.
            vip_key = '{}_{}'.format(vip_key, param_name)

        relation_data['resources'][vip_key] = agent
        relation_data['resource_params'][vip_key] = (
            'params {ip}="{vip}" cidr_netmask="{netmask}" '
            'nic="{iface}"'.format(ip=param_name,
                                   vip=vip,
                                   iface=iface,
                                   netmask=netmask))
        group.append(vip_key)

    if group:
        relation_data['groups'] = {
            'grp_{}_vips'.format(service): ' '.join(group)
        }
Beispiel #25
0
def ha_joined(relation_id=None):
    """Publish ceilometer HA resource definitions on the hacluster relation.

    Advertises the haproxy and central-agent LSB resources, plus either
    DNS HA parameters or per-VIP IPaddr2/IPv6addr resources, and asks
    the subordinate to delete the obsolete polling resource.

    @param relation_id: Optional relation id to operate on.
    """
    cluster_config = get_hacluster_config()
    # Ask the hacluster subordinate to remove the legacy polling resource.
    delete_resources = []
    delete_resources.append('res_ceilometer_polling')

    resources = {
        'res_ceilometer_haproxy': 'lsb:haproxy',
        'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
    }

    resource_params = {
        'res_ceilometer_haproxy': 'op monitor interval="5s"',
        'res_ceilometer_agent_central': 'op monitor interval="30s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            # Pick the resource agent matching the address family.
            if is_ipv6(vip):
                res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            # NOTE(review): no config('vip_iface') fallback here, unlike
            # sibling variants of this hook - confirm this is intentional.
            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_ceilometer_{}_vip'.format(iface)
                if vip_key in vip_group:
                    # Two VIPs on one interface: disambiguate the key,
                    # unless this exact VIP is already configured.
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log(
                            "Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_ceilometer_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'
                    ''.format(ip=vip_params,
                              vip=vip,
                              iface=iface,
                              netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    init_services = {'res_ceilometer_haproxy': 'haproxy'}
    clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 delete_resources=delete_resources,
                 clones=clones)
def ha_relation_joined(relation_id=None):
    """Provide percona HA resource definitions to the hacluster subordinate.

    Builds the galera master/slave resource, the VIP (or DNS HA
    hostname) resources and the associated groups/colocations, then
    publishes them on every 'ha' relation.

    @param relation_id: Optional relation id (unused for the final
        relation_set loop, which iterates all 'ha' relations).
    """
    cluster_config = get_hacluster_config()
    sstpsswd = sst_password()
    resources = {'res_percona': 'ocf:heartbeat:galera'}
    resource_params = {}
    vip_iface = (get_iface_for_address(cluster_config['vip'])
                 or config('vip_iface'))
    vip_cidr = (get_netmask_for_address(cluster_config['vip'])
                or config('vip_cidr'))

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        # BUG FIX: this group was previously overwritten unconditionally
        # further down by the VIP group, so the DNS HA hostname grouping
        # never reached the relation.
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}
    else:
        groups = {'grp_percona_cluster': 'res_mysql_vip'}

    if config('prefer-ipv6'):
        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)
    else:
        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)

    hostname_list = get_cluster_hostnames()
    percona_params = \
        ' params ' \
        ' wsrep_cluster_address="gcomm://' + ",".join(hostname_list) + '"' \
        ' config="' + resolve_cnf_file() + '"' \
        ' datadir="/var/lib/percona-xtradb-cluster"' \
        ' socket="/var/run/mysqld/mysqld.sock" ' \
        ' pid="/var/run/mysqld/mysqld.pid"' \
        ' check_user=sstuser check_passwd="' + sstpsswd + '"' \
        ' binary="/usr/bin/mysqld_safe"' \
        ' op monitor timeout=120 interval=20 depth=0' \
        ' op monitor role=Master timeout=120 interval=10 depth=0' \
        ' op monitor role=Slave timeout=120 interval=30 depth=0'

    percona_ms = {
        'ms_percona':
        'res_percona meta notify=true '
        'interleave=true master-max=3 '
        'ordered=true target-role=Started'
    }

    resource_params['res_percona'] = percona_params

    # NOTE(review): the VIP resource is still registered even in dns-ha
    # mode, where no group references it - confirm whether it should be
    # restricted to the non-dns-ha path.
    resources['res_mysql_vip'] = res_mysql_vip
    resource_params['res_mysql_vip'] = vip_params

    colocations = {
        'colo_percona_cluster': '+inf: grp_percona_cluster ms_percona:Master'
    }

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     ms=percona_ms,
                     groups=groups,
                     colocations=colocations)
Beispiel #27
0
    def __call__(self):
        """Build the haproxy template context of frontends and backends.

        Returns an empty dict (leaving haproxy unconfigured) unless this
        unit has cluster peers or ``singlenode_mode`` is set.
        """
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        # Primary address of this unit: IPv6 when preferred, otherwise
        # the resolved private-address.
        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        # haproxy backend names may not contain '/'.
        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {
                    'network': "{}/{}".format(laddr, netmask),
                    'backends': {
                        l_unit: laddr
                    }
                }
                # Register each peer's address on this network as a backend.
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid,
                                              unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # with either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {
            'network': "{}/{}".format(addr, netmask),
            'backends': {
                l_unit: addr
            }
        }
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address', rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {'frontends': cluster_hosts, 'default_backend': addr}

        # Optional timeout overrides from charm config.
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        # Only emit a usable context when at least one frontend has
        # multiple backends (or singlenode mode allows a lone unit).
        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1
                    or self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
def ha_joined(relation_id=None):
    """Publish neutron HA resources on the hacluster relation (JSON keys).

    Advertises the haproxy LSB resource plus either DNS HA parameters or
    per-VIP IPaddr2/IPv6addr resources, using json_* relation keys and
    clearing the legacy non-JSON keys afterwards.

    @param relation_id: Optional relation id to operate on.
    """
    cluster_config = get_hacluster_config()
    resources = {
        'res_neutron_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_neutron_haproxy': 'op monitor interval="5s"'
    }
    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            # Resource agent and parameter name depend on address family.
            if is_ipv6(vip):
                res_neutron_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_neutron_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            # Prefer runtime discovery; fall back to charm config.
            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_neutron_{}_vip'.format(iface)
                if vip_key in vip_group:
                    # Two VIPs on one interface: disambiguate the key,
                    # unless this exact VIP is already configured.
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_neutron_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}"'.format(ip=vip_params,
                                           vip=vip,
                                           iface=iface,
                                           netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(
                relation_id=relation_id,
                json_groups=json.dumps({
                    'grp_neutron_vips': ' '.join(vip_group)
                }, sort_keys=True)
            )

    init_services = {
        'res_neutron_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_neutron_haproxy'
    }
    # sort_keys keeps the relation data stable across hook invocations.
    relation_set(relation_id=relation_id,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 json_init_services=json.dumps(init_services,
                                               sort_keys=True),
                 json_resources=json.dumps(resources,
                                           sort_keys=True),
                 json_resource_params=json.dumps(resource_params,
                                                 sort_keys=True),
                 json_clones=json.dumps(clones,
                                        sort_keys=True))

    # NOTE(jamespage): Clear any non-json based keys
    relation_set(relation_id=relation_id,
                 groups=None, init_services=None,
                 resources=None, resource_params=None,
                 clones=None)
Beispiel #29
0
    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.

        Returns an empty dict when this unit has no cluster relation or
        no frontend has more than one backend.
        '''
        if not relation_ids('cluster'):
            return {}

        # haproxy backend names may not contain '/'.
        l_unit = local_unit().replace('/', '-')

        # Primary address of this unit: IPv6 when preferred, otherwise
        # the resolved private-address.
        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            laddr = get_address_in_network(
                config('os-{}-network'.format(addr_type)))
            if laddr:
                cluster_hosts[laddr] = {}
                cluster_hosts[laddr]['network'] = "{}/{}".format(
                    laddr,
                    get_netmask_for_address(laddr)
                )
                cluster_hosts[laddr]['backends'] = {}
                cluster_hosts[laddr]['backends'][l_unit] = laddr
                # Register each peer's address on this network as a backend.
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _unit = unit.replace('/', '-')
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) no split configurations found, just use
        # private addresses
        if not cluster_hosts:
            cluster_hosts[addr] = {}
            cluster_hosts[addr]['network'] = "{}/{}".format(
                addr,
                get_netmask_for_address(addr)
            )
            cluster_hosts[addr]['backends'] = {}
            cluster_hosts[addr]['backends'][l_unit] = addr
            for rid in relation_ids('cluster'):
                for unit in related_units(rid):
                    _unit = unit.replace('/', '-')
                    _laddr = relation_get('private-address',
                                          rid=rid, unit=unit)
                    if _laddr:
                        cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
        }

        # Optional timeout overrides from charm config.
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        # Only emit a usable context when at least one frontend has
        # multiple backends, i.e. real peers exist.
        for frontend in cluster_hosts:
            if len(cluster_hosts[frontend]['backends']) > 1:
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.')
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')
                return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
Beispiel #30
0
    def __call__(self):
        """Build the haproxy template context of frontends and backends.

        Returns an empty dict (leaving haproxy unconfigured) unless this
        unit has cluster peers or ``singlenode_mode`` is set.
        """
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        # Primary address of this unit: IPv6 when preferred, otherwise
        # the resolved private-address.
        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        # haproxy backend names may not contain '/'.
        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                # Register each peer's address on this network as a backend.
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # with either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        # Optional timeout overrides from charm config.
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        # Only emit a usable context when at least one frontend has
        # multiple backends (or singlenode mode allows a lone unit).
        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
Beispiel #31
0
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    Resource keys are derived from a short sha1 of the VIP itself; the
    legacy interface-named keys are scheduled for deletion so that
    upgrades migrate cleanly.

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    vips_to_delete = []
    for vip in cluster_config['vip'].split():
        # Resource agent and parameter name depend on address family.
        if is_ipv6(vip):
            res_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = get_iface_for_address(vip)
        netmask = get_netmask_for_address(vip)

        # Track whether config-supplied values had to be used so the
        # resource params can be pinned explicitly below.
        fallback_params = False
        if iface is None:
            iface = config('vip_iface')
            fallback_params = True
        if netmask is None:
            netmask = config('vip_cidr')
            fallback_params = True

        if iface is not None:
            # NOTE(jamespage): Delete old VIP resources
            # Old style naming encoding iface in name
            # does not work well in environments where
            # interface/subnet wiring is not consistent
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vips_to_delete:
                vip_key = '{}_{}'.format(vip_key, vip_params)
            vips_to_delete.append(vip_key)

            # New style: key on a short hash of the VIP, which is stable
            # regardless of interface naming.
            vip_key = 'res_{}_{}_vip'.format(
                service,
                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

            relation_data['resources'][vip_key] = res_vip
            # NOTE(jamespage):
            # Use option provided vip params if these where used
            # instead of auto-detected values
            if fallback_params:
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}"'.format(ip=vip_params,
                                           vip=vip,
                                           iface=iface,
                                           netmask=netmask))
            else:
                # NOTE(jamespage):
                # let heartbeat figure out which interface and
                # netmask to configure, which works nicely
                # when network interface naming is not
                # consistent across units.
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}"'.format(ip=vip_params, vip=vip))

            vip_group.append(vip_key)

    if vips_to_delete:
        # Append to any deletions already requested by the caller.
        try:
            relation_data['delete_resources'].extend(vips_to_delete)
        except KeyError:
            relation_data['delete_resources'] = vips_to_delete

    if len(vip_group) >= 1:
        key = 'grp_{}_vips'.format(service)
        try:
            relation_data['groups'][key] = ' '.join(vip_group)
        except KeyError:
            relation_data['groups'] = {key: ' '.join(vip_group)}
def ha_joined(relation_id=None):
    """Publish msm HA resources on the hacluster relation (JSON keys).

    Only acts when the controller app mode is 'msm' or 'doctl'.
    Advertises the haproxy LSB resource plus either DNS HA parameters or
    per-VIP IPaddr2/IPv6addr resources, then clears the legacy non-JSON
    relation keys.

    @param relation_id: Optional relation id to operate on.
    """
    if config('controller-app-mode') == 'msm' or config('controller-app-mode') == 'doctl':
        cluster_config = get_hacluster_config()
        resources = {
            'res_msm_haproxy': 'lsb:haproxy',
        }
        resource_params = {
            'res_msm_haproxy': 'op monitor interval="5s"'
        }
        if config('dns-ha'):
            update_dns_ha_resource_params(relation_id=relation_id,
                                          resources=resources,
                                          resource_params=resource_params)
        else:
            vip_group = []
            for vip in cluster_config['vip'].split():
                # Resource agent and parameter name depend on family.
                if is_ipv6(vip):
                    res_msm_vip = 'ocf:heartbeat:IPv6addr'
                    vip_params = 'ipv6addr'
                else:
                    res_msm_vip = 'ocf:heartbeat:IPaddr2'
                    vip_params = 'ip'

                # Prefer runtime discovery; fall back to charm config.
                iface = (get_iface_for_address(vip) or
                         config('vip_iface'))
                netmask = (get_netmask_for_address(vip) or
                           config('vip_cidr'))

                if iface is not None:
                    vip_key = 'res_msm_{}_vip'.format(iface)
                    if vip_key in vip_group:
                        # Two VIPs on one interface: disambiguate the
                        # key, unless this VIP is already configured.
                        if vip not in resource_params[vip_key]:
                            vip_key = '{}_{}'.format(vip_key, vip_params)
                        else:
                            log("Resource '%s' (vip='%s') already exists in "
                                "vip group - skipping" % (vip_key,
                                                          vip), WARNING)
                            continue

                    resources[vip_key] = res_msm_vip
                    resource_params[vip_key] = (
                        'params {ip}="{vip}" cidr_netmask="{netmask}" '
                        'nic="{iface}"'.format(ip=vip_params,
                                               vip=vip,
                                               iface=iface,
                                               netmask=netmask)
                    )
                    vip_group.append(vip_key)

            if len(vip_group) >= 1:
                relation_set(
                    relation_id=relation_id,
                    json_groups=json.dumps({
                        'grp_msm_vips': ' '.join(vip_group)
                    }, sort_keys=True)
                )

        init_services = {
            'res_msm_haproxy': 'haproxy'
        }
        clones = {
            'cl_msm_haproxy': 'res_msm_haproxy'
        }
        # sort_keys keeps the relation data stable across hook runs.
        relation_set(relation_id=relation_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     json_init_services=json.dumps(init_services,
                                                   sort_keys=True),
                     json_resources=json.dumps(resources,
                                               sort_keys=True),
                     json_resource_params=json.dumps(resource_params,
                                                     sort_keys=True),
                     json_clones=json.dumps(clones,
                                            sort_keys=True))

        # NOTE(jamespage): Clear any non-json based keys
        relation_set(relation_id=relation_id,
                     groups=None, init_services=None,
                     resources=None, resource_params=None,
                     clones=None)