Example #1
def check_optional_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either 'unknown', '' if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # NOTE: misc multi-site relation and config checks
    multisite_config = (config('realm'),
                        config('zonegroup'),
                        config('zone'))
    if relation_ids('master') or relation_ids('slave'):
        if not all(multisite_config):
            return ('blocked',
                    'multi-site configuration incomplete '
                    '(realm={realm}, zonegroup={zonegroup}'
                    ', zone={zone})'.format(**config()))
    if (all(multisite_config) and not
            (relation_ids('master') or relation_ids('slave'))):
        return ('blocked',
                'multi-site configuration but master/slave '
                'relation missing')
    if (all(multisite_config) and relation_ids('slave')):
        multisite_ready = False
        for rid in relation_ids('slave'):
            for unit in related_units(rid):
                if relation_get('url', unit=unit, rid=rid):
                    multisite_ready = True
                    continue
        if not multisite_ready:
            return ('waiting',
                    'multi-site master relation incomplete')
    master_configured = (
        leader_get('access_key'),
        leader_get('secret'),
        leader_get('restart_nonce'),
    )
    if (all(multisite_config) and
            relation_ids('master') and
            not all(master_configured)):
        return ('waiting',
                'waiting for configuration of master zone')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
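The docstring above notes that this function is handed to assess_status/set_os_workload_status as the charm_func. A minimal sketch of that wiring, assuming the charm defines REQUIRED_INTERFACES and a services() helper and that make_assess_status_func from charmhelpers.contrib.openstack.utils is available:

from charmhelpers.contrib.openstack.utils import make_assess_status_func


def assess_status(configs):
    # Build and immediately invoke the status-assessment callable.
    assess_status_func(configs)()


def assess_status_func(configs):
    # make_assess_status_func combines the required-interface checks with the
    # charm-specific charm_func; returning ('unknown', '') from the charm_func
    # leaves any status derived from the required interfaces untouched.
    return make_assess_status_func(
        configs, REQUIRED_INTERFACES,
        charm_func=check_optional_relations,
        services=services(), ports=None)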
Example #2
def check_optional_config_and_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either 'unknown', '' if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # NOTE: misc multi-site relation and config checks
    multisite_config = (config('realm'), config('zonegroup'), config('zone'))
    if relation_ids('master') or relation_ids('slave'):
        if not all(multisite_config):
            return ('blocked', 'multi-site configuration incomplete '
                    '(realm={realm}, zonegroup={zonegroup}'
                    ', zone={zone})'.format(**config()))
    if (all(multisite_config)
            and not (relation_ids('master') or relation_ids('slave'))):
        return ('blocked', 'multi-site configuration but master/slave '
                'relation missing')
    if (all(multisite_config) and relation_ids('slave')):
        multisite_ready = False
        for rid in relation_ids('slave'):
            for unit in related_units(rid):
                if relation_get('url', unit=unit, rid=rid):
                    multisite_ready = True
                    continue
        if not multisite_ready:
            return ('waiting', 'multi-site master relation incomplete')
    master_configured = (
        leader_get('access_key'),
        leader_get('secret'),
        leader_get('restart_nonce'),
    )
    if (all(multisite_config) and relation_ids('master')
            and not all(master_configured)):
        return ('waiting', 'waiting for configuration of master zone')

    # Check that the provided Ceph BlueStore configuration is valid.
    try:
        bluestore_compression = context.CephBlueStoreCompressionContext()
        bluestore_compression.validate()
    except ValueError as e:
        return ('blocked', 'Invalid configuration: {}'.format(str(e)))

    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
Example #3
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids("ha"):
        required_interfaces["ha"] = ["cluster"]
        try:
            get_hacluster_config()
        except Exception:
            return ("blocked", "hacluster missing configuration: " "vip, vip_iface, vip_cidr")

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return "unknown", "No optional relations"
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #5
def ha_joined():
    config = get_hacluster_config()
    resources = {
        'res_cinder_vip': 'ocf:heartbeat:IPaddr2',
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                 (config['vip'], config['vip_cidr'], config['vip_iface'])
    resource_params = {
        'res_cinder_vip': vip_params,
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }
    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=config['ha-bindiface'],
                 corosync_mcastport=config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
def check_optional_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either "unknown", "" if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('external-dns'):
        if config('designate_endpoint') is not None:
            if config('reverse-dns-lookup'):
                ipv4_prefix_size = config('ipv4-ptr-zone-prefix-size')
                valid_ipv4_prefix_size = (
                    (8 <= ipv4_prefix_size <= 24) and
                    (ipv4_prefix_size % 8) == 0)
                if not valid_ipv4_prefix_size:
                    log('Invalid ipv4-ptr-zone-prefix-size. Value of '
                        'ipv4-ptr-zone-prefix-size has to be multiple'
                        ' of 8, with maximum value of 24 and minimum value '
                        'of 8.', level=DEBUG)
                    return ('blocked',
                            'Invalid configuration: '
                            'ipv4-ptr-zone-prefix-size')
                ipv6_prefix_size = config('ipv6-ptr-zone-prefix-size')
                valid_ipv6_prefix_size = (
                    (4 <= ipv6_prefix_size <= 124) and
                    (ipv6_prefix_size % 4) == 0)
                if not valid_ipv6_prefix_size:
                    log('Invalid ipv6-ptr-zone-prefix-size. Value of '
                        'ipv6-ptr-zone-prefix-size has to be multiple'
                        ' of 4, with maximum value of 124 and minimum value '
                        'of 4.', level=DEBUG)
                    return ('blocked',
                            'Invalid configuration: '
                            'ipv6-ptr-zone-prefix-size')
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
def ha_joined():
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_nova_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_nova_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_nova_{}_vip'.format(iface)
            resources[vip_key] = res_nova_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

    init_services = {
        'res_nova_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_nova_haproxy'
    }
    colocations = {}

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
        init_services['res_nova_consoleauth'] = 'nova-consoleauth'
        resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
Example #9
def ha_relation_joined(relation_id=None):
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    cluster_config = get_hacluster_config()

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_swift_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_swift_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_swift_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log(
                            "Resource '{}' (vip='{}') already exists in "
                            "vip group - skipping".format(vip_key, vip),
                            WARNING)
                        continue

                resources[vip_key] = res_swift_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'
                    ''.format(ip=vip_params,
                              vip=vip,
                              iface=iface,
                              netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example #10
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()

    resources = {
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    resource_params = {
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_cinder_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_cinder_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_cinder_{}_vip'.format(iface)
                resources[vip_key] = res_cinder_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_cinder_vips': ' '.join(vip_group)})

    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example #11
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_ks_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip),
                    WARNING)
                continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
    init_services = {'res_nova_haproxy': 'haproxy'}
    clones = {'cl_nova_haproxy': 'res_nova_haproxy'}
    colocations = {}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_nova_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_nova_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_nova_{}_vip'.format(iface)
                resources[vip_key] = res_nova_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask))
                vip_group.append(vip_key)

            if len(vip_group) >= 1:
                relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

        if (config('single-nova-consoleauth')
                and console_attributes('protocol')):
            colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
            init_services['res_nova_consoleauth'] = 'nova-consoleauth'
            resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
            resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
Example #13
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_ks_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip),
                    WARNING)
                continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example #14
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    if cmp_pkgrevno('radosgw', '0.55') >= 0 and \
            relation_ids('identity-service'):
        required_interfaces['identity'] = ['identity-service']
    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #15
def ha_joined():
    cluster_config = get_hacluster_config()

    resources = {
        'res_ceilometer_haproxy': 'lsb:haproxy',
        'res_ceilometer_agent_central': ('ocf:openstack:'
                                         'ceilometer-agent-central')
    }

    resource_params = {
        'res_ceilometer_haproxy': 'op monitor interval="5s"',
        'res_ceilometer_agent_central': 'op monitor interval="30s"'
    }

    amqp_ssl_port = None
    for rel_id in relation_ids('amqp'):
        for unit in related_units(rel_id):
            amqp_ssl_port = relation_get('ssl_port', unit, rel_id)

    if amqp_ssl_port:
        params = ('params amqp_server_port="%s" op monitor interval="30s"' %
                  (amqp_ssl_port))
        resource_params['res_ceilometer_agent_central'] = params

    vip_group = []
    for vip in cluster_config['vip'].split():
        res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'ip'

        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_ceilometer_{}_vip'.format(iface)
            resources[vip_key] = res_ceilometer_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    init_services = {
        'res_ceilometer_haproxy': 'haproxy'
    }
    clones = {
        'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example #16
def check_optional_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either "unknown", "" if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
def ha_relation_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    sstpsswd = config('sst-password')
    resources = {'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
    resource_params = {'res_mysql_monitor':
                       RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}

    else:
        vip_iface = (get_iface_for_address(cluster_config['vip']) or
                     config('vip_iface'))
        vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
                    config('vip_cidr'))

        if config('prefer-ipv6'):
            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)
        else:
            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)

        resources['res_mysql_vip'] = res_mysql_vip

        resource_params['res_mysql_vip'] = vip_params

        group_name = 'grp_percona_cluster'
        groups = {group_name: 'res_mysql_vip'}

    clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}

    colocations = {'colo_percona_cluster': 'inf: {} cl_mysql_monitor'
                                           ''.format(group_name)}

    locations = {'loc_percona_cluster':
                 '{} rule inf: writable eq 1'
                 ''.format(group_name)}

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     groups=groups,
                     clones=clones,
                     colocations=colocations,
                     locations=locations)
Example #18
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids("ha"):
        required_interfaces["ha"] = ["cluster"]
        try:
            get_hacluster_config()
        except Exception:
            return ("blocked", "hacluster missing configuration: " "vip, vip_iface, vip_cidr")

    if relation_ids("ceph") or relation_ids("object-store"):
        required_interfaces["storage-backend"] = ["ceph", "object-store"]

    if relation_ids("amqp"):
        required_interfaces["messaging"] = ["amqp"]

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return "unknown", "No optional relations"
Example #19
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    if relation_ids('quantum-network-service'):
        required_interfaces['quantum'] = ['quantum-network-service']
    if relation_ids('cinder-volume-service'):
        required_interfaces['cinder'] = ['cinder-volume-service']
    if relation_ids('neutron-api'):
        required_interfaces['neutron-api'] = ['neutron-api']
    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
def check_optional_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either "unknown", "" if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    if relation_ids('quantum-network-service'):
        required_interfaces['quantum'] = ['quantum-network-service']
    if relation_ids('cinder-volume-service'):
        required_interfaces['cinder'] = ['cinder-volume-service']
    if relation_ids('neutron-api'):
        required_interfaces['neutron-api'] = ['neutron-api']
    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #22
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if relation_ids('ceph') or relation_ids('object-store'):
        required_interfaces['storage-backend'] = ['ceph', 'object-store']

    if relation_ids('amqp'):
        required_interfaces['messaging'] = ['amqp']

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #23
    def test_get_hacluster_config_complete(self):
        '''It fetches all hacluster charm config'''
        conf = {
            'ha-bindiface': 'eth1',
            'ha-mcastport': '3333',
            'vip': '10.0.0.1',
        }

        def _fake_config_get(setting):
            return conf[setting]

        self.config_get.side_effect = _fake_config_get
        self.assertEqual(conf, cluster_utils.get_hacluster_config())
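For reference, a minimal sketch of the helper this test exercises, kept consistent with the keys the test expects; the real charmhelpers.contrib.hahelpers.cluster implementation additionally validates the values and raises HAIncompleteConfig when required settings are missing:

def get_hacluster_config(exclude_keys=None):
    # Sketch only: collect the hacluster-related charm options, skipping any
    # keys the caller asked to exclude. config_get is the charm config
    # accessor (mocked as self.config_get in the test above).
    conf_keys = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in conf_keys:
        if exclude_keys and setting in exclude_keys:
            continue
        conf[setting] = config_get(setting)
    return conf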
Example #24
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if relation_ids('ceph') or relation_ids('object-store'):
        required_interfaces['storage-backend'] = ['ceph', 'object-store']

    if relation_ids('amqp'):
        required_interfaces['messaging'] = ['amqp']

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #25
def check_optional_config_and_relations(configs):
    """Validate optional configuration and relations when present.

    This function is called from assess_status/set_os_workload_status as the
    charm_func and needs to return either None, None if there is no problem or
    the status, message if there is a problem.

    :param configs: an OSConfigRenderer() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    # Check that, if we have a relation_id for high availability, we can
    # get the hacluster config.  If we can't then we are blocked.
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked', 'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')

    if relation_ids('ceph'):
        # Check that the provided Ceph BlueStore configuration is valid.
        try:
            bluestore_compression = context.CephBlueStoreCompressionContext()
            bluestore_compression.validate()
        except AttributeError:
            # The charm does late installation of the `ceph-common` package
            # and the class initializer above will raise an exception until
            # it is installed.
            pass
        except ValueError as e:
            return ('blocked', 'Invalid configuration: {}'.format(str(e)))
        # ceph pkgs are only installed after the ceph relation is established
        # so gate checking broker requests on ceph relation being completed.
        if ('ceph' in configs.complete_contexts()
                and not is_request_complete(get_ceph_request())):
            return ('waiting', 'Ceph broker request incomplete')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return "unknown", ""
Example #26
    def test_get_hacluster_config_with_excludes(self):
        '''It fetches all hacluster charm config'''
        conf = {
            'ha-bindiface': 'eth1',
            'ha-mcastport': '3333',
        }

        def _fake_config_get(setting):
            return conf[setting]

        self.config_get.side_effect = _fake_config_get
        exclude_keys = ['vip']
        result = cluster_utils.get_hacluster_config(exclude_keys)
        self.assertEqual(conf, result)
Example #27
    def test_get_hacluster_config_with_excludes(self, valid_hacluster_config):
        '''It fetches all hacluster charm config'''
        conf = {
            'ha-bindiface': 'eth1',
            'ha-mcastport': '3333',
        }
        valid_hacluster_config.return_value = True

        def _fake_config_get(setting):
            return conf[setting]

        self.config_get.side_effect = _fake_config_get
        exclude_keys = ['vip', 'os-admin-hostname', 'os-internal-hostname',
                        'os-public-hostname', 'os-access-hostname']
        result = cluster_utils.get_hacluster_config(exclude_keys)
        self.assertEqual(conf, result)
Example #28
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {"res_ks_haproxy": "lsb:haproxy"}
    resource_params = {"res_ks_haproxy": 'op monitor interval="5s"'}

    if config("dns-ha"):
        update_dns_ha_resource_params(relation_id=relation_id, resources=resources, resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config["vip"].split():
            if is_ipv6(vip):
                res_ks_vip = "ocf:heartbeat:IPv6addr"
                vip_params = "ipv6addr"
            else:
                res_ks_vip = "ocf:heartbeat:IPaddr2"
                vip_params = "ip"

            iface = get_iface_for_address(vip) or config("vip_iface")
            netmask = get_netmask_for_address(vip) or config("vip_cidr")

            if iface is not None:
                vip_key = "res_ks_{}_vip".format(iface)
                if vip_key in vip_group:
                    log("Resource '%s' (vip='%s') already exists in " "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

                vip_group.append(vip_key)
                resources[vip_key] = res_ks_vip
                resource_params[vip_key] = 'params {ip}="{vip}" cidr_netmask="{netmask}"' ' nic="{iface}"'.format(
                    ip=vip_params, vip=vip, iface=iface, netmask=netmask
                )

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id, groups={CLUSTER_RES: " ".join(vip_group)})

    init_services = {"res_ks_haproxy": "haproxy"}
    clones = {"cl_ks_haproxy": "res_ks_haproxy"}
    relation_set(
        relation_id=relation_id,
        init_services=init_services,
        corosync_bindiface=cluster_config["ha-bindiface"],
        corosync_mcastport=cluster_config["ha-mcastport"],
        resources=resources,
        resource_params=resource_params,
        clones=clones,
    )
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vip_group:
                if vip not in relation_data['resource_params'][vip_key]:
                    vip_key = '{}_{}'.format(vip_key, vip_params)
                else:
                    log("Resource '%s' (vip='%s') already exists in "
                        "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

            relation_data['resources'][vip_key] = res_neutron_vip
            relation_data['resource_params'][vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}" '
                'nic="{iface}"'.format(ip=vip_params,
                                       vip=vip,
                                       iface=iface,
                                       netmask=netmask)
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_data['groups'] = {
            'grp_{}_vips'.format(service): ' '.join(vip_group)
        }
Example #30
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vip_group:
                if vip not in relation_data['resource_params'][vip_key]:
                    vip_key = '{}_{}'.format(vip_key, vip_params)
                else:
                    log(
                        "Resource '%s' (vip='%s') already exists in "
                        "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

            relation_data['resources'][vip_key] = res_neutron_vip
            relation_data['resource_params'][vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}" '
                'nic="{iface}"'.format(ip=vip_params,
                                       vip=vip,
                                       iface=iface,
                                       netmask=netmask))
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_data['groups'] = {
            'grp_{}_vips'.format(service): ' '.join(vip_group)
        }
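Note that update_hacluster_vip mutates a caller-supplied relation_data dictionary instead of calling relation_set itself. A hedged sketch of a caller; the helper name build_ha_relation_data is illustrative only, and the json_* key convention mirrors the ha_joined examples above:

import json


def build_ha_relation_data(service):
    # Sketch only: prepare the skeleton update_hacluster_vip expects, let it
    # fill in the VIP resources and groups, then JSON-encode the non-empty
    # sections in the same json_* form used by the ha_joined handlers above.
    relation_data = {
        'resources': {},
        'resource_params': {},
    }
    update_hacluster_vip(service, relation_data)
    return {
        'json_{}'.format(key): json.dumps(value, sort_keys=True)
        for key, value in relation_data.items() if value
    }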
Example #31
    def test_get_hacluster_config_complete(self, valid_hacluster_config):
        '''It fetches all hacluster charm config'''
        conf = {
            'ha-bindiface': 'eth1',
            'ha-mcastport': '3333',
            'vip': '10.0.0.1',
            'os-admin-hostname': None,
            'os-public-hostname': None,
            'os-internal-hostname': None,
            'os-access-hostname': None,
        }

        valid_hacluster_config.return_value = True

        def _fake_config_get(setting):
            return conf[setting]

        self.config_get.side_effect = _fake_config_get
        self.assertEqual(conf, cluster_utils.get_hacluster_config())
Example #32
def ha_relation_joined():
    if config('ha-legacy-mode'):
        log('ha-relation-changed update_legacy_ha_files')
        install_legacy_ha_files()
        cache_env_data()
        cluster_config = get_hacluster_config(exclude_keys=['vip'])
        resources = {
            'res_monitor': 'ocf:canonical:NeutronAgentMon',
        }
        resource_params = {
            'res_monitor': 'op monitor interval="60s"',
        }
        clones = {
            'cl_monitor': 'res_monitor meta interleave="true"',
        }

        relation_set(corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     clones=clones)
def ha_relation_joined():
    if config('ha-legacy-mode'):
        log('ha-relation-changed update_legacy_ha_files')
        install_legacy_ha_files()
        cache_env_data()
        cluster_config = get_hacluster_config(exclude_keys=['vip'])
        resources = {
            'res_monitor': 'ocf:canonical:NeutronAgentMon',
        }
        resource_params = {
            'res_monitor': 'op monitor interval="60s"',
        }
        clones = {
            'cl_monitor': 'res_monitor meta interleave="true"',
        }

        relation_set(corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     clones=clones)
def ha_relation_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    sstpsswd = sst_password()
    resources = {'res_percona': 'ocf:heartbeat:galera'}
    resource_params = {}
    vip_iface = (get_iface_for_address(cluster_config['vip'])
                 or config('vip_iface'))
    vip_cidr = (get_netmask_for_address(cluster_config['vip'])
                or config('vip_cidr'))

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}

    if config('prefer-ipv6'):
        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)
    else:
        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)

    hostname_list = get_cluster_hostnames()
    percona_params = \
        ' params ' \
        ' wsrep_cluster_address="gcomm://' + ",".join(hostname_list) + '"' \
        ' config="' + resolve_cnf_file() + '"' \
        ' datadir="/var/lib/percona-xtradb-cluster"' \
        ' socket="/var/run/mysqld/mysqld.sock" ' \
        ' pid="/var/run/mysqld/mysqld.pid"' \
        ' check_user=sstuser check_passwd="' + sstpsswd + '"' \
        ' binary="/usr/bin/mysqld_safe"' \
        ' op monitor timeout=120 interval=20 depth=0' \
        ' op monitor role=Master timeout=120 interval=10 depth=0' \
        ' op monitor role=Slave timeout=120 interval=30 depth=0'

    percona_ms = {
        'ms_percona':
        'res_percona meta notify=true '
        'interleave=true master-max=3 '
        'ordered=true target-role=Started'
    }

    resource_params['res_percona'] = percona_params

    resources['res_mysql_vip'] = res_mysql_vip

    resource_params['res_mysql_vip'] = vip_params

    groups = {'grp_percona_cluster': 'res_mysql_vip'}

    colocations = {
        'colo_percona_cluster': '+inf: grp_percona_cluster ms_percona:Master'
    }

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     ms=percona_ms,
                     groups=groups,
                     colocations=colocations)
Example #35
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    vips_to_delete = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface, netmask, fallback = get_vip_settings(vip)

        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
        if iface is not None:
            # NOTE(jamespage): Delete old VIP resources
            # Old style naming encoding iface in name
            # does not work well in environments where
            # interface/subnet wiring is not consistent
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vips_to_delete:
                vip_key = '{}_{}'.format(vip_key, vip_params)
            vips_to_delete.append(vip_key)

            vip_key = 'res_{}_{}_vip'.format(
                service,
                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

            relation_data['resources'][vip_key] = res_vip
            # NOTE(jamespage):
            # Use option provided vip params if these where used
            # instead of auto-detected values
            if fallback:
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}" {vip_monitoring}'.format(
                        ip=vip_params,
                        vip=vip,
                        iface=iface,
                        netmask=netmask,
                        vip_monitoring=vip_monitoring))
            else:
                # NOTE(jamespage):
                # let heartbeat figure out which interface and
                # netmask to configure, which works nicely
                # when network interface naming is not
                # consistent across units.
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" {vip_monitoring}'.format(
                        ip=vip_params,
                        vip=vip,
                        vip_monitoring=vip_monitoring))

            vip_group.append(vip_key)

    if vips_to_delete:
        try:
            relation_data['delete_resources'].extend(vips_to_delete)
        except KeyError:
            relation_data['delete_resources'] = vips_to_delete

    if len(vip_group) >= 1:
        key = VIP_GROUP_NAME.format(service=service)
        try:
            relation_data['groups'][key] = ' '.join(vip_group)
        except KeyError:
            relation_data['groups'] = {
                key: ' '.join(vip_group)
            }
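This variant relies on a get_vip_settings helper and a VIP_GROUP_NAME template (presumably something like 'grp_{service}_vips', matching the group names in the other examples) that are not shown here. A hedged sketch of get_vip_settings, based on the vip_iface/vip_cidr fallback pattern visible in the earlier ha_joined examples:

def get_vip_settings(vip):
    # Sketch only: try to resolve the interface and netmask for the VIP from
    # the unit's own addresses, and fall back to the vip_iface / vip_cidr
    # charm options when the VIP cannot be matched to a local interface.
    iface = get_iface_for_address(vip)
    netmask = get_netmask_for_address(vip)
    fallback = False
    if iface is None:
        iface = config('vip_iface')
        fallback = True
    if netmask is None:
        netmask = config('vip_cidr')
        fallback = True
    return iface, netmask, fallback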
Example #36
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_neutron_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_neutron_haproxy': 'op monitor interval="5s"'
    }
    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_neutron_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_neutron_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_neutron_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_neutron_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}"'.format(ip=vip_params,
                                           vip=vip,
                                           iface=iface,
                                           netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(
                relation_id=relation_id,
                json_groups=json.dumps({
                    'grp_neutron_vips': ' '.join(vip_group)
                }, sort_keys=True)
            )

    init_services = {
        'res_neutron_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_neutron_haproxy'
    }
    relation_set(relation_id=relation_id,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 json_init_services=json.dumps(init_services,
                                               sort_keys=True),
                 json_resources=json.dumps(resources,
                                           sort_keys=True),
                 json_resource_params=json.dumps(resource_params,
                                                 sort_keys=True),
                 json_clones=json.dumps(clones,
                                        sort_keys=True))

    # NOTE(jamespage): Clear any non-json based keys
    relation_set(relation_id=relation_id,
                 groups=None, init_services=None,
                 resources=None, resource_params=None,
                 clones=None)
Example #37
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    delete_resources = []
    delete_resources.append('res_ceilometer_polling')

    resources = {
        'res_ceilometer_haproxy': 'lsb:haproxy',
        'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
    }

    resource_params = {
        'res_ceilometer_haproxy': 'op monitor interval="5s"',
        'res_ceilometer_agent_central': 'op monitor interval="30s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_ceilometer_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log(
                            "Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_ceilometer_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'
                    ''.format(ip=vip_params,
                              vip=vip,
                              iface=iface,
                              netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    init_services = {'res_ceilometer_haproxy': 'haproxy'}
    clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 delete_resources=delete_resources,
                 clones=clones)
Example #38
def ha_joined(relation_id=None):
    if config('controller-app-mode') == 'msm' or config('controller-app-mode') == 'doctl':
        cluster_config = get_hacluster_config()
        resources = {
            'res_msm_haproxy': 'lsb:haproxy',
        }
        resource_params = {
            'res_msm_haproxy': 'op monitor interval="5s"'
        }
        if config('dns-ha'):
            update_dns_ha_resource_params(relation_id=relation_id,
                                          resources=resources,
                                          resource_params=resource_params)
        else:
            vip_group = []
            for vip in cluster_config['vip'].split():
                if is_ipv6(vip):
                    res_msm_vip = 'ocf:heartbeat:IPv6addr'
                    vip_params = 'ipv6addr'
                else:
                    res_msm_vip = 'ocf:heartbeat:IPaddr2'
                    vip_params = 'ip'

                iface = (get_iface_for_address(vip) or
                         config('vip_iface'))
                netmask = (get_netmask_for_address(vip) or
                           config('vip_cidr'))

                if iface is not None:
                    vip_key = 'res_msm_{}_vip'.format(iface)
                    if vip_key in vip_group:
                        if vip not in resource_params[vip_key]:
                            vip_key = '{}_{}'.format(vip_key, vip_params)
                        else:
                            log("Resource '%s' (vip='%s') already exists in "
                                "vip group - skipping" % (vip_key,
                                                          vip), WARNING)
                            continue

                    resources[vip_key] = res_msm_vip
                    resource_params[vip_key] = (
                        'params {ip}="{vip}" cidr_netmask="{netmask}" '
                        'nic="{iface}"'.format(ip=vip_params,
                                               vip=vip,
                                               iface=iface,
                                               netmask=netmask)
                    )
                    vip_group.append(vip_key)

            if len(vip_group) >= 1:
                relation_set(
                    relation_id=relation_id,
                    json_groups=json.dumps({
                        'grp_msm_vips': ' '.join(vip_group)
                    }, sort_keys=True)
                )

        init_services = {
            'res_msm_haproxy': 'haproxy'
        }
        clones = {
            'cl_msm_haproxy': 'res_msm_haproxy'
        }
        relation_set(relation_id=relation_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     json_init_services=json.dumps(init_services,
                                                   sort_keys=True),
                     json_resources=json.dumps(resources,
                                               sort_keys=True),
                     json_resource_params=json.dumps(resource_params,
                                                     sort_keys=True),
                     json_clones=json.dumps(clones,
                                            sort_keys=True))

        # NOTE(jamespage): Clear any non-json based keys
        relation_set(relation_id=relation_id,
                     groups=None, init_services=None,
                     resources=None, resource_params=None,
                     clones=None)
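The main difference from the previous example is the fallback to the vip_iface and vip_cidr charm options when auto-detection of the interface or netmask fails. The next example folds that detect-then-fall-back logic into a get_vip_settings() helper; a sketch of such a helper, with its signature inferred from how it is called in that example, might look like:

from charmhelpers.contrib.network.ip import (
    get_iface_for_address,
    get_netmask_for_address,
)
from charmhelpers.core.hookenv import config


def get_vip_settings(vip):
    """Return (iface, netmask, fallback) for a VIP, falling back to the
    vip_iface/vip_cidr charm options when auto-detection fails."""
    iface = get_iface_for_address(vip)
    netmask = get_netmask_for_address(vip)
    fallback = False
    if iface is None:
        iface = config('vip_iface')
        fallback = True
    if netmask is None:
        netmask = config('vip_cidr')
        fallback = True
    return iface, netmask, fallback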
Example No. 39
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    vips_to_delete = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface, netmask, fallback = get_vip_settings(vip)

        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
        if iface is not None:
            # NOTE(jamespage): Delete old VIP resources
            # Old style naming encoding iface in name
            # does not work well in environments where
            # interface/subnet wiring is not consistent
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vips_to_delete:
                vip_key = '{}_{}'.format(vip_key, vip_params)
            vips_to_delete.append(vip_key)

            vip_key = 'res_{}_{}_vip'.format(
                service,
                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

            relation_data['resources'][vip_key] = res_vip
            # NOTE(jamespage):
            # Pass the nic/netmask explicitly when the operator-provided
            # charm options were used instead of auto-detected values
            if fallback:
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}" {vip_monitoring}'.format(
                        ip=vip_params,
                        vip=vip,
                        iface=iface,
                        netmask=netmask,
                        vip_monitoring=vip_monitoring))
            else:
                # NOTE(jamespage):
                # let heartbeat figure out which interface and
                # netmask to configure, which works nicely
                # when network interface naming is not
                # consistent across units.
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" {vip_monitoring}'.format(
                        ip=vip_params, vip=vip, vip_monitoring=vip_monitoring))

            vip_group.append(vip_key)

    if vips_to_delete:
        try:
            relation_data['delete_resources'].extend(vips_to_delete)
        except KeyError:
            relation_data['delete_resources'] = vips_to_delete

    if len(vip_group) >= 1:
        key = VIP_GROUP_NAME.format(service=service)
        try:
            relation_data['groups'][key] = ' '.join(vip_group)
        except KeyError:
            relation_data['groups'] = {key: ' '.join(vip_group)}
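A caller has to seed relation_data with 'resources' and 'resource_params' before invoking the helper, since those keys are indexed directly; 'delete_resources' and 'groups' are created on demand. A hypothetical call (service name and resources are placeholders, and it assumes an interface can be resolved for each configured VIP):

relation_data = {
    'resources': {'res_keystone_haproxy': 'lsb:haproxy'},
    'resource_params': {'res_keystone_haproxy': 'op monitor interval="5s"'},
}
update_hacluster_vip('keystone', relation_data)
# relation_data now also carries res_keystone_<sha1-prefix>_vip resources,
# a VIP group keyed by VIP_GROUP_NAME, and any old interface-named VIP
# resources queued for deletion.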
Example No. 40
def update_hacluster_vip(service, relation_data):
    """ Configure VIP resources based on provided configuration

    @param service: Name of the service being configured
    @param relation_data: Pointer to dictionary of relation data.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    vips_to_delete = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = get_iface_for_address(vip)
        netmask = get_netmask_for_address(vip)

        fallback_params = False
        if iface is None:
            iface = config('vip_iface')
            fallback_params = True
        if netmask is None:
            netmask = config('vip_cidr')
            fallback_params = True

        if iface is not None:
            # NOTE(jamespage): Delete old VIP resources
            # Old style naming encoding iface in name
            # does not work well in environments where
            # interface/subnet wiring is not consistent
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vips_to_delete:
                vip_key = '{}_{}'.format(vip_key, vip_params)
            vips_to_delete.append(vip_key)

            vip_key = 'res_{}_{}_vip'.format(
                service,
                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

            relation_data['resources'][vip_key] = res_vip
            # NOTE(jamespage):
            # Pass the nic/netmask explicitly when the operator-provided
            # charm options were used instead of auto-detected values
            if fallback_params:
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}"'.format(ip=vip_params,
                                           vip=vip,
                                           iface=iface,
                                           netmask=netmask))
            else:
                # NOTE(jamespage):
                # let heartbeat figure out which interface and
                # netmask to configure, which works nicely
                # when network interface naming is not
                # consistent across units.
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}"'.format(ip=vip_params, vip=vip))

            vip_group.append(vip_key)

    if vips_to_delete:
        relation_data['delete_resources'] = vips_to_delete

    if len(vip_group) >= 1:
        relation_data['groups'] = {
            'grp_{}_vips'.format(service): ' '.join(vip_group)
        }
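Unlike the previous example, this variant assigns 'delete_resources' and 'groups' outright, so anything a caller had already queued under those keys is replaced rather than extended. The contrast, reduced to the two idioms with placeholder names:

vips_to_delete = ['res_svc_eth0_vip']

# This example: wholesale assignment drops any entry the caller queued.
variant_a = {'delete_resources': ['res_old_vip']}
variant_a['delete_resources'] = vips_to_delete
# variant_a['delete_resources'] == ['res_svc_eth0_vip']

# Previous example: extend the existing list, creating it only if absent.
variant_b = {'delete_resources': ['res_old_vip']}
try:
    variant_b['delete_resources'].extend(vips_to_delete)
except KeyError:
    variant_b['delete_resources'] = vips_to_delete
# variant_b['delete_resources'] == ['res_old_vip', 'res_svc_eth0_vip']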