Example #1
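Chooses a relation name and interface for a PrincipalRelation, probing which of three relation types is currently attached.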
def __init__(self, *args, **kwargs):
    if hookenv.relations_of_type('nrpe-external-master'):
        self.name = 'nrpe-external-master'
        self.interface = 'nrpe-external-master'
    elif hookenv.relations_of_type('general-info'):
        self.name = 'general-info'
        self.interface = 'juju-info'
    elif hookenv.relations_of_type('local-monitors'):
        self.name = 'local-monitors'
        self.interface = 'local-monitors'
    super(PrincipalRelation, self).__init__(*args, **kwargs)
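Example #2
The same PrincipalRelation initializer as Example #1, with a docstring and double-quoted strings.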
def __init__(self, *args, **kwargs):
    """Set name and interface."""
    if hookenv.relations_of_type("nrpe-external-master"):
        self.name = "nrpe-external-master"
        self.interface = "nrpe-external-master"
    elif hookenv.relations_of_type("general-info"):
        self.name = "general-info"
        self.interface = "juju-info"
    elif hookenv.relations_of_type("local-monitors"):
        self.name = "local-monitors"
        self.interface = "local-monitors"
    super(PrincipalRelation, self).__init__(*args, **kwargs)
Example #3
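A MongoDB replica-set hook: it either joins a configured master, or elects the unit with the lowest install order as master, initiates the replica set, and adds the remaining members.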
def replica_set_relation_changed():
    juju_log("replica_set_relation_changed")
    my_hostname = unit_get('public-address')
    my_port = config('port')
    my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
    my_replicaset_master = config('replicaset_master')

    # If we are joining an existing replicaset cluster, just join and leave.
    if my_replicaset_master != "auto":
        return (join_replset(my_replicaset_master, my_hostname))

    # Default to this node being the master
    master_hostname = my_hostname
    master_port = my_port
    master_install_order = my_install_order

    # Check the nodes in the relation to find the master
    for member in relations_of_type('replica-set'):
        member = member['__unit__']
        juju_log("replica_set_relation_changed: member: %s" % member)
        hostname = relation_get('hostname', member)
        port = relation_get('port', member)
        inst_ordr = relation_get('install-order', member)
        juju_log("replica_set_relation_changed: install_order: %s" % inst_ordr)
        if inst_ordr is None:
            juju_log("replica_set_relation_changed: install_order is None."
                     "  relation is not ready")
            break
        if int(inst_ordr) < int(master_install_order):
            master_hostname = hostname
            master_port = port
            master_install_order = inst_ordr

    # Initiate the replset
    init_replset("%s:%s" % (master_hostname, master_port))

    # Add the rest of the nodes to the replset
    for member in relations_of_type('replica-set'):
        hostname = relation_get('hostname', member['__unit__'])
        port = relation_get('port', member['__unit__'])
        if master_hostname != hostname:
            if hostname == my_hostname:
                subprocess.call(
                    ['mongo', '--eval',
                     "rs.add(\"%s\")" % hostname])
            else:
                join_replset("%s:%s" % (master_hostname, master_port),
                             "%s:%s" % (hostname, port))

    # Add this node to the replset ( if needed )
    if master_hostname != my_hostname:
        join_replset("%s:%s" % (master_hostname, master_port),
                     "%s:%s" % (my_hostname, my_port))
    def __call__(self):
        hookenv.log("Generating template ctxt for simplestreams-image-service")
        config = hookenv.config()

        modify_hook_scripts = []
        image_modifiers = hookenv.relations_of_type('image-modifier')
        if len(image_modifiers) > 1:
            raise MultipleImageModifierSubordinatesIsNotSupported()

        if len(image_modifiers) == 1:
            im = image_modifiers[0]
            try:
                modify_hook_scripts.append(im['script-path'])

            except KeyError as ke:
                hookenv.log('relation {} yielded '
                            'exception {} - ignoring.'.format(
                                repr(im), repr(ke)))

        # default no-op so that None still means "missing" for config
        # validation (see elsewhere)
        if len(modify_hook_scripts) == 0:
            modify_hook_scripts.append('/bin/true')

        return dict(mirror_list=config['mirror_list'],
                    modify_hook_scripts=', '.join(modify_hook_scripts),
                    name_prefix=config['name_prefix'],
                    content_id_template=config['content_id_template'],
                    use_swift=config['use_swift'],
                    region=config['region'],
                    cloud_name=config['cloud_name'],
                    user_agent=config['user_agent'],
                    hypervisor_mapping=config['hypervisor_mapping'])
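Example #5
Configures a carbon output for a ceph collector, deriving the metric root key from the relation's unit tag and the local FQDN.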
def collector_relation_changed():
    host = relation_get("hostname")
    port = relation_get("port")

    # Bail out early if the relation hasn't provided host and port yet
    if host is None or port is None:
        log("host or port is none")
        return
    else:
        relation_data = relations_of_type("ceph")
        if not relation_data:
            return
        try:
            hostname = subprocess.check_output(["hostname", "-f"]).replace(".", "_").rstrip("\n")
            relation = relation_data[0]["__unit__"]
            unit_tag = "unit-{0}".format(relation.replace("/", "-"))
            log("unit_tag: " + str(unit_tag))
            root_key = "{unit_tag}.{hostname}.ceph".format(unit_tag=unit_tag, hostname=hostname)

            carbon = {"host": host, "port": port, "root_key": root_key}

            update_service_config(service_dict={"outputs": ["carbon"], "carbon": carbon})
            restart()
        except subprocess.CalledProcessError as err:
            log("Service restart failed with err: " + err.message)
Example #6
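A Telegraf input handler: gathers host/port pairs from the elasticsearch relation and renders them into an inputs.elasticsearch stanza.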
def elasticsearch_input(es):
    template = """
[[inputs.elasticsearch]]
  servers = {{ servers }}
"""
    hosts = []
    rels = hookenv.relations_of_type('elasticsearch')
    for rel in rels:
        es_host = rel.get('host')
        port = rel.get('port')
        if not es_host or not port:
            hookenv.log('No host or port received for relation: {}.'.format(rel))
            continue
        hosts.append("http://{}:{}".format(es_host, port))
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'elasticsearch')
    if hosts:
        context = {"servers": json.dumps(hosts)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "elasticsearch")
        hookenv.log("Updating {} plugin config file".format('elasticsearch'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.elasticsearch.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
        remove_state('plugins.elasticsearch.configured')
Example #7
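Renders the Telegraf inputs.mongodb stanza from each mongodb relation's private address and optional port.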
def mongodb_input(mongodb):
    template = """
[[inputs.mongodb]]
  servers = {{ servers }}
"""
    rels = hookenv.relations_of_type('mongodb')
    mongo_addresses = []
    for rel in rels:
        addr = rel['private-address']
        port = rel.get('port', None)
        if port:
            mongo_address = '{}:{}'.format(addr, port)
        else:
            mongo_address = addr
        mongo_addresses.append(mongo_address)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'mongodb')
    if mongo_addresses:
        context = {"servers": json.dumps(mongo_addresses)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "mongodb")
        hookenv.log("Updating {} plugin config file".format('mongodb'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.mongodb.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #8
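Publishes an Apache server-status vhost back over the relation, then points the Telegraf inputs.apache plugin at each unit's /server-status URL.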
def apache_input(apache):
    template = """
[[inputs.apache]]
  urls = {{ urls }}
"""
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'apache')
    port = '8080'
    vhost = render(source='apache-server-status.tmpl',
                   templates_dir=get_templates_dir(),
                   target=None,
                   context={'port': port})
    relation_info = {"ports": port,
                     "domain": "apache-status",
                     "enabled": True,
                     "site_config": vhost,
                     "site_modules": "status"}
    urls = []
    rels = hookenv.relations_of_type('apache')
    for rel in rels:
        hookenv.relation_set(rel['__relid__'], relation_settings=relation_info)
        addr = rel['private-address']
        url = 'http://{}:{}/server-status?auto'.format(addr, port)
        urls.append(url)
    if urls:
        context = {"urls": json.dumps(urls)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "apache")
        hookenv.log("Updating {} plugin config file".format('apache'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.apache.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #9
def exec_input_departed():
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'exec')
    rels = hookenv.relations_of_type('exec')
    if not rels:
        remove_state('plugins.exec.configured')
        if os.path.exists(config_path):
            os.unlink(config_path)
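Example #10
A close variant of Example #4 that returns a slightly smaller context (no user_agent or hypervisor_mapping keys).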
    def __call__(self):
        hookenv.log("Generating template ctxt for simplestreams-image-service")
        config = hookenv.config()

        modify_hook_scripts = []
        image_modifiers = hookenv.relations_of_type('image-modifier')
        if len(image_modifiers) > 1:
            raise MultipleImageModifierSubordinatesIsNotSupported()

        if len(image_modifiers) == 1:
            im = image_modifiers[0]
            try:
                modify_hook_scripts.append(im['script-path'])

            except KeyError as ke:
                hookenv.log('relation {} yielded '
                            'exception {} - ignoring.'.format(repr(im),
                                                              repr(ke)))

        # default no-op so that None still means "missing" for config
        # validation (see elsewhere)
        if len(modify_hook_scripts) == 0:
            modify_hook_scripts.append('/bin/true')

        return dict(mirror_list=config['mirror_list'],
                    modify_hook_scripts=', '.join(modify_hook_scripts),
                    name_prefix=config['name_prefix'],
                    content_id_template=config['content_id_template'],
                    use_swift=config['use_swift'],
                    region=config['region'],
                    cloud_name=config['cloud_name'])
Example #11
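Writes the Telegraf InfluxDB output config, taking endpoints and credentials from the first complete entries on the influxdb-api relation.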
def influxdb_api_output(influxdb):
    required_keys = ['hostname', 'port', 'user', 'password']
    rels = hookenv.relations_of_type('influxdb-api')
    endpoints = []
    user = None
    password = None
    for rel in rels:
        if all([rel.get(key) for key in required_keys]):
            endpoints.append("http://{}:{}".format(rel['hostname'], rel['port']))
            if user is None:
                user = rel['user']
            if password is None:
                password = rel['password']
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'influxdb-api')
    if endpoints:
        hookenv.log("Updating {} plugin config file".format('influxdb-api'))
        content = render(source='influxdb-api.conf.tmpl', target=None,
                         templates_dir=get_templates_dir(),
                         context={'urls': json.dumps(endpoints),
                                  'username': '{}'.format(user),
                                  'password': '{}'.format(password)})
        extra_opts = render_extra_options("outputs", "influxdb")
        host.write_file(config_path, '\n'.join([content, extra_opts]).encode('utf-8'))
        set_state('plugins.influxdb-api.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #12
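Scans every 'requires' relation type for a unit whose private address matches this machine and returns its unit name.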
def get_remote_unit_name():
    for rel_type in hookenv.metadata()['requires'].keys():
        rels = hookenv.relations_of_type(rel_type)
        if rels and len(rels) >= 1:
            rel = rels[0]
            if rel['private-address'] == hookenv.unit_private_ip():
                return rel['__unit__']
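Example #13
Likely from the swift-storage charm: a config-changed hook that re-runs storage relation joins, refreshes NRPE checks when the subordinate is related, and applies sysctl settings.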
def config_changed():
    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        assert_charm_supports_ipv6()

    ensure_swift_directories()
    setup_rsync()

    if not config('action-managed-upgrade') and \
            openstack_upgrade_available('swift'):
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(configs=CONFIGS)

    setup_storage()

    for rid in relation_ids('swift-storage'):
        swift_storage_relation_joined(rid=rid)

    CONFIGS.write_all()

    save_script_rc()
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-swift-storage-charm.conf')

    add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
Example #14
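A ceph-mon style config-changed hook: the leader generates the fsid and monitor secret unless no-bootstrap is set, non-leaders wait, and a single-node cluster is bootstrapped when monitor-count is 1.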
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                if config('fsid'):
                    fsid = config('fsid')
                else:
                    fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                status_set('maintenance', 'Creating FSID and Monitor Secret')
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                log("Settings for the cluster are: {}".format(opts))
                leader_set(opts)
        elif cfg.changed('no-bootstrap') and \
                is_relation_made('bootstrap-source'):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()
    elif leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()
Example #15
def prometheus_client_departed():
    hookenv.log("prometheus-client relation not available")
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'prometheus-client')
    rels = hookenv.relations_of_type('prometheus-client')
    if not rels and os.path.exists(config_path):
        hookenv.log("Deleting {} plugin config file".format('prometheus-client'))
        os.unlink(config_path)
        remove_state('plugins.prometheus-client.configured')
Example #16
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        if 'nagios_hostname' in rel:
            return rel['nagios_hostname']
Example #17
def mongos_relation_broken():
    config_servers = load_config_servers(default_mongos_list)
    for member in relations_of_type('mongos'):
        hostname = relation_get('hostname', member)
        port = relation_get('port', member)
        if '%s:%s' % (hostname, port) in config_servers:
            config_servers.remove('%s:%s' % (hostname, port))

    update_file(default_mongos_list, '\n'.join(config_servers))
Example #18
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        if 'nagios_hostname' in rel:
            return rel['nagios_hostname']
Example #19
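Registers MySQL process and connectivity checks with NRPE, using the nagios_hostname published on the nrpe-external-master relation when available.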
def update_nrpe_checks():
    log("Refreshing nrpe checks")
    # Find out if nrpe set nagios_hostname
    hostname = None
    for rel in relations_of_type("nrpe-external-master"):
        if "nagios_hostname" in rel:
            hostname = rel["nagios_hostname"]
            break
    nrpe = NRPE(hostname=hostname)
    nrpe.add_check(shortname="mysql_proc", description="Check MySQL process", check_cmd="check_procs -c 1:1 -C mysqld")
    nrpe.add_check(
        shortname="mysql",
        description="Check MySQL connectivity",
        check_cmd="check_mysql -u nagios -p {}".format(nagios_password()),
    )
    nrpe.write()
Example #20
def update_nrpe_checks():
    log('Refreshing nrpe checks')
    # Find out if nrpe set nagios_hostname
    hostname = None
    for rel in relations_of_type('nrpe-external-master'):
        if 'nagios_hostname' in rel:
            hostname = rel['nagios_hostname']
            break
    nrpe = NRPE(hostname=hostname)
    nrpe.add_check(shortname='mysql_proc',
                   description='Check MySQL process',
                   check_cmd='check_procs -c 1:1 -C mysqld')
    nrpe.add_check(shortname='mysql',
                   description='Check MySQL connectivity',
                   check_cmd='check_mysql -u nagios -p {}'.format(
                       nagios_password()))
    nrpe.write()
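Example #21
Another ceph-mon config-changed variant: here the leader always creates the fsid and monitor secret, and an AppArmor profile is installed at the end.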
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    # Check if an upgrade was requested
    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not leader_get('fsid') or not leader_get('monitor-secret'):
            if config('fsid'):
                fsid = config('fsid')
            else:
                fsid = "{}".format(uuid.uuid1())
            if config('monitor-secret'):
                mon_secret = config('monitor-secret')
            else:
                mon_secret = "{}".format(ceph.generate_monitor_secret())
            status_set('maintenance', 'Creating FSID and Monitor Secret')
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            log("Settings for the cluster are: {}".format(opts))
            leader_set(opts)
    else:
        if leader_get('fsid') is None or leader_get('monitor-secret') is None:
            log('still waiting for leader to setup keys')
            status_set('waiting', 'Waiting for leader to setup keys')
            sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()
    install_apparmor_profile()
Example #22
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    # Pre-flight checks
    if not config('fsid'):
        log('No fsid supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if not config('monitor-secret'):
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')

    emit_cephconf()

    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    osd_journal = get_osd_journal()
    if (osd_journal and not os.path.exists(JOURNAL_ZAPPED)
            and os.path.exists(osd_journal)):
        ceph.zap_disk(osd_journal)
        with open(JOURNAL_ZAPPED, 'w') as zapped:
            zapped.write('DONE')

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()

    storage_changed()

    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
Example #23
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    # Pre-flight checks
    if not config('fsid'):
        log('No fsid supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if not config('monitor-secret'):
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')

    emit_cephconf()

    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    osd_journal = get_osd_journal()
    if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and
            os.path.exists(osd_journal)):
        ceph.zap_disk(osd_journal)
        with open(JOURNAL_ZAPPED, 'w') as zapped:
            zapped.write('DONE')

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()

    storage_changed()

    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
Example #24
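A unit test documenting the shape of relations_of_type(): each relation dict comes back augmented with a __relid__ key.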
    def test_gets_relations_for_type(self, relations_for_id, relation_ids,
                                     relation_type, in_relation_hook):
        reltype = 'foo-type'
        relids = [123, 234]
        relations = [
            [
                {
                    'foo': 'bar'
                },
                {
                    'foo2': 'bar2'
                },
            ],
            [
                {
                    'FOO': 'BAR'
                },
                {
                    'FOO2': 'BAR2'
                },
            ],
        ]
        is_in_relation = True

        relation_type.return_value = reltype
        relation_ids.return_value = relids
        relations_for_id.side_effect = relations
        in_relation_hook.return_value = is_in_relation

        result = hookenv.relations_of_type()

        self.assertEqual(result[0]['__relid__'], 123)
        self.assertEqual(result[0]['foo'], 'bar')
        self.assertEqual(result[1]['__relid__'], 123)
        self.assertEqual(result[1]['foo2'], 'bar2')
        self.assertEqual(result[2]['__relid__'], 234)
        self.assertEqual(result[2]['FOO'], 'BAR')
        self.assertEqual(result[3]['__relid__'], 234)
        self.assertEqual(result[3]['FOO2'], 'BAR2')
        relation_ids.assert_called_with(reltype)
        self.assertEqual(relations_for_id.mock_calls, [
            call(123),
            call(234),
        ])
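All of these snippets share one access pattern; as the test above shows, each dict returned by relations_of_type() holds the remote unit's settings plus synthetic __relid__ and __unit__ keys. A minimal sketch of that pattern (the 'db' relation name and the 'host'/'port' keys are hypothetical):

from charmhelpers.core import hookenv

def log_db_endpoints():
    # 'db' is a hypothetical relation name; 'host' and 'port' are
    # whatever keys the remote charm actually publishes.
    for rel in hookenv.relations_of_type('db'):
        host = rel.get('host') or rel.get('private-address')
        port = rel.get('port')
        if not host or not port:
            continue  # relation not ready yet
        hookenv.log('db endpoint {}:{} (from {})'.format(
            host, port, rel['__unit__']))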
Example #25
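Builds a template context from relation data, exposing both the current relation and a per-relation-name structure whose entries carry __relid__ and __unit__ keys.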
def update_relations(context, namespace_separator=':'):
    """Update the context with the relation data."""
    # Add any relation data prefixed with the relation type.
    relation_type = hookenv.relation_type()
    relations = []
    context['current_relation'] = {}
    if relation_type is not None:
        relation_data = hookenv.relation_get()
        context['current_relation'] = relation_data
        # Deprecated: the following use of relation data as keys
        # directly in the context will be removed.
        relation_data = dict(
            ("{relation_type}{namespace_separator}{key}".format(
                relation_type=relation_type,
                key=key,
                namespace_separator=namespace_separator), val)
            for key, val in relation_data.items())
        relation_data = dict_keys_without_hyphens(relation_data)
        context.update(relation_data)
        relations = hookenv.relations_of_type(relation_type)
        relations = [dict_keys_without_hyphens(rel) for rel in relations]

    context['relations_full'] = hookenv.relations()

    # the hookenv.relations() data structure is effectively unusable in
    # templates and other contexts when trying to access relation data other
    # than the current relation. So provide a more useful structure that works
    # with any hook.
    local_unit = hookenv.local_unit()
    relations = {}
    for rname, rids in context['relations_full'].items():
        relations[rname] = []
        for rid, rdata in rids.items():
            data = rdata.copy()
            if local_unit in rdata:
                data.pop(local_unit)
            for unit_name, rel_data in data.items():
                new_data = {'__relid__': rid, '__unit__': unit_name}
                new_data.update(rel_data)
                relations[rname].append(new_data)
    context['relations'] = relations
Example #26
def postgresql_input(db):
    template = """
[[inputs.postgresql]]
  address = "host={{host}} user={{user}} password={{password}} dbname={{database}}"
"""
    required_keys = ['host', 'user', 'password', 'database']
    rels = hookenv.relations_of_type('postgresql')
    inputs = []
    for rel in rels:
        if all([rel.get(key) for key in required_keys]) \
                and hookenv.local_unit() in rel.get('allowed-units') \
                and rel['private-address'] == hookenv.unit_private_ip():
            context = rel.copy()
            inputs.append(render_template(template, context) +
                          render_extra_options("inputs", "postgresql"))
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'postgresql')
    if inputs:
        hookenv.log("Updating {} plugin config file".format('postgresql'))
        host.write_file(config_path, '\n'.join(inputs).encode('utf-8'))
        set_state('plugins.postgresql.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #27
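Assembles authenticated HAProxy endpoints, normalising Juju's string booleans and substituting localhost for the local unit's own address.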
def haproxy_input(haproxy):
    template = """
[[inputs.haproxy]]
  servers = {{ servers }}
"""
    rels = hookenv.relations_of_type('haproxy')
    haproxy_addresses = []
    for rel in rels:
        enabled = rel.get('enabled', False)
        # Juju gives us a string instead of a boolean, fix it
        if isinstance(enabled, str):
            if enabled in ['y', 'yes', 'true', 't', 'on', 'True']:
                enabled = True
            else:
                enabled = False
        if not enabled:
            continue
        addr = rel['private-address']
        if addr == hookenv.unit_private_ip():
            addr = "localhost"
        port = rel['port']
        user = rel['user']
        password = rel.get('password', None)
        userpass = user
        if password:
            userpass += ":{}".format(password)
        haproxy_address = 'http://{}@{}:{}'.format(userpass, addr, port)
        haproxy_addresses.append(haproxy_address)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'haproxy')
    if haproxy_addresses:
        input_config = render_template(template, {"servers": json.dumps(haproxy_addresses)}) + \
            render_extra_options("inputs", "haproxy")
        hookenv.log("Updating {} plugin config file".format('haproxy'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.haproxy.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #28
def memcached_input(memcache):
    template = """
[[inputs.memcached]]
  servers = {{ servers }}
"""
    required_keys = ['host', 'port']
    rels = hookenv.relations_of_type('memcached')
    addresses = []
    for rel in rels:
        if all([rel.get(key) for key in required_keys]):
            addr = rel['host']
            port = rel['port']
            address = '{}:{}'.format(addr, port)
            addresses.append(address)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'memcached')
    if addresses:
        context = {"servers": json.dumps(addresses)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "memcached")
        hookenv.log("Updating {} plugin config file".format('memcached'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.memcached.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
Example #29
def config_changed():
    if config('enable-firewall'):
        initialize_ufw()
    else:
        ufw.disable()

    if config('ephemeral-unmount'):
        umount(config('ephemeral-unmount'), persist=True)

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        assert_charm_supports_ipv6()

    ensure_swift_directories()
    setup_rsync()

    if not config('action-managed-upgrade') and \
            openstack_upgrade_available('swift'):
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(configs=CONFIGS)

    install_vaultlocker()

    configure_storage()

    CONFIGS.write_all()

    save_script_rc()
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-swift-storage-charm.conf')

    add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
Example #30
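A WSGI layer start handler: installs pip dependencies, wires DATABASE_URL from a postgres or mongo relation, renders a systemd unit for gunicorn, and polls until the port responds.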
def start_application_service():
    # Remove source.available to allow it to be re-triggered
    remove_state('wsgi.source.available')
    remove_state('wsgi.available')

    # Install application dependencies
    status_set('maintenance', '[wsgi] Installing application dependencies')

    cache_dir = config('pip_cache_dir')

    if os.path.isfile('requirements.txt'):
        if cache_dir:
            log('[wsgi] Installing pip dependencies from {}'.format(cache_dir))
            subprocess.check_call([
                'pip3',
                'install',
                '--no-index',
                '--find-links',
                cache_dir,
                '--requirement',
                'requirements.txt',
            ],
                                  cwd=layer_config['application_root'],
                                  env=dict(LC_ALL='C.UTF-8',
                                           **get_env(env_file)))
        else:
            log('[wsgi] Installing pip dependencies from PyPi')
            subprocess.check_call(
                ['pip3', 'install', '--requirement', 'requirements.txt'],
                cwd=layer_config['application_root'],
                env=dict(LC_ALL='C.UTF-8', **get_env(env_file)))

    set_state('wsgi.ready')

    # Check for a database connection
    log('[wsgi] Checking for database connection')
    postgres_relations = relations_of_type('postgres')
    mongo_relations = relations_of_type('mongo')
    db_relation = None

    if postgres_relations:
        db_relation = postgres_relations[0]
        db_scheme = "postgresql"
    elif mongo_relations:
        db_relation = mongo_relations[0]
        db_scheme = "mongodb"

    if db_relation:
        db_host = db_relation.get('host') or db_relation.get('hostname')
        db_port = db_relation.get('port')
        log('[wsgi] Using database at {}:{}'.format(db_host, db_port))
        database_url = urlunparse((db_scheme,
                                   build_url_host(db_host, db_port,
                                                  db_relation.get('user'),
                                                  db_relation.get('password')),
                                   db_relation.get('database',
                                                   ''), None, None, None))
        set_env_values(env_file, {'DATABASE_URL': database_url})

        provision_command = layer_config.get('provision_command')

        if provision_command:
            status_set('maintenance', '[wsgi] Provisioning database')
            subprocess.check_call(provision_command.split(),
                                  cwd=layer_config['application_root'],
                                  env=get_env(env_file),
                                  preexec_fn=demote(
                                      get_user(layer_config['username'])))
    else:
        log('[wsgi] No database attached')
        delete_env_value(env_file, 'DATABASE_URL')

    # Open the port, ready
    status_set('maintenance', '[wsgi] Opening port {}'.format(config('port')))
    log('[wsgi] Opening port {}'.format(config('port')))
    open_port(config('port'))

    # Configure circus daemon to run gunicorn
    service_name = 'gunicorn3.service'
    service_file = '/etc/systemd/system/{}'.format(service_name)
    log('[wsgi] Writing systemd config to {}'.format(service_file))
    status_set('maintenance', '[wsgi] Preparing daemon')
    render(source='{}.j2'.format(service_name),
           target=service_file,
           perms=0o644,
           context={
               'application_root': layer_config['application_root'],
               'env_file': env_file,
               'wsgi_module': config('wsgi_module'),
               'user': layer_config['username'],
               'group': layer_config['username'],
               'port': config('port'),
               'env': get_env(env_file)
           })
    subprocess.check_call(['systemctl', 'daemon-reload'])

    if service_running(service_name):
        log('[wsgi] Reloading {}'.format(service_name))
        service_reload(service_name)
    else:
        log('[wsgi] Starting {}'.format(service_name))
        service_start(service_name)

    # Try up to 10 times to check if the service started
    service_responding = False
    for attempt in range(0, 10):
        log('[wsgi] Waiting for service on port {} (attempt {})'.format(
            config('port'), attempt))
        if service_running(service_name) and is_port_open(config('port')):
            service_responding = True
            break
        sleep(6)

    if service_responding:
        log('[wsgi] Service responded on port {}'.format(config('port')))
        status_set('active',
                   '[wsgi] Service started on port {}'.format(config('port')))
        set_state('wsgi.available')
    else:
        raise socket.error('Service not responding')
Example #31
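Maintains ufw rules for a Cassandra-style cluster: client ports stay open to all, while replication ports are granted only to cluster peers and database-admin units.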
def configure_firewall():
    '''Configure firewall rules using ufw.

    This is primarily to block access to the replication and JMX ports,
    as juju's default port access controls are not strict enough and
    allow access to the entire environment.
    '''
    config = hookenv.config()
    ufw.enable(soft_fail=True)

    # Enable SSH from anywhere, relying on Juju and external firewalls
    # to control access.
    ufw.service('ssh', 'open')
    ufw.service('nrpe', 'open')  # Also NRPE for nagios checks.

    # Clients need client access. These protocols are configured to
    # require authentication.
    client_keys = ['native_transport_port', 'rpc_port']
    client_ports = [config[key] for key in client_keys]

    # Peers need replication access. These protocols do not
    # require authentication, so firewall them from other nodes.
    peer_ports = [config['storage_port'], config['ssl_storage_port']]

    # Enable client access from anywhere. Juju and external firewalls
    # can still restrict this further, of course (i.e. 'juju expose').
    for key in client_keys:
        if config.changed(key) and config.previous(key) is not None:
            # First close old ports. We use this order in the unlikely case
            # someone is trying to swap the native and Thrift ports.
            ufw.service(config.previous(key), 'close')
    for port in client_ports:
        # Then open or close the configured ports.
        ufw.service(port, 'open')

    desired_rules = set()  # ufw.grant_access/remove_access commands.

    # Rules for peers
    for relinfo in hookenv.relations_of_type('cluster'):
        if relinfo['private-address']:
            for port in peer_ports:
                desired_rules.add((relinfo['private-address'], 'any', port))
    # Rules for admin connections. We allow database-admin relations access
    # to the cluster communication ports so that tools like sstableloader
    # can run.
    for relinfo in hookenv.relations_of_type('database-admin'):
        if relinfo['private-address']:
            for port in peer_ports:
                desired_rules.add((relinfo['private-address'], 'any', port))

    previous_rules = set(tuple(rule) for rule in config.get('ufw_rules', []))

    # Close any rules previously opened that are no longer desired.
    for rule in sorted(list(previous_rules - desired_rules)):
        ufw.revoke_access(*rule)

    # Open all the desired rules.
    for rule in sorted(list(desired_rules)):
        ufw.grant_access(*rule)

    # Store our rules for next time. Note that this is inherently racy -
    # this value is only persisted if the hook exits cleanly. If the
    # hook fails, then someone changes port configuration or IP
    # addresses change, then the failed hook retried, we can lose track
    # of previously granted rules and they will never be revoked. It is
    # impossible to remove this race entirely, so we stick with this
    # simple approach.
    config['ufw_rules'] = list(desired_rules)  # A list because JSON.
Example #32
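Merges reverseproxy relation data into the haproxy services dict, adding a backend server entry for each remote unit.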
def create_services():
    services_dict = get_config_services()

    # Augment services_dict with service definitions from relation data.
    relation_data = relations_of_type("reverseproxy")
    # Handle relations which specify their own services clauses
    for relation_info in relation_data:
        if "services" in relation_info:
            services_dict = parse_services_yaml(services_dict,
                                                relation_info['services'])

    if len(services_dict) == 0:
        log("No services configured, exiting.")
        return

    for relation_info in relation_data:
        unit = relation_info['__unit__']

        # Skip entries that specify their own services clauses, this was
        # handled earlier.
        if "services" in relation_info:
            log("Unit '%s' overrides 'services', "
                "skipping further processing." % unit)
            continue

        juju_service_name = unit.rpartition('/')[0]

        relation_ok = True
        for required in ("port", "private-address"):
            if required not in relation_info:
                log("No %s in relation data for '%s', skipping." %
                    (required, unit))
                relation_ok = False
                break

        if not relation_ok:
            continue

        # Mandatory switches ( private-address, port )
        host = relation_info['private-address']
        port = relation_info['port']
        server_name = ("%s-%s" % (unit.replace("/", "-"), port))

        # Optional switches ( service_name, sitenames )
        service_names = set()
        if 'service_name' in relation_info:
            if relation_info['service_name'] in services_dict:
                service_names.add(relation_info['service_name'])
            else:
                log("Service '%s' does not exist." %
                    relation_info['service_name'])
                continue

        if 'sitenames' in relation_info:
            sitenames = relation_info['sitenames'].split()
            for sitename in sitenames:
                if sitename in services_dict:
                    service_names.add(sitename)

        if juju_service_name + "_service" in services_dict:
            service_names.add(juju_service_name + "_service")

        if juju_service_name in services_dict:
            service_names.add(juju_service_name)

        if not service_names:
            service_names.add(services_dict[None]["service_name"])

        for service_name in service_names:
            service = services_dict[service_name]

            # Add the server entries
            servers = service.setdefault("servers", [])
            servers.append((server_name, host, port,
                            services_dict[service_name].get(
                                'server_options', [])))

    has_servers = False
    for service_name, service in services_dict.iteritems():
        if service.get("servers", []):
            has_servers = True

    if not has_servers:
        log("No backend servers, exiting.")
        return

    del services_dict[None]
    services_dict = ensure_service_host_port(services_dict)
    services_dict = apply_peer_config(services_dict)
    write_service_config(services_dict)
    return services_dict
Example #33
def maybe_refresh_nrpe_files():
    """if the nrpe-external-master relation exists then refresh the nrpe
    configuration -- this is called during a charm upgrade
    """
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
Example #34
def maybe_refresh_nrpe_files():
    """if the nrpe-external-master relation exists then refresh the nrpe
    configuration -- this is called during a charm upgrade
    """
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
Example #35
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
    if config('enable-dashboard') and cmp_pkgrevno('ceph', '14.2.0') >= 0:
        apt_install(packages=filter_installed_packages(['ceph-mgr-dashboard']))

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                if config('fsid'):
                    fsid = config('fsid')
                else:
                    fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                try:
                    leader_set(opts)
                    status_set('maintenance',
                               'Created FSID and Monitor Secret')
                    log("Settings for the cluster are: {}".format(opts))
                except Exception as e:
                    # we're probably not the leader; an exception occurred,
                    # so let's log it anyway.
                    log("leader_set failed: {}".format(str(e)))
        elif (cfg.changed('no-bootstrap')
              and is_relation_made('bootstrap-source')):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()

        # This will only ensure that we are enabled if the 'pg-autotune' option
        # is explicitly set to 'true', and not if it is 'auto' or 'false'
        if (config('pg-autotune') == 'true'
                and cmp_pkgrevno('ceph', '14.2.0') >= 0):
            # The return value of the enable_module call will tell us if the
            # module was already enabled, in which case, we don't need to
            # re-configure the already configured pools
            if mgr_enable_module('pg_autoscaler'):
                ceph.monitor_key_set('admin', 'autotune', 'true')
                for pool in ceph.list_pools():
                    enable_pg_autoscale('admin', pool)
        if (config('enable-dashboard')
                and cmp_pkgrevno('ceph', '14.2.0') >= 0):
            log("enable-dashboard: {}".format(str(config('enable-dashboard'))))
            if mgr_enable_module('dashboard'):
                pass
            log("configure-dashboard")
            configure_dashboard()
    # unconditionally verify that the fsid and monitor-secret are set now
    # otherwise we exit until a leader does this.
    if leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        # the following call raises an exception if it can't add the keyring
        try:
            ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
            log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
            return
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        ceph.create_keyrings()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()

    # Update client relations
    notify_client()
Example #36
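Folds peer-published service definitions into a shared listen stanza: each original service is remapped to a _be backend on port+1, and all but the first peer server become backups.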
def apply_peer_config(services_dict):
    peer_data = relations_of_type("peer")

    peer_services = {}
    for relation_info in peer_data:
        unit_name = relation_info["__unit__"]
        peer_services_data = relation_info.get("all_services")
        if peer_services_data is None:
            continue
        service_data = yaml.safe_load(peer_services_data)
        for service in service_data:
            service_name = service["service_name"]
            if service_name in services_dict:
                peer_service = peer_services.setdefault(service_name, {})
                peer_service["service_name"] = service_name
                peer_service["service_host"] = service["service_host"]
                peer_service["service_port"] = service["service_port"]
                peer_service["service_options"] = [
                    "balance leastconn", "mode tcp", "option tcplog"
                ]
                servers = peer_service.setdefault("servers", [])
                servers.append(
                    (unit_name.replace("/",
                                       "-"), relation_info["private-address"],
                     service["service_port"] + 1, ["check"]))

    if not peer_services:
        return services_dict

    unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
    private_address = unit_get("private-address")
    for service_name, peer_service in peer_services.iteritems():
        original_service = services_dict[service_name]

        # If the original service has timeout settings, copy them over to the
        # peer service.
        for option in original_service.get("service_options", ()):
            if "timeout" in option:
                peer_service["service_options"].append(option)

        servers = peer_service["servers"]
        # Add ourselves to the list of servers for the peer listen stanza.
        servers.append((unit_name, private_address,
                        original_service["service_port"] + 1, ["check"]))

        # Make all but the first server in the peer listen stanza a backup
        # server.
        servers.sort()
        for server in servers[1:]:
            server[3].append("backup")

        # Remap original service port, will now be used by peer listen stanza.
        original_service["service_port"] += 1

        # Remap original service to a new name, stuff peer listen stanza into
        # its place.
        be_service = service_name + "_be"
        original_service["service_name"] = be_service
        services_dict[be_service] = original_service
        services_dict[service_name] = peer_service

    return services_dict
Example #37
def apply_peer_config(services_dict):
    peer_data = relations_of_type("peer")

    peer_services = {}
    for relation_info in peer_data:
        unit_name = relation_info["__unit__"]
        peer_services_data = relation_info.get("all_services")
        if peer_services_data is None:
            continue
        service_data = yaml.safe_load(peer_services_data)
        for service in service_data:
            service_name = service["service_name"]
            if service_name in services_dict:
                peer_service = peer_services.setdefault(service_name, {})
                peer_service["service_name"] = service_name
                peer_service["service_host"] = service["service_host"]
                peer_service["service_port"] = service["service_port"]
                peer_service["service_options"] = ["balance leastconn",
                                                   "mode tcp",
                                                   "option tcplog"]
                servers = peer_service.setdefault("servers", [])
                servers.append((unit_name.replace("/", "-"),
                                relation_info["private-address"],
                                service["service_port"] + 1, ["check"]))

    if not peer_services:
        return services_dict

    unit_name = os.environ["JUJU_UNIT_NAME"].replace("/", "-")
    private_address = unit_get("private-address")
    for service_name, peer_service in peer_services.iteritems():
        original_service = services_dict[service_name]

        # If the original service has timeout settings, copy them over to the
        # peer service.
        for option in original_service.get("service_options", ()):
            if "timeout" in option:
                peer_service["service_options"].append(option)

        servers = peer_service["servers"]
        # Add ourselves to the list of servers for the peer listen stanza.
        servers.append((unit_name, private_address,
                        original_service["service_port"] + 1,
                        ["check"]))

        # Make all but the first server in the peer listen stanza a backup
        # server.
        servers.sort()
        for server in servers[1:]:
            server[3].append("backup")

        # Remap original service port, will now be used by peer listen stanza.
        original_service["service_port"] += 1

        # Remap original service to a new name, stuff peer listen stanza into
        # its place.
        be_service = service_name + "_be"
        original_service["service_name"] = be_service
        services_dict[be_service] = original_service
        services_dict[service_name] = peer_service

    return services_dict
Example #38
def create_services():
    services_dict = get_config_services()

    # Augment services_dict with service definitions from relation data.
    relation_data = relations_of_type("reverseproxy")

    # Handle relations which specify their own services clauses
    for relation_info in relation_data:
        if "services" in relation_info:
            services_dict = parse_services_yaml(services_dict,
                                                relation_info['services'])

    if len(services_dict) == 0:
        log("No services configured, exiting.")
        return

    for relation_info in relation_data:
        unit = relation_info['__unit__']

        # Skip entries that specify their own services clauses, this was
        # handled earlier.
        if "services" in relation_info:
            log("Unit '%s' overrides 'services', "
                "skipping further processing." % unit)
            continue

        juju_service_name = unit.rpartition('/')[0]

        relation_ok = True
        for required in ("port", "private-address"):
            if required not in relation_info:
                log("No %s in relation data for '%s', skipping." %
                    (required, unit))
                relation_ok = False
                break

        if not relation_ok:
            continue

        # Mandatory switches ( private-address, port )
        host = relation_info['private-address']
        port = relation_info['port']
        server_name = ("%s-%s" % (unit.replace("/", "-"), port))

        # Optional switches ( service_name, sitenames )
        service_names = set()
        if 'service_name' in relation_info:
            if relation_info['service_name'] in services_dict:
                service_names.add(relation_info['service_name'])
            else:
                log("Service '%s' does not exist." %
                    relation_info['service_name'])
                continue

        if 'sitenames' in relation_info:
            sitenames = relation_info['sitenames'].split()
            for sitename in sitenames:
                if sitename in services_dict:
                    service_names.add(sitename)

        if juju_service_name + "_service" in services_dict:
            service_names.add(juju_service_name + "_service")

        if juju_service_name in services_dict:
            service_names.add(juju_service_name)

        if not service_names:
            service_names.add(services_dict[None]["service_name"])

        for service_name in service_names:
            service = services_dict[service_name]

            # Add the server entries
            servers = service.setdefault("servers", [])
            servers.append(
                (server_name, host, port,
                 services_dict[service_name].get('server_options', [])))

    has_servers = False
    for service_name, service in services_dict.iteritems():
        if service.get("servers", []):
            has_servers = True

    if not has_servers:
        log("No backend servers, exiting.")
        return

    del services_dict[None]
    services_dict = ensure_service_host_port(services_dict)
    services_dict = apply_peer_config(services_dict)
    write_service_config(services_dict)
    return services_dict
Example #39
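Uses the Neutron/Quantum agent scheduler API to find dead DHCP and L3 agents and redistribute their networks and routers across live agents running on cluster peers.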
def reassign_agent_resources():
    """ Use agent scheduler API to detect down agents and re-schedule """
    env = NetworkServiceContext()()
    if not env:
        log("Unable to re-assign resources at this time")
        return
    try:
        from quantumclient.v2_0 import client
    except ImportError:
        """ Try to import neutronclient instead for havana+ """
        from neutronclient.v2_0 import client

    auth_url = "%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0" % env
    quantum = client.Client(
        username=env["service_username"],
        password=env["service_password"],
        tenant_name=env["service_tenant"],
        auth_url=auth_url,
        region_name=env["region"],
    )

    partner_gateways = [unit_private_ip().split(".")[0]]
    for partner_gateway in relations_of_type(reltype="cluster"):
        gateway_hostname = get_hostname(partner_gateway["private-address"])
        partner_gateways.append(gateway_hostname.partition(".")[0])

    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents["agents"]:
        if not agent["alive"]:
            log("DHCP Agent %s down" % agent["id"])
            for network in quantum.list_networks_on_dhcp_agent(agent["id"])["networks"]:
                networks[network["id"]] = agent["id"]
        else:
            if agent["host"].partition(".")[0] in partner_gateways:
                dhcp_agents.append(agent["id"])

    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents["agents"]:
        if not agent["alive"]:
            log("L3 Agent %s down" % agent["id"])
            for router in quantum.list_routers_on_l3_agent(agent["id"])["routers"]:
                routers[router["id"]] = agent["id"]
        else:
            if agent["host"].split(".")[0] in partner_gateways:
                l3_agents.append(agent["id"])

    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
        log(
            "Unable to relocate resources, there are %s dhcp_agents and "
            "%s l3_agents in this cluster"
            % (len(dhcp_agents), len(l3_agents))
        )
        return

    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log("Moving router %s from %s to %s" % (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id], router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent], body={"router_id": router_id})
        index += 1

    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log("Moving network %s from %s to %s" % (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id], network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent], body={"network_id": network_id})
        index += 1
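
The reassignment above is a plain round-robin over the surviving agents. A standalone sketch of just that pattern, with invented resource and agent ids:

# Orphaned resources, mapped to the dead agent that was hosting them.
orphaned = {'router-a': 'dead-agent', 'router-b': 'dead-agent',
            'router-c': 'dead-agent'}
alive_agents = ['agent-1', 'agent-2']
for index, resource_id in enumerate(orphaned):
    target = alive_agents[index % len(alive_agents)]
    print('Moving %s from %s to %s'
          % (resource_id, orphaned[resource_id], target))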
Example No. 40
def reassign_agent_resources():
    ''' Use agent scheduler API to detect down agents and re-schedule '''
    env = NetworkServiceContext()()
    if not env:
        log('Unable to re-assign resources at this time')
        return
    try:
        from quantumclient.v2_0 import client
    except ImportError:
        # Try to import neutronclient instead for havana+
        from neutronclient.v2_0 import client

    auth_url = '%(auth_protocol)s://%(keystone_host)s:%(auth_port)s/v2.0' % env
    quantum = client.Client(username=env['service_username'],
                            password=env['service_password'],
                            tenant_name=env['service_tenant'],
                            auth_url=auth_url,
                            region_name=env['region'])

    partner_gateways = [unit_private_ip().split('.')[0]]
    for partner_gateway in relations_of_type(reltype='cluster'):
        gateway_hostname = get_hostname(partner_gateway['private-address'])
        partner_gateways.append(gateway_hostname.partition('.')[0])

    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('DHCP Agent %s down' % agent['id'])
            for network in \
                    quantum.list_networks_on_dhcp_agent(
                        agent['id'])['networks']:
                networks[network['id']] = agent['id']
        else:
            if agent['host'].partition('.')[0] in partner_gateways:
                dhcp_agents.append(agent['id'])

    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents['agents']:
        if not agent['alive']:
            log('L3 Agent %s down' % agent['id'])
            for router in \
                    quantum.list_routers_on_l3_agent(
                        agent['id'])['routers']:
                routers[router['id']] = agent['id']
        else:
            if agent['host'].split('.')[0] in partner_gateways:
                l3_agents.append(agent['id'])

    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
        log('Unable to relocate resources, there are %s dhcp_agents and '
            '%s l3_agents in this cluster' % (len(dhcp_agents), len(l3_agents)))
        return

    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log('Moving router %s from %s to %s' %
            (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
                                            router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
                                       body={'router_id': router_id})
        index += 1

    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log('Moving network %s from %s to %s' %
            (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id],
                                               network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent],
                                          body={'network_id': network_id})
        index += 1
Example No. 42
def configure_firewall():
    '''Configure firewall rules using ufw.

    This is primarily to block access to the replication and JMX ports,
    as juju's default port access controls are not strict enough and
    allow access to the entire environment.
    '''
    config = hookenv.config()
    ufw.enable(soft_fail=True)

    # Enable SSH from anywhere, relying on Juju and external firewalls
    # to control access.
    ufw.service('ssh', 'open')
    ufw.service('nrpe', 'open')  # Also NRPE for nagios checks.

    # Clients need client access. These protocols are configured to
    # require authentication.
    client_keys = ['native_transport_port', 'rpc_port']
    client_ports = [config[key] for key in client_keys]

    # Peers need replication access. These protocols do not require
    # authentication, so firewall them from other nodes.
    peer_ports = [config['storage_port'], config['ssl_storage_port']]

    # Enable client access from anywhere. Juju and external firewalls
    # can still restrict this further, of course (e.g. 'juju expose').
    for key in client_keys:
        if config.changed(key) and config.previous(key) is not None:
            # First close old ports. We use this order in the unlikely case
            # someone is trying to swap the native and Thrift ports.
            ufw.service(config.previous(key), 'close')
    for port in client_ports:
        # Then open or close the configured ports.
        ufw.service(port, 'open')

    desired_rules = set()  # ufw.grant_access/remove_access commands.

    # Rules for peers
    for relinfo in hookenv.relations_of_type('cluster'):
        if relinfo['private-address']:
            for port in peer_ports:
                desired_rules.add((relinfo['private-address'], 'any', port))
    # Rules for admin connections. We allow database-admin relations access
    # to the cluster communication ports so that tools like sstableloader
    # can run.
    for relinfo in hookenv.relations_of_type('database-admin'):
        if relinfo['private-address']:
            for port in peer_ports:
                desired_rules.add((relinfo['private-address'], 'any', port))

    previous_rules = set(tuple(rule) for rule in config.get('ufw_rules', []))

    # Close any rules previously opened that are no longer desired.
    for rule in sorted(list(previous_rules - desired_rules)):
        ufw.revoke_access(*rule)

    # Open all the desired rules.
    for rule in sorted(list(desired_rules)):
        ufw.grant_access(*rule)

    # Store our rules for next time. Note that this is inherently racy:
    # the value is only persisted if the hook exits cleanly. If the
    # hook fails, port configuration or IP addresses then change, and
    # the failed hook is retried, we can lose track of previously
    # granted rules and they will never be revoked. It is impossible
    # to remove this race entirely, so we stick with this simple
    # approach.
    config['ufw_rules'] = list(desired_rules)  # A list because JSON.
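
The bookkeeping above amounts to a set-difference diff: revoke whatever was granted on the previous run but is no longer wanted, then (re)grant everything currently desired. A minimal sketch of that idea with made-up addresses and ports:

previous_rules = {('10.0.0.5', 'any', 7000), ('10.0.0.6', 'any', 7000)}
desired_rules = {('10.0.0.5', 'any', 7000), ('10.0.0.9', 'any', 7001)}
for rule in sorted(previous_rules - desired_rules):
    print('revoke %s proto %s port %s' % rule)  # stale, revoke it
for rule in sorted(desired_rules):
    print('grant %s proto %s port %s' % rule)   # granting is idempotent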
Example No. 43
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                try:
                    leader_set(opts)
                    status_set('maintenance',
                               'Created FSID and Monitor Secret')
                    log("Settings for the cluster are: {}".format(opts))
                except Exception as e:
                    # We are probably not the leader and an exception
                    # occurred; log it anyway.
                    log("leader_set failed: {}".format(str(e)))
        elif (cfg.changed('no-bootstrap')
              and is_relation_made('bootstrap-source')):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()
    # Unconditionally verify that the fsid and monitor-secret are set
    # now; otherwise we exit until the leader sets them.
    if leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        # the following call raises an exception if it can't add the keyring
        try:
            ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
            log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
            return
        ceph.wait_for_bootstrap()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()

    # Update client relations
    notify_client()
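
The fsid/monitor-secret handling above is an instance of the common leader-gated bootstrap pattern: only the leader generates and publishes shared secrets via leader_set(), and every unit (the leader included) waits until leader_get() returns them. A condensed sketch of just that gate, assuming the same charmhelpers functions the charm already uses; the real charm also handles no-bootstrap and leader_set() failures:

import uuid

from charmhelpers.core.hookenv import (is_leader, leader_get, leader_set,
                                       status_set)


def ensure_cluster_keys():
    # Leader publishes the shared key once; everyone else just reads it.
    if is_leader() and not leader_get('fsid'):
        leader_set({'fsid': str(uuid.uuid1())})
    if leader_get('fsid') is None:
        status_set('waiting', 'Waiting for leader to setup keys')
        return False  # try again on a later hook invocation
    return True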