Example #1
File: k8s.py Project: mbruzek/layer-k8s
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    files to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context data with SDN data.
    context.update(gather_sdn_data())
    # Add the charm configuration data to the context.
    context.update(hookenv.config())
    if reldata:
        connection_string = reldata.get_connection_string()
        # Define where the etcd tls files will be kept.
        etcd_dir = '/etc/ssl/etcd'
        # Create paths to the etcd client ca, key, and cert file locations.
        ca = os.path.join(etcd_dir, 'client-ca.pem')
        key = os.path.join(etcd_dir, 'client-key.pem')
        cert = os.path.join(etcd_dir, 'client-cert.pem')
        # Save the client credentials (in relation data) to the paths provided.
        reldata.save_client_credentials(key, cert, ca)
        # Update the context so the template has the etcd information.
        context.update({'etcd_dir': etcd_dir,
                        'connection_string': connection_string,
                        'etcd_ca': ca,
                        'etcd_key': key,
                        'etcd_cert': cert})

    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)

    # Update the context with extra values, arch, manifest dir, and private IP.
    context.update({'arch': arch(),
                    'master_address': leader_get('master-address'),
                    'manifest_directory': rendered_manifest_dir,
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    # Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    render('docker-compose.yml', target, context)

    if is_leader():
        # Source: https://github.com/kubernetes/...master/cluster/images/hyperkube  # noqa
        target = os.path.join(rendered_manifest_dir, 'master.json')
        # Render the files/manifests/master.json that contains parameters for
        # the apiserver, controller, and controller-manager
        render('master.json', target, context)
        # Source: ...cluster/addons/dns/skydns-svc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
        # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
        render('kubedns-svc.yaml', target, context)
        # Source: ...cluster/addons/dns/skydns-rc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
        # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
        render('kubedns-rc.yaml', target, context)
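For reference, the render() helper used throughout these examples is charmhelpers.core.templating.render, which loads a Jinja2 template from the charm's templates/ directory and writes the result to the target path. A minimal standalone sketch with a hypothetical context (real charms build it from config and relation data):

from charmhelpers.core.templating import render

# Look up templates/docker-compose.yml, render it with Jinja2, and write the
# result to the target path. Charm hooks normally run as root, which the
# file-ownership handling inside render() expects.
context = {'private_address': '10.0.0.5', 'arch': 'amd64'}
render('docker-compose.yml', '/tmp/docker-compose.yml', context)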
Example #2
def manage():
    config = hookenv.config()
    manager = services.ServiceManager([
        {
            'service': 'dockerfile/rethinkdb',
            'ports': [80, 28015, 29015],
            'provided_data': [WebsiteRelation()],
            'required_data': [
                docker.DockerPortMappings({
                    80: 8080,
                    28015: 28015,
                    29015: 29015,
                }),
                docker.DockerVolumes(mapped_volumes={config['storage-path']: '/rethinkdb'}),
                docker.DockerContainerArgs(
                    'rethinkdb',
                    '--bind', 'all',
                    '--canonical-address', hookenv.unit_get('public-address'),
                    '--canonical-address', hookenv.unit_get('private-address'),
                    '--machine-name', socket.gethostname().replace('-', '_'),
                ),
                ClusterPeers(),
            ],
            'start': docker.docker_start,
            'stop': docker.docker_stop,
        },
    ])
    manager.manage()
Example #3
def db_joined():
    relation_set(quantum_username=DB_USER,
                 quantum_database=QUANTUM_DB,
                 quantum_hostname=unit_get('private-address'),
                 nova_username=NOVA_DB_USER,
                 nova_database=NOVA_DB,
                 nova_hostname=unit_get('private-address'))
def db_joined():
    relation_set(nova_database=config('database'),
                 nova_username=config('database-user'),
                 nova_hostname=unit_get('private-address'))
    if network_manager() in ['quantum', 'neutron']:
        # XXX: Renaming relations from quantum_* to neutron_* here.
        relation_set(neutron_database=config('neutron-database'),
                     neutron_username=config('neutron-database-user'),
                     neutron_hostname=unit_get('private-address'))
Example #5
File: etcd.py Project: cmars/layer-etcd
 def __init__(self):
     self.db = unitdata.kv()
     self.public_address = unit_get('public-address')
     self.private_address = unit_get('private-address')
     self.hook_data = unitdata.HookData()
     self.unit_name = getenv('JUJU_UNIT_NAME').replace('/', '')
     self.port = config('port')
     self.management_port = config('management_port')
     self.init_cluster_cache()
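This example and several below lean on charmhelpers.core.unitdata.kv(), a unit-local, sqlite-backed key/value store. A minimal sketch of the get/set API, with a hypothetical key and value:

from charmhelpers.core import unitdata

db = unitdata.kv()
# Store and retrieve a unit-local value; the data persists across hooks.
db.set('etcd.connection_string', 'http://10.0.0.4:2380')  # hypothetical value
print(db.get('etcd.connection_string'))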
Example #6
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    files to contain the dynamic data for the configuration files.'''
    context = {}
    config = hookenv.config()
    # Add the charm configuration data to the context.
    context.update(config)

    # Update the context with extra values: arch, and networking information
    context.update({'arch': arch(),
                    'master_address': hookenv.unit_get('private-address'),
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Handle static options for now
    api_opts.add('--min-request-timeout', '300')
    api_opts.add('--v', '4')
    api_opts.add('--client-ca-file', ca_cert_path)
    api_opts.add('--tls-cert-file', server_cert_path)
    api_opts.add('--tls-private-key-file', server_key_path)

    scheduler_opts.add('--v', '2')

    # Default to 3 minute resync. TODO: Make this configurable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)

    context.update({'kube_apiserver_flags': api_opts.to_s(),
                    'kube_scheduler_flags': scheduler_opts.to_s(),
                    'kube_controller_manager_flags': controller_opts.to_s()})

    # Render the configuration files that contains parameters for
    # the apiserver, scheduler, and controller-manager
    render_service('kube-apiserver', context)
    render_service('kube-controller-manager', context)
    render_service('kube-scheduler', context)

    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)

    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    call(['systemctl', 'enable', 'kube-apiserver'])
    call(['systemctl', 'enable', 'kube-controller-manager'])
    call(['systemctl', 'enable', 'kube-scheduler'])
Example #7
 def configure(self, port, private_address=None, hostname=None):
     if not hostname:
         hostname = hookenv.unit_get('private-address')
     if not private_address:
         private_address = hookenv.unit_get('private-address')
     relation_info = {
         'hostname': hostname,
         'private-address': private_address,
         'port': port,
     }
     self.set_remote(**relation_info)
def db_joined(rid=None):
    relation_set(relation_id=rid,
                 nova_database=config('database'),
                 nova_username=config('database-user'),
                 nova_hostname=unit_get('private-address'))
    if (network_manager() in ['quantum', 'neutron']
            and neutron_plugin() == 'ovs'):
        # XXX: Renaming relations from quantum_* to neutron_* here.
        relation_set(relation_id=rid,
                     neutron_database=config('neutron-database'),
                     neutron_username=config('neutron-database-user'),
                     neutron_hostname=unit_get('private-address'))
Example #9
def install_openvpn_xenial():
    puppet = Puppet()
    try:
        os.makedirs('/opt/openvpn-puppet')
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    conf = config()
    dns_info = get_dns_info()
    clients = conf['clients'].split()
    eipndict = get_extip_and_networks()
    ext_ip = eipndict['external-ip']
    pub_ip = eipndict['external-ip']
    # If public-address is different from private-address, we're probably in a
    # juju-supported cloud that we can trust to give us the right address that
    # clients need to use to connect to us.
    if unit_get('private-address') != unit_get('public-address'):
        pub_ip = unit_get('public-address')
    internal_networks = eipndict['internal-networks']
    context = {
        'servername': SERVERNAME,
        'country': conf['key-country'],
        'province': conf['key-province'],
        'city': conf['key-city'],
        'organization': conf['key-org'],
        'email': conf['key-email'],
        'protocol': conf['protocol'],
        'port': conf['port'],
        'duplicate_cn': conf['duplicate-cn'],
        'push_dns': conf['push-dns'],
        'push_default_gateway': conf['push-default-gateway'],
        'dns_server': dns_info.get('nameserver', "8.8.8.8"),
        'dns_search_domains': dns_info.get('search', []),
        'clients': clients,
        'ext_ip': ext_ip,
        'pub_ip': pub_ip,
        'internal_networks': internal_networks,
    }
    templating.render(
        source='init.pp',
        target='/opt/openvpn-puppet/init.pp',
        context=context
    )
    kv_store = unitdata.kv()
    if kv_store.get('previous-port') and kv_store.get('previous-protocol'):
        close_port(kv_store.get('previous-port'),
                   protocol=kv_store.get('previous-protocol'))
    puppet.apply('/opt/openvpn-puppet/init.pp')
    copy_client_configs_to_home(clients)
    status_set('active', 'Ready')
    open_port(conf['port'], protocol=conf['protocol'].upper())
    kv_store.set('previous-port', conf['port'])
    kv_store.set('previous-protocol', conf['protocol'].upper())
Example #10
    def setup_kafka_config(self):
        '''
        copy the default configuration files to kafka_conf property
        defined in dist.yaml
        '''
        default_conf = self.dist_config.path('kafka') / 'config'
        kafka_conf = self.dist_config.path('kafka_conf')
        kafka_conf.rmtree_p()
        default_conf.copytree(kafka_conf)
        # Now remove the conf included in the tarball and symlink our real conf
        # dir. we've seen issues where kafka still looks for config in
        # KAFKA_HOME/config.
        default_conf.rmtree_p()
        kafka_conf.symlink(default_conf)

        # Configure immutable bits
        kafka_bin = self.dist_config.path('kafka') / 'bin'
        with utils.environment_edit_in_place('/etc/environment') as env:
            if kafka_bin not in env['PATH']:
                env['PATH'] = ':'.join([env['PATH'], kafka_bin])
            env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')

        # note: we set the advertised.host.name below to the public_address
        # to ensure that external (non-Juju) clients can connect to Kafka
        public_address = hookenv.unit_get('public-address')
        private_ip = utils.resolve_private_address(hookenv.unit_get('private-address'))
        kafka_server_conf = self.dist_config.path('kafka_conf') / 'server.properties'
        service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
        utils.re_edit_in_place(kafka_server_conf, {
            r'^broker.id=.*': 'broker.id=%s' % unit_num,
            r'^port=.*': 'port=%s' % self.dist_config.port('kafka'),
            r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
            r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % public_address,
        })

        kafka_log4j = self.dist_config.path('kafka_conf') / 'log4j.properties'
        utils.re_edit_in_place(kafka_log4j, {
            r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
        })

        # fix for lxc containers and some corner cases in manual provider
        # ensure that public_address is resolvable internally by mapping it to the private IP
        utils.update_etc_hosts({private_ip: public_address})

        templating.render(
            'upstart.conf',
            '/etc/init/kafka.conf',
            context={
                'kafka_conf': self.dist_config.path('kafka_conf'),
                'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka'))
            },
        )
def get_mgmt_interface():
    '''
    Returns the management interface.
    '''
    mgmt_interface = config('mgmt-interface')
    if not mgmt_interface:
        return get_iface_from_addr(unit_get('private-address'))
    elif interface_exists(mgmt_interface):
        return mgmt_interface
    else:
        log('Provided management interface %s does not exist'
            % mgmt_interface)
        return get_iface_from_addr(unit_get('private-address'))
Example #12
def install_etcd_certificates():
    ''' Copy the server cert and key to /etc/ssl/etcd and set the
    etcd.ssl.placed state. '''
    etcd_ssl_path = '/etc/ssl/etcd'
    if not os.path.exists(etcd_ssl_path):
        os.makedirs(etcd_ssl_path)

    kv = unitdata.kv()
    cert = kv.get('tls.server.certificate')
    with open('{}/server.pem'.format(etcd_ssl_path), 'w+') as f:
        f.write(cert)
    with open('{}/ca.pem'.format(etcd_ssl_path), 'w+') as f:
        f.write(leader_get('certificate_authority'))

    # shenanigans - each server makes its own key when generating
    # the CSR. This is why it's "magically" present.
    keypath = 'easy-rsa/easyrsa3/pki/private/{}.key'
    server = os.getenv('JUJU_UNIT_NAME').replace('/', '_')
    if os.path.exists(keypath.format(server)):
        shutil.copyfile(keypath.format(server),
                        '{}/server-key.pem'.format(etcd_ssl_path))
    else:
        shutil.copyfile(keypath.format(unit_get('public-address')),
                        '{}/server-key.pem'.format(etcd_ssl_path))

    set_state('etcd.ssl.placed')
Example #13
def db_joined():
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if network_manager() in ['quantum', 'neutron']:
        config_neutron = True
    else:
        config_neutron = False

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if config_neutron:
            sync_db_with_multi_ipv6_addresses(config('neutron-database'),
                                              config('neutron-database-user'),
                                              relation_prefix='neutron')
    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host)

        if config_neutron:
            # XXX: Renaming relations from quantum_* to neutron_* here.
            relation_set(neutron_database=config('neutron-database'),
                         neutron_username=config('neutron-database-user'),
                         neutron_hostname=host)
Example #14
File: k8s.py Project: AzTron/kubernetes
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    files to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context with SDN and config data.
    context.update(gather_sdn_data())
    context.update(hookenv.config())
    if reldata:
        context.update({'connection_string': reldata.connection_string()})
    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)
    # Add the manifest directory so the docker-compose template can reference it.
    context.update({'manifest_directory': rendered_manifest_dir,
                    'private_address': hookenv.unit_get('private-address')})

    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    render('docker-compose.yml', target, context)
    # Render the files/manifests/master.json that contains parameters for the
    # apiserver, controller, and controller-manager
    target = os.path.join(rendered_manifest_dir, 'master.json')
    render('master.json', target, context)
    # Render files/kubernetes/skydns-svc.yaml for SkyDNS service
    target = os.path.join(rendered_manifest_dir, 'skydns-svc.yml')
    render('skydns-svc.yml', target, context)
    # Render files/kubernetes/skydns-rc.yaml for SkyDNS pods
    target = os.path.join(rendered_manifest_dir, 'skydns-rc.yml')
    render('skydns-rc.yml', target, context)
Example #15
def balance_rings():
    '''handle doing ring balancing and distribution.'''
    new_ring = False
    for ring in SWIFT_RINGS.itervalues():
        if balance_ring(ring):
            log('Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        return

    for ring in SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
                        os.path.join(WWW_DIR, f))

    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        trigger = uuid.uuid4()

        if cluster.is_clustered():
            hostname = config('vip')
        else:
            hostname = unit_get('private-address')

        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)

    service_restart('swift-proxy')
Example #16
File: hooks.py Project: 40a/kansible
def get_template_data():
    rels = hookenv.relations()
    template_data = hookenv.Config()
    template_data.CONFIG_FILE_NAME = '.unit-state'

    overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
    etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
    api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))

    # kubernetes master isn't HA yet.
    if api_servers:
        api_info = api_servers.pop()
        api_servers = 'http://%s:%s' % (api_info[0], api_info[1])

    template_data['overlay_type'] = overlay_type
    template_data['kubelet_bind_addr'] = _bind_addr(
        hookenv.unit_private_ip())
    template_data['proxy_bind_addr'] = _bind_addr(
        hookenv.unit_get('public-address'))
    template_data['kubeapi_server'] = api_servers
    template_data['etcd_servers'] = ','.join([
        'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
    template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
        '/', '-')
    return _encode(template_data)
def sapi_post_ips():
    """
    Posts PLUMgrid nodes IPs to solutions api server.
    """
    pg_edge_ips = _pg_edge_ips()
    pg_dir_ips = _pg_dir_ips()
    pg_gateway_ips = _pg_gateway_ips()
    pg_dir_ips.append(get_host_ip(unit_get('private-address')))
    pg_edge_ips = '"edge_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_edge_ips))
    pg_dir_ips = '"director_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_dir_ips))
    pg_gateway_ips = '"gateway_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_gateway_ips))
    opsvm_ip = '"opsvm_ip"' + ':' + '"{}"'.format(config('opsvm-ip'))
    virtual_ip = '"virtual_ip"' + ':' \
        + '"{}"'.format(config('plumgrid-virtual-ip'))
    JSON_IPS = ','.join([pg_dir_ips, pg_edge_ips, pg_gateway_ips,
                        opsvm_ip, virtual_ip])
    status = (
        'curl -H \'Content-Type: application/json\' -X '
        'PUT -d \'{{{0}}}\' http://{1}' + ':' + '{2}/v1/zones/{3}/allIps'
    ).format(JSON_IPS, config('lcm-ip'), config('sapi-port'),
             config('sapi-zone'))
    POST_ZONE_IPs = _exec_cmd_output(
        status,
        'Posting Zone IPs to Solutions API server failed!')
    if POST_ZONE_IPs:
        if 'success' in POST_ZONE_IPs:
            log('Successfully posted Zone IPs to Solutions API server!')
        log(POST_ZONE_IPs)
Example #18
def lxd_relation_joined(rid=None):
    settings = {}
    settings['password'] = lxd_trust_password()
    settings['hostname'] = gethostname()
    settings['address'] = unit_get('private-address')
    relation_set(relation_id=rid,
                 relation_settings=settings)
Example #19
def get_ha_nodes():
    ha_units = peer_ips(peer_relation='hanode')
    ha_nodes = {}
    for unit in ha_units:
        corosync_id = get_corosync_id(unit)
        addr = ha_units[unit]
        if config('prefer-ipv6'):
            if not utils.is_ipv6(addr):
                # Not an error since cluster may still be forming/updating
                log("Expected an ipv6 address but got %s" % (addr),
                    level=WARNING)

            ha_nodes[corosync_id] = addr
        else:
            ha_nodes[corosync_id] = get_host_ip(addr)

    corosync_id = get_corosync_id(local_unit())
    if config('prefer-ipv6'):
        addr = get_ipv6_addr()
    else:
        addr = get_host_ip(unit_get('private-address'))

    ha_nodes[corosync_id] = addr

    return ha_nodes
Example #20
def prepare_end_user_package():
    """ Prepare the tarball package for clients to use to connet to the
        swarm cluster using the default client credentials. """

    # If we are a follower, we don't have keys and need to fetch them
    # from leader-data, which triggered `leadership.set.client_cert`
    # So it better be there!
    if not path.exists("swarm_credentials"):
        makedirs("swarm_credentials")
        with open("swarm_credentials/key.pem", "w+") as fp:
            fp.write(leader_get("client_key"))
        with open("swarm_credentials/cert.pem", "w+") as fp:
            fp.write(leader_get("client_cert"))
        with open("swarm_credentials/ca.pem", "w+") as fp:
            fp.write(leader_get("certificate_authority"))

    # Render the client package script
    template_vars = {"public_address": unit_get("public-address")}
    render("enable.sh", "./swarm_credentials/enable.sh", template_vars)

    # clear out any stale credentials package
    if path.exists("swarm_credentials.tar"):
        remove("swarm_credentials.tar")

    cmd = "tar cvfz swarm_credentials.tar.gz swarm_credentials"
    subprocess.check_call(split(cmd))
    copyfile("swarm_credentials.tar.gz", "/home/ubuntu/swarm_credentials.tar.gz")
    set_state("client.credentials.placed")
Example #21
    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
           (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        for net_type in ADDRESS_TYPES:
            net_cfg_opt = os_ip.ADDRESS_MAP[net_type]['config'].replace('-',
                                                                        '_')
            config_cidr = getattr(self, net_cfg_opt, None)
            addr = ch_ip.get_address_in_network(
                config_cidr,
                hookenv.unit_get('private-address'))
            addresses.append(
                (addr, os_ip.resolve_address(endpoint_type=net_type)))
        return sorted(addresses)
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
    def __call__(self):
        '''
        Horizon specific HAProxy context; haproxy is used all the time
        in the openstack dashboard charm so a single instance just
        self refers
        '''
        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        log('Ensuring haproxy enabled in /etc/default/haproxy.')
        with open('/etc/default/haproxy', 'w') as out:
            out.write('ENABLED=1\n')

        ctxt = {
            'units': cluster_hosts,
            'service_ports': {
                'dash_insecure': [80, 70],
                'dash_secure': [443, 433]
            }
        }
        return ctxt
def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
    """Create symlinks for SAN records

    :param ssl_dir: str Directory to create symlinks in
    :param custom_hostname_link: str Additional link to be created
    """
    hostname = get_hostname(unit_get('private-address'))
    hostname_cert = os.path.join(
        ssl_dir,
        'cert_{}'.format(hostname))
    hostname_key = os.path.join(
        ssl_dir,
        'key_{}'.format(hostname))
    # Add links to hostname cert, used if os-hostname vars not set
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        try:
            addr = resolve_address(endpoint_type=net_type)
            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
                os.symlink(hostname_cert, cert)
                os.symlink(hostname_key, key)
        except NoNetworkBinding:
            log("Skipping creating cert symlink for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    if custom_hostname_link:
        custom_cert = os.path.join(
            ssl_dir,
            'cert_{}'.format(custom_hostname_link))
        custom_key = os.path.join(
            ssl_dir,
            'key_{}'.format(custom_hostname_link))
        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
            os.symlink(hostname_cert, custom_cert)
            os.symlink(hostname_key, custom_key)
Example #25
    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
def agent_changed(rid=None, unit=None):
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('identity-service relation incomplete. Peer not ready?')
        return

    juju_log('**********rid is %s' % str(rid))
    juju_log('**********unit is %s' % str(unit))
    rel_settings = relation_get(rid=rid, unit=unit)
    agent_hostname = rel_settings.get('hostname')
    agent_hostaddress = rel_settings.get('hostaddress')
    juju_log('**********agent_hostname is %s' % agent_hostname)
    juju_log('**********agent_hostaddress is %s' % agent_hostaddress)
    host = unit_get('private-address')
    hostname = get_hostname(host)
    hostaddress = get_host_ip(host)
    juju_log('**********host is %s' % host)
    juju_log('**********hostname is %s' % hostname)
    juju_log('**********hostaddress is %s' % hostaddress)
    hosts = [agent_hostname, hostname, agent_hostaddress, hostaddress]
    for host in hosts:
        if host:
            add_known_host(host, unit=unit, user='******')
Example #27
def resolve_address(endpoint_type=PUBLIC):
    resolved_address = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # Assume vip is simple and pass back directly
            resolved_address = config('vip')
        else:
            for vip in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        vip):
                    resolved_address = vip
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr()
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    else:
        return resolved_address
Example #28
def install_keystone():
    saved_path = os.getcwd()
    os.chdir('{}'.format(CHARM_LIB_DIR + "oracle_keystone"))
    pip_install('.')
    os.chdir(saved_path)
    with open("/etc/apache2/apache2.conf", "a") as apache_conf:
        apache_conf.write("ServerName {}".format(unit_get('private-address')))
Example #29
 def configure(self, port, user):
     relation_info = {
         'hostname': hookenv.unit_get('private-address'),
         'port': port,
         'user': user,
     }
     self.set_remote(**relation_info)
Example #30
def enable_client_tls():
    """
    Copy the TLS certificates in place and generate mount points for the swarm
    manager to mount the certs. This enables client-side TLS security on the
    TCP service.
    """
    if not path.exists("/etc/docker"):
        makedirs("/etc/docker")

    kv = unitdata.kv()
    cert = kv.get("tls.server.certificate")
    with open("/etc/docker/server.pem", "w+") as f:
        f.write(cert)
    with open("/etc/docker/ca.pem", "w+") as f:
        f.write(leader_get("certificate_authority"))

    # shenanigans
    keypath = "easy-rsa/easyrsa3/pki/private/{}.key"
    server = getenv("JUJU_UNIT_NAME").replace("/", "_")
    if path.exists(keypath.format(server)):
        copyfile(keypath.format(server), "/etc/docker/server-key.pem")
    else:
        copyfile(keypath.format(unit_get("public-address")), "/etc/docker/server-key.pem")

    opts = DockerOpts()
    config_dir = "/etc/docker"
    cert_path = "{}/server.pem".format(config_dir)
    ca_path = "{}/ca.pem".format(config_dir)
    key_path = "{}/server-key.pem".format(config_dir)
    opts.add("tlscert", cert_path)
    opts.add("tlscacert", ca_path)
    opts.add("tlskey", key_path)
    opts.add("tlsverify", None)
    render("docker.defaults", "/etc/default/docker", {"opts": opts.to_s()})
def compute_joined(rid=None, remote_restart=False):
    cons_settings = console_settings()
    relation_set(relation_id=rid, **cons_settings)
    rel_settings = {
        'network_manager': network_manager(),
        'volume_service': 'cinder',
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': unit_get('private-address'),
        'region': config('region'),
    }
    rel_settings.update(serial_console_settings())
    # update relation setting if we're attempting to restart remote
    # services
    if remote_restart:
        rel_settings['restart_trigger'] = str(uuid.uuid4())

    rel_settings.update(keystone_compute_settings())
    relation_set(relation_id=rid, **rel_settings)
def get_cluster_host_ip():
    """Get the this host's IP address for use with percona cluster peers

    @returns IP to pass to cluster peers
    """

    cluster_network = config('cluster-network')
    if cluster_network:
        cluster_addr = get_address_in_network(cluster_network, fatal=True)
    else:
        try:
            cluster_addr = network_get_primary_address('cluster')
        except NotImplementedError:
            # NOTE(jamespage): fallback to previous behaviour
            cluster_addr = resolve_hostname_to_ip(
                unit_get('private-address')
            )

    return cluster_addr
Example #33
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    resolved_address = None
    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
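As a worked example with hypothetical values: on a clustered unit with vip='10.5.0.10 192.168.1.10' and the endpoint's network config set to 192.168.1.0/24, the loop returns 192.168.1.10, the only vip inside the configured network; an unclustered unit instead passes its fallback address through get_address_in_network().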
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect for
                                      a complete https context.

    :vip_setting:                str: Setting in charm config that specifies
                                      VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
Example #35
def identity_service_joined(relation_id=None):
    config = hookenv.config()

    # Generate temporary bogus service URL to make keystone charm
    # happy. The sync script will replace it with the endpoint for
    # swift, because when this hook is fired, we do not yet
    # necessarily know the swift endpoint URL (it might not even exist
    # yet).

    url = 'http://' + hookenv.unit_get('private-address')
    relation_data = {
        'service': 'image-stream',
        'region': config['region'],
        'public_url': url,
        'admin_url': url,
        'internal_url': url
    }

    hookenv.relation_set(relation_id=relation_id, **relation_data)
Example #36
def local_address(unit_get_fallback='public-address'):
    """Return a network address for this unit.

    Attempt to retrieve a 'default' IP address for this unit
    from network-get. If this is running with an old version of Juju then
    fallback to unit_get.

    Note on juju < 2.9 the binding to juju-info may not exist, so fall back to
    the unit-get.

    :param unit_get_fallback: Either 'public-address' or 'private-address'.
                              Only used with old versions of Juju.
    :type unit_get_fallback: str
    :returns: IP Address
    :rtype: str
    """
    try:
        return network_get_primary_address('juju-info')
    except (NotImplementedError, NoNetworkBinding):
        return unit_get(unit_get_fallback)
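A hypothetical call site for the helper above, advertising the unit's address on a relation (the relation key is illustrative, not from the original source):

from charmhelpers.core.hookenv import relation_set

# Inside a relation hook: resolve this unit's address, then publish it.
addr = local_address(unit_get_fallback='private-address')
relation_set(ingress_address=addr)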
Example #37
def extension_relation_joined():
    log("Updating extension interface with up-to-date data.")
    # Fish out the current zuul address from any relation we have.
    zuul_address = ""
    for rid in relation_ids('zuul'):
        for unit in related_units(rid):
            zuul_address = relation_get(
                rid=rid, unit=unit, attribute='private-address')

    for rid in relation_ids('extension'):
        r_settings = {
            'admin_username': config('username'),
            'admin_password': get_jenkins_password(),
            'jenkins_url': 'http://%s:8080' % unit_get('private-address'),
            'jenkins-admin-user': config('jenkins-admin-user'),
            'jenkins-token': config('jenkins-token')
        }
        relation_set(relation_id=rid, relation_settings=r_settings)
        if zuul_address:
            relation_set(relation_id=rid, zuul_address=zuul_address)
 def __call__(self):
     ctxt = {}
     for rid in relation_ids('cloud-compute'):
         for unit in related_units(rid):
             rel = {'rid': rid, 'unit': unit}
             proto = relation_get('console_access_protocol', **rel)
             if not proto:
                 # only bother with units that have a proto set.
                 continue
             ctxt['console_keymap'] = relation_get('console_keymap', **rel)
             ctxt['console_access_protocol'] = proto
             ctxt['console_vnc_type'] = True if 'vnc' in proto else False
             if proto == 'vnc':
                 ctxt = dict(ctxt, **self.get_console_info('xvpvnc', **rel))
                 ctxt = dict(ctxt, **self.get_console_info('novnc', **rel))
             else:
                 ctxt = dict(ctxt, **self.get_console_info(proto, **rel))
             break
     ctxt['console_listen_addr'] = get_host_ip(unit_get('private-address'))
     return ctxt
Example #39
def setup_mysql(mysql):
    env.log('Writing DB Info')
    env.log(mysql.host())
    # [username[:password]@][protocol[(address)]]/dbname
    dsn = "{0}:{1}@{2}:{3}/{4}".format(
        mysql.user(),
        mysql.password(),
        mysql.host(),
        mysql.port(),
        mysql.database(),
    )

    # Write the rendered HTML page containing the DSN to HTMLFILE.
    with open(HTMLFILE, 'w') as config:
        config.write(HTMLTPL.format(dsn))

    env.log('{} written'.format(HTMLFILE))
    ipaddr = env.unit_get('public-address')
    env.log(ipaddr)
    env.status_set('active', 'http://{}/index.html'.format(ipaddr))
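With hypothetical values (user 'admin', password 's3cret', host '10.0.0.7', port 3306, database 'blog'), the format string above yields:

admin:s3cret@10.0.0.7:3306/blog

Note that this omits the protocol(address) segment of the Go DSN grammar quoted in the comment; whether the shortened form is accepted depends on the consuming driver.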
Example #40
    def __call__(self):
        api_settings = super(NeutronGatewayContext, self).__call__()
        ctxt = {
            'shared_secret': get_shared_secret(),
            'local_ip':
            get_address_in_network(config('os-data-network'),
                                   get_host_ip(unit_get('private-address'))),
            'core_plugin': core_plugin(),
            'plugin': config('plugin'),
            'debug': config('debug'),
            'verbose': config('verbose'),
            'instance_mtu': config('instance-mtu'),
            'l2_population': api_settings['l2_population'],
            'enable_dvr': api_settings['enable_dvr'],
            'enable_l3ha': api_settings['enable_l3ha'],
            'overlay_network_type':
            api_settings['overlay_network_type'],
        }

        mappings = config('bridge-mappings')
        if mappings:
            ctxt['bridge_mappings'] = ','.join(mappings.split())

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        dnsmasq_flags = config('dnsmasq-flags')
        if dnsmasq_flags:
            ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags)

        net_dev_mtu = api_settings['network_device_mtu']
        if net_dev_mtu:
            ctxt['network_device_mtu'] = net_dev_mtu
            ctxt['veth_mtu'] = net_dev_mtu

        return ctxt
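The split()/join() idiom used above for bridge-mappings, flat-network-providers, and vlan-ranges simply normalizes whitespace-separated config values into comma-separated strings; with a hypothetical value, 'physnet1:br-ex physnet2:br-data' becomes 'physnet1:br-ex,physnet2:br-data'.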
Example #41
def replica_set_relation_joined():
    juju_log("replica_set_relation_joined")
    my_hostname = unit_get('public-address')
    my_port = config('port')
    my_replset = config('replicaset')
    my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
    juju_log("my_hostname: %s" % my_hostname)
    juju_log("my_port: %s" % my_port)
    juju_log("my_replset: %s" % my_replset)
    juju_log("my_install_order: %s" % my_install_order)
    enable_replset(my_replset)
    restart_mongod()

    relation_set(
        relation_id(), {
            'hostname': my_hostname,
            'port': my_port,
            'replset': my_replset,
            'install-order': my_install_order,
            'type': 'replset',
        })
Example #42
def db_joined(relation_id=None):
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

    else:
        host = None
        try:
            # NOTE: try to use network spaces
            host = network_get_primary_address('shared-db')
        except NotImplementedError:
            # NOTE: fallback to private-address
            host = unit_get('private-address')

        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)
Example #43
def unprovision_control():
    if not remote_unit():
        return
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    relation = relation_type()
    a_ip = None
    a_port = None
    if relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        a_ip, a_port = [
            (gethostbyname(relation_get("private-address", unit,
                                        rid)), relation_get("port", unit, rid))
            for rid in relation_ids("contrail-api")
            for unit in related_units(rid)
        ][0]
    user = None
    password = None
    tenant = None
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        user, password, tenant = [(relation_get("service_username", unit, rid),
                                   relation_get("service_password", unit, rid),
                                   relation_get("service_tenant_name", unit,
                                                rid))
                                  for rid in relation_ids("identity-admin")
                                  for unit in related_units(rid)][0]
    log("Unprovisioning control {}".format(host_ip))
    check_call([
        "contrail-provision-control", "--host_name", host_name, "--host_ip",
        host_ip, "--router_asn", "64512", "--api_server_ip", a_ip,
        "--api_server_port",
        str(a_port), "--oper", "del", "--admin_user", user, "--admin_password",
        password, "--admin_tenant_name", tenant
    ])
Example #44
def mongos_relation_changed():
    juju_log("mongos_relation_changed")
    config_data = config()
    retVal = False

    hostname = relation_get('hostname')
    port = relation_get('port')
    rel_type = relation_get('type')
    if hostname is None or port is None or rel_type is None:
        print("mongos_relation_changed: relation data not ready.")
        return
    if rel_type == 'configsvr':
        config_servers = load_config_servers(default_mongos_list)
        print "Adding config server: %s:%s" % (hostname, port)
        if (hostname is not None and port is not None and
                hostname != '' and port != '' and
                "%s:%s" % (hostname, port) not in config_servers):
            config_servers.append("%s:%s" % (hostname, port))
        disable_mongos(config_data['mongos_port'])
        retVal = enable_mongos(config_data, config_servers)
        if retVal:
            update_file(default_mongos_list, '\n'.join(config_servers))
    elif rel_type == 'database':
        if mongos_ready():
            mongos_host = "%s:%s" % (unit_get('public-address'),
                                     config('mongos_port'))
            shard_command1 = "sh.addShard(\"%s:%s\")" % (hostname, port)
            mongo_client(mongos_host, shard_command1)
            replicaset = relation_get('replset')
            shard_command2 = "sh.addShard(\"%s/%s:%s\")" %  \
            (replicaset, hostname, port)
            mongo_client(mongos_host, shard_command2)

    else:
        print("mongos_relation_change: undefined rel_type: %s" % rel_type)
        return

    print("mongos_relation_changed returns: %s" % retVal)
Example #45
def provision_control():
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    a_ip, a_port = [(gethostbyname(relation_get("private-address", unit,
                                                rid)), port)
                    for rid in relation_ids("contrail-api")
                    for unit, port in ((unit, relation_get("port", unit, rid))
                                       for unit in related_units(rid))
                    if port][0]
    user, password, tenant = [(relation_get("service_username", unit, rid),
                               relation_get("service_password", unit, rid),
                               relation_get("service_tenant_name", unit, rid))
                              for rid in relation_ids("identity-admin")
                              for unit in related_units(rid)][0]
    log("Provisioning control {}".format(host_ip))
    check_call([
        "contrail-provision-control", "--host_name", host_name, "--host_ip",
        host_ip, "--router_asn", "64512", "--api_server_ip", a_ip,
        "--api_server_port",
        str(a_port), "--oper", "add", "--admin_user", user, "--admin_password",
        password, "--admin_tenant_name", tenant
    ])
Example #46
    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        for ext_port in self.external_ports:
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt
Example #47
def cluster_string():
    cluster = ""
    cluster_rels = hook_data.rels['cluster'][1].keys()
    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster
    client_cluster = ['http://{}:2380'.format(unit_get('private-address'))]
    if hook_data.rels['cluster'][1]:
        reldata = hook_data.rels['cluster'][1][cluster_rels[0]]
        for unit in reldata:
            private = reldata[unit]['private-address']
            cluster = '{}{}=http://{}:2380,'.format(cluster,
                                                    unit.replace('/', ''),
                                                    private)
            client_cluster.append('http://{}:2380'.format(private))
    else:
        cluster = "{}=http://{}:2380".format(unit_name, private_address)

    # Only the leader will be communicating with clients. Because he is
    # the grand poobah of Juju's ETCD story. The end.
    if is_leader():
        db.set('etcd.connection_string', ','.join(client_cluster))
    return cluster.rstrip(',')
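Per the etcd configuration document linked in the comment, the returned value is meant for etcd's --initial-cluster flag; with hypothetical peers it looks like:

etcd0=http://10.0.0.4:2380,etcd1=http://10.0.0.5:2380,etcd2=http://10.0.0.6:2380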
Example #48
def get_node_flags(flag):
    """Nodes which have advertised the given flag.

    :param flag: Flag to check peers relation data for.
    :type flag: str
    :returns: List of IPs of nodes that are ready to join the cluster
    :rtype: List
    """
    hosts = []
    if config('prefer-ipv6'):
        hosts.append(get_ipv6_addr())
    else:
        hosts.append(unit_get('private-address'))

    for relid in relation_ids('hanode'):
        for unit in related_units(relid):
            if relation_get(flag, rid=relid, unit=unit):
                hosts.append(
                    relation_get('private-address', rid=relid, unit=unit))

    hosts.sort()
    return hosts
Example #49
def get_unit_ip(config_override='multicast-intf', address_family=ni.AF_INET):
    """Get the IP of this unit for cplane-controller relationship

    If the config override interface is configured use that address otherwise
    consult network-get for the correct address. As a last resort use the
    fallback interface.

    @param config_override: The string name of the configuration value that can
                           override the use of network spaces
    @param address_family: The netifaces address family
                           i.e. for IPv4 AF_INET
                           Only used when config(config_override) is configured
    @returns: IP address for this unit for the cplane-controller relationship
    """

    # If the config override is not set to an interface use network-get
    # to leverage network spaces in MAAS 2.x
    if not config(config_override):
        try:
            return network_get_primary_address('cplane-controller')
        except NotImplementedError:
            # Juju 1.x environment
            return unit_get('private-address')

    interface = config(config_override)
    try:
        interface_config = ni.ifaddresses(interface).get(address_family)
        if interface_config:
            for link in interface_config:
                addr = link['addr']
                if addr:
                    return addr
    except ValueError as e:
        raise UnconfiguredInterface("Interface {} is invalid: {}"
                                    "".format(interface, e.message))
    raise UnconfiguredInterface("{} interface has no address in the "
                                "address family {}".format(
                                    interface, address_family))
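The netifaces calls above map each interface to a dict keyed by address family, whose values are lists of {'addr': ..., 'netmask': ...} dicts. A minimal standalone sketch with a hypothetical interface name:

import netifaces as ni

# ifaddresses() raises ValueError for an unknown interface, which is what the
# except clause above converts into UnconfiguredInterface.
for link in ni.ifaddresses('eth0').get(ni.AF_INET, []):
    print(link['addr'])  # e.g. '10.0.0.12'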
Example #50
def create_neutron_endpoint():
    keystone_ip = get_auth_url()
    private_ip = unit_get('private-address')
    cmd = "openstack --os-username admin \
--os-password password \
--os-project-name admin \
--os-user-domain-name Default \
--os-project-domain-name Default \
--os-auth-url http://{}:35357/v3 \
--os-identity-api-version 3 \
endpoint create  --region RegionOne network public \
http://{}:9696".format(keystone_ip, private_ip)
    if keystone_ip:
        os.system(cmd)

    cmd = "openstack --os-username admin \
--os-password password \
--os-project-name admin \
--os-user-domain-name Default \
--os-project-domain-name Default \
--os-auth-url http://{}:35357/v3 \
--os-identity-api-version 3 \
endpoint create  --region RegionOne network internal \
http://{}:9696".format(keystone_ip, private_ip)
    if keystone_ip:
        os.system(cmd)

    cmd = "openstack --os-username admin \
--os-password password \
--os-project-name admin \
--os-user-domain-name Default \
--os-project-domain-name Default \
--os-auth-url http://{}:35357/v3 \
--os-identity-api-version 3 \
endpoint create  --region RegionOne network admin \
http://{}:9696".format(keystone_ip, private_ip)
    if keystone_ip:
        os.system(cmd)
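The three command blocks above differ only in the endpoint interface, so the same calls can be sketched as a loop (behavior-equivalent rewrite, not from the original source; keystone_ip and private_ip are the locals defined at the top of the function):

for interface in ('public', 'internal', 'admin'):
    # Build the same openstack CLI invocation for each endpoint interface.
    cmd = ("openstack --os-username admin "
           "--os-password password "
           "--os-project-name admin "
           "--os-user-domain-name Default "
           "--os-project-domain-name Default "
           "--os-auth-url http://{0}:35357/v3 "
           "--os-identity-api-version 3 "
           "endpoint create --region RegionOne network {1} "
           "http://{2}:9696").format(keystone_ip, interface, private_ip)
    if keystone_ip:
        os.system(cmd)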
Example #51
    def get_peer_info(self, address_key='private-address'):
        """Return peer information mapped by unit names.

        An example return value is:

        {
            'nfs/0': {'address': '172.16.0.1'},
            'nfs/1': {'address': '172.16.0.2'},
        }

        :param address_key: the key to use to fetch the remote unit's
            address.
        :return: a dict mapping unit names to dicts containing peer
            information, including the address.
        """
        info = {
            hookenv.local_unit(): {'address': hookenv.unit_get(address_key)},
        }
        for unit in self.all_joined_units:
            info[unit.unit_name] = {
                'address': unit.received_raw.get(address_key),
            }
        return info
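
A short usage sketch for the map returned above ('endpoint' stands in for the
interface instance exposing get_peer_info):

peer_info = endpoint.get_peer_info()
for unit_name, info in sorted(peer_info.items()):
    hookenv.log('peer {} -> {}'.format(unit_name, info['address']))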
Example #52
0
def db_joined():
    if is_relation_made('pgsql-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))
    else:
        host = None
        try:
            # NOTE: try to use network spaces
            host = network_get_primary_address('shared-db')
        except NotImplementedError:
            # NOTE: fallback to private-address
            host = unit_get('private-address')

        relation_set(database=config('database'),
                     username=config('database-user'),
                     hostname=host)
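
For context, a hedged sketch of a changed hook that consumes the provider's
reply; the key names and the configure_database helper are assumptions, not
part of this charm:

def db_changed():
    # The mysql provider answers db_joined() with connection details.
    host = relation_get('db_host')
    password = relation_get('password')
    if not (host and password):
        log('shared-db relation data incomplete; waiting')
        return
    # configure_database is a hypothetical helper standing in for whatever
    # writes the connection details into the service configuration.
    configure_database(host, config('database'),
                       config('database-user'), password)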
Example #53
0
    def __call__(self):
        ''' Grab cert and key from configuration for SSL config '''
        ctxt = {
            'http_port': 70,
            'https_port': 433
        }

        if config('enforce-ssl'):
            # NOTE(dosaboy): if ssl is not configured we shouldn't allow this
            if all(get_cert()):
                if config('vip'):
                    addr = config('vip')
                elif config('prefer-ipv6'):
                    addr = format_ipv6_addr(get_ipv6_addr()[0])
                else:
                    addr = get_host_ip(unit_get('private-address'))

                ctxt['ssl_addr'] = addr
            else:
                log("Enforce ssl redirect requested but ssl not configured - "
                    "skipping redirect", level=WARNING)

        return ctxt
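
Usage sketch (the class name here is hypothetical; context generators in
charmhelpers are invoked by calling the instance):

ctxt = SSLRedirectContext()()
if 'ssl_addr' in ctxt:
    log('redirecting plain http to https://{}:{}'.format(
        ctxt['ssl_addr'], ctxt['https_port']))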
Example #54
0
def restart_mongod(wait_for=default_wait_for, max_tries=default_max_tries):
    my_hostname = unit_get('public-address')
    my_port = config('port')
    current_try = 0

    service('stop', 'mongodb')
    if os.path.exists('/var/lib/mongodb/mongod.lock'):
        os.remove('/var/lib/mongodb/mongod.lock')

    if not service('start', 'mongodb'):
        return False

    while (service('status', 'mongodb')
           and not port_check(my_hostname, my_port)
           and current_try < max_tries):
        juju_log(
            "restart_mongod: Waiting for MongoDB to be ready ({}/{})".format(
                current_try, max_tries))
        time.sleep(wait_for)
        current_try += 1

    # Succeed only when the service reports running and the port answers.
    return bool(service('status', 'mongodb')
                and port_check(my_hostname, my_port))
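
port_check() is referenced above but not shown; a plausible minimal
implementation (an assumption, using only the standard library):

import socket

def port_check(host, port, timeout=5):
    # True when a TCP connection to host:port succeeds within the timeout.
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:
        return False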
Example #55
0
def restart_for_quorum(zkpeer):
    '''
    If we're the next node in the restart queue, restart, and then
    inform the leader that we've restarted. (If we are the leader,
    remove ourselves from the queue, and update the leadership data.)

    '''
    private_address = hookenv.unit_get('private-address')
    queue = json.loads(leader_get('restart_queue') or '[]')

    if not queue:
        # Everything has restarted.
        return

    if private_address == queue[0]:
        # It's our turn to restart.
        _restart_zookeeper('rolling restart for quorum update')
        if is_state('leadership.is_leader'):
            queue = queue[1:]
            hookenv.log('Leader updating restart queue: {}'.format(queue))
            leader_set(restart_queue=json.dumps(queue))
        else:
            zkpeer.inform_restart()
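
A hedged counterpart sketch of how the leader might seed the queue that
restart_for_quorum() drains; zkpeer.get_nodes() returning (name, address)
pairs is an assumption, as is the function name:

def begin_rolling_restart(zkpeer):
    # Leader-only: publish the ordered list of peer addresses to restart.
    if is_state('leadership.is_leader'):
        queue = [address for _, address in zkpeer.get_nodes()]
        hookenv.log('Seeding restart queue: {}'.format(queue))
        leader_set(restart_queue=json.dumps(queue))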
Example #56
0
def get_context():
    ctx = {}
    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")

    ips = get_controller_addresses()
    ctx["api_servers"] = ips
    ctx["api_port"] = config.get("api_port")
    ctx["control_nodes"] = [
        relation_get("private-address", unit, rid)
        for rid in relation_ids("contrail-controller")
        for unit in related_units(rid)
    ]
    ctx["analytics_nodes"] = _load_json_from_config("analytics_servers")
    info = _load_json_from_config("orchestrator_info")
    ctx["metadata_shared_secret"] = info.get("metadata_shared_secret")

    ctx["control_network_ip"] = get_control_network_ip()

    ctx["vhost_ip"] = config["vhost-cidr"]
    ctx["vhost_gateway"] = config["vhost-gateway-ip"]
    ctx["vhost_physical"] = config["vhost-physical"]

    if config["dpdk"]:
        ctx["dpdk"] = True
        ctx["physical_interface_address"] = config["dpdk-pci"]
        ctx["physical_interface_mac"] = config["dpdk-mac"]
        ctx["physical_uio_driver"] = config.get("dpdk-driver")

    plugin_ips = json.loads(config.get("plugin-ips", "{}"))
    my_ip = unit_get("private-address")
    ctx["plugin_settings"] = plugin_ips.get(my_ip, dict())

    log("CTX: " + str(ctx))

    ctx.update(_load_json_from_config("auth_info"))
    return ctx
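
_load_json_from_config() is used above but not shown; a plausible minimal
implementation (an assumption) that tolerates unset values:

def _load_json_from_config(key):
    # config behaves like a dict here, as it does in get_context() above.
    value = config.get(key)
    return json.loads(value) if value else {}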
Example #57
0
    def first_setup(self, parser, domain='example.com'):
        # Insert SOA and NS records
        hostname = unit_get('public-address')
        addr = resolve_hostname_to_ip(hostname)
        parser.dict_to_zone({
            'rr': 'SOA',
            'addr': 'ns.%s.' % domain,
            'owner': 'root.%s.' % domain,
            'serial': randint(12345678, 22345678),
            'refresh': '12h',
            'update-retry': '15m',
            'expiry': '3w',
            'minimum': '3h'
        })
        parser.dict_to_zone({
            'rr': 'NS',
            'alias': '@',
            'addr': 'ns1.%s.' % domain
        })
        parser.dict_to_zone({
            'rr': 'A',
            'alias': '@',
            'addr': addr,
            'ttl': 300
        })
        parser.dict_to_zone({
            'rr': 'A',
            'alias': 'ns1',
            'addr': addr,
            'ttl': 300
        })
        parser.dict_to_zone({
            'rr': 'CNAME',
            'alias': 'ns',
            # Point at ns1 in the configured domain rather than a
            # hard-coded example.com.
            'addr': 'ns1.%s.' % domain,
            'ttl': 300
        })
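
For orientation, the dict_to_zone() calls above correspond roughly to
zone-file records like the following (the parser's exact output format is an
assumption):

    @    IN SOA    ns.<domain>. root.<domain>. ( <serial> 12h 15m 3w 3h )
    @    IN NS     ns1.<domain>.
    @    IN A      <addr>
    ns1  IN A      <addr>
    ns   IN CNAME  ns1.<domain>.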
Example #58
0
def get_template_data():
    rels = hookenv.relations()
    template_data = hookenv.Config()
    template_data.CONFIG_FILE_NAME = '.unit-state'

    overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
    etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
    api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))

    # The Kubernetes master isn't HA yet.
    if api_servers:
        api_info = api_servers.pop()
        api_servers = 'http://%s:%s' % (api_info[0], api_info[1])

    template_data['overlay_type'] = overlay_type
    template_data['kubelet_bind_addr'] = _bind_addr(hookenv.unit_private_ip())
    template_data['proxy_bind_addr'] = _bind_addr(
        hookenv.unit_get('public-address'))
    template_data['kubeapi_server'] = api_servers
    template_data['etcd_servers'] = ','.join(
        ['http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
    template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
        '/', '-')
    return _encode(template_data)
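
_bind_addr() is referenced above but not shown; a minimal sketch (an
assumption) that turns a hostname or IPv4 literal into a bind address:

import socket

def _bind_addr(addr):
    try:
        socket.inet_aton(addr)
        return addr                        # already a dotted-quad address
    except socket.error:
        return socket.gethostbyname(addr)  # resolve hostname to an IP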
Example #59
0
def get_host_ip(hostname=None):
    try:
        import dns.resolver
    except ImportError:
        apt_install(filter_installed_packages(['python-dnspython']),
                    fatal=True)
        import dns.resolver

    if config('prefer-ipv6'):
        # Ensure we have a valid ipv6 address configured
        get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
        return socket.gethostname()

    hostname = hostname or unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        # This may throw an NXDOMAIN exception; in which case
        # things are badly broken so just let it kill the hook
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
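
Usage sketch: get_host_ip() resolves this unit's private address when called
with no argument, and passes IPv4 literals straight through:

ip = get_host_ip()                             # resolves unit_get('private-address')
assert get_host_ip('10.0.0.7') == '10.0.0.7'   # IPv4 literals short-circuit DNS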
Example #60
0
def get_ingress_address(endpoint_name):
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')

    addresses = network_info['ingress-addresses']

    # Need to prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
    # prioritize those last. Not technically correct, but good enough.
    try:
        sort_key = lambda a: int(a.partition('.')[0]) >= 240  # noqa: E731
        addresses = sorted(addresses, key=sort_key)
    except Exception:
        hookenv.log(traceback.format_exc())

    return addresses[0]
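
A quick illustration of the fan-address ordering above: the sort key sends
anything in the 240.0.0.0/4 block to the back, so a routable ingress address
wins:

addresses = ['252.0.0.1', '10.0.0.5', '250.1.2.3']
sort_key = lambda a: int(a.partition('.')[0]) >= 240  # noqa: E731
print(sorted(addresses, key=sort_key))
# -> ['10.0.0.5', '252.0.0.1', '250.1.2.3']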