Example No. 1
def configure_monitor_host():
    """Configure extra monitor host for better network failure detection"""
    log('Checking monitor host configuration', level=DEBUG)
    monitor_host = config('monitor_host')
    if monitor_host:
        if not pcmk.crm_opt_exists('ping'):
            log('Implementing monitor host configuration (host: %s)' %
                monitor_host, level=DEBUG)
            monitor_interval = config('monitor_interval')
            cmd = ('crm -w -F configure primitive ping '
                   'ocf:pacemaker:ping params host_list="%s" '
                   'multiplier="100" op monitor interval="%s" ' %
                   (monitor_host, monitor_interval))
            pcmk.commit(cmd)
            cmd = ('crm -w -F configure clone cl_ping ping '
                   'meta interleave="true"')
            pcmk.commit(cmd)
        else:
            log('Reconfiguring monitor host configuration (host: %s)' %
                monitor_host, level=DEBUG)
            cmd = ('crm -w -F resource param ping set host_list="%s"' %
                   monitor_host)
            pcmk.commit(cmd)
    else:
        if pcmk.crm_opt_exists('ping'):
            log('Disabling monitor host configuration', level=DEBUG)
            pcmk.commit('crm -w -F resource stop ping')
            pcmk.commit('crm -w -F configure delete ping')
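For illustration, with the hypothetical values monitor_host='10.0.0.1' and monitor_interval='30s', the first branch above commits these two crm commands:

crm -w -F configure primitive ping ocf:pacemaker:ping params host_list="10.0.0.1" multiplier="100" op monitor interval="30s"
crm -w -F configure clone cl_ping ping meta interleave="true"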
Example No. 2
def download_from_upstream():
    if not config('fallback_url') or not config('fallback_sum'):
        status_set('blocked', 'Missing configuration: fallback_url and '
                              'fallback_sum must both be set')
        return None
    client = ArchiveUrlFetchHandler()
    return client.download_and_validate(config('fallback_url'),
                                        config('fallback_sum'))
def config_changed():
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if os_release('nova-common') >= 'juno':
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            for rid in relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)

        for rid in relation_ids('cloud-compute'):
            compute_joined(rid=rid)

    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
Example No. 4
    def action_generate_hmac(self, hsm):
        """Generate an HMAC on a connected HSM.  Requires that an HSM is
        available via the barbican-hsm-plugin interface, generically known as
        'hsm'.

        Uses the barbican-manage command.

        :param hsm: instance of BarbicanRequires() class from the
                    barbican-hsm-plugin interface
        """
        plugin_data = hsm.plugin_data
        cmd = [
            'barbican-manage', 'hsm', 'gen_hmac',
            '--library-path', plugin_data['library_path'],
            '--passphrase', plugin_data['login'],
            '--slot-id', plugin_data['slot_id'],
            '--length', str(hookenv.config('hmac-key-length')),
            '--label', hookenv.config('label-hmac'),
        ]
        try:
            subprocess.check_call(cmd)
            hookenv.log("barbican-mangage hsm gen_hmac succeeded")
        except subprocess.CalledProcessError:
            str_err = "barbican-manage hsm gen_hmac failed."
            hookenv.log(str_err)
            raise Exception(str_err)
def update_nova_consoleauth_config():
    """
    Configure nova-consoleauth pacemaker resources
    """
    relids = relation_ids('ha')
    if len(relids) == 0:
        log('Related to {} ha services'.format(len(relids)), level='DEBUG')
        ha_relid = None
        data = {}
    else:
        ha_relid = relids[0]
        data = relation_get(rid=ha_relid) or {}

    # initialize keys in case this is a new dict
    data.setdefault('delete_resources', [])
    for k in ['colocations', 'init_services', 'resources', 'resource_params']:
        data.setdefault(k, {})

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            try:
                data['delete_resources'].remove(item)
            except ValueError:
                pass  # nothing to remove, we are good

        # the new pcmkr resources have to be added to the existing ones
        data['colocations']['vip_consoleauth'] = COLO_CONSOLEAUTH
        data['init_services']['res_nova_consoleauth'] = 'nova-consoleauth'
        data['resources']['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        data['resource_params']['res_nova_consoleauth'] = AGENT_CA_PARAMS

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        # nova-consoleauth will be managed by pacemaker, so mark it as manual
        if relation_ids('ha'):
            with open(NOVA_CONSOLEAUTH_OVERRIDE, 'w') as fp:
                fp.write('manual\n')
                fp.flush()

    elif (not config('single-nova-consoleauth') and
          console_attributes('protocol')):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            if item not in data['delete_resources']:
                data['delete_resources'].append(item)

        # remove them from the relation data, so they aren't recreated when
        # the hook is re-run
        data['colocations'].pop('vip_consoleauth', None)
        data['init_services'].pop('res_nova_consoleauth', None)
        data['resources'].pop('res_nova_consoleauth', None)
        data['resource_params'].pop('res_nova_consoleauth', None)

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        try:
            os.remove(NOVA_CONSOLEAUTH_OVERRIDE)
        except FileNotFoundError as e:
            log(str(e), level='DEBUG')
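For reference, a sketch of the relation data shape this function maintains while single-nova-consoleauth is enabled (the constant names come from the code above; their values are defined elsewhere in the charm):

data = {
    'delete_resources': [],
    'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
    'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
    'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
    'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS},
}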
Example No. 6
def determine_packages(source=None):
    # currently all packages match service names
    packages = [] + BASE_PACKAGES

    for v in resource_map().values():
        packages.extend(v['services'])
        if manage_plugin():
            pkgs = neutron_plugin_attribute(config('neutron-plugin'),
                                            'server_packages',
                                            'neutron')
            packages.extend(pkgs)

    if get_os_codename_install_source(source) >= 'kilo':
        packages.extend(KILO_PACKAGES)

    if config('neutron-plugin') == 'vsp':
        nuage_pkgs = config('nuage-packages').split()
        packages += nuage_pkgs

    if git_install_requested():
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        packages = list(set(packages))
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)
        if get_os_codename_install_source(source) >= 'kilo':
            for p in GIT_PACKAGE_BLACKLIST_KILO:
                if p in packages:
                    packages.remove(p)

    return list(set(packages))
Example No. 7
def neutron_settings():
    neutron_settings = {}
    if is_relation_made('neutron-api', 'neutron-plugin'):
        neutron_api_info = NeutronAPIContext()()
        neutron_settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': neutron_api_info['neutron_plugin'],
            'region': config('region'),
            'quantum_security_groups':
            neutron_api_info['neutron_security_groups'],
            'quantum_url': neutron_api_info['neutron_url'],
        })
    else:
        neutron_settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                          str(api_port('neutron-server'))),
        })
    neutron_url = urlparse(neutron_settings['quantum_url'])
    neutron_settings['quantum_host'] = neutron_url.hostname
    neutron_settings['quantum_port'] = neutron_url.port
    return neutron_settings
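A quick sketch of what the final urlparse step yields, assuming a typical (hypothetical) endpoint URL:

from urlparse import urlparse  # urllib.parse on Python 3

url = urlparse('http://10.5.0.10:9696')
assert url.hostname == '10.5.0.10'  # becomes quantum_host
assert url.port == 9696             # becomes quantum_port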
Example No. 8
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the key and certificate exist?
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]
        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]
        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        hookenv.open_port(hookenv.config('port'))
        services = apiserver.services()
        nginx.configure_site(
                'apilb',
                'apilb.conf',
                server_name='_',
                services=services,
                port=hookenv.config('port'),
                server_certificate=server_cert_path,
                server_key=server_key_path,
        )
        hookenv.status_set('active', 'Loadbalancer ready.')
def cluster_wait():
    ''' Wait for operations based on modulo distribution

    Use the distributed_wait function to determine how long to wait before
    running an operation like restart or cluster join. By setting modulo to
    the exact number of nodes in the cluster we get serial operations.

    Check for explicit configuration parameters for modulo distribution.
    The config setting modulo-nodes has first priority. If modulo-nodes is not
    set, check min-cluster-size. Finally, if neither value is set, determine
    how many peers there are from the cluster relation.

    @side_effect: distributed_wait is called which calls time.sleep()
    @return: None
    '''
    wait = config('known-wait')
    if config('modulo-nodes') is not None:
        # modulo-nodes has first priority
        num_nodes = config('modulo-nodes')
    elif config('min-cluster-size'):
        # min-cluster-size is consulted next
        num_nodes = config('min-cluster-size')
    else:
        # If nothing explicit is configured, determine cluster size based on
        # peer relations
        num_nodes = 1
        for rid in relation_ids('cluster'):
            num_nodes += len(related_units(rid))
    distributed_wait(modulo=num_nodes, wait=wait)
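A minimal sketch of the modulo-distribution idea described in the docstring, assuming each unit knows its own unit number (illustrative only; the real distributed_wait comes from charmhelpers):

import time

def sketch_distributed_wait(unit_number, modulo, wait):
    # Each unit sleeps a different multiple of `wait`, so setting modulo to
    # the cluster size serializes restarts across the cluster.
    time.sleep((unit_number % modulo) * wait)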
def sapi_post_ips():
    """
    Posts PLUMgrid nodes IPs to solutions api server.
    """
    pg_edge_ips = _pg_edge_ips()
    pg_dir_ips = _pg_dir_ips()
    pg_gateway_ips = _pg_gateway_ips()
    pg_dir_ips.append(get_host_ip(unit_get('private-address')))
    pg_edge_ips = '"edge_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_edge_ips))
    pg_dir_ips = '"director_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_dir_ips))
    pg_gateway_ips = '"gateway_ips"' + ':' \
        + '"{}"'.format(','.join(str(i) for i in pg_gateway_ips))
    opsvm_ip = '"opsvm_ip"' + ':' + '"{}"'.format(config('opsvm-ip'))
    virtual_ip = '"virtual_ip"' + ':' \
        + '"{}"'.format(config('plumgrid-virtual-ip'))
    JSON_IPS = ','.join([pg_dir_ips, pg_edge_ips, pg_gateway_ips,
                        opsvm_ip, virtual_ip])
    status = (
        'curl -H \'Content-Type: application/json\' -X '
        'PUT -d \'{{{0}}}\' http://{1}' + ':' + '{2}/v1/zones/{3}/allIps'
    ).format(JSON_IPS, config('lcm-ip'), config('sapi-port'),
             config('sapi-zone'))
    POST_ZONE_IPs = _exec_cmd_output(
        status,
        'Posting Zone IPs to Solutions API server failed!')
    if POST_ZONE_IPs:
        if 'success' in POST_ZONE_IPs:
            log('Successfully posted Zone IPs to Solutions API server!')
        log(POST_ZONE_IPs)
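The hand-assembled JSON above could equally be serialized with the json module; a sketch of that alternative, assuming the same config keys and IP lists:

import json

def sketch_zone_ips_payload(dir_ips, edge_ips, gateway_ips):
    # Equivalent payload to JSON_IPS above, built by json.dumps.
    return json.dumps({
        'director_ips': ','.join(str(i) for i in dir_ips),
        'edge_ips': ','.join(str(i) for i in edge_ips),
        'gateway_ips': ','.join(str(i) for i in gateway_ips),
        'opsvm_ip': config('opsvm-ip'),
        'virtual_ip': config('plumgrid-virtual-ip'),
    })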
def get_cluster_id():
    """ Return cluster id (lp1776171)

    Return cluster ID for MySQL asynchronous replication
    :returns: int cluster_id
    """
    if not config('cluster-id'):
        msg = ("Master / Slave relation requires 'cluster-id' option")
        status_set("blocked", msg)
        raise ClusterIDRequired(msg)
    cluster_id = config('cluster-id')
    for relation_name in ('master', 'slave'):
        for rid in relation_ids(relation_name):
            for unit in related_units(rid):
                if relation_get(attribute='cluster_id',
                                rid=rid,
                                unit=unit) == cluster_id:
                    msg = ("'cluster-id' option must be unique within a "
                           "cluster")
                    status_set('blocked', msg)
                    raise ClusterIDIdentical(msg)
    return cluster_id
Example No. 12
    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
def post_pg_license():
    '''
    Posts PLUMgrid License if it hasn't been posted already.
    '''
    key = config('plumgrid-license-key')
    if key is None:
        log('PLUMgrid License Key not specified')
        return 0
    PG_VIP = config('plumgrid-virtual-ip')
    if not is_ip(PG_VIP):
        raise ValueError('Invalid IP Provided')
    LICENSE_POST_PATH = 'https://%s/0/tenant_manager/license_key' % PG_VIP
    LICENSE_GET_PATH = 'https://%s/0/tenant_manager/licenses' % PG_VIP
    PG_CURL = '%s/opt/pg/scripts/pg_curl.sh' % PG_LXC_PATH
    license = {"key1": {"license": key}}
    licence_post_cmd = [
        PG_CURL,
        '-u',
        'plumgrid:plumgrid',
        LICENSE_POST_PATH,
        '-d',
        json.dumps(license)]
    licence_get_cmd = [PG_CURL, '-u', 'plumgrid:plumgrid', LICENSE_GET_PATH]
    try:
        old_license = subprocess.check_output(licence_get_cmd)
    except subprocess.CalledProcessError:
        log('No response from specified virtual IP')
        return 0
    _exec_cmd(cmd=licence_post_cmd,
              error_msg='Unable to post License', fatal=False)
    new_license = subprocess.check_output(licence_get_cmd)
    if old_license == new_license:
        log('No change in PLUMgrid License')
        return 0
    return 1
Example No. 14
def config_changed():
    # Determine whether vaultlocker is required and install
    if use_vaultlocker():
        installed = len(filter_installed_packages(['vaultlocker'])) == 0
        if not installed:
            apt_install('vaultlocker', fatal=True)

    # Check if an upgrade was requested
    check_for_upgrade()

    # Pre-flight checks
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf')

    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)
    prepare_disks_and_activate()
    install_apparmor_profile()
    add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
Example No. 15
def get_devices():
    devices = []
    if config('osd-devices'):
        for path in config('osd-devices').split(' '):
            path = path.strip()
            # Only accept block devices as-is, or absolute paths that
            # resolve to one (below). This avoids issues with relative
            # directories being passed via configuration, and ensures that
            # the path to a block device provided by the user is used,
            # rather than its target, which may change between reboots in
            # the case of bcache devices.
            if is_block_device(path):
                devices.append(path)
            # Make sure its a device which is specified using an
            # absolute path so that the current working directory
            # or any relative path under this directory is not used
            elif os.path.isabs(path):
                devices.append(os.path.realpath(path))

    # List storage instances for the 'osd-devices'
    # store declared for this charm too, and add
    # their block device paths to the list.
    storage_ids = storage_list('osd-devices')
    devices.extend((storage_get('location', s) for s in storage_ids))

    # Filter out any devices in the action managed unit-local device blacklist
    _blacklist = get_blacklist()
    return [device for device in devices if device not in _blacklist]
def install():
    juju_log('**********install.real')
    rsync(
        charm_dir() + '/packages/vsm-dep-repo',
        '/opt'
    )
    rsync(
        charm_dir() + '/packages/vsmrepo',
        '/opt'
    )
    rsync(
        charm_dir() + '/files/apt.conf',
        '/etc/apt'
    )
    rsync(
        charm_dir() + '/files/vsm.list',
        '/etc/apt/sources.list.d'
    )
    rsync(
        charm_dir() + '/files/vsm-dep.list',
        '/etc/apt/sources.list.d'
    )
    apt_update()
    apt_install(VSM_PACKAGES)
    juju_log('**********finished installing vsm vsm-dashboard python-vsmclient')
    add_source(config('ceph-source'), config('ceph-key'))
    apt_update(fatal=True)
    apt_install(packages=PRE_INSTALL_PACKAGES, fatal=True)
Example No. 17
def install():
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    apt_install(packages=ceph.determine_packages(), fatal=True)
    if config('autotune'):
        tune_network_adapters()
    install_udev_rules()
Example No. 18
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-icehouse'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()),
                    fatal=True)
        status_set('maintenance', 'Git install')
        git_install(config('openstack-origin-git'))
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)

    # Legacy HA for Icehouse
    update_legacy_ha_files()

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
def identity_joined(rid=None):
    juju_log('**********identity-service-relation-joined')
    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return

    public_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, PUBLIC),
        config('api-listening-port')
    )
    internal_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, INTERNAL),
        config('api-listening-port')
    )
    admin_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, ADMIN),
        config('api-listening-port')
    )
    settings = {
        'region': None,
        'service': None,
        'public_url': None,
        'internal_url': None,
        'admin_url': None,
        'vsm_region': config('region'),
        'vsm_service': 'vsm',
        'vsm_public_url': public_url,
        'vsm_internal_url': internal_url,
        'vsm_admin_url': admin_url,
    }
    juju_log("**********settings is %s" % str(settings))
    juju_log("**********relation_id is %s" % str(rid))
    relation_set(relation_id=rid, **settings)
Example No. 20
def get_packages():
    """Return a list of packages for install based on the configured plugin"""
    plugin = remap_plugin(config("plugin"))
    packages = deepcopy(GATEWAY_PKGS[networking_name()][plugin])
    source = get_os_codename_install_source(config("openstack-origin"))
    if plugin == "ovs":
        if source >= "icehouse" and lsb_release()["DISTRIB_CODENAME"] < "utopic":
            # NOTE(jamespage) neutron-vpn-agent supersedes l3-agent for
            # icehouse but openswan was removed in utopic.
            packages.remove("neutron-l3-agent")
            packages.append("neutron-vpn-agent")
            packages.append("openswan")
        if source >= "kilo":
            packages.append("python-neutron-fwaas")
        if source >= "liberty":
            # Switch out mysql driver
            packages.remove("python-mysqldb")
            packages.append("python-pymysql")
            # Switch out to actual metering agent package
            packages.remove("neutron-plugin-metering-agent")
            packages.append("neutron-metering-agent")
    packages.extend(determine_l3ha_packages())

    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)

    return packages
def setup_amqp_req(amqp):
    """Use the amqp interface to request access to the amqp broker using our
    local configuration.
    """
    amqp.request_access(username=hookenv.config('rabbit-user'),
                        vhost=hookenv.config('rabbit-vhost'))
    barbican.assess_status()
Example No. 22
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Get the Kubernetes service IP based on the service cidr address.
    kubernetes_service_ip = get_kubernetes_service_ip()

    domain = hookenv.config('dns_domain')
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]

    # maybe they have extra names they want as SANs
    extra_sans = hookenv.config('extra_sans')
    if extra_sans:
        sans.extend(extra_sans.split())

    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
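With dns_domain set to, say, 'cluster.local' (an example value), the in-cluster SANs render as:

kubernetes
kubernetes.cluster.local
kubernetes.default
kubernetes.default.svc
kubernetes.default.svc.cluster.local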
def neutron_api_relation_joined(rid=None):
    base_url = canonical_url(CONFIGS, INTERNAL)
    neutron_url = '%s:%s' % (base_url, api_port('neutron-server'))
    relation_data = {
        'enable-sriov': config('enable-sriov'),
        'neutron-url': neutron_url,
        'neutron-plugin': config('neutron-plugin'),
    }
    if config('neutron-security-groups'):
        relation_data['neutron-security-groups'] = "yes"
    else:
        relation_data['neutron-security-groups'] = "no"

    if is_api_ready(CONFIGS):
        relation_data['neutron-api-ready'] = "yes"
    else:
        relation_data['neutron-api-ready'] = "no"

    # LP Bug#1805645
    dns_domain = get_dns_domain()
    if dns_domain:
        relation_data['dns-domain'] = dns_domain

    relation_set(relation_id=rid, **relation_data)
    # Nova-cc may have grabbed the neutron endpoint, so kick the
    # identity-service relation to register that it's here
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id, relation_trigger=True)
    def install_benchmark(self):
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/spark-bench'
        if install_sb:
            if not unitdata.kv().get('spark_bench.installed', False):
                if utils.cpu_arch() == 'ppc64le':
                    sb_url = hookenv.config()['spark_bench_ppc64le']
                else:
                    # TODO: may need more arch cases (go with x86 sb for now)
                    sb_url = hookenv.config()['spark_bench_x86_64']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/spark-bench-*')[0]
                # SparkBench expects to live in ~/spark-bench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)
        else:
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
def identity_joined(rid=None, relation_trigger=False):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC),
                                api_port('neutron-server'))
    admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN),
                               api_port('neutron-server'))
    internal_url = '{}:{}'.format(canonical_url(CONFIGS, INTERNAL),
                                  api_port('neutron-server')
                                  )
    rel_settings = {
        'neutron_service': 'neutron',
        'neutron_region': config('region'),
        'neutron_public_url': public_url,
        'neutron_admin_url': admin_url,
        'neutron_internal_url': internal_url,
        'quantum_service': None,
        'quantum_region': None,
        'quantum_public_url': None,
        'quantum_admin_url': None,
        'quantum_internal_url': None,
    }
    if relation_trigger:
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
Example No. 26
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration

    Example config:
        install_sources:
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys:
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load(config(sources_var))
    keys = config(keys_var)
    if keys is not None:
        keys = safe_load(keys)
    if isinstance(sources, basestring) and (
            keys is None or isinstance(keys, basestring)):
        add_source(sources, keys)
    else:
        if not len(sources) == len(keys):
            msg = 'Install sources and keys lists are different lengths'
            raise SourceConfigError(msg)
        for src_num in range(len(sources)):
            add_source(sources[src_num], keys[src_num])
    if update:
        apt_update(fatal=True)
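Walking the docstring example through the code: safe_load turns the two YAML lists into ['ppa:foo', 'http://example.com/repo precise main'] and [None, 'a1b2c3d4'], so the else branch calls add_source pairwise:

add_source('ppa:foo', None)
add_source('http://example.com/repo precise main', 'a1b2c3d4')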
def determine_packages():
    packages = [] + BASE_PACKAGES

    net_manager = network_manager()
    if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
            config('multi-host').lower() == 'yes'):
        packages.extend(['nova-api', 'nova-network'])
    elif net_manager == 'neutron' and neutron_plugin_legacy_mode():
        plugin = neutron_plugin()
        pkg_lists = neutron_plugin_attribute(plugin, 'packages', net_manager)
        for pkg_list in pkg_lists:
            packages.extend(pkg_list)

    if relation_ids('ceph'):
        packages.append('ceph-common')

    virt_type = config('virt-type')
    try:
        packages.extend(VIRT_TYPES[virt_type])
    except KeyError:
        log('Unsupported virt-type configured: %s' % virt_type)
        raise
    if enable_nova_metadata():
        packages.append('nova-api-metadata')

    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)

    return packages
Example No. 28
def create_ogr_zone(args):
    aggr_name = action_get('aggregate-name')
    avail_zone = action_get('avail-zone')
    ogr_compute = action_get('ogr-compute')

    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-create {} {}'"\
          .format(aggr_name, avail_zone)
    commands.getoutput(cmd)
    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-add-host {} {}'"\
          .format(aggr_name, ogr_compute)
    commands.getoutput(cmd)
    if config("openstack-version") == "liberty" or \
       config("openstack-version") == "mitaka":
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-details {}'"\
              .format(aggr_name)
    else:
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-show {}'"\
              .format(aggr_name)
    res = commands.getoutput(cmd)
    action_set({'result-map.message': res})
    relation_info = {
        'aggr-name': aggr_name
    }
    if config("openstack-version") == "pike" or \
       config("openstack-version") == "ocata":
        for rid in relation_ids('neutron-api-cplane'):
            for unit in related_units(rid):
                relation_set(relation_id=rid, relation_settings=relation_info)
    def __call__(self):
        ctxt = super(NovaComputeCephContext, self).__call__()
        if not ctxt:
            return {}
        svc = service_name()
        # secret.xml
        ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID
        # nova.conf
        ctxt['service_name'] = svc
        ctxt['rbd_user'] = svc
        ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID
        ctxt['rbd_pool'] = config('rbd-pool')

        if (config('libvirt-image-backend') == 'rbd' and
                assert_libvirt_rbd_imagebackend_allowed()):
            ctxt['libvirt_rbd_images_ceph_conf'] = ceph_config_file()

        rbd_cache = config('rbd-client-cache') or ""
        if rbd_cache.lower() == "enabled":
            # We use write-through only to be safe for migration
            ctxt['rbd_client_cache_settings'] = \
                {'rbd cache': 'true',
                 'rbd cache size': '64 MiB',
                 'rbd cache max dirty': '0 MiB',
                 'rbd cache writethrough until flush': 'true',
                 'admin socket': '/var/run/ceph/rbd-client-$pid.asok'}

            asok_path = '/var/run/ceph/'
            if not os.path.isdir(asok_path):
                os.mkdir(asok_path)

        elif rbd_cache.lower() == "disabled":
            ctxt['rbd_client_cache_settings'] = {'rbd cache': 'false'}

        return ctxt
Example No. 30
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to network reference architecture
    # configuration
    for rid in relation_ids('identity-service'):
        keystone_joined(rid)
    for rid in relation_ids('image-service'):
        image_service_joined(rid)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)
Example No. 31
def update_autopurge_purge_interval():
    purge_interval = hookenv.config().get('autopurge_purge_interval')
    if data_changed('zk.autopurge_purge_interval', purge_interval):
        _restart_zookeeper('updating snapshot purge interval')
Example No. 32
def get_ceph_context(upgrading=False):
    """Returns the current context dictionary for generating ceph.conf

    :param upgrading: bool - determines if the context is invoked as
                      part of an upgrade procedure. Setting this to true
                      causes settings useful during an upgrade to be
                      defined in the ceph.conf file
    """
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'crush_initial_weight': config('crush-initial-weight'),
        'osd_journal_size': config('osd-journal-size'),
        'osd_max_backfills': config('osd-max-backfills'),
        'osd_recovery_max_active': config('osd-recovery-max-active'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
        'short_object_len': use_short_objects(),
        'upgrade_in_progress': upgrading,
        'bluestore': config('bluestore'),
        'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if config('customize-failure-domain'):
        az = az_info()
        if az:
            cephcontext['crush_location'] = "root=default {} host={}" \
                .format(az, socket.gethostname())
        else:
            log(
                "Your Juju environment doesn't"
                "have support for Availability Zones"
            )

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'osd']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
Example No. 33
def install():
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    apt_install(packages=ceph.determine_packages(), fatal=True)
    if config('autotune'):
        tune_network_adapters()
Example No. 34
from charmhelpers.core.hookenv import (
    config,
    local_unit,
    related_units,
    relation_ids,
    relation_get,
    relation_set,
    status_set,
    leader_get,
    log,
    INFO,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.core.unitdata import kv
import common_utils
import docker_utils

config = config()

MODULE = "controller"

BASE_CONFIGS_PATH = "/etc/contrail"

CONFIG_API_CONFIGS_PATH = BASE_CONFIGS_PATH + "/config_api"
CONFIG_DATABASE_CONFIGS_PATH = BASE_CONFIGS_PATH + "/config_database"
CONTROL_CONFIGS_PATH = BASE_CONFIGS_PATH + "/control"
WEBUI_CONFIGS_PATH = BASE_CONFIGS_PATH + "/webui"
REDIS_CONFIGS_PATH = BASE_CONFIGS_PATH + "/redis"

IMAGES = [
    "contrail-node-init",
    "contrail-nodemgr",
    "contrail-controller-config-api",
Example No. 35
def assess_status(configs):
    assess_status_func(configs)()
    hookenv.application_version_set(config('cplane-version'))
Example No. 36
]

METADATA_AGENT_INI = '/etc/neutron/metadata_agent.ini'
NEUTRON_CONF_DIR = "/etc/neutron"
NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR

ML2_CONFIG = '/etc/neutron/plugins/ml2/ml2_conf.ini'
if release >= 'mitaka':
    ML2_CONFIG = '/etc/neutron/plugins/ml2/linuxbridge_agent.ini'

BASE_RESOURCE_MAP = OrderedDict([
    (NEUTRON_CONF, {
        'services': ['neutron'],
        'contexts': [
            context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR),
            context.SharedDBContext(user=config('database-user'),
                                    database=config('database'),
                                    ssl_dir=NEUTRON_CONF_DIR),
            context.PostgresqlDBContext(database=config('database')),
            cplane_context.IdentityServiceContext(),
            cplane_context.NeutronCCContext(),
            context.SyslogContext(),
            context.ZeroMQContext(),
            context.NotificationDriverContext(),
            context.BindHostContext(),
            context.WorkerConfigContext()
        ],
    }),
    (METADATA_AGENT_INI, {
        'services': ['metadata-agent'],
        'contexts': [
Example No. 37
def update_autopurge_snap_retain_count():
    snap_retain = hookenv.config().get('autopurge_snap_retain_count')
    if data_changed('zk.autopurge_snap_retain_count', snap_retain):
        _restart_zookeeper('updating number of retained snapshots')
def enable_local_dhcp():
    return config('enable-local-dhcp-and-metadata')
def use_dpdk():
    '''Determine whether DPDK should be used'''
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse'))
    return (cmp_release >= 'mitaka' and config('enable-dpdk'))
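Both use_dpdk above and enable_sriov later in this section rely on CompareOpenStackReleases, which orders OpenStack codenames by release sequence; an illustrative check:

cmp_release = CompareOpenStackReleases('mitaka')
assert cmp_release >= 'kilo'          # kilo predates mitaka
assert not (cmp_release >= 'newton')  # newton is a later release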
Example No. 40
import shutil

from subprocess import call, check_call

from charmhelpers.core import hookenv
from charmhelpers.core import unitdata

from charms import reactive
from charms.reactive import hook
from charms.reactive import helpers
from charms.reactive import when, when_not, when_any

db = unitdata.kv()
config = hookenv.config()

app_name = 'ims-tools'
docker_image = 'tads2015da/ims-tools-server:latest'

@when('docker.available')
@when_not('app.available')
def install_app():
    hookenv.status_set('maintenance', 'Pulling application')
    check_call(['docker', 'pull', docker_image])

    # open ports: HTTP
    hookenv.open_port(7071, 'TCP')

    reactive.set_state('app.available')

def remove_container():
    try:
def configure_ovs():
    status_set('maintenance', 'Configuring ovs')
    if not service_running('openvswitch-switch'):
        full_restart()
    datapath_type = determine_datapath_type()
    add_bridge(INT_BRIDGE, datapath_type)
    add_bridge(EXT_BRIDGE, datapath_type)
    ext_port_ctx = None
    if use_dvr():
        ext_port_ctx = ExternalPortContext()()
    if ext_port_ctx and ext_port_ctx['ext_port']:
        add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])

    bridgemaps = None
    if not use_dpdk():
        portmaps = DataPortContext()()
        bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
        for br in bridgemaps.values():
            add_bridge(br, datapath_type)
            if not portmaps:
                continue

            for port, _br in portmaps.items():
                if _br == br:
                    if not is_linuxbridge_interface(port):
                        add_bridge_port(br, port, promisc=True)
                    else:
                        add_ovsbridge_linuxbridge(br, port)
    else:
        # NOTE: when in dpdk mode, add based on pci bus order
        #       with type 'dpdk'
        bridgemaps = neutron_ovs_context.resolve_dpdk_bridges()
        bondmaps = neutron_ovs_context.resolve_dpdk_bonds()
        device_index = 0
        bridge_bond_map = DPDKBridgeBondMap()
        for pci_address, br in bridgemaps.items():
            add_bridge(br, datapath_type)
            portname = 'dpdk{}'.format(device_index)
            if pci_address in bondmaps:
                bond = bondmaps[pci_address]
                bridge_bond_map.add_port(br, bond, portname, pci_address)
            else:
                dpdk_add_bridge_port(br, portname, pci_address)
            device_index += 1

        bond_configs = DPDKBondsConfig()
        for br, bonds in bridge_bond_map.items():
            for bond, t in bonds.items():
                dpdk_add_bridge_bond(br, bond, *t)
                dpdk_set_bond_config(bond, bond_configs.get_bond_config(bond))

    target = config('ipfix-target')
    bridges = [INT_BRIDGE, EXT_BRIDGE]
    if bridgemaps:
        bridges.extend(bridgemaps.values())

    if target:
        for bridge in bridges:
            disable_ipfix(bridge)
            enable_ipfix(bridge, target)
    else:
        # NOTE: removing ipfix setting from a bridge is idempotent and
        #       will pass regardless of the existence of the setting
        for bridge in bridges:
            disable_ipfix(bridge)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    # NOTE(ajkavanagh) for pause/resume we don't gate this as it's not a
    # running service, but rather running a few commands.
    service_restart('os-charm-phy-nic-mtu')
def enable_sriov():
    '''Determine whether SR-IOV is enabled and supported'''
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse'))
    return (cmp_release >= 'kilo' and config('enable-sriov'))
Example No. 43
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    config = hookenv.config()

    required_fields = [
        'auth_url',
        'region',
        'username',
        'password',
        'user_domain_name',
        'project_domain_name',
        'project_name',
    ]
    optional_fields = [
        'endpoint_tls_ca',
    ]
    # pre-populate with empty values to avoid key and arg errors
    creds_data = {field: '' for field in required_fields + optional_fields}

    try:
        # try to use Juju's trust feature
        try:
            log('Checking credential-get for credentials')
            result = subprocess.run(['credential-get'],
                                    check=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            _creds_data = yaml.safe_load(result.stdout.decode('utf8'))
            _merge_if_set(creds_data, _normalize_creds(_creds_data))
        except FileNotFoundError:
            pass  # juju trust not available
        except subprocess.CalledProcessError as e:
            if 'permission denied' not in e.stderr.decode('utf8'):
                raise

        # merge in combined credentials config
        if config['credentials']:
            try:
                log('Using "credentials" config values for credentials')
                _creds_data = b64decode(config['credentials']).decode('utf8')
                _creds_data = json.loads(_creds_data)
                _merge_if_set(creds_data, _normalize_creds(_creds_data))
            except (ValueError, TypeError, binascii.Error,
                    json.JSONDecodeError, UnicodeDecodeError) as e:
                if isinstance(e, ValueError) and \
                   str(e).startswith('unsupported auth-type'):
                    raise  # handled below
                log_err('Invalid value for credentials config\n{}',
                        format_exc())
                status.blocked('invalid value for credentials config: '
                               '{}'.format(e))
                return False

        # merge in individual config
        _merge_if_set(creds_data, _normalize_creds(config))
    except ValueError as e:
        if str(e).startswith('unsupported auth-type'):
            log_err(str(e))
            status.blocked(str(e))
            return False

    if all(creds_data[k] for k in required_fields):
        _save_creds(creds_data)
        return True
    elif not any(creds_data[k] for k in required_fields):
        # no creds provided
        status.blocked('missing credentials; '
                       'grant with `juju trust` or set via config')
        return False
    else:
        missing = [k for k in required_fields if not creds_data[k]]
        s = 's' if len(missing) > 1 else ''
        msg = 'missing required credential{}: {}'.format(s, ', '.join(missing))
        log_err(msg)
        status.blocked(msg)
        return False
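_normalize_creds and _merge_if_set are helpers defined elsewhere in this charm; as a rough sketch of the merge semantics implied above (hypothetical, not the charm's actual implementation), the merge only overwrites with non-empty incoming values:

def _merge_if_set_sketch(dest, incoming):
    for key, value in incoming.items():
        if key in dest and value:
            dest[key] = value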
def configure_sriov():
    '''Configure SR-IOV devices based on provided configuration options

    NOTE(fnordahl): Boot time configuration is done by init script
    installed by this charm.

    This function only does runtime configuration!
    '''
    charm_config = config()
    if not enable_sriov():
        return

    # make sure init script has correct mode and that boot time execution
    # is enabled
    os.chmod(NEUTRON_SRIOV_INIT_SCRIPT, 0o755)
    service('enable', 'neutron-openvswitch-networking-sriov')

    if charm_config.changed('sriov-numvfs'):
        devices = PCINetDevices()
        sriov_numvfs = charm_config.get('sriov-numvfs')

        # automatic configuration of all SR-IOV devices
        if sriov_numvfs == 'auto':
            log('Configuring SR-IOV device VF functions in auto mode')
            for device in devices.pci_devices:
                if device and device.sriov:
                    log("Configuring SR-IOV device"
                        " {} with {} VF's".format(device.interface_name,
                                                  device.sriov_totalvfs))
                    # NOTE(fnordahl): run-time change of numvfs is disallowed
                    # without resetting to 0 first.
                    device.set_sriov_numvfs(0)
                    device.set_sriov_numvfs(device.sriov_totalvfs)
        else:
            # Single int blanket configuration
            try:
                log('Configuring SR-IOV device VF functions'
                    ' with blanket setting')
                for device in devices.pci_devices:
                    if device and device.sriov:
                        numvfs = min(int(sriov_numvfs), device.sriov_totalvfs)
                        if int(sriov_numvfs) > device.sriov_totalvfs:
                            log('Requested value for sriov-numvfs ({}) too '
                                'high for interface {}. Falling back to '
                                'interface totalvfs '
                                'value: {}'.format(sriov_numvfs,
                                                   device.interface_name,
                                                   device.sriov_totalvfs))
                        log("Configuring SR-IOV device {} with {} "
                            "VFs".format(device.interface_name, numvfs))
                        # NOTE(fnordahl): run-time change of numvfs is
                        # disallowed without resetting to 0 first.
                        device.set_sriov_numvfs(0)
                        device.set_sriov_numvfs(numvfs)
            except ValueError:
                # <device>:<numvfs>[ <device>:numvfs] configuration
                sriov_numvfs = sriov_numvfs.split()
                for device_config in sriov_numvfs:
                    log('Configuring SR-IOV device VF functions per interface')
                    interface_name, numvfs = device_config.split(':')
                    device = devices.get_device_from_interface_name(
                        interface_name)
                    if device and device.sriov:
                        if int(numvfs) > device.sriov_totalvfs:
                            log('Requested value for sriov-numvfs ({}) too '
                                'high for interface {}. Falling back to '
                                'interface totalvfs '
                                'value: {}'.format(numvfs,
                                                   device.interface_name,
                                                   device.sriov_totalvfs))
                            numvfs = device.sriov_totalvfs
                        log("Configuring SR-IOV device {} with {} "
                            "VF's".format(device.interface_name, numvfs))
                        # NOTE(fnordahl): run-time change of numvfs is
                        # disallowed without resetting to 0 first.
                        device.set_sriov_numvfs(0)
                        device.set_sriov_numvfs(int(numvfs))

        # Trigger remote restart in parent application
        remote_restart('neutron-plugin', 'nova-compute')

        # Restart of SRIOV agent is required after changes to system runtime
        # VF configuration
        cmp_release = CompareOpenStackReleases(
            os_release('neutron-common', base='icehouse'))
        if cmp_release >= 'mitaka':
            service_restart('neutron-sriov-agent')
        else:
            service_restart('neutron-plugin-sriov-agent')
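The three branches above accept three formats for the sriov-numvfs config option (example values):

'auto'          -> every SR-IOV device gets its sriov_totalvfs maximum
'4'             -> blanket setting; each device gets min(4, sriov_totalvfs)
'eth0:8 eth1:4' -> per-interface <device>:<numvfs> pairs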
Example No. 45
def send_port(http):
    http.configure(config('port'))
Example No. 46
import os

from subprocess import run, CalledProcessError, PIPE
from charms.reactive import (
    when,
    when_not,
    set_flag,
    endpoint_from_flag,
)
from charmhelpers.core.hookenv import (
    log,
    status_set,
    config,
    charm_dir,
)

conf = config()


@when_not('nodejs.installed')
def install_layer_nodejs():
    # Install nvm
    try:
        # NOTE: demote() (returns a preexec_fn that drops privileges to the
        # given uid/gid) is assumed to be defined elsewhere in this layer.
        output = run([charm_dir() + "/files/install_nvm.sh"],
                     preexec_fn=demote(1000, 1000))
        output.check_returncode()
    except CalledProcessError as e:
        log(e)
        status_set('blocked', 'Failed to install Nodejs.')
        return
    # Create symb links so root can find nvm / node commands
    os.symlink("/home/ubuntu/bin/nvm", "/usr/local/bin/nvm")
Example No. 47
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()

    resources = {
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    resource_params = {
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_cinder_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_cinder_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_cinder_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_cinder_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_cinder_vips': ' '.join(vip_group)})

    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example No. 48
def start_application_service():
    # Remove source.available to allow it to be re-triggered
    remove_state('wsgi.source.available')
    remove_state('wsgi.available')

    # Install application dependencies
    status_set('maintenance', '[wsgi] Installing application dependencies')

    cache_dir = config('pip_cache_dir')

    if os.path.isfile('requirements.txt'):
        if cache_dir:
            log('[wsgi] Installing pip dependencies from {}'.format(cache_dir))
            subprocess.check_call([
                'pip3',
                'install',
                '--no-index',
                '--find-links',
                cache_dir,
                '--requirement',
                'requirements.txt',
            ],
                                  cwd=layer_config['application_root'],
                                  env=dict(LC_ALL='C.UTF-8',
                                           **get_env(env_file)))
        else:
            log('[wsgi] Installing pip dependencies from PyPI')
            subprocess.check_call(
                ['pip3', 'install', '--requirement', 'requirements.txt'],
                cwd=layer_config['application_root'],
                env=dict(LC_ALL='C.UTF-8', **get_env(env_file)))

    set_state('wsgi.ready')

    # Check for a database connection
    log('[wsgi] Checking for database connection')
    postgres_relations = relations_of_type('postgres')
    mongo_relations = relations_of_type('mongo')
    db_relation = None

    if postgres_relations:
        db_relation = postgres_relations[0]
        db_scheme = "postgresql"
    elif mongo_relations:
        db_relation = mongo_relations[0]
        db_scheme = "mongodb"

    if db_relation:
        db_host = db_relation.get('host') or db_relation.get('hostname')
        db_port = db_relation.get('port')
        log('[wsgi] Using database at {}:{}'.format(db_host, db_port))
        database_url = urlunparse((db_scheme,
                                   build_url_host(db_host, db_port,
                                                  db_relation.get('user'),
                                                  db_relation.get('password')),
                                   db_relation.get('database',
                                                   ''), None, None, None))
        set_env_values(env_file, {'DATABASE_URL': database_url})

        provision_command = layer_config.get('provision_command')

        if provision_command:
            status_set('maintenance', '[wsgi] Provisioning database')
            subprocess.check_call(provision_command.split(),
                                  cwd=layer_config['application_root'],
                                  env=get_env(env_file),
                                  preexec_fn=demote(
                                      get_user(layer_config['username'])))
    else:
        log('[wsgi] No database attached')
        delete_env_value(env_file, 'DATABASE_URL')

    # Open the configured port
    status_set('maintenance', '[wsgi] Opening port {}'.format(config('port')))
    log('[wsgi] Opening port {}'.format(config('port')))
    open_port(config('port'))

    # Configure a systemd service to run gunicorn
    service_name = 'gunicorn3.service'
    service_file = '/etc/systemd/system/{}'.format(service_name)
    log('[wsgi] Writing systemd config to {}'.format(service_file))
    status_set('maintenance', '[wsgi] Preparing daemon')
    render(source='{}.j2'.format(service_name),
           target=service_file,
           perms=0o644,
           context={
               'application_root': layer_config['application_root'],
               'env_file': env_file,
               'wsgi_module': config('wsgi_module'),
               'user': layer_config['username'],
               'group': layer_config['username'],
               'port': config('port'),
               'env': get_env(env_file)
           })
    subprocess.check_call(['systemctl', 'daemon-reload'])

    if service_running(service_name):
        log('[wsgi] Reloading {}'.format(service_name))
        service_reload(service_name)
    else:
        log('[wsgi] Starting {}'.format(service_name))
        service_start(service_name)

    # Try up to 10 times to check if the service started
    service_responding = False
    for attempt in range(10):
        log('[wsgi] Waiting for service on port {} (attempt {})'.format(
            config('port'), attempt))
        if service_running(service_name) and is_port_open(config('port')):
            service_responding = True
            break
        sleep(6)

    if service_responding:
        log('[wsgi] Service responded on port {}'.format(config('port')))
        status_set('active',
                   '[wsgi] Service started on port {}'.format(config('port')))
        set_state('wsgi.available')
    else:
        raise socket.error('Service not responding')
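The is_port_open() helper polled above is defined elsewhere in the layer;
a minimal sketch of what it is assumed to do (a plain TCP connect probe
against localhost) could look like:

import socket

def is_port_open(port, host='127.0.0.1', timeout=2):
    # Assumed helper: True if a TCP connection to host:port succeeds.
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:
        return False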
Example No. 49
def amqp_joined(relation_id=None):
    conf = config()
    relation_set(relation_id=relation_id,
                 username=conf['rabbit-user'], vhost=conf['rabbit-vhost'])
Example No. 50
def install_from_config(config_var_name):
    """Install a file from config."""
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)
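Usage then reduces to a single call, assuming a hypothetical charm config
option such as 'plugin-source' that holds a fetchable URL:

# 'plugin-source' is a made-up option name; install_remote() fetches
# and unpacks the source, returning the installed path.
dest = install_from_config('plugin-source')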
Example No. 51
    def __call__(self):
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        if self.neutron_l3ha:
            max_agents = config('max-l3-agents-per-router')
            min_agents = config('min-l3-agents-per-router')
            if max_agents < min_agents:
                raise ValueError("max-l3-agents-per-router ({}) must be >= "
                                 "min-l3-agents-per-router "
                                 "({})".format(max_agents, min_agents))

            ctxt['max_l3_agents_per_router'] = max_agents
            ctxt['min_l3_agents_per_router'] = min_agents

        ctxt['allow_automatic_l3agent_failover'] = \
            config('allow-automatic-l3agent-failover')
        ctxt['allow_automatic_dhcp_failover'] = \
            config('allow-automatic-dhcp-failover')

        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['tenant_network_types'] = self.neutron_tenant_network_types
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        release = os_release('neutron-server')
        cmp_release = CompareOpenStackReleases(release)
        ctxt['enable_igmp_snooping'] = self.neutron_igmp_snoop
        if config('neutron-plugin') == 'vsp' and cmp_release < 'newton':
            _config = config()
            for k, v in _config.items():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if cmp_release >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['vsd_cms_id'] = cms_id_value
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        enable_dns_extension_driver = False

        dns_domain = get_dns_domain()
        if dns_domain:
            enable_dns_extension_driver = True
            ctxt['dns_domain'] = dns_domain

        if cmp_release >= 'mitaka':
            for rid in relation_ids('external-dns'):
                if related_units(rid):
                    enable_dns_extension_driver = True

            # AZAwareWeightScheduler inherits from WeightScheduler and is
            # available as of mitaka
            ctxt['network_scheduler_driver'] = (
                'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler'
            )
            ctxt['dhcp_load_type'] = config('dhcp-load-type')

        extension_drivers = []
        if config('enable-ml2-port-security'):
            extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
        if enable_dns_extension_driver:
            if cmp_release < 'queens':
                extension_drivers.append(EXTENSION_DRIVER_DNS)
            else:
                extension_drivers.append(EXTENSION_DRIVER_DNS_DOMAIN_PORTS)

        if is_qos_requested_and_valid():
            extension_drivers.append(EXTENSION_DRIVER_QOS)

        if extension_drivers:
            ctxt['extension_drivers'] = ','.join(extension_drivers)

        ctxt['enable_sriov'] = config('enable-sriov')

        if cmp_release >= 'mitaka':
            if config('global-physnet-mtu'):
                ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
                if config('path-mtu'):
                    ctxt['path_mtu'] = config('path-mtu')
                else:
                    ctxt['path_mtu'] = config('global-physnet-mtu')
                physical_network_mtus = config('physical-network-mtus')
                if physical_network_mtus:
                    ctxt['physical_network_mtus'] = ','.join(
                        physical_network_mtus.split())

        if 'kilo' <= cmp_release <= 'mitaka':
            pci_vendor_devs = config('supported-pci-vendor-devs')
            if pci_vendor_devs:
                ctxt['supported_pci_vendor_devs'] = \
                    ','.join(pci_vendor_devs.split())

        ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()

        n_load_balancer_settings = NeutronLoadBalancerContext()()
        if n_load_balancer_settings:
            ctxt.update(n_load_balancer_settings)

        if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
            ctxt['service_plugins'] = []
            service_plugins = {
                'icehouse': [
                    ('neutron.services.l3_router.l3_router_plugin.'
                     'L3RouterPlugin'),
                    'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                    'neutron.services.vpn.plugin.VPNDriverPlugin',
                    ('neutron.services.metering.metering_plugin.'
                     'MeteringPlugin')],
                'juno': [
                    ('neutron.services.l3_router.l3_router_plugin.'
                     'L3RouterPlugin'),
                    'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                    'neutron.services.vpn.plugin.VPNDriverPlugin',
                    ('neutron.services.metering.metering_plugin.'
                     'MeteringPlugin')],
                'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
                'liberty': ['router', 'firewall', 'lbaas', 'vpnaas',
                            'metering'],
                'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas',
                           'metering'],
                'newton': ['router', 'firewall', 'vpnaas', 'metering',
                           ('neutron_lbaas.services.loadbalancer.plugin.'
                            'LoadBalancerPluginv2')],
                'ocata': ['router', 'firewall', 'vpnaas', 'metering',
                          ('neutron_lbaas.services.loadbalancer.plugin.'
                           'LoadBalancerPluginv2'), 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
                'pike': ['router', 'firewall', 'metering', 'segments',
                         ('neutron_lbaas.services.loadbalancer.plugin.'
                          'LoadBalancerPluginv2'),
                         ('neutron_dynamic_routing.'
                          'services.bgp.bgp_plugin.BgpPlugin')],
                'queens': ['router', 'firewall', 'metering', 'segments',
                           ('neutron_lbaas.services.loadbalancer.plugin.'
                            'LoadBalancerPluginv2'),
                           ('neutron_dynamic_routing.'
                            'services.bgp.bgp_plugin.BgpPlugin')],
                'rocky': ['router', 'firewall', 'metering', 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
                'stein': ['router', 'firewall_v2', 'metering', 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
                'train': ['router', 'firewall_v2', 'metering', 'segments',
                          ('neutron_dynamic_routing.'
                           'services.bgp.bgp_plugin.BgpPlugin')],
                # TODO: FWaaS was deprecated at Ussuri and will be removed
                # during the W cycle
            }
            if cmp_release >= 'rocky' and cmp_release < 'train':
                if ctxt.get('load_balancer_name', None):
                    # TODO(fnordahl): Remove when ``neutron_lbaas`` is retired
                    service_plugins[release].append('lbaasv2-proxy')
                else:
                    # TODO(fnordahl): Remove fall-back in next charm release
                    service_plugins[release].append('lbaasv2')

            # TODO: FWaaS was deprecated at Ussuri and will be removed
            # during the W cycle
            if cmp_release >= 'stein':
                ctxt['firewall_v2'] = True

            ctxt['service_plugins'] = service_plugins.get(
                release, service_plugins['stein'])

            if is_nsg_logging_enabled() or is_nfg_logging_enabled():
                ctxt['service_plugins'].append('log')

            if is_port_forwarding_enabled():
                ctxt['service_plugins'].append('port_forwarding')

            if is_qos_requested_and_valid():
                ctxt['service_plugins'].append('qos')

            if is_vlan_trunking_requested_and_valid():
                ctxt['service_plugins'].append('trunk')

            ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])

        return ctxt
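Note that the final lookup falls back to the 'stein' plugin list for any
release missing from the table; a quick sketch of that behaviour for a
hypothetical post-train release:

# 'ussuri' is absent from the table, so the 'stein' list is returned.
plugins = service_plugins.get('ussuri', service_plugins['stein'])
# ['router', 'firewall_v2', 'metering', 'segments',
#  'neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin']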
Example No. 52
def identity_joined(rid=None):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    settings = {}

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint '
                 'registration')
        return

    cinder_release = os_release('cinder-common')
    if CompareOpenStackReleases(cinder_release) < 'pike':
        public_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)
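As an illustration, for a hypothetical queens deployment with canonical
URL https://cinder.example.com and API port 8776, only the v2 and v3
endpoints are registered, e.g.:

# Hypothetical resulting values: queens is >= icehouse and >= pike,
# so v2 and v3 endpoints are registered but no v1.
settings = {
    'cinderv2_public_url': 'https://cinder.example.com:8776/v2/$(tenant_id)s',
    'cinderv3_public_url': 'https://cinder.example.com:8776/v3/$(tenant_id)s',
}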
Example No. 53
    def neutron_security_groups(self):
        return config('neutron-security-groups')
Example No. 54
def get_l2population():
    plugin = config('neutron-plugin')
    return config('l2-population') if plugin == "ovs" else False
Example No. 55
    def __call__(self):
        ctxt = super(IdentityServiceContext, self).__call__()
        if not ctxt:
            return
        ctxt['region'] = config('region')
        return ctxt
Example No. 56
    def neutron_igmp_snoop(self):
        return config('enable-igmp-snooping')
Example No. 57
    def __call__(self):
        ctxt = {}
        rate_rules = hookenv.config('api-rate-limit-rules')
        if rate_rules:
            ctxt['api_rate_limit_rules'] = rate_rules
        return ctxt
Example No. 58
    def plugin(self):
        return config('neutron-plugin')
Example No. 59
    def __call__(self):
        ctxt = super(NovaIPv6Context, self).__call__()
        ctxt['use_ipv6'] = hookenv.config('prefer-ipv6')
        return ctxt
Example No. 60
def set_available(cni):
    ''' Indicate to the CNI provider that we're ready. '''
    cni.set_config(cidr=config('cidr'), cni_conf_file='10-flannel.conflist')
    set_state('flannel.cni.available')