Example #1
def update_etc_hosts():
    hostname = socket.gethostname()
    addr = cassandra.listen_ip_address()
    hosts_map = {addr: hostname}
    # only need to add myself to /etc/hosts
    update_hosts_file('/etc/hosts', hosts_map)
    reactive.set_flag('cassandra.etchosts.done')
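update_hosts_file() is defined elsewhere in the charm. A minimal sketch of what such a helper could look like, assuming it only needs to add or refresh address-to-hostname entries while leaving unrelated lines intact (the body below is an assumption, not the charm's actual code):

def update_hosts_file(path, hosts_map):
    # Hypothetical sketch: ensure each addr -> hostname pair appears in
    # the hosts file, replacing any stale entry for the same address.
    with open(path) as f:
        lines = f.readlines()
    # Keep comments, blank lines, and entries for other addresses.
    kept = [l for l in lines
            if not l.split() or l.split()[0] not in hosts_map]
    entries = ['{} {}\n'.format(addr, name) for addr, name in hosts_map.items()]
    with open(path, 'w') as f:
        f.writelines(kept + entries)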
Example #2
def reset_auth_keyspace_replication():
    # Cassandra requires you to manually set the replication factor of
    # the system_auth keyspace, to ensure availability and redundancy.
    # The recommendation is to set the replication factor so that every
    # node has a copy.
    ep = reactive.endpoint_from_name('cluster')
    num_nodes = len(ep.all_bootstrapped_units) + 1
    datacenter = cassandra.config()['datacenter']
    with cassandra.connect() as session:
        strategy_opts = cassandra.get_auth_keyspace_replication(session)
        rf = int(strategy_opts.get(datacenter, -1))
        hookenv.log('Current system_auth replication strategy is {!r}'.format(strategy_opts))
        if rf != num_nodes:
            strategy_opts['class'] = 'NetworkTopologyStrategy'
            strategy_opts[datacenter] = num_nodes
            if 'replication_factor' in strategy_opts:
                del strategy_opts['replication_factor']
            hookenv.log('New system_auth replication strategy is {!r}'.format(strategy_opts))
            status, msg = hookenv.status_get()
            helpers.status_set(status, 'Updating system_auth rf to {!r}'.format(strategy_opts))
            cassandra.set_auth_keyspace_replication(session, strategy_opts)
            if rf < num_nodes:
                # Increasing rf, need to run repair.
                cassandra.repair_auth_keyspace()
            helpers.status_set(status, msg)
    reactive.set_flag('cassandra.authkeyspace.done')
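set_auth_keyspace_replication() ultimately has to issue an ALTER KEYSPACE statement against Cassandra. A sketch of what that might look like with a driver session (the helper body here is an assumption):

import json

def set_auth_keyspace_replication(session, options):
    # Hypothetical sketch. CQL map literals use single-quoted strings, so
    # convert e.g. {'class': 'NetworkTopologyStrategy', 'juju': 3} accordingly.
    replication = json.dumps(options).replace('"', "'")
    session.execute('ALTER KEYSPACE system_auth WITH REPLICATION = ' + replication)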
Example #3
def configure_ntpmon():
    """
    Reconfigure ntpmon - does nothing at present
    """
    log('configuring ntpmon')
    set_flag('ntpmon.configured')
    clear_flag('ntpmon.started')
Example #4
def reset_limits():
    '''Set /etc/security/limits.d correctly for Ubuntu, so the
    startup scripts don't emit a spurious warning.

    Per Cassandra documentation, Ubuntu needs some extra
    twiddling in /etc/security/limits.d. I have no idea why
    the packages don't do this, since they are already
    setting limits for the cassandra user correctly. The real
    bug is that the limits of the user running the startup script
    are being checked, rather than the limits of the user that will
    actually run the process.
    '''
    contents = dedent('''\
                      # Maintained by Juju
                      root - memlock unlimited
                      root - nofile 100000
                      root - nproc 32768
                      root - as unlimited
                      ubuntu - memlock unlimited
                      ubuntu - nofile 100000
                      ubuntu - nproc 32768
                      ubuntu - as unlimited
                      ''')
    host.write_file('/etc/security/limits.d/cassandra-charm.conf', contents.encode('US-ASCII'))
    reactive.set_flag("cassandra.limits.done")
Example #5
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))

        if service_name:
            if using_systemd:
                systemd_config = '/etc/systemd/system/' + service_name + '.service'
                log('installing systemd service: {}'.format(service_name))
                with open(systemd_config, 'w') as conffile:
                    conffile.write(templating.render('src/' + service_name + '.systemd', layer.options.get('ntpmon')))
                subprocess.call(['systemctl', 'daemon-reload'])
            else:
                upstart_config = '/etc/init/' + service_name + '.conf'
                log('installing upstart service: {}'.format(service_name))
                with open(upstart_config, 'w') as conffile:
                    conffile.write(templating.render('src/' + service_name + '.upstart', layer.options.get('ntpmon')))

    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')
Example #6
def disable_ufw():
    '''Older versions of the charm used UFW to block access to
    replication and JMX ports. Turn it off, as we now trust Juju
    to control this, and firewalls to be used appropriately when it can't.
    '''
    hookenv.log('Disabling UFW, no longer used by this charm')
    ufw.disable()
    reactive.set_flag('cassandra.ufw.disabled')
Example #7
def install_maintenance_crontab():
    # Every unit should run repair once per week (at least once per
    # GCGraceSeconds, which defaults to 10 days but can be changed per
    # keyspace).
    unit_num = int(hookenv.local_unit().split('/')[-1])
    # Distribute the repair time evenly over the week.
    dow, hour, minute = helpers.week_spread(unit_num)
    cron_path = "/etc/cron.d/cassandra-maintenance"
    templating.render('cassandra_maintenance_cron.tmpl', cron_path, vars())
    reactive.set_flag('cassandra.crontab.installed')
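helpers.week_spread() is not shown here. A minimal sketch of one way it could work, assuming all that is required is a deterministic, roughly even spread of units across the week:

def week_spread(unit_num):
    # Hypothetical sketch: derive a stable (day-of-week, hour, minute)
    # slot from the unit number so repairs do not all run at once.
    dow = unit_num % 7             # cron day of week, 0-6
    hour = (unit_num * 5) % 24     # step through the hours
    minute = (unit_num * 13) % 60  # and the minutes
    return dow, hour, minute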
Example #8
def publish():
    for rel in reactive.endpoint_from_name('database').relations:
        if rel.application_name is not None:
            publish_credentials(rel, False)
            publish_general(rel)
    for rel in reactive.endpoint_from_name('database-admin').relations:
        if rel.application_name is not None:
            publish_credentials(rel, True)
            publish_general(rel)
    reactive.set_flag('cassandra.client.published')
Example #9
def test_connection():
    status_set('maintenance', 'configuring ssh connection')
    remove_flag('vyos-proxy.ready')
    try:
        who, _ = run('whoami')
    except MgmtNotConfigured as e:
        remove_flag('vyos-proxy.configured')
        status_set('blocked', str(e))
    except subprocess.CalledProcessError as e:
        remove_flag('vyos-proxy.configured')
        status_set('blocked', e.output)
    else:
        set_flag('vyos-proxy.configured')
Example #10
def reset_all_io_schedulers():
    cassandra.ensure_all_database_directories()
    dirs = cassandra.get_all_database_directories()
    dirs = (dirs['data_file_directories'] + [dirs['commitlog_directory']] + [dirs['saved_caches_directory']])
    config = cassandra.config()
    missing = False
    for d in dirs:
        if os.path.isdir(d):
            helpers.set_io_scheduler(config['io_scheduler'], d)
        else:
            # If we see this, we should add guards to run this handler later.
            hookenv.log("{} does not exist, deferring io scheduler update".format(d), WARNING)
            missing = True
    if not missing:
        reactive.set_flag('cassandra.io_schedulers.done')
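helpers.set_io_scheduler() is defined elsewhere. A naive sketch of the idea, which assumes a plain /dev/sdXN device name and ignores NVMe naming, LVM, and RAID:

import os
import re
import subprocess

def set_io_scheduler(scheduler, directory):
    # Resolve the device backing the filesystem that holds `directory`.
    dev = subprocess.check_output(
        ['findmnt', '-n', '-o', 'SOURCE', '--target', directory],
        universal_newlines=True).strip()                 # e.g. /dev/sdb1
    block = re.sub(r'\d+$', '', os.path.basename(dev))   # sdb1 -> sdb
    with open('/sys/block/{}/queue/scheduler'.format(block), 'w') as f:
        f.write(scheduler)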
Example #11
def open_ports():
    config = cassandra.config()
    port_keys = ['rpc_port', 'native_transport_port']
    for k in port_keys:
        prev_k = '{}.previous'.format(k)
        prev = config.get(prev_k)
        want = config[k]
        if want == prev:
            continue
        if k != 'rpc_port' or cassandra.has_thrift_support():
            hookenv.open_port(want)
        if prev is not None:
            hookenv.close_port(prev)
        config[prev_k] = want
    reactive.set_flag('cassandra.ports.opened')
Example #12
def reset_sysctl():
    if host.is_container():
        hookenv.log("In a container, not changing kernel settings")
    else:
        cassandra_sysctl_file = os.path.join('/', 'etc', 'sysctl.d', '99-cassandra.conf')
        contents = b"vm.max_map_count = 131072\n"
        try:
            host.write_file(cassandra_sysctl_file, contents)
            subprocess.check_call(['sysctl', '-p', cassandra_sysctl_file])
        except OSError as e:
            if e.errno == errno.EACCES:
                hookenv.log("Ignoring permission Denied trying to set the "
                            "sysctl settings at {}".format(cassandra_sysctl_file),
                            WARNING)
            else:
                raise
    reactive.set_flag("cassandra.kernelsettings.done")
Example #13
def nrpe_external_master_relation(*ignored):
    ''' Configure the nrpe-external-master relation '''
    local_plugins = local_plugins_dir()
    if not os.path.exists(local_plugins):
        # Error because this should have been created by the subordinate before
        # the nrpe-external-master.available flag was set.
        hookenv.log('plugins directory {} does not exist'.format(local_plugins), ERROR)
        hookenv.status_set('waiting', 'Waiting for {} to exist'.format(local_plugins))
        return

    src = os.path.join(hookenv.charm_dir(), "files", "check_cassandra_heap.sh")
    with open(src, 'rb') as f:
        host.write_file(os.path.join(local_plugins, 'check_cassandra_heap.sh'),
                        f.read(), perms=0o555)

    nrpe_compat = nrpe.NRPE()
    conf = hookenv.config()

    cassandra_heap_warn = conf.get('nagios_heapchk_warn_pct')
    cassandra_heap_crit = conf.get('nagios_heapchk_crit_pct')
    if cassandra_heap_warn and cassandra_heap_crit:
        nrpe_compat.add_check(
            shortname="cassandra_heap",
            description="Check Cassandra Heap",
            check_cmd="check_cassandra_heap.sh localhost {} {}"
                      "".format(cassandra_heap_warn, cassandra_heap_crit))

    cassandra_disk_warn = conf.get('nagios_disk_warn_pct')
    cassandra_disk_crit = conf.get('nagios_disk_crit_pct')
    dirs = cassandra.get_all_database_directories()
    dirs = set(dirs['data_file_directories'] +
               [dirs['commitlog_directory'], dirs['saved_caches_directory']])
    # We need to check the space on the mountpoint, not on the actual
    # directory, as the nagios user won't have access to the actual directory.
    mounts = set(mountpoint(d) for d in dirs)
    for disk in mounts:
        check_name = re.sub('[^A-Za-z0-9_]', '_', disk)
        if cassandra_disk_warn and cassandra_disk_crit:
            shortname = "cassandra_disk{}".format(check_name)
            hookenv.log("Adding disk utilization check {}".format(shortname))
            nrpe_compat.add_check(
                shortname=shortname, description="Check Cassandra Disk {}".format(disk),
                check_cmd="check_disk -u GB -w {}% -c {}% -K 5% -p {}"
                          "".format(cassandra_disk_warn, cassandra_disk_crit, disk))
    nrpe_compat.write()
    reactive.set_flag('cassandra.nrpe.installed')
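mountpoint() is a small helper; a likely implementation simply walks up from the directory until it crosses a filesystem boundary (a sketch, assuming that is all the charm needs):

import os

def mountpoint(path):
    # Ascend until os.path.ismount() reports a mounted filesystem root.
    path = os.path.realpath(path)
    while not os.path.ismount(path):
        path = os.path.dirname(path)
    return path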
Example #14
def start_ntpmon():
    """
    Start the ntpmon daemon process.
    If no NTP server is installed, do nothing.
    """
    started = False
    service_name = layer.options.get('ntpmon', 'service-name')
    if service_name:
        for f in (CHRONY_CONF, NTP_CONF):
            if os.path.exists(f):
                log('{} present; enabling and starting ntpmon'.format(f))
                host.service_resume(service_name)
                started = True
                break
        if not started:
            log('No supported NTP service present; disabling ntpmon')
            host.service_pause(service_name)
    set_flag('ntpmon.started')
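CHRONY_CONF and NTP_CONF are module-level constants not shown in this excerpt. Plausible values, assuming the stock Ubuntu package layouts:

# Assumed paths; the presence of either file signals which NTP
# implementation is installed on the unit.
CHRONY_CONF = '/etc/chrony/chrony.conf'
NTP_CONF = '/etc/ntp.conf'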
Example #15
def validate_config():
    config = cassandra.config()
    new_config = dict(hookenv.config())

    invalid = False
    silent_unchangeable_keys = set(['dse_version'])
    for k in (UNCHANGEABLE_KEYS - silent_unchangeable_keys):
        old = config.get(k, None)
        new = new_config.get(k, None)
        if old is not None and old != new:
            # While only the most recent will be visible in status,
            # all will be visible in the status log.
            helpers.status_set('blocked', 'Config {!r} may not be changed after deployment, was {!r}'.format(k, old))
            invalid = True

    for k, vals in ENUMS.items():
        for v in vals:
            if new_config[k].lower() == v.lower():
                hookenv.log("new_confg[{}] = {}".format(k, v))
                new_config[k] = v
                break
        else:
            helpers.status_set('blocked', 'Invalid value {!r} for config setting {}'.format(new_config[k], k))
            invalid = True

    if invalid:
        return  # cassandra.config.validated state not set, charm will not continue.

    # Update stored config to match the validated charm config. Convert enums to lowercase.
    gone = set(k for k in config if k not in new_config)
    for k in gone:
        del config[k]
    for k, v in new_config.items():
        if k in UNCHANGEABLE_KEYS:
            # Don't update unchangeable keys once set. Other handlers
            # may need to override, such as populating dse_version from
            # deployments prior to the setting existing.
            config.setdefault(k, v)
        else:
            config[k] = v

    reactive.set_flag('cassandra.config.validated')
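UNCHANGEABLE_KEYS and ENUMS are module constants defined elsewhere. Hypothetical examples, purely to illustrate the shapes this handler expects:

# Illustrative only; the real charm defines its own keys and values.
UNCHANGEABLE_KEYS = frozenset(['cluster_name', 'datacenter', 'rack', 'dse_version'])
ENUMS = {
    'edition': ['community', 'dse'],  # matched case-insensitively above
    'jre': ['openjdk', 'oracle'],
}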
Example #16
def swapoff(fstab_path='/etc/fstab'):
    '''Turn off swapping on the machine, permanently.'''
    # Turn off swap in the current session
    if host.is_container():
        hookenv.log("In a container, not touching swap.")
    else:
        try:
            hookenv.log("Turning off swap (swapoff -a)")
            subprocess.check_call(['swapoff', '-a'])
            hookenv.log("Removing swap entries from {}".format(fstab_path))
            with closing(fstab.Fstab(fstab_path)) as f:
                while True:
                    swap_entry = f.get_entry_by_attr('filesystem', 'swap')
                    if swap_entry is None:
                        break
                    f.remove_entry(swap_entry)
        except Exception as e:
            hookenv.log("Ignoring an error trying to turn off swap: {}".format(e), WARNING)
            return  # cassandra.swapoff.done state not set, will be attempted again.
    reactive.set_flag('cassandra.swapoff.done')
Example #17
def install_packages():
    pin_dse()

    apt.queue_install(cassandra.get_deb_packages())

    if reactive.is_flag_set('apt.queued_installs'):
        with helpers.autostart_disabled():
            if not apt.install_queued():
                return  # apt layer already left us in a blocked state

    if cassandra.get_edition() == 'apache-snap':
        snap.install('cassandra')
    elif cassandra.get_jre() == 'oracle':
        tb = fetch_oracle_jre()
        if tb is None:
            return
        install_oracle_jre_tarball(tb)
    elif cassandra.get_jre() == 'openjdk':
        subprocess.check_call(['update-java-alternatives', '--jre-headless', '--set', 'java-1.8.0-openjdk-amd64'])
    reactive.set_flag('cassandra.installed')
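helpers.autostart_disabled() is a context manager defined elsewhere. On Debian and Ubuntu the usual mechanism is a temporary policy-rc.d that forbids service starts during package installation; a sketch under that assumption:

import os
from contextlib import contextmanager

@contextmanager
def autostart_disabled(policy='/usr/sbin/policy-rc.d'):
    # invoke-rc.d consults policy-rc.d; exit status 101 means "action
    # forbidden", so freshly installed daemons are not started.
    with open(policy, 'w') as f:
        f.write('#!/bin/sh\nexit 101\n')
    os.chmod(policy, 0o755)
    try:
        yield
    finally:
        os.unlink(policy)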
Example #18
def vyos_proxy_ready():
    status_set('active', 'ready')
    set_flag('vyos-proxy.ready')
Example #19
def sdn_joined():
    reactive.set_flag('sdn-subordinate.connected')
    reactive.set_flag('sdn-subordinate.available')
Example #20
def install_test_action_fail():
    set_flag('test-action-fail.installed')
Example #21
def update_reverseproxy_config():
    '''Configure a reverse proxy.

    The lead unit is responsible for setting appropriate proxy config for all
    known registry peers. The oldest known peer will be configured as the
    primary proxied server. Other peers will be configured as backup servers
    which can take over if the primary fails.
    '''
    website = endpoint_from_flag('website.available')
    port = hookenv.config().get('registry-port')

    # Gather data about our peers, including ourself
    peers = peer_ips(peer_relation="peer")
    peers[hookenv.local_unit()] = hookenv.unit_private_ip()

    # Construct a list of server stanzas
    # NB: use oldest peer (the first unit name in our sorted peers list)
    # versus juju leadership to determine primary vs backup servers:
    #  https://bugs.launchpad.net/layer-docker-registry/+bug/1815459
    common_opts = "check inter 2000 rise 2 fall 5 maxconn 4096"
    is_primary = True
    tls_opts = ""
    if (is_flag_set('config.set.tls-cert-blob')
            and is_flag_set('config.set.tls-key-blob')):
        tls_opts = "ssl check-ssl crt /var/lib/haproxy/default.pem ca-file %s verify required" % (
            hookenv.config().get('tls-ca-path'))
    servers = []
    for unit in sorted(peers):
        if is_primary:
            server_opts = common_opts
            is_primary = False
        else:
            server_opts = common_opts + ' backup'
        server_opts = "{} {}".format(server_opts, tls_opts)
        servers.append('   - [{name}, {ip}, {port}, {opts}]'.format(
            name=unit.replace('/', '-'),
            ip=peers[unit],
            port=port,
            opts=server_opts))

    services_yaml = """
- service_name: %(app)s
  service_host: 0.0.0.0
  service_port: %(port)s
  service_options:
   - mode %(mode)s
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
%(servers)s
""" % {
        'mode': 'tcp' if tls_opts != '' else 'http',
        'app': hookenv.application_name(),
        'port': port,
        'servers': "\n".join(servers),
    }
    # Send yaml to the proxy on initial relation and when it changes.
    if data_changed('proxy_stanza', services_yaml):
        # NB: interface needs configure() to set ip/host/port data and
        # set_remote for the blob of services.
        website.configure(port=port)
        website.set_remote(services=services_yaml)

    # A proxy may change our netloc; if we have clients, tell them.
    netloc = layer.docker_registry.get_netloc()
    if (is_flag_set('charm.docker-registry.client-configured')
            and data_changed('proxy_netloc', netloc)):
        configure_client()

    # Early versions of this charm incorrectly set an 'all_services'
    # key on the website relation. Kill it.
    if not is_flag_set('charm.docker-registry.proxy-data.validated'):
        website.set_remote(all_services=None)
        set_flag('charm.docker-registry.proxy-data.validated')

    # Ensure we'll validate website relation data from a follower perspective
    # if we ever lose leadership.
    clear_flag('charm.docker-registry.proxy-follower.validated')
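For illustration, with a hypothetical two-unit docker-registry application on port 5000 and no TLS, the rendered services_yaml would look roughly like:

- service_name: docker-registry
  service_host: 0.0.0.0
  service_port: 5000
  service_options:
   - mode http
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
   - [docker-registry-0, 10.0.0.1, 5000, check inter 2000 rise 2 fall 5 maxconn 4096 ]
   - [docker-registry-1, 10.0.0.2, 5000, check inter 2000 rise 2 fall 5 maxconn 4096 backup ]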
Example #22
def data_changed(self):
    """Set a flag to indicate to the charm that relation data has changed."""
    if self.all_joined_units.received.get('network_manager'):
        set_flag(self.expand_name('{endpoint_name}.available'))
Example #23
def install():
    render_config()
    if update_keytab():
        status_set('active', 'Unit is ready')
        set_flag('kerberos.installed')
Example #24
def install_dsnviewer():
    env.status_set('maintenance', 'Installing webserver')
    fetch.apt_install(['nginx'])
    set_flag('dsnviewer.installed')
    env.open_port(80)
Example #25
def set_secrets_local():
    kv.set('password', leader_get()['password'])
    set_flag('secrets.configured')
Example #26
def finishing_up_setting_up_sites():
    host.service_reload('apache2')
    set_flag('apache.start')
Example #27
def peer_joined(self):
    set_flag(self.expand_name('peer.joined'))
Example #28
def peer_departed(self):
    set_flag(self.expand_name('peer.departed'))
Example #29
def peer_changed(self):
    set_flag(self.expand_name('peer.changed'))
Example #30
def install_kafka():
    status.maintenance('Installing Kafka')

    # Check if the minimum number of brokers is available
    min_brokers = config().get('broker-count')
    broker_count = 1
    if min_brokers > 1 and is_flag_set('endpoint.broker.joined'):
        kafka_peers = endpoint_from_flag('endpoint.broker.joined')
        broker_count = kafka_peers.kafka_broker_count()

    if broker_count != min_brokers:
        status.blocked(
            "Waiting for {} units to start bootstrapping.".format(min_brokers))
        return

    # Install Java
    status.maintenance('Installing Java')
    install_java()

    # Unpack Kafka files and setup user/group
    status.maintenance('Unpacking Kafka files')
    filename = resource_get('apache-kafka')
    filepath = filename and Path(filename)
    if filepath and filepath.exists() and filepath.stat().st_size:
        tar = tarfile.open(filepath, "r:gz")
        tar.extractall("/usr/lib")
        tar.close()

    distconfig = utils.DistConfig("{}/files/setup.yaml".format(charm_dir()))
    distconfig.add_users()
    distconfig.add_dirs()

    if not os.path.exists('/usr/lib/kafka'):
        # Assumes that there is only 1 kafka_* dir
        kafka_dir = glob.glob('/usr/lib/kafka_*')[0]
        os.symlink(kafka_dir, '/usr/lib/kafka')

    if not os.path.exists('/usr/lib/kafka/logs'):
        os.makedirs('/usr/lib/kafka/logs')
        os.symlink('/usr/lib/kafka/logs', '/var/log/kafka')
        os.chmod('/var/log/kafka', 0o775)
        shutil.chown('/var/log/kafka', user='******', group='kafka')

    # Create server.properties
    status.maintenance('Creating Kafka config')
    zookeepers = endpoint_from_flag('zookeeper.ready')
    zoo_brokers = []
    for zoo in zookeepers.zookeepers():
        zoo_brokers.append("{}:{}".format(zoo['host'], zoo['port']))

    render(source="server.properties.j2",
           target='/usr/lib/kafka/config/server.properties',
           context={
               'broker_count': min_brokers,
               'transaction_min_isr':
               1 if min_brokers == 1 else min_brokers - 1,
               'zookeeper_brokers': ",".join(zoo_brokers),
           })

    # Create systemd service
    render(source='kafka.service.j2',
           target='/etc/systemd/system/kafka.service',
           context={
               'java_home': java_home(),
               'jmx': 1 if config().get('enable-jmx') else 0,
           })

    # Start systemd service
    status.maintenance('Starting Kafka services')
    try:
        check_call(['systemctl', 'daemon-reload'])
        check_call(['systemctl', 'start', 'kafka.service'])
        check_call(['systemctl', 'enable', 'kafka.service'])
    except CalledProcessError as e:
        log(e)
        status.blocked('Could not start Kafka services')
        return

    open_port(9092)
    if config().get('enable-jmx'):
        open_port(9999)
    status.active('Ready')
    set_flag('kafka.installed')
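kafka_broker_count() comes from the peer interface layer. A plausible sketch, mirroring the peers-plus-self arithmetic used in Example #2 (an assumption, not the interface's actual code):

def kafka_broker_count(self):
    # Hypothetical: peers on the relation plus this unit itself.
    return len(self.all_joined_units) + 1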
Example #31
def configure_cassandra():
    cassandra.write_cassandra_yaml(cassandra.get_cassandra_yaml())
    cassandra.write_cassandra_env(cassandra.get_cassandra_env())
    cassandra.write_cassandra_rackdc(cassandra.get_cassandra_rackdc())
    reactive.set_flag('cassandra.configured')
Example #32
def check_admin_pass():
    admin_pass = config()['admin-pass']
    if admin_pass:
        set_flag('admin-pass')
    else:
        clear_flag('admin-pass')
Example #33
def set_bootstrapped():
    u = reactive.endpoint_from_flag('endpoint.cluster.joined')
    u.set_bootstrapped(cassandra.listen_ip_address())
    reactive.set_flag('cassandra.bootstrapped.published')
Example #34
def joined(self):
    set_flag(self.expand_name('{endpoint_name}.connected'))
Example #35
def bootstrap():
    if restart():
        if wait_for_bootstrap():
            reactive.set_flag('cassandra.bootstrapped')
Example #36
def joined(self):
    """Set a flag to indicate to the charm that the relation has been joined."""
    set_flag(self.expand_name('{endpoint_name}.connected'))
Example #37
def prometheus_available(prometheus):
    config = hookenv.config()
    prometheus.configure(port=jujushell.get_ports(config)[0])
    set_flag('prometheus.configured')
Example #38
def install_leadership_flex():
    set_flag('leadership-flex.installed')
    status_set('active', 'ready to go')
Example #39
def start():
    set_flag('jujushell.start')
Example #40
def start_service():
    hookenv.status_set('maintenance', 'starting the jujushell service')
    host.service_start('jujushell')
    hookenv.status_set('active', 'jujushell running')
    clear_flag('jujushell.restart')
    set_flag('jujushell.running')
Example #41
def maybe_restart():
    for k in RESTART_REQUIRED_KEYS:
        if reactive.is_flag_set('config.changed.{}'.format(k)):
            hookenv.log('{} changed, restart required'.format(k))
            reactive.set_flag('cassandra.needs_restart')
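RESTART_REQUIRED_KEYS is defined elsewhere in the charm; a hypothetical example of its shape:

# Illustrative only -- config keys whose change forces a restart.
RESTART_REQUIRED_KEYS = frozenset(['max_heap_size', 'heap_newsize', 'listen_interface'])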
Example #42
def upgrade_charm():
    clear_flag('jujushell.resource.available.jujushell')
    clear_flag('jujushell.resource.available.termserver')
    clear_flag('jujushell.lxd.image.imported.termserver')
    set_flag('jujushell.restart')
Example #43
def ready():
    status_set('active', 'Ready!')
    set_flag('netutils.ready')
Example #44
def install_charm2_proxy_charm():
    """Set the status to active when ssh configured."""
    set_flag('charm2.installed')
    status_set('active', 'Ready!')
Example #45
def secrets_plugin_configure():
    hookenv.log('Received information about secrets plugin',
                level=hookenv.INFO)
    reactive.clear_flag('secrets.new-plugin')
    reactive.set_flag('secrets.available')
    reactive.set_flag('config.changed')
Example #46
def foo():
    calls.append('foo')
    reactive.set_flag('bar')
Example #47
def do_restart():
    hookenv.log('Reloading nagios-nrpe-server')
    host.service_restart('nagios-nrpe-server')
    hookenv.status_set('active', 'Unit is ready')
    set_flag('contrail-service-checks.started')
Example #48
def changed(self):
    if any(unit.received_raw['mountpoint']
           for unit in self.all_joined_units):
        set_flag(self.expand_name('{endpoint_name}.available'))
Example #49
def finishing_up_setting_up_sites():
    log("website.available flag - functie")
    host.service_reload('apache2')
    set_flag('apache.start')
Example #50
def request_bootstrap():
    reactive.set_flag('cassandra.needs_restart')
Example #51
def start_charm():
    layer.status.maintenance('configuring container')

    config = hookenv.config()
    image_info = layer.docker_resource.get_info('oci-image')
    service_name = hookenv.service_name()

    hub_port = 8000
    api_port = 8081

    if is_flag_set('endpoint.ambassador.joined'):
        annotations = {
            'getambassador.io/config': yaml.dump_all(
                [
                    {
                        'apiVersion': 'ambassador/v0',
                        'kind': 'Mapping',
                        'name': 'tf_hub',
                        'prefix': '/hub',
                        'rewrite': '/hub',
                        'service': f'{service_name}:{hub_port}',
                        'use_websocket': True,
                        'timeout_ms': 30000,
                    },
                    {
                        'apiVersion': 'ambassador/v0',
                        'kind': 'Mapping',
                        'name': 'tf_hub_user',
                        'prefix': '/user',
                        'rewrite': '/user',
                        'service': f'{service_name}:{hub_port}',
                        'use_websocket': True,
                        'timeout_ms': 30000,
                    },
                ]
            )
        }
    else:
        annotations = {}

    pip_installs = [
        'kubernetes==9.0.0',
        'jhub-remote-user-authenticator',
        'jupyterhub-dummyauthenticator',
        'jupyterhub-kubespawner',
        'oauthenticator',
    ]

    layer.caas_base.pod_spec_set(
        {
            'service': {'annotations': annotations},
            'containers': [
                {
                    'name': 'jupyterhub',
                    'imageDetails': {
                        'imagePath': image_info.registry_path,
                        'username': image_info.username,
                        'password': image_info.password,
                    },
                    'command': [
                        'bash',
                        '-c',
                        f'pip install {" ".join(pip_installs)} && jupyterhub -f /etc/config/jupyterhub_config.py',
                    ],
                    'ports': [
                        {'name': 'hub', 'containerPort': hub_port},
                        {'name': 'api', 'containerPort': api_port},
                    ],
                    'config': {
                        'K8S_SERVICE_NAME': service_name,
                        'AUTHENTICATOR': config['authenticator'],
                        'NOTEBOOK_STORAGE_SIZE': config['notebook-storage-size'],
                        'NOTEBOOK_STORAGE_CLASS': config['notebook-storage-class'],
                        'NOTEBOOK_IMAGE': config['notebook-image'],
                    },
                    'files': [
                        {
                            'name': 'configs',
                            'mountPath': '/etc/config',
                            'files': {
                                Path(filename).name: Path(filename).read_text()
                                for filename in glob('files/*')
                            },
                        }
                    ],
                }
            ],
        }
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #52
def set_live_noauth():
    reactive.set_flag('cassandra.live')
Example #53
def initial_nrpe_config(nagios=None):
    set_flag('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
Example #54
def initial_seeds():
    leadership.leader_set(seeds=cassandra.listen_ip_address())
    reactive.set_flag('cassandra.seeds.done')
Example #55
def configure_hacluster():
    add_service_to_hacluster('nginx', 'nginx')
    set_flag('hacluster-configured')
Example #56
def install_simple_proxy_charm():
    set_flag('simple.installed')
    status_set('active', 'Ready!')
Example #57
def start_charm():
    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    namespace = os.environ["JUJU_MODEL_NAME"]

    issuers, secrets = get_issuers()

    layer.caas_base.pod_spec_set(
        {
            "version":
            2,
            "serviceAccount": {
                "global":
                True,
                "rules": [
                    {
                        "apiGroups": [""],
                        "resources": ["events"],
                        "verbs": ["create", "patch"],
                    },
                    {
                        "apiGroups": [""],
                        "resources": ["pods", "services"],
                        "verbs": ["get", "list", "watch", "create", "delete"],
                    },
                    {
                        "apiGroups": [""],
                        "resources": ["secrets"],
                        "verbs":
                        ["get", "list", "watch", "create", "update", "delete"],
                    },
                    {
                        "apiGroups": ["extensions", "networking.k8s.io/v1"],
                        "resources": ["ingresses"],
                        "verbs":
                        ["get", "list", "watch", "create", "delete", "update"],
                    },
                    {
                        "apiGroups": ["networking.k8s.io/v1"],
                        "resources": ["ingresses/finalizers"],
                        "verbs": ["update"],
                    },
                    {
                        "apiGroups": ["cert-manager.io"],
                        "resources":
                        ["certificates", "certificaterequests", "issuers"],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "create",
                            "delete",
                            "deletecollection",
                            "patch",
                            "update",
                        ],
                    },
                    {
                        "apiGroups": ["cert-manager.io"],
                        "resources": [
                            "certificaterequests/status",
                            "certificates/finalizers",
                            "certificates/status",
                            "clusterissuers",
                            "clusterissuers/status",
                            "issuers",
                            "issuers/status",
                        ],
                        "verbs": ["update"],
                    },
                    {
                        "apiGroups": ["cert-manager.io"],
                        "resources": [
                            "certificates",
                            "certificaterequests",
                            "clusterissuers",
                            "issuers",
                        ],
                        "verbs": ["get", "list", "watch"],
                    },
                    {
                        "apiGroups": ["acme.cert-manager.io"],
                        "resources": ["orders", "challenges"],
                        "verbs": ["create", "delete", "get", "list", "watch"],
                    },
                    {
                        "apiGroups": ["acme.cert-manager.io"],
                        "resources": [
                            "orders",
                            "orders/status",
                            "orders/finalizers",
                            "challenges",
                            "challenges/status",
                            "challenges/finalizers",
                        ],
                        "verbs": ["update"],
                    },
                ],
            },
            "containers": [{
                "name":
                "cert-manager-controller",
                "imageDetails": {
                    "imagePath": image_info.registry_path,
                    "username": image_info.username,
                    "password": image_info.password,
                },
                "args": [
                    "--v=2",
                    f"--cluster-resource-namespace={namespace}",
                    "--leader-elect=false",
                    f"--webhook-namespace={namespace}",
                    "--webhook-ca-secret=cert-manager-webhook-ca",
                    "--webhook-serving-secret=cert-manager-webhook-tls",
                    "--webhook-dns-names=" + ",".join([
                        "cert-manager-webhook",
                        f"cert-manager-webhook.{namespace}",
                        f"cert-manager-webhook.{namespace}.svc",
                    ]),
                ],
                "config": {
                    "POD_NAMESPACE": namespace
                },
                "ports": [{
                    "name": "http",
                    "containerPort": hookenv.config("port")
                }],
            }],
        },
        {
            "kubernetesResources": {
                "customResourceDefinitions": {
                    crd["metadata"]["name"]: crd["spec"]
                    for crd in yaml.safe_load_all(
                        Path("resources/crds.yaml").read_text())
                },
                "customResources": {
                    "issuers.cert-manager.io": issuers
                },
                "secrets": secrets,
            }
        },
    )

    layer.status.maintenance("creating container")
    set_flag("charm.started")
Example #58
def joined(self):
    reactive.set_flag(self.expand_name('{endpoint_name}.connected'))
Example #59
def _set_flag(self, flag):
    set_flag(self.expand_name(flag))