def install_load_balancer():
    ''' Create the default vhost template for load balancing '''
    apiserver = endpoint_from_flag('apiserver.available')
    # Do both the key and certificate exist?
    if server_crt_path.exists() and server_key_path.exists():
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', str(server_crt_path)]

        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', str(server_key_path)]

        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        port = hookenv.config('port')
        hookenv.open_port(port)
        services = apiserver.services()
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=port,
            server_certificate=str(server_crt_path),
            server_key=str(server_key_path),
            proxy_read_timeout=hookenv.config('proxy_read_timeout'))

        maybe_write_apilb_logrotate_config()
        status.active('Loadbalancer ready.')
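
# Hedged sketch: the same ownership change as the chown calls above, done
# in-process with the standard library instead of shelling out (the paths
# are passed in here rather than using the charm's module-level globals):
import shutil

def chown_tls_files_sketch(cert_path, key_path):
    """Give www-data ownership of the TLS cert and key (sketch)."""
    for path in (cert_path, key_path):
        shutil.chown(str(path), user='www-data', group='www-data')
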
def check_cluster_health():
    """report on the cluster health every 5 minutes"""
    etcdctl = EtcdCtl()
    health = etcdctl.cluster_health()

    # Determine if the unit is healthy or unhealthy
    if "unhealthy" in health["status"]:
        unit_health = "UnHealthy"
    else:
        unit_health = "Healthy"

    # Determine units peer count, and surface 0 by default
    try:
        peers = len(etcdctl.member_list())
    except Exception:
        unit_health = "Errored"
        peers = 0

    bp = "{0} with {1} known peer{2}"
    status_message = bp.format(unit_health, peers, "s" if peers != 1 else "")

    if unit_health in ["UnHealthy", "Errored"]:
        status.blocked(status_message)
    else:
        status.active(status_message)
def reregister_connector():
    status.maintenance('Reregistering connector')
    if not register_latest_connector():
        status.blocked('Could not reregister previous connectors, trying next hook..')
    else:
        status.active('ready')
        clear_flag('kafka-connect-base.unregistered')
def kubernetes_status_update():
    kubernetes = endpoint_from_flag('endpoint.kubernetes.new-status')
    k8s_status = kubernetes.get_status()
    if not k8s_status or not k8s_status['status']:
        return

    nodeport = None
    deployment_running = False
    # Check if service and deployment has been created on k8s
    # If the service is created, set the connection string
    # else clear it.
    for resource in k8s_status['status']:
        if resource['kind'] == "Service":
            nodeport = resource['spec']['ports'][0]['nodePort']
        elif resource['kind'] == "Deployment":
            deployment = resource['status']
            if ('availableReplicas' in deployment and
                    deployment['availableReplicas'] ==
                    deployment.get('readyReplicas')):
                deployment_running = True
    kubernetes_workers = kubernetes.get_worker_ips()
    if nodeport and kubernetes_workers and deployment_running:
        unitdata.kv().set('kafka-connect-service',
                          kubernetes_workers[0] + ':' + str(nodeport))
        status.active('K8s deployment running')
        clear_flag('endpoint.kubernetes.new-status')
        set_flag('kafka-connect.running')
    else:
        unitdata.kv().set('kafka-connect-service', '')
        clear_flag('kafka-connect.running')
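
# For reference, a hypothetical k8s_status payload matching what the loop
# above consumes. The Service/Deployment field names follow the Kubernetes
# API; the wrapper shape is an assumption inferred from the parsing code.
EXAMPLE_K8S_STATUS = {
    'status': [
        {'kind': 'Service',
         'spec': {'ports': [{'nodePort': 30042}]}},
        {'kind': 'Deployment',
         'status': {'availableReplicas': 1, 'readyReplicas': 1}},
    ]
}
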
def start_kc_influxdb():
    # Get all config for the InfluxDB connector.
    influxdb = endpoint_from_flag('influxdb.available')
    if not ensure_db_exists(conf.get('database'), influxdb.hostname(),
                            influxdb.port()):
        return
    connector_configs = {
        'connector.class': ('com.datamountaineer.streamreactor.connect.'
                            'influx.InfluxSinkConnector'),
        'tasks.max': str(conf.get('max-tasks')),
        'connect.influx.url': 'http://{}:{}'.format(influxdb.hostname(),
                                                    influxdb.port()),
        'connect.influx.db': conf.get('database'),
        'connect.influx.username': influxdb.user(),
        'connect.influx.password': influxdb.password(),
        'connect.influx.kcql': conf.get('kcql'),
        'topics': conf.get("topics").replace(" ", ","),
    }
    # Ask the base layer to send the config to the Kafka connect REST API.
    response = register_connector(connector_configs, INFLUXDB_CONNECTOR_NAME)
    if response and response.status_code in (200, 201):
        status.active('ready')
        clear_flag('kafka-connect-influxdb.stopped')
        set_flag('kafka-connect-influxdb.running')
    else:
        log('Could not register/update connector Response: ' + str(response))
        status.blocked(
            'Could not register/update connector, retrying next hook.')
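
# register_connector() comes from the kafka-connect base layer. A plausible
# minimal implementation against the Kafka Connect REST API (base URL,
# timeout, and error handling are assumptions) could look like:
import requests

def register_connector_sketch(connector_config, name,
                              base_url='http://localhost:8083'):
    """PUT /connectors/<name>/config creates or updates a connector,
    returning 200 or 201 on success; None signals a connection problem."""
    try:
        return requests.put(
            '{}/connectors/{}/config'.format(base_url, name),
            json=connector_config, timeout=10)
    except requests.exceptions.RequestException:
        return None
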
def render_filebeat_template():
    """Create the filebeat.yaml config file.

    Renders the appropriate template for the major version of filebeat that
    is installed.
    """
    # kube_logs requires access to k8s-related filesystem data. If configured,
    # don't try to start filebeat until that data is present.
    if config().get('kube_logs') and not os.path.exists(KUBE_CONFIG):
        status.maint('Waiting for: {}'.format(KUBE_CONFIG))
        return

    # get_package_version() returns the full version string; its first
    # character is the major version (single-digit majors assumed here).
    version = charms.apt.get_package_version('filebeat')[0]
    cfg_original_hash = file_hash(FILEBEAT_CONFIG)
    connections = render_without_context('filebeat-{}.yml'.format(version),
                                         FILEBEAT_CONFIG)
    cfg_new_hash = file_hash(FILEBEAT_CONFIG)

    # Ensure ssl files match config each time we render a new template
    manage_filebeat_logstash_ssl()
    remove_state('beat.render')

    if connections:
        if cfg_original_hash != cfg_new_hash:
            service('restart', 'filebeat')
        status.active('Filebeat ready.')
    else:
        # Stop the service when not connected to any log handlers.
        # NB: beat base layer will handle setting a waiting status
        service('stop', 'filebeat')
def install_fresh_rss():
    """Install FreshRSS
    """

    apply_permissions()
    status.maintenance('Installing FreshRSS')

    install_opts = []
    install_opts.extend(['--default_user', config['default-admin-username']])
    install_opts.extend(['--base_url', config['fqdn']])
    install_opts.extend(['--environment', config['environment']])

    # db specific
    install_opts.extend(['--db-type', kv.get('db-scheme')])
    install_opts.extend(['--db-base', kv.get('db-base')])
    install_opts.extend(['--db-user', kv.get('db-user')])
    install_opts.extend(['--db-password', kv.get('db-password')])
    install_opts.extend(['--db-host', kv.get('db-host')])
    install_opts.extend(['--db-prefix', config['db-prefix']])

    # ensure the needed directories in ./data/
    run_script('prepare')
    run_script('do-install', install_opts)

    if not is_flag_set('leadership.set.default_admin_init'):
        run_script('create-user', [
            '--user', config['default-admin-username'], '--password',
            config['default-admin-password']
        ])
        leader_set(default_admin_init="true")

    apply_permissions()

    status.active('FreshRSS installed')
    set_flag('fresh-rss.installed')
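
# run_script() wraps the FreshRSS CLI helpers. A hedged sketch of what it
# might do; the install path, user, and script-to-file mapping here are
# assumptions, not the charm's actual implementation:
import subprocess

def run_script_sketch(name, opts=None):
    """Invoke a FreshRSS cli/*.php helper as the web user (sketch)."""
    cmd = ['sudo', '-u', 'www-data', 'php',
           '/var/www/FreshRSS/cli/{}.php'.format(name)]
    subprocess.check_call(cmd + list(opts or []))
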
def write_config_start_restart_redis():
    """Write config, restart service
    """

    ctxt = {'port': config('port'),
            'databases': config('databases'),
            'log_level': config('log-level'),
            'tcp_keepalive': config('tcp-keepalive'),
            'timeout': config('timeout'),
            'redis_dir': REDIS_DIR}

    if config('cluster-enabled'):
        ctxt['cluster_conf'] = REDIS_CLUSTER_CONF
    if config('password'):
        ctxt['password'] = config('password')

    render_conf(REDIS_CONF, 'redis.conf.tmpl', ctxt=ctxt)

    if service_running(REDIS_SERVICE):
        service_restart(REDIS_SERVICE)
    else:
        service_start(REDIS_SERVICE)

    status.active("Redis {} available".format(
        "cluster" if config('cluster-enabled') else "singleton"))
    set_flag('redis.ready')
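
# render_conf() is a charm helper; a minimal Jinja2-based sketch (the
# template directory and lack of ownership/permission handling are
# assumptions):
import jinja2

def render_conf_sketch(target, template_name, ctxt=None):
    """Render templates/<template_name> with ctxt and write it to target."""
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
    content = env.get_template(template_name).render(**(ctxt or {}))
    with open(target, 'w') as f:
        f.write(content)
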
def ensure_sufficient_masters():
    """Redis enforces us to use at minimum 3 master nodes.
    Set leader flag indicating we have met the minimum # nodes.
    """

    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the peers, check for min length
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [peer._data['private-address']
                for peer in peers if peer._data is not None]
    if len(peer_ips) > 1:
        # Two peers plus this unit satisfies the three-master minimum.
        status.active(
            "Minimum # masters available, got {}.".format(len(peer_ips) + 1))
        init_masters = ",".join(peer_ips + [unit_private_ip()])
        charms.leadership.leader_set(init_masters=init_masters)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')
def push_filebeat_index(elasticsearch):
    """Create the Filebeat index in Elasticsearch.

    Once elasticsearch is available, make 5 attempts to create a filebeat
    index. Set appropriate charm status so the operator knows when ES is
    configured to accept data.
    """
    hosts = elasticsearch.list_unit_data()
    # NB: only the last advertised host ends up as the push target.
    for host in hosts:
        host_string = "{}:{}".format(host['host'], host['port'])

    max_attempts = 5
    for i in range(1, max_attempts + 1):  # five attempts, as documented
        if push_beat_index(elasticsearch=host_string,
                           service='filebeat',
                           fatal=False):
            set_state('filebeat.index.pushed')
            status.active('Filebeat ready.')
            break
        else:
            msg = "Attempt {} to push filebeat index failed (retrying)".format(
                i)
            status.waiting(msg)
            time.sleep(i * 30)  # back off 30s for each attempt
    else:
        # for/else: reached only when no attempt succeeded (no break).
        msg = "Failed to push filebeat index to http://{}".format(host_string)
        status.blocked(msg)
def render_nimsoft_robot_config():
    """Create the nimbus.conf config file.

    Renders the appropriate template for the Nimbus Robot
    """
    cfg_original_hash = file_hash(NIMBUS_ROBOT_CONFIG)
    context = {
        'hub': config("hub"),
        'domain': config("domain"),
        'hubip': config("hubip"),
        'hub_robot_name': config("hub-robot-name"),
        'secondary_domain': config("secondary-domain"),
        'secondary_hubip': config("secondary-hubip"),
        'secondary_hub': config("secondary-hub"),
        'secondary_hub_robot_name': config("secondary-hub-robot-name"),
        'private_address': unit_private_ip(),
        'hostname': os.uname()[1]
    }
    render('robot.cfg', NIMBUS_ROBOT_CONFIG, context=context)
    cfg_new_hash = file_hash(NIMBUS_ROBOT_CONFIG)

    rsync(charm_dir() + '/files/request_linux_prod.cfg',
          '/opt/nimsoft/request.cfg')

    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')

    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')
        status.active('nimbus ready.')
def configured_devpi():
    status.maintenance('Configuring devpi')

    DEVPI_PATH.mkdir(mode=0o755, parents=True, exist_ok=True)
    devpi_server_bin = DEVPI_ENV_BIN / 'devpi-server'

    # initialize devpi
    adduser('devpi')
    chownr(str(DEVPI_PATH), 'devpi', 'devpi', chowntopdir=True)
    check_call([
        'sudo', '-u', 'devpi',
        str(devpi_server_bin), '--init', '--serverdir',
        str(DEVPI_PATH)
    ])

    # render service
    render('devpi.service',
           '/etc/systemd/system/devpi.service',
           context={
               'devpi_server_bin': devpi_server_bin,
               'devpi_path': str(DEVPI_PATH)
           })

    open_port(3141)

    # enable service
    check_call(['systemctl', 'enable', 'devpi.service'])

    # start service
    check_call(['systemctl', 'start', 'devpi.service'])

    status.active('devpi running')
    set_flag('devpi.configured')
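
# A quick smoke test once the service is up: devpi-server answers on the
# port opened above. The /+api endpoint is part of devpi's HTTP API; using
# localhost here is an assumption for illustration.
import urllib.request

def devpi_smoke_test_sketch():
    """Return the HTTP status of the devpi API root (sketch)."""
    return urllib.request.urlopen('http://localhost:3141/+api').status
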
def acquire_db_config():
    """Acquire juju provided database config
    """

    if is_flag_set('pgsql.master.available'):
        pgsql = endpoint_from_flag('pgsql.master.available')

        if pgsql is None:
            log('PostgreSQL not found', level='ERROR')
            return

        db = pgsql.master

        kv.set('db-scheme', 'pgsql')
        kv.set('db-user', db.user)
        kv.set('db-password', db.password)
        kv.set('db-host', db.host)
        kv.set('db-base', db.dbname)

    elif is_flag_set('mysql.available'):
        mysql = endpoint_from_flag('mysql.available')
        prefix = "fresh-rss"

        if mysql is None:
            log('MySQL not found', level='ERROR')
            return

        kv.set('db-scheme', 'mysql')
        kv.set('db-user', mysql.username(prefix))
        kv.set('db-password', mysql.password(prefix))
        kv.set('db-host', mysql.hostname(prefix))
        kv.set('db-base', mysql.database(prefix))

    status.active('Fresh-RSS Database Acquired')
    set_flag('fresh-rss.db.config.acquired')
def are_we_in_status():
    """Determine if this node is part of the cluster.
    """

    cluster_node_ips = (charms.leadership.leader_get("cluster_node_ips")
                        or "").split(",")

    if unit_private_ip() in cluster_node_ips:
        status.active("cluster successfully joined")
        set_flag('redis.cluster.joined')
def ready():
    ''' Indicate that canal is active. '''
    failing_services = get_failing_services()
    if failing_services:
        msg = 'Waiting for service: {}'.format(', '.join(failing_services))
        status.waiting(msg)
    else:
        try:
            status.active('Flannel subnet ' + get_flannel_subnet())
        except FlannelSubnetNotFound:
            status.waiting('Waiting for Flannel')
def configure_nginx():
    """Configure NGINX server for fresh_rss
    """

    ctxt = {'fqdn': config['fqdn'], 'port': config['port']}

    configure_site('fresh-rss', 'fresh-rss.conf', **ctxt)
    hookenv.open_port(ctxt['port'])

    status.active('nginx configured')
    set_flag('fresh-rss.nginx.configured')
def report_ssl_status():
    registered_fqdns = []
    cert_requests = unitdata.kv().get('sslterm.cert-requests', [])
    for cert_request in cert_requests:
        registered_fqdns.extend(cert_request['fqdn'])
    if config.get('fqdn') and is_flag_set('reverseproxy.available'):
        registered_fqdns.append(config.get('fqdn'))
    if registered_fqdns:
        status.active('Ready ({})'.format(",".join(registered_fqdns)))
    else:
        status.active('Ready')
    clear_flag('ssl-termination.report')
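
# Hypothetical shape of the persisted cert requests read above: 'fqdn'
# holds a list of names, which is why extend() (not append()) is used.
# The values are invented for illustration.
EXAMPLE_CERT_REQUESTS = [
    {'fqdn': ['rss.example.com', 'www.example.com']},
]
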
def update_status():
    if service_running('update-conda-mirror.service'):
        pkgs = [
            pkg for d in os.walk(str(temp_dir)) for pkg in d[2]
            if pkg.endswith('.tar.bz2')
        ]
        status.maint(f'Updating mirror - {len(pkgs)} packages downloaded')
    else:
        pkgs = [
            pkg for d in os.walk(str(target_dir)) for pkg in d[2]
            if pkg.endswith('.tar.bz2')
        ]
        status.active(f'Conda mirror ready - {len(pkgs)} packages available')
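
# The package count above walks the tree with os.walk; the same count can
# be written with pathlib's recursive glob (temp_dir/target_dir being Path
# objects, as the str() calls above imply):
import pathlib

def count_packages_sketch(directory):
    """Count mirrored conda packages under directory (sketch)."""
    return sum(1 for _ in pathlib.Path(directory).rglob('*.tar.bz2'))
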
def charm_status():
    """
    Set the charm's status after each hook is run.

    :return: None
    """
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
    elif is_state('containerd.nvidia.invalid-option'):
        status.blocked('{} is an invalid option for gpu_driver'.format(
            config().get('gpu_driver')))
    elif _check_containerd():
        status.active('Container runtime available')
        set_state('containerd.ready')
    else:
        status.blocked('Container runtime not available')
def update_status():
    if not is_flag_set('vault.connected'):
        status.blocked('missing relation to vault')
        return
    if not is_flag_set('layer.vaultlocker.configured'):
        status.waiting('waiting for vaultlocker config')
        return
    ready, missing = [], []
    for storage in ('secrets', 'secrets/0', 'multi-secrets', 'multi-secrets/0',
                    'multi-secrets/1', 'multi-secrets/2'):
        if is_flag_set('layer.vaultlocker.{}.ready'.format(storage)):
            ready.append(storage)
        else:
            missing.append(storage)
    status.active('ready: {}; missing: {}'.format(','.join(ready),
                                                  ','.join(missing)))

def init_conda_mirror():
    temp_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
    target_dir.mkdir(mode=0o755, parents=True, exist_ok=True)

    render_services()

    # enable timer
    check_call(['systemctl', 'enable', 'update-conda-mirror.timer'])
    check_call(['systemctl', 'start', 'update-conda-mirror.timer'])

    # do initial sync
    check_call(['systemctl', 'start', 'update-conda-mirror.service'])

    open_port(config('port'))
    status.active('Conda mirror installed')
    set_flag('conda-mirror.initialized')
def signal_workloads_start():
    """
    Signal to higher layers the container runtime is ready to run
    workloads. At this time the only reasonable thing we can do
    is determine if the container runtime is active.

    :return: None
    """
    # Before we switch to active, probe the runtime to determine if
    # it is available for workloads. Assuming response from daemon
    # to be sufficient.
    if not _probe_runtime_availability():
        status.waiting("Container runtime not available.")
        return

    status.active("Container runtime available.")
    set_state("docker.available")
def install_load_balancer():
    ''' Create the default vhost template for load balancing '''
    apiserver = endpoint_from_name('apiserver')
    lb_consumers = endpoint_from_name('lb-consumers')

    if not (server_crt_path.exists() and server_key_path.exists()):
        hookenv.log('Skipping due to missing cert')
        return
    if not (apiserver.services() or lb_consumers.all_requests):
        hookenv.log('Skipping due to requests not ready')
        return

    # At this point the cert and key exist, and they are owned by root.
    chown = ['chown', 'www-data:www-data', str(server_crt_path)]

    # Change the owner to www-data so the nginx process can read the cert.
    subprocess.call(chown)
    chown = ['chown', 'www-data:www-data', str(server_key_path)]

    # Change the owner to www-data so the nginx process can read the key.
    subprocess.call(chown)

    servers = {}
    if apiserver and apiserver.services():
        servers[hookenv.config('port')] = {(h['hostname'], h['port'])
                                           for service in apiserver.services()
                                           for h in service['hosts']}
    for request in lb_consumers.all_requests:
        for server_port in request.port_mapping.keys():
            service = servers.setdefault(server_port, set())
            # NB: every backend is paired with every mapped backend port,
            # not only the port that this listen port maps to.
            service.update(
                (backend, backend_port)
                for backend, backend_port in itertools.product(
                    request.backends, request.port_mapping.values()))
    nginx.configure_site(
        'apilb',
        'apilb.conf',
        servers=servers,
        server_certificate=str(server_crt_path),
        server_key=str(server_key_path),
        proxy_read_timeout=hookenv.config('proxy_read_timeout'))

    maybe_write_apilb_logrotate_config()
    for listen_port in servers.keys():
        hookenv.open_port(listen_port)
    status.active('Loadbalancer ready.')
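
# A worked example of the servers mapping built above, with invented
# values: one lb-consumers request with two backends and a 443 -> 6443
# port mapping.
import itertools

def servers_mapping_example():
    servers = {}
    backends = ['10.0.0.5', '10.0.0.6']   # hypothetical request.backends
    port_mapping = {443: 6443}            # listen port -> backend port
    for server_port in port_mapping.keys():
        service = servers.setdefault(server_port, set())
        service.update(itertools.product(backends, port_mapping.values()))
    assert servers == {443: {('10.0.0.5', 6443), ('10.0.0.6', 6443)}}
    return servers
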
def install_kata():
    """
    Install the Kata container runtime.

    :returns: None
    """
    dist = host.lsb_release()
    release = '{}_{}'.format(
        dist['DISTRIB_ID'],
        dist['DISTRIB_RELEASE']
    )

    arch = check_output(['arch']).decode().strip()

    archive = resource_get('kata-archive')

    if not archive or os.path.getsize(archive) == 0:
        status.maintenance('Installing Kata via apt')
        gpg_key = requests.get(
            'http://download.opensuse.org/repositories/home:/katacontainers:/'
            'releases:/{}:/master/x{}/Release.key'.format(arch, release)).text
        import_key(gpg_key)

        with open('/etc/apt/sources.list.d/kata-containers.list', 'w') as f:
            f.write(
                'deb http://download.opensuse.org/repositories/home:/'
                'katacontainers:/releases:/{}:/master/x{}/ /'
                .format(arch, release)
            )

        apt_update()
        apt_install(KATA_PACKAGES)

    else:
        status.maintenance('Installing Kata via resource')
        unpack = '/tmp/kata-debs'

        if not os.path.isdir(unpack):
            os.makedirs(unpack, exist_ok=True)

        check_call(['tar', '-xvf', archive, '-C', unpack])
        check_call('apt-get install -y {}/*.deb'.format(unpack), shell=True)

    status.active('Kata runtime available')
    set_state('kata.installed')
def ready():
    preconditions = [
        'calico.service.installed', 'calico.pool.configured',
        'calico.cni.configured', 'calico.bgp.globals.configured',
        'calico.node.configured', 'calico.bgp.peers.configured'
    ]
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
        return
    for precondition in preconditions:
        if not is_state(precondition):
            return
    if is_leader() and not is_state('calico.npc.deployed'):
        status.waiting('Waiting to retry deploying policy controller')
        return
    if not service_running('calico-node'):
        status.waiting('Waiting for service: calico-node')
        return
    status.active('Calico is active')
def check_cluster_health():
    ''' report on the cluster health every 5 minutes'''
    etcdctl = EtcdCtl()
    health = etcdctl.cluster_health()

    # Determine if the unit is healthy or unhealthy
    if 'unhealthy' in health['status']:
        unit_health = "UnHealthy"
    else:
        unit_health = "Healthy"

    # Determine units peer count, and surface 0 by default
    try:
        peers = len(etcdctl.member_list())
    except Exception:
        unit_health = "Errored"
        peers = 0

    bp = "{0} with {1} known peer{2}"
    status_message = bp.format(unit_health, peers, 's' if peers != 1 else '')

    status.active(status_message)
def configure_keepalived_service():
    """Set up the keepalived service"""

    virtual_ip = config().get("virtual_ip")
    if virtual_ip == "":
        status.blocked("Please configure virtual ips")
        return

    network_interface = config().get("network_interface")
    if network_interface == "":
        network_interface = default_route_interface()

    context = {
        "is_leader": is_leader(),
        "virtual_ip": virtual_ip,
        "network_interface": network_interface,
        "router_id": config().get("router_id"),
        "service_port": config().get("port"),
        "healthcheck_interval": config().get("healthcheck_interval"),
    }
    render(
        source="keepalived.conf",
        target=KEEPALIVED_CONFIG_FILE,
        context=context,
        perms=0o644,
    )
    service_restart("keepalived")

    render(
        source="50-keepalived.conf",
        target=SYSCTL_FILE,
        context={"sysctl": {"net.ipv4.ip_nonlocal_bind": 1}},
        perms=0o644,
    )
    service_restart("procps")

    status.active("VIP ready")
    set_flag("keepalived.started")
def start_kafka_connect_mongodb():
    if conf.get('write-batch-enabled') and not conf.get('write-batch-size'):
        status.blocked(
            'Write-batch-enabled is True but write-batch-size is not set')
        return
    if len(conf.get('db-collections', '').split(' ')) != len(
            conf.get('topics', '').split(' ')):
        status.blocked('Number of collections does not match topics')
        return

    mongodb = endpoint_from_flag('mongodb.connected')
    mongodb_connection = mongodb.connection_string()

    mongodb_connector_config = {
        'connector.class': 'com.startapp.data.MongoSinkConnector',
        'tasks.max': str(conf.get('max-tasks')),
        # NB: assumes a single 'host:port' connection string.
        'db.host': mongodb_connection.split(':')[0],
        'db.port': mongodb_connection.split(':')[1],
        'db.name': conf.get('db-name'),
        'db.collections': conf.get('db-collections').replace(" ", ","),
        'write.batch.enabled': str(conf.get('write-batch-enabled')).lower(),
        'write.batch.size': str(conf.get('write-batch-size')),
        'connect.use_schema': "false",
        'topics': conf.get("topics").replace(" ", ","),
    }

    response = register_connector(mongodb_connector_config,
                                  MONGODB_CONNECTOR_NAME)
    if response and response.status_code in (200, 201):
        status.active('ready')
        clear_flag('kafka-connect-mongodb.stopped')
        set_flag('kafka-connect-mongodb.running')
    else:
        log('Could not register/update connector Response: ' + str(response))
        status.blocked(
            'Could not register/update connector, retrying next hook.')
def configure_keepalived_service():
    ''' Set up the keepalived service '''

    virtual_ip = config().get('virtual_ip')
    if virtual_ip == "":
        status.blocked('Please configure virtual ips')
        return

    network_interface = config().get('network_interface')
    if network_interface == "":
        network_interface = default_route_interface()

    context = {
        'is_leader': is_leader(),
        'virtual_ip': virtual_ip,
        'network_interface': network_interface,
        'router_id': config().get('router_id'),
        'service_port': config().get('port'),
        'healthcheck_interval': config().get('healthcheck_interval'),
    }
    render(source='keepalived.conf',
           target=KEEPALIVED_CONFIG_FILE,
           context=context,
           perms=0o644)
    service_restart('keepalived')

    render(source='50-keepalived.conf',
           target=SYSCTL_FILE,
           context={'sysctl': {
               'net.ipv4.ip_nonlocal_bind': 1
           }},
           perms=0o644)
    service_restart('procps')

    status.active('VIP ready')
    set_flag('keepalived.started')
def render_filebeat_template():
    """Create the filebeat.yaml config file.

    Renders the appropriate template for the major version of filebeat that
    is installed.
    """
    # kube_logs requires access to a kubeconfig. If configured, log whether or
    # not we have enough to start collecting k8s metadata.
    if config().get('kube_logs'):
        if os.path.exists(KUBE_CONFIG):
            msg = 'Collecting k8s metadata.'
        else:
            msg = ('kube_logs=True, but {} does not exist. '
                   'No k8s metadata will be collected.'.format(KUBE_CONFIG))
        log(msg)

    # The v5 template is compatible with all versions < 6
    major = charms.apt.get_package_version('filebeat')[0]
    version = major if major.isdigit() and int(major) > 5 else "5"
    cfg_original_hash = file_hash(FILEBEAT_CONFIG)
    connections = render_without_context('filebeat-{}.yml'.format(version),
                                         FILEBEAT_CONFIG)
    cfg_new_hash = file_hash(FILEBEAT_CONFIG)

    # Ensure ssl files match config each time we render a new template
    manage_filebeat_logstash_ssl()
    remove_state('beat.render')

    if connections:
        if cfg_original_hash != cfg_new_hash:
            service('restart', 'filebeat')
        status.active('Filebeat ready.')
    else:
        # Stop the service when not connected to any log handlers.
        # NB: beat base layer will handle setting a waiting status
        service('stop', 'filebeat')