Example #1
def relation_changed():
    template_data = get_template_data()

    # Check required keys
    for k in ('etcd_servers',):
        if not template_data.get(k):
            print "Missing data for", k, template_data
            return

    print "Running with\n", template_data

    # Render and restart as needed
    for n in ('apiserver', 'controller-manager', 'scheduler'):
        if render_file(n, template_data) or not host.service_running(n):
            host.service_restart(n)

    # Render the file that makes the kubernetes binaries available to minions.
    if render_file(
            'distribution', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/distribution') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')
    # Render the default nginx template.
    if render_file(
            'nginx', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/default') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')

    # Send api endpoint to minions
    notify_minions()
Example #2
def tensorflow_available(tf):
    addrs = tf.addrs()
    if not addrs:
        if host.service_running('tfdemo'):
            host.service_stop('tfdemo')
        return
    hookenv.open_port(8080)
    ctx = {
        'addr': ','.join(addrs),
    }
    samples_dir = os.path.join(hookenv.charm_dir(), "samples")
    if os.path.exists(samples_dir):
        ctx['samples'] = samples_dir
    render(
        source="tfdemo.service",
        target="/etc/systemd/system/tfdemo.service",
        owner="root",
        perms=0o644,
        context=ctx,
    )
    check_call(['systemctl', 'daemon-reload'])
    if host.service_running('tfdemo'):
        host.service_restart('tfdemo')
    else:
        host.service_start('tfdemo')
    remove_state('client.available')
Example #3
def ensure_elasticsearch_started():
    """
    Ensure elasticsearch is started.
    (this should only run once)
    """

    sp.call(["systemctl", "daemon-reload"])
    sp.call(["systemctl", "enable", "elasticsearch.service"])

    # If elasticsearch isn't running start it
    if not service_running('elasticsearch'):
        service_start('elasticsearch')
    # If elasticsearch is running restart it
    else:
        service_restart('elasticsearch')

    # Wait up to 100 seconds for elasticsearch to start; if it still isn't
    # running by then, fall through so blocked status is set below
    cnt = 0
    while not service_running('elasticsearch') and cnt < 100:
        status_set('waiting', 'Waiting for Elasticsearch to start')
        sleep(1)
        cnt += 1

    if service_running('elasticsearch'):
        set_flag('elasticsearch.init.running')
        status_set('active', 'Elasticsearch running')
    else:
        # If elasticsearch won't start, set blocked
        status_set('blocked',
                   "There are problems with elasticsearch, please debug")
        return
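The polling loop above is a recurring idiom in these examples. A minimal reusable sketch, assuming charmhelpers' service_running; wait_for_service is a hypothetical helper name, not part of any of the charms shown:

from time import sleep

from charmhelpers.core.host import service_running


def wait_for_service(name, timeout=100):
    # Poll once per second until the service reports running or we time out.
    for _ in range(timeout):
        if service_running(name):
            return True
        sleep(1)
    return False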
Example #4
def configure_node(cluster_changed, cluster_joined):
    status_set('maintenance', 'Configuring slurm-node')

    controller_data = cluster_changed.active_data
    create_spool_dir(context=controller_data)

    render_munge_key(context=controller_data)
    # If the munge.key has been changed on the controller and munge is
    # running, the service must be restarted to use the new key
    if (flags.is_flag_set('endpoint.slurm-cluster.changed.munge_key')
            and service_running(MUNGE_SERVICE)):
        log('Restarting munge due to key change on slurm-controller')
        service_restart(MUNGE_SERVICE)

    render_slurm_config(context=controller_data)

    # Make sure munge is running
    if not service_running(MUNGE_SERVICE):
        service_start(MUNGE_SERVICE)
    # Make sure slurmd is running
    if not service_running(SLURMD_SERVICE):
        service_start(SLURMD_SERVICE)

    flags.set_flag('slurm-node.configured')
    log('Set {} flag'.format('slurm-node.configured'))

    flags.clear_flag('endpoint.slurm-cluster.active.changed')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.active.changed'))

    # Clear this flag to be able to signal munge_key changed if it occurs from
    # a controller.
    flags.clear_flag('endpoint.slurm-cluster.changed.munge_key')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.changed.munge_key'))
Example #5
def _assess_status():
    """Assess status of relations and services for local unit"""
    if is_flag_set('snap.channel.invalid'):
        status_set('blocked',
                   'Invalid snap channel '
                   'configured: {}'.format(config('channel')))
        return
    if is_flag_set('config.dns_vip.invalid'):
        status_set('blocked',
                   'vip and dns-ha-access-record configured')
        return

    health = None
    if service_running('vault'):
        health = vault.get_vault_health()
        application_version_set(health.get('version'))

    _missing_interfaces = []
    _incomplete_interfaces = []

    _assess_interface_groups(REQUIRED_INTERFACES, optional=False,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    _assess_interface_groups(OPTIONAL_INTERFACES, optional=True,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    if _missing_interfaces or _incomplete_interfaces:
        state = 'blocked' if _missing_interfaces else 'waiting'
        status_set(state, ', '.join(_missing_interfaces +
                                    _incomplete_interfaces))
        return

    if not service_running('vault'):
        status_set('blocked', 'Vault service not running')
        return

    if not health['initialized']:
        status_set('blocked', 'Vault needs to be initialized')
        return

    if health['sealed']:
        status_set('blocked', 'Unit is sealed')
        return

    mlock_disabled = is_container() or config('disable-mlock')

    status_set(
        'active',
        'Unit is ready '
        '(active: {}, mlock: {})'.format(
            str(not health['standby']).lower(),
            'disabled' if mlock_disabled else 'enabled'
        )
    )
Example #6
def start():
    if service_running('puma'):
        service_restart('puma')
    else:
        service_start('puma')
    if service_running('nginx'):
        service_restart('nginx')
    else:
        service_start('nginx')
    chown(config('app-path'), 'puma')
    set_state('app.running')
    remove_state('app.restart')
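This restart-if-running-else-start branch recurs throughout the examples below (tor, minio, hello-juju, redis). A hedged sketch of a shared helper; restart_or_start is a hypothetical name, not a charmhelpers API:

from charmhelpers.core import host


def restart_or_start(service_name):
    # Restart a running service so it picks up new config; otherwise start it.
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)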
Example #7
def configure_ovs():
    if config('plugin') == OVS:
        if not service_running('openvswitch-switch'):
            full_restart()
        add_bridge(INT_BRIDGE)
        add_bridge(EXT_BRIDGE)
        ext_port = config('ext-port')
        if ext_port:
            add_bridge_port(EXT_BRIDGE, ext_port)
    if config('plugin') == N1KV:
        if not service_running('openvswitch-switch'):
            full_restart()
        add_bridge(INT_BRIDGE)
Example #8
def restart_tor():
    clear_flag('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_flag('tor.started')
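Example #9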
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if (charm_config.changed('install_sources')
            or charm_config.changed('plumgrid-build')
            or charm_config.changed('networking-build')
            or charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version')
            or charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    if not service_running('neutron-server'):
        service_start('neutron-server')
Example #10
def start_elasticsearch():
    """
    Make sure the service is running.
    """
    if not host.service_running('elasticsearch'):
        host.service_start('elasticsearch')
        status.ready('Ready.')
Example #11
def restart_tor():
    remove_state('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_state('tor.started')
Example #12
def restart():
    open_port(9000)
    if service_running("minio"):
        service("restart", "minio")
    else:
        service("start", "minio")
    status_set("active", "")
def configure_db(db):
    uri_pat = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'
    db_uri = uri_pat.format(**db.master)

    if kvdb.get('db_uri') != db_uri or not service_running(SERVICE):
        kvdb.set('db_uri', db_uri)
        update_db()
Example #14
def restart():
    open_port(port())
    if service_running('hello-juju'):
        service('restart', 'hello-juju')
    else:
        service('start', 'hello-juju')
    status_set("active", "")
Example #15
def stop():
    ''' Juju calls the stop hook before the unit is destroyed.  Clean up. '''
    # Do we need to explicitly call leave here?
    if host.service_running('consul'):
        host.service_stop('consul')
    for p in PORTS:
        hookenv.close_port(p)
Example #16
def enable_ha_services():
    """Startup and enable HA services."""
    log("Enabling HA services", INFO)
    for svc in ['pacemaker', 'corosync']:
        enable_lsb_services(svc)
        if not service_running(svc):
            service_start(svc)
Example #17
def disable_ha_services():
    """Shutdown and disable HA services."""
    log("Disabling HA services", INFO)
    for svc in ['corosync', 'pacemaker']:
        disable_lsb_services(svc)
        if service_running(svc):
            service_stop(svc)
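Example #18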
def remove_block_conffile(confname):
    """
    Remove a previously-created storpool_block config file that
    instructs it to expose devices to LXD containers.
    """
    rdebug("no Cinder LXD containers found, checking for "
           "any previously stored configuration...")
    removed = False
    if confname.is_file():
        rdebug(
            "- yes, {confname} exists, removing it".format(confname=confname))
        try:
            confname.unlink()
            removed = True
        except Exception as e:
            rdebug("could not remove {confname}: {e}".format(confname=confname,
                                                             e=e))
    elif confname.exists():
        rdebug("- well, {confname} exists, but it is not a file; "
               "removing it anyway".format(confname=confname))
        subprocess.call(["rm", "-rf", "--", str(confname)])
        removed = True
    if removed:
        rdebug("- let us try to restart the storpool_block service " +
               "(it may not even have been started yet, so ignore errors)")
        try:
            if host.service_running("storpool_block"):
                rdebug("  - well, it does seem to be running, so " +
                       "restarting it")
                host.service_restart("storpool_block")
            else:
                rdebug("  - nah, it was not running at all indeed")
        except Exception as e:
            rdebug("  - could not restart the service, but "
                   "ignoring the error: {e}".format(e=e))
Example #19
def configure_analyst_opsvm():
    '''
    Configures Analyst for OPSVM
    '''
    if not service_running('plumgrid'):
        restart_pg()
    opsvm_ip = pg_gw_context._pg_dir_context()['opsvm_ip']
    NS_ENTER = ('/opt/local/bin/nsenter -t $(ps ho pid --ppid $(cat '
                '/var/run/libvirt/lxc/plumgrid.pid)) -m -n -u -i -p ')
    sigmund_stop = NS_ENTER + '/usr/bin/service plumgrid-sigmund stop'
    sigmund_status = NS_ENTER \
        + '/usr/bin/service plumgrid-sigmund status'
    sigmund_autoboot = NS_ENTER \
        + '/usr/bin/sigmund-configure --ip {0} --start --autoboot' \
        .format(opsvm_ip)
    try:
        status = subprocess.check_output(sigmund_status, shell=True).decode()
        if 'start/running' in status:
            if subprocess.call(sigmund_stop, shell=True):
                log('plumgrid-sigmund couldn\'t be stopped!')
                return
        subprocess.check_call(sigmund_autoboot, shell=True)
        status = subprocess.check_output(sigmund_status, shell=True)
    except Exception:
        log('plumgrid-sigmund couldn\'t be started!')
Example #20
def restart_corosync():
    if service_running("pacemaker"):
        service_stop("pacemaker")

    if not is_unit_paused_set():
        service_restart("corosync")
        service_start("pacemaker")
Example #21
def get_failing_services():
    failing_services = []
    services = ['calico-node', 'flannel']
    for service in services:
        if not service_running(service):
            failing_services.append(service)
    return failing_services
Example #22
def upgrade_charm():
    hookenv.log('Upgrading Meteor')

    OLD_METEOR_CONFIG = BASE_DIR + '/.juju-config'
    NEW_METEOR_CONFIG = os.path.join(hookenv.charm_dir(),
                                     hookenv.Config.CONFIG_FILE_NAME)

    if (os.path.exists(OLD_METEOR_CONFIG) and not
            os.path.exists(NEW_METEOR_CONFIG)):
        hookenv.log('Moving config from {} to {}'.format(
            OLD_METEOR_CONFIG, NEW_METEOR_CONFIG))
        shutil.move(OLD_METEOR_CONFIG, NEW_METEOR_CONFIG)

    config = hookenv.config()
    os.environ['HOME'] = os.path.expanduser('~' + USER)

    hookenv.log('Upgrading nodejs')
    fetch.apt_update()
    fetch.apt_install(PACKAGES)

    hookenv.log('Upgrading meteor/meteorite')
    subprocess.check_call(DOWNLOAD_CMD.split())
    subprocess.check_call(INSTALL_CMD.split())
    subprocess.check_call('npm install -g meteorite'.split())

    init_dependencies(config)

    if host.service_running(SERVICE):
        start()
Example #23
def configure_ovs():
    status_set('maintenance', 'Configuring ovs')
    if not service_running('openvswitch-switch'):
        full_restart()
    add_bridge(INT_BRIDGE)
    add_bridge(EXT_BRIDGE)
    ext_port_ctx = None
    if use_dvr():
        ext_port_ctx = ExternalPortContext()()
    if ext_port_ctx and ext_port_ctx['ext_port']:
        add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])

    portmaps = DataPortContext()()
    bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
    for provider, br in bridgemaps.items():
        add_bridge(br)
        if not portmaps:
            continue

        for port, _br in portmaps.items():
            if _br == br:
                add_bridge_port(br, port, promisc=True)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    service_restart('os-charm-phy-nic-mtu')
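Example #24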
def ensure_beats_are_running():
    status_set('maintenance', 'ensuring beats are fully started')
    for beat in ['filebeat', 'metricbeat']:
        if start_restart_systemd_service(beat):
            status_set('active', f'{beat} has initially started')

            ctr = 0
            beat_record = 0

            while True:
                if ctr == 100:
                    status_set('blocked',
                               f'{beat} not starting - please debug')
                    return
                if beat_record == 10:
                    status_set('active', f'{beat} started')
                    set_flag(f'elasticsearch.{beat}.available')
                    break

                status_set('maintenance', f'ensuring {beat} has fully started')

                if service_running(beat):
                    beat_record += 1
                else:
                    start_restart_systemd_service(beat)
                    beat_record = 0

                ctr += 1
                sleep(1)

    if is_flag_set('elasticsearch.filebeat.available') and\
            is_flag_set('elasticsearch.metricbeat.available'):
        set_flag('elasticsearch.beats.available')
    es_active_status()
Example #25
def enable(self):
    if not service_running('lldpd'):
        self.disable_i40e_lldp_agent()
        service_start('lldpd')
        hookenv.log('Waiting to collect LLDP data', 'INFO')
        time.sleep(30)
        enabled = True
Example #26
def check_service(service, attempts=6, delay=10):
    """Check if a given service is up, giving it a bit of time to come up if needed.

    Returns True if the service is running, False if not, or raises a ValueError if
    the service is unknown. Will automatically handle translating master component
    names (e.g., kube-apiserver) to service names (snap.kube-apiserver.daemon).
    """
    for pattern in ("{}", "snap.{}", "snap.{}.daemon", "snap.kube-{}.daemon"):
        if host.service("is-enabled", pattern.format(service)):
            service = pattern.format(service)
            break
    else:
        raise ValueError("Unknown service: {}".format(service))
    # Give each service up to a minute to become active; this is especially
    # needed now that controller-mgr/scheduler/proxy need the apiserver
    # to validate their token against a k8s secret.
    attempt = 0
    while attempt < attempts:
        hookenv.log("Checking if {} is active ({} / {})".format(
            service, attempt, attempts))
        if host.service_running(service):
            return True
        sleep(delay)
        attempt += 1
    return False
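A hypothetical caller for check_service() above; the component names and the blocked-status message are illustrative assumptions, not taken from the original charm:

from charmhelpers.core import hookenv

# Component names here are assumptions; check_service() above translates
# them to the matching snap service names.
failing = [c for c in ('apiserver', 'controller-manager', 'scheduler')
           if not check_service(c)]
if failing:
    hookenv.status_set('blocked', 'Not running: ' + ', '.join(failing))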
Example #27
def restart():
    open_port(port())
    if service_running('omnivector-challenge'):
        service('restart', 'omnivector-challenge')
    else:
        service('start', 'omnivector-challenge')
    status_set("active", "")
Example #28
def relation_changed():
    """Connect the parts and go :-)
    """
    template_data = get_template_data()

    # Check required keys
    for k in ('etcd_servers', 'kubeapi_server'):
        if not template_data.get(k):
            print('Missing data for %s %s' % (k, template_data))
            return
    print('Running with\n%s' % template_data)

    # Setup kubernetes supplemental group
    setup_kubernetes_group()

    # Register upstart managed services
    for n in ('kubelet', 'proxy'):
        if render_upstart(n, template_data) or not host.service_running(n):
            print('Starting %s' % n)
            host.service_restart(n)

    # Register machine via api
    print('Registering machine')
    register_machine(template_data['kubeapi_server'])

    # Save the marker (for restarts to detect prev install)
    template_data.save()
Example #29
def services_running():
    """Determine if both Corosync and Pacemaker are running
    Both from the operating system perspective and with a functional test
    @returns boolean
    """
    pacemaker_status = service_running("pacemaker")
    corosync_status = service_running("corosync")
    log("Pacemaker status: {}, Corosync status: {}"
        "".format(pacemaker_status, corosync_status),
        level=DEBUG)
    if not (pacemaker_status and corosync_status):
        # OS perspective
        return False
    else:
        # Functional test of pacemaker
        return pcmk.wait_for_pcmk()
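Example #30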
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if (charm_config.changed('install_sources') or
        charm_config.changed('plumgrid-build') or
        charm_config.changed('networking-build') or
            charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version') or
            charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    if not service_running('neutron-server'):
        service_start('neutron-server')
Example #31
def stop():
    ''' Juju calls the stop hook before the unit is destroyed.  Clean up. '''
    # Do we need to explicitly call leave here?
    if host.service_running('consul'):
        host.service_stop('consul')
    for p in PORTS:
        hookenv.close_port(p)
Example #32
def start(kafka):
    hookenv.log('Starting burrow')
    if not host.service_running('burrow'):
        call(['systemctl', 'enable', 'burrow'])
        host.service_start('burrow')
    status_set('active', 'ready (:' + str(config.get('port')) + ')')
    set_flag('burrow.started')
Example #33
def services_running():
    """Determine if both Corosync and Pacemaker are running
    Both from the operating system perspective and with a functional test
    @returns boolean
    """
    pacemaker_status = service_running("pacemaker")
    corosync_status = service_running("corosync")
    log("Pacemaker status: {}, Corosync status: {}"
        "".format(pacemaker_status, corosync_status),
        level=DEBUG)
    if not (pacemaker_status and corosync_status):
        # OS perspective
        return False
    else:
        # Functional test of pacemaker
        return pcmk.wait_for_pcmk()
Example #34
def config_changed():
    if openstack_upgrade_available(get_common_package()):
        do_openstack_upgrade(CONFIGS)
    if valid_plugin():
        CONFIGS.write_all()
        configure_ovs()
    else:
        log('Please provide a valid plugin config', level=ERROR)
        sys.exit(1)
    if config('plugin') == 'n1kv':
        if config('l3-agent') == 'enable':
            if not service_running('neutron-l3-agent'):
                service_start('neutron-l3-agent')
        else:
            if service_running('neutron-l3-agent'):
                service_stop('neutron-l3-agent')
Example #35
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)
Example #36
def remove_block_conffile(confname):
    """
    Remove a previously-created storpool_block config file that
    instructs it to expose devices to LXD containers.
    """
    rdebug('no Cinder LXD containers found, checking for '
           'any previously stored configuration...')
    removed = False
    if os.path.isfile(confname):
        rdebug(
            '- yes, {confname} exists, removing it'.format(confname=confname))
        try:
            os.unlink(confname)
            removed = True
        except Exception as e:
            rdebug('could not remove {confname}: {e}'.format(confname=confname,
                                                             e=e))
    elif os.path.exists(confname):
        rdebug('- well, {confname} exists, but it is not a file; '
               'removing it anyway'.format(confname=confname))
        subprocess.call(['rm', '-rf', '--', confname])
        removed = True
    if removed:
        rdebug('- let us try to restart the storpool_block service ' +
               '(it may not even have been started yet, so ignore errors)')
        try:
            if host.service_running('storpool_block'):
                rdebug('  - well, it does seem to be running, so ' +
                       'restarting it')
                host.service_restart('storpool_block')
            else:
                rdebug('  - nah, it was not running at all indeed')
        except Exception as e:
            rdebug('  - could not restart the service, but '
                   'ignoring the error: {e}'.format(e=e))
Example #37
def write_config_start_restart_redis():
    """Write config, restart service
    """

    ctxt = {'port': config('port'),
            'databases': config('databases'),
            'log_level': config('log-level'),
            'tcp_keepalive': config('tcp-keepalive'),
            'timeout': config('timeout'),
            'redis_dir': REDIS_DIR}

    if config('cluster-enabled'):
        ctxt['cluster_conf'] = REDIS_CLUSTER_CONF
    if config('password'):
        ctxt['password'] = config('password')

    render_conf(REDIS_CONF, 'redis.conf.tmpl', ctxt=ctxt)

    if service_running(REDIS_SERVICE):
        service_restart(REDIS_SERVICE)
    else:
        service_start(REDIS_SERVICE)

    status.active("Redis {} available".format(
        "cluster" if config('cluster-enabled') else "singleton"))
    set_flag('redis.ready')
Example #38
def relation_changed():
    """Connect the parts and go :-)
    """
    template_data = get_template_data()

    # Check required keys
    for k in ('etcd_servers', 'kubeapi_server'):
        if not template_data.get(k):
            print('Missing data for %s %s' % (k, template_data))
            return
    print('Running with\n%s' % template_data)

    # Setup kubernetes supplemental group
    setup_kubernetes_group()

    # Register upstart managed services
    for n in ('kubelet', 'proxy'):
        if render_upstart(n, template_data) or not host.service_running(n):
            print('Starting %s' % n)
            host.service_restart(n)

    # Register machine via api
    print('Registering machine')
    register_machine(template_data['kubeapi_server'])

    # Save the marker (for restarts to detect prev install)
    template_data.save()
Example #39
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)
Example #40
def configure_ovs():
    status_set('maintenance', 'Configuring ovs')
    if not service_running('openvswitch-switch'):
        full_restart()
    add_bridge(INT_BRIDGE)
    add_bridge(EXT_BRIDGE)
    ext_port_ctx = None
    if use_dvr():
        ext_port_ctx = ExternalPortContext()()
    if ext_port_ctx and ext_port_ctx['ext_port']:
        add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])

    portmaps = DataPortContext()()
    bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
    for provider, br in bridgemaps.items():
        add_bridge(br)
        if not portmaps:
            continue

        for port, _br in portmaps.items():
            if _br == br:
                add_bridge_port(br, port, promisc=True)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    service_restart('os-charm-phy-nic-mtu')
Example #41
def restart_tor():
    remove_state('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_state('tor.started')
    hookenv.status_set('active', 'tor service ready')
Example #42
def restart_gobinary():
    remove_state("gobinary.start")
    bin_config = gobinary.config()
    if host.service_running(bin_config["binary"]):
        host.service_restart(bin_config["binary"])
    else:
        host.service_start(bin_config["binary"])
    set_state("gobinary.started")
Example #43
def start():
    if service_running('circus'):
        service_restart('circus')
    else:
        service_start('circus')

    set_state('circus.running')
    remove_state('django.restart')
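Example #44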
def restart_service_workload():
    remove_state('service-workload.start')
    manifest = service_workload.manifest()
    if host.service_running(manifest['name']):
        host.service_restart(manifest['name'])
    else:
        host.service_start(manifest['name'])
    set_state('service-workload.started')
Example #45
def cmd_all_services(cmd):
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #46
def restart_grafana():
    if not host.service_running(SVCNAME):
        hookenv.log('Starting {}...'.format(SVCNAME))
        host.service_start(SVCNAME)
    elif any_file_changed([GRAFANA_INI]):
        hookenv.log('Restarting {}, config file changed...'.format(SVCNAME))
        host.service_restart(SVCNAME)
    hookenv.status_set('active', 'Ready')
    set_state('grafana.started')
Example #47
def start():
    ''' Juju calls the start hook after config-changed. Open ports. '''
    ensure_running(False)

    if host.service_running('consul'):
        for p in PORTS:
            hookenv.open_port(p)
    else:
        hookenv.log('The consul service is not running!', hookenv.WARNING)
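Example #48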
def install():
    # Stop in case the service was already installed and running. This is more
    # likely to happen when a subordinate is redeployed.
    manifest = service_workload.manifest()
    service = manifest["name"]
    if host.service_running(service):
        host.service_stop(service)

    service_workload.install()
Example #49
def install_dependencies():
    deps = hookenv.config()['pip3-dependencies'].split()
    if deps:
        pip_install(deps)
    set_state('dependencies.installed')
    # This might run before jupyter is started for the first time. In that case,
    # don't restart jupyter.
    if host.service_running('jupyter-notebook'):
        restart_notebook()
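Example #50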
def render_config_restart_on_changed(hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # NOTE(jamespage):
            # if mysql@bootstrap is running, then the native
            # bootstrap systemd service was used to start this
            # instance, and it was the initial seed unit
            # stop the bootstrap version before restarting normal mysqld
            if service_running('mysql@bootstrap'):
                service_stop('mysql@bootstrap')

            attempts = 0
            max_retries = 5

            cluster_wait()
            while not service_restart('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying per distributed wait",
                    WARNING)
                attempts += 1
                cluster_wait()

        # If we get here we assume prior actions have succeeded, so always
        # mark this unit as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_client_db_relations()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
Example #51
def upgrade():
    service = "minecraft"
    need_restart = False
    if host.service_running(service):
        need_restart = True
    if need_restart:
        stop()
    install()
    if need_restart:
        start()
Example #52
def ensure_running(changed):
    if host.service_running('consul'):
        if changed:
            print("Reloaded consul config")
            subprocess.check_output([BIN_PATH, "reload"])
        else:
            print("Consul server already running")
        return
    print("Starting consul server")
    host.service_start('consul')
Example #53
def start():
    # Reconfigure NGINX as an upstart job and use a specific config file
    run(['/etc/init.d/nginx', 'stop'])
    while host.service_running('nginx'):
        log("nginx still running")
        time.sleep(60)
    os.remove('/etc/init.d/nginx')
    run(['update-rc.d', '-f', 'nginx', 'remove'])
    log("Starting NATS daemonized in the background")
    host.service_start('cf-nats')
Example #54
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)
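Example #55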
def restart_pg():
    '''
    Stops and starts the PLUMgrid service after flushing iptables.
    '''
    stop_pg()
    service_start('plumgrid')
    time.sleep(3)
    if not service_running('plumgrid'):
        if service_running('libvirt-bin'):
            raise ValueError("plumgrid service couldn't be started")
        else:
            if service_start('libvirt-bin'):
                time.sleep(8)
                if not service_running('plumgrid') \
                        and not service_start('plumgrid'):
                    raise ValueError("plumgrid service couldn't be started")
            else:
                raise ValueError("libvirt-bin service couldn't be started")
    status_set('active', 'Unit is ready')
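Example #56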
def upgrade():
    # TODO: get_state("service-workload.config")
    #       and compare with upgraded, remove old service if name has changed.
    manifest = service_workload.manifest()
    service = manifest["name"]
    need_restart = False
    if host.service_running(service):
        need_restart = True
        host.service_stop(service)
    service_workload.install()
    if need_restart:
        host.service_start(service)
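Example #57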
def cmd_all_services(cmd):
    if is_unit_paused_set():
        log('Unit is in paused state, not issuing {} to all '
            'services'.format(cmd))
        return
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #58
def upgrade():
    # TODO: get_state("go-binary.config")
    #       and compare with upgraded, remove old service if name has changed.
    config = gobinary.config()
    service = config["binary"]
    need_restart = False
    if host.service_running(service):
        need_restart = True
        host.service_stop(service)
    install_workload(config)
    if need_restart:
        host.service_start(service)