def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(
                ['keystone'],
                config('openstack-origin'),
                mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')
def start_nodemanager():
    hookenv.status_set('maintenance', 'starting nodemanager')
    host.service_start('hadoop-yarn-nodemanager')
    for port in get_layer_opts().exposed_ports('nodemanager'):
        hookenv.open_port(port)
    set_state('nodemanager.started')
    hookenv.status_set('active', 'ready')
Example #3
    def install(self, plugins):
        """Install the given plugins, optionally removing unlisted ones.

        @param plugins: A whitespace-separated list of plugins to install.
        """
        plugins = plugins or ""
        plugins = plugins.split()
        hookenv.log("Stopping jenkins for plugin update(s)")
        host.service_stop("jenkins")

        hookenv.log("Installing plugins (%s)" % " ".join(plugins))

        host.mkdir(
            paths.PLUGINS, owner="jenkins", group="jenkins", perms=0o0755)

        existing_plugins = set(glob.glob("%s/*.hpi" % paths.PLUGINS))
        installed_plugins = self._install_plugins(plugins)
        unlisted_plugins = existing_plugins - installed_plugins
        if unlisted_plugins:
            if hookenv.config()["remove-unlisted-plugins"] == "yes":
                self._remove_plugins(unlisted_plugins)
            else:
                hookenv.log(
                    "Unlisted plugins: (%s) Not removed. Set "
                    "remove-unlisted-plugins to 'yes' to clear them "
                    "away." % ", ".join(unlisted_plugins))

        hookenv.log("Starting jenkins to pickup configuration changes")
        host.service_start("jenkins")
Example #4
def write_config_start_restart_redis():
    """Write config, restart service
    """

    ctxt = {'port': config('port'),
            'databases': config('databases'),
            'log_level': config('log-level'),
            'tcp_keepalive': config('tcp-keepalive'),
            'timeout': config('timeout'),
            'redis_dir': REDIS_DIR}

    if config('cluster-enabled'):
        ctxt['cluster_conf'] = REDIS_CLUSTER_CONF
    if config('password'):
        ctxt['password'] = config('password')

    render_conf(REDIS_CONF, 'redis.conf.tmpl', ctxt=ctxt)

    if service_running(REDIS_SERVICE):
        service_restart(REDIS_SERVICE)
    else:
        service_start(REDIS_SERVICE)

    status.active("Redis {} available".format(
        "cluster" if config('cluster-enabled') else "singleton"))
    set_flag('redis.ready')
Example #5
def enable_ha_services():
    """Startup and enable HA services."""
    log("Enabling HA services", INFO)
    for svc in ['pacemaker', 'corosync']:
        enable_lsb_services(svc)
        if not service_running(svc):
            service_start(svc)
def mount():
    try:
        mountpoint = volumes.configure_volume(before_change=volume_change_pre, after_change=volume_change_post)
    except volumes.VolumeConfigurationError:
        hookenv.log('Storage could not be configured', hookenv.ERROR)
        sys.exit(1)

    if mountpoint == 'ephemeral':
        if os.path.islink(SOLR_DIR):
            os.remove(SOLR_DIR)
            if os.path.isdir(SAVED_DIR):
                os.rename(SAVED_DIR, SOLR_DIR)
            else:
                os.mkdir(SOLR_DIR)
    else:
        if not storage_is_persistent():
            try:
                os.rename(SOLR_DIR, SAVED_DIR)
            except OSError as e:
                hookenv.log('ERROR: could not preserve existing Solr directory', hookenv.ERROR)
                hookenv.log(e.strerror, hookenv.ERROR)
                sys.exit(1)
            os.symlink(mountpoint, SOLR_DIR)
            set_permissions()
            host.service_start('jetty')
Example #7
def restart_tor():
    clear_flag('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_flag('tor.started')
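Several of the examples in this listing (tor, tfdemo, circus, redis) use the same restart-or-start idiom shown above. Below is a minimal generic sketch of that idiom, assuming charmhelpers.core.host is importable; the helper name restart_or_start is illustrative and does not come from any of these charms.

from charmhelpers.core import host


def restart_or_start(service_name):
    """Restart service_name if it is already running, otherwise start it."""
    # Both host.service_restart and host.service_start return True on success,
    # so callers can check the result just as the charms above do.
    if host.service_running(service_name):
        return host.service_restart(service_name)
    return host.service_start(service_name)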
Example #8
def ensure_elasticsearch_started():
    """
    Ensure elasticsearch is started.
    (this should only run once)
    """

    sp.call(["systemctl", "daemon-reload"])
    sp.call(["systemctl", "enable", "elasticsearch.service"])

    # If elasticsearch isn't running start it
    if not service_running('elasticsearch'):
        service_start('elasticsearch')
    # If elasticsearch is running restart it
    else:
        service_restart('elasticsearch')

    # Wait up to 100 seconds for elasticsearch to start; if it still is not
    # running, the status will be set to blocked below.
    cnt = 0
    while not service_running('elasticsearch') and cnt < 100:
        status_set('waiting', 'Waiting for Elasticsearch to start')
        sleep(1)
        cnt += 1

    if service_running('elasticsearch'):
        set_flag('elasticsearch.init.running')
        status_set('active', 'Elasticsearch running')
    else:
        # If elasticsearch won't start, set blocked
        status_set('blocked',
                   "There are problems with elasticsearch, please debug")
        return
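The poll-until-running loop above also appears in the Cassandra and openvim examples further down. A minimal sketch of the same loop as a reusable helper, assuming charmhelpers.core.host; the function name and the default timeout are illustrative.

import time

from charmhelpers.core.host import service_running


def wait_for_service(service_name, timeout=100, interval=1):
    """Return True once service_name reports running, or False after timeout seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if service_running(service_name):
            return True
        time.sleep(interval)
    return False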
Example #9
def start_elasticsearch():
    """
    Make sure the service is running.
    """
    if not host.service_running('elasticsearch'):
        host.service_start('elasticsearch')
        status.ready('Ready.')
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if (charm_config.changed('install_sources')
            or charm_config.changed('plumgrid-build')
            or charm_config.changed('networking-build')
            or charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version')
            or charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    if not service_running('neutron-server'):
        service_start('neutron-server')
Example #11
def restart_tor():
    remove_state('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_state('tor.started')
def start_datanode():
    hookenv.status_set('maintenance', 'starting datanode')
    host.service_start('hadoop-hdfs-datanode')
    for port in get_layer_opts().exposed_ports('datanode'):
        hookenv.open_port(port)
    set_state('datanode.started')
    hookenv.status_set('active', 'ready')
def install_clustered():
    service_stop('arangodb3')
    if not is_flag_set('arangodb.clustered'):
        if unit_private_ip() == leader_get('master_ip'):
            render(
                source='arangodbcluster.service',
                target='/etc/systemd/system/arangodbcluster.service',
                context={'option': '--starter.data-dir={}'.format(DATA_DIR)})
            subprocess.check_call(['systemctl', 'daemon-reload'])
            subprocess.check_call(
                ['systemctl', 'enable', 'arangodbcluster.service'])
            service_start('arangodbcluster')
            set_flag('arangodb.clustered')
            leader_set({'master_started': True})
        elif leader_get('master_started'):
            render(source='arangodbcluster.service',
                   target='/etc/systemd/system/arangodbcluster.service',
                   context={
                       'option':
                       '--starter.data-dir={} --starter.join {}'.format(
                           DATA_DIR, leader_get('master_ip'))
                   })
            subprocess.check_call(['systemctl', 'daemon-reload'])
            subprocess.check_call(
                ['systemctl', 'enable', 'arangodbcluster.service'])
            service_start('arangodbcluster')
            # Let the charm sleep for 15 seconds so that the setup file is created
            time.sleep(15)
            set_flag('arangodb.clustered')
    setup_file = Path('{}/setup.json'.format(DATA_DIR))
    if setup_file.exists():
        close_port(kv.get('port'))
        open_coordinater_port()
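Rather than sleeping a fixed 15 seconds and then checking for setup.json, the wait can be expressed as a short poll loop. A minimal sketch, reusing the DATA_DIR constant from the example above; the helper name, timeout, and interval are illustrative.

import time
from pathlib import Path


def wait_for_setup_file(data_dir, timeout=60, interval=1):
    """Return True once data_dir/setup.json exists, or False after timeout seconds."""
    setup_file = Path(data_dir) / 'setup.json'
    deadline = time.time() + timeout
    while time.time() < deadline:
        if setup_file.exists():
            return True
        time.sleep(interval)
    return False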
Example #14
def tensorflow_available(tf):
    addrs = tf.addrs()
    if not addrs:
        if host.service_running('tfdemo'):
            host.service_stop('tfdemo')
        return
    hookenv.open_port(8080)
    ctx = {
        'addr': ','.join(addrs),
    }
    samples_dir = os.path.join(hookenv.charm_dir(), "samples")
    if os.path.exists(samples_dir):
        ctx['samples'] = samples_dir
    render(
        source="tfdemo.service",
        target="/etc/systemd/system/tfdemo.service",
        owner="root",
        perms=0o644,
        context=ctx,
    )
    check_call(['systemctl', 'daemon-reload'])
    if host.service_running('tfdemo'):
        host.service_restart('tfdemo')
    else:
        host.service_start('tfdemo')
    remove_state('client.available')
Example #15
def upgrade_monitor():
    current_version = ceph.get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=ceph.PACKAGES, fatal=True)
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
def start_cassandra():
    if is_cassandra_running():
        return

    actual_seeds = sorted(actual_seed_ips())
    assert actual_seeds, 'Attempting to start cassandra with empty seed list'
    hookenv.config()['configured_seeds'] = actual_seeds

    if is_bootstrapped():
        status_set('maintenance',
                   'Starting Cassandra with seeds {!r}'
                   .format(','.join(actual_seeds)))
    else:
        status_set('maintenance',
                   'Bootstrapping with seeds {}'
                   .format(','.join(actual_seeds)))

    host.service_start(get_cassandra_service())

    # Wait for Cassandra to actually start, or abort.
    timeout = time.time() + RESTART_TIMEOUT
    while time.time() < timeout:
        if is_cassandra_running():
            return
        time.sleep(1)
    status_set('blocked', 'Cassandra failed to start')
    raise SystemExit(0)
Example #17
def start_api_gunicorn(path, app, port, workers, template, context):
    stop_api()
    path = path.rstrip('/')
    # info[0] = path to project
    # info[1] = main
    info = path.rsplit('/', 1)
    # Remove .py from main
    main = info[1].split('.', 1)[0]
    if os.path.exists(info[0] + '/wsgi.py'):
        os.remove(info[0] + '/wsgi.py')
    render(source='gunicorn.wsgi',
           target=info[0] + "/wsgi.py",
           context={
               'app': app,
               'main': main,
           })
    unitfile_dict = load_unitfile()
    unitfile_context = {**unitfile_dict, **context}
    unitfile_context['port'] = str(port)
    unitfile_context['pythonpath'] = info[0]
    unitfile_context['app'] = app
    unitfile_context['workers'] = str(workers)
    unitfile_context['gunicornpath'] = os.path.join(
        os.path.dirname(os.getcwd()), ".venv/bin/gunicorn")

    render(source=template,
           target='/etc/systemd/system/flask.service',
           context=unitfile_context)

    call(['systemctl', 'enable', 'flask'])
    host.service_start('flask')
Example #18
def configure_node(cluster_changed, cluster_joined):
    status_set('maintenance', 'Configuring slurm-node')

    controller_data = cluster_changed.active_data
    create_spool_dir(context=controller_data)

    render_munge_key(context=controller_data)
    # If the munge.key has been changed on the controller and munge is
    # running, the service must be restarted to use the new key
    if flags.is_flag_set('endpoint.slurm-cluster.changed.munge_key'
                         ) and service_running(MUNGE_SERVICE):
        log('Restarting munge due to key change on slurm-controller')
        service_restart(MUNGE_SERVICE)

    render_slurm_config(context=controller_data)

    # Make sure munge is running
    if not service_running(MUNGE_SERVICE):
        service_start(MUNGE_SERVICE)
    # Make sure slurmd is running
    if not service_running(SLURMD_SERVICE):
        service_start(SLURMD_SERVICE)

    flags.set_flag('slurm-node.configured')
    log('Set {} flag'.format('slurm-node.configured'))

    flags.clear_flag('endpoint.slurm-cluster.active.changed')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.active.changed'))

    # Clear this flag to be able to signal munge_key changed if it occurs from
    # a controller.
    flags.clear_flag('endpoint.slurm-cluster.changed.munge_key')
    log('Cleared {} flag'.format('endpoint.slurm-cluster.changed.munge_key'))
def install_cherrypy_helloworld():
    """Install the cherrypy helloworld service."""
    # Install dependencies for our helloworld service
    for pkg in ['CherryPy', 'jinja2']:
        pip_install(pkg)

    # When we first run, generate the systemd service file
    with open('{}/templates/helloworld.service.j2'.format(charm_dir())) as f:
        t = Template(f.read())

        # Render the new configuration
        rendered = t.render(
            charm_dir=charm_dir(),
        )

        status_set('maintenance', 'Creating helloworld service...')
        service_file = "/etc/systemd/system/{}.service".format(charm_name())
        with open(service_file, "w") as svc:
            svc.write(rendered)

        # Render the initial configuration
        render_config()

        status_set('maintenance', 'Starting helloworld service...')
        service_start(charm_name())

        # Make sure the port is open
        update_http_port()

        status_set('active', 'Ready!')

    set_flag('cherrypy-helloworld.installed')
Example #20
def migrate_to_mount(new_path):
    """Invoked when new mountpoint appears. This function safely migrates
    MySQL data from local disk to persistent storage (only if needed)
    """
    old_path = '/var/lib/mysql'
    if os.path.islink(old_path):
        hookenv.log('{} is already a symlink, skipping migration'.format(
            old_path))
        return True
    # Ensure our new mountpoint is empty. Otherwise error and allow
    # users to investigate and migrate manually
    files = os.listdir(new_path)
    try:
        files.remove('lost+found')
    except ValueError:
        pass
    if files:
        raise RuntimeError('Persistent storage contains old data. '
                           'Please investigate and migrate data manually '
                           'to: {}'.format(new_path))
    os.chmod(new_path, 0o700)
    if os.path.isdir('/etc/apparmor.d/local'):
        render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
               context={'path': os.path.join(new_path, '')})
        host.service_reload('apparmor')
    host.service_stop('mysql')
    host.rsync(os.path.join(old_path, ''),  # Ensure we have trailing slashes
               os.path.join(new_path, ''),
               options=['--archive'])
    shutil.rmtree(old_path)
    os.symlink(new_path, old_path)
    host.service_start('mysql')
Example #22
def configure_crowd():
    opts = {}
    opts['crowd_home'] = '/var/crowd-home'
    render(
        'crowd-init.properties',
        "{}/atlassian-crowd-{}/crowd-webapp/WEB-INF/classes/crowd-init.properties"
        .format(CROWD_INSTALL, hookenv.config('crowd-version')),
        opts,
        owner="crowd",
        group="crowd",
    )
    service_opts = {
        'crowd_install_dir': CROWD_INSTALL,
        'crowd_version': hookenv.config('crowd-version'),
    }
    render(
        'crowd.service',
        '/etc/systemd/system/crowd.service',
        service_opts,
    )
    chmod(
        "{}/atlassian-crowd-{}/start_crowd.sh".format(
            CROWD_INSTALL, hookenv.config('crowd-version')), 0o755)
    chmod(
        "{}/atlassian-crowd-{}/stop_crowd.sh".format(
            CROWD_INSTALL, hookenv.config('crowd-version')), 0o755)

    if hookenv.config('license-key'):
        install_license(hookenv.config('license-key'))

    host.service_start('crowd')
    host.service_resume('crowd')
    hookenv.open_port(8095)
def start_cassandra():
    if is_cassandra_running():
        return

    actual_seeds = sorted(actual_seed_ips())
    assert actual_seeds, 'Attempting to start cassandra with empty seed list'
    hookenv.config()['configured_seeds'] = actual_seeds

    if is_bootstrapped():
        status_set(
            'maintenance', 'Starting Cassandra with seeds {!r}'.format(
                ','.join(actual_seeds)))
    else:
        status_set(
            'maintenance',
            'Bootstrapping with seeds {}'.format(','.join(actual_seeds)))

    host.service_start(get_cassandra_service())

    # Wait for Cassandra to actually start, or abort.
    timeout = time.time() + RESTART_TIMEOUT
    while time.time() < timeout:
        if is_cassandra_running():
            return
        time.sleep(1)
    status_set('blocked', 'Cassandra failed to start')
    raise SystemExit(0)
Example #25
def setup_config():
    hookenv.status_set('maintenance', 'Configuring Radarr')
    backups = './backups'
    if radarr.charm_config['restore-config']:
        try:
            os.mkdir(backups)
        except OSError as e:
            # 17 == EEXIST; only ignore an already-existing directory
            if e.errno != 17:
                raise
        backupFile = hookenv.resource_get('radarrconfig')
        if backupFile:
            with ZipFile(backupFile, 'r') as inFile:
                inFile.extractall(radarr.config_dir)
            hookenv.log(
                "Restoring config. Indexers are disabled; re-enable them with "
                "the action once the configuration has been checked.",
                'INFO')
            # Turn off indexers
            radarr.set_indexers(False)
        else:
            hookenv.log(
                "Add radarrconfig resource, see juju attach or disable restore-config",
                'WARN')
            hookenv.status_set('blocked', 'waiting for radarrconfig resource')
            return
    else:
        host.service_start(radarr.service_name)
        configFile = Path(radarr.config_file)
        while not configFile.is_file():
            time.sleep(1)
    radarr.modify_config(port=radarr.charm_config['port'], urlbase='None')
    hookenv.open_port(radarr.charm_config['port'], 'TCP')
    host.service_start(radarr.service_name)
    hookenv.status_set('active', 'Radarr is ready')
    set_state('radarr.configured')
Example #26
    def start(self):
        """
        Always start the Spark History Server. Start other services as
        required by our execution mode. Open related ports as appropriate.
        """
        host.service_start('spark-history-server')
        hookenv.open_port(self.dist_config.port('spark-history-ui'))

        # Spark master/worker is only started in standalone mode
        if hookenv.config()['spark_execution_mode'] == 'standalone':
            if host.service_start('spark-master'):
                hookenv.log("Spark Master started")
                hookenv.open_port(self.dist_config.port('spark-master-ui'))
                # If the master started and we have peers, wait 2m for recovery
                # before starting the worker. This ensures the worker binds
                # to the correct master.
                if unitdata.kv().get('sparkpeer.units'):
                    hookenv.status_set('maintenance',
                                       'waiting for spark master recovery')
                    hookenv.log("Waiting 2m to ensure spark master is ALIVE")
                    time.sleep(120)
            else:
                hookenv.log("Spark Master did not start; this is normal "
                            "for non-leader units in standalone mode")

            # NB: Start the worker even if the master process on this unit
            # fails to start. In non-HA mode, spark master only runs on the
            # leader. On non-leader units, we still want a worker bound to
            # the leader.
            if host.service_start('spark-worker'):
                hookenv.log("Spark Worker started")
                hookenv.open_port(self.dist_config.port('spark-worker-ui'))
            else:
                hookenv.log("Spark Worker did not start")
Example #27
def move_etcd_data_to_standard_location():
    ''' Moves etcd data to the standard location if it's not already located
    there. This is necessary when generating new etcd config after etcd has
    been upgraded from version 2.3 to 3.x.
    '''
    bag = EtcdDatabag()
    conf_path = bag.etcd_conf_dir + '/etcd.conf.yml'
    if not os.path.exists(conf_path):
        return
    with open(conf_path) as f:
        conf = yaml.safe_load(f)
    data_dir = conf['data-dir']
    desired_data_dir = bag.etcd_data_dir
    if data_dir != desired_data_dir:
        log('Moving etcd data from %s to %s' % (data_dir, desired_data_dir))
        host.service_stop('snap.etcd.etcd')
        for filename in os.listdir(data_dir):
            os.rename(
                data_dir + '/' + filename,
                desired_data_dir + '/' + filename
            )
        os.rmdir(data_dir)
        conf['data-dir'] = desired_data_dir
        with open(conf_path, 'w') as f:
            yaml.dump(conf, f)
        host.service_start('snap.etcd.etcd')
Example #28
def restart_corosync():
    if service_running("pacemaker"):
        service_stop("pacemaker")

    if not is_unit_paused_set():
        service_restart("corosync")
        service_start("pacemaker")
Example #29
    def start(self):
        '''
        Request that our service start. Normally, puppet will handle this
        for us.

        '''
        host.service_start('zookeeper-server')
Example #30
def install_influx():
    open_port(8083)
    open_port(8086)
    config_changed()
    service_start('influxdb')
    status_set('active', '')
    set_state('influxdb.configured')
Example #32
def start(kafka):
    hookenv.log('Starting burrow')
    if not host.service_running('burrow'):
        call(['systemctl', 'enable', 'burrow'])
        host.service_start('burrow')
    status_set('active', 'ready (:' + str(config.get('port')) + ')')
    set_flag('burrow.started')
Example #33
def launch_rocketchat(database):
    status_set('active', 'Launching Rocket.Chat')
    # Launch Rocket.Chat
    hookenv.open_port('3000')
    service_start('rocketchat')
    log('Launched Rocket.Chat @ {}'.format(config['host_url']), level='info')
    set_state('rocketchat.launched')
 def setup_plex(self, hostname, port, user=None, passwd=None):
     ''' Modify an existing plex Notification or create one with the given settings
     hostname: The address for the plex server
     port: The plex port
     user: (Optional) plex user name
     passwd: (Optional) plex password'''
     host.service_stop(self.service_name)
     conn = sqlite3.connect(self.database_file)
     c = conn.cursor()
     c.execute('''SELECT Settings FROM Notifications WHERE ConfigContract is "PlexServerSettings"''')
     result = c.fetchall()
     if len(result):
         hookenv.log("Modifying existing plex setting for Radarr", "INFO")
         row = result[0]
         settings = json.loads(row[0])
         settings['host'] = hostname
         settings['port'] = port
         settings['username'] = settings['username'] or user
         settings['password'] = settings['password'] or passwd
         conn.execute('''UPDATE Notifications SET Settings = ? WHERE ConfigContract is "PlexServerSettings"''',
                      (json.dumps(settings),))
     else:
         hookenv.log("Creating plex setting for Radarr.", "INFO")
         settings = {"host": hostname, "port": port, "username": user or "", "password": passwd or "",
                     "updateLibrary": True, "useSsl": False, "isValid": True}
         c.execute('''INSERT INTO Notifications
                   (Name,OnGrab,onDownload,Settings,Implementation,ConfigContract,OnUpgrade,Tags,OnRename)
                   VALUES (?,?,?,?,?,?,?,?,?)''', ("Plex", 0, 1,
                                                   json.dumps(settings),
                                                   "PlexServer",
                                                   "PlexServerSettings", 1, None,
                                                   1))
     conn.commit()
     host.service_start(self.service_name)
Example #36
def config_changed():

    if not conf.changed('server_port') and not conf.changed('RAM_MAX'):
        return

    log('ftb-infinity: config_changed')
    cur_status = status_get()
    status_set('maintenance', 'configuring')

    port_changed = conf.changed('server_port')
    ram_changed = conf.changed('RAM_MAX')

    # Let's suppose java will rewrite server.properties on exit
    started = is_state(CHARM_STATE_STARTED)
    if started:
        service_stop(CHARM_NAME)
        sleep(2)

    if port_changed:
        close_port(conf.previous('server_port'))
        ftb_config_server()

    if ram_changed:
        ftb_systemd_install()

    if started:
        service_start(CHARM_NAME)
        if port_changed:
            open_port(conf['server_port'])

    # restore state
    status_set(cur_status[0], cur_status[1])
Example #37
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(['keystone'],
                                               config('openstack-origin'),
                                               mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')

    unison.ensure_user(user=SSH_USER, group=SSH_USER)
    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
Example #38
 def enable(self):
     if not service_running('lldpd'):
         self.disable_i40e_lldp_agent()
         service_start('lldpd')
         hookenv.log('Waiting to collect LLDP data', 'INFO')
         time.sleep(30)
         enabled = True
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if (charm_config.changed('install_sources') or
        charm_config.changed('plumgrid-build') or
        charm_config.changed('networking-build') or
            charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version') or
            charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    if not service_running('neutron-server'):
        service_start('neutron-server')
Example #40
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(['keystone'],
                                               config('openstack-origin'),
                                               mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')
    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('keystone'),
        'keystone',
        restart_handler=lambda: service_restart('apache2'))
 def setup_sabnzbd(self, port, apikey, hostname):
     host.service_stop(self.service_name)
     conn = sqlite3.connect(self.database_file)
     c = conn.cursor()
     c.execute('''SELECT Settings FROM DownloadClients WHERE ConfigContract is "SabnzbdSettings"''')
     result = c.fetchall()
     if len(result):
         hookenv.log("Modifying existing sabnzbd setting for Radarr", "INFO")
         row = result[0]
         settings = json.loads(row[0])
         settings['port'] = port
         settings['apiKey'] = apikey
         settings['host'] = hostname
         conn.execute('''UPDATE DownloadClients SET Settings = ? WHERE ConfigContract is "SabnzbdSettings"''',
                      (json.dumps(settings),))
     else:
         hookenv.log("Creating sabnzbd setting for Radarr.", "INFO")
         settings = {"tvCategory": "tv", "port": port, "apiKey": apikey,
                     "olderTvPriority": -100, "host": hostname, "useSsl": False, "recentTvPriority": -100}
         c.execute('''INSERT INTO DownloadClients
                   (Enable,Name,Implementation,Settings,ConfigContract) VALUES
                   (?,?,?,?,?)''',
                   (1, 'Sabnzbd', 'Sabnzbd', json.dumps(settings), 'SabnzbdSettings'))
     conn.commit()
     host.service_start(self.service_name)
Example #42
def start():
    if service_running('circus'):
        service_restart('circus')
    else:
        service_start('circus')

    set_state('circus.running')
    remove_state('django.restart')
Example #43
def start():
    service_start("onos")
    check_call("sleep 60", shell=True)
    if config("profile"):
        process_onos_cmds(PROFILES[config("profile")])
    extport = config("ext-port")
    print "external port is " + extport
    check_call('/opt/onos/bin/onos "externalportname-set -n ' + extport + '"', shell=True)
Example #44
def cmd_all_services(cmd):
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
def restart_service_workload():
    remove_state('service-workload.start')
    manifest = service_workload.manifest()
    if host.service_running(manifest['name']):
        host.service_restart(manifest['name'])
    else:
        host.service_start(manifest['name'])
    set_state('service-workload.started')
def start_openvim():
    status_set("maintenance", "starting openvim")
    service_start('openvim')
    t0 = time.time()
    while not openvim_running():
        if time.time() - t0 > 60:
            raise Exception('Failed to start openvim.')
        time.sleep(0.25)
Example #47
def restart_tor():
    remove_state('tor.start')
    if host.service_running('tor'):
        host.service_restart('tor')
    else:
        host.service_start('tor')
    set_state('tor.started')
    hookenv.status_set('active', 'tor service ready')
Example #48
def restart_gobinary():
    remove_state("gobinary.start")
    bin_config = gobinary.config()
    if host.service_running(bin_config["binary"]):
        host.service_restart(bin_config["binary"])
    else:
        host.service_start(bin_config["binary"])
    set_state("gobinary.started")
def start_ftb(java):
    """ start instance """
    log('ftb-infinity: start_ftb')
    service('enable', CHARM_NAME)
    service_start(CHARM_NAME)

    open_port(conf['server_port'])
    set_state(CHARM_STATE_STARTED)
    status_set('active', 'ftb started')
Example #50
def restart_grafana():
    if not host.service_running(SVCNAME):
        hookenv.log('Starting {}...'.format(SVCNAME))
        host.service_start(SVCNAME)
    elif any_file_changed([GRAFANA_INI]):
        hookenv.log('Restarting {}, config file changed...'.format(SVCNAME))
        host.service_restart(SVCNAME)
    hookenv.status_set('active', 'Ready')
    set_state('grafana.started')
Example #51
def update():
    if is_state('statsd.started'):
        host.service_stop('statsd')
    apt_update()
    apt_upgrade(['nodejs', 'npm', 'git'])
    charm_dir = hookenv.charm_dir()
    check_call(['npm', 'update', os.path.join(charm_dir, 'files/statsd-influxdb-backend')])
    if is_state('statsd.started'):
        host.service_start('statsd')
Example #52
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)
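A hedged usage sketch for the wrapper above, called from a config-changed hook; the hook body and the 'my-app' service name are illustrative and not taken from any charm in this listing.

def config_changed():
    render_config()            # assumed to regenerate the workload's config files
    service_restart('my-app')  # no "unknown service" noise if 'my-app' is not installed yet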
Example #53
def ensure_running(changed):
    if host.service_running('consul'):
        if changed:
            print("Reloaded consul config")
            subprocess.check_output([BIN_PATH, "reload"])
        else:
            print("Consul server already running")
        return
    print("Starting consul server")
    host.service_start('consul')
Example #54
def start():
    # Reconfigure NGINX as an upstart job and use a specific config file
    run(['/etc/init.d/nginx', 'stop'])
    while host.service_running('nginx'):
        log("nginx still running")
        time.sleep(60)
    os.remove('/etc/init.d/nginx')
    run(['update-rc.d', '-f', 'nginx', 'remove'])
    log("Starting NATS daemonized in the background")
    host.service_start('cf-nats')
Example #55
def force_etcd_restart():
    '''
    If etcd has been reconfigured we need to force it to fully restart.
    This is necessary because etcd has some config flags that it ignores
    after the first time it starts, so we need to make it forget them.
    '''
    service_stop('etcd')
    for directory in glob.glob('/var/lib/etcd/*'):
        shutil.rmtree(directory)
    service_start('etcd')
Example #56
def config_bindings():
    try:
        subprocess.check_call(['service', 'neo4j', 'stop'])
    except subprocess.CalledProcessError as exception:
        hookenv.log(exception.output)
    utils.re_edit_in_place('/etc/neo4j/neo4j.conf', {
        r'#dbms.connector.http.address=0.0.0.0:7474': 'dbms.connector.http.address=0.0.0.0:7474',
    })
    service_start('neo4j')
    hookenv.status_set('active', 'Ready')
    set_state('neo4j.installed')
def cmd_all_services(cmd):
    if is_unit_paused_set():
        log('Unit is in paused state, not issuing {} to all '
            'services'.format(cmd))
        return
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
Example #58
def install():
    log('Installing gluu-server...')
    run('wget {} -P /tmp'.format(RIPO))
    deb = RIPO.rsplit('/', 1)[1]
    run('dpkg -i /tmp/{}'.format(deb))
    run('apt-get update')
    run('apt-get -y --force-yes install gluu-server')
    host.service_start(SERVICE)
    # Download the setup script
    run('chroot /home/gluu-server wget {} -P /root'.format(MASTER))
    # Extract the setup script
    run('chroot /home/gluu-server unzip /root/master.zip -d /root')
Example #59
def upgrade():
    # TODO: get_state("go-binary.config")
    #       and compare with upgraded, remove old service if name has changed.
    config = gobinary.config()
    service = config["binary"]
    need_restart = False
    if host.service_running(service):
        need_restart = True
        host.service_stop(service)
    install_workload(config)
    if need_restart:
        host.service_start(service)
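The stop-upgrade-restart pattern in this example (and in the statsd update() example above) can be captured in a small context manager. A minimal sketch, assuming charmhelpers.core.host; the helper name is illustrative.

from contextlib import contextmanager

from charmhelpers.core import host


@contextmanager
def stopped_if_running(service_name):
    """Stop service_name for the duration of the block if it was running,
    then start it again afterwards.
    """
    was_running = host.service_running(service_name)
    if was_running:
        host.service_stop(service_name)
    try:
        yield was_running
    finally:
        if was_running:
            host.service_start(service_name)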