def assess_status(configs):
    """Assess status of current unit
    Decides what the state of the unit should be based on the current
    configuration.
    SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
    status of the unit.
    Also calls status_set(...) directly if paused state isn't complete.
    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    assess_status_func(configs)()
    if pxc_installed():
        # NOTE(fnordahl) ensure we do not call application_version_set with
        # None argument.  New charm deployments will have the meta-package
        # installed, but upgraded deployments will not.
        def _possible_packages():
            base = determine_packages()[0]
            yield base
            if '.' not in base:
                for i in range(5, 7+1):
                    yield base+'-5.'+str(i)
        version = None
        for pkg in _possible_packages():
            version = get_upstream_version(pkg)
            if version is not None:
                break
        else:
            log('Unable to determine installed version for package "{}"'
                .format(determine_packages()[0]), level=WARNING)
            return
        application_version_set(version)
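
For illustration, a minimal sketch of what the candidate generator above yields, assuming a hypothetical base package name (the real name comes from determine_packages()[0]):

def _possible_packages_demo(base='percona-xtradb-cluster-server'):
    # Mirrors _possible_packages() above: try the base name first, then the
    # versioned variants when the base name carries no version of its own.
    yield base
    if '.' not in base:
        for i in range(5, 7 + 1):
            yield base + '-5.' + str(i)

# list(_possible_packages_demo()) ==
# ['percona-xtradb-cluster-server',
#  'percona-xtradb-cluster-server-5.5',
#  'percona-xtradb-cluster-server-5.6',
#  'percona-xtradb-cluster-server-5.7']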
def start_datanode(namenode):
    hookenv.status_set('maintenance', 'starting datanode')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-hdfs-datanode')
    if started:
        # Create a /user/ubuntu dir in HDFS (this is safe to run multiple times).
        bigtop = Bigtop()
        if not bigtop.check_hdfs_setup():
            try:
                utils.wait_for_hdfs(30)
                bigtop.setup_hdfs()
            except utils.TimeoutError:
                # HDFS is not yet available or is still in safe mode, so we can't
                # do the initial setup (create dirs); skip setting the .started
                # state below so that we try again on the next hook.
                hookenv.status_set('waiting', 'waiting on hdfs')
                return

        # HDFS is ready. Open ports and set .started, status, and app version
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.open_port(port)
        set_state('apache-bigtop-datanode.started')
        hookenv.status_set('maintenance', 'datanode started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('DataNode failed to start')
        hookenv.status_set('blocked', 'datanode failed to start')
        remove_state('apache-bigtop-datanode.started')
        for port in get_layer_opts().exposed_ports('datanode'):
            hookenv.close_port(port)
Example #3
def check_app_config():
    """
    Check the Ghost application config and possibly update and restart it.
    """
    cfg_changed = is_state('config.changed')
    db_changed = ghost.check_db_changed()
    if cfg_changed or db_changed:
        hookenv.status_set('maintenance', 'updating configuration')

        # Update application
        ghost.update_ghost()

        # Update general config
        if cfg_changed:
            ghost.update_general_config()

        # Update database config
        if db_changed:
            ghost.update_db_config()

        ghost.restart_ghost()
        set_state('ghost.running')
        host.service_restart('nginx')

        with open(path.join(node_dist_dir(), 'package.json'), 'r') as fp:
            package_json = json.loads(fp.read())

            # Set Ghost application version
            hookenv.application_version_set(package_json['version'])

    hookenv.status_set('active', 'ready')
def setup_mattermost_backend(postgres_relation):
    print("Configuring and starting backend service.")
    _configure_mattermost_postgres(postgres_relation.master.uri)
    service_restart('mattermost')
    # Set build number for Juju status
    try:
        output = check_output(
            ['/opt/mattermost/bin/platform', 'version'],
            cwd='/opt/mattermost/bin/',
            universal_newlines=True,
            stderr=STDOUT,
        )
    except CalledProcessError as e:
        print(e.output)
        raise
    build_number = re.search(
        r'Build Number: ([0-9]+\.[0-9]+\.[0-9]+)\n', output).group(1)
    application_version_set(build_number)
    open_port(8065)
    # The next two ports aren't actually serving anything yet. Opening them
    # here works around the following ordering issue:
    #    no expose possible before `open-port`.
    #    no `open-port` of 80 and 443 before ssl.
    #    no ssl certificate before `expose`.
    open_port(config().get('port'))
    open_port(443)
    status_set(
        'active',
        'Ready (http://{}:8065 [Insecure! Please set fqdn!])'.format(unit_public_ip()))
    set_state('mattermost.backend.started')
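
For reference, a quick sketch of how the Build Number regex above behaves on a hypothetical fragment of `platform version` output (the exact output format is an assumption):

import re

sample = 'Build Number: 5.1.0\n'  # hypothetical fragment of the output
match = re.search(r'Build Number: ([0-9]+\.[0-9]+\.[0-9]+)\n', sample)
# match.group(1) == '5.1.0'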
Example #5
    def install_pig(self):
        '''
        Trigger the Bigtop puppet recipe that handles the Pig service.
        '''
        # Dirs are handled by the bigtop deb. No need to call out to
        # dist_config to do that work.
        roles = ['pig-client']

        bigtop = Bigtop()
        bigtop.render_site_yaml(roles=roles)
        bigtop.trigger_puppet()

        # Set app version for juju status output; pig --version looks like:
        #   Apache Pig version 0.15.0 (r: unknown)
        #   compiled Feb 06 2016, 23:00:40
        try:
            pig_out = check_output(['pig', '-x', 'local', '--version']).decode()
        except CalledProcessError as e:
            pig_out = e.output.decode() if e.output else ''
        lines = pig_out.splitlines()
        parts = lines[0].split() if lines else []
        if len(parts) < 4:
            hookenv.log('Error getting Pig version: {}'.format(pig_out),
                        hookenv.ERROR)
            pig_ver = ''
        else:
            pig_ver = parts[3]
        hookenv.application_version_set(pig_ver)
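
To make the comment above concrete, this is how the split picks the version out of the sample output quoted there:

line = 'Apache Pig version 0.15.0 (r: unknown)'
parts = line.split()
# parts == ['Apache', 'Pig', 'version', '0.15.0', '(r:', 'unknown)']
# parts[3] == '0.15.0'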
Example #6
def install_zookeeper():
    '''
    After Bigtop has done the initial setup, trigger a puppet install
    via our Zookeeper library.

    Puppet will start the service as a side effect.
    '''
    hookenv.status_set('maintenance', 'installing zookeeper')
    zookeeper = Zookeeper()
    # Prime data changed
    data_changed('zkpeer.nodes', zookeeper.read_peers())
    data_changed(
        'zk.network_interface',
        hookenv.config().get('network_interface'))
    data_changed(
        'zk.autopurge_purge_interval',
        hookenv.config().get('autopurge_purge_interval'))
    data_changed(
        'zk.autopurge_snap_retain_count',
        hookenv.config().get('autopurge_snap_retain_count'))
    zookeeper.install()
    zookeeper.open_ports()
    set_state('zookeeper.installed')
    set_state('zookeeper.started')
    hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
    # set app version string for juju status output
    zoo_version = get_package_version('zookeeper') or 'unknown'
    hookenv.application_version_set(zoo_version)
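
The data_changed() calls above "prime" charms.reactive's change tracking: the helper stores a hash of the value under the given key and returns True when it differs from the previously stored hash (and on the first call, which is what priming exploits). A minimal sketch of how a later handler could rely on that; the handler and the update_peers() call are hypothetical:

from charms.reactive.helpers import data_changed

def update_zookeeper_peers(zookeeper):
    # Because install_zookeeper() already recorded the initial peer list,
    # this is True only if the peers actually changed afterwards.
    if data_changed('zkpeer.nodes', zookeeper.read_peers()):
        zookeeper.update_peers()  # hypothetical follow-up action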
Example #7
def set_deployment_mode_state(state):
    if is_state('spark.yarn.installed'):
        remove_state('spark.standalone.installed')
    if is_state('spark.standalone.installed'):
        remove_state('spark.yarn.installed')
    set_state(state)
    # set app version string for juju status output
    spark_version = get_package_version('spark-core') or 'unknown'
    hookenv.application_version_set(spark_version)
Example #8
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set("blocked",
                   "Ready for do-release-upgrade and reboot. "
                   "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        if not vaultlocker.vault_relation_complete():
            status_set('waiting', 'Incomplete relation: vault')
            return

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set('blocked',
                       'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
    else:
        pristine = True
        osd_journals = get_journal_devices()
        for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)):
            if (not ceph.is_active_bluestore_device(dev) and
                    not ceph.is_pristine_disk(dev)):
                pristine = False
                break
        if pristine:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
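
For context, status_get() returns the unit's current (state, message) pair, which is what the 'Non-pristine' guard above inspects; a hypothetical example of a previously set status:

prev_status, prev_message = (
    'blocked', 'Non-pristine devices detected: /dev/vdb')  # hypothetical message
# With a message like that, the handler only flips back to 'active' once every
# unused, non-journal disk is pristine (or an active bluestore device).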
Example #9
def set_app_version():
    ''' Surface the etcd application version on juju status '''
    # Format of version output at the time of writing
    # etcd Version: 2.2.5
    # Git SHA: Not provided (use ./build instead of go build)
    # Go Version: go1.6rc2
    # Go OS/Arch: linux/amd64
    cmd = ['etcd', '-version']
    version = check_output(cmd).split(b'\n')[0].split(b':')[-1].lstrip()
    application_version_set(version)
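
Step by step, that parse applied to the sample first line shown in the comment:

raw = b'etcd Version: 2.2.5\nGit SHA: Not provided\n'  # abbreviated sample
first = raw.split(b'\n')[0]                # b'etcd Version: 2.2.5'
version = first.split(b':')[-1].lstrip()   # b'2.2.5'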
Example #10
def initial_setup():
    hookenv.status_set('maintenance', 'installing zeppelin')
    zeppelin = Zeppelin()
    zeppelin.install()
    zeppelin.open_ports()
    set_state('zeppelin.installed')
    update_status()
    # set app version string for juju status output
    zeppelin_version = get_package_version('zeppelin') or 'unknown'
    hookenv.application_version_set(zeppelin_version)
Example #11
def initial_setup():
    hookenv.status_set("maintenance", "installing zeppelin")
    zeppelin = Zeppelin()
    zeppelin.install()
    zeppelin.setup_etc_env()
    zeppelin.open_ports()
    set_state("zeppelin.installed")
    update_status()
    # set app version string for juju status output
    zeppelin_version = get_package_version("zeppelin") or "unknown"
    hookenv.application_version_set(zeppelin_version)
Example #12
def assess_status():
    '''Determine status of current unit'''
    if is_unit_upgrading_set():
        status_set('blocked',
                   'Ready for do-release-upgrade and reboot. '
                   'Set complete when finished.')
    elif lxd_running():
        status_set('active', 'Unit is ready')
    else:
        status_set('blocked', 'LXD is not running')
    application_version_set(get_upstream_version(VERSION_PACKAGE))
Example #13
def start_namenode():
    hookenv.status_set('maintenance', 'starting namenode')
    # NB: service should be started by install, but this may be handy in case
    # we have something that removes the .started state in the future. Also
    # note we restart here in case we modify conf between install and now.
    host.service_restart('hadoop-hdfs-namenode')
    for port in get_layer_opts().exposed_ports('namenode'):
        hookenv.open_port(port)
    set_state('apache-bigtop-namenode.started')
    hookenv.application_version_set(get_hadoop_version())
    hookenv.status_set('maintenance', 'namenode started')
Example #14
def reset_application_version():
    '''Set the Juju application version, per settings in layer.yaml'''
    # Reset the application version. We call this after installing
    # packages to initialize the version. We also call this every
    # hook, in case the version has changed (e.g. Landscape upgraded
    # the package).
    opts = layer.options().get('apt', {})
    pkg = opts.get('version_package')
    if pkg and pkg in installed():
        ver = get_package_version(pkg, opts.get('full_version', False))
        hookenv.application_version_set(ver)
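
As a rough illustration of the options read above (the keys are the ones looked up in the code; the package name is hypothetical), layer.options() is expected to return something like:

opts = {'apt': {'version_package': 'my-service-pkg',  # hypothetical name
                'full_version': False}}
# opts.get('apt', {}).get('version_package') -> 'my-service-pkg', whose
# installed version is then reported after install and re-checked every hook.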
Example #15
def start_resourcemanager(namenode):
    hookenv.status_set('maintenance', 'starting resourcemanager')
    # NB: service should be started by install, but this may be handy in case
    # we have something that removes the .started state in the future. Also
    # note we restart here in case we modify conf between install and now.
    host.service_restart('hadoop-yarn-resourcemanager')
    host.service_restart('hadoop-mapreduce-historyserver')
    for port in get_layer_opts().exposed_ports('resourcemanager'):
        hookenv.open_port(port)
    set_state('apache-bigtop-resourcemanager.started')
    hookenv.application_version_set(get_hadoop_version())
    hookenv.status_set('maintenance', 'resourcemanager started')
Example #16
def install_hive(hadoop):
    '''
    Anytime our dependencies are available, check to see if we have a valid
    reason to (re)install. These include:
    - initial install
    - HBase has joined/departed
    '''
    # Hive cannot handle - in the metastore db name and
    # mysql uses the service name to name the db
    if "-" in hookenv.service_name():
        hookenv.status_set('blocked', "application name may not contain '-'; "
                                      "redeploy with a different name")
        return

    # Get hbase connection dict if it's available
    if is_state('hbase.ready'):
        hbase = RelationBase.from_state('hbase.ready')
        hbserver = hbase.hbase_servers()[0]
    else:
        hbserver = None

    # Use this to determine if we need to reinstall
    deployment_matrix = {
        'hbase': hbserver,
    }

    # Handle nuances when installing versus re-installing
    if not is_state('hive.installed'):
        prefix = "installing"

        # On initial install, prime our kv with the current deployment matrix.
        # Subsequent calls will use this to determine if a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)
    else:
        prefix = "configuring"

        # Return if our matrix has not changed
        if not data_changed('deployment_matrix', deployment_matrix):
            return

    hookenv.status_set('maintenance', '{} hive'.format(prefix))
    hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))
    hive = Hive()
    hive.install(hbase=hbserver)
    hive.restart()
    hive.open_ports()
    set_state('hive.installed')
    report_status()

    # set app version string for juju status output
    hive_version = get_package_version('hive') or 'unknown'
    hookenv.application_version_set(hive_version)
Example #17
def set_nginx_version():
    ''' Surface the currently deployed version of nginx to Juju '''
    cmd = 'nginx -v'
    p = Popen(cmd, shell=True,
              stdin=PIPE,
              stdout=PIPE,
              stderr=STDOUT,
              close_fds=True)
    raw = p.stdout.read()
    # The version comes back as:
    # nginx version: nginx/1.10.0 (Ubuntu)
    version = raw.split(b'/')[-1].split(b' ')[0]
    hookenv.application_version_set(version.rstrip())
Example #18
def assess_status(configs):
    """Assess status of current unit.

    Decides what the state of the unit should be based on the current
    configuration.
    SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
    status of the unit.
    Also calls status_set(...) directly if paused state isn't complete.
    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    assess_status_func(configs)()
    application_version_set(get_upstream_version(VERSION_PACKAGE))
Example #19
def configure_kafka(zk):
    hookenv.status_set('maintenance', 'setting up kafka')
    data_changed(  # Prime data changed for network interface
        'kafka.network_interface', hookenv.config().get('network_interface'))
    kafka = Kafka()
    zks = zk.zookeepers()
    kafka.configure_kafka(zks)
    kafka.open_ports()
    set_state('kafka.started')
    hookenv.status_set('active', 'ready')
    # set app version string for juju status output
    kafka_version = get_package_version('kafka') or 'unknown'
    hookenv.application_version_set(kafka_version)
Example #20
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubectl', 'version', '--client']
    from subprocess import CalledProcessError
    try:
        version = check_output(cmd).decode('utf-8')
    except CalledProcessError:
        message = "Missing kubeconfig causes errors. Skipping version set."
        hookenv.log(message)
        return
    git_version = version.split('GitVersion:"v')[-1]
    version_from = git_version.split('",')[0]
    hookenv.application_version_set(version_from.rstrip())
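
A worked sketch of the GitVersion parse above, using a hypothetical `kubectl version --client` output line (the exact format is an assumption):

sample = ('Client Version: version.Info{Major:"1", Minor:"10", '
          'GitVersion:"v1.10.4", GitCommit:"..."}')  # hypothetical output
git_version = sample.split('GitVersion:"v')[-1]  # '1.10.4", GitCommit:"..."}'
version_from = git_version.split('",')[0]        # '1.10.4'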
Example #21
def install_mahout():
    hookenv.status_set('maintenance', 'installing mahout')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        roles=[
            'mahout-client',
        ],
    )
    bigtop.trigger_puppet()
    with utils.environment_edit_in_place('/etc/environment') as env:
        env['MAHOUT_HOME'] = '/usr/lib/mahout'

    set_state('mahout.installed')
    hookenv.status_set('active', 'ready')
    # set app version string for juju status output
    mahout_version = get_package_version('mahout') or 'unknown'
    hookenv.application_version_set(mahout_version)
Example #22
def set_application_version():
    config = cassandra.config()
    last_update = config.get('last_version_update', 0)
    if time.time() < last_update + 3600:
        return
    ed = cassandra.get_edition()
    if ed == 'apache-snap':
        ver = cassandra.get_snap_version('cassandra')
    elif ed == 'dse':
        ver = cassandra.get_package_version('dse')
    else:
        ver = cassandra.get_package_version('cassandra')
    if ver:
        hookenv.application_version_set(ver)
        config['last_version_update'] = int(time.time())
    else:
        hookenv.log('Invalid version {!r} extracted'.format(ver), ERROR)
Example #23
def install_giraph(giraph):
    """Install giraph when prerequisite states are present."""
    hookenv.status_set('maintenance', 'installing giraph')
    bigtop = Bigtop()
    bigtop.render_site_yaml(
        roles=[
            'giraph-client',
        ],
    )
    bigtop.trigger_puppet()

    # Put down the -doc subpackage so we get giraph-examples
    fetch.apt_install('giraph-doc')

    giraph_home = Path('/usr/lib/giraph')
    giraph_docdir = Path('/usr/share/doc/giraph')
    giraph_libdir = Path(giraph_home / 'lib')
    giraph_examples = glob('{}/giraph-examples-*.jar'.format(giraph_docdir))

    # Gather a list of all the giraph jars (needed for -libjars)
    giraph_jars = giraph_examples
    giraph_jars.extend(get_good_jars(giraph_home, prefix=True))
    giraph_jars.extend(get_good_jars(giraph_libdir, prefix=True))

    # Update environment with appropriate giraph bits. HADOOP_CLASSPATH can
    # use wildcards (and it should for readability), but GIRAPH_JARS, which
    # is intended to be used as 'hadoop jar -libjars $GIRAPH_JARS', needs to
    # be a comma-separated list of jars.
    with utils.environment_edit_in_place('/etc/environment') as env:
        cur_cp = env['HADOOP_CLASSPATH'] if 'HADOOP_CLASSPATH' in env else ""
        env['GIRAPH_HOME'] = giraph_home
        env['HADOOP_CLASSPATH'] = "{examples}/*:{home}/*:{libs}/*:{cp}".format(
            examples=giraph_docdir,
            home=giraph_home,
            libs=giraph_libdir,
            cp=cur_cp
        )
        env['GIRAPH_JARS'] = ','.join(j for j in giraph_jars)

    set_state('giraph.installed')
    report_status()
    # set app version string for juju status output
    giraph_version = get_package_version('giraph') or 'unknown'
    hookenv.application_version_set(giraph_version)
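
To make the classpath comment above concrete, a small sketch of the two environment values this produces (directories as in the code above; the jar names are hypothetical):

jars = ['/usr/share/doc/giraph/giraph-examples-1.1.0.jar',  # hypothetical names
        '/usr/lib/giraph/giraph-core-1.1.0.jar']
# HADOOP_CLASSPATH can use wildcards, e.g.
#   '/usr/share/doc/giraph/*:/usr/lib/giraph/*:/usr/lib/giraph/lib/*:'
giraph_jars = ','.join(jars)
# GIRAPH_JARS must spell the jars out, since it is meant to be passed as
#   hadoop jar job.jar SomeClass -libjars "$GIRAPH_JARS"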
Example #24
def install_hadoop_client_hdfs(principal, namenode):
    """Install if the namenode has sent its FQDN.

    We only need the namenode FQDN to perform the plugin install, so poll for
    namenodes() data whenever we have a namenode relation. This allows us to
    install asap, even if 'namenode.ready' is not set yet.
    """
    if namenode.namenodes():
        hookenv.status_set('maintenance', 'installing plugin (hdfs)')
        nn_host = namenode.namenodes()[0]
        bigtop = Bigtop()
        hosts = {'namenode': nn_host}
        bigtop.render_site_yaml(hosts=hosts, roles='hadoop-client')
        bigtop.trigger_puppet()
        set_state('apache-bigtop-plugin.hdfs.installed')
        hookenv.application_version_set(get_hadoop_version())
        hookenv.status_set('maintenance', 'plugin (hdfs) installed')
    else:
        hookenv.status_set('waiting', 'waiting for namenode fqdn')
def start_nodemanager(namenode, resourcemanager):
    hookenv.status_set('maintenance', 'starting nodemanager')
    # NB: service should be started by install, but we want to verify it is
    # running before we set the .started state and open ports. We always
    # restart here, which may seem heavy-handed. However, restart works
    # whether the service is currently started or stopped. It also ensures the
    # service is using the most current config.
    started = host.service_restart('hadoop-yarn-nodemanager')
    if started:
        for port in get_layer_opts().exposed_ports('nodemanager'):
            hookenv.open_port(port)
        set_state('apache-bigtop-nodemanager.started')
        hookenv.status_set('maintenance', 'nodemanager started')
        hookenv.application_version_set(get_hadoop_version())
    else:
        hookenv.log('NodeManager failed to start')
        hookenv.status_set('blocked', 'nodemanager failed to start')
        remove_state('apache-bigtop-nodemanager.started')
        for port in get_layer_opts().exposed_ports('nodemanager'):
            hookenv.close_port(port)
Example #26
def install_hbase(zk, hdfs):
    zks = zk.zookeepers()
    if (is_state('hbase.installed') and
            (not data_changed('zks', zks))):
        return

    msg = "configuring hbase" if is_state('hbase.installed') else "installing hbase"
    hookenv.status_set('maintenance', msg)

    hbase = HBase()
    hosts = {}
    nns = hdfs.namenodes()
    hosts['namenode'] = nns[0]
    hbase.configure(hosts, zks)
    hbase.open_ports()
    set_state('hbase.installed')
    report_status()
    # set app version string for juju status output
    hbase_version = get_package_version('hbase-master') or 'unknown'
    hookenv.application_version_set(hbase_version)
Example #27
def assess_status(configs):
    assess_status_func(configs)()
    if config('controller-app-mode') == 'dvnd':
        hookenv.application_version_set(config('cplane-version'))
    elif config('controller-app-mode') == 'msm':
        hookenv.application_version_set(str(config('msm-version')))
    elif config('controller-app-mode') == 'doclt':
        hookenv.application_version_set(str(config('doctl-version')))
Example #28
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
def application_version_set():
    """Get the version of software deployed on this system."""
    hookenv.application_version_set(haproxy.get_version())
Example #30
def set_easyrsa_version():
    '''Find the version of easyrsa and set that on the charm.'''
    version = unitdata.kv().get('easyrsa-version')
    hookenv.application_version_set(version)
Example #31
def _assess_status():
    """Assess status of relations and services for local unit"""
    if is_flag_set('snap.channel.invalid'):
        status_set(
            'blocked', 'Invalid snap channel '
            'configured: {}'.format(config('channel')))
        return
    if is_flag_set('config.dns_vip.invalid'):
        status_set('blocked', 'vip and dns-ha-access-record configured')
        return

    if unitdata.kv().get('charm.vault.series-upgrading'):
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return

    if is_flag_set('failed.to.start'):
        status_set("blocked",
                   "Vault failed to start; check journalctl -u vault")
        return

    _missing_interfaces = []
    _incomplete_interfaces = []

    _assess_interface_groups(REQUIRED_INTERFACES,
                             optional=False,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    _assess_interface_groups(OPTIONAL_INTERFACES,
                             optional=True,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    if _missing_interfaces or _incomplete_interfaces:
        state = 'blocked' if _missing_interfaces else 'waiting'
        status_set(state,
                   ', '.join(_missing_interfaces + _incomplete_interfaces))
        return

    health = None
    if service_running('vault'):
        try:
            health = vault.get_vault_health()
        except Exception:
            log(traceback.format_exc(), level=ERROR)
            status_set('blocked', 'Vault health check failed')
            return
    else:
        status_set('blocked', 'Vault service not running')
        return

    if health.get('version'):
        application_version_set(health.get('version'))
    else:
        application_version_set('Unknown')
        status_set('blocked', 'Vault health check failed')
        return

    if not service_running('vault'):
        status_set('blocked', 'Vault service not running')
        return

    if not health['initialized']:
        status_set('blocked', 'Vault needs to be initialized')
        return

    if health['sealed']:
        status_set('blocked', 'Unit is sealed')
        return

    mlock_disabled = is_container() or config('disable-mlock')

    status_set(
        'active', 'Unit is ready '
        '(active: {}, mlock: {})'.format(
            str(not health['standby']).lower(),
            'disabled' if mlock_disabled else 'enabled'))
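
For reference, a sketch of the health payload shape this handler consumes (keys as read above, values hypothetical) and the status it would produce:

health = {'version': '1.1.1', 'initialized': True,  # hypothetical values;
          'sealed': False, 'standby': False}        # keys as used above
# -> application version '1.1.1', and (with mlock enabled) the workload status
#    'Unit is ready (active: true, mlock: enabled)'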
Example #32
def status_ubuntu():
    status_set('active', 'ready')
    application_version_set(lsb_release()['DISTRIB_RELEASE'])
    set_state('ubuntu.ready')
Example #33
def install():
    dlog("Install called")
    status_set("maintenance", "Installing Datera Driver")
    _install()
    application_version_set(get_version())
    status_set("maintenance", "Datera Driver installation finished")
Example #34
def get_set_elasticsearch_version():
    """
    Set Elasticsearch version.
    """
    application_version_set(es_version())
    set_flag('elasticsearch.version.set')
def set_app_version():
    ''' Declare the application version to juju '''
    version = check_output(['kube-apiserver', '--version'])
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
def assess_status(configs):
    assess_status_func(configs)()
    hookenv.application_version_set(config('cplane-version'))
def set_app_ver():
    version = layer.snap.get_installed_version('openstackclients')
    hookenv.application_version_set(version)
Example #38
def set_app_version():
    ''' Surface the etcd application version on juju status '''
    # Note: the snap doesn't place an etcd alias on disk, so we infer the
    # version from etcdctl, as the snap distributes both in lockstep.
    application_version_set(etcd_version())
Example #39
def set_message_mysql_server():
    application_version_set(get_upstream_version('mysql-server'))

    status_set('maintenance', 'Mysql Installed')

    set_flag('mysql-server.version.set')
def set_app_version():
    ''' Declare the application version to juju '''
    version = check_output(['kube-apiserver', '--version'])
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
Example #41
def assess_status(configs):
    assess_status_func(configs)()
    hookenv.application_version_set(
        config('cplane-version'))
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
Example #43
def install_tvault_contego_plugin():

    status_set('maintenance', 'Installing...')

    # Read config parameters TrilioVault IP, backup target
    tv_ip = config('triliovault-ip')
    bkp_type = config('backup-target-type')

    # Validate triliovault_ip
    if not validate_ip(tv_ip):
        # IP address is invalid
        # Set status as blocked and return
        status_set('blocked',
                   'Invalid IP address, please provide correct IP address')
        return

    # Validate backup target
    if not validate_backup():
        log("Failed while validating backup")
        status_set('blocked',
                   'Invalid Backup target info, please provide valid info')
        return

    # Proceed as triliovault_ip Address is valid
    if not add_users():
        log("Failed while adding Users")
        status_set('blocked', 'Failed while adding Users')
        return

    if not create_virt_env():
        log("Failed while Creating Virtual Env")
        status_set('blocked', 'Failed while Creating Virtual Env')
        return

    if not ensure_files():
        log("Failed while ensuring files")
        status_set('blocked', 'Failed while ensuring files')
        return

    if not create_conf():
        log("Failed while creating conf files")
        status_set('blocked', 'Failed while creating conf files')
        return

    if not ensure_data_dir():
        log("Failed while ensuring datat directories")
        status_set('blocked', 'Failed while ensuring datat directories')
        return

    if not create_service_file():
        log("Failed while creating DataMover service file")
        status_set('blocked', 'Failed while creating DataMover service file')
        return

    if bkp_type == 's3' and not create_object_storage_service():
        log("Failed while creating Object Store service file")
        status_set('blocked', 'Failed while creating ObjectStore service file')
        return

    os.system('sudo systemctl daemon-reload')
    # Enable and start the object-store service
    if bkp_type == 's3':
        os.system('sudo systemctl enable tvault-object-store')
        service_restart('tvault-object-store')
    # Enable and start the datamover service
    os.system('sudo systemctl enable tvault-contego')
    service_restart('tvault-contego')

    # Install was successful
    status_set('active', 'Ready...')
    # Add the flag "installed" since it's done
    application_version_set(get_new_version('tvault-contego'))
    set_flag('tvault-contego.installed')