Example no. 1
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(
                ['keystone'],
                config('openstack-origin'),
                mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')
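For context, the snap/deb branch above hinges on snap_install_requested() from charmhelpers.contrib.openstack.utils, which keys off the configured origin. A minimal sketch of the idea, assuming the 'snap:' origin convention used by the OpenStack charms:

def snap_install_requested():
    # Snap-based installs are requested with an origin such as
    # 'snap:ocata/stable'; anything else implies a deb-based install.
    origin = config('openstack-origin') or ''
    return origin.startswith('snap:')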
Example no. 2
    def install(self, plugins):
        """Install the given plugins, optionally removing unlisted ones.

        @param plugins: A whitespace-separated list of plugins to install.
        """
        plugins = plugins or ""
        plugins = plugins.split()
        hookenv.log("Stopping jenkins for plugin update(s)")
        host.service_stop("jenkins")

        hookenv.log("Installing plugins (%s)" % " ".join(plugins))

        host.mkdir(
            paths.PLUGINS, owner="jenkins", group="jenkins", perms=0o0755)

        existing_plugins = set(glob.glob("%s/*.hpi" % paths.PLUGINS))
        installed_plugins = self._install_plugins(plugins)
        unlisted_plugins = existing_plugins - installed_plugins
        if unlisted_plugins:
            if hookenv.config()["remove-unlisted-plugins"] == "yes":
                self._remove_plugins(unlisted_plugins)
            else:
                hookenv.log(
                    "Unlisted plugins: (%s) Not removed. Set "
                    "remove-unlisted-plugins to 'yes' to clear them "
                    "away." % ", ".join(unlisted_plugins))

        hookenv.log("Starting jenkins to pickup configuration changes")
        host.service_start("jenkins")
Example no. 3
def migrate_to_mount(new_path):
    """Invoked when new mountpoint appears. This function safely migrates
    MySQL data from local disk to persistent storage (only if needed)
    """
    old_path = '/var/lib/mysql'
    if os.path.islink(old_path):
        hookenv.log('{} is already a symlink, skipping migration'.format(
            old_path))
        return True
    # Ensure our new mountpoint is empty. Otherwise error and allow
    # users to investigate and migrate manually
    files = os.listdir(new_path)
    try:
        files.remove('lost+found')
    except ValueError:
        pass
    if files:
        raise RuntimeError('Persistent storage contains old data. '
                           'Please investigate and migrate data manually '
                           'to: {}'.format(new_path))
    os.chmod(new_path, 0o700)
    if os.path.isdir('/etc/apparmor.d/local'):
        render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
               context={'path': os.path.join(new_path, '')})
        host.service_reload('apparmor')
    host.service_stop('mysql')
    host.rsync(os.path.join(old_path, ''),  # Ensure we have trailing slashes
               os.path.join(new_path, ''),
               options=['--archive'])
    shutil.rmtree(old_path)
    os.symlink(new_path, old_path)
    host.service_start('mysql')
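A plausible way this migration gets invoked (the hook name and decorator wiring are assumptions for illustration, not necessarily the charm's actual code):

hooks = hookenv.Hooks()

@hooks.hook('data-storage-attached')
def data_attached():
    # storage_get('location') returns the mountpoint Juju prepared for us.
    migrate_to_mount(hookenv.storage_get('location'))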
Example no. 4
def upgrade_monitor():
    current_version = ceph.get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=ceph.PACKAGES, fatal=True)
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
Example no. 5
def stop_datanode():
    hookenv.status_set('maintenance', 'stopping datanode')
    for port in get_layer_opts().exposed_ports('datanode'):
        hookenv.close_port(port)
    host.service_stop('hadoop-hdfs-datanode')
    remove_state('datanode.started')
    hookenv.status_set('maintenance', 'datanode stopped')
Example no. 6
def cleanup_pre_snap_services():
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    services = ['kubelet', 'kube-proxy']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        service_stop(service)

    # cleanup old files
    files = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)
Example no. 7
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)
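A quick usage sketch: with the guard in place, teardown code can call the wrapper unconditionally.

# Safe even if the services were never installed or are already stopped.
for name in ('kubelet', 'kube-proxy'):
    service_stop(name)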
Example no. 8
def stop():
    """ Stop hook """
    log('ftb-infinity: stop')
    remove_state(CHARM_STATE_STARTED)
    close_port(conf['server_port'])
    service_stop(CHARM_NAME)
    ftb_systemd_remove()
Example no. 9
def stop_cassandra():
    if is_cassandra_running():
        hookenv.log('Shutting down Cassandra')
        host.service_stop(get_cassandra_service())
    if is_cassandra_running():
        hookenv.status_set('blocked', 'Cassandra failed to shut down')
        raise SystemExit(0)
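is_cassandra_running() is referenced but not shown; one plausible implementation (an assumption, not necessarily the charm's actual helper) checks both the service state and the CQL port:

import socket

def is_cassandra_running():
    if not host.service_running(get_cassandra_service()):
        return False
    try:
        # Probe the native transport port to confirm Cassandra answers.
        socket.create_connection(('localhost', 9042), timeout=5).close()
        return True
    except OSError:
        return False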
Example no. 10
def config_changed():

    if not conf.changed('server_port') and not conf.changed('RAM_MAX'):
        return

    log('ftb-infinity: config_changed')
    cur_status = status_get()
    status_set('maintenance', 'configuring')

    port_changed = conf.changed('server_port')
    ram_changed = conf.changed('RAM_MAX')

    # Let's suppose java will rewrite server.properties on exit
    started = is_state(CHARM_STATE_STARTED)
    if started:
        service_stop(CHARM_NAME)
        sleep(2)

    if port_changed:
        close_port(conf.previous('server_port'))
        ftb_config_server()

    if ram_changed:
        ftb_systemd_install()

    if started:
        service_start(CHARM_NAME)
        if port_changed:
            open_port(conf['server_port'])

    # restore state
    status_set(cur_status[0], cur_status[1])
Example no. 11
def remove_packetbeat():
    service_stop('packetbeat')
    try:
        os.remove('/etc/packetbeat/packetbeat.yml')
    except OSError:
        pass
    charms.apt.purge('packetbeat')
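The try/except OSError around os.remove is the Python 2 compatible spelling; on Python 3 the same intent reads more directly with contextlib.suppress, as a sketch:

import contextlib
import os

with contextlib.suppress(FileNotFoundError):
    os.remove('/etc/packetbeat/packetbeat.yml')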
Example no. 12
def toggle_docker_daemon_source():
    ''' A disruptive toggleable action which will swap out the installed docker
    daemon for the configured source. If true, installs the latest available
    docker from the upstream PPA. Else installs docker from universe. '''

    # this returns a list of packages not currently installed on the system
    # based on the parameters input. Use this to check if we have taken
    # prior action against a docker deb package.
    packages = filter_installed_packages(['docker.io', 'docker-engine'])

    if 'docker.io' in packages and 'docker-engine' in packages:
        # we have not reached installation phase, return until
        # we can reasonably re-test this scenario
        hookenv.log('Neither docker.io nor docker-engine is installed. Noop.')
        return

    install_ppa = config('install_from_upstream')

    # Remove the inverse package from what is declared. Only take action
    # if the corresponding package is actually installed.
    if install_ppa and 'docker.io' not in packages:
        host.service_stop('docker')
        hookenv.log('Removing docker.io package.')
        apt_purge('docker.io')
        remove_state('docker.ready')
        remove_state('docker.available')
    elif not install_ppa and 'docker-engine' not in packages:
        host.service_stop('docker')
        hookenv.log('Removing docker-engine package.')
        apt_purge('docker-engine')
        remove_state('docker.ready')
        remove_state('docker.available')
    else:
        hookenv.log('Not touching packages.')
Example no. 13
def stop():
    ''' Juju calls the stop hook before the unit is destroyed.  Clean up. '''
    # Do we need to explicitly call leave here?
    if host.service_running('consul'):
        host.service_stop('consul')
    for p in PORTS:
        hookenv.close_port(p)
Example no. 14
def remove_topbeat():
    service_stop('topbeat')
    try:
        os.remove('/etc/topbeat/topbeat.yml')
    except OSError:
        pass
    charms.apt.purge('topbeat')
Example no. 15
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if (charm_config.changed('install_sources') or
        charm_config.changed('plumgrid-build') or
        charm_config.changed('networking-build') or
            charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version') or
            charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    if not service_running('neutron-server'):
        service_start('neutron-server')
Example no. 16
def set_blocked():
    print("No postgres. Signalling this.")
    service_stop('mattermost')
    remove_state('mattermost.backend.started')
    close_port(8065)
    close_port(config().get('port'))
    close_port(443)
    status_set('blocked', 'Need relation to postgres')
Example no. 17
def stop_services():
    name = networking_name()
    svcs = set()
    for ctxt in CONFIG_FILES[name][config("plugin")].itervalues():
        for svc in ctxt["services"]:
            svcs.add(svc)
    for svc in svcs:
        service_stop(svc)
Example no. 18
def update():
    if is_state('statsd.started'):
        host.service_stop('statsd')
    apt_update()
    apt_upgrade(['nodejs', 'npm', 'git'])
    charm_dir = hookenv.charm_dir()
    check_call(['npm', 'update', os.path.join(charm_dir, 'files/statsd-influxdb-backend')])
    if is_state('statsd.started'):
        host.service_start('statsd')
Example no. 19
def setup_upstart_jobs():
    hookenv.log('setting up upstart jobs')
    context = {
        'vault_path': '/usr/local/bin/vault',
        'name': 'vault',
        'vault_options': '--config=/etc/vault/config.hcl'
    }
    render('upstart.conf', '/etc/init/vault.conf', context, perms=0o644)
    service_stop('vault')
Example no. 20
def install():
    # Stop in case the service was already installed and running. This is more
    # likely to happen when a subordinate is redeployed.
    manifest = service_workload.manifest()
    service = manifest["name"]
    if host.service_running(service):
        host.service_stop(service)

    service_workload.install()
Example no. 21
def install_packages(workload):
    config = hookenv.config()
    hookenv.status_set('maintenance', 'Installing packages')
    packages = ['apache2', 'php5-cgi', 'libapache2-mod-php5']
    packages.extend(workload['packages'])
    fetch.apt_install(fetch.filter_installed_packages(packages))
    host.service_stop('apache2')
    check_call(['a2dissite', '000-default'])
    hookenv.open_port(config['port'])
Example no. 22
def render_config_restart_on_changed(hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring that
    it is started so long as the new node to be added is guaranteed to have
    been restarted so as to apply the new config.
    """
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # NOTE(jamespage):
            # if mysql@bootstrap is running, then the native
            # bootstrap systemd service was used to start this
            # instance, and it was the initial seed unit
            # stop the bootstrap version before restarting normal mysqld
            if service_running('mysql@bootstrap'):
                service_stop('mysql@bootstrap')

            attempts = 0
            max_retries = 5

            cluster_wait()
            while not service_restart('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying per distributed wait",
                    WARNING)
                attempts += 1
                cluster_wait()

        # If we get here we assume prior actions have succeeded, so ensure
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_client_db_relations()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
Example no. 23
def force_etcd_restart():
    '''
    If etcd has been reconfigured we need to force it to fully restart.
    This is necessary because etcd has some config flags that it ignores
    after the first time it starts, so we need to make it forget them.
    '''
    service_stop('etcd')
    for directory in glob.glob('/var/lib/etcd/*'):
        shutil.rmtree(directory)
    service_start('etcd')
Example no. 24
def stop_services():
    release = get_os_codename_install_source(config('openstack-origin'))
    plugin = config('plugin')
    config_files = resolve_config_files(plugin, release)
    svcs = set()
    for ctxt in config_files[config('plugin')].itervalues():
        for svc in ctxt['services']:
            svcs.add(remap_service(svc))
    for svc in svcs:
        service_stop(svc)
Example no. 25
def storage_joined():
    if not is_elected_leader(SWIFT_HA_RES):
        log("New storage relation joined - stopping proxy until ring builder "
            "synced", level=INFO)
        service_stop('swift-proxy')

        # This unit is not currently responsible for distributing rings but
        # may become so at some time in the future so we do this to avoid the
        # possibility of storage nodes getting out-of-date rings by deprecating
        # any existing ones from the www dir.
        mark_www_rings_deleted()
Example no. 26
def post_series_upgrade():
    log("Running complete series upgrade hook", "INFO")
    service_stop('nova-compute')
    service_stop('libvirt-bin')
    # After package upgrade the service is broken and leaves behind a
    # PID file which causes the service to fail to start.
    # Remove this before restart
    if os.path.exists(LIBVIRTD_PID):
        os.unlink(LIBVIRTD_PID)
    series_upgrade_complete(
        resume_unit_helper, CONFIGS)
Example no. 27
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the kubelet service
        - stop the kube-proxy service
        - remove the 'kubernetes-worker.components.installed' state
    '''
    kubectl('delete', 'node', gethostname())
    service_stop('kubelet')
    service_stop('kube-proxy')
    remove_state('kubernetes-worker.components.installed')
Example no. 28
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the worker services
    '''
    try:
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', gethostname().lower())
    except CalledProcessError:
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
Example no. 29
def upgrade():
    # TODO: get_state("go-binary.config")
    #       and compare with upgraded, remove old service if name has changed.
    config = gobinary.config()
    service = config["binary"]
    need_restart = False
    if host.service_running(service):
        need_restart = True
        host.service_stop(service)
    install_workload(config)
    if need_restart:
        host.service_start(service)
Example no. 30
def migrate_from_pre_snaps():
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')

    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)

    # rename auth files
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')

    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
        "/etc/default/kube-apiserver.defaults",
        "/etc/default/kube-controller-manager.defaults",
        "/etc/default/kube-scheduler.defaults",
        "/srv/kubernetes",
        "/home/ubuntu/kubectl",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kube-apiserver",
        "/usr/local/bin/kube-controller-manager",
        "/usr/local/bin/kube-scheduler",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)

    # clear the flag managers
    FlagManager('kube-apiserver').destroy_all()
    FlagManager('kube-controller-manager').destroy_all()
    FlagManager('kube-scheduler').destroy_all()
Example no. 31
def stop_service():
    host.service_stop('jujushell')
    clear_flag('jujushell.running')
Example no. 32
def stop_running_web_service():
    service_name = layer.options('lets-encrypt').get('service-name')
    if service_name and service_running(service_name):
        log('stopping running service: %s' % (service_name))
        service_stop(service_name)
        return True
Example no. 33
def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit={} (local is leader)".format(remote_unit()),
        level=DEBUG)

    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    rx_ack_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    tx_rq_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    rx_leader_changed = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if rx_leader_changed:
        log(
            "Leader change notification received and this is leader so "
            "retrying sync.",
            level=INFO)
        # FIXME: check that we were previously part of a successful sync to
        #        ensure we have good rings.
        cluster_sync_rings(peers_only=tx_settings.get('peers-only', False),
                           token=rx_leader_changed)
        return

    rx_resync_request = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_REQUEST_RESYNC)
    resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
    tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
    if rx_resync_request and tx_resync_request_ack != rx_resync_request:
        log("Unit '{}' has requested a resync".format(remote_unit()),
            level=INFO)
        cluster_sync_rings(peers_only=True)
        relation_set(**{resync_request_ack_key: rx_resync_request})
        return

    # If we have received an ack token ensure it is not associated with a
    # request we received from another peer. If it is, this would indicate
    # a leadership change during a sync and this unit will abort the sync or
    # attempt to restore the original leader so as to be able to complete the
    # sync.

    if rx_ack_token and rx_ack_token == tx_rq_token:
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if is_all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'{}'".format(key))
                raise SwiftProxyCharmException(msg)

            peers_only = bool(
                get_first_available_value(responses, key, default=0))
            log("Syncing rings and builders (peers-only={})".format(
                peers_only),
                level=DEBUG)
            broadcast_rings_available(broker_token=rx_ack_token,
                                      storage=not peers_only)
        else:
            key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
            acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (current='{}', token='{}')".format(acks, tx_ack_token),
                level=INFO)
    elif ((rx_ack_token and (rx_ack_token == tx_ack_token))
          or (rx_rq_token and (rx_rq_token == rx_ack_token))):
        log(
            "It appears that the cluster leader has changed mid-sync - "
            "stopping proxy service",
            level=WARNING)
        service_stop('swift-proxy')
        broker = rx_settings.get('builder-broker')
        if broker:
            # If we get here, manual intervention will be required in order
            # to restore the cluster.
            raise SwiftProxyCharmException(
                "Failed to restore previous broker '{}' as leader".format(
                    broker))
        else:
            raise SwiftProxyCharmException(
                "No builder-broker on rx_settings relation from '{}' - "
                "unable to attempt leader restore".format(remote_unit()))
    else:
        log("Not taking any sync actions", level=DEBUG)

    CONFIGS.write_all()
Example no. 34
    def stop(self):
        host.service_stop('oozie-server')
Example no. 35
def format_and_mount_storage():
    ''' This allows users to request persistent volumes from the cloud provider
    for the purposes of disaster recovery. '''
    set_state('data.volume.attached')
    # Query juju for the information about the block storage
    device_info = storage_get()
    block = device_info['location']
    bag = EtcdDatabag()
    bag.cluster = leader_get('cluster')
    # the databag has behavior that keeps the path updated.
    # Reference the default path from layer_options.
    etcd_opts = layer.options('etcd')
    # Split the tail of the path to mount the volume 1 level before
    # the data directory.
    tail = os.path.split(bag.etcd_data_dir)[0]

    if volume_is_mounted(block):
        hookenv.log('Device is already attached to the system.')
        hookenv.log('Refusing to take action against {}'.format(block))
        return

    # Format the device in non-interactive mode
    cmd = ['mkfs.ext4', device_info['location'], '-F']
    hookenv.log('Creating filesystem on {}'.format(device_info['location']))
    hookenv.log('With command: {}'.format(' '.join(cmd)))
    check_call(cmd)

    # halt etcd to perform the data-store migration
    host.service_stop(bag.etcd_daemon)

    os.makedirs(tail, exist_ok=True)
    mount_volume(block, tail)
    # handle first run during early-attach storage, pre-config-changed hook.
    os.makedirs(bag.etcd_data_dir, exist_ok=True)

    # Only attempt migration if directory exists
    if os.path.isdir(etcd_opts['etcd_data_dir']):
        migrate_path = "{}/".format(etcd_opts['etcd_data_dir'])
        output_path = "{}/".format(bag.etcd_data_dir)
        cmd = ['rsync', '-azp', migrate_path, output_path]

        hookenv.log('Detected existing data, migrating to new location.')
        hookenv.log('With command: {}'.format(' '.join(cmd)))

        check_call(cmd)

    with open('/etc/fstab', 'r') as fp:
        contents = fp.readlines()

    found = 0
    # scan fstab for the device
    for line in contents:
        if block in line:
            found = found + 1

    # if device not in fstab, append so it persists through reboots
    if found == 0:
        append = "{0} {1} ext4 defaults 0 0".format(block, tail)  # noqa
        with open('/etc/fstab', 'a') as fp:
            fp.writelines([append])

    # Finally re-render the configuration and resume operation
    render_config(bag)
    host.service_restart(bag.etcd_daemon)
Example no. 36
def upgrade_charm():
    print("Upgrading mattermost setup.")
    if service_running("mattermost"):
        service_stop("mattermost")
    remove_state('mattermost.installed')
    remove_state('mattermost.backend.started')
Example no. 37
    def stop_nodemanager(self):
        host.service_stop('nodemanager')
Example no. 38
    def stop_jobhistory(self):
        host.service_stop('historyserver')
Example no. 39
    def stop_resourcemanager(self):
        host.service_stop('resourcemanager')
Example no. 40
def stop():
    service_stop("jenkins")
    status_set("maintenance", "Jenkins stopped")
Example no. 41
    def stop_datanode(self):
        host.service_stop('datanode')
Example no. 42
    def stop_secondarynamenode(self):
        host.service_stop('secondarynamenode')
Example no. 43
    def stop_zookeeper(self):
        host.service_stop('zkfc')
Example no. 44
def service_start(service_name):
    hookenv.log("Starting {0} service.".format(service_name))
    host.service_start(service_name)
Example no. 45
def series_upgrade():

    # Set this unit to series upgrading
    set_unit_upgrading()

    # The leader will "bootstrap" with no wsrep peers
    # Non-leaders will point only at the newly upgraded leader until the
    # cluster series upgrade is completed.
    # Set cluster_series_upgrading for the duration of the cluster series
    # upgrade. This will be unset with the action
    # complete-cluster-series-upgrade on the leader node.
    if (leader_get('cluster_series_upgrade_leader') ==
            get_relation_ip('cluster')):
        hosts = []
    else:
        hosts = [leader_get('cluster_series_upgrade_leader')]

    # New series after series upgrade and reboot
    _release = lsb_release()['DISTRIB_CODENAME'].lower()

    if _release == "xenial":
        # Guarantee /var/run/mysqld exists
        _dir = '/var/run/mysqld'
        mkdir(_dir, owner="mysql", group="mysql", perms=0o755)

    # Install new versions of the percona packages
    apt_install(determine_packages())
    service_stop("mysql")

    if _release == "bionic":
        render_config(hosts)

    if _release == "xenial":
        # Move the packaged version empty DB out of the way.
        cmd = ["mv", "/var/lib/percona-xtradb-cluster",
               "/var/lib/percona-xtradb-cluster.dpkg"]
        subprocess.check_call(cmd)

        # Symlink the previous versions data to the new
        cmd = ["ln", "-s", "/var/lib/mysql", "/var/lib/percona-xtradb-cluster"]
        subprocess.check_call(cmd)

    # Start mysql temporarily with no wrep for the upgrade
    cmd = ["mysqld"]
    if _release == "bionic":
        cmd.append("--skip-grant-tables")
        cmd.append("--user=mysql")
    cmd.append("--wsrep-provider=none")
    log("Starting mysqld --wsrep-provider='none' and waiting ...")
    proc = subprocess.Popen(cmd, stderr=subprocess.PIPE)

    # Wait for the mysql socket to exist
    check_for_socket(MYSQL_SOCKET, exists=True)

    # Execute the upgrade process
    log("Running mysql_upgrade")
    cmd = ['mysql_upgrade']
    if _release == "xenial":
        cmd.append('-p{}'.format(root_password()))
    subprocess.check_call(cmd)

    # Terminate the temporary mysql
    proc.terminate()

    # Wait for the mysql socket to be removed
    check_for_socket(MYSQL_SOCKET, exists=False)

    # Clear states
    clear_unit_paused()
    clear_unit_upgrading()

    if _release == "xenial":
        # Point at the correct my.cnf
        cmd = ["update-alternatives", "--set", "my.cnf",
               "/etc/mysql/percona-xtradb-cluster.cnf"]
        subprocess.check_call(cmd)

    # Render config
    render_config(hosts)

    resume_unit_helper(register_configs())
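check_for_socket() is referenced above but not shown; a plausible sketch (an assumption, not the charm's exact helper) polls for the socket file:

import os
import time

def check_for_socket(socket_path, exists=True, timeout=60):
    # Wait until the socket appears (exists=True) or disappears
    # (exists=False), giving up after roughly `timeout` seconds.
    for _ in range(timeout):
        if os.path.exists(socket_path) == exists:
            return
        time.sleep(1)
    raise RuntimeError('Timed out waiting for {} to {}'.format(
        socket_path, 'appear' if exists else 'disappear'))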
Example no. 46
def controller_gone():
    service_stop(MUNGE_SERVICE)
    service_stop(SLURMD_SERVICE)
    for f in ['slurm-node.configured', 'slurm-node.info.sent']:
        flags.clear_flag(f)
        log('Cleared {} flag'.format(f))
Example no. 47
def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit={} (local is non-leader)".format(
        remote_unit()),
        level=DEBUG)
    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    token = rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if token:
        log(
            "Leader-changed notification received from peer unit. Since "
            "this most likely occurred during a ring sync proxies will "
            "be disabled until the leader is restored and a fresh sync "
            "request is set out",
            level=WARNING)
        service_stop("swift-proxy")
        return

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)

    # Check whether we have been requested to stop proxy service
    if rx_rq_token:
        log("Peer request to stop proxy service received ({}) - sending ack".
            format(rx_rq_token),
            level=INFO)
        service_stop('swift-proxy')
        peers_only = rx_settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=rx_rq_token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    broker = rx_settings.get('builder-broker', None)
    broker_token = rx_settings.get('broker-token', None)
    broker_timestamp = rx_settings.get('broker-timestamp', None)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)
    if not broker:
        log("No ring/builder update available", level=DEBUG)
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')

        return
    elif broker_token:
        if tx_ack_token:
            if broker_token == tx_ack_token:
                log("Broker and ACK tokens match ({})".format(broker_token),
                    level=DEBUG)
            else:
                log("Received ring/builder update notification but tokens do "
                    "not match (broker-token={}/ack-token={})".format(
                        broker_token, tx_ack_token),
                    level=WARNING)
                return
        else:
            log(
                "Broker token available without handshake, assuming we just "
                "joined and rings won't change",
                level=DEBUG)
    else:
        log("Not taking any sync actions", level=DEBUG)
        return

    # If we upgrade from a cluster that did not use timestamps, the new peer
    # will need to request a re-sync from the leader
    if not is_most_recent_timestamp(broker_timestamp):
        if not timestamps_available(excluded_unit=remote_unit()):
            log("Requesting resync")
            rq = SwiftProxyClusterRPC().request_resync(broker_token)
            relation_set(relation_settings=rq)
        else:
            log(
                "Did not receive most recent broker timestamp but timestamps "
                "are available - waiting for next timestamp",
                level=INFO)

        return

    log("Ring/builder update available", level=DEBUG)
    builders_only = int(rx_settings.get('sync-only-builders', 0))
    path = os.path.basename(get_www_dir())
    try:
        sync_proxy_rings('http://{}/{}'.format(broker, path),
                         rings=not builders_only)
    except CalledProcessError:
        log(
            "Ring builder sync failed, builders not yet available - "
            "leader not ready?",
            level=WARNING)
        return

    # Re-enable the proxy once all builders and rings are synced
    if fully_synced():
        log("Ring builders synced - starting proxy", level=INFO)
        CONFIGS.write_all()
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')
    else:
        log(
            "Not all builders and rings synced yet - waiting for peer sync "
            "before starting proxy",
            level=INFO)
Example no. 48
    def install(self):
        '''
        Nothing to install, as the kafka charm installs all utilities.
        '''
        host.service_stop(KAFKA_SERVICE)
Example no. 49
    def stop(self):
        host.service_stop('kafka-server')
Example no. 50
    def stop(self):
        '''
        Stops the Kafka service.
        '''
        host.service_stop(KAFKA_SERVICE)
Example no. 51
    def stop(self):
        '''
        Stops the metamorphosis service.
        '''
        host.service_stop(METAMORPHOSIS_SERVICE)
Example no. 52
def service_stop(service_name):
    hookenv.log('Stopping {0} service.'.format(service_name))
    host.service_stop(service_name)
Example no. 53
def stop_apache():
    assert host.service_stop('apache2'), 'Failed to stop Apache'
    reactive.remove_state('apache.started')
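Since assert statements are compiled out when Python runs with -O, a variant that always enforces the check might look like this sketch:

def stop_apache():
    if not host.service_stop('apache2'):
        raise RuntimeError('Failed to stop Apache')
    reactive.remove_state('apache.started')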
Example no. 54
def missing_nodes():
    status_set('blocked', 'Missing relation to slurm-node')
    remove_state('slurm-controller.configured')
    service_stop(SLURMCTLD_SERVICE)
Example no. 55
def migrate_database():
    """Runs heat-manage to initialize a new database or migrate existing"""
    log('Migrating the heat database.')
    [service_stop(s) for s in services()]
    check_call(['heat-manage', 'db_sync'])
    [service_start(s) for s in services()]
Example no. 56
    def stop(self):
        host.service_stop('hbase-master')
        host.service_stop('hbase-regionserver')
        host.service_stop('hbase-thrift')
Example no. 57
def ha_relation_changed():
    # Check that we are related to a principal and that
    # it has already provided the required corosync configuration
    if not get_corosync_conf():
        log('Unable to configure corosync right now, deferring configuration',
            level=INFO)
        return

    if relation_ids('hanode'):
        log('Ready to form cluster - informing peers', level=DEBUG)
        relation_set(relation_id=relation_ids('hanode')[0], ready=True)
    else:
        log('Ready to form cluster, but not related to peers just yet',
            level=INFO)
        return

    # Check that there are enough nodes in order to perform the
    # configuration of the HA cluster
    if len(get_cluster_nodes()) < int(config('cluster_count')):
        log('Not enough nodes in cluster, deferring configuration', level=INFO)
        return

    relids = relation_ids('ha') or relation_ids('juju-info')
    if len(relids) == 1:  # Should only ever be one of these
        # Obtain relation information
        relid = relids[0]
        units = related_units(relid)
        if len(units) < 1:
            log('No principal unit found, deferring configuration', level=INFO)
            return

        unit = units[0]
        log('Parsing cluster configuration using rid: %s, unit: %s' %
            (relid, unit),
            level=DEBUG)
        resources = parse_data(relid, unit, 'resources')
        delete_resources = parse_data(relid, unit, 'delete_resources')
        resource_params = parse_data(relid, unit, 'resource_params')
        groups = parse_data(relid, unit, 'groups')
        ms = parse_data(relid, unit, 'ms')
        orders = parse_data(relid, unit, 'orders')
        colocations = parse_data(relid, unit, 'colocations')
        clones = parse_data(relid, unit, 'clones')
        locations = parse_data(relid, unit, 'locations')
        init_services = parse_data(relid, unit, 'init_services')
    else:
        log('Related to %s ha services' % (len(relids)), level=DEBUG)
        return

    if any(ra.startswith('ocf:openstack') for ra in resources.values()):
        apt_install('openstack-resource-agents')
    if any(ra.startswith('ocf:ceph') for ra in resources.values()):
        apt_install('ceph-resource-agents')

    if any(ra.startswith('ocf:maas') for ra in resources.values()):
        try:
            validate_dns_ha()
        except MAASConfigIncomplete as ex:
            log(ex.args[0], level=ERROR)
            status_set('blocked', ex.args[0])
            # if an exception is raised the hook will end up in error state
            # which will obfuscate the workload status and message.
            return

        log('Setting up access to MAAS API', level=INFO)
        setup_maas_api()
        # Update resource_params for DNS resources to include MAAS URL and
        # credentials
        for resource in resource_params.keys():
            if resource.endswith("_hostname"):
                res_ipaddr = get_ip_addr_from_resource_params(
                    resource_params[resource])
                resource_params[resource] += (
                    ' maas_url="{}" maas_credentials="{}"'
                    ''.format(config('maas_url'), config('maas_credentials')))
                write_maas_dns_address(resource, res_ipaddr)

    # NOTE: this should be removed in 15.04 cycle as corosync
    # configuration should be set directly on subordinate
    configure_corosync()
    try_pcmk_wait()

    # Only configure the cluster resources
    # from the oldest peer unit.
    if is_leader():
        run_initial_setup()
        log('Setting cluster symmetry', level=INFO)
        set_cluster_symmetry()
        log('Deleting Resources: %s' % (delete_resources), level=DEBUG)
        for res_name in delete_resources:
            if pcmk.crm_opt_exists(res_name):
                if ocf_file_exists(res_name, resources):
                    log('Stopping and deleting resource %s' % res_name,
                        level=DEBUG)
                    if pcmk.crm_res_running(res_name):
                        pcmk.commit('crm -w -F resource stop %s' % res_name)
                else:
                    log('Cleaning up and deleting resource %s' % res_name,
                        level=DEBUG)
                    pcmk.commit('crm resource cleanup %s' % res_name)
                # Daemon process may still be running after the upgrade.
                kill_legacy_ocf_daemon_process(res_name)

                # Stop the resource before the deletion (LP: #1838528)
                log('Stopping %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F resource stop %s' % res_name)
                log('Deleting %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F configure delete %s' % res_name)

        log('Configuring Resources: %s' % (resources), level=DEBUG)
        for res_name, res_type in resources.items():
            # disable the service we are going to put in HA
            if res_type.split(':')[0] == "lsb":
                disable_lsb_services(res_type.split(':')[1])
                if service_running(res_type.split(':')[1]):
                    service_stop(res_type.split(':')[1])
            elif (len(init_services) != 0 and res_name in init_services
                  and init_services[res_name]):
                disable_upstart_services(init_services[res_name])
                if service_running(init_services[res_name]):
                    service_stop(init_services[res_name])
            # Put the services in HA, if not already done so
            # if not pcmk.is_resource_present(res_name):
            if not pcmk.crm_opt_exists(res_name):
                if res_name not in resource_params:
                    cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                                   res_type)
                else:
                    cmd = ('crm -w -F configure primitive %s %s %s' %
                           (res_name, res_type, resource_params[res_name]))

                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)
                if config('monitor_host'):
                    cmd = ('crm -F configure location Ping-%s %s rule '
                           '-inf: pingd lte 0' % (res_name, res_name))
                    pcmk.commit(cmd)

            else:
                # the resource already exists so it will be updated.
                code = pcmk.crm_update_resource(res_name, res_type,
                                                resource_params.get(res_name))
                if code != 0:
                    msg = "Cannot update pcmkr resource: {}".format(res_name)
                    status_set('blocked', msg)
                    raise Exception(msg)

        log('Configuring Groups: %s' % (groups), level=DEBUG)
        for grp_name, grp_params in groups.items():
            if not pcmk.crm_opt_exists(grp_name):
                cmd = ('crm -w -F configure group %s %s' %
                       (grp_name, grp_params))
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
        for ms_name, ms_params in ms.items():
            if not pcmk.crm_opt_exists(ms_name):
                cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Orders: %s' % (orders), level=DEBUG)
        for ord_name, ord_params in orders.items():
            if not pcmk.crm_opt_exists(ord_name):
                cmd = 'crm -w -F configure order %s %s' % (ord_name,
                                                           ord_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Clones: %s' % clones, level=DEBUG)
        for cln_name, cln_params in clones.items():
            if not pcmk.crm_opt_exists(cln_name):
                cmd = 'crm -w -F configure clone %s %s' % (cln_name,
                                                           cln_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        # Ordering is important here, colocation and location constraints
        # reference resources. All resources referenced by the constraints
        # need to exist otherwise constraint creation will fail.

        log('Configuring Colocations: %s' % colocations, level=DEBUG)
        for col_name, col_params in colocations.items():
            if not pcmk.crm_opt_exists(col_name):
                cmd = 'crm -w -F configure colocation %s %s' % (col_name,
                                                                col_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Locations: %s' % locations, level=DEBUG)
        for loc_name, loc_params in locations.items():
            if not pcmk.crm_opt_exists(loc_name):
                cmd = 'crm -w -F configure location %s %s' % (loc_name,
                                                              loc_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        for res_name, res_type in resources.items():
            if len(init_services) != 0 and res_name in init_services:
                # Checks that the resources are running and started.
                # Ensure that clones are excluded as the resource is
                # not directly controllable (dealt with below)
                # Ensure that groups are cleaned up as a whole rather
                # than as individual resources.
                if (res_name not in clones.values()
                        and res_name not in groups.values()
                        and not pcmk.crm_res_running(res_name)):
                    # Just in case, cleanup the resources to ensure they get
                    # started in case they failed for some unrelated reason.
                    cmd = 'crm resource cleanup %s' % res_name
                    pcmk.commit(cmd)

        for cl_name in clones:
            # Always cleanup clones
            cmd = 'crm resource cleanup %s' % cl_name
            pcmk.commit(cmd)

        for grp_name in groups:
            # Always cleanup groups
            cmd = 'crm resource cleanup %s' % grp_name
            pcmk.commit(cmd)

        # All members of the cluster need to be registered before resources
        # that reference them can be created.
        if len(get_member_ready_nodes()) >= int(config('cluster_count')):
            log('Configuring any remote nodes', level=INFO)
            remote_resources = configure_pacemaker_remote_resources()
            resources.update(remote_resources)
            configure_resources_on_remotes(resources=resources,
                                           clones=clones,
                                           groups=groups)

            stonith_resources = {}
            stonith_remote_res = configure_pacemaker_remote_stonith_resource()
            stonith_resources.update(stonith_remote_res)
            if stonith_remote_res:
                stonith_peer_res = configure_peer_stonith_resource()
                stonith_resources.update(stonith_peer_res)
            configure_resources_on_remotes(resources=stonith_resources,
                                           clones=clones,
                                           groups=groups)
            configure_stonith()
        else:
            log('Deferring configuration of any remote nodes', level=INFO)

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id, clustered="yes")

    # Inform peers that local configuration is complete and this member
    # is ready
    for rel_id in relation_ids('hanode'):
        relation_set(relation_id=rel_id, member_ready=True)
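parse_data() is used throughout this hook to pull structured values off the relation; a plausible sketch of what it does (an assumption, not the charm's exact helper):

import json

def parse_data(relid, unit, key):
    # Read a serialised value off the relation; default to empty when unset.
    raw = relation_get(attribute=key, rid=relid, unit=unit)
    return json.loads(raw) if raw else {}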
Example no. 58
    def stop(self):
        host.service_stop('zeppelin')
Example no. 59
    def stop_journalnode(self):
        host.service_stop('journalnode')
Example no. 60
    def stop_namenode(self):
        host.service_stop('namenode')