Example #1
def initialize_new_leader():
    ''' Create an initial cluster string to bring up a single member cluster of
    etcd, and set the leadership data so the followers can join this one. '''
    bag = EtcdDatabag()
    bag.token = bag.token
    bag.cluster_state = 'new'
    cluster_connection_string = get_connection_string([bag.private_address],
                                                      bag.management_port)
    bag.cluster = "{}={}".format(bag.unit_name, cluster_connection_string)
    render('defaults', '/etc/default/etcd', bag.__dict__, owner='root',
           group='root')
    host.service_restart('etcd')

    # sorry, some hosts need this. The charm races with systemd and wins.
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy')
        return
    # We have a healthy leader, broadcast initial data-points for followers
    open_port(bag.port)
    leader_connection_string = get_connection_string([bag.private_address],
                                                     bag.port)
    leader_set({'token': bag.token,
                'leader_address': leader_connection_string,
                'cluster': bag.cluster})

    # finish bootstrap delta and set configured state
    set_state('etcd.leader.configured')
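For context, the cluster string built above is unit_name=<connection string>, where the connection string is a comma-separated list of scheme://address:port URLs. A minimal sketch of what a helper like get_connection_string could look like (hypothetical; the real etcd charm helper may differ, e.g. in the scheme it uses):

def get_connection_string(addresses, port, scheme='https'):
    # Build the comma-separated "scheme://host:port" list that etcd
    # expects for its --initial-cluster / advertise-url style options.
    return ','.join('{0}://{1}:{2}'.format(scheme, addr, port)
                    for addr in addresses)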
Example #2
def install_app():
    """ Performs application installation
    """

    hookenv.log('Installing Huginn', 'info')

    # Configure NGINX vhost
    nginxlib.configure_site('default', 'vhost.conf',
                            app_path=ruby_dist_dir())

    # Update application
    huginnlib.download_archive()
    shell("mkdir -p %s/{log,tmp/pids,tmp/sockets}" % (ruby_dist_dir()))
    shell("cp %(dir)s/config/unicorn.rb.example "
          "%(dir)s/config/unicorn.rb" % {'dir': ruby_dist_dir()})

    bundle("install --deployment --without development test")
    procfile = path.join(hookenv.charm_dir(), 'templates/Procfile')
    shell("cp %(procfile)s %(dir)s/Procfile" % {
        'procfile': procfile,
        'dir': ruby_dist_dir()
    })

    bundle("exec rake assets:precompile RAILS_ENV=production")

    host.service_restart('nginx')
    hookenv.status_set('active', 'Huginn is installed!')
Example #3
def balance_rings():
    '''handle doing ring balancing and distribution.'''
    new_ring = False
    for ring in SWIFT_RINGS.itervalues():
        if balance_ring(ring):
            log('Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        return

    for ring in SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
                        os.path.join(WWW_DIR, f))

    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        trigger = uuid.uuid4()

        if cluster.is_clustered():
            hostname = config('vip')
        else:
            hostname = unit_get('private-address')

        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)

    service_restart('swift-proxy')
Example #4
def config_changed():

    if not is_state('nginx.available'):
        return

    host.service_restart('nginx')
    hookenv.status_set('active', 'Ready')
Example #5
def check_app_config():
    """
    Check the Ghost application config and possibly update and restart it.
    """
    cfg_changed = is_state('config.changed')
    db_changed = ghost.check_db_changed()
    if cfg_changed or db_changed:
        hookenv.status_set('maintenance', 'updating configuration')

        # Update application
        if config.changed('release') or config.changed('checksum'):
            ghost.update_ghost()

        # Update general config
        if cfg_changed:
            ghost.update_general_config()

        # Update database config
        if db_changed:
            ghost.update_db_config()

        ghost.restart_ghost()
        set_state('ghost.running')
        host.service_restart('nginx')
    hookenv.status_set('active', 'ready')
Example #6
def install_app():
    """ Performs application installation
    """

    hookenv.log('Installing Dokuwiki', 'info')

    # Configure NGINX vhost
    nginx.configure_site('default', 'vhost.conf',
                         listen_address=php.socket())

    # Update application
    dokuwiki.download_archive()

    # Needs to set dokuwiki directory permissions for installation
    app_path = nginx.get_app_path()

    render(source='local.php',
           target=path.join(app_path, 'conf/local.php'),
           context=config, perms=0o644)

    render(source='acl.auth.php',
           target=path.join(app_path, 'conf/acl.auth.php'),
           context=config, perms=0o644)

    render(source='plugins.local.php',
           target=path.join(app_path, 'conf/plugins.local.php'),
           context=config, perms=0o644)

    # Clean up install.php as we don't need it
    call("rm -f {}/conf/install.php", shell=True)

    php.restart()
    service_restart('nginx')
    hookenv.status_set('active', 'Dokuwiki is installed!')
Example #7
def leader_config_changed():
    ''' The leader executes the runtime configuration update for the cluster,
    as it is the controlling unit. Will render config, close and open ports and
    restart the etcd service.'''
    configuration = hookenv.config()
    previous_port = configuration.previous('port')
    log('Previous port: {0}'.format(previous_port))
    previous_mgmt_port = configuration.previous('management_port')
    log('Previous management port: {0}'.format(previous_mgmt_port))
    if previous_port and previous_mgmt_port:
        bag = EtcdDatabag()
        etcdctl = EtcdCtl()
        members = etcdctl.member_list()
        # Iterate over all the members in the list.
        for unit_name in members:
            # Grab the previous peer url and replace the management port.
            peer_urls = members[unit_name]['peer_urls']
            log('Previous peer url: {0}'.format(peer_urls))
            old_port = ':{0}'.format(previous_mgmt_port)
            new_port = ':{0}'.format(configuration.get('management_port'))
            url = peer_urls.replace(old_port, new_port)
            # Update the member's peer_urls with the new ports.
            log(etcdctl.member_update(members[unit_name]['unit_id'], url))
        # Render just the leader's configuration with the new values.
        render('defaults', '/etc/default/etcd', bag.__dict__, owner='root',
               group='root')
        # Close the previous client port and open the new one.
        close_open_ports()
        leader_set({'leader_address':
                   get_connection_string([bag.private_address],
                                         bag.management_port)})
        host.service_restart('etcd')
Example #8
def nm_changed():
    CONFIGS.write_all()
    if relation_get('ca_cert'):
        ca_crt = b64decode(relation_get('ca_cert'))
        install_ca_cert(ca_crt)

    if config('ha-legacy-mode'):
        cache_env_data()

    # Disable nova metadata if possible,
    if disable_nova_metadata():
        remove_legacy_nova_metadata()
    else:
        # NOTE: nova-api-metadata needs to be restarted
        #       once the nova-conductor is up and running
        #       on the nova-cc units.
        restart_nonce = relation_get('restart_trigger')
        if restart_nonce is not None:
            db = kv()
            previous_nonce = db.get('restart_nonce')
            if previous_nonce != restart_nonce:
                if not is_unit_paused_set():
                    service_restart('nova-api-metadata')
                db.set('restart_nonce', restart_nonce)
                db.flush()
Example #9
def upgrade_charm():
    packages_to_install = filter_installed_packages(determine_packages())
    if packages_to_install:
        log('Installing apt packages')
        status_set('maintenance', 'Installing apt packages')
        apt_install(packages_to_install)
    packages_removed = remove_old_packages()

    if run_in_apache():
        disable_unused_apache_sites()

    log('Regenerating configuration files')
    status_set('maintenance', 'Regenerating configuration files')
    CONFIGS.write_all()

    # See LP bug 1519035
    leader_init_db_if_ready()

    update_nrpe_config()

    if packages_removed:
        status_set('maintenance', 'Restarting services')
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
        stop_manager_instance()

    if is_elected_leader(CLUSTER_RES):
        log('Cluster leader - ensuring endpoint configuration is up to '
            'date', level=DEBUG)
        update_all_identity_relation_units()
Example #10
def restart_scheduler():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
    hookenv.status_set(prev_state, prev_msg)
    remove_state('kube-scheduler.do-restart')
    set_state('kube-scheduler.started')
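The save/restore-status pattern used here (and in several other examples below) can be factored into a small context manager; a sketch assuming only hookenv.status_get/status_set and host.service_restart from charmhelpers:

from contextlib import contextmanager

from charmhelpers.core import hookenv, host


@contextmanager
def maintenance_status(message):
    # Remember the current workload status, show a maintenance message
    # while the body runs, then restore the previous status afterwards.
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', message)
    try:
        yield
    finally:
        hookenv.status_set(prev_state, prev_msg)


# usage sketch:
# with maintenance_status('Restarting kube-scheduler'):
#     host.service_restart('snap.kube-scheduler.daemon')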
Example #11
def relation_changed():
    template_data = get_template_data()

    # Check required keys
    for k in ('etcd_servers',):
        if not template_data.get(k):
            print "Missing data for", k, template_data
            return

    print "Running with\n", template_data

    # Render and restart as needed
    for n in ('apiserver', 'controller-manager', 'scheduler'):
        if render_file(n, template_data) or not host.service_running(n):
            host.service_restart(n)

    # Render the file that makes the kubernetes binaries available to minions.
    if render_file(
            'distribution', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/distribution') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')
    # Render the default nginx template.
    if render_file(
            'nginx', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/default') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')

    # Send api endpoint to minions
    notify_minions()
Example #12
def bootstrap_monitor_cluster(secret):
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    upstart = '{}/upstart'.format(path)
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)

    if os.path.exists(done):
        log('bootstrap_monitor_cluster: mon already initialized.')
    else:
        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
        mkdir('/var/run/ceph', perms=0o755)
        mkdir(path)
        # end changes for Ceph >= 0.61.3
        try:
            subprocess.check_call(['ceph-authtool', keyring,
                                   '--create-keyring', '--name=mon.',
                                   '--add-key={}'.format(secret),
                                   '--cap', 'mon', 'allow *'])

            subprocess.check_call(['ceph-mon', '--mkfs',
                                   '-i', hostname,
                                   '--keyring', keyring])

            with open(done, 'w'):
                pass
            with open(upstart, 'w'):
                pass

            service_restart('ceph-mon-all')
        except:
            raise
        finally:
            os.unlink(keyring)
Example #13
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    # If neutron is ready to be queried then check for incompatibility
    # between existing neutron objects and charm settings
    if neutron_ready():
        if l3ha_router_present() and not get_l3ha():
            e = ('Cannot disable Router HA while ha enabled routers exist.'
                 ' Please remove any ha routers')
            status_set('blocked', e)
            raise Exception(e)
        if dvr_router_present() and not get_dvr():
            e = ('Cannot disable dvr while dvr enabled routers exist. Please'
                 ' remove any distributed routers')
            log(e, level=ERROR)
            status_set('blocked', e)
            raise Exception(e)
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    global CONFIGS
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('neutron-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    additional_install_locations(
        config('neutron-plugin'),
        config('openstack-origin')
    )
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(
                determine_packages(config('openstack-origin'))),
                fatal=True)
    packages_removed = remove_old_packages()
    configure_https()
    update_nrpe_config()
    CONFIGS.write_all()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    for r_id in relation_ids('neutron-api'):
        neutron_api_relation_joined(rid=r_id)
    for r_id in relation_ids('neutron-plugin-api'):
        neutron_plugin_api_relation_joined(rid=r_id)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
Example #14
def create_service_configs(kube_control):
    """Create the users for kubelet"""
    should_restart = False
    # generate the username/pass for the requesting unit
    proxy_token = get_token('system:kube-proxy')
    if not proxy_token:
        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
        proxy_token = get_token('system:kube-proxy')
        should_restart = True

    client_token = get_token('admin')
    if not client_token:
        setup_tokens(None, 'admin', 'admin', "system:masters")
        client_token = get_token('admin')
        should_restart = True

    requests = kube_control.auth_user()
    for request in requests:
        username = request[1]['user']
        group = request[1]['group']
        kubelet_token = get_token(username)
        if not kubelet_token and username and group:
            # Usernames have to be in the form of system:node:<nodeName>
            userid = "kubelet-{}".format(request[0].split('/')[1])
            setup_tokens(None, username, userid, group)
            kubelet_token = get_token(username)
            kube_control.sign_auth_request(request[0], username,
                                           kubelet_token, proxy_token,
                                           client_token)
            should_restart = True

    if should_restart:
        host.service_restart('snap.kube-apiserver.daemon')
        remove_state('authentication.setup')
Example #15
def domain_backend_changed(relation_id=None, unit=None):
    if get_api_version() < 3:
        log('Domain specific backend identity configuration only supported '
            'with Keystone v3 API, skipping domain creation and '
            'restart.')
        return

    domain_name = relation_get(attribute='domain-name',
                               unit=unit,
                               rid=relation_id)
    if domain_name:
        # NOTE(jamespage): Only create domain data from lead
        #                  unit when clustered and database
        #                  is configured and created.
        if is_leader() and is_db_ready() and is_db_initialised():
            create_or_show_domain(domain_name)
        # NOTE(jamespage): Deployment may have multiple domains,
        #                  with different identity backends so
        #                  ensure that a domain specific nonce
        #                  is checked for restarts of keystone
        restart_nonce = relation_get(attribute='restart-nonce',
                                     unit=unit,
                                     rid=relation_id)
        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
        db = unitdata.kv()
        if restart_nonce != db.get(domain_nonce_key):
            if not is_unit_paused_set():
                service_restart(keystone_service())
            db.set(domain_nonce_key, restart_nonce)
            db.flush()
Example #16
def check_local_db_actions_complete():
    """Check if we have received db init'd notification and restart services
    if we have not already.

    NOTE: this must only be called from peer relation context.
    """
    if not is_db_initialised():
        return

    settings = relation_get() or {}
    if settings:
        init_id = settings.get(NEUTRON_DB_INIT_RKEY)
        echoed_init_id = relation_get(unit=local_unit(),
                                      attribute=NEUTRON_DB_INIT_ECHO_RKEY)

        # If we have received an init notification from a peer unit
        # (assumed to be the leader) then restart neutron-api and echo the
        # notification and don't restart again unless we receive a new
        # (different) notification.
        if is_new_dbinit_notification(init_id, echoed_init_id):
            if not is_unit_paused_set():
                log("Restarting neutron services following db "
                    "initialisation", level=DEBUG)
                service_restart('neutron-server')

            # Echo notification
            relation_set(**{NEUTRON_DB_INIT_ECHO_RKEY: init_id})
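The restart guard above only needs to compare the peer's nonce with the one this unit last echoed; a minimal sketch of what an is_new_dbinit_notification check amounts to (hypothetical; the real helper may do more, e.g. ignore notifications that originated from this unit):

def is_new_dbinit_notification(init_id, echoed_init_id):
    # A notification is "new" when the peer sent one and it differs from
    # the value this unit last echoed back on the relation.
    return bool(init_id) and init_id != echoed_init_id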
Example #17
def upgrade_charm():
    apt_install(determine_packages(), fatal=True)
    if remove_old_packages():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    if is_leader():
        # if we are upgrading, then the old version might have used the
        # HEAT_PATH/encryption-key. So we grab the key from that, and put it in
        # leader settings to ensure that the key remains the same during an
        # upgrade.
        encryption_path = os.path.join(HEAT_PATH, 'encryption-key')
        if os.path.isfile(encryption_path):
            with open(encryption_path, 'r') as f:
                encryption_key = f.read()
            try:
                leader_set({'heat-auth-encryption-key': encryption_key})
            except subprocess.CalledProcessError as e:
                log("upgrade: leader_set: heat-auth-encryption-key failed,"
                    " didn't delete the existing file: {}.\n"
                    "Error was: ".format(encryption_path, str(e)),
                    level=WARNING)
            else:
                # now we just delete the file
                os.remove(encryption_path)
    leader_elected()
Example #18
def relation_changed():
    """Connect the parts and go :-)
    """
    template_data = get_template_data()

    # Check required keys
    for k in ('etcd_servers', 'kubeapi_server'):
        if not template_data.get(k):
            print('Missing data for %s %s' % (k, template_data))
            return
    print('Running with\n%s' % template_data)

    # Setup kubernetes supplemental group
    setup_kubernetes_group()

    # Register upstart managed services
    for n in ('kubelet', 'proxy'):
        if render_upstart(n, template_data) or not host.service_running(n):
            print('Starting %s' % n)
            host.service_restart(n)

    # Register machine via api
    print('Registering machine')
    register_machine(template_data['kubeapi_server'])

    # Save the marker (for restarts to detect prev install)
    template_data.save()
Example #19
def upgrade_charm():
    ch_fetch.apt_install(
        ch_fetch.filter_installed_packages(
            ncc_utils.determine_packages()), fatal=True)
    packages_removed = ncc_utils.remove_old_packages()
    if packages_removed:
        hookenv.log("Package purge detected, restarting services", "INFO")
        for s in ncc_utils.services():
            ch_host.service_restart(s)

    # For users already on bionic-rocky who are only upgrading the charm,
    # we need to ensure we don't end up with both the old
    # 'wsgi-openstack-api' and the new 'wsgi-placement-api' apache
    # configurations installed at the same time.
    ncc_utils.stop_deprecated_services()
    ncc_utils.disable_package_apache_site(service_reload=True)

    for r_id in hookenv.relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in hookenv.relation_ids('cloud-compute'):
        for unit in hookenv.related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in hookenv.relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    leader_init_db_if_ready_allowed_units()

    update_nrpe_config()
Example #20
def lxc_changed():
    nonce = relation_get('nonce')
    db = kv()
    if nonce and db.get('lxd-nonce') != nonce:
        db.set('lxd-nonce', nonce)
        configure_lxd(user='******')
        service_restart('nova-compute')
Example #21
def start_mattermost_nginx():
    print("Starting NGINX reverseproxy and https endpoint.")
    service_restart('nginx')
    open_port(config().get('port'))
    open_port(443)
    status_set('active', 'Ready (https://{})'.format(config().get('fqdn')))
    set_state('mattermost.nginx.started')
Example #22
def start():
    # In case we're being redeployed to the same machines, try
    # to make sure everything is running as soon as possible.
    if ceph.systemd():
        service_restart('ceph-mon')
    else:
        service_restart('ceph-mon-all')
Example #23
def setup_mattermost_backend(postgres_relation):
    print("Configuring and starting backend service.")
    _configure_mattermost_postgres(postgres_relation.master.uri)
    service_restart('mattermost')
    # Set build number for Juju status
    try:
        output = check_output(
            ['/opt/mattermost/bin/platform', 'version'],
            cwd='/opt/mattermost/bin/',
            universal_newlines=True,
            stderr=STDOUT,
        )
    except CalledProcessError as e:
        print(e.output)
        raise
    build_number = re.search(
        r'Build Number: ([0-9]+\.[0-9]+\.[0-9]+)\n', output).group(1)
    application_version_set(build_number)
    open_port(8065)
    # The next two aren't really open. This is a fix for the following issue:
    #    no expose possible before `open-port`.
    #    no `open-port` of 80 and 443 before ssl.
    #    no ssl certificate before `expose`.
    open_port(config().get('port'))
    open_port(443)
    status_set(
        'active',
        'Ready (http://{}:8065 [Insecure! Please set fqdn!])'.format(unit_public_ip()))
    set_state('mattermost.backend.started')
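The version parsing above assumes the platform binary prints a line of the form 'Build Number: X.Y.Z'; a small standalone check of the dot-escaped regex against a hypothetical sample of that output:

import re

sample = 'Version: 5.1.0\nBuild Number: 5.1.0\nBuild Date: ...\n'  # hypothetical output
match = re.search(r'Build Number: ([0-9]+\.[0-9]+\.[0-9]+)\n', sample)
print(match.group(1))  # -> 5.1.0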
Example #24
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #25
def configure_ovs():
    status_set('maintenance', 'Configuring ovs')
    if not service_running('openvswitch-switch'):
        full_restart()
    add_bridge(INT_BRIDGE)
    add_bridge(EXT_BRIDGE)
    ext_port_ctx = None
    if use_dvr():
        ext_port_ctx = ExternalPortContext()()
    if ext_port_ctx and ext_port_ctx['ext_port']:
        add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])

    portmaps = DataPortContext()()
    bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
    for provider, br in bridgemaps.iteritems():
        add_bridge(br)
        if not portmaps:
            continue

        for port, _br in portmaps.iteritems():
            if _br == br:
                add_bridge_port(br, port, promisc=True)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    service_restart('os-charm-phy-nic-mtu')
Example #26
def check_app_config():
    """
    Check the Ghost application config and possibly update and restart it.
    """
    cfg_changed = is_state('config.changed')
    db_changed = ghost.check_db_changed()
    if cfg_changed or db_changed:
        hookenv.status_set('maintenance', 'updating configuration')

        # Update application
        ghost.update_ghost()

        # Update general config
        if cfg_changed:
            ghost.update_general_config()

        # Update database config
        if db_changed:
            ghost.update_db_config()

        ghost.restart_ghost()
        set_state('ghost.running')
        host.service_restart('nginx')

        with open(path.join(node_dist_dir(), 'package.json'), 'r') as fp:
            package_json = json.loads(fp.read())

            # Set Ghost application version
            hookenv.application_version_set(package_json['version'])

    hookenv.status_set('active', 'ready')
Example #27
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'):
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=relation_get('key'))

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())
Example #28
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {'src': src_etc,
         'dest': '/etc/neutron'},
        {'src': os.path.join(src_etc, 'neutron/plugins'),
         'dest': '/etc/neutron/plugins'},
        {'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
         'dest': '/etc/neutron/rootwrap.d'},
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
                             'bin/neutron-rootwrap'),
         'link': '/usr/local/bin/neutron-rootwrap'},
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers', '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    neutron_ovs_agent_context = {
        'service_description': 'Neutron OpenvSwitch Plugin Agent',
        'charm_name': 'neutron-openvswitch',
        'process_name': 'neutron-openvswitch-agent',
        'executable_name': os.path.join(bin_dir, 'neutron-openvswitch-agent'),
        'cleanup_process_name': 'neutron-ovs-cleanup',
        'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
        'log_file': '/var/log/neutron/openvswitch-agent.log',
    }

    neutron_ovs_cleanup_context = {
        'service_description': 'Neutron OpenvSwitch Cleanup',
        'charm_name': 'neutron-openvswitch',
        'process_name': 'neutron-ovs-cleanup',
        'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
        'log_file': '/var/log/neutron/ovs-cleanup.log',
    }

    # NOTE(coreycb): Needs systemd support
    render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
           '/etc/init/neutron-plugin-openvswitch-agent.conf',
           neutron_ovs_agent_context, perms=0o644)
    render('git/upstart/neutron-ovs-cleanup.upstart',
           '/etc/init/neutron-ovs-cleanup.conf',
           neutron_ovs_cleanup_context, perms=0o644)

    service_restart('neutron-plugin-openvswitch-agent')
Example #29
def deploy_docker_bootstrap_daemon():
    ''' This is a nifty trick. We're going to init and start
    a secondary docker engine instance to run applications that
    can modify the "workload docker engine" '''
    # Render static template for init job
    status_set('maintenance', 'Configuring bootstrap docker daemon.')
    codename = host.lsb_release()['DISTRIB_CODENAME']

    # Render static template for daemon options
    render('bootstrap-docker.defaults', '/etc/default/bootstrap-docker', {},
           owner='root', group='root')

    # The templates are static, but running through the templating engine for
    # future modification. This doesn't add much overhead.
    if codename == 'trusty':
        render('bootstrap-docker.upstart', '/etc/init/bootstrap-docker.conf',
               {}, owner='root', group='root')
    else:
        # Render the service definition
        render('bootstrap-docker.service',
               '/lib/systemd/system/bootstrap-docker.service',
               {}, owner='root', group='root')
        # let systemd allocate the unix socket
        render('bootstrap-docker.socket',
               '/lib/systemd/system/bootstrap-docker.socket',
               {}, owner='root', group='root')
        # this creates the proper symlinks in /etc/systemd/system path
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.socket'))  # noqa
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.service'))  # noqa

    # start the bootstrap daemon
    service_restart('bootstrap-docker')
    set_state('bootstrap_daemon.available')
Example #30
    def __call__(self):
        # late import to work around circular dependency
        from keystone_utils import (
            determine_ports,
            update_hash_from_path,
        )

        ssl_paths = [CA_CERT_PATH,
                     os.path.join('/etc/apache2/ssl/',
                                  self.service_namespace)]

        self.external_ports = determine_ports()
        before = hashlib.sha256()
        for path in ssl_paths:
            update_hash_from_path(before, path)

        ret = super(ApacheSSLContext, self).__call__()

        after = hashlib.sha256()
        for path in ssl_paths:
            update_hash_from_path(after, path)

        # Ensure that apache2 is restarted if these change
        if before.hexdigest() != after.hexdigest():
            service_restart('apache2')

        return ret
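The before/after comparison above only needs a helper that folds the contents of every file under a path into a hashlib object; a rough sketch of what an update_hash_from_path style helper could look like (hypothetical; the real keystone_utils implementation may differ):

import hashlib
import os


def update_hash_from_path(checksum, path):
    # Feed 'path' itself (if it is a file) or every file below it into the
    # given hashlib object, so two snapshots can be compared via
    # hexdigest() to detect changed SSL material.
    if os.path.isfile(path):
        with open(path, 'rb') as f:
            checksum.update(f.read())
        return
    for root, _dirs, files in os.walk(path):
        for name in sorted(files):
            with open(os.path.join(root, name), 'rb') as f:
                checksum.update(f.read())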
Example #31
def create_block_conffile(lxc_name, confname):
    """
    Create the storpool_block config snippet for the old-style
    "mount each and every sp-* device within the container" behavior.
    """
    rdebug('found a Cinder container at "{name}"'.format(name=lxc_name))
    try:
        rdebug('about to record the name of the Cinder LXD - "{name}" - '
               "into {confname}".format(name=lxc_name, confname=confname))
        dirname = confname.parent
        rdebug(
            "- checking for the {dirname} directory".format(dirname=dirname))
        if not dirname.is_dir():
            rdebug("  - nah, creating it")
            dirname.mkdir(mode=0o755)

        rdebug("- is the file there?")
        okay = False
        expected_contents = [
            "[{node}]".format(node=platform.node()),
            "SP_EXTRA_FS=lxd:{name}".format(name=lxc_name),
        ]
        if confname.is_file():
            rdebug("  - yes, it is... but does it contain the right data?")
            contents = confname.read_text(encoding="ISO-8859-15").splitlines()
            if contents == expected_contents:
                rdebug("   - whee, it already does!")
                okay = True
            else:
                rdebug("   - it does NOT: {lst}".format(lst=contents))
        else:
            rdebug("   - nah...")
            if confname.exists():
                rdebug("     - but it still exists?!")
                subprocess.call(["rm", "-rf", "--", str(confname)])
                if confname.exists():
                    rdebug("     - could not remove it, so leaving it "
                           "alone, I guess")
                    okay = True

        if not okay:
            rdebug("- about to recreate the {confname} file".format(
                confname=confname))
            with tempfile.NamedTemporaryFile(dir="/tmp", mode="w+t") as spconf:
                print("\n".join(expected_contents), file=spconf)
                spconf.flush()
                subprocess.check_call(
                    [
                        "install",
                        "-o",
                        "root",
                        "-g",
                        "root",
                        "-m",
                        "644",
                        "--",
                        spconf.name,
                        str(confname),
                    ],
                    shell=False,
                )
            rdebug("- looks like we are done with it")
            rdebug("- let us try to restart the storpool_block service "
                   "(it may not even have been started yet, so "
                   "ignore errors)")
            try:
                if host.service_running("storpool_block"):
                    rdebug("  - well, it does seem to be running, "
                           "so restarting it")
                    host.service_restart("storpool_block")
                else:
                    rdebug("  - nah, it was not running at all indeed")
            except Exception as e:
                rdebug("  - could not restart the service, but "
                       "ignoring the error: {e}".format(e=e))
    except Exception as e:
        rdebug("could not check for and/or recreate the {confname} "
               'storpool_block config file adapted to the "{name}" '
               "LXD container: {e}".format(confname=confname,
                                           name=lxc_name,
                                           e=e))
Example #32
def restart(self):
    '''
    Restarts the Kafka service.
    '''
    host.service_restart(KAFKA_SERVICE)
Example #33
def update_config(servicename):
    check_call("sudo neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head",shell=True)
    service_restart('neutron-server')
Example #34
def common_upgrade_charm_and_config_changed():
    """Common code between upgrade-charm and config-changed hooks"""
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    # If neutron is ready to be queried then check for incompatibility
    # between existing neutron objects and charm settings
    if neutron_ready():
        if l3ha_router_present() and not get_l3ha():
            e = ('Cannot disable Router HA while ha enabled routers exist.'
                 ' Please remove any ha routers')
            status_set('blocked', e)
            raise Exception(e)
        if dvr_router_present() and not get_dvr():
            e = ('Cannot disable dvr while dvr enabled routers exist. Please'
                 ' remove any distributed routers')
            log(e, level=ERROR)
            status_set('blocked', e)
            raise Exception(e)
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    global CONFIGS
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('neutron-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    additional_install_locations(config('neutron-plugin'),
                                 config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(
        determine_packages(config('openstack-origin'))),
                fatal=True)
    packages_removed = remove_old_packages()
    configure_https()
    update_nrpe_config()
    infoblox_changed()
    # This part can be removed for U.
    if os.path.exists(ADMIN_POLICY):
        # Clean 00-admin.json added for bug/1830536. It has been
        # noticed that it causes a regression.
        os.remove(ADMIN_POLICY)
    CONFIGS.write_all()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    for r_id in relation_ids('neutron-api'):
        neutron_api_relation_joined(rid=r_id)
    for r_id in relation_ids('neutron-plugin-api'):
        neutron_plugin_api_relation_joined(rid=r_id)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    for r_id in relation_ids('neutron-plugin-api-subordinate'):
        neutron_plugin_api_subordinate_relation_joined(relid=r_id)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
Example #35
def install_tvault_contego_plugin():

    status_set('maintenance', 'Installing...')

    # Read config parameters
    bkp_type = config('backup-target-type')
    if config('python-version') == 2:
        pkg_name = 'tvault-contego'
    else:
        pkg_name = 'python3-tvault-contego'

    # add triliovault package repo
    os.system('sudo echo "{}" > '
              '/etc/apt/sources.list.d/trilio-gemfury-sources.list'.format(
                  config('triliovault-pkg-source')))
    apt_update()

    # Validate backup target
    if not validate_backup():
        log("Failed while validating backup")
        status_set('blocked',
                   'Invalid Backup target info, please provide valid info')
        return

    # Proceed as triliovault_ip Address is valid
    if not add_users():
        log("Failed while adding Users")
        status_set('blocked', 'Failed while adding Users')
        return

    pkg_loc = create_virt_env(pkg_name)
    if not pkg_loc:
        log("Failed while Creating Virtual Env")
        status_set('blocked', 'Failed while Creating Virtual Env')
        return

    if not ensure_files():
        log("Failed while ensuring files")
        status_set('blocked', 'Failed while ensuring files')
        return

    if not create_conf():
        log("Failed while creating conf files")
        status_set('blocked', 'Failed while creating conf files')
        return

    if not ensure_data_dir():
        log("Failed while ensuring datat directories")
        status_set('blocked', 'Failed while ensuring datat directories')
        return

    if not create_service_file():
        log("Failed while creating DataMover service file")
        status_set('blocked', 'Failed while creating DataMover service file')
        return

    if bkp_type == 's3' and not create_object_storage_service():
        log("Failed while creating Object Store service file")
        status_set('blocked', 'Failed while creating ObjectStore service file')
        return

    os.system('sudo systemctl daemon-reload')
    # Enable and start the object-store service
    if bkp_type == 's3':
        os.system('sudo systemctl enable tvault-object-store')
        service_restart('tvault-object-store')
    # Enable and start the datamover service
    os.system('sudo systemctl enable tvault-contego')
    service_restart('tvault-contego')

    # Install was successful
    status_set('active', 'Ready...')
    # Add the flag "installed" since it's done
    application_version_set(get_new_version(pkg_name))
    set_flag('tvault-contego.installed')
Example #36
def start_restart(service):
    if service_running(service):
        service_restart(service)
    else:
        service_start(service)
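A usage sketch, assuming the same charmhelpers.core.host helpers as in the example; the service names are arbitrary:

# restarted if already running, started from scratch otherwise
start_restart('nginx')
start_restart('filebeat')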
Example #37
def configure_ovs():
    """Configure the OVS plugin.

    This function uses the config.yaml parameters ext-port, data-port and
    bridge-mappings to configure the bridges and ports on the ovs on the
    unit.

    Note that the ext-port is deprecated and data-port/bridge-mappings are
    preferred.

    Thus, if data-port is set, then ext-port is ignored (and if set, then
    it is removed from the set of bridges unless it is defined in
    bridge-mappings/data-port).  A warning is issued, if both data-port and
    ext-port are set.
    """
    if config('plugin') in [OVS, OVS_ODL]:
        if not service_running('openvswitch-switch'):
            full_restart()
        # Get existing set of bridges and ports
        current_bridges_and_ports = get_bridges_and_ports_map()
        log("configure OVS: Current bridges and ports map: {}".format(
            ", ".join("{}: {}".format(b, ",".join(v))
                      for b, v in current_bridges_and_ports.items())))

        add_bridge(INT_BRIDGE, brdata=generate_external_ids())
        add_bridge(EXT_BRIDGE, brdata=generate_external_ids())

        ext_port_ctx = ExternalPortContext()()
        portmaps = DataPortContext()()
        bridgemaps = parse_bridge_mappings(config('bridge-mappings'))

        # If both data-port and ext-port are set, ext-port is ignored and
        # a warning is logged to the unit's log.
        if config('data-port') and config('ext-port'):
            log(
                "Both ext-port and data-port are set.  ext-port is deprecated"
                " and is not used when data-port is set",
                level=ERROR)

        # only use ext-port if data-port is not set
        if not portmaps and ext_port_ctx and ext_port_ctx['ext_port']:
            _port = ext_port_ctx['ext_port']
            add_bridge_port(EXT_BRIDGE,
                            _port,
                            ifdata=generate_external_ids(EXT_BRIDGE),
                            portdata=generate_external_ids(EXT_BRIDGE))
            log("DEPRECATION: using ext-port to set the port {} on the "
                "EXT_BRIDGE ({}) is deprecated.  Please use data-port instead."
                .format(_port, EXT_BRIDGE),
                level=WARNING)

        for br in bridgemaps.values():
            add_bridge(br, brdata=generate_external_ids())
            if not portmaps:
                continue

            for port, _br in portmaps.items():
                if _br == br:
                    if not is_linuxbridge_interface(port):
                        add_bridge_port(br,
                                        port,
                                        promisc=True,
                                        ifdata=generate_external_ids(br),
                                        portdata=generate_external_ids(br))
                    else:
                        # NOTE(lourot): this will raise on focal+ and/or if the
                        # system has no `ifup`. See lp:1877594
                        add_ovsbridge_linuxbridge(
                            br,
                            port,
                            ifdata=generate_external_ids(br),
                            portdata=generate_external_ids(br))

        target = config('ipfix-target')
        bridges = [INT_BRIDGE, EXT_BRIDGE]
        bridges.extend(bridgemaps.values())

        if target:
            for bridge in bridges:
                disable_ipfix(bridge)
                enable_ipfix(bridge, target)
        else:
            # NOTE: removing ipfix setting from a bridge is idempotent and
            #       will pass regardless of the existence of the setting
            for bridge in bridges:
                disable_ipfix(bridge)

        new_bridges_and_ports = get_bridges_and_ports_map()
        log("configure OVS: Final bridges and ports map: {}".format(", ".join(
            "{}: {}".format(b, ",".join(v))
            for b, v in new_bridges_and_ports.items())),
            level=DEBUG)

        # Ensure this runs so that mtu is applied to data-port interfaces if
        # provided.
        service_restart('os-charm-phy-nic-mtu')
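As an illustration of the precedence described in the docstring, with hypothetical charm config values:

# hypothetical config (juju config <unit> ...):
#   bridge-mappings: "physnet1:br-data"
#   data-port:       "br-data:eth1"
#   ext-port:        "eth2"
#
# configure_ovs() would create INT_BRIDGE, EXT_BRIDGE and br-data, attach
# eth1 to br-data (promiscuous), ignore eth2 because data-port is set, and
# log a warning that ext-port is deprecated.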
Example #38
def restart_controller_manager():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
    host.service_restart('snap.kube-controller-manager.daemon')
    hookenv.status_set(prev_state, prev_msg)
Example #39
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {
            'src': src_etc,
            'dest': '/etc/neutron'
        },
        {
            'src': os.path.join(src_etc, 'neutron/plugins'),
            'dest': '/etc/neutron/plugins'
        },
        {
            'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
            'dest': '/etc/neutron/rootwrap.d'
        },
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {
            'src':
            os.path.join(git_pip_venv_dir(projects_yaml),
                         'bin/neutron-rootwrap'),
            'link':
            '/usr/local/bin/neutron-rootwrap'
        },
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers',
           '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    # Use systemd init units/scripts from ubuntu wily onward
    if lsb_release()['DISTRIB_RELEASE'] >= '15.10':
        templates_dir = os.path.join(charm_dir(), 'templates/git')
        daemons = ['neutron-openvswitch-agent', 'neutron-ovs-cleanup']
        for daemon in daemons:
            neutron_ovs_context = {
                'daemon_path': os.path.join(bin_dir, daemon),
            }
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if os_release('neutron-common') < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            template_file = 'git/{}.init.in.template'.format(filename)
            init_in_file = '{}.init.in'.format(filename)
            render(template_file,
                   os.path.join(templates_dir, init_in_file),
                   neutron_ovs_context,
                   perms=0o644)
        git_generate_systemd_init_files(templates_dir)

        for daemon in daemons:
            filename = daemon
            if daemon == 'neutron-openvswitch-agent':
                if os_release('neutron-common') < 'mitaka':
                    filename = 'neutron-plugin-openvswitch-agent'
            service('enable', filename)
    else:
        neutron_ovs_agent_context = {
            'service_description': 'Neutron OpenvSwitch Plugin Agent',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-openvswitch-agent',
            'executable_name': os.path.join(bin_dir,
                                            'neutron-openvswitch-agent'),
            'cleanup_process_name': 'neutron-ovs-cleanup',
            'plugin_config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'log_file': '/var/log/neutron/openvswitch-agent.log',
        }

        neutron_ovs_cleanup_context = {
            'service_description': 'Neutron OpenvSwitch Cleanup',
            'charm_name': 'neutron-openvswitch',
            'process_name': 'neutron-ovs-cleanup',
            'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
            'log_file': '/var/log/neutron/ovs-cleanup.log',
        }

        if os_release('neutron-common') < 'mitaka':
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-plugin-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        else:
            render('git/upstart/neutron-plugin-openvswitch-agent.upstart',
                   '/etc/init/neutron-openvswitch-agent.conf',
                   neutron_ovs_agent_context,
                   perms=0o644)
        render('git/upstart/neutron-ovs-cleanup.upstart',
               '/etc/init/neutron-ovs-cleanup.conf',
               neutron_ovs_cleanup_context,
               perms=0o644)

    if not is_unit_paused_set():
        service_restart('neutron-plugin-openvswitch-agent')
Example #40
def enable_ovs_dpdk():
    '''Enables the DPDK variant of ovs-vswitchd and restarts it'''
    subprocess.check_call(UPDATE_ALTERNATIVES + [OVS_DPDK_BIN])
    if not is_unit_paused_set():
        service_restart('openvswitch-switch')
Example #41
def restart():
    host.service_restart('bundleservice')
Example #42
def restart_nrpe():
    """Restart nrpe."""
    if OS_RELEASE_CTXT['ID'] == 'ubuntu':
        host.service_restart("nagios-nrpe-server")
    else:
        host.service_restart("nrpe")
Example #43
def configure():
    m = mongodb.mongodb(config().get('version'))
    m.configure(config())
    service_restart('mongodb')
    set_state('mongodb.ready')
Example #44
def kick_nginx(tls):
    # certificate changed, so sighup nginx
    hookenv.log("Certificate information changed, sending SIGHUP to nginx")
    host.service_restart('nginx')
    clear_flag('tls_client.certs.changed')
    clear_flag('tls_client.ca.written')
Example #45
def install_tvault_contego_plugin():

    status_set('maintenance', 'Installing...')

    # Read config parameters TrilioVault IP, backup target
    tv_ip = config('triliovault-ip')
    bkp_type = config('backup-target-type')

    # Validate triliovault_ip
    if not validate_ip(tv_ip):
        # IP address is invalid
        # Set status as blocked and return
        status_set(
            'blocked',
            'Invalid IP address, please provide correct IP address')
        return

    # Validate backup target
    if not validate_backup():
        log("Failed while validating backup")
        status_set(
            'blocked',
            'Invalid Backup target info, please provide valid info')
        return

    # Proceed as triliovault_ip Address is valid
    if not add_users():
        log("Failed while adding Users")
        status_set('blocked', 'Failed while adding Users')
        return

    if not create_virt_env():
        log("Failed while Creating Virtual Env")
        status_set('blocked', 'Failed while Creating Virtual Env')
        return

    if not ensure_files():
        log("Failed while ensuring files")
        status_set('blocked', 'Failed while ensuring files')
        return

    if not create_conf():
        log("Failed while creating conf files")
        status_set('blocked', 'Failed while creating conf files')
        return

    if not ensure_data_dir():
        log("Failed while ensuring datat directories")
        status_set('blocked', 'Failed while ensuring datat directories')
        return

    if not create_service_file():
        log("Failed while creating DataMover service file")
        status_set('blocked', 'Failed while creating DataMover service file')
        return

    if bkp_type == 's3' and not create_object_storage_service():
        log("Failed while creating Object Store service file")
        status_set('blocked', 'Failed while creating ObjectStore service file')
        return

    os.system('sudo systemctl daemon-reload')
    # Enable and start the object-store service
    if bkp_type == 's3':
        os.system('sudo systemctl enable tvault-object-store')
        service_restart('tvault-object-store')
    # Enable and start the datamover service
    os.system('sudo systemctl enable tvault-contego')
    service_restart('tvault-contego')

    # Install was successful
    status_set('active', 'Ready...')
    # Add the flag "installed" since it's done
    application_version_set(get_new_version('tvault-contego'))
    set_flag('tvault-contego.installed')
Example #46
def restart_rsync(service_name):
    """Restart rsync."""
    host.service_restart("rsync")
Example #47
def restart_mattermost():
    print("Mattermost config changed. Restarting.")
    service_restart('mattermost')
Example #48
def service_restart(service_name):
    hookenv.status_set('maintenance',
                       'Restarting {0} service'.format(service_name))
    host.service_restart(service_name)
Example #49
def setup_flask():
    setup_dirs()
    copy_static_files()
    render_files()
    host.service_restart(FLASK_SERVICE)
Example #50
def restart_on_munge_change():
    log('Restarting munge due to changed munge key on disk (%s)' %
        MUNGE_KEY_PATH)
    service_restart(MUNGE_SERVICE)
Example #51
def restart_on_munge_change2():
    hookenv.log(
        'restart_on_munge_change2(): file %s modified, restarting due to flag'
        % munge.MUNGE_KEY_PATH)
    host.service_restart(munge.MUNGE_SERVICE)
    flags.clear_flag('munge.changed_key_file')
Example #52
def restart():
    host.service_restart('tokin')
Example #53
def remove_controller(sojobo):
    api_dir = list(sojobo.connection())[0]['api-dir']
    remove('{}/controllers/controller_manual.py'.format(api_dir))
    remove('{}/scripts/bootstrap_manual_controller.py'.format(api_dir))
    service_restart('nginx')
    remove_state('controller-manual.installed')
Example #54
def git_post_install(projects_yaml):
    """Perform glance post-install setup."""
    http_proxy = git_yaml_value(projects_yaml, 'http_proxy')
    if http_proxy:
        for pkg in ['mysql-python', 'python-cephlibs']:
            pip_install(pkg,
                        proxy=http_proxy,
                        venv=git_pip_venv_dir(projects_yaml))
    else:
        for pkg in ['mysql-python', 'python-cephlibs']:
            pip_install(pkg, venv=git_pip_venv_dir(projects_yaml))

    src_etc = os.path.join(git_src_dir(projects_yaml, 'glance'), 'etc')
    configs = {
        'src': src_etc,
        'dest': GLANCE_CONF_DIR,
    }

    if os.path.exists(configs['dest']):
        shutil.rmtree(configs['dest'])
    shutil.copytree(configs['src'], configs['dest'])

    symlinks = [
        # NOTE(coreycb): Need to find better solution than bin symlinks.
        {
            'src':
            os.path.join(git_pip_venv_dir(projects_yaml), 'bin/glance-manage'),
            'link':
            '/usr/local/bin/glance-manage'
        },
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    # Use systemd init units/scripts from ubuntu wily onward
    if lsb_release()['DISTRIB_RELEASE'] >= '15.10':
        templates_dir = os.path.join(charm_dir(), TEMPLATES, 'git')
        daemons = ['glance-api', 'glance-glare', 'glance-registry']
        for daemon in daemons:
            glance_context = {
                'daemon_path': os.path.join(bin_dir, daemon),
            }
            template_file = 'git/{}.init.in.template'.format(daemon)
            init_in_file = '{}.init.in'.format(daemon)
            render(template_file,
                   os.path.join(templates_dir, init_in_file),
                   glance_context,
                   perms=0o644)
        git_generate_systemd_init_files(templates_dir)
    else:
        glance_api_context = {
            'service_description': 'Glance API server',
            'service_name': 'Glance',
            'user_name': 'glance',
            'start_dir': '/var/lib/glance',
            'process_name': 'glance-api',
            'executable_name': os.path.join(bin_dir, 'glance-api'),
            'config_files': [GLANCE_API_CONF],
            'log_file': '/var/log/glance/api.log',
        }

        glance_registry_context = {
            'service_description': 'Glance registry server',
            'service_name': 'Glance',
            'user_name': 'glance',
            'start_dir': '/var/lib/glance',
            'process_name': 'glance-registry',
            'executable_name': os.path.join(bin_dir, 'glance-registry'),
            'config_files': [GLANCE_REGISTRY_CONF],
            'log_file': '/var/log/glance/registry.log',
        }

        templates_dir = 'hooks/charmhelpers/contrib/openstack/templates'
        templates_dir = os.path.join(charm_dir(), templates_dir)
        render('git.upstart',
               '/etc/init/glance-api.conf',
               glance_api_context,
               perms=0o644,
               templates_dir=templates_dir)
        render('git.upstart',
               '/etc/init/glance-registry.conf',
               glance_registry_context,
               perms=0o644,
               templates_dir=templates_dir)

    # Don't restart services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        service_restart('glance-api')
        service_restart('glance-registry')
Example #55
def restart_scheduler():
    prev_state, prev_msg = hookenv.status_get()
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
    hookenv.status_set(prev_state, prev_msg)
Example #56
def restart_filebeat():
    ''' Anytime we touch the config file, cycle the service'''
    service_restart('filebeat')
Example #57
def service_restart(service_name):
    hookenv.status_set("maintenance",
                       "Restarting {0} service".format(service_name))
    host.service_restart(service_name)
Example #58
def restart_on_slurm_change():
    log('Restarting slurmd due to changed configuration on disk (%s)' %
        SLURM_CONFIG_PATH)
    service_restart(SLURMD_SERVICE)
Example #59
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    services = ['kube-proxy', 'kubelet']
    for service in services:
        service_restart('snap.%s.daemon' % service)
Example #60
def storage_changed():
    """Storage relation.

    Only the leader unit can update and distribute rings so if we are not the
    leader we ignore this event and wait for a resync request from the leader.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log(
            "Not the leader - deferring storage relation change to leader "
            "unit.",
            level=DEBUG)
        return

    log("Storage relation changed -processing", level=DEBUG)
    host_ip = get_host_ip()
    if not host_ip:
        log(
            "No host ip found in storage relation - deferring storage "
            "relation",
            level=WARNING)
        return

    update_rsync_acls()

    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }
    node_repl_settings = {
        'ip_rep': relation_get('ip_rep'),
        'region': relation_get('region'),
        'account_port_rep': relation_get('account_port_rep'),
        'object_port_rep': relation_get('object_port_rep'),
        'container_port_rep': relation_get('container_port_rep')
    }

    if any(node_repl_settings.values()):
        node_settings.update(node_repl_settings)

    if None in node_settings.values():
        missing = [k for k, v in node_settings.items() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing={})".format(', '.join(missing)),
            level=INFO)
        return None

    for k in [
            'region', 'zone', 'account_port', 'account_port_rep',
            'object_port', 'object_port_rep', 'container_port',
            'container_port_rep'
    ]:
        if node_settings.get(k) is not None:
            node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    update_rings(nodes)
    if not openstack.is_unit_paused_set():
        # Restart proxy here in case no config changes made (so
        # restart_on_change() ineffective).
        service_restart('swift-proxy')