Example no. 1
def configure_crowd():
    opts = {}
    opts['crowd_home'] = '/var/crowd-home'
    render(
        'crowd-init.properties',
        "{}/atlassian-crowd-{}/crowd-webapp/WEB-INF/classes/crowd-init.properties"
        .format(CROWD_INSTALL, hookenv.config('crowd-version')),
        opts,
        owner="crowd",
        group="crowd",
    )
    service_opts = {
        'crowd_install_dir': CROWD_INSTALL,
        'crowd_version': hookenv.config('crowd-version'),
    }
    render(
        'crowd.service',
        '/etc/systemd/system/crowd.service',
        service_opts,
    )
    chmod(
        "{}/atlassian-crowd-{}/start_crowd.sh".format(
            CROWD_INSTALL, hookenv.config('crowd-version')), 0o755)
    chmod(
        "{}/atlassian-crowd-{}/stop_crowd.sh".format(
            CROWD_INSTALL, hookenv.config('crowd-version')), 0o755)

    if hookenv.config('license-key'):
        install_license(hookenv.config('license-key'))

    host.service_start('crowd')
    host.service_resume('crowd')
    hookenv.open_port(8095)
def cluster_changed():
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        dbsync_state = ch_peerstorage.peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            if not ch_utils.is_unit_paused_set():
                for svc in ncc_utils.services():
                    ch_host.service_resume(svc)
            else:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
        else:
            if not ch_utils.is_unit_paused_set():
                hookenv.log('Database sync not ready. Shutting down services')
                for svc in ncc_utils.services():
                    ch_host.service_pause(svc)
            else:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
    # The shared metadata secret is stored in the leader-db and if it's
    # changed the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
Example no. 4
def update_nova_consoleauth_config():
    """
    Configure nova-consoleauth pacemaker resources
    """
    relids = hookenv.relation_ids('ha')
    if len(relids) == 0:
        hookenv.log('Related to {} ha services'.format(len(relids)),
                    level=hookenv.DEBUG)
        ha_relid = None
        data = {}
    else:
        ha_relid = relids[0]
        data = hookenv.relation_get(rid=ha_relid) or {}

    # initialize keys in case this is a new dict
    data.setdefault('delete_resources', [])
    for k in ['colocations', 'init_services', 'resources', 'resource_params']:
        data.setdefault(k, {})

    if (hookenv.config('single-nova-consoleauth')
            and common.console_attributes('protocol')):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            try:
                data['delete_resources'].remove(item)
            except ValueError:
                pass  # nothing to remove, we are good

        # the new pcmkr resources have to be added to the existing ones
        data['colocations']['vip_consoleauth'] = COLO_CONSOLEAUTH
        data['init_services']['res_nova_consoleauth'] = 'nova-consoleauth'
        data['resources']['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        data['resource_params']['res_nova_consoleauth'] = AGENT_CA_PARAMS

        for rid in hookenv.relation_ids('ha'):
            hookenv.relation_set(rid, **data)

        # nova-consoleauth will be managed by pacemaker, so stop it
        # and prevent it from being started again at boot. (LP: #1693629).
        if hookenv.relation_ids('ha'):
            ch_host.service_pause('nova-consoleauth')

    elif (not hookenv.config('single-nova-consoleauth')
          and common.console_attributes('protocol')):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            if item not in data['delete_resources']:
                data['delete_resources'].append(item)

        # remove them from the rel, so they aren't recreated when the hook
        # is re-run
        data['colocations'].pop('vip_consoleauth', None)
        data['init_services'].pop('res_nova_consoleauth', None)
        data['resources'].pop('res_nova_consoleauth', None)
        data['resource_params'].pop('res_nova_consoleauth', None)

        for rid in hookenv.relation_ids('ha'):
            hookenv.relation_set(rid, **data)

        if not ch_utils.is_unit_paused_set():
            ch_host.service_resume('nova-consoleauth')
def post_series_upgrade():
    log("Running complete series upgrade hook", "INFO")
    openstack.clear_unit_paused()
    openstack.clear_unit_upgrading()
    _services, _ = get_managed_services_and_ports(services(), [])
    if not openstack.is_unit_paused_set():
        for service in _services:
            service_resume(service)
            started = service_start(service)
            if not started:
                raise Exception("{} didn't start cleanly.".format(service))
Example no. 6
def start_ntpmon():
    """
    Start the ntpmon daemon process.
    If ntp is not installed, disable ntpmon.
    """
    if os.path.exists(ntp_conf):
        hookenv.log(ntp_conf + ' present; enabling and starting ntpmon')
        host.service_resume(service_name)
    else:
        hookenv.log(ntp_conf + ' not present; disabling ntpmon')
        host.service_pause(service_name)
    set_state('ntpmon.started')
Example no. 7
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid, key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
def enable_and_start():
    """
    Enable and start the `storpool_beacon` service.
    """
    if sputils.check_in_lxc():
        rdebug('running in an LXC container, not doing anything more')
        reactive.set_state('storpool-beacon.beacon-started')
        return

    if not sputils.check_cgroups('beacon'):
        return

    rdebug('enabling and starting the beacon service')
    host.service_resume('storpool_beacon')
    reactive.set_state('storpool-beacon.beacon-started')
Example no. 9
def resume(args):
    """Resume the MySQL service.

    @raises Exception should the service fail to start."""
    if not service_resume(MYSQL_SERVICE):
        raise Exception("Failed to resume MySQL service.")
    assess_status()
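
The complementary pause action for this pattern is not shown among these examples; a minimal sketch, assuming the same MYSQL_SERVICE constant and assess_status() helper as above, and charmhelpers' service_pause (which, like service_resume, returns a boolean indicating success):

from charmhelpers.core.host import service_pause


def pause(args):
    """Pause the MySQL service.

    @raises Exception should the service fail to stop."""
    if not service_pause(MYSQL_SERVICE):
        raise Exception("Failed to pause MySQL service.")
    assess_status()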
Example no. 10
def enable_and_start():
    """
    Enable and start the `storpool_block` service.
    """
    if sputils.check_in_lxc():
        rdebug('running in an LXC container, not doing anything more')
        reactive.set_state('storpool-block.block-started')
        return

    if not sputils.check_cgroups('block'):
        return

    rdebug('enabling and starting the block service')
    host.service_resume('storpool_block')
    if os.path.isfile('/usr/sbin/storpool_stat.bin'):
        host.service_resume('storpool_stat')
    reactive.set_state('storpool-block.block-started')
Example no. 11
def start_ntpmon():
    """
    Start the ntpmon daemon process.
    If no supported NTP server is installed, disable ntpmon.
    """
    started = False
    service_name = layer.options.get('ntpmon', 'service-name')
    if service_name:
        for f in (CHRONY_CONF, NTP_CONF):
            if os.path.exists(f):
                log('{} present; enabling and starting ntpmon'.format(f))
                host.service_resume(service_name)
                started = True
                break
        if not started:
            log('No supported NTP service present; disabling ntpmon')
            host.service_pause(service_name)
    set_flag('ntpmon.started')
Example no. 13
def restart_notebook():
    # Start notebook and ensure it is running. Note that if the actual config
    # file is broken, the notebook will be running but won't be accessible from
    # anywhere other than localhost.
    host.service_stop('jupyter-notebook')
    # Wait until notebook shut down completely.
    import time
    time.sleep(10)
    # service_resume also enables the service on startup
    host.service_resume('jupyter-notebook')
    if host.service_running('jupyter-notebook'):
        status_set('active',
                   'Ready (Pass: "******")'.format(unitdata.kv().get('password')))
        open_port(hookenv.config()['open-port'])
        set_state('jupyter-notebook.configured')
    else:
        status_set('blocked',
                   'Could not restart service due to wrong configuration!')
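
The fixed time.sleep(10) above is only a guess at how long shutdown takes. A bounded polling loop on host.service_running (already used in this example) is a more direct check; a small sketch, where the 30-second timeout and the helper name wait_for_stop are illustrative choices:

import time

from charmhelpers.core import host


def wait_for_stop(service, timeout=30):
    """Poll until the service stops or the timeout expires."""
    deadline = time.time() + timeout
    while host.service_running(service) and time.time() < deadline:
        time.sleep(1)
    # True if the service actually stopped within the timeout.
    return not host.service_running(service)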
Example no. 14
def update_aws_compat_services():
    """Depending on the configuration of `disable-aws-compatibility` config
    option.

    This will stop/start and disable/enable `nova-api-ec2` and
    `nova-objectstore` services.
    """
    # if packages aren't installed, then there is nothing to do
    if filter_installed_packages(AWS_COMPAT_SERVICES) != []:
        return

    if config('disable-aws-compat'):
        # TODO: the endpoints have to removed from keystone
        for service_ in AWS_COMPAT_SERVICES:
            service_pause(service_)
    else:
        for service_ in AWS_COMPAT_SERVICES:
            service_resume(service_)
Example no. 16
def resume(args):
    """Resume all the Keystone services.

    @raises Exception if any services fail to start
    """
    for service in services():
        started = service_resume(service)
        if not started:
            raise Exception("{} didn't start cleanly.".format(service))
    status_set("active", "")
Example no. 18
def resume(args):
    """Resume all the swift services.

    @raises Exception if any services fail to start
    """
    for service in args.services:
        started = service_resume(service)
        if not started:
            raise Exception("{} didn't start cleanly.".format(service))
    clear_unit_paused()
    assess_status(CONFIGS, args.services)
Example no. 20
 def restart(self, coordinator):
     """
     The restart method manages the OpsCoordinator and requests the
     restart lock. Once the lock is granted, restart each of the
     services that have been passed.
     """
     if coordinator.acquire('restart'):
         for ev in self.svc:
             # Unmask and enable service
             service_resume(ev)
             # Reload and restart
             service_reload(ev)
             service_restart(ev)
         # Now that restart is done, save lock state and release it.
         # Inform that restart has been successful
         return True
     else:
         # Still waiting for the lock to be granted.
         # Return False so this event can be deferred
         return False
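
The example does not show how the boolean result is consumed. A hypothetical caller in an ops-framework charm might defer the triggering event until the lock is granted; self.svc and self.coordinator are assumed to be wired up elsewhere in the charm class:

    # Hypothetical method on the same charm class as restart() above.
    def _on_config_changed(self, event):
        # restart() returns False while the coordinator lock is still
        # pending, so defer the event and retry on a later hook run.
        if not self.restart(self.coordinator):
            event.defer()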
Example no. 21
def register_to_cloud():
    """
    Implementation of `register-to-cloud` action.

    This action reverts the `remove-from-cloud` action. It starts the
    nova-compute system service, which triggers its re-registration in
    the cloud.
    """
    log("Starting nova-compute service", DEBUG)
    service_resume('nova-compute')
    current_status = status_get()
    if current_status[0] == WORKLOAD_STATES.BLOCKED.value and \
            current_status[1] == UNIT_REMOVED_MSG:
        status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')

    nova_compute_hooks.update_status()
    function_set({
        'command': 'openstack compute service list',
        'message': "Nova compute service started. It should get registered "
                   "with the cloud controller in a short time. Use the "
                   "'openstack' command to verify that it's registered."
    })
Example no. 22
def resume(args):
    """Resume all the swift services.

    @raises Exception if any services fail to start
    """
    for service in args.services:
        started = service_resume(service)
        if not started:
            raise Exception("{} didn't start cleanly.".format(service))
    with HookData()():
        kv().set('unit-paused', False)
    assess_status(CONFIGS)
Example no. 23
def resume(args):
    """Resume all the swift services.

    @raises Exception if any services fail to start
    """
    for service in args.services:
        started = service_resume(service)
        if not started:
            raise Exception("{} didn't start cleanly.".format(service))
    with HookData()():
        kv().set('unit-paused', False)
    set_os_workload_status(CONFIGS,
                           REQUIRED_INTERFACES,
                           charm_func=assess_status)
Example no. 24
def resume(args):
    """Resume the Ceilometer services.

    @raises Exception should the service fail to start."""
    services = CEILOMETER_BASE_SERVICES + ceilometer_release_services()
    for service in services:
        if not service_resume(service):
            raise Exception("Failed to resume %s." % service)

    db = kv()
    db.set('unit-paused', False)
    db.flush()

    assess_status(CONFIGS)
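
Examples 22 through 24 pair service_resume with clearing a 'unit-paused' flag in the unit's kv store. The matching pause action simply inverts each step; a sketch reusing the names from the Ceilometer example above (CEILOMETER_BASE_SERVICES, ceilometer_release_services, kv, CONFIGS and assess_status are assumptions carried over from it), with service_pause again from charmhelpers:

def pause(args):
    """Pause the Ceilometer services and record the paused state.

    @raises Exception should any service fail to stop."""
    services = CEILOMETER_BASE_SERVICES + ceilometer_release_services()
    for service in services:
        if not service_pause(service):
            raise Exception("Failed to pause %s." % service)

    db = kv()
    db.set('unit-paused', True)
    db.flush()

    assess_status(CONFIGS)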
Example no. 25
def post_series_upgrade():
    service_resume('keepalived')
    service_resume('procps')
    clear_flag('keepalived.started')
Example no. 26
def post_series_upgrade():
    bag = EtcdDatabag()
    host.service_resume(bag.etcd_daemon)
Example no. 27
 def enable_services(self):
     '''Enable all services related to panko'''
     for svc in self.services:
         host.service_resume(svc)
Example no. 28
 def enable_services(self):
     for svc in self.services:
         host.service_resume(svc)
Example no. 29
def post_series_upgrade():
    service_resume("snap.kubelet.daemon")
    service_resume("snap.kube-proxy.daemon")
    kubectl("uncordon", get_node_name())
Example no. 30
def post_series_upgrade():
    service_resume("keepalived")
    service_resume("procps")
    clear_flag("keepalived.package.installed")
    clear_flag("keepalived.started")
Example no. 31
def enable_beat_on_boot(service):
    """ Enable the beat to start automaticaly during boot """
    # Remove any existing links first
    remove_beat_on_boot(service)
    service_resume(service)
Example no. 32
def post_series_upgrade():
    host.service_resume('docker')
    layer.docker_registry.start_registry()
def post_series_upgrade():
    host.service_resume('nginx')
Example no. 34
def resume():
    """Restart basic-auth-service service."""
    hookenv.status_set("maintenance",
                       "Restarting service: basic-auth-service.")
    host.service_resume(SERVICE_JOB)
    hookenv.status_set("active", "Service up and running.")
Example no. 35
def start():
    hookenv.log('Starting the_lounge')
    service_resume('thelounge')
Example no. 36
def install_package():
    """
    Install the base StorPool packages.
    """
    rdebug('the common repo has become available and '
           'we do have the configuration')

    rdebug('checking the kernel command line')
    with open('/proc/cmdline', mode='r') as f:
        ln = f.readline()
        if not ln:
            sputils.err('Could not read a single line from /proc/cmdline')
            return
        words = ln.split()

        # OK, so this is a bit naive, but it will do the job
        global KERNEL_REQUIRED_PARAMS
        missing = list(
            filter(lambda param: param not in words, KERNEL_REQUIRED_PARAMS))
        if missing:
            if sputils.bypassed('kernel_parameters'):
                hookenv.log(
                    'The "kernel_parameters" bypass is meant FOR '
                    'DEVELOPMENT ONLY!  DO NOT run a StorPool cluster '
                    'in production with it!', hookenv.WARNING)
            else:
                sputils.err('Missing kernel parameters: {missing}'.format(
                    missing=' '.join(missing)))
                return

    spstatus.npset('maintenance', 'obtaining the requested StorPool version')
    spver = spconfig.m().get('storpool_version', None)
    if spver is None or spver == '':
        rdebug('no storpool_version key in the charm config yet')
        return

    spstatus.npset('maintenance', 'installing the StorPool common packages')
    (err, newly_installed) = sprepo.install_packages({
        'storpool-cli': spver,
        'storpool-common': spver,
        'storpool-etcfiles': spver,
        'kmod-storpool-' + os.uname().release: spver,
        'python-storpool': spver,
    })
    if err is not None:
        rdebug('oof, we could not install packages: {err}'.format(err=err))
        rdebug('removing the package-installed state')
        return

    if newly_installed:
        rdebug('it seems we managed to install some packages: {names}'.format(
            names=newly_installed))
        sprepo.record_packages('storpool-common', newly_installed)
    else:
        rdebug('it seems that all the packages were installed already')

    rdebug('updating the kernel module dependencies')
    spstatus.npset('maintenance', 'updating the kernel module dependencies')
    subprocess.check_call(['depmod', '-a'])

    rdebug('gathering CPU information for the cgroup configuration')
    with open('/proc/cpuinfo', mode='r') as f:
        lns = f.readlines()
        all_cpus = sorted(
            map(
                lambda lst: int(lst[2]),
                filter(lambda lst: lst and lst[0] == 'processor',
                       map(lambda s: s.split(), lns))))
    if sputils.bypassed('very_few_cpus'):
        hookenv.log(
            'The "very_few_cpus" bypass is meant '
            'FOR DEVELOPMENT ONLY!  DO NOT run a StorPool cluster in '
            'production with it!', hookenv.WARNING)
        last_cpu = all_cpus[-1]
        all_cpus.extend([last_cpu, last_cpu, last_cpu])
    if len(all_cpus) < 4:
        sputils.err('Not enough CPUs, need at least 4')
        return
    tdata = {
        'cpu_rdma': str(all_cpus[0]),
        'cpu_beacon': str(all_cpus[1]),
        'cpu_block': str(all_cpus[2]),
        'cpu_rest': '{min}-{max}'.format(min=all_cpus[3], max=all_cpus[-1]),
    }

    rdebug('gathering system memory information for the cgroup configuration')
    with open('/proc/meminfo', mode='r') as f:
        while True:
            line = f.readline()
            if not line:
                sputils.err('Could not find MemTotal in /proc/meminfo')
                return
            words = line.split()
            if words[0] == 'MemTotal:':
                mem_total = int(words[1])
                unit = words[2].upper()
                if unit.startswith('K'):
                    mem_total = int(mem_total / 1024)
                elif unit.startswith('M'):
                    pass
                elif unit.startswith('G'):
                    mem_total = mem_total * 1024
                else:
                    sputils.err('Could not parse the "{u}" unit for '
                                'MemTotal in /proc/meminfo'.format(u=words[2]))
                    return
                break
    mem_system = 4 * 1024
    mem_user = 4 * 1024
    mem_storpool = 1 * 1024
    mem_kernel = 10 * 1024
    if sputils.bypassed('very_little_memory'):
        hookenv.log(
            'The "very_little_memory" bypass is meant '
            'FOR DEVELOPMENT ONLY!  DO NOT run a StorPool cluster in '
            'production with it!', hookenv.WARNING)
        mem_system = 1 * 1900
        mem_user = 1 * 512
        mem_storpool = 1 * 1024
        mem_kernel = 1 * 512
    mem_reserved = mem_system + mem_user + mem_storpool + mem_kernel
    if mem_total <= mem_reserved:
        sputils.err(
            'Not enough memory, only have {total}M, need {mem}M'.format(
                mem=mem_reserved, total=mem_total))
        return
    mem_machine = mem_total - mem_reserved
    tdata.update({
        'mem_system': mem_system,
        'mem_user': mem_user,
        'mem_storpool': mem_storpool,
        'mem_machine': mem_machine,
    })

    rdebug('generating the cgroup configuration: {tdata}'.format(tdata=tdata))
    if not os.path.isdir('/etc/cgconfig.d'):
        os.mkdir('/etc/cgconfig.d', mode=0o755)
    cgconfig_dir = '/usr/share/doc/storpool/examples/cgconfig/ubuntu1604'
    for (path, _, files) in os.walk(cgconfig_dir):
        for fname in files:
            src = path + '/' + fname
            dst = src.replace(cgconfig_dir, '')
            dstdir = os.path.dirname(dst)
            if not os.path.isdir(dstdir):
                os.makedirs(dstdir, mode=0o755)

            if fname in (
                    'machine.slice.conf',
                    'storpool.slice.conf',
                    'system.slice.conf',
                    'user.slice.conf',
                    'machine-cgsetup.conf',
            ):
                with tempfile.NamedTemporaryFile(dir='/tmp',
                                                 mode='w+t',
                                                 delete=True) as tempf:
                    rdebug('- generating {tempf} for {dst}'.format(
                        dst=dst, tempf=tempf.name))
                    templating.render(
                        source=fname,
                        target=tempf.name,
                        owner='root',
                        perms=0o644,
                        context=tdata,
                    )
                    rdebug('- generating {dst}'.format(dst=dst))
                    txn.install('-o', 'root', '-g', 'root', '-m', '644', '--',
                                tempf.name, dst)
            else:
                mode = '{:o}'.format(os.stat(src).st_mode & 0o777)
                rdebug('- installing {src} as {dst}'.format(src=src, dst=dst))
                txn.install('-o', 'root', '-g', 'root', '-m', mode, '--', src,
                            dst)

    rdebug('starting the cgconfig service')
    rdebug('- refreshing the systemctl service database')
    subprocess.check_call(['systemctl', 'daemon-reload'])
    rdebug('- starting the cgconfig service')
    try:
        host.service_resume('cgconfig')
    except Exception:
        pass

    rdebug('setting the package-installed state')
    reactive.set_state('storpool-common.package-installed')
    spstatus.npset('maintenance', '')
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if ch_utils.is_unit_paused_set():
        hookenv.log("Unit is pause or upgrading. Skipping config_changed",
                    hookenv.WARNING)
        return

    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if ch_utils.CompareOpenStackReleases(
            ch_utils.os_release('nova-common')) >= 'juno':
        try:
            ch_host.service_pause('neutron-server')
        except ValueError:
            # neutron-server service not installed, ignore.
            pass
    if hookenv.config('prefer-ipv6'):
        hookenv.status_set('maintenance', 'configuring ipv6')
        ncc_utils.setup_ipv6()
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

    global CONFIGS
    if not hookenv.config('action-managed-upgrade'):
        if ch_utils.openstack_upgrade_available('nova-common'):
            hookenv.status_set('maintenance', 'Running openstack upgrade')
            ncc_utils.do_openstack_upgrade(CONFIGS)
            for rid in hookenv.relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in hookenv.relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    ncc_utils.save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    filtered = ch_fetch.filter_installed_packages(
        ncc_utils.determine_packages())
    if filtered:
        ch_fetch.apt_install(filtered, fatal=True)

    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in hookenv.relation_ids('cluster'):
        cluster_joined(rid)
    update_nova_relation()

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if ch_utils.config_value_changed('region'):
        for rid in hookenv.relation_ids('cloud-compute'):
            for unit in hookenv.related_units(rid):
                compute_changed(rid, unit)

    ncc_utils.update_aws_compat_services()

    if hookenv.config('vendor-data'):
        ncc_utils.write_vendordata(hookenv.config('vendor-data'))
    if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
        ncc_utils.set_shared_metadatasecret()
    for rid in hookenv.relation_ids('ha'):
        ha_joined(rid)
    if (not ch_utils.is_unit_paused_set() and
            ncc_utils.is_console_auth_enabled()):
        ch_host.service_resume('nova-consoleauth')
Example no. 38
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        legacy = True
        if request_per_unit_key():
            legacy = False
            relation_set(relation_id=rid, key_name=key_name)
        try:
            rq = ceph.get_create_rgw_pools_rq(
                prefix=config('zone') or config('pool-prefix'))
        except ValueError as e:
            # The end user has most likely provided an invalid value for
            # a configuration option. Just log the traceback here, the
            # end user will be notified by assess_status() called at
            # the end of the hook execution.
            log('Caught ValueError, invalid value provided for '
                'configuration?: "{}"'.format(str(e)),
                level=DEBUG)
            return

        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            if multisite_deployment():
                process_multisite_relations()
            elif (ready_for_service(legacy=legacy) and is_leader()
                  and 'mon' in CONFIGS.complete_contexts()):
                # In a non multi-site deployment create the
                # zone using the default zonegroup and restart the service
                internal_url = '{}:{}'.format(
                    canonical_url(CONFIGS, INTERNAL),
                    listen_port(),
                )
                endpoints = [internal_url]
                zonegroup = 'default'
                zone = config('zone')
                existing_zones = multisite.list_zones()
                log('Existing zones {}'.format(existing_zones), level=DEBUG)
                if zone not in existing_zones:
                    log("Zone '{}' doesn't exist, creating".format(zone))
                    try:
                        multisite.create_zone(zone,
                                              endpoints=endpoints,
                                              default=True,
                                              master=True,
                                              zonegroup=zonegroup)
                    except subprocess.CalledProcessError as e:
                        if 'File exists' in e.stderr.decode('UTF-8'):
                            # NOTE(lourot): may have been created in the
                            # background by the Rados Gateway daemon, see
                            # lp:1856106
                            log("Zone '{}' existed already after all".format(
                                zone))
                        else:
                            raise

                    existing_zones = multisite.list_zones(retry_on_empty=True)
                    log('Existing zones {}'.format(existing_zones),
                        level=DEBUG)
                    if zone not in existing_zones:
                        raise RuntimeError(
                            "Could not create zone '{}'".format(zone))

                    service_restart(service_name())
        else:
            send_request_if_needed(rq, relation='mon')
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if ch_utils.is_unit_paused_set():
        hookenv.log("Unit is pause or upgrading. Skipping config_changed",
                    hookenv.WARNING)
        return

    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if ch_utils.CompareOpenStackReleases(
            ch_utils.os_release('nova-common')) >= 'juno':
        try:
            ch_host.service_pause('neutron-server')
        except ValueError:
            # neutron-server service not installed, ignore.
            pass
    if hookenv.config('prefer-ipv6'):
        hookenv.status_set('maintenance', 'configuring ipv6')
        ncc_utils.setup_ipv6()
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

    global CONFIGS
    if not hookenv.config('action-managed-upgrade'):
        if ch_utils.openstack_upgrade_available('nova-common'):
            hookenv.status_set('maintenance', 'Running openstack upgrade')
            ncc_utils.do_openstack_upgrade(CONFIGS)
            for rid in hookenv.relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in hookenv.relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    ncc_utils.save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    filtered = ch_fetch.filter_installed_packages(
        ncc_utils.determine_packages())
    if filtered:
        ch_fetch.apt_install(filtered, fatal=True)

    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in hookenv.relation_ids('cluster'):
        cluster_joined(rid)
    update_nova_relation()

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if ch_utils.config_value_changed('region'):
        for rid in hookenv.relation_ids('cloud-compute'):
            set_region_on_relation_from_config(rid)

    ncc_utils.update_aws_compat_services()

    if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
        ncc_utils.set_shared_metadatasecret()
    for rid in hookenv.relation_ids('ha'):
        ha_joined(rid)
    if (not ch_utils.is_unit_paused_set() and
            ncc_utils.is_console_auth_enabled()):
        ch_host.service_resume('nova-consoleauth')
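
A guard that recurs throughout these examples is checking is_unit_paused_set() before resuming services, so a deliberately paused unit is never restarted behind the operator's back. A minimal sketch of that guard as a standalone helper, using the charmhelpers modules the examples above already rely on; the helper name resume_unless_paused is illustrative only:

import charmhelpers.contrib.openstack.utils as ch_utils
import charmhelpers.core.host as ch_host


def resume_unless_paused(services):
    """Resume each service unless the operator has paused the unit."""
    if ch_utils.is_unit_paused_set():
        return
    for svc in services:
        # service_resume() re-enables the service at boot and starts it.
        ch_host.service_resume(svc)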