def __call__(self):
    api_settings = NeutronAPIContext()()
    ctxt = {}
    if config('run-internal-router') == 'leader':
        ctxt['handle_internal_only_router'] = eligible_leader(None)

    if config('run-internal-router') == 'all':
        ctxt['handle_internal_only_router'] = True

    if config('run-internal-router') == 'none':
        ctxt['handle_internal_only_router'] = False

    if config('external-network-id'):
        ctxt['ext_net_id'] = config('external-network-id')

    if not config('ext-port') and not config('external-network-id'):
        ctxt['external_configuration_new'] = True

    if config('plugin'):
        ctxt['plugin'] = config('plugin')

    if api_settings['enable_dvr']:
        ctxt['agent_mode'] = 'dvr_snat'
    else:
        ctxt['agent_mode'] = 'legacy'

    ctxt['rpc_response_timeout'] = api_settings['rpc_response_timeout']
    ctxt['report_interval'] = api_settings['report_interval']
    ctxt['use_l3ha'] = api_settings['enable_l3ha']

    l3_extension_plugins = api_settings.get('l3_extension_plugins', [])
    ctxt['l3_extension_plugins'] = ','.join(l3_extension_plugins)

    return ctxt

def __call__(self):
    api_settings = NeutronAPIContext()()
    ctxt = {}
    if config('run-internal-router') == 'leader':
        ctxt['handle_internal_only_router'] = eligible_leader(None)

    if config('run-internal-router') == 'all':
        ctxt['handle_internal_only_router'] = True

    if config('run-internal-router') == 'none':
        ctxt['handle_internal_only_router'] = False

    if config('external-network-id'):
        ctxt['ext_net_id'] = config('external-network-id')

    if not config('ext-port') and not config('external-network-id'):
        ctxt['external_configuration_new'] = True

    if config('plugin'):
        ctxt['plugin'] = config('plugin')

    if api_settings['enable_dvr']:
        ctxt['agent_mode'] = 'dvr_snat'
    else:
        ctxt['agent_mode'] = 'legacy'

    return ctxt

def __call__(self):
    api_settings = NeutronAPIContext()()
    ctxt = {}
    if config('run-internal-router') == 'leader':
        ctxt['handle_internal_only_router'] = eligible_leader(None)

    if config('run-internal-router') == 'all':
        ctxt['handle_internal_only_router'] = True

    if config('run-internal-router') == 'none':
        ctxt['handle_internal_only_router'] = False

    if config('external-network-id'):
        ctxt['ext_net_id'] = config('external-network-id')

    if not config('ext-port') and not config('external-network-id'):
        ctxt['external_configuration_new'] = True

    if config('plugin'):
        ctxt['plugin'] = config('plugin')

    if api_settings['enable_dvr']:
        ctxt['agent_mode'] = 'dvr_snat'
    else:
        ctxt['agent_mode'] = 'legacy'

    return ctxt

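# --- Illustration (not charm code): a self-contained sketch of how the
# 'run-internal-router' option in the contexts above maps onto the
# handle_internal_only_router flag. stub_eligible_leader is a hypothetical
# stand-in for charmhelpers' eligible_leader().

def stub_eligible_leader(resource):
    return True  # pretend this unit currently holds cluster leadership

def handle_ior(run_internal_router, eligible=stub_eligible_leader):
    if run_internal_router == 'leader':
        return eligible(None)  # only the elected leader hosts the router
    if run_internal_router == 'all':
        return True            # every unit hosts the router
    if run_internal_router == 'none':
        return False           # no unit hosts the router
    return None                # unset: the key is omitted from the context

assert handle_ior('leader') is True
assert handle_ior('all') is True
assert handle_ior('none') is False
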
def do_openstack_upgrade(configs):
    """Perform an upgrade of cinder.

    Takes care of upgrading packages, rewriting configs + database
    migration and potentially any other post-upgrade actions.

    :param configs: The charm's main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    juju_log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_install(packages=determine_packages(),
                options=dpkg_opts,
                fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    if eligible_leader(CLUSTER_RES):
        migrate_database()

def balance_rings():
    '''handle doing ring balancing and distribution.'''
    new_ring = False
    for ring in SWIFT_RINGS.itervalues():
        if balance_ring(ring):
            log('Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        return

    for ring in SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
                        os.path.join(WWW_DIR, f))

    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        trigger = uuid.uuid4()

        if cluster.is_clustered():
            hostname = config('vip')
        else:
            hostname = unit_get('private-address')

        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)

    service_restart('swift-proxy')

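# --- Illustration (not charm code): a worked example, with made-up values,
# of the rings_url that balance_rings() above broadcasts. The WWW_DIR value
# and hostname here are assumptions for the sake of the sketch.
WWW_DIR = '/var/www/swift-rings'
path = WWW_DIR.split('/var/www/')[1]           # 'swift-rings'
hostname = '10.0.0.50'                         # vip or private-address
rings_url = 'http://%s/%s' % (hostname, path)
print(rings_url)                               # http://10.0.0.50/swift-rings
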
def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(CINDER_CONF)

    if eligible_leader(CLUSTER_RES):
        juju_log('Cluster leader, performing db sync')
        migrate_database()

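# --- Illustration (not charm code): a toy model of the leader-gating
# pattern used in db_changed() above. Every peer unit runs the hook, but
# only the unit for which eligible_leader() is true performs the one-time
# database migration.
calls = []

def migrate_database():
    calls.append('migrate')

def hook(is_leader):
    if is_leader:  # eligible_leader(CLUSTER_RES) in the real hook
        migrate_database()

for unit_is_leader in (True, False, False):  # three peer units
    hook(unit_is_leader)

assert calls == ['migrate']  # the sync ran exactly once, on the leader
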
def cluster_departed():
    if config('plugin') == 'nvp':
        log('Unable to re-assign agent resources for failed nodes with nvp',
            level=WARNING)
        return
    if config('plugin') == 'n1kv':
        log('Unable to re-assign agent resources for failed nodes with n1kv',
            level=WARNING)
        return
    if eligible_leader(None):
        reassign_agent_resources()
        CONFIGS.write_all()

def image_service_joined(relation_id=None):
    if not eligible_leader(CLUSTER_RES):
        return

    relation_data = {
        'glance-api-server': canonical_url(CONFIGS) + ":9292"
    }

    juju_log("%s: image-service_joined: To peer glance-api-server=%s" %
             (CHARM, relation_data['glance-api-server']))

    relation_set(relation_id=relation_id, **relation_data)

def cluster_departed():
    if config('plugin') in ['nvp', 'nsx']:
        log('Unable to re-assign agent resources for'
            ' failed nodes with nvp|nsx', level=WARNING)
        return
    if config('plugin') == 'n1kv':
        log('Unable to re-assign agent resources for failed nodes with n1kv',
            level=WARNING)
        return
    if not config('ha-legacy-mode') and eligible_leader(None):
        reassign_agent_resources()
        CONFIGS.write_all()

def keystone_joined(relation_id=None):
    if not eligible_leader(CLUSTER_RES):
        juju_log('Deferring keystone_joined() to service leader.')
        return

    url = canonical_url(CONFIGS) + ":9292"
    relation_data = {
        'service': 'glance',
        'region': config('region'),
        'public_url': url,
        'admin_url': url,
        'internal_url': url,
    }

    relation_set(relation_id=relation_id, **relation_data)

def ha_relation_changed():
    clustered = relation_get('clustered')
    if not clustered or clustered in [None, 'None', '']:
        juju_log('ha_changed: hacluster subordinate is not fully clustered.')
        return
    if not eligible_leader(CLUSTER_RES):
        juju_log('ha_changed: hacluster complete but we are not leader.')
        return

    # reconfigure endpoint in keystone to point to clustered VIP.
    [keystone_joined(rid) for rid in relation_ids('identity-service')]

    # notify glance client services of reconfigured URL.
    [image_service_joined(rid) for rid in relation_ids('image-service')]

def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(NOVA_CONF)
    if network_manager() in ['neutron', 'quantum']:
        plugin = neutron_plugin()
        # DB config might have been moved to main neutron.conf in H?
        CONFIGS.write(neutron_plugin_attribute(plugin, 'config'))

    if eligible_leader(CLUSTER_RES):
        migrate_database()
        log('Triggering remote cloud-compute restarts.')
        [compute_joined(rid=rid, remote_restart=True)
         for rid in relation_ids('cloud-compute')]

def __call__(self):
    ctxt = {}
    if config('run-internal-router') == 'leader':
        ctxt['handle_internal_only_router'] = eligible_leader(None)

    if config('run-internal-router') == 'all':
        ctxt['handle_internal_only_router'] = True

    if config('run-internal-router') == 'none':
        ctxt['handle_internal_only_router'] = False

    if config('external-network-id'):
        ctxt['ext_net_id'] = config('external-network-id')

    if config('plugin'):
        ctxt['plugin'] = config('plugin')

    return ctxt

def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    svc = service_name()
    if not ensure_ceph_keyring(service=svc, user='******', group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(CEPH_CONF)
    CONFIGS.write(CINDER_CONF)
    set_ceph_env_variables(service=svc)

    if eligible_leader(CLUSTER_RES):
        _config = config()
        ensure_ceph_pool(service=svc,
                         replicas=_config['ceph-osd-replication-count'])

def identity_joined(rid=None):
    if not eligible_leader(CLUSTER_RES):
        return

    conf = config()

    port = conf['api-listening-port']
    url = canonical_url(CONFIGS) + ':%s/v1/$(tenant_id)s' % port

    settings = {
        'region': conf['region'],
        'service': 'cinder',
        'public_url': url,
        'internal_url': url,
        'admin_url': url,
    }

    relation_set(relation_id=rid, **settings)

def do_openstack_upgrade(configs):
    new_src = config("openstack-origin")
    new_os_rel = get_os_codename_install_source(new_src)
    log("Performing OpenStack upgrade to %s." % (new_os_rel))

    configure_installation_source(new_src)
    apt_update()

    dpkg_opts = ["--option", "Dpkg::Options::=--force-confnew",
                 "--option", "Dpkg::Options::=--force-confdef"]
    apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    if eligible_leader(CLUSTER_RES):
        migrate_database()

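# --- Illustration (not charm code): roughly the apt-get invocation that
# the dpkg_opts above translate to, assuming charmhelpers' apt_install
# passes its options straight through to apt-get; the package name here
# is purely illustrative.
cmd = ['apt-get', '--assume-yes',
       '--option', 'Dpkg::Options::=--force-confnew',  # install the packaged conffile version
       '--option', 'Dpkg::Options::=--force-confdef',  # take the default action, never prompt
       'install', 'cinder-api']
print(' '.join(cmd))
# import subprocess; subprocess.check_call(cmd)  # would really upgrade packages
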
def compute_joined(rid=None, remote_restart=False):
    if not eligible_leader(CLUSTER_RES):
        return
    rel_settings = {
        'network_manager': network_manager(),
        'volume_service': volume_service(),
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': unit_get('private-address'),
    }

    # update relation setting if we're attempting to restart remote
    # services
    if remote_restart:
        rel_settings['restart_trigger'] = str(uuid.uuid4())

    rel_settings.update(keystone_compute_settings())
    relation_set(relation_id=rid, **rel_settings)

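# --- Illustration (not charm code): the restart_trigger above works
# because any fresh uuid changes the relation data, which in turn fires
# -changed hooks on the remote cloud-compute units.
import uuid

a, b = str(uuid.uuid4()), str(uuid.uuid4())
assert a != b  # each call broadcasts a new value, guaranteeing a change
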
def __call__(self):
    api_settings = NeutronAPIContext()()
    ctxt = {}
    if config('run-internal-router') == 'leader':
        ctxt['handle_internal_only_router'] = eligible_leader(None)

    if config('run-internal-router') == 'all':
        ctxt['handle_internal_only_router'] = True

    if config('run-internal-router') == 'none':
        ctxt['handle_internal_only_router'] = False

    if config('external-network-id'):
        ctxt['ext_net_id'] = config('external-network-id')

    if not config('ext-port') and not config('external-network-id'):
        ctxt['external_configuration_new'] = True

    if config('plugin'):
        ctxt['plugin'] = config('plugin')

    if api_settings['enable_dvr']:
        ctxt['agent_mode'] = 'dvr_snat'
    else:
        ctxt['agent_mode'] = 'legacy'

    ctxt['rpc_response_timeout'] = api_settings['rpc_response_timeout']
    ctxt['report_interval'] = api_settings['report_interval']
    ctxt['use_l3ha'] = api_settings['enable_l3ha']

    cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))

    l3_extension_plugins = api_settings.get('l3_extension_plugins', [])

    # per Change-Id If1b332eb0f581e9acba111f79ba578a0b7081dd2
    # only enable it for stein although fwaasv2 was added in Queens
    is_stein = cmp_os_release >= 'stein'
    if is_stein:
        l3_extension_plugins.append('fwaas_v2')

    if is_stein and api_settings.get('enable_nfg_logging'):
        l3_extension_plugins.append('fwaas_v2_log')

    ctxt['l3_extension_plugins'] = ','.join(l3_extension_plugins)

    return ctxt

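# --- Illustration (not charm code): with assumed values, the string
# rendered from l3_extension_plugins above; presumably the l3_agent.ini
# template feeds it to the L3 agent's comma-separated `extensions` option.
l3_extension_plugins = ['fwaas_v2', 'fwaas_v2_log']
print('extensions = %s' % ','.join(l3_extension_plugins))
# extensions = fwaas_v2,fwaas_v2_log
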
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return

    service = service_name()
    if not ensure_ceph_keyring(service=service, user='******', group='glance'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(GLANCE_API_CONF)
    CONFIGS.write(CEPH_CONF)

    if eligible_leader(CLUSTER_RES):
        _config = config()
        ensure_ceph_pool(service=service,
                         replicas=_config['ceph-osd-replication-count'])

def keystone_joined(relid=None):
    if not cluster.eligible_leader(SWIFT_HA_RES):
        return

    if cluster.is_clustered():
        hostname = config('vip')
    else:
        hostname = unit_get('private-address')

    port = config('bind-port')
    if cluster.https():
        proto = 'https'
    else:
        proto = 'http'

    admin_url = '%s://%s:%s' % (proto, hostname, port)
    internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url

    relation_set(service='swift',
                 region=config('region'),
                 public_url=public_url,
                 internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'),
                 relation_id=relid)

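# --- Illustration (not charm code): a worked example, with made-up values,
# of the endpoint URLs keystone_joined() above registers when the proxy is
# clustered with https enabled.
proto, hostname, port = 'https', '10.0.0.100', 8080
admin_url = '%s://%s:%s' % (proto, hostname, port)
internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
print(admin_url)     # https://10.0.0.100:8080
print(public_url)    # https://10.0.0.100:8080/v1/AUTH_$(tenant_id)s
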
def db_changed():
    rel = get_os_codename_package("glance-common")

    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if eligible_leader(CLUSTER_RES):
        if rel == "essex":
            status = call(['glance-manage', 'db_version'])
            if status != 0:
                juju_log('Setting version_control to 0')
                check_call(["glance-manage", "version_control", "0"])

        juju_log('Cluster leader, performing db sync')
        migrate_database()

def quantum_joined(rid=None):
    if not eligible_leader(CLUSTER_RES):
        return

    url = canonical_url(CONFIGS) + ':9696'
    # XXX: Can we rename to neutron_*?
    rel_settings = {
        'quantum_host': urlparse(url).hostname,
        'quantum_url': url,
        'quantum_port': 9696,
        'quantum_plugin': neutron_plugin(),
        'region': config('region')
    }

    # inform quantum about local keystone auth config
    ks_auth_config = _auth_config()
    rel_settings.update(ks_auth_config)

    # must pass the keystone CA cert, if it exists.
    ks_ca = keystone_ca_cert_b64()
    if ks_auth_config and ks_ca:
        rel_settings['ca_cert'] = ks_ca

    relation_set(relation_id=rid, **rel_settings)

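# --- Illustration (not charm code): relation data is plain strings, so a
# CA certificate is base64-encoded before being set, as keystone_ca_cert_b64()
# above presumably does; the PEM payload here is a placeholder.
import base64

ca_pem = b'-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n'
print(base64.b64encode(ca_pem).decode())
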
def identity_joined(rid=None):
    if not eligible_leader(CLUSTER_RES):
        return

    base_url = canonical_url(CONFIGS)
    relation_set(relation_id=rid, **determine_endpoints(base_url))