def identity_joined(rid=None):
    """Register the heat and heat-cfn endpoints with keystone."""
    # Resolve each endpoint-type base URL once.
    bases = {
        'public': canonical_url(CONFIGS, PUBLIC),
        'internal': canonical_url(CONFIGS, INTERNAL),
        'admin': canonical_url(CONFIGS, ADMIN),
    }
    # keystone substitutes $(tenant_id)s per request.
    api_urls = {scope: '{}:8004/v1/$(tenant_id)s'.format(base)
                for scope, base in bases.items()}
    cfn_urls = {scope: '{}:8000/v1'.format(base)
                for scope, base in bases.items()}
    region = config('region')
    relation_data = {
        'heat_service': 'heat',
        'heat_region': region,
        'heat_public_url': api_urls['public'],
        'heat_admin_url': api_urls['admin'],
        'heat_internal_url': api_urls['internal'],
        'heat-cfn_service': 'heat-cfn',
        'heat-cfn_region': region,
        'heat-cfn_public_url': cfn_urls['public'],
        'heat-cfn_admin_url': cfn_urls['admin'],
        'heat-cfn_internal_url': cfn_urls['internal'],
    }
    relation_set(relation_id=rid, **relation_data)
def identity_joined(rid=None):
    """Register the heat and heat-cfn endpoints with keystone."""
    # Resolve each endpoint-type base URL once.
    bases = {
        'public': canonical_url(CONFIGS, PUBLIC),
        'internal': canonical_url(CONFIGS, INTERNAL),
        'admin': canonical_url(CONFIGS, ADMIN),
    }
    # keystone substitutes $(tenant_id)s per request.
    api_urls = {scope: '{}:8004/v1/$(tenant_id)s'.format(base)
                for scope, base in bases.items()}
    cfn_urls = {scope: '{}:8000/v1'.format(base)
                for scope, base in bases.items()}
    region = config('region')
    relation_data = {
        'heat_service': 'heat',
        'heat_region': region,
        'heat_public_url': api_urls['public'],
        'heat_admin_url': api_urls['admin'],
        'heat_internal_url': api_urls['internal'],
        'heat-cfn_service': 'heat-cfn',
        'heat-cfn_region': region,
        'heat-cfn_public_url': cfn_urls['public'],
        'heat-cfn_admin_url': cfn_urls['admin'],
        'heat-cfn_internal_url': cfn_urls['internal'],
    }
    relation_set(relation_id=rid, **relation_data)
def keystone_joined(relid=None):
    """Register ceilometer endpoints, unless the release is >= Queens."""
    cmp_codename = CompareOpenStackReleases(
        get_os_codename_install_source(config('openstack-origin')))
    if cmp_codename >= 'queens':
        # From Queens onwards keystone is reached over the
        # identity-credentials relation instead, so skip registration.
        log(
            "For OpenStack version Queens and onwards Ceilometer Charm "
            "requires the 'identity-credentials' relation to Keystone, not "
            "the 'identity-service' relation.",
            level=WARNING)
        log('Skipping endpoint registration for >= Queens', level=DEBUG)
        return
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        log('Defering registration until clustered', level=DEBUG)
        return
    urls = {
        name: "{}:{}".format(canonical_url(CONFIGS, ep), CEILOMETER_PORT)
        for name, ep in (('public_url', PUBLIC),
                         ('admin_url', ADMIN),
                         ('internal_url', INTERNAL))
    }
    relation_set(relation_id=relid,
                 service=CEILOMETER_SERVICE,
                 requested_roles=CEILOMETER_ROLE,
                 region=config("region"),
                 **urls)
def identity_joined(rid=None):
    """Hand this service's endpoint catalogue to keystone."""
    urls = [canonical_url(CONFIGS, ep) for ep in (PUBLIC, INTERNAL, ADMIN)]
    relation_set(relation_id=rid, **determine_endpoints(*urls))
def identity_joined(rid=None):
    """Hand this service's endpoint catalogue to keystone."""
    urls = [canonical_url(CONFIGS, ep) for ep in (PUBLIC, INTERNAL, ADMIN)]
    relation_set(relation_id=rid, **determine_endpoints(*urls))
def identity_joined(rid=None, relation_trigger=False):
    """Register neutron endpoints, nulling legacy quantum entries."""
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        log('Defering registration until clustered', level=DEBUG)
        return
    port = api_port('neutron-server')
    rel_settings = {
        'neutron_service': 'neutron',
        'neutron_region': config('region'),
        'neutron_public_url': '{}:{}'.format(canonical_url(CONFIGS, PUBLIC),
                                             port),
        'neutron_admin_url': '{}:{}'.format(canonical_url(CONFIGS, ADMIN),
                                            port),
        'neutron_internal_url': '{}:{}'.format(canonical_url(CONFIGS,
                                                             INTERNAL),
                                               port),
    }
    # Explicitly null the quantum-era keys (presumably clears any
    # previously registered values on the relation).
    for legacy in ('service', 'region', 'public_url', 'admin_url',
                   'internal_url'):
        rel_settings['quantum_{}'.format(legacy)] = None
    if relation_trigger:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def identity_joined(rid=None):
    """Register the vsm endpoints with keystone when the api is enabled."""
    juju_log('**********identity-service-relation-joined')
    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return
    template = '{}:{}/v1/$(tenant_id)s'
    port = config('api-listening-port')
    public_url = template.format(canonical_url(CONFIGS, PUBLIC), port)
    internal_url = template.format(canonical_url(CONFIGS, INTERNAL), port)
    admin_url = template.format(canonical_url(CONFIGS, ADMIN), port)
    # Unprefixed keys are nulled explicitly; the vsm_* keys carry the
    # actual endpoint data.
    settings = {
        'region': None,
        'service': None,
        'public_url': None,
        'internal_url': None,
        'admin_url': None,
        'vsm_region': config('region'),
        'vsm_service': 'vsm',
        'vsm_public_url': public_url,
        'vsm_internal_url': internal_url,
        'vsm_admin_url': admin_url,
    }
    juju_log("**********settings is %s" % str(settings))
    juju_log("**********relation_id is %s" % str(rid))
    relation_set(relation_id=rid, **settings)
def identity_joined(rid=None, relation_trigger=False):
    """Register neutron endpoints, nulling legacy quantum entries."""
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        log('Defering registration until clustered', level=DEBUG)
        return
    port = api_port('neutron-server')
    rel_settings = {
        'neutron_service': 'neutron',
        'neutron_region': config('region'),
        'neutron_public_url': '{}:{}'.format(canonical_url(CONFIGS, PUBLIC),
                                             port),
        'neutron_admin_url': '{}:{}'.format(canonical_url(CONFIGS, ADMIN),
                                            port),
        'neutron_internal_url': '{}:{}'.format(canonical_url(CONFIGS,
                                                             INTERNAL),
                                               port),
    }
    # Explicitly null the quantum-era keys (presumably clears any
    # previously registered values on the relation).
    for legacy in ('service', 'region', 'public_url', 'admin_url',
                   'internal_url'):
        rel_settings['quantum_{}'.format(legacy)] = None
    if relation_trigger:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def identity_joined(rid=None):
    """Advertise endpoints to keystone once any HA cluster is formed."""
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        log('Defering registration until clustered', level=DEBUG)
        return
    endpoints = determine_endpoints(canonical_url(CONFIGS, PUBLIC),
                                    canonical_url(CONFIGS, INTERNAL),
                                    canonical_url(CONFIGS, ADMIN))
    relation_set(relation_id=rid, **endpoints)
def identity_joined(rid=None):
    """Register endpoints with keystone (waits for any HA cluster)."""
    if hookenv.config('vip') and not ch_cluster.is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        hookenv.log('Defering registration until clustered',
                    level=hookenv.DEBUG)
        return
    urls = [ch_ip.canonical_url(CONFIGS, ep)
            for ep in (ch_ip.PUBLIC, ch_ip.INTERNAL, ch_ip.ADMIN)]
    hookenv.relation_set(relation_id=rid,
                         **ncc_utils.determine_endpoints(*urls))
def keystone_joined(relation_id=None):
    """Publish the glance API endpoints on the identity relation."""
    # Glance API listens on 9292 for all three endpoint types.
    urls = {
        '{}_url'.format(name): '{}:9292'.format(canonical_url(CONFIGS, ep))
        for name, ep in (('public', PUBLIC), ('admin', ADMIN),
                         ('internal', INTERNAL))
    }
    relation_set(relation_id=relation_id,
                 service='glance',
                 region=config('region'),
                 **urls)
def keystone_joined(relation_id=None):
    """Publish the glance API endpoints on the identity relation."""
    # Glance API listens on 9292 for all three endpoint types.
    urls = {
        '{}_url'.format(name): '{}:9292'.format(canonical_url(CONFIGS, ep))
        for name, ep in (('public', PUBLIC), ('admin', ADMIN),
                         ('internal', INTERNAL))
    }
    relation_set(relation_id=relation_id,
                 service='glance',
                 region=config('region'),
                 **urls)
def identity_joined(rid=None):
    """Register endpoints with keystone (waits for any HA cluster)."""
    if hookenv.config('vip') and not ch_cluster.is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        hookenv.log('Defering registration until clustered',
                    level=hookenv.DEBUG)
        return
    urls = [ch_ip.canonical_url(CONFIGS, ep)
            for ep in (ch_ip.PUBLIC, ch_ip.INTERNAL, ch_ip.ADMIN)]
    hookenv.relation_set(relation_id=rid,
                         **ncc_utils.determine_endpoints(*urls))
def keystone_joined(relid=None):
    """Register the swift proxy endpoints with keystone."""
    port = config('bind-port')
    # The admin endpoint omits the tenant path; keystone substitutes
    # $(tenant_id)s on the public/internal URLs per request.
    admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
                    (canonical_url(CONFIGS, INTERNAL), port))
    public_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
                  (canonical_url(CONFIGS, PUBLIC), port))
    relation_set(relation_id=relid,
                 service='swift',
                 region=config('region'),
                 public_url=public_url,
                 internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'))
def neutron_settings():
    """Assemble quantum_* relation settings describing the neutron API.

    Prefers details advertised by a related neutron-api unit; otherwise
    falls back to the locally configured neutron server.
    """
    settings = {}
    if is_relation_made('neutron-api', 'neutron-plugin'):
        api_info = NeutronAPIContext()()
        settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': api_info['neutron_plugin'],
            'region': config('region'),
            'quantum_security_groups': api_info['neutron_security_groups'],
            'quantum_url': api_info['neutron_url'],
        })
    else:
        settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': "{}:{}".format(
                canonical_url(CONFIGS, INTERNAL),
                str(api_port('neutron-server'))),
        })
    # Derive host/port from whichever URL was chosen above.
    parsed = urlparse(settings['quantum_url'])
    settings['quantum_host'] = parsed.hostname
    settings['quantum_port'] = parsed.port
    return settings
def nova_cell_relation_joined(rid=None, remote_restart=True):
    """Hand the nova API URL to a child cell, optionally forcing a restart."""
    settings = {'nova_url': "%s:8774/v2" % canonical_url(CONFIGS, INTERNAL)}
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **settings)
def neutron_api_relation_joined(rid=None):
    """Publish neutron API details (URL, plugin, SRIOV, readiness)."""
    neutron_url = '%s:%s' % (canonical_url(CONFIGS, INTERNAL),
                             api_port('neutron-server'))
    relation_data = {
        'enable-sriov': config('enable-sriov'),
        'neutron-url': neutron_url,
        'neutron-plugin': config('neutron-plugin'),
        'neutron-security-groups':
            "yes" if config('neutron-security-groups') else "no",
        'neutron-api-ready': "yes" if is_api_ready(CONFIGS) else "no",
    }
    # LP Bug#1805645
    dns_domain = get_dns_domain()
    if dns_domain:
        relation_data['dns-domain'] = dns_domain
    relation_set(relation_id=rid, **relation_data)
    # Nova-cc may have grabbed the neutron endpoint so kick identity-service
    # relation to register that its here
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id, relation_trigger=True)
def neutron_settings():
    """Assemble quantum_* relation settings describing the neutron API.

    Prefers details advertised by a related neutron-api unit; otherwise
    falls back to the locally configured neutron server.
    """
    settings = {}
    if is_relation_made('neutron-api', 'neutron-plugin'):
        api_info = NeutronAPIContext()()
        settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': api_info['neutron_plugin'],
            'region': config('region'),
            'quantum_security_groups': api_info['neutron_security_groups'],
            'quantum_url': api_info['neutron_url'],
        })
    else:
        settings.update({
            # XXX: Rename these relations settings?
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': "{}:{}".format(
                canonical_url(CONFIGS, INTERNAL),
                str(api_port('neutron-server'))),
        })
    # Derive host/port from whichever URL was chosen above.
    parsed = urlparse(settings['quantum_url'])
    settings['quantum_host'] = parsed.hostname
    settings['quantum_port'] = parsed.port
    return settings
def object_store_joined(relation_id=None):
    """Tell the remote service where the swift proxy listens."""
    swift_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                               config('bind-port'))
    relation_set(relation_id=relation_id, **{'swift-url': swift_url})
def keystone_joined(relation_id=None):
    """Register the glance endpoints once any HA cluster is formed."""
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        juju_log('Defering registration until clustered', level=DEBUG)
        return
    urls = {
        '{}_url'.format(name): '{}:9292'.format(canonical_url(CONFIGS, ep))
        for name, ep in (('public', PUBLIC), ('admin', ADMIN),
                         ('internal', INTERNAL))
    }
    relation_set(relation_id=relation_id,
                 service='glance',
                 region=config('region'),
                 **urls)
def nova_cell_relation_joined(rid=None, remote_restart=True):
    """Hand the nova API URL to a child cell, optionally forcing a restart."""
    settings = {'nova_url': "%s:8774/v2" % canonical_url(CONFIGS, INTERNAL)}
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **settings)
def object_store_joined(relation_id=None):
    """Tell the remote service where the swift proxy listens."""
    swift_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                               config('bind-port'))
    relation_set(relation_id=relation_id, **{'swift-url': swift_url})
def identity_joined(relid=None):
    """Register radosgw's swift-compatible endpoints with keystone."""
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        # Hard requirement: abort the hook on older ceph.
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)
    port = config('port')
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = '%s:%s/swift/v1' % (canonical_url(CONFIGS, INTERNAL),
                                       port)
    public_url = '%s:%s/swift/v1' % (canonical_url(CONFIGS, PUBLIC), port)
    relation_set(relation_id=relid,
                 service='swift',
                 region=config('region'),
                 public_url=public_url,
                 internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'))
def identity_joined(rid=None, relation_trigger=False):
    """Register the neutron service endpoints with keystone."""
    rel_settings = {
        'service': 'neutron',
        'region': config('region'),
    }
    # The three endpoint URLs are the bare canonical URLs (no port/path).
    for key, ep_type in (('public_url', PUBLIC),
                         ('admin_url', ADMIN),
                         ('internal_url', INTERNAL)):
        rel_settings[key] = '{}'.format(canonical_url(CONFIGS, ep_type))
    if relation_trigger:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def identity_joined(rid=None, relation_trigger=False):
    """Register the quantum endpoints with keystone."""
    port = api_port('neutron-server')
    rel_settings = {
        'quantum_service': 'quantum',
        'quantum_region': config('region'),
        'quantum_public_url':
            '{}:{}'.format(canonical_url(CONFIGS, PUBLIC), port),
        'quantum_admin_url':
            '{}:{}'.format(canonical_url(CONFIGS, ADMIN), port),
        'quantum_internal_url':
            '{}:{}'.format(canonical_url(CONFIGS, INTERNAL), port),
    }
    if relation_trigger:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def keystone_joined(relation_id=None):
    """Register the glance endpoints once any HA cluster is formed."""
    if config('vip') and not is_clustered():
        # A VIP is configured but the cluster has not formed yet.
        juju_log('Defering registration until clustered', level=DEBUG)
        return
    urls = {
        '{}_url'.format(name): '{}:9292'.format(canonical_url(CONFIGS, ep))
        for name, ep in (('public', PUBLIC), ('admin', ADMIN),
                         ('internal', INTERNAL))
    }
    relation_set(relation_id=relation_id,
                 service='glance',
                 region=config('region'),
                 **urls)
def identity_joined(relid=None):
    """Register radosgw's swift-compatible endpoints with keystone."""
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        # Hard requirement: abort the hook on older ceph.
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)
    port = config('port')
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = '%s:%s/swift/v1' % (canonical_url(CONFIGS, INTERNAL),
                                       port)
    public_url = '%s:%s/swift/v1' % (canonical_url(CONFIGS, PUBLIC), port)
    relation_set(relation_id=relid,
                 service='swift',
                 region=config('region'),
                 public_url=public_url,
                 internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'))
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Hand nova URL (plus cell info) to a joining neutron-api unit."""
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    rel_settings = {
        'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2",
    }
    cell_type = get_cell_type()
    if cell_type:
        rel_settings['cell_type'] = cell_type
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **rel_settings)
def identity_joined(rid=None, relation_trigger=False):
    """Register the quantum endpoints with keystone."""
    port = api_port('neutron-server')
    rel_settings = {
        'quantum_service': 'quantum',
        'quantum_region': config('region'),
        'quantum_public_url':
            '{}:{}'.format(canonical_url(CONFIGS, PUBLIC), port),
        'quantum_admin_url':
            '{}:{}'.format(canonical_url(CONFIGS, ADMIN), port),
        'quantum_internal_url':
            '{}:{}'.format(canonical_url(CONFIGS, INTERNAL), port),
    }
    if relation_trigger:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Advertise the nova API URL to a joining neutron-api unit."""
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in hookenv.relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    settings = {
        'nova_url':
            ch_ip.canonical_url(CONFIGS, ch_ip.INTERNAL) + ":8774/v2",
    }
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        settings['restart_trigger'] = str(uuid.uuid4())
    hookenv.relation_set(relation_id=rid, **settings)
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Advertise the nova API URL to a joining neutron-api unit."""
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in hookenv.relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    settings = {
        'nova_url':
            ch_ip.canonical_url(CONFIGS, ch_ip.INTERNAL) + ":8774/v2",
    }
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        settings['restart_trigger'] = str(uuid.uuid4())
    hookenv.relation_set(relation_id=rid, **settings)
def identity_joined(rid=None):
    """Register the cloudkitty API endpoints with keystone."""
    # All three endpoints share the same port and trailing slash.
    template = '%s:8889/'
    relation_data = {
        'cloudkitty_service': 'cloudkitty',
        'cloudkitty_region': config('region'),
        'cloudkitty_public_url': template % canonical_url(CONFIGS, PUBLIC),
        'cloudkitty_admin_url': template % canonical_url(CONFIGS, ADMIN),
        'cloudkitty_internal_url':
            template % canonical_url(CONFIGS, INTERNAL),
    }
    relation_set(relation_id=rid, **relation_data)
def image_service_joined(relation_id=None):
    """Share the glance API server address with the remote service."""
    api_server = "{}:9292".format(canonical_url(CONFIGS, INTERNAL))
    relation_data = {'glance-api-server': api_server}
    juju_log("%s: image-service_joined: To peer glance-api-server=%s" %
             (CHARM, relation_data['glance-api-server']))
    relation_set(relation_id=relation_id, **relation_data)
def keystone_joined(relation_id=None):
    """Register the astara endpoint (one URL serves all three scopes)."""
    # TODO(adam_g): This will actually need to happen in astara-orchestrator
    url = '{}:44250'.format(canonical_url(configs=None, endpoint_type=ADMIN))
    relation_set(relation_id=relation_id,
                 service='astara',
                 region=config('region'),
                 public_url=url,
                 admin_url=url,
                 internal_url=url)
def keystone_joined(relation_id=None):
    """Register the astara endpoint (one URL serves all three scopes)."""
    # TODO(adam_g): This will actually need to happen in astara-orchestrator
    url = '{}:44250'.format(canonical_url(configs=None, endpoint_type=ADMIN))
    relation_set(relation_id=relation_id,
                 service='astara',
                 region=config('region'),
                 public_url=url,
                 admin_url=url,
                 internal_url=url)
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Hand nova URL (plus cell info) to a joining neutron-api unit."""
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    rel_settings = {
        'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2",
    }
    cell_type = get_cell_type()
    if cell_type:
        rel_settings['cell_type'] = cell_type
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **rel_settings)
def keystone_joined(relid=None):
    """Register the ceilometer API endpoints with keystone."""
    urls = {
        name: "{}:{}".format(canonical_url(CONFIGS, ep), CEILOMETER_PORT)
        for name, ep in (('public_url', PUBLIC),
                         ('admin_url', ADMIN),
                         ('internal_url', INTERNAL))
    }
    relation_set(relation_id=relid,
                 service=CEILOMETER_SERVICE,
                 requested_roles=CEILOMETER_ROLE,
                 region=config("region"),
                 **urls)
def keystone_joined(relid=None):
    """Register swift and s3 endpoints, nulling unprefixed legacy keys."""
    port = config('bind-port')
    region = config('region')

    def url(ep_type, suffix=''):
        # "<canonical base>:<port><suffix>" for the given endpoint type.
        return '{}:{}{}'.format(canonical_url(CONFIGS, ep_type), port,
                                suffix)

    tenant_suffix = '/v1/AUTH_$(tenant_id)s'
    relation_set(relation_id=relid,
                 # Unprefixed keys are explicitly nulled (presumably to
                 # clear previously registered values).
                 region=None,
                 public_url=None,
                 internal_url=None,
                 admin_url=None,
                 service=None,
                 swift_service='swift',
                 swift_region=region,
                 swift_public_url=url(PUBLIC, tenant_suffix),
                 swift_internal_url=url(INTERNAL, tenant_suffix),
                 swift_admin_url=url(ADMIN),
                 s3_service='s3',
                 s3_region=region,
                 s3_public_url=url(PUBLIC),
                 s3_admin_url=url(ADMIN),
                 s3_internal_url=url(INTERNAL))
def nova_vmware_relation_joined(rid=None):
    """Pass network manager, keystone auth and quantum details onward."""
    rel_settings = {'network_manager': network_manager()}
    ks_auth = _auth_config()
    if ks_auth:
        # Include keystone auth details when they are available.
        rel_settings.update(ks_auth)
    quantum_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                 str(api_port('neutron-server')))
    rel_settings.update({
        'quantum_plugin': neutron_plugin(),
        'quantum_security_groups': config('quantum-security-groups'),
        'quantum_url': quantum_url,
    })
    relation_set(relation_id=rid, **rel_settings)
def image_service_joined(relation_id=None):
    """Advertise the glance API server and its readiness state."""
    relation_data = {
        'glance-api-server':
            "{}:9292".format(canonical_url(CONFIGS, INTERNAL)),
        'glance-api-ready': 'yes' if is_api_ready(CONFIGS) else 'no',
    }
    juju_log("%s: image-service_joined: To peer glance-api-server=%s" %
             (CHARM, relation_data['glance-api-server']))
    relation_set(relation_id=relation_id, **relation_data)
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Disable the local neutron-server and publish nova details.

    A related neutron-api unit takes over API duty: the local upstart
    job is masked, the stale config is set aside, the service stopped,
    and then the nova URL (plus optional cell/restart data) is sent.
    """
    # BUG FIX: the override file was opened in binary mode ('wb') while
    # writing a str, which raises TypeError on Python 3; text mode is
    # correct here (identical output on POSIX Python 2 as well).
    with open('/etc/init/neutron-server.override', 'w') as out:
        out.write('manual\n')
    if os.path.isfile(NEUTRON_CONF):
        # Keep the old config for reference instead of deleting it.
        os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused')
    if service_running('neutron-server'):
        service_stop('neutron-server')
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    rel_settings = {
        'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2",
    }
    cell_type = get_cell_type()
    if cell_type:
        rel_settings['cell_type'] = cell_type
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **rel_settings)
def neutron_api_relation_joined(rid=None):
    """Publish neutron API connection details to the principal charm."""
    neutron_url = '%s:%s' % (canonical_url(CONFIGS, INTERNAL),
                             api_port('neutron-server'))
    relation_data = {
        'neutron-url': neutron_url,
        'neutron-plugin': config('neutron-plugin'),
        'neutron-security-groups':
            "yes" if config('neutron-security-groups') else "no",
    }
    relation_set(relation_id=rid, **relation_data)
    # Nova-cc may have grabbed the quantum endpoint so kick identity-service
    # relation to register that its here
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id, relation_trigger=True)
def neutron_api_relation_joined(rid=None):
    """Publish neutron API connection details to the principal charm."""
    neutron_url = '%s:%s' % (canonical_url(CONFIGS, INTERNAL),
                             api_port('neutron-server'))
    relation_data = {
        'neutron-url': neutron_url,
        'neutron-plugin': config('neutron-plugin'),
        'neutron-security-groups':
            "yes" if config('neutron-security-groups') else "no",
    }
    relation_set(relation_id=rid, **relation_data)
    # Nova-cc may have grabbed the quantum endpoint so kick identity-service
    # relation to register that its here
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id, relation_trigger=True)
def console_settings():
    """Build console-proxy relation settings for the configured protocol."""
    proto = common.console_attributes('protocol')
    if not proto:
        # No console protocol configured; nothing to publish.
        return {}
    rel_settings = {
        'console_keymap': hookenv.config('console-keymap'),
        'console_access_protocol': proto,
    }
    console_ssl = bool(hookenv.config('console-ssl-cert') and
                       hookenv.config('console-ssl-key'))
    if hookenv.config('console-proxy-ip') == 'local':
        if console_ssl:
            address = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
            address = ch_network_ip.format_ipv6_addr(address) or address
            proxy_base_addr = 'https://%s' % address
        else:
            # canonical_url will only return 'https:' if API SSL are enabled.
            proxy_base_addr = ch_ip.canonical_url(CONFIGS, ch_ip.PUBLIC)
    else:
        schema = "https" if (console_ssl or ch_cluster.https()) else "http"
        proxy_base_addr = ("{}://{}"
                           .format(schema,
                                   hookenv.config('console-proxy-ip')))
    # 'vnc' exposes both the novnc and xvpvnc proxies.
    protocols = ['novnc', 'xvpvnc'] if proto == 'vnc' else [proto]
    for _proto in protocols:
        port = common.console_attributes('proxy-port', proto=_proto)
        page = common.console_attributes('proxy-page', proto=_proto)
        rel_settings['console_proxy_{}_address'.format(_proto)] = \
            "{}:{}{}".format(proxy_base_addr, port, page)
        rel_settings['console_proxy_%s_host' % (_proto)] = \
            urlparse(proxy_base_addr).hostname
        rel_settings['console_proxy_%s_port' % (_proto)] = port
    return rel_settings
def console_settings():
    """Build console-proxy relation settings for the configured protocol."""
    proto = common.console_attributes('protocol')
    if not proto:
        # No console protocol configured; nothing to publish.
        return {}
    rel_settings = {
        'console_keymap': hookenv.config('console-keymap'),
        'console_access_protocol': proto,
    }
    console_ssl = bool(hookenv.config('console-ssl-cert') and
                       hookenv.config('console-ssl-key'))
    if hookenv.config('console-proxy-ip') == 'local':
        if console_ssl:
            address = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
            address = ch_network_ip.format_ipv6_addr(address) or address
            proxy_base_addr = 'https://%s' % address
        else:
            # canonical_url will only return 'https:' if API SSL are enabled.
            proxy_base_addr = ch_ip.canonical_url(CONFIGS, ch_ip.PUBLIC)
    else:
        schema = "https" if (console_ssl or ch_cluster.https()) else "http"
        proxy_base_addr = ("{}://{}"
                           .format(schema,
                                   hookenv.config('console-proxy-ip')))
    # 'vnc' exposes both the novnc and xvpvnc proxies.
    protocols = ['novnc', 'xvpvnc'] if proto == 'vnc' else [proto]
    for _proto in protocols:
        port = common.console_attributes('proxy-port', proto=_proto)
        page = common.console_attributes('proxy-page', proto=_proto)
        rel_settings['console_proxy_{}_address'.format(_proto)] = \
            "{}:{}{}".format(proxy_base_addr, port, page)
        rel_settings['console_proxy_%s_host' % (_proto)] = \
            urlparse(proxy_base_addr).hostname
        rel_settings['console_proxy_%s_port' % (_proto)] = port
    return rel_settings
def nova_vmware_relation_joined(rid=None):
    """Pass network manager, keystone auth and quantum details onward."""
    rel_settings = {'network_manager': network_manager()}
    ks_auth = _auth_config()
    if ks_auth:
        # Include keystone auth details when they are available.
        rel_settings.update(ks_auth)
    quantum_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                 str(api_port('neutron-server')))
    rel_settings.update({
        'quantum_plugin': neutron_plugin(),
        'quantum_security_groups': config('quantum-security-groups'),
        'quantum_url': quantum_url,
    })
    relation_set(relation_id=rid, **rel_settings)
def image_service_joined(relation_id=None):
    """Share the glance API server (and swift temp-url data) with peers."""
    relation_data = {
        'glance-api-server':
            "{}:9292".format(canonical_url(CONFIGS, INTERNAL)),
    }
    juju_log("%s: image-service_joined: To peer glance-api-server=%s" %
             (CHARM, relation_data['glance-api-server']))
    contexts = CONFIGS.complete_contexts()
    if 'object-store' in contexts and 'identity-service' in contexts:
        # Both swift-facing relations are complete; expose temp-url data.
        relation_data.update({
            'swift-temp-url-key': swift_temp_url_key(),
            'swift-container': 'glance',
        })
    relation_set(relation_id=relation_id, **relation_data)
def neutron_api_relation_joined(rid=None, remote_restart=False):
    """Disable the local neutron-server and publish nova details.

    A related neutron-api unit takes over API duty: the local upstart
    job is masked, the stale config is set aside, the service stopped,
    and then the nova URL (plus optional cell/restart data) is sent.
    """
    # BUG FIX: the override file was opened in binary mode ('wb') while
    # writing a str, which raises TypeError on Python 3; text mode is
    # correct here (identical output on POSIX Python 2 as well).
    with open('/etc/init/neutron-server.override', 'w') as out:
        out.write('manual\n')
    if os.path.isfile(NEUTRON_CONF):
        # Keep the old config for reference instead of deleting it.
        os.rename(NEUTRON_CONF, NEUTRON_CONF + '_unused')
    if service_running('neutron-server'):
        service_stop('neutron-server')
    # Re-run endpoint registration on every identity-service relation.
    for id_rid in relation_ids('identity-service'):
        identity_joined(rid=id_rid)
    rel_settings = {
        'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2",
    }
    cell_type = get_cell_type()
    if cell_type:
        rel_settings['cell_type'] = cell_type
    if remote_restart:
        # A fresh UUID makes the remote side observe changed data.
        rel_settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **rel_settings)
def identity_joined(rid=None):
    """Register cinder (and, from Icehouse on, cinderv2) endpoints."""
    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return

    def _endpoint_urls(version):
        # keystone substitutes $(tenant_id)s per request.
        template = '{}:{}/' + version + '/$(tenant_id)s'
        port = config('api-listening-port')
        return {
            name: template.format(canonical_url(CONFIGS, ep), port)
            for name, ep in (('public', PUBLIC),
                             ('internal', INTERNAL),
                             ('admin', ADMIN))
        }

    v1 = _endpoint_urls('v1')
    settings = {
        # Unprefixed keys are explicitly nulled (presumably to clear
        # previously registered values).
        'region': None,
        'service': None,
        'public_url': None,
        'internal_url': None,
        'admin_url': None,
        'cinder_region': config('region'),
        'cinder_service': 'cinder',
        'cinder_public_url': v1['public'],
        'cinder_internal_url': v1['internal'],
        'cinder_admin_url': v1['admin'],
    }
    if os_release('cinder-common') >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        v2 = _endpoint_urls('v2')
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': v2['public'],
            'cinderv2_internal_url': v2['internal'],
            'cinderv2_admin_url': v2['admin'],
        })
    relation_set(relation_id=rid, **settings)
def identity_joined(rid=None):
    """Register cinder (and, from Icehouse on, cinderv2) endpoints."""
    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return

    def _endpoint_urls(version):
        # keystone substitutes $(tenant_id)s per request.
        template = '{}:{}/' + version + '/$(tenant_id)s'
        port = config('api-listening-port')
        return {
            name: template.format(canonical_url(CONFIGS, ep), port)
            for name, ep in (('public', PUBLIC),
                             ('internal', INTERNAL),
                             ('admin', ADMIN))
        }

    v1 = _endpoint_urls('v1')
    settings = {
        # Unprefixed keys are explicitly nulled (presumably to clear
        # previously registered values).
        'region': None,
        'service': None,
        'public_url': None,
        'internal_url': None,
        'admin_url': None,
        'cinder_region': config('region'),
        'cinder_service': 'cinder',
        'cinder_public_url': v1['public'],
        'cinder_internal_url': v1['internal'],
        'cinder_admin_url': v1['admin'],
    }
    if os_release('cinder-common') >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        v2 = _endpoint_urls('v2')
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': v2['public'],
            'cinderv2_internal_url': v2['internal'],
            'cinderv2_admin_url': v2['admin'],
        })
    relation_set(relation_id=rid, **settings)
def console_settings():
    """Assemble console proxy settings for the configured protocol."""
    proto = console_attributes('protocol')
    if not proto:
        # No console protocol configured; nothing to publish.
        return {}
    rel_settings = {
        'console_keymap': config('console-keymap'),
        'console_access_protocol': proto,
    }
    console_ssl = bool(config('console-ssl-cert') and
                       config('console-ssl-key'))
    if config('console-proxy-ip') == 'local':
        if console_ssl:
            address = resolve_address(endpoint_type=PUBLIC)
            address = format_ipv6_addr(address) or address
            proxy_base_addr = 'https://%s' % address
        else:
            # canonical_url will only return 'https:' if API SSL are enabled.
            proxy_base_addr = canonical_url(CONFIGS, PUBLIC)
    else:
        schema = "https" if (console_ssl or https()) else "http"
        proxy_base_addr = "%s://%s" % (schema, config('console-proxy-ip'))
    # 'vnc' exposes both the novnc and xvpvnc proxies.
    protocols = ['novnc', 'xvpvnc'] if proto == 'vnc' else [proto]
    for _proto in protocols:
        port = console_attributes('proxy-port', proto=_proto)
        page = console_attributes('proxy-page', proto=_proto)
        rel_settings['console_proxy_%s_address' % (_proto)] = \
            "%s:%s%s" % (proxy_base_addr, port, page)
        rel_settings['console_proxy_%s_host' % (_proto)] = \
            urlparse(proxy_base_addr).hostname
        rel_settings['console_proxy_%s_port' % (_proto)] = port
    return rel_settings
def console_settings():
    """Return console proxy relation settings.

    An empty dict is returned when no console protocol is enabled.
    """
    proto = console_attributes('protocol')
    if not proto:
        return {}

    rel_settings = {}
    rel_settings['console_keymap'] = config('console-keymap')
    rel_settings['console_access_protocol'] = proto

    console_ssl = bool(config('console-ssl-cert') and
                       config('console-ssl-key'))

    if config('console-proxy-ip') != 'local':
        if console_ssl or https():
            schema = "https"
        else:
            schema = "http"
        base_addr = "%s://%s" % (schema, config('console-proxy-ip'))
    elif console_ssl:
        address = resolve_address(endpoint_type=PUBLIC)
        address = format_ipv6_addr(address) or address
        base_addr = 'https://%s' % address
    else:
        # canonical_url will only return 'https:' if API SSL are enabled.
        base_addr = canonical_url(CONFIGS, PUBLIC)

    # The vnc protocol is served by two distinct proxy services.
    if proto == 'vnc':
        protocols = ['novnc', 'xvpvnc']
    else:
        protocols = [proto]

    for _proto in protocols:
        rel_settings['console_proxy_%s_address' % _proto] = "%s:%s%s" % (
            base_addr,
            console_attributes('proxy-port', proto=_proto),
            console_attributes('proxy-page', proto=_proto))
        rel_settings['console_proxy_%s_host' % _proto] = \
            urlparse(base_addr).hostname
        rel_settings['console_proxy_%s_port' % _proto] = \
            console_attributes('proxy-port', proto=_proto)
    return rel_settings
def image_service_joined(relation_id=None):
    """Publish the glance API server details on the image-service relation.

    Adds swift temp-URL details when both the object-store and
    identity-service contexts are complete.
    """
    relation_data = {
        'glance-api-server':
            "{}:9292".format(canonical_url(CONFIGS, INTERNAL)),
        'glance-api-ready': 'yes' if is_api_ready(CONFIGS) else 'no',
    }

    juju_log("%s: image-service_joined: To peer glance-api-server=%s" %
             (CHARM, relation_data['glance-api-server']))

    if ('object-store' in CONFIGS.complete_contexts() and
            'identity-service' in CONFIGS.complete_contexts()):
        relation_data.update({
            'swift-temp-url-key': swift_temp_url_key(),
            'swift-container': 'glance',
        })

    relation_set(relation_id=relation_id, **relation_data)
def identity_joined(relid=None):
    """Register swift (and, when supported, s3) endpoints with keystone.

    Exits the hook when the installed radosgw is too old for keystone
    integration.

    :param relid: identity-service relation id (current relation if None)
    """
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)

    port = listen_port()
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    # Tenant-namespaced deployments embed the project id in the path.
    if leader_get('namespace_tenants') == 'True':
        path = '/swift/v1/AUTH_$(project_id)s'
    else:
        path = '/swift/v1'
    internal_url = '%s:%s%s' % (canonical_url(CONFIGS, INTERNAL), port, path)
    public_url = '%s:%s%s' % (canonical_url(CONFIGS, PUBLIC), port, path)

    # ','.join yields '' for no roles and the bare role for a single one,
    # matching the legacy behaviour exactly.
    roles = [x for x in (config('operator-roles'), config('admin-roles')) if x]
    requested_roles = ','.join(roles)

    relation_set(swift_service='swift',
                 swift_region=config('region'),
                 swift_public_url=public_url,
                 swift_internal_url=internal_url,
                 swift_admin_url=admin_url,
                 requested_roles=requested_roles,
                 relation_id=relid)

    # s3 endpoint registration is only valid for luminous or later.
    if cmp_pkgrevno('radosgw', '12.2') >= 0:
        relation_set(
            s3_service='s3',
            s3_region=config('region'),
            s3_public_url='{}:{}/'.format(
                canonical_url(CONFIGS, PUBLIC), port),
            s3_internal_url='{}:{}/'.format(
                canonical_url(CONFIGS, INTERNAL), port),
            s3_admin_url='{}:{}/'.format(
                canonical_url(CONFIGS, ADMIN), port),
            relation_id=relid)
def slave_relation_changed(relation_id=None, unit=None):
    """Configure the local RGW as a multi-site slave zone.

    Waits until the master has provided realm/zonegroup/credentials,
    pulls the realm and period, creates the local non-master zone and
    restarts the radosgw service once on first configuration.  Only the
    leader unit performs any action.

    :param relation_id: master relation id to read data from
    :param unit: remote unit to read data from
    """
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'),
                master_data.get('zonegroup'),
                master_data.get('access_key'),
                master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    # Use the actual listening port (listen_port(), consistent with the
    # other multisite/identity hooks) rather than the raw 'port' config,
    # which can differ from the bound port (e.g. when TLS is enabled).
    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]

    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'],
                              master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False, master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
def master_relation_joined(relation_id=None):
    """Advertise master-zone details to a joining slave RGW.

    Publishes realm/zonegroup/endpoint/credentials and, on the leader,
    performs the one-time creation of realm, zonegroup, zone and the
    multisite system user, restarting the service once configured.

    :param relation_id: relation id to publish data on
    """
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    # Use the actual listening port (listen_port(), consistent with the
    # other multisite/identity hooks) rather than the raw 'port' config,
    # which can differ from the bound port (e.g. when TLS is enabled).
    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    # Everything below mutates cluster state; leader only.
    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True, master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True, master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER
        )
        multisite.modify_zone(zone,
                              access_key=access_key,
                              secret=secret)
        leader_set(access_key=access_key,
                   secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    # Re-publish in case credentials were just created above.
    relation_set(relation_id=relation_id,
                 access_key=access_key,
                 secret=secret)
def slave_relation_changed(relation_id=None, unit=None):
    """Configure this deployment as a multi-site slave zone.

    Defers until the master RGW has published realm, zonegroup,
    credentials and URL; then (leader only) pulls the realm/period,
    creates the local non-master zone and restarts the service once.
    """
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    source = relation_get(rid=relation_id, unit=unit)
    required = ('realm', 'zonegroup', 'access_key', 'secret', 'url')
    if not all(source.get(key) for key in required):
        log("Defer processing until master RGW has provided required data")
        return

    endpoints = ['{}:{}'.format(canonical_url(CONFIGS, INTERNAL),
                                listen_port())]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (source['realm'], source['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    changed = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=source['url'],
                             access_key=source['access_key'],
                             secret=source['secret'])
        multisite.pull_period(url=source['url'],
                              access_key=source['access_key'],
                              secret=source['secret'])
        multisite.set_default_realm(realm)
        changed = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False,
                              master=False,
                              zonegroup=zonegroup,
                              access_key=source['access_key'],
                              secret=source['secret'])
        changed = True

    if changed:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
def master_relation_joined(relation_id=None):
    """Publish master-zone details for a joining slave RGW.

    Shares realm/zonegroup/endpoint/credentials; on the leader also
    performs one-time creation of the realm, zonegroup, zone and
    multisite system user, restarting the service when configured.
    """
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    endpoints = ['{}:{}'.format(canonical_url(CONFIGS, INTERNAL),
                                listen_port())]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    # Cluster-state mutations below are leader-only.
    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    changed = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        changed = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True,
                                   master=True,
                                   realm=realm)
        changed = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True,
                              master=True,
                              zonegroup=zonegroup)
        changed = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER)
        multisite.modify_zone(zone,
                              access_key=access_key,
                              secret=secret)
        leader_set(access_key=access_key,
                   secret=secret)
        changed = True

    if changed:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    # Re-publish in case the system user was just created above.
    relation_set(relation_id=relation_id,
                 access_key=access_key,
                 secret=secret)
def identity_joined(rid=None):
    """Register cinder endpoints with the identity service.

    Registration is deferred until clustered when a VIP is configured.
    v1 endpoints are registered for releases before pike, v2 from
    icehouse onward and v3 from pike onward.

    :param rid: identity-service relation id (current relation if None)
    """
    if config('vip') and not is_clustered():
        # Typo fix: was "Defering".
        log('Deferring registration until clustered', level=DEBUG)
        return

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint '
                 'registration')
        return

    def _version_urls(version):
        # (public, internal, admin) endpoint URLs for one API version.
        return tuple(
            '{}:{}/{}/$(tenant_id)s'.format(
                canonical_url(CONFIGS, endpoint_type),
                config('api-listening-port'),
                version)
            for endpoint_type in (PUBLIC, INTERNAL, ADMIN))

    settings = {}
    # Wrap once instead of re-wrapping for every comparison.
    release = CompareOpenStackReleases(os_release('cinder-common'))

    if release < 'pike':
        public_url, internal_url, admin_url = _version_urls('v1')
        settings.update({
            # Clear legacy unversioned endpoint registration.
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if release >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url, internal_url, admin_url = _version_urls('v2')
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if release >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url, internal_url, admin_url = _version_urls('v3')
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)