def guard_map():
    '''Map of services and required interfaces that must be present
    before the service should be allowed to start.'''
    gmap = {}
    nova_services = deepcopy(BASE_SERVICES)
    # nova-conductor only exists from Grizzly onwards
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')

    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')

    for svc in nova_services:
        # copy per service: the original shared one list object across all
        # entries, so mutating one service's interface list changed them all
        gmap[svc] = list(nova_interfaces)

    # call network_manager() once and reuse the result below
    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        if net_manager == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
def amqp_changed():
    """Render amqp-dependent configs once the relation is complete."""
    if 'amqp' not in CONFIGS.complete_contexts():
        log('amqp relation incomplete. Peer not ready?')
        return
    CONFIGS.write(NOVA_CONF)
    manager = network_manager()
    if manager == 'quantum':
        CONFIGS.write(QUANTUM_CONF)
    if manager == 'neutron':
        CONFIGS.write(NEUTRON_CONF)
def determine_packages():
    """Return the de-duplicated list of packages to install.

    Currently all packages match service names.
    """
    packages = [] + BASE_PACKAGES
    # only the service lists are needed, so skip the unused keys instead
    # of unpacking (k, v) from iteritems()
    for v in resource_map().itervalues():
        packages.extend(v["services"])
    if network_manager() in ["neutron", "quantum"]:
        pkgs = neutron_plugin_attribute(neutron_plugin(),
                                        "server_packages",
                                        network_manager())
        packages.extend(pkgs)
    return list(set(packages))
def amqp_changed():
    """Render amqp-dependent configs and renotify cell relations."""
    if 'amqp' not in CONFIGS.complete_contexts():
        log('amqp relation incomplete. Peer not ready?')
        return
    CONFIGS.write(NOVA_CONF)
    # neutron-api subordinate takes over neutron config when related
    if not is_relation_made('neutron-api'):
        if network_manager() == 'quantum':
            CONFIGS.write(QUANTUM_CONF)
        if network_manager() == 'neutron':
            CONFIGS.write(NEUTRON_CONF)
    # plain loop: the list comprehension was used only for side effects
    for rid in relation_ids('cell'):
        nova_cell_relation_joined(rid=rid)
def amqp_changed():
    """Render amqp-dependent configs; renotify cell and nova-api relations."""
    if 'amqp' not in CONFIGS.complete_contexts():
        log('amqp relation incomplete. Peer not ready?')
        return
    CONFIGS.write(NOVA_CONF)
    # neutron-api subordinate takes over neutron config when related
    if not is_relation_made('neutron-api'):
        if network_manager() == 'quantum':
            CONFIGS.write(QUANTUM_CONF)
        if network_manager() == 'neutron':
            CONFIGS.write(NEUTRON_CONF)
    # plain loops: the list comprehension was used only for side effects
    for rid in relation_ids('cell'):
        nova_cell_relation_joined(rid=rid)
    for r_id in relation_ids('nova-api'):
        nova_api_relation_joined(rid=r_id)
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with
    a port mapping specific to this charm.
    Also used to extend nova.conf context with correct api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                     singlenode_mode=True)
    ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                 singlenode_mode=True)
    s3_api = determine_api_port(api_port('nova-objectstore'),
                                singlenode_mode=True)
    neutron_api = determine_api_port(api_port('neutron-server'),
                                     singlenode_mode=True)

    # Apache ports
    a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                          singlenode_mode=True)
    a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                      singlenode_mode=True)
    a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                     singlenode_mode=True)
    a_neutron_api = determine_apache_port(api_port('neutron-server'),
                                          singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }

    # haproxy frontend->backend port pairs keyed by service name
    port_mapping = {
        'nova-api-os-compute': [
            api_port('nova-api-os-compute'), a_compute_api],
        'nova-api-ec2': [
            api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [
            api_port('nova-objectstore'), a_s3_api],
    }

    # only manage the neutron-server frontend when the neutron-api
    # subordinate has not taken over neutron duties
    if not is_relation_made('neutron-api'):
        if neutron.network_manager() == 'neutron':
            port_mapping.update({
                'neutron-server': [
                    api_port('neutron-server'), a_neutron_api]
            })
            # neutron.conf listening port, set separte from nova's.
            ctxt['neutron_bind_port'] = neutron_api

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def db_joined():
    """Provide mysql (shared-db) credentials for nova and, when a neutron
    network manager is in use, for neutron as well.

    Raises:
        Exception: if a postgresql database relation already exists.
    """
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    # assign the boolean directly instead of an if/else pair
    config_neutron = network_manager() in ['quantum', 'neutron']

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')
        if config_neutron:
            sync_db_with_multi_ipv6_addresses(config('neutron-database'),
                                              config('neutron-database-user'),
                                              relation_prefix='neutron')
    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host)
        if config_neutron:
            # XXX: Renaming relations from quantum_* to neutron_* here.
            relation_set(neutron_database=config('neutron-database'),
                         neutron_username=config('neutron-database-user'),
                         neutron_hostname=host)
def get_compute_config(remote_restart=False):
    """Get the compute config as a dictionary to set on the relation.

    Combines console settings, serial console settings and a few fixed
    items into a dict suitable for relation_set.

    :param remote_restart: whether a restart should be notified
    :type remote_restart: bool
    :returns: dictionary settings for the relation
    :rtype: Dict[str, ANY]
    """
    settings = {
        'network_manager': ch_neutron.network_manager(),
        'volume_service': 'cinder',
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': hookenv.unit_get('private-address'),
        'region': hookenv.config('region'),
    }
    settings.update(console_settings())
    settings.update(ncc_utils.serial_console_settings())
    if remote_restart:
        # a fresh uuid value signals remote services to restart
        settings['restart_trigger'] = str(uuid.uuid4())
    return settings
def identity_changed():
    """Write identity-dependent configs and renotify related services."""
    if 'identity-service' not in CONFIGS.complete_contexts():
        log('identity-service relation incomplete. Peer not ready?')
        return
    CONFIGS.write('/etc/nova/api-paste.ini')
    CONFIGS.write(NOVA_CONF)
    if network_manager() == 'quantum':
        CONFIGS.write(QUANTUM_API_PASTE)
        CONFIGS.write(QUANTUM_CONF)
        save_novarc()
    if network_manager() == 'neutron':
        CONFIGS.write(NEUTRON_CONF)
    # plain loops: the list comprehensions were used only for side effects
    for rid in relation_ids('cloud-compute'):
        compute_joined(rid)
    for rid in relation_ids('quantum-network-service'):
        quantum_joined(rid)
    for rid in relation_ids('nova-vmware'):
        nova_vmware_relation_joined(rid)
    configure_https()
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a
    single hook execution.
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if relation_ids('nova-volume-service'):
        # if we have a relation to a nova-volume service, we're
        # also managing the nova-volume API endpoint (legacy)
        resource_map['/etc/nova/nova.conf']['services'].append(
            'nova-api-os-volume')

    net_manager = network_manager()

    # pop out irrelevant resources from the OrderedDict (easier than adding
    # them late)
    if net_manager != 'quantum':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'quantum' in k]
    if net_manager != 'neutron':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'neutron' in k]

    # keep only the apache config variant matching the installed apache
    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    # add neutron plugin requirements. nova-c-c only needs the neutron-server
    # associated with configs, not the plugin agent.
    if net_manager in ['quantum', 'neutron']:
        plugin = neutron_plugin()
        if plugin:
            conf = neutron_plugin_attribute(plugin, 'config', net_manager)
            ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
                     or [])
            services = neutron_plugin_attribute(plugin, 'server_services',
                                                net_manager)
            resource_map[conf] = {}
            resource_map[conf]['services'] = services
            resource_map[conf]['contexts'] = ctxts
            resource_map[conf]['contexts'].append(
                nova_cc_context.NeutronCCContext())

    # nova-conductor for releases >= G.
    if os_release('nova-common') not in ['essex', 'folsom']:
        resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']

    # also manage any configs that are being updated by subordinates.
    vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
                                                   service='nova',
                                                   config_file=NOVA_CONF)
    vmware_ctxt = vmware_ctxt()
    if vmware_ctxt and 'services' in vmware_ctxt:
        for s in vmware_ctxt['services']:
            if s not in resource_map[NOVA_CONF]['services']:
                resource_map[NOVA_CONF]['services'].append(s)

    return resource_map
def save_script_rc():
    """Persist OPENSTACK_* service names for use by external scripts."""
    env_vars = {
        'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
        'OPENSTACK_SERVICE_API_EC2': 'nova-api-ec2',
        'OPENSTACK_SERVICE_API_OS_COMPUTE': 'nova-api-os-compute',
        'OPENSTACK_SERVICE_CERT': 'nova-cert',
        'OPENSTACK_SERVICE_CONDUCTOR': 'nova-conductor',
        'OPENSTACK_SERVICE_OBJECTSTORE': 'nova-objectstore',
        'OPENSTACK_SERVICE_SCHEDULER': 'nova-scheduler',
    }
    if relation_ids('nova-volume-service'):
        env_vars['OPENSTACK_SERVICE_API_OS_VOL'] = 'nova-api-os-volume'
    # call network_manager() once instead of twice
    net_manager = network_manager()
    if net_manager == 'quantum':
        env_vars['OPENSTACK_SERVICE_API_QUANTUM'] = 'quantum-server'
    if net_manager == 'neutron':
        env_vars['OPENSTACK_SERVICE_API_NEUTRON'] = 'neutron-server'
    _save_script_rc(**env_vars)
def save_script_rc():
    """Write OPENSTACK_* service names to the scriptrc helper file."""
    env_vars = {
        'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
        'OPENSTACK_SERVICE_API_EC2': 'nova-api-ec2',
        'OPENSTACK_SERVICE_API_OS_COMPUTE': 'nova-api-os-compute',
        'OPENSTACK_SERVICE_CERT': 'nova-cert',
        'OPENSTACK_SERVICE_CONDUCTOR': 'nova-conductor',
        'OPENSTACK_SERVICE_OBJECTSTORE': 'nova-objectstore',
        'OPENSTACK_SERVICE_SCHEDULER': 'nova-scheduler',
    }
    # optional entries depending on relations / network manager
    if relation_ids('nova-volume-service'):
        env_vars['OPENSTACK_SERVICE_API_OS_VOL'] = 'nova-api-os-volume'
    if network_manager() == 'quantum':
        env_vars['OPENSTACK_SERVICE_API_QUANTUM'] = 'quantum-server'
    if network_manager() == 'neutron':
        env_vars['OPENSTACK_SERVICE_API_NEUTRON'] = 'neutron-server'
    _save_script_rc(**env_vars)
def nova_vmware_relation_joined(rid=None):
    """Provide network manager, keystone auth and neutron settings to
    the nova-vmware subordinate."""
    settings = {'network_manager': network_manager()}
    auth = _auth_config()
    if auth:
        settings.update(auth)
    settings.update(neutron_settings())
    relation_set(relation_id=rid, **settings)
def db_joined():
    """Provide nova (and neutron, when applicable) database credentials."""
    # look the unit address up once instead of per relation_set call
    host = unit_get('private-address')
    relation_set(nova_database=config('database'),
                 nova_username=config('database-user'),
                 nova_hostname=host)
    if network_manager() in ['quantum', 'neutron']:
        # XXX: Renaming relations from quantum_* to neutron_* here.
        relation_set(neutron_database=config('neutron-database'),
                     neutron_username=config('neutron-database-user'),
                     neutron_hostname=host)
def ha_changed():
    """React to the hacluster subordinate reporting a complete cluster."""
    clustered = relation_get('clustered')
    if not clustered or clustered in [None, 'None', '']:
        log('ha_changed: hacluster subordinate not fully clustered.')
        return
    CONFIGS.write(NOVA_CONF)
    if not is_relation_made('neutron-api'):
        manager = network_manager()
        if manager == 'quantum':
            CONFIGS.write(QUANTUM_CONF)
        if manager == 'neutron':
            CONFIGS.write(NEUTRON_CONF)
    log('Cluster configured, notifying other services and updating '
        'keystone endpoint configuration')
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
    update_nova_consoleauth_config()
def neutron_db_manage(actions):
    """Run neutron-db-manage with the plugin config for the given actions."""
    manager = network_manager()
    if manager != 'neutron':
        return
    conf = neutron_plugin_attribute(neutron_plugin(), 'config', manager)
    cmd = [NEUTRON_DB_MANAGE,
           '--config-file=/etc/{mgr}/{mgr}.conf'.format(mgr=manager),
           '--config-file={}'.format(conf)]
    subprocess.check_call(cmd + actions)
def test_network_manager_essex(self):
    """network_manager() raises for any configured manager on essex."""
    essex_cases = {
        'quantum': 'quantum',
        'neutron': 'quantum',
        'newhotness': 'newhotness',
    }
    self.os_release.return_value = 'essex'
    for nwmanager in essex_cases:
        self.config.return_value = nwmanager
        # pass the callable itself, not its result: calling it inline
        # raised before assertRaises could catch the exception
        self.assertRaises(Exception, neutron.network_manager)
def determine_endpoints(url):
    """Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings."""
    region = config("region")

    # TODO: Configurable nova API version.
    nova_url = "%s:%s/v1.1/$(tenant_id)s" % (
        url, api_port("nova-api-os-compute"))
    ec2_url = "%s:%s/services/Cloud" % (url, api_port("nova-api-ec2"))
    # legacy volume API shares the compute API port
    nova_volume_url = "%s:%s/v1/$(tenant_id)s" % (
        url, api_port("nova-api-os-compute"))
    neutron_url = "%s:%s" % (url, api_port("neutron-server"))
    s3_url = "%s:%s" % (url, api_port("nova-objectstore"))

    # the base endpoints
    endpoints = {
        "nova_service": "nova",
        "nova_region": region,
        "nova_public_url": nova_url,
        "nova_admin_url": nova_url,
        "nova_internal_url": nova_url,
        "ec2_service": "ec2",
        "ec2_region": region,
        "ec2_public_url": ec2_url,
        "ec2_admin_url": ec2_url,
        "ec2_internal_url": ec2_url,
        "s3_service": "s3",
        "s3_region": region,
        "s3_public_url": s3_url,
        "s3_admin_url": s3_url,
        "s3_internal_url": s3_url,
    }

    if relation_ids("nova-volume-service"):
        # only advertise the legacy nova-volume endpoint when related
        endpoints.update(
            {
                "nova-volume_service": "nova-volume",
                "nova-volume_region": region,
                "nova-volume_public_url": nova_volume_url,
                "nova-volume_admin_url": nova_volume_url,
                "nova-volume_internal_url": nova_volume_url,
            }
        )

    # XXX: Keep these relations named quantum_*??
    if network_manager() in ["quantum", "neutron"]:
        endpoints.update(
            {
                "quantum_service": "quantum",
                "quantum_region": region,
                "quantum_public_url": neutron_url,
                "quantum_admin_url": neutron_url,
                "quantum_internal_url": neutron_url,
            }
        )

    return endpoints
def test_network_manager_icehouse(self):
    """On icehouse, quantum is renamed to neutron; others pass through."""
    icehouse_cases = {
        'quantum': 'neutron',
        'neutron': 'neutron',
        'newhotness': 'newhotness',
    }
    self.os_release.return_value = 'icehouse'
    for nwmanager in icehouse_cases:
        self.config.return_value = nwmanager
        renamed_manager = neutron.network_manager()
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(renamed_manager, icehouse_cases[nwmanager])
def test_network_manager_havana(self):
    """On havana, quantum is renamed to neutron; others pass through."""
    havana_cases = {
        'quantum': 'neutron',
        'neutron': 'neutron',
        'newhotness': 'newhotness',
    }
    self.os_release.return_value = 'havana'
    for nwmanager in havana_cases:
        self.config.return_value = nwmanager
        renamed_manager = neutron.network_manager()
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(renamed_manager, havana_cases[nwmanager])
def test_network_manager_grizzly(self):
    """On grizzly, neutron maps back to quantum; others pass through."""
    grizzly_cases = {
        'quantum': 'quantum',
        'neutron': 'quantum',
        'newhotness': 'newhotness',
    }
    self.os_release.return_value = 'grizzly'
    for nwmanager in grizzly_cases:
        self.config.return_value = nwmanager
        renamed_manager = neutron.network_manager()
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(renamed_manager, grizzly_cases[nwmanager])
def test_network_manager_folsom(self):
    """On folsom, neutron maps back to quantum; others pass through."""
    folsom_cases = {
        'quantum': 'quantum',
        'neutron': 'quantum',
        'newhotness': 'newhotness',
    }
    self.os_release.return_value = 'folsom'
    for nwmanager in folsom_cases:
        self.config.return_value = nwmanager
        renamed_manager = neutron.network_manager()
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(renamed_manager, folsom_cases[nwmanager])
def keystone_compute_settings():
    """Return keystone auth and neutron settings for compute relations."""
    auth = _auth_config()
    settings = {}
    if network_manager() == 'neutron':
        if auth:
            settings.update(auth)
        settings.update(neutron_settings())
    ca = keystone_ca_cert_b64()
    if auth and ca:
        settings['ca_cert'] = ca
    return settings
def keystone_compute_settings():
    """Collect keystone auth plus neutron settings for compute nodes."""
    auth_cfg = _auth_config()
    settings = {}
    if ch_neutron.network_manager() == 'neutron':
        if auth_cfg:
            settings.update(auth_cfg)
        settings.update(neutron_settings())
    ca_cert = ncc_utils.keystone_ca_cert_b64()
    if auth_cfg and ca_cert:
        settings['ca_cert'] = ca_cert
    return settings
def determine_packages():
    """Return the full, de-duplicated package list for this deployment."""
    # currently all packages match service names
    packages = [] + BASE_PACKAGES
    for v in resource_map().values():
        packages.extend(v['services'])
    if network_manager() in ['neutron', 'quantum']:
        packages.extend(
            neutron_plugin_attribute(neutron_plugin(), 'server_packages',
                                     network_manager()))
    if console_attributes('packages'):
        packages.extend(console_attributes('packages'))
    if git_install_requested():
        packages = list(set(packages))
        packages.extend(BASE_GIT_PACKAGES)
        # don't include packages that will be installed from git
        for p in GIT_PACKAGE_BLACKLIST:
            if p in packages:
                packages.remove(p)
    return list(set(packages))
def keystone_compute_settings():
    """Gather keystone auth and neutron settings for the compute relation."""
    auth = _auth_config()
    settings = {}
    if network_manager() in ['quantum', 'neutron']:
        if auth:
            settings.update(auth)
        settings.update(neutron_settings())
    ca = keystone_ca_cert_b64()
    if auth and ca:
        settings['ca_cert'] = ca
    return settings
def determine_endpoints(url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.'''
    region = config('region')

    # TODO: Configurable nova API version.
    nova_url = ('%s:%s/v1.1/$(tenant_id)s' %
                (url, api_port('nova-api-os-compute')))
    ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2'))
    # legacy volume API shares the compute API port
    nova_volume_url = ('%s:%s/v1/$(tenant_id)s' %
                       (url, api_port('nova-api-os-compute')))
    neutron_url = '%s:%s' % (url, api_port('neutron-server'))
    s3_url = '%s:%s' % (url, api_port('nova-objectstore'))

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_url,
        'nova_admin_url': nova_url,
        'nova_internal_url': nova_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_url,
        'ec2_admin_url': ec2_url,
        'ec2_internal_url': ec2_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_url,
        's3_admin_url': s3_url,
        's3_internal_url': s3_url,
    }

    if relation_ids('nova-volume-service'):
        # only advertise the legacy nova-volume endpoint when related
        endpoints.update({
            'nova-volume_service': 'nova-volume',
            'nova-volume_region': region,
            'nova-volume_public_url': nova_volume_url,
            'nova-volume_admin_url': nova_volume_url,
            'nova-volume_internal_url': nova_volume_url,
        })

    # XXX: Keep these relations named quantum_*??
    if network_manager() in ['quantum', 'neutron']:
        endpoints.update({
            'quantum_service': 'quantum',
            'quantum_region': region,
            'quantum_public_url': neutron_url,
            'quantum_admin_url': neutron_url,
            'quantum_internal_url': neutron_url,
        })

    return endpoints
def identity_changed():
    """Write identity-dependent configs and renotify related services."""
    if 'identity-service' not in CONFIGS.complete_contexts():
        log('identity-service relation incomplete. Peer not ready?')
        return
    CONFIGS.write('/etc/nova/api-paste.ini')
    CONFIGS.write(NOVA_CONF)
    # neutron-api subordinate takes over neutron config when related
    if not is_relation_made('neutron-api'):
        if network_manager() == 'quantum':
            CONFIGS.write(QUANTUM_API_PASTE)
            CONFIGS.write(QUANTUM_CONF)
            save_novarc()
        if network_manager() == 'neutron':
            CONFIGS.write(NEUTRON_CONF)
    # plain loops: the list comprehensions were used only for side effects
    for rid in relation_ids('cloud-compute'):
        compute_joined(rid)
    for rid in relation_ids('quantum-network-service'):
        quantum_joined(rid)
    for rid in relation_ids('nova-vmware'):
        nova_vmware_relation_joined(rid)
    for rid in relation_ids('neutron-api'):
        neutron_api_relation_joined(rid)
    configure_https()
    for r_id in relation_ids('nova-api'):
        nova_api_relation_joined(rid=r_id)
def neutron_db_manage(actions):
    """Invoke the quantum/neutron db manage tool for the given actions."""
    manager = network_manager()
    if manager not in ['neutron', 'quantum']:
        return
    conf = neutron_plugin_attribute(neutron_plugin(), 'config', manager)
    tool = QUANTUM_DB_MANAGE if manager == 'quantum' else NEUTRON_DB_MANAGE
    subprocess.check_call(
        [tool,
         '--config-file=/etc/{mgr}/{mgr}.conf'.format(mgr=manager),
         '--config-file={}'.format(conf)] + actions)
def neutron_db_manage(actions):
    """Run the network manager's db migration tool with the plugin config."""
    mgr = network_manager()
    if mgr in ['neutron', 'quantum']:
        plugin_conf = neutron_plugin_attribute(neutron_plugin(), 'config',
                                               mgr)
        if mgr == 'quantum':
            tool = QUANTUM_DB_MANAGE
        else:
            tool = NEUTRON_DB_MANAGE
        args = [tool,
                '--config-file=/etc/{mgr}/{mgr}.conf'.format(mgr=mgr),
                '--config-file={}'.format(plugin_conf)]
        subprocess.check_call(args + actions)
def nova_vmware_relation_joined(rid=None):
    """Pass network manager, keystone auth and quantum endpoint details to
    the nova-vmware subordinate."""
    settings = {'network_manager': network_manager()}
    auth = _auth_config()
    if auth:
        settings.update(auth)
    quantum_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                 str(api_port('neutron-server')))
    settings['quantum_plugin'] = neutron_plugin()
    settings['quantum_security_groups'] = config('quantum-security-groups')
    settings['quantum_url'] = quantum_url
    relation_set(relation_id=rid, **settings)
def resource_map():
    """
    Dynamically generate a map of resources that will be managed for a
    single hook execution.
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if relation_ids("nova-volume-service"):
        # if we have a relation to a nova-volume service, we're
        # also managing the nova-volume API endpoint (legacy)
        resource_map["/etc/nova/nova.conf"]["services"].append(
            "nova-api-os-volume")

    net_manager = network_manager()

    # pop out irrelevant resources from the OrderedDict (easier than adding
    # them late)
    if net_manager != "quantum":
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if "quantum" in k]
    if net_manager != "neutron":
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if "neutron" in k]

    # keep only the apache config variant matching the installed apache
    if os.path.exists("/etc/apache2/conf-available"):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    # add neutron plugin requirements. nova-c-c only needs the neutron-server
    # associated with configs, not the plugin agent.
    if net_manager in ["quantum", "neutron"]:
        plugin = neutron_plugin()
        if plugin:
            conf = neutron_plugin_attribute(plugin, "config", net_manager)
            ctxts = neutron_plugin_attribute(plugin, "contexts",
                                             net_manager) or []
            services = neutron_plugin_attribute(plugin, "server_services",
                                                net_manager)
            resource_map[conf] = {}
            resource_map[conf]["services"] = services
            resource_map[conf]["contexts"] = ctxts
            resource_map[conf]["contexts"].append(
                nova_cc_context.NeutronCCContext())

    # nova-conductor for releases >= G.
    if os_release("nova-common") not in ["essex", "folsom"]:
        resource_map["/etc/nova/nova.conf"]["services"] += ["nova-conductor"]

    # also manage any configs that are being updated by subordinates.
    vmware_ctxt = context.SubordinateConfigContext(interface="nova-vmware",
                                                   service="nova",
                                                   config_file=NOVA_CONF)
    vmware_ctxt = vmware_ctxt()
    if vmware_ctxt and "services" in vmware_ctxt:
        for s in vmware_ctxt["services"]:
            if s not in resource_map[NOVA_CONF]["services"]:
                resource_map[NOVA_CONF]["services"].append(s)

    return resource_map
def ml2_migration():
    """Migrate the neutron DB from the openvswitch plugin to ml2."""
    reset_os_release()
    if network_manager() != 'neutron':
        return
    if neutron_plugin() != 'ovs':
        return
    log('Migrating from openvswitch to ml2 plugin')
    subprocess.check_call([
        'python',
        '/usr/lib/python2.7/dist-packages/neutron'
        '/db/migration/migrate_to_ml2.py',
        '--tunnel-type', 'gre',
        '--release', 'icehouse',
        'openvswitch',
        get_db_connection(),
    ])
def db_changed():
    """Write db-dependent configs, migrate and restart remote computes."""
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(NOVA_CONF)
    if network_manager() in ['neutron', 'quantum']:
        plugin = neutron_plugin()
        # DB config might have been moved to main neutron.conf in H?
        CONFIGS.write(neutron_plugin_attribute(plugin, 'config'))
    if eligible_leader(CLUSTER_RES):
        migrate_database()
        log('Triggering remote cloud-compute restarts.')
        # plain loop: the list comprehension was used only for side effects
        for rid in relation_ids('cloud-compute'):
            compute_joined(rid=rid, remote_restart=True)
def compute_joined(rid=None, remote_restart=False):
    """Publish console, network and keystone settings to cloud-compute."""
    relation_set(relation_id=rid, **console_settings())
    settings = {
        'network_manager': network_manager(),
        'volume_service': volume_service(),
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': unit_get('private-address'),
    }
    if remote_restart:
        # a fresh uuid value signals remote services to restart
        settings['restart_trigger'] = str(uuid.uuid4())
    settings.update(keystone_compute_settings())
    relation_set(relation_id=rid, **settings)
def nova_vmware_relation_joined(rid=None):
    """Send network manager, keystone auth and the quantum endpoint to the
    nova-vmware subordinate."""
    settings = {'network_manager': network_manager()}
    ks_auth = _auth_config()
    if ks_auth:
        settings.update(ks_auth)
    endpoint = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                              str(api_port('neutron-server')))
    settings.update({
        'quantum_plugin': neutron_plugin(),
        'quantum_security_groups': config('quantum-security-groups'),
        'quantum_url': endpoint,
    })
    relation_set(relation_id=rid, **settings)
def get_compute_config(rid=None, remote_restart=False):
    """Publish console settings on the relation and return the remaining
    compute relation settings as a dict."""
    hookenv.relation_set(relation_id=rid, **console_settings())
    settings = {
        'network_manager': ch_neutron.network_manager(),
        'volume_service': 'cinder',
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': hookenv.unit_get('private-address'),
        'region': hookenv.config('region'),
    }
    settings.update(ncc_utils.serial_console_settings())
    if remote_restart:
        # a fresh uuid value signals remote services to restart
        settings['restart_trigger'] = str(uuid.uuid4())
    return settings
def keystone_compute_settings():
    """Return keystone auth plus quantum endpoint details for compute."""
    auth = _auth_config()
    settings = {}
    if network_manager() in ['quantum', 'neutron']:
        if auth:
            settings.update(auth)
        # XXX: Rename these relations settings?
        endpoint = (canonical_url(CONFIGS) + ':' +
                    str(api_port('neutron-server')))
        settings.update({
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': endpoint,
        })
    ca = keystone_ca_cert_b64()
    if auth and ca:
        settings['ca_cert'] = ca
    return settings
def network_manager(self):
    """Return the network manager as resolved by charmhelpers neutron."""
    manager = neutron.network_manager()
    return manager
def nova_cell_api_relation_joined(rid=None, remote_restart=False):
    """Publish compute config (plus neutron settings) to the cell API."""
    settings = get_compute_config(rid=rid, remote_restart=remote_restart)
    if ch_neutron.network_manager() == 'neutron':
        settings.update(neutron_settings())
    hookenv.relation_set(relation_id=rid, **settings)
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with
    a port mapping specific to this charm.
    Also used to extend nova.conf context with correct api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                     singlenode_mode=True)
    ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                 singlenode_mode=True)
    s3_api = determine_api_port(api_port('nova-objectstore'),
                                singlenode_mode=True)
    nvol_api = determine_api_port(api_port('nova-api-os-volume'),
                                  singlenode_mode=True)
    neutron_api = determine_api_port(api_port('neutron-server'),
                                     singlenode_mode=True)

    # Apache ports
    a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                          singlenode_mode=True)
    a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                      singlenode_mode=True)
    a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                     singlenode_mode=True)
    a_nvol_api = determine_apache_port(api_port('nova-api-os-volume'),
                                       singlenode_mode=True)
    a_neutron_api = determine_apache_port(api_port('neutron-server'),
                                          singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }

    port_mapping = {
        'nova-api-os-compute': [api_port('nova-api-os-compute'),
                                a_compute_api],
        'nova-api-ec2': [api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [api_port('nova-objectstore'), a_s3_api],
    }

    if relation_ids('nova-volume-service'):
        # BUG FIX: previously this updated the 'nova-api-ec2' entry with
        # the nova-volume apache port, clobbering the ec2 frontend and
        # never exposing the volume API; map the volume service instead.
        port_mapping.update({
            'nova-api-os-volume': [api_port('nova-api-os-volume'),
                                   a_nvol_api],
        })
        listen_ports['osapi_volume_listen_port'] = nvol_api

    # only manage the neutron/quantum frontend when the neutron-api
    # subordinate has not taken over neutron duties
    if not is_relation_made('neutron-api'):
        if neutron.network_manager() in ['neutron', 'quantum']:
            port_mapping.update({
                'neutron-server': [api_port('neutron-server'),
                                   a_neutron_api]
            })
            # quantum/neutron.conf listening port, set separte from nova's.
            ctxt['neutron_bind_port'] = neutron_api

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def postgresql_neutron_db_changed():
    """Rewrite the neutron plugin config when its postgres DB changes."""
    if network_manager() not in ['neutron', 'quantum']:
        return
    # DB config might have been moved to main neutron.conf in H?
    CONFIGS.write(neutron_plugin_attribute(neutron_plugin(), 'config'))
def nova_cell_api_relation_joined(rid=None, remote_restart=False):
    """Send compute config (and neutron settings) on the cell API relation."""
    settings = get_compute_config(remote_restart=remote_restart)
    if ch_neutron.network_manager() == 'neutron':
        settings.update(neutron_settings())
    hookenv.relation_set(relation_id=rid, **settings)
def determine_endpoints(public_url, internal_url, admin_url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.'''
    region = config('region')
    os_rel = os_release('nova-common')

    # NOTE(review): relies on lexicographic comparison of release names
    # ('grizzly' <= names of later releases) -- TODO confirm ordering holds
    if os_rel >= 'grizzly':
        # nova API moved to v2 from grizzly onwards
        nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
                           (public_url, api_port('nova-api-os-compute')))
        nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
                             (internal_url, api_port('nova-api-os-compute')))
        nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
                          (admin_url, api_port('nova-api-os-compute')))
    else:
        nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' %
                           (public_url, api_port('nova-api-os-compute')))
        nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' %
                             (internal_url, api_port('nova-api-os-compute')))
        nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' %
                          (admin_url, api_port('nova-api-os-compute')))

    ec2_public_url = '%s:%s/services/Cloud' % (
        public_url, api_port('nova-api-ec2'))
    ec2_internal_url = '%s:%s/services/Cloud' % (
        internal_url, api_port('nova-api-ec2'))
    ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
                                              api_port('nova-api-ec2'))

    # legacy volume API shares the compute API port
    nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' %
                              (public_url, api_port('nova-api-os-compute')))
    nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' %
                                (internal_url,
                                 api_port('nova-api-os-compute')))
    nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' %
                             (admin_url, api_port('nova-api-os-compute')))

    neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server'))
    neutron_internal_url = '%s:%s' % (internal_url,
                                      api_port('neutron-server'))
    neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server'))

    s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
    s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
    s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_public_url,
        'nova_admin_url': nova_admin_url,
        'nova_internal_url': nova_internal_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_public_url,
        'ec2_admin_url': ec2_admin_url,
        'ec2_internal_url': ec2_internal_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_public_url,
        's3_admin_url': s3_admin_url,
        's3_internal_url': s3_internal_url,
    }

    if relation_ids('nova-volume-service'):
        # only advertise the legacy nova-volume endpoint when related
        endpoints.update({
            'nova-volume_service': 'nova-volume',
            'nova-volume_region': region,
            'nova-volume_public_url': nova_volume_public_url,
            'nova-volume_admin_url': nova_volume_admin_url,
            'nova-volume_internal_url': nova_volume_internal_url,
        })

    # XXX: Keep these relations named quantum_*??
    if relation_ids('neutron-api'):
        # neutron-api charm manages its own endpoint; blank ours out
        endpoints.update({
            'quantum_service': None,
            'quantum_region': None,
            'quantum_public_url': None,
            'quantum_admin_url': None,
            'quantum_internal_url': None,
        })
    elif network_manager() in ['quantum', 'neutron']:
        endpoints.update({
            'quantum_service': 'quantum',
            'quantum_region': region,
            'quantum_public_url': neutron_public_url,
            'quantum_admin_url': neutron_admin_url,
            'quantum_internal_url': neutron_internal_url,
        })

    if os_rel >= 'kilo':
        # NOTE(jamespage) drop endpoints for ec2 and s3
        #  ec2 is deprecated
        #  s3 is insecure and should die in flames
        endpoints.update({
            'ec2_service': None,
            'ec2_region': None,
            'ec2_public_url': None,
            'ec2_admin_url': None,
            'ec2_internal_url': None,
            's3_service': None,
            's3_region': None,
            's3_public_url': None,
            's3_admin_url': None,
            's3_internal_url': None,
        })

    return endpoints
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a
    single hook execution.

    Returns a dict keyed by config file path; each value holds the
    'services' to restart when that file changes and the template
    'contexts' used to render it.
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if relation_ids('nova-volume-service'):
        # if we have a relation to a nova-volume service, we're
        # also managing the nova-volume API endpoint (legacy)
        resource_map['/etc/nova/nova.conf']['services'].append(
            'nova-api-os-volume')

    net_manager = network_manager()

    # Apache >= 2.4 uses conf-available; keep only the config file
    # matching the installed Apache layout.
    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    resource_map[NOVA_CONF]['contexts'].append(
        nova_cc_context.NeutronCCContext())

    # pop out irrelevant resources from the OrderedDict (easier than adding
    # them late)
    if net_manager != 'quantum':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'quantum' in k]
    if net_manager != 'neutron':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'neutron' in k]

    # add neutron plugin requirements. nova-c-c only needs the
    # neutron-server associated with configs, not the plugin agent.
    if net_manager in ['quantum', 'neutron']:
        plugin = neutron_plugin()
        if plugin:
            # register the plugin's own config file with the services and
            # contexts the plugin declares for its server side
            conf = neutron_plugin_attribute(plugin, 'config', net_manager)
            ctxts = (neutron_plugin_attribute(plugin, 'contexts',
                                              net_manager) or [])
            services = neutron_plugin_attribute(plugin, 'server_services',
                                                net_manager)
            resource_map[conf] = {}
            resource_map[conf]['services'] = services
            resource_map[conf]['contexts'] = ctxts
            resource_map[conf]['contexts'].append(
                nova_cc_context.NeutronCCContext())

            # update for postgres
            resource_map[conf]['contexts'].append(
                nova_cc_context.NeutronPostgresqlDBContext())

    if is_relation_made('neutron-api'):
        # a related neutron-api charm takes over running the neutron
        # services, so stop managing them here (configs are still
        # rendered, but restart no services on change)
        for k in list(resource_map.iterkeys()):
            # neutron-api runs neutron services
            if 'quantum' in k or 'neutron' in k:
                resource_map[k]['services'] = []
        resource_map[NOVA_CONF]['contexts'].append(
            nova_cc_context.NeutronAPIContext())

    # nova-conductor for releases >= G.
    if os_release('nova-common') not in ['essex', 'folsom']:
        resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']

    # console proxy services (e.g. novnc/spice) when configured
    if console_attributes('services'):
        resource_map['/etc/nova/nova.conf']['services'] += \
            console_attributes('services')

    # also manage any configs that are being updated by subordinates.
    vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
                                                   service='nova',
                                                   config_file=NOVA_CONF)
    vmware_ctxt = vmware_ctxt()
    if vmware_ctxt and 'services' in vmware_ctxt:
        for s in vmware_ctxt['services']:
            if s not in resource_map[NOVA_CONF]['services']:
                resource_map[NOVA_CONF]['services'].append(s)

    return resource_map
def determine_endpoints(public_url, internal_url, admin_url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.'''
    region = config('region')
    os_rel = os_release('nova-common')
    bases = (public_url, internal_url, admin_url)

    compute_port = api_port('nova-api-os-compute')
    # compute API path is versioned by release (v2 from grizzly onwards)
    if os_rel >= 'grizzly':
        nova_public_url, nova_internal_url, nova_admin_url = [
            '%s:%s/v2/$(tenant_id)s' % (b, compute_port) for b in bases]
    else:
        nova_public_url, nova_internal_url, nova_admin_url = [
            '%s:%s/v1.1/$(tenant_id)s' % (b, compute_port) for b in bases]

    ec2_port = api_port('nova-api-ec2')
    ec2_public_url, ec2_internal_url, ec2_admin_url = [
        '%s:%s/services/Cloud' % (b, ec2_port) for b in bases]

    # legacy nova-volume API shares the compute API port
    (nova_volume_public_url, nova_volume_internal_url,
     nova_volume_admin_url) = [
        '%s:%s/v1/$(tenant_id)s' % (b, compute_port) for b in bases]

    neutron_port = api_port('neutron-server')
    neutron_public_url, neutron_internal_url, neutron_admin_url = [
        '%s:%s' % (b, neutron_port) for b in bases]

    s3_port = api_port('nova-objectstore')
    s3_public_url, s3_internal_url, s3_admin_url = [
        '%s:%s' % (b, s3_port) for b in bases]

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_public_url,
        'nova_admin_url': nova_admin_url,
        'nova_internal_url': nova_internal_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_public_url,
        'ec2_admin_url': ec2_admin_url,
        'ec2_internal_url': ec2_internal_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_public_url,
        's3_admin_url': s3_admin_url,
        's3_internal_url': s3_internal_url,
    }

    if relation_ids('nova-volume-service'):
        endpoints.update({
            'nova-volume_service': 'nova-volume',
            'nova-volume_region': region,
            'nova-volume_public_url': nova_volume_public_url,
            'nova-volume_admin_url': nova_volume_admin_url,
            'nova-volume_internal_url': nova_volume_internal_url,
        })

    # XXX: Keep these relations named quantum_*??
    if relation_ids('neutron-api'):
        # neutron-api charm owns the neutron endpoints; null ours out
        endpoints.update(dict.fromkeys(
            ['quantum_service', 'quantum_region', 'quantum_public_url',
             'quantum_admin_url', 'quantum_internal_url']))
    elif network_manager() in ['quantum', 'neutron']:
        endpoints.update({
            'quantum_service': 'quantum',
            'quantum_region': region,
            'quantum_public_url': neutron_public_url,
            'quantum_admin_url': neutron_admin_url,
            'quantum_internal_url': neutron_internal_url,
        })

    if os_rel >= 'kilo':
        # NOTE(jamespage) drop endpoints for ec2 and s3
        #  ec2 is deprecated
        #  s3 is insecure and should die in flames
        endpoints.update(dict.fromkeys(
            ['ec2_service', 'ec2_region', 'ec2_public_url',
             'ec2_admin_url', 'ec2_internal_url', 's3_service',
             's3_region', 's3_public_url', 's3_admin_url',
             's3_internal_url']))

    return endpoints