def get_ha_nodes():
    """Map corosync node ids to addresses for all 'hanode' peers plus
    this unit.

    :returns: dict of {corosync_id: address}
    """
    ha_units = peer_ips(peer_relation='hanode')
    ha_nodes = {}
    for unit in ha_units:
        corosync_id = get_corosync_id(unit)
        addr = ha_units[unit]
        if config('prefer-ipv6'):
            if not utils.is_ipv6(addr):
                # Not an error since cluster may still be forming/updating
                log("Expected an ipv6 address but got %s" % (addr),
                    level=WARNING)
            # Use the peer-provided address as-is in ipv6 mode.
            ha_nodes[corosync_id] = addr
        else:
            # Resolve hostnames into IPs for ipv4 deployments.
            ha_nodes[corosync_id] = get_host_ip(addr)

    # Finally add this unit itself.
    corosync_id = get_corosync_id(local_unit())
    if config('prefer-ipv6'):
        addr = get_ipv6_addr()
    else:
        addr = get_host_ip(unit_get('private-address'))

    ha_nodes[corosync_id] = addr
    return ha_nodes
def ovs_ctxt(self):
    """Build the template context for the OVS neutron plugin.

    Extends the parent context with local_ip, security-group, MTU and
    provider-network settings drawn from charm config and the
    neutron-api relation.
    """
    # In addition to generating config context, ensure the OVS service
    # is running and the OVS bridge exists. Also need to ensure
    # local_ip points to actual IP, not hostname.
    ovs_ctxt = super(OVSPluginContext, self).ovs_ctxt()
    if not ovs_ctxt:
        return {}
    conf = config()
    fallback = get_host_ip(unit_get('private-address'))
    if config('os-data-network'):
        # NOTE: prefer any existing use of config based networking
        ovs_ctxt['local_ip'] = \
            get_address_in_network(config('os-data-network'),
                                   fallback)
    else:
        # NOTE: test out network-spaces support, then fallback
        try:
            ovs_ctxt['local_ip'] = get_host_ip(
                network_get_primary_address('data')
            )
        except NotImplementedError:
            ovs_ctxt['local_ip'] = fallback
    neutron_api_settings = NeutronAPIContext()()
    ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
    ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
    ovs_ctxt['distributed_routing'] = neutron_api_settings['enable_dvr']
    ovs_ctxt['overlay_network_type'] = \
        neutron_api_settings['overlay_network_type']
    # TODO: We need to sort out the syslog and debug/verbose options as a
    # general context helper
    ovs_ctxt['use_syslog'] = conf['use-syslog']
    ovs_ctxt['verbose'] = conf['verbose']
    ovs_ctxt['debug'] = conf['debug']
    ovs_ctxt['prevent_arp_spoofing'] = conf['prevent-arp-spoofing']
    ovs_ctxt['enable_dpdk'] = conf['enable-dpdk']
    net_dev_mtu = neutron_api_settings.get('network_device_mtu')
    if net_dev_mtu:
        # neutron.conf
        ovs_ctxt['network_device_mtu'] = net_dev_mtu
        # ml2 conf
        ovs_ctxt['veth_mtu'] = net_dev_mtu
    mappings = config('bridge-mappings')
    if mappings:
        # Space-separated config values become the comma-separated form
        # the templates expect.
        ovs_ctxt['bridge_mappings'] = ','.join(mappings.split())
    flat_providers = config('flat-network-providers')
    if flat_providers:
        ovs_ctxt['network_providers'] = ','.join(flat_providers.split())
    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ovs_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
    return ovs_ctxt
def ovs_ctxt(self):
    """Assemble the OVS plugin template context.

    Ensures local_ip resolves to a real IP (config-based network first,
    then network spaces, then the unit private address) and folds in the
    neutron-api relation settings plus charm config options.
    """
    ctxt = super(OVSPluginContext, self).ovs_ctxt()
    if not ctxt:
        return {}

    conf = config()
    fallback = get_host_ip(unit_get('private-address'))
    data_network = config('os-data-network')
    if data_network:
        # Config-based networking takes precedence.
        ctxt['local_ip'] = get_address_in_network(data_network, fallback)
    else:
        # Try network spaces, falling back to the private address.
        try:
            ctxt['local_ip'] = get_host_ip(
                network_get_primary_address('data'))
        except NotImplementedError:
            ctxt['local_ip'] = fallback

    api = NeutronAPIContext()()
    ctxt.update({
        'neutron_security_groups': self.neutron_security_groups,
        'l2_population': api['l2_population'],
        'distributed_routing': api['enable_dvr'],
        'overlay_network_type': api['overlay_network_type'],
        # TODO: fold syslog/debug/verbose into a shared context helper.
        'use_syslog': conf['use-syslog'],
        'verbose': conf['verbose'],
        'debug': conf['debug'],
        'prevent_arp_spoofing': conf['prevent-arp-spoofing'],
        'enable_dpdk': conf['enable-dpdk'],
    })

    mtu = api.get('network_device_mtu')
    if mtu:
        ctxt['network_device_mtu'] = mtu  # neutron.conf
        ctxt['veth_mtu'] = mtu            # ml2 conf

    # Space-separated config options rendered comma-separated.
    for opt, key in (('bridge-mappings', 'bridge_mappings'),
                     ('flat-network-providers', 'network_providers'),
                     ('vlan-ranges', 'vlan_ranges')):
        val = config(opt)
        if val:
            ctxt[key] = ','.join(val.split())
    return ctxt
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    """Record a compute node's SSH identity locally.

    If the remote compute node hands us a hostname, ensure we have known
    hosts entries for its IP, hostname and FQDN, then install its public
    key into the authorized_keys file.

    :param public_key: the compute node's SSH public key
    :param rid: relation id to read settings from
    :param unit: remote unit to read settings from
    :param user: local user owning the known_hosts/authorized_keys files
    """
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            # A hostname was handed to us: also track its IP and short name.
            hosts.append(get_host_ip(private_address))
            hosts.append(private_address.split('.')[0])
        else:
            # An IP was handed to us: track its hostname(s) when reverse
            # resolution succeeds. get_hostname() can return None/'' for
            # unresolvable addresses, which previously crashed on
            # hn.split('.'); guard like the other ssh_compute_add variant.
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                hosts.append(hn.split('.')[0])

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
def storage_changed():
    """Handle a swift-storage relation change: add the node to the rings.

    Reads node settings from the relation, registers each advertised
    device in every ring, then rebalances when enough nodes are present.

    :returns: None when the relation data is not yet complete.
    """
    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': openstack.get_host_ip(relation_get('private-address')),
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }
    # dict.values() instead of the Python 2-only itervalues() so this
    # hook also runs under Python 3 (both work on Python 2).
    if None in node_settings.values():
        log('storage_changed: Relation not ready.')
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # allow for multiple devs per unit, passed along as a : separated list
    devs = relation_get('device').split(':')
    for dev in devs:
        node_settings['device'] = dev
        for ring in SWIFT_RINGS.values():
            if not exists_in_ring(ring, node_settings):
                add_to_ring(ring, node_settings)

    if should_balance([r for r in SWIFT_RINGS.values()]):
        balance_rings()
def agent_changed(rid=None, unit=None):
    """Handle an agent relation change by trusting the agent's SSH hosts.

    Bails out until the shared-db, amqp and identity-service contexts
    are complete, then adds known-host entries for both the remote
    agent's and this unit's hostname/address.
    """
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('identity-service relation incomplete. Peer not ready?')
        return
    juju_log('**********rid is %s' % str(rid))
    juju_log('**********unit is %s' % str(unit))
    rel_settings = relation_get(rid=rid, unit=unit)
    agent_hostname = rel_settings.get('hostname')
    agent_hostaddress = rel_settings.get('hostaddress')
    juju_log('**********agent_hostname is %s' % agent_hostname)
    juju_log('**********agent_hostaddress is %s' % agent_hostaddress)
    host = unit_get('private-address')
    hostname = get_hostname(host)
    hostaddress = get_host_ip(host)
    juju_log('**********host is %s' % host)
    juju_log('**********hostname is %s' % hostname)
    juju_log('**********hostaddress is %s' % hostaddress)
    hosts = [agent_hostname, hostname, agent_hostaddress, hostaddress]
    # Skip any entry that is unset/empty.
    for host in hosts:
        if host:
            add_known_host(host, unit=unit, user='******')
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    """Record a compute node's SSH identity locally.

    If the remote compute node hands us a hostname, ensure we have a
    known hosts entry for its IP, hostname and FQDN, then install its
    public key into the authorized_keys file.
    """
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            # A hostname was handed to us: also track its IP, and the
            # short name only when DNS can actually resolve it.
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)
        else:
            # An IP was handed to us: track its hostname(s) when reverse
            # resolution succeeds.
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
def calico_ctxt(self):
    """Build the Calico plugin template context.

    BGP peer addresses come from the bgp-route-reflector relation when
    any exist, otherwise from our cluster peers.
    """
    calico_ctxt = super(CalicoPluginContext, self).calico_ctxt()
    if not calico_ctxt:
        return {}

    conf = config()
    calico_ctxt['local_ip'] = \
        get_address_in_network(config('os-data-network'),
                               get_host_ip(unit_get('private-address')))
    calico_ctxt['neutron_security_groups'] = self.neutron_security_groups
    calico_ctxt['use_syslog'] = conf['use-syslog']
    calico_ctxt['verbose'] = conf['verbose']
    calico_ctxt['debug'] = conf['debug']

    # Our BGP peers are either route reflectors or our cluster peers.
    # Prefer route reflectors. (The previous `= []` initialisations were
    # dead stores - both keys are unconditionally assigned here.)
    calico_ctxt['peer_ips'] = self.addrs_from_relation(
        'bgp-route-reflector')
    calico_ctxt['peer_ips6'] = self.addrs_from_relation(
        'bgp-route-reflector', ip_version=6)

    if not calico_ctxt['peer_ips']:
        calico_ctxt['peer_ips'] = self.addrs_from_relation('cluster')
    if not calico_ctxt['peer_ips6']:
        calico_ctxt['peer_ips6'] = self.addrs_from_relation(
            'cluster', ip_version=6)
    return calico_ctxt
def get_unit_ip(config_override=AMQP_OVERRIDE_CONFIG,
                interface=AMQP_INTERFACE):
    """Return this unit's IP.

    Future proof to allow for network spaces or other more complex
    address selection.

    @param config_override: string name of the config option for network
           override. Default to amqp-network
    @param interface: string name of the relation. Default to amqp.
    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
    @returns IPv6 or IPv4 address
    """
    fallback = get_host_ip(unit_get('private-address'))

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()
        return get_ipv6_addr()[0]

    override = config(config_override)
    if override:
        # NOTE(jamespage)
        # override private-address settings if access-network is
        # configured and an appropriate network interface is
        # configured.
        return get_address_in_network(override, fallback)

    # NOTE(jamespage)
    # Try using network spaces if access-network is not configured,
    # fall back to the private address if not supported.
    try:
        return network_get_primary_address(interface)
    except NotImplementedError:
        return fallback
def __call__(self):
    """Return the memcached host context.

    Uses the ip6 loopback alias in ipv6 mode, otherwise the unit's
    resolved private address.
    """
    if config('prefer-ipv6'):
        memcached_ip = 'ip6-localhost'
    else:
        memcached_ip = get_host_ip(unit_get('private-address'))
    return {'memcached_ip': memcached_ip}
def __call__(self):
    """Build the swift proxy template context.

    Prefers explicitly configured keystone auth settings; any completed
    identity-service relation subsequently overrides them.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == '0':
        import multiprocessing
        workers = multiprocessing.cpu_count()
    ctxt = {
        'proxy_ip': get_host_ip(unit_get('private-address')),
        'bind_port': determine_api_port(bind_port),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision')
    }

    ctxt['ssl'] = False

    auth_type = config('auth-type')
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUG FIX: this previously read 'keystone-admin-user' a second time,
    # silently using the username as the service password.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name')
        }
        ctxt.update(ks_auth)

    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': 'http',  # TODO: http hardcode
                'keystone_host': relation_get('auth_host', unit, relid),
                'auth_port': relation_get('auth_port', unit, relid),
                'service_user': relation_get('service_username', unit,
                                             relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant', unit,
                                               relid),
                'service_port': relation_get('service_port', unit,
                                             relid),
                'admin_token': relation_get('admin_token', unit, relid),
            }
            if context_complete(ks_auth):
                ctxt.update(ks_auth)
    return ctxt
def agent_joined(relation_id=None):
    """Publish this unit's hostname and address on the agent relation."""
    initialize_ssh_keys()
    private_address = unit_get('private-address')
    relation_set(relation_id=relation_id,
                 hostname=get_hostname(private_address),
                 hostaddress=get_host_ip(private_address))
def local_address(self):
    """Return remotely accessible address of charm (not localhost)

    @return an IPv6 address when the charm is in ipv6 mode, otherwise
            the resolved IPv4 address of the unit's private-address.
            (Previous docstring wrongly described a boolean return.)
    """
    if self.ipv6_mode:
        addr = ch_ip.get_ipv6_addr(exc_list=[self.vip])[0]
    else:
        addr = ch_utils.get_host_ip(hookenv.unit_get('private-address'))
    return addr
def local_address(self):
    """Return remotely accessible address of charm (not localhost)

    @return an IPv6 address when the charm is in ipv6 mode, otherwise
            the resolved IPv4 address of the unit's private-address.
            (Previous docstring wrongly described a boolean return.)
    """
    if self.ipv6_mode:
        addr = ch_ip.get_ipv6_addr(exc_list=[self.vip])[0]
    else:
        addr = ch_utils.get_host_ip(
            hookenv.unit_get('private-address'))
    return addr
def vrs_set_credentials_for_metadata_agent(relation_id=None,
                                           remote_unit=None):
    """Configure the VRS metadata agent with keystone credentials taken
    from the relation, then restart VRS.
    """
    username = relation_get("credentials_username")
    password = relation_get("credentials_password")
    tenant = relation_get("credentials_project")
    keystone_ip = relation_get("private-address")
    host_ip_address = get_host_ip(unit_get('private-address'))
    # SECURITY: never write the real password to the juju log.
    log("username:{}, password:{}, tenant:{}, keystone_ip:{}, private_ip: {}".
        format(username, '********' if password else password, tenant,
               keystone_ip, host_ip_address))
    create_nuage_metadata_file(username, password, tenant,
                               keystone_ip, host_ip_address)
    vrs_full_restart()
def __call__(self):
    """Collect the resolved addresses of all swift-storage units.

    @returns dict with the www dir and the hosts allowed to sync.
    """
    allowed_hosts = [
        get_host_ip(relation_get('private-address', unit, relid))
        for relid in relation_ids('swift-storage')
        for unit in related_units(relid)
    ]
    return {'www_dir': WWW_DIR, 'allowed_hosts': allowed_hosts}
def __call__(self):
    """Expose the unit's host address as 'host_ip' when resolvable."""
    if config('prefer-ipv6'):
        host_ip = get_ipv6_addr()[0]
    else:
        host_ip = get_host_ip(unit_get('private-address'))
    # NOTE: do not format this even for ipv6 (see bug 1499656)
    return {'host_ip': host_ip} if host_ip else {}
def get_host_ip(rid=None, unit=None):
    """Return the (possibly IPv6-formatted) address of a storage peer."""
    addr = relation_get('private-address', rid=rid, unit=unit)
    if config('prefer-ipv6'):
        formatted = format_ipv6_addr(addr)
        if formatted:
            return formatted
        # Fall through to IPv4 resolution, but note the oddity.
        log("Did not get IPv6 address from storage relation "
            "(got={})".format(addr), level=WARNING)
    return openstack.get_host_ip(addr)
def ovs_ctxt(self):
    """Build the OVS context for the compute node.

    Ensures the OVS bridge exists and that local_ip is an actual IP
    rather than a hostname.
    """
    ctxt = super(NeutronComputeContext, self).ovs_ctxt()
    if not ctxt:
        return {}
    self._ensure_bridge()
    ctxt['local_ip'] = get_host_ip(unit_get('private-address'))
    return ctxt
def get_host_ip(rid=None, unit=None):
    """Resolve a storage peer's address, honouring prefer-ipv6."""
    addr = relation_get('private-address', rid=rid, unit=unit)
    if not config('prefer-ipv6'):
        return openstack.get_host_ip(addr)

    host_ip = format_ipv6_addr(addr)
    if host_ip:
        return host_ip

    log("Did not get IPv6 address from storage relation "
        "(got=%s)" % (addr), level=WARNING)
    return openstack.get_host_ip(addr)
def ssh_compute_add_host_and_key(public_key, hostname, private_address, application_name, user=None): """Add a compute nodes ssh details to local cache. Collect various hostname variations and add the corresponding host keys to the local known hosts file. Finally, add the supplied public key to the authorized_key file. :param public_key: Public key. :type public_key: str :param hostname: Hostname to collect host keys from. :type hostname: str :param private_address:aCorresponding private address for hostname :type private_address: str :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str """ # If remote compute node hands us a hostname, ensure we have a # known hosts entry for its IP, hostname and FQDN. hosts = [private_address] if not is_ipv6(private_address): if hostname: hosts.append(hostname) if is_ip(private_address): hn = get_hostname(private_address) if hn: hosts.append(hn) short = hn.split('.')[0] if ns_query(short): hosts.append(short) else: hosts.append(get_host_ip(private_address)) short = private_address.split('.')[0] if ns_query(short): hosts.append(short) for host in list(set(hosts)): add_known_host(host, application_name, user) if not ssh_authorized_key_exists(public_key, application_name, user): log('Saving SSH authorized key for compute host at %s.' % private_address) add_authorized_key(public_key, application_name, user)
def get_local_addresses(self):
    """Return list of local addresses on each configured network

    For each network return an address the local unit has on that
    network if one exists.

    @returns [private_addr, admin_addr, public_addr, ...]
    """
    found = {os_utils.get_host_ip(hookenv.unit_get('private-address'))}
    for addr_type in os_ip.ADDRESS_MAP.keys():
        resolved = os_ip.resolve_address(endpoint_type=addr_type)
        if resolved:
            found.add(resolved)
    return sorted(found)
def __call__(self):
    """List the swift-storage hosts allowed to access the www dir."""
    allowed_hosts = []
    prefer_ipv6 = config('prefer-ipv6')
    if prefer_ipv6:
        # Loop-invariant: previously recomputed (with a config lookup)
        # for every related unit.
        # NOTE(review): in ipv6 mode this records the *local* unit's
        # address rather than the remote host's - behavior preserved
        # from the original; confirm intended.
        ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
    for relid in relation_ids('swift-storage'):
        for unit in related_units(relid):
            host = relation_get('private-address', unit, relid)
            if prefer_ipv6:
                host_ip = ipv6_addr
            else:
                host_ip = get_host_ip(host)
            allowed_hosts.append(host_ip)

    ctxt = {'www_dir': WWW_DIR, 'allowed_hosts': allowed_hosts}
    return ctxt
def get_local_addresses(self):
    """Return list of local addresses on each configured network

    For each network return an address the local unit has on that
    network if one exists.

    @returns [private_addr, admin_addr, public_addr, ...]
    """
    candidates = [os_ip.resolve_address(endpoint_type=addr_type)
                  for addr_type in os_ip.ADDRESS_MAP.keys()]
    addresses = {addr for addr in candidates if addr}
    addresses.add(
        os_utils.get_host_ip(hookenv.unit_get('private-address')))
    return sorted(list(addresses))
def get_local_nodename():
    '''Resolve local nodename into something that's universally
    addressable.

    Falls back to socket.gethostname() when the unit's IP does not
    reverse-resolve via DNS.
    '''
    ip_addr = get_host_ip(unit_get('private-address'))
    log('getting local nodename for ip address: %s' % ip_addr, level=INFO)
    try:
        nodename = get_hostname(ip_addr, fqdn=False)
    except Exception:
        # The previous bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the DNS fallback without
        # masking those.
        log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
            level='WARNING')
        log('Falling back to use socket.gethostname()', level='WARNING')
        # If the private-address is not resolvable using DNS
        # then use the current hostname
        nodename = socket.gethostname()

    log('local nodename: %s' % nodename, level=INFO)
    return nodename
def ovs_ctxt(self):
    """Build the OVS context for the compute node.

    In legacy plugin-management mode the required packages and the OVS
    bridge are put in place first; local_ip always resolves to a real IP
    rather than a hostname.
    """
    ctxt = super(NeutronComputeContext, self).ovs_ctxt()
    if not ctxt:
        return {}
    if config('manage-neutron-plugin-legacy-mode'):
        self._ensure_packages()
        self._ensure_bridge()
    fallback = get_host_ip(unit_get('private-address'))
    ctxt['local_ip'] = get_address_in_network(config('os-data-network'),
                                              fallback)
    return ctxt
def _pg_dir_context():
    '''Inspects relation with PLUMgrid director.

    @returns dict with the OPS VM address (default loopback) and every
    director's resolved address.
    '''
    ctxt = {'opsvm_ip': '127.0.0.1', 'director_ips': []}
    for rid in relation_ids('plumgrid'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            director_ip = get_host_ip(rdata['private-address'])
            ctxt['director_ips'].append(str(director_ip))
            if "opsvm_ip" in rdata:
                ctxt['opsvm_ip'] = rdata['opsvm_ip']
    return ctxt
def agent_changed(rid=None, unit=None):
    """Handle an agent relation change: install the controller's key and
    (re)configure the vsm agent services.

    Only proceeds once the shared-db and amqp contexts are complete and
    the local server manifest still carries the 'token-tenant'
    placeholder (i.e. it has not been configured yet).
    """
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return
    with open('/etc/manifest/server.manifest') as server_manifest:
        # The placeholder only exists before first configuration.
        flag = 'token-tenant' in server_manifest.read()
    if flag:
        rel_settings = relation_get(rid=rid, unit=unit)
        key = rel_settings.get('ssh_public_key')
        juju_log("**********key is %s" % str(key))
        if not key:
            juju_log('peer did not publish key?')
            return
        ssh_controller_key_add(key, rid=rid, unit=unit)
        host = unit_get('private-address')
        hostname = get_hostname(host)
        hostaddress = get_host_ip(host)
        juju_log("**********host is %s" % host)
        juju_log("**********hostname is %s" % hostname)
        juju_log("**********hostaddress is %s" % hostaddress)
        with open('/etc/hosts', 'a') as hosts:
            hosts.write('%s %s' % (hostaddress, hostname) + '\n')
        token_tenant = rel_settings.get('token_tenant')
        juju_log("**********token_tenant is %s" % token_tenant)
        # Re-seed the manifest from the charm template before
        # substituting the real values below.
        rsync(
            charm_dir() + '/files/server.manifest',
            '/etc/manifest/server.manifest'
        )
        c_hostaddress = rel_settings.get('hostaddress')
        juju_log("**********controller_hostaddress is %s" % c_hostaddress)
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^controller_ip/%s/g' % c_hostaddress,
                               '/etc/manifest/server.manifest'])
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/token-tenant/%s/g' % token_tenant,
                               '/etc/manifest/server.manifest'])
        # Restart the agent services so they pick up the new manifest.
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'start'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'start'])
        juju_log("**********start vsm-agent")
        juju_log("**********start vsm-physical")
def _pg_dir_context():
    '''Inspects relation with PLUMgrid director.

    Collects every director's resolved address and, when published, the
    OPS VM address (defaulting to loopback).
    '''
    director_ips = []
    opsvm_ip = '127.0.0.1'
    for rid in relation_ids('plumgrid'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            director_ips.append(
                str(get_host_ip(rdata['private-address'])))
            if "opsvm_ip" in rdata:
                opsvm_ip = rdata['opsvm_ip']
    return {'opsvm_ip': opsvm_ip, 'director_ips': director_ips}
def __call__(self):
    """List the swift-storage hosts allowed to access the www dir."""
    hosts = []
    for relid in relation_ids('swift-storage'):
        for unit in related_units(relid):
            private_address = relation_get('private-address', unit, relid)
            if config('prefer-ipv6'):
                hosts.append(get_ipv6_addr(exc_list=[config('vip')])[0])
            else:
                hosts.append(get_host_ip(private_address))
    return {'www_dir': WWW_DIR, 'allowed_hosts': hosts}
def agent_joined(relation_id=None):
    """Publish this unit's ssh/auth details on the agent relation.

    Obtains an agent token via the 'agent-token' helper and shares it
    together with this unit's hostname, address and public ssh key.
    """
    initialize_ssh_keys()
    host = unit_get('private-address')
    settings = {
        'hostname': get_hostname(host),
        'hostaddress': get_host_ip(host)
    }
    # NOTE(review): assumes identity_uri looks like scheme://host:port/...
    # - confirm against the auth_token configuration format.
    keystone_host = auth_token_config(
        'identity_uri').split('/')[2].split(':')[0]
    admin_tenant_name = auth_token_config('admin_tenant_name')
    admin_user = auth_token_config('admin_user')
    admin_password = auth_token_config('admin_password')
    args = ['agent-token', admin_tenant_name, admin_user, admin_password,
            keystone_host]
    # Strip the trailing newline from the helper's output.
    token_tenant = subprocess.check_output(args).strip('\n')
    settings['token_tenant'] = token_tenant
    settings['ssh_public_key'] = public_ssh_key()
    relation_set(relation_id=relation_id, **settings)
def ovs_ctxt(self):
    """Build the template context for the OVS neutron plugin."""
    # In addition to generating config context, ensure the OVS service
    # is running and the OVS bridge exists. Also need to ensure
    # local_ip points to actual IP, not hostname.
    ovs_ctxt = super(OVSPluginContext, self).ovs_ctxt()
    if not ovs_ctxt:
        return {}
    conf = config()
    ovs_ctxt['local_ip'] = \
        get_address_in_network(config('os-data-network'),
                               get_host_ip(unit_get('private-address')))
    neutron_api_settings = NeutronAPIContext()()
    ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
    ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
    ovs_ctxt['distributed_routing'] = neutron_api_settings['enable_dvr']
    ovs_ctxt['overlay_network_type'] = \
        neutron_api_settings['overlay_network_type']
    # TODO: We need to sort out the syslog and debug/verbose options as a
    # general context helper
    ovs_ctxt['use_syslog'] = conf['use-syslog']
    ovs_ctxt['verbose'] = conf['verbose']
    ovs_ctxt['debug'] = conf['debug']
    net_dev_mtu = neutron_api_settings.get('network_device_mtu')
    if net_dev_mtu:
        # neutron.conf
        ovs_ctxt['network_device_mtu'] = net_dev_mtu
        # ml2 conf
        ovs_ctxt['veth_mtu'] = net_dev_mtu
    mappings = config('bridge-mappings')
    if mappings:
        # Space-separated config becomes the comma-separated template
        # form.
        ovs_ctxt['bridge_mappings'] = ','.join(mappings.split())
    flat_providers = config('flat-network-providers')
    if flat_providers:
        ovs_ctxt['network_providers'] = ','.join(flat_providers.split())
    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ovs_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
    return ovs_ctxt
def cluster_joined(relation_id=None):
    """Handle joining the rabbitmq cluster peer relation.

    Publishes this unit's nodename and address, then - when acting as
    leader - peer-stores the erlang cookie so other nodes can join.
    """
    relation_settings = {
        'hostname': rabbit.get_local_nodename(),
    }
    if config('prefer-ipv6'):
        relation_settings['private-address'] = get_ipv6_addr()[0]
    else:
        relation_settings['private-address'] = get_host_ip(
            unit_get('private-address'))
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        log('Leader peer_storing cookie', level=INFO)
        # Use a context manager so the cookie file handle is closed
        # (previously leaked via a bare open().read()).
        with open(rabbit.COOKIE_PATH, 'r') as f:
            cookie = f.read().strip()
        peer_store('cookie', cookie)
        peer_store('leader_node_ip', unit_private_ip())
def __call__(self):
    """Build the console access context from the cloud-compute relation.

    Uses the first related unit that publishes a console protocol.
    NOTE(review): 'break' only exits the inner unit loop, so a later
    relation id could still overwrite these values - confirm intended.
    """
    ctxt = {}
    for rid in relation_ids('cloud-compute'):
        for unit in related_units(rid):
            rel = {'rid': rid, 'unit': unit}
            proto = relation_get('console_access_protocol', **rel)
            if not proto:
                # only bother with units that have a proto set.
                continue
            ctxt['console_keymap'] = relation_get('console_keymap', **rel)
            ctxt['console_access_protocol'] = proto
            ctxt['console_vnc_type'] = True if 'vnc' in proto else False
            if proto == 'vnc':
                # vnc implies both xvpvnc and novnc endpoints.
                ctxt = dict(ctxt, **self.get_console_info('xvpvnc', **rel))
                ctxt = dict(ctxt, **self.get_console_info('novnc', **rel))
            else:
                ctxt = dict(ctxt, **self.get_console_info(proto, **rel))
            break
    ctxt['console_listen_addr'] = get_host_ip(unit_get('private-address'))
    return ctxt
def ssh_compute_add(public_key, user=None):
    """Record the remote compute host's SSH identity locally.

    If the remote compute node hands us a hostname, ensure we have a
    known hosts entry for its IP, hostname and FQDN, then install its
    public key into the authorized_keys file.
    """
    private_address = relation_get("private-address")
    hosts = [private_address]

    if not is_ip(private_address):
        # A hostname: also track its resolved IP and short name.
        hosts.append(get_host_ip(private_address))
        hosts.append(private_address.split(".")[0])
    else:
        # An IP: track its hostname(s) when reverse resolution succeeds.
        # get_hostname() can return None/'' for unresolvable addresses,
        # which previously crashed on hn.split('.').
        hn = get_hostname(private_address)
        if hn:
            hosts.append(hn)
            hosts.append(hn.split(".")[0])

    for host in list(set(hosts)):
        if not ssh_known_host_key(host, user):
            add_known_host(host, user)

    if not ssh_authorized_key_exists(public_key, user):
        log("Saving SSH authorized key for compute host at %s." %
            private_address)
        add_authorized_key(public_key, user)
def __call__(self):
    '''Grab cert and key from configuration for SSL config

    @returns dict with the listen ports and, when enforce-ssl is active
    and a certificate is configured, the address to redirect to.
    '''
    # NOTE(review): non-standard ports - presumably a frontend (haproxy)
    # owns the standard 80/443 and the web server listens behind it on
    # 70/433; confirm against the haproxy/template configuration.
    ctxt = {
        'http_port': 70,
        'https_port': 433
    }
    if config('enforce-ssl'):
        # NOTE(dosaboy): if ssl is not configured we shouldn't allow this
        if all(get_cert()):
            if config('vip'):
                addr = config('vip')
            elif config('prefer-ipv6'):
                addr = format_ipv6_addr(get_ipv6_addr()[0])
            else:
                addr = get_host_ip(unit_get('private-address'))
            ctxt['ssl_addr'] = addr
        else:
            log("Enforce ssl redirect requested but ssl not configured - "
                "skipping redirect", level=WARNING)
    return ctxt
def calico_ctxt(self):
    """Assemble the Calico plugin context.

    BGP peers are taken from the bgp-route-reflector relation when any
    exist, otherwise from the cluster peers.
    """
    ctxt = super(CalicoPluginContext, self).calico_ctxt()
    if not ctxt:
        return {}

    conf = config()
    fallback = get_host_ip(unit_get('private-address'))
    ctxt['local_ip'] = get_address_in_network(config('os-data-network'),
                                              fallback)
    ctxt['neutron_security_groups'] = self.neutron_security_groups
    ctxt['use_syslog'] = conf['use-syslog']
    ctxt['verbose'] = conf['verbose']
    ctxt['debug'] = conf['debug']
    ctxt['peer_ips'] = []
    ctxt['peer_ips6'] = []

    # Prefer route reflectors over plain cluster peers.
    ctxt['peer_ips'] = self.addrs_from_relation('bgp-route-reflector')
    ctxt['peer_ips6'] = self.addrs_from_relation('bgp-route-reflector',
                                                 ip_version=6)
    if not ctxt['peer_ips']:
        ctxt['peer_ips'] = self.addrs_from_relation('cluster')
    if not ctxt['peer_ips6']:
        ctxt['peer_ips6'] = self.addrs_from_relation('cluster',
                                                     ip_version=6)
    return ctxt
def __call__(self):
    """Build the haproxy context mapping frontends to peer backends.

    Returns {} until this unit has peers (or runs in single-node mode).
    """
    if not relation_ids('cluster') and not self.singlenode_mode:
        return {}

    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
    else:
        addr = get_host_ip(unit_get('private-address'))

    l_unit = local_unit().replace('/', '-')
    cluster_hosts = {}

    # NOTE(jamespage): build out map of configured network endpoints
    # and associated backends
    for addr_type in ADDRESS_TYPES:
        cfg_opt = 'os-{}-network'.format(addr_type)
        laddr = get_address_in_network(config(cfg_opt))
        if laddr:
            netmask = get_netmask_for_address(laddr)
            cluster_hosts[laddr] = {
                'network': "{}/{}".format(laddr, netmask),
                'backends': {
                    l_unit: laddr
                }
            }
            for rid in relation_ids('cluster'):
                for unit in related_units(rid):
                    _laddr = relation_get('{}-address'.format(addr_type),
                                          rid=rid, unit=unit)
                    if _laddr:
                        _unit = unit.replace('/', '-')
                        cluster_hosts[laddr]['backends'][_unit] = _laddr

    # NOTE(jamespage) add backend based on private address - this
    # with either be the only backend or the fallback if no acls
    # match in the frontend
    cluster_hosts[addr] = {}
    netmask = get_netmask_for_address(addr)
    cluster_hosts[addr] = {
        'network': "{}/{}".format(addr, netmask),
        'backends': {
            l_unit: addr
        }
    }
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            _laddr = relation_get('private-address',
                                  rid=rid, unit=unit)
            if _laddr:
                _unit = unit.replace('/', '-')
                cluster_hosts[addr]['backends'][_unit] = _laddr

    ctxt = {'frontends': cluster_hosts, 'default_backend': addr}

    if config('haproxy-server-timeout'):
        ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
    if config('haproxy-client-timeout'):
        ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

    if config('prefer-ipv6'):
        ctxt['ipv6'] = True
        ctxt['local_host'] = 'ip6-localhost'
        ctxt['haproxy_host'] = '::'
        ctxt['stat_port'] = ':::8888'
    else:
        ctxt['local_host'] = '127.0.0.1'
        ctxt['haproxy_host'] = '0.0.0.0'
        ctxt['stat_port'] = ':8888'

    # Return (and enable haproxy) as soon as any frontend has more than
    # one backend, or in single-node mode.
    for frontend in cluster_hosts:
        if (len(cluster_hosts[frontend]['backends']) > 1 or
                self.singlenode_mode):
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.',
                level=DEBUG)
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt

    log('HAProxy context is incomplete, this unit has no peers.',
        level=INFO)
    return {}
def __call__(self):
    """Expose the unit's resolved private address as 'local_ip'."""
    return {'local_ip': get_host_ip(unit_get('private-address'))}
def __call__(self):
    """Generate the haproxy template context.

    Maps each configured network endpoint (frontend) to the cluster peer
    units (backends) serving it; haproxy is only enabled once some
    frontend has more than one backend, or in single-node mode.

    :returns: dict of haproxy settings, or {} when this unit has no
              peers and single-node mode is off.
    """
    if not relation_ids('cluster') and not self.singlenode_mode:
        return {}

    # Local fallback address: IPv6 (excluding the VIP) when preferred,
    # otherwise the resolved private address.
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
    else:
        addr = get_host_ip(unit_get('private-address'))

    # '/' is not a valid character in haproxy backend names.
    l_unit = local_unit().replace('/', '-')
    cluster_hosts = {}

    # NOTE(jamespage): build out map of configured network endpoints
    # and associated backends
    for addr_type in ADDRESS_TYPES:
        cfg_opt = 'os-{}-network'.format(addr_type)
        laddr = get_address_in_network(config(cfg_opt))
        if laddr:
            netmask = get_netmask_for_address(laddr)
            cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                              netmask),
                                    'backends': {l_unit: laddr}}
            # Peers publish their per-network address as '<type>-address'.
            for rid in relation_ids('cluster'):
                for unit in related_units(rid):
                    _laddr = relation_get('{}-address'.format(addr_type),
                                          rid=rid, unit=unit)
                    if _laddr:
                        _unit = unit.replace('/', '-')
                        cluster_hosts[laddr]['backends'][_unit] = _laddr

    # NOTE(jamespage): add backend based on private address - this
    # will either be the only backend or the fallback if no acls
    # match in the frontend.
    # (The empty-dict assignment below is redundant; it is immediately
    # replaced by the populated dict.)
    cluster_hosts[addr] = {}
    netmask = get_netmask_for_address(addr)
    cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                           'backends': {l_unit: addr}}
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            _laddr = relation_get('private-address', rid=rid, unit=unit)
            if _laddr:
                _unit = unit.replace('/', '-')
                cluster_hosts[addr]['backends'][_unit] = _laddr

    ctxt = {
        'frontends': cluster_hosts,
        'default_backend': addr
    }

    if config('haproxy-server-timeout'):
        ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
    if config('haproxy-client-timeout'):
        ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

    # Bind/stats addresses differ between IPv4 and IPv6 deployments.
    if config('prefer-ipv6'):
        ctxt['ipv6'] = True
        ctxt['local_host'] = 'ip6-localhost'
        ctxt['haproxy_host'] = '::'
        ctxt['stat_port'] = ':::8888'
    else:
        ctxt['local_host'] = '127.0.0.1'
        ctxt['haproxy_host'] = '0.0.0.0'
        ctxt['stat_port'] = ':8888'

    for frontend in cluster_hosts:
        if (len(cluster_hosts[frontend]['backends']) > 1 or
                self.singlenode_mode):
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.',
                level=DEBUG)
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt

    log('HAProxy context is incomplete, this unit has no peers.',
        level=INFO)
    return {}
def __call__(self):
    """Build the swift-proxy configuration context.

    Collects listen address/port, worker count, operator/statsd/affinity
    settings and keystone credentials into one template context.
    Credentials come from charm config when fully specified, and are
    overridden by data merged from the identity-service relation.

    :returns: dict of template context values.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means one worker per CPU core.
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = ('[{}]'.format(
            get_ipv6_addr(exc_list=[config('vip')])[0]))
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))

    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
        'static_large_object_segments':
            config('static-large-object-segments'),
        'enable_multi_region': config('enable-multi-region'),
        'read_affinity': get_read_affinity(),
        'write_affinity': get_write_affinity(),
        'write_affinity_node_count': get_write_affinity_node_count()
    }

    cmp_openstack = CompareOpenStackReleases(os_release('swift'))
    if cmp_openstack < 'train':
        # swauth is no longer supported for OpenStack Train and later
        admin_key = leader_get('swauth-admin-key')
        if admin_key is not None:
            ctxt['swauth_admin_key'] = admin_key

    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'

    # Instead of duplicating code lets use charm-helpers to set signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift', service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir

    ctxt['ssl'] = False

    auth_type = config('auth-type')
    ctxt['auth_type'] = auth_type

    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # Fixed: previously read 'keystone-admin-user' a second time here, so
    # the configured admin password was never actually used.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name'),
        }
        ctxt.update(ks_auth)

    # Sometime during the 20.08 development cycle, keystone changed from
    # every unit setting relation data to just the leader.  This means that
    # the charm needs to get data from the first relation that actually has
    # data, or amalgamate the data from all the relations.  For this charm,
    # it merges from the relation ids available like the charms.reactive
    # system does.
    _keys = (('auth_protocol', 'auth_protocol', 'http'),
             ('service_protocol', 'service_protocol', 'http'),
             ('keystone_host', 'auth_host', None),
             ('service_host', 'service_host', None),
             ('auth_port', 'auth_port', None),
             ('service_user', 'service_username', None),
             ('service_password', 'service_password', None),
             ('service_tenant', 'service_tenant', None),
             ('service_port', 'service_port', None),
             ('api_version', 'api_version', '2'))
    _keysv3 = (('admin_domain_id', 'admin_domain_id'),
               ('service_tenant_id', 'service_tenant_id'),
               ('admin_domain_name', 'service_domain'),
               ('admin_tenant_name', 'service_tenant'))

    kvs = {}
    relids = relation_ids('identity-service')
    # if we have relids at all, then set the auth_type to keystone
    if relids:
        kvs['auth_type'] = 'keystone'
    # merge the data from the related units; later units with data win,
    # units without data fall back to whatever is already merged/default
    for (key, source, default) in _keys:
        for relid in relids:
            for unit in related_units(relid):
                value = relation_get(source, unit, relid)
                if value is not None:
                    kvs[key] = value
                else:
                    kvs[key] = kvs.get(key, default)
    # if the api is version 3, also merge the additional keys
    if kvs.get('api_version', None) == '3':
        for (key, source) in _keysv3:
            for relid in relids:
                for unit in related_units(relid):
                    value = relation_get(source, unit, relid)
                    if value is not None:
                        kvs[key] = value

    # merge in the creds from the relation; which override the config
    ctxt.update(kvs)

    if config('prefer-ipv6'):
        # Wrap bare IPv6 addresses in [] so they are usable in URLs.
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)

    return ctxt
def lb_ctxt(self):
    """Build the linuxbridge plugin template context.

    In addition to generating config context, ensure local_ip points to
    an actual IP, not a hostname, and derive the physnet interface
    mapping from the first configured data-port.

    :returns: dict of template context values, or {} when the parent
              context is incomplete.
    """
    lb_ctxt = super(LBPluginContext, self).lb_ctxt()
    if not lb_ctxt:
        return {}
    conf = config()

    fallback = get_host_ip(unit_get('private-address'))
    if config('os-data-network'):
        # NOTE: prefer any existing use of config based networking
        lb_ctxt['local_ip'] = \
            get_address_in_network(config('os-data-network'),
                                   fallback)
    else:
        # NOTE: test out network-spaces support, then fallback
        try:
            lb_ctxt['local_ip'] = get_host_ip(
                network_get_primary_address('data'))
        except NotImplementedError:
            lb_ctxt['local_ip'] = fallback

    neutron_api_settings = NeutronAPIContext()()

    portmaps = context.DataPortContext()()
    if not portmaps:
        log("There are no data-ports defined for this host.", level=ERROR)
    # Fixed: dict.keys() is not subscriptable on Python 3, so materialise
    # the view before indexing. (Dead commented-out code also removed.)
    lb_ctxt['interface_mappings'] = (
        "physnet1:%s" % list(portmaps.keys())[0])

    lb_ctxt['neutron_security_groups'] = self.neutron_security_groups
    lb_ctxt['l2_population'] = neutron_api_settings['l2_population']
    lb_ctxt['overlay_network_type'] = \
        neutron_api_settings['overlay_network_type']
    # TODO: We need to sort out the syslog and debug/verbose options as a
    # general context helper
    lb_ctxt['use_syslog'] = conf['use-syslog']
    lb_ctxt['verbose'] = conf['verbose']
    lb_ctxt['debug'] = conf['debug']
    lb_ctxt['prevent_arp_spoofing'] = conf['prevent-arp-spoofing']
    lb_ctxt['enable_vxlan'] = conf['enable-vxlan']
    lb_ctxt['enable_dpdk'] = conf['enable-dpdk']

    net_dev_mtu = neutron_api_settings.get('network_device_mtu')
    if net_dev_mtu:
        # neutron.conf
        lb_ctxt['network_device_mtu'] = net_dev_mtu
        # ml2 conf
        lb_ctxt['veth_mtu'] = net_dev_mtu

    mappings = config('bridge-mappings')
    if mappings:
        lb_ctxt['bridge_mappings'] = ','.join(mappings.split())

    flat_providers = config('flat-network-providers')
    if flat_providers:
        lb_ctxt['network_providers'] = ','.join(flat_providers.split())

    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        lb_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

    return lb_ctxt
def __call__(self):
    """Build the swift-proxy configuration context.

    Collects listen address/port, worker count, operator/statsd settings
    and keystone credentials into one template context.  Credentials come
    from charm config when fully specified, and are overridden by any
    data found on the identity-service relation.

    :returns: dict of template context values.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means one worker per CPU core.
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = ('[{}]'.format(
            get_ipv6_addr(exc_list=[config('vip')])[0]))
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))

    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
        'static_large_object_segments':
            config('static-large-object-segments')
    }

    admin_key = leader_get('swauth-admin-key')
    if admin_key is not None:
        ctxt['swauth_admin_key'] = admin_key

    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'

    # Instead of duplicating code lets use charm-helpers to set signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift', service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir

    ctxt['ssl'] = False

    auth_type = config('auth-type')
    ctxt['auth_type'] = auth_type

    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # Fixed: previously read 'keystone-admin-user' a second time here, so
    # the configured admin password was never actually used.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name'),
        }
        ctxt.update(ks_auth)

    # Relation-provided credentials override any config-based ones.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': relation_get('auth_protocol',
                                              unit, relid) or 'http',
                'service_protocol': relation_get('service_protocol',
                                                 unit, relid) or 'http',
                'keystone_host': relation_get('auth_host',
                                              unit, relid),
                'service_host': relation_get('service_host',
                                             unit, relid),
                'auth_port': relation_get('auth_port',
                                          unit, relid),
                'service_user': relation_get('service_username',
                                             unit, relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant',
                                               unit, relid),
                'service_port': relation_get('service_port',
                                             unit, relid),
                'admin_token': relation_get('admin_token',
                                            unit, relid),
                'api_version': relation_get('api_version',
                                            unit, relid) or '2',
            }
            # Keystone v3 publishes additional domain/tenant identifiers.
            if ks_auth['api_version'] == '3':
                ks_auth['admin_domain_id'] = relation_get(
                    'admin_domain_id', unit, relid)
                ks_auth['service_tenant_id'] = relation_get(
                    'service_tenant_id', unit, relid)
                ks_auth['admin_domain_name'] = relation_get(
                    'service_domain', unit, relid)
                ks_auth['admin_tenant_name'] = relation_get(
                    'service_tenant', unit, relid)
            ctxt.update(ks_auth)

    if config('prefer-ipv6'):
        # Wrap bare IPv6 addresses in [] so they are usable in URLs.
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)

    return ctxt
def get_local_nodename():
    """Resolve this unit's private address to a universally addressable
    nodename."""
    resolved_ip = get_host_ip(unit_get('private-address'))
    log('getting local nodename for ip address: %s' % resolved_ip,
        level=INFO)
    return get_node_hostname(resolved_ip)
def __call__(self):
    """Build the swift-proxy configuration context.

    Collects listen address/port, worker count and keystone credentials
    into one template context.  Credentials come from charm config when
    fully specified, and are overridden by complete data found on the
    identity-service relation.

    :returns: dict of template context values.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means one worker per CPU core.
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))

    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers')
    }

    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'

    # Instead of duplicating code lets use charm-helpers to set signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift', service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir

    ctxt['ssl'] = False

    auth_type = config('auth-type')
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # Fixed: previously read 'keystone-admin-user' a second time here, so
    # the configured admin password was never actually used.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name')
        }
        ctxt.update(ks_auth)

    # Complete relation-provided credentials override config-based ones.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': relation_get('auth_protocol',
                                              unit, relid) or 'http',
                'service_protocol': relation_get('service_protocol',
                                                 unit, relid) or 'http',
                'keystone_host': relation_get('auth_host',
                                              unit, relid),
                'service_host': relation_get('service_host',
                                             unit, relid),
                'auth_port': relation_get('auth_port',
                                          unit, relid),
                'service_user': relation_get('service_username',
                                             unit, relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant',
                                               unit, relid),
                'service_port': relation_get('service_port',
                                             unit, relid),
                'admin_token': relation_get('admin_token',
                                            unit, relid),
            }
            if context_complete(ks_auth):
                ctxt.update(ks_auth)

    if config('prefer-ipv6'):
        # Wrap bare IPv6 addresses in [] so they are usable in URLs.
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)

    return ctxt
def ovs_ctxt(self):
    """Build the OVS plugin template context.

    In addition to generating config context, ensure local_ip points to
    an actual IP, not a hostname, and fold in neutron-api settings,
    SR-IOV options and security-group logging options.

    :returns: dict of template context values, or {} when the parent
              context is incomplete.
    """
    ovs_ctxt = super(OVSPluginContext, self).ovs_ctxt()
    if not ovs_ctxt:
        return {}
    conf = config()

    fallback = get_host_ip(unit_get('private-address'))
    if config('os-data-network'):
        # NOTE: prefer any existing use of config based networking
        ovs_ctxt['local_ip'] = \
            get_address_in_network(config('os-data-network'),
                                   fallback)
    else:
        # NOTE: test out network-spaces support, then fallback
        try:
            ovs_ctxt['local_ip'] = get_host_ip(
                network_get_primary_address('data')
            )
        except NotImplementedError:
            ovs_ctxt['local_ip'] = fallback

    neutron_api_settings = NeutronAPIContext()()

    ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
    ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
    ovs_ctxt['distributed_routing'] = neutron_api_settings['enable_dvr']
    ovs_ctxt['extension_drivers'] = neutron_api_settings[
        'extension_drivers']
    ovs_ctxt['overlay_network_type'] = \
        neutron_api_settings['overlay_network_type']
    ovs_ctxt['polling_interval'] = neutron_api_settings['polling_interval']
    ovs_ctxt['rpc_response_timeout'] = \
        neutron_api_settings['rpc_response_timeout']
    ovs_ctxt['report_interval'] = neutron_api_settings['report_interval']

    # TODO: We need to sort out the syslog and debug/verbose options as a
    # general context helper
    ovs_ctxt['use_syslog'] = conf['use-syslog']
    ovs_ctxt['verbose'] = conf['verbose']
    ovs_ctxt['debug'] = conf['debug']

    # Warn (but still pass the value through) when the operator enables a
    # feature that is deprecated from Ocata onwards.
    cmp_release = CompareOpenStackReleases(
        os_release('neutron-common', base='icehouse'))
    if conf['prevent-arp-spoofing'] and cmp_release >= 'ocata':
        log("prevent-arp-spoofing is True yet this feature is deprecated "
            "and no longer has any effect in your version of Openstack",
            WARNING)

    ovs_ctxt['prevent_arp_spoofing'] = conf['prevent-arp-spoofing']
    ovs_ctxt['enable_dpdk'] = conf['enable-dpdk']

    net_dev_mtu = neutron_api_settings.get('network_device_mtu')
    if net_dev_mtu:
        # neutron.conf
        ovs_ctxt['network_device_mtu'] = net_dev_mtu
        # ml2 conf
        ovs_ctxt['veth_mtu'] = net_dev_mtu

    mappings = config('bridge-mappings')
    if mappings:
        ovs_ctxt['bridge_mappings'] = ','.join(mappings.split())

    sriov_mappings = config('sriov-device-mappings')
    if sriov_mappings:
        ovs_ctxt['sriov_device_mappings'] = (
            ','.join(sriov_mappings.split())
        )

    enable_sriov = config('enable-sriov')
    if enable_sriov:
        ovs_ctxt['enable_sriov'] = True

    # sriov-numvfs is either a blanket value ('auto' or an integer,
    # applied to all devices) or a per-device list (anything that fails
    # int() conversion).
    sriov_numvfs = config('sriov-numvfs')
    if sriov_numvfs:
        try:
            if sriov_numvfs != 'auto':
                int(sriov_numvfs)
        except ValueError:
            ovs_ctxt['sriov_vfs_list'] = sriov_numvfs
        else:
            ovs_ctxt['sriov_vfs_blanket'] = sriov_numvfs

    flat_providers = config('flat-network-providers')
    if flat_providers:
        ovs_ctxt['network_providers'] = ','.join(flat_providers.split())

    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ovs_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

    # Security-group (NSG) logging: only meaningful with the openvswitch
    # firewall driver, so force it off for any other driver.
    ovs_ctxt['enable_nsg_logging'] = \
        neutron_api_settings['enable_nsg_logging']
    ovs_ctxt['nsg_log_output_base'] = get_nsg_log_path(
        config('security-group-log-output-base')
    )
    ovs_ctxt['nsg_log_rate_limit'] = \
        config('security-group-log-rate-limit')
    ovs_ctxt['nsg_log_burst_limit'] = \
        config('security-group-log-burst-limit')

    ovs_ctxt['firewall_driver'] = _get_firewall_driver(ovs_ctxt)
    if ovs_ctxt['firewall_driver'] != OPENVSWITCH:
        ovs_ctxt['enable_nsg_logging'] = False

    return ovs_ctxt