def compose(self):
    super(Neutron, self).compose()
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()

    self.keystone.register_endpoint_tri(
        region=dr,
        name='neutron',
        etype='network',
        description='OpenStack Network Service',
        url_base=url_base + ':9696/')
    self.keystone.register_service_admin_user('neutron')
    self.keystone.register_service_admin_user('nova_for_neutron')

    neutrons = self.hosts_with_any_service(set(self.services.keys()))
    self.messaging.populate_peer(neutrons)
    self.sql.register_user_with_schemas('neutron', ['neutron'])
    self.sql.populate_peer(neutrons, ['client'])  # TODO: maybe not all nodes need it
    secret_service = self.find_nova_comp_shared()
    util.bless_with_principal(neutrons,
                              [(self.keystone, 'neutron@default'),
                               (self.keystone, 'nova_for_neutron@default'),
                               (self.sql, 'neutron'),
                               (secret_service, 'neutron_nova_metadata'),
                               (self.messaging.name, 'openstack')])
def compose(self):
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    super(Cinder, self).compose()
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()

    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinder',
        etype='volume',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v1/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinderv2',
        etype='volumev2',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v2/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinderv3',
        etype='volumev3',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v3/$(tenant_id)s')
    self.keystone.register_service_admin_user('cinder')

    cins = self.hosts_with_any_service(set(self.services.keys()))
    self.sql.register_user_with_schemas('cinder', ['cinder'])
    self.sql.populate_peer(cins, ['client'])
    self.messaging.populate_peer(cins)
    util.bless_with_principal(cins,
                              [(self.keystone.name, 'cinder@default'),
                               (self.messaging.name, 'openstack'),
                               (self.sql.name, 'cinder')])
def compose(self):
    super(Keystone, self).compose()
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()

    self.register_endpoints(region=dr,
                            name='keystone',
                            etype='identity',
                            description='OpenStack Identity',
                            eps={'admin': url_base + ':35357',
                                 'internal': url_base + ':5000',
                                 'public': url_base + ':5000'})
    self.register_project_in_domain('Default', 'admin',
                                    'members are full admins')
    self.register_user_in_domain(
        'Default', 'admin',
        password=util.get_keymgr()(self.name, 'admin@default'),
        project_roles={('Default', 'admin'): ['admin']})

    keystones = self.hosts_with_service('keystone')
    self.sql.populate_peer(keystones, ['client'])
    sql = self.sql
    sql.register_user_with_schemas('keystone', ['keystone'])
    util.bless_with_principal(keystones,
                              [(self.name, 'admin@default'),
                               (sql.name, 'keystone')])
def etc_nova_nova_conf(self):
    # NOTE! mariadb.db_url is not required on compute nodes when
    # use_conductor is False
    gconf = conf.get_global_config()
    pv = conf.get_vip('public')['domain_name']

    neutron_section = self.keystone.authtoken_section('neutron_for_nova')
    neutron_section.update({
        'service_metadata_proxy': True,
        'metadata_proxy_shared_secret':
            util.get_keymgr()([self, self.networking],
                              'neutron_nova_metadata')
    })
    # add dual suffix
    if util.get_keymanager().has_creds(self.keystone.name,
                                       'placement@default'):
        placement_section = self.keystone.authtoken_section('placement')
    else:
        placement_section = {}

    # TODO: exclude sql on compute
    return {
        'DEFAULT': {
            'debug': True,
            'transport_url': self.messaging.transport_url(),
            'compute_driver': 'libvirt.LibvirtDriver',
            'use_neutron': True,
            'firewall_driver': "nova.virt.firewall.NoopFirewallDriver",
            'security_group_api': "neutron",
            'log_dir': '/var/log/nova',
            'default_floating_pool': "public",  # needs to match the ext net
            'state_path': '/var/lib/nova',
        },
        'keystone_authtoken': self.keystone.authtoken_section('nova'),
        'placement': placement_section,
        'database': {'connection': self.sql.db_url('nova')},
        'api_database': {'connection': self.sql.db_url('nova_api', 'nova')},
        'glance': {'api_servers': 'http://' + pv + ':9292'},
        'scheduler': {'discover_hosts_in_cells_interval': '300'},
        'neutron': neutron_section,
        # TODO: create a nova ceph user, with the same privileges
        'libvirt': {
            'rbd_user': '******',
            'rbd_secret_uuid': gconf['cinder_ceph_libvirt_secret_uuid'],
            'disk_cachemodes': "network=writeback",  # file=unsafe ?
            'virt_type': 'qemu',  # until nested virt is fixed
            'images_type': 'rbd',
            'images_rbd_pool': 'vms',
            'images_rbd_ceph_conf': '/etc/ceph/ceph.conf'
        },
        'filter_scheduler': {
            'enabled_filters':
                'RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,'
                'ComputeFilter,ComputeCapabilitiesFilter,'
                'ImagePropertiesFilter,ServerGroupAntiAffinityFilter,'
                'ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter'
        }  # tempest likes the SameHostFilter,DifferentHostFilter
    }
def etc_swift_container_sync_realms_conf(self):
    return {
        'realm1': {
            'key': 'realm1key',
            'cluster_name1': ('http://' +
                              conf.get_vip('public')['domain_name'] +
                              ':8080/v1/')
        }
    }
def etc_neutron_metadata_agent_ini(self):
    ivip = conf.get_vip('internal')['domain_name']
    return {
        'DEFAULT': {
            'nova_metadata_ip': ivip,
            'metadata_proxy_shared_secret':
                util.get_keymgr()(self.find_nova_comp_shared(),
                                  'neutron_nova_metadata')
        }
    }
def _keystone_authtoken_section(service_user):
    d = {
        "auth_url": ('http://' + conf.get_vip('public')['domain_name'] +
                     ':5000/'),
        "project_domain_name": 'Default',
        "project_name": 'service',
        "password": get_keymgr()('os', service_user + '@default'),
        "user_domain_name": 'Default',
        "username": service_user,
        "auth_type": 'password'
    }
    return d
def etc_swift_proxy_server_conf(self):
    pv = conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    filters = ['catch_errors', 'gatekeeper', 'healthcheck', 'proxy-logging',
               'memcache', 'container_sync', 'bulk', 'tempurl', 'ratelimit',
               's3token', 'crossdomain', 'authtoken', 'keystoneauth',
               'formpost', 'staticweb', 'container-quotas', 'account-quotas',
               'slo', 'dlo', 'versioned_writes']
    r = {}
    for f in filters:
        r['filter:' + f] = {'use': 'egg:swift#' + f.replace('-', '_')}
    r['filter:authtoken'] = self.keystone.authtoken_section('swift')
    r['filter:authtoken']['delay_auth_decision'] = 1
    r['filter:authtoken']['paste.filter_factory'] = (
        'keystonemiddleware.auth_token:filter_factory')
    r['filter:keystoneauth']['operator_roles'] = "user, admin"
    r['filter:keystoneauth']['reseller_admin_role'] = "admin"
    proxy_ip = self.get_addr_for(self.get_this_inv(), 'internal_listen',
                                 service=self.services['swift-proxy'],
                                 net_attr='swift_proxy_network')
    pipeline_str = ' '.join(filters + ['proxy-logging', 'proxy-server'])
    r.update({
        'DEFAULT': {'bind_port': 8080, 'bind_ip': proxy_ip},
        'pipeline:main': {'pipeline': pipeline_str},
        'app:proxy-server': {'use': 'egg:swift#proxy',
                             'account_autocreate': True},
        'filter:s3token': {
            'paste.filter_factory':
                'keystonemiddleware.s3_token:filter_factory',
            'auth_port': 5000,
            'auth_host': pv,
            'admin_user': '******',
            'admin_tenant_name': 'service'
        },
        'filter:swift3': {'use': 'egg:swift3#swift3', 'location': dr}
    })
    return r
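# For reference (derived from the filters list above, not present in the
# original source): the rendered [pipeline:main] value is simply the filters
# joined in order, followed by proxy-logging and proxy-server. Wrapped here
# for readability; in the ini file it is a single line:
#
#   pipeline = catch_errors gatekeeper healthcheck proxy-logging memcache
#              container_sync bulk tempurl ratelimit s3token crossdomain
#              authtoken keystoneauth formpost staticweb container-quotas
#              account-quotas slo dlo versioned_writes proxy-logging
#              proxy-server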
def populate_peer(self, nodes):
    port = 35357
    if not self.peer_info:
        hostname = addr = conf.get_vip('internal')['domain_name']
        self.peer_info['client'] = {'hostname': hostname,
                                    'addr': addr,
                                    'port': port}
    for n in nodes:
        node = self.get_node(n)
        node['peers']['keystone'] = self.peer_info
def compose(self):
    super(Swift, self).compose()
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()

    self.keystone.register_endpoint_tri(
        region=dr,
        name='swift',
        etype='object-store',
        description='Swift Storage Service',
        url_base=url_base + ':8080/v1/AUTH_$(tenant_id)s')
    self.keystone.register_service_admin_user('swift')

    sp = self.hosts_with_service('swift-proxy')
    util.bless_with_principal(sp, [(self.keystone.name, 'swift@default')])
def authtoken_section(self, service_user):
    # OpenStack ini files handle '%' specially, so escape just the password,
    # and just here (the lower layer does not escape at the moment)
    pwd = util.get_keymgr()(self.name, service_user + '@default')
    pwd = pwd.replace('%', '%%')
    d = {
        "auth_url": ('http://' + conf.get_vip('public')['domain_name'] +
                     ':5000/'),
        "project_domain_name": 'Default',
        "project_name": 'service',
        "password": pwd,
        "user_domain_name": 'Default',
        "username": service_user,
        "auth_type": 'password'
    }
    return d
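# Illustration only (not part of the original source): why the '%%' escaping
# above matters. oslo.config-style ini files use '%' for value interpolation,
# so a literal '%' in a stored secret has to be doubled before it is written
# into the rendered config file. The value below is a made-up example.
#
#   raw_pwd = 'p%ss'                      # hypothetical secret from the key manager
#   escaped = raw_pwd.replace('%', '%%')  # -> 'p%%ss', safe to place in an ini value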
def userrc_script(user, project=None, domain='default'):
    if not project:
        project = user
    pwd = get_keymgr()('keystone', '@'.join((user, domain)))  # TODO: multikeystone
    return """export OS_PROJECT_DOMAIN_ID={domain}
export OS_USER_DOMAIN_ID={domain}
export OS_PROJECT_NAME={project}
export OS_TENANT_NAME={project}
export OS_USERNAME={user}
export OS_PASSWORD='{pwd}'
export OS_AUTH_URL=http://{vip}:35357/v3
export OS_IDENTITY_API_VERSION=3;""".format(
        pwd=pwd,
        vip=conf.get_vip('public')['domain_name'],
        user=user,
        project=project,
        domain=domain)
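# Hypothetical usage (not in the original module; the file path is an
# assumption): dump an RC file for the admin user so it can be sourced
# before running openstack CLI commands.
#
#   with open('/root/admin-openrc.sh', 'w') as f:
#       f.write(userrc_script('admin'))
#
# Then: `source /root/admin-openrc.sh && openstack token issue`.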
def etc_cinder_cinder_conf(self):
    gconf = conf.get_global_config()
    return {
        'DEFAULT': {
            'debug': True,
            'glance_api_version': 2,
            'enabled_backends': 'ceph',
            'default_volume_type': 'ceph',
            'backup_swift_url': ('http://' +
                                 conf.get_vip('public')['domain_name'] +
                                 ':8080/v1/AUTH_'),
            'transport_url': self.messaging.transport_url()
        },
        'database': {'connection': self.sql.db_url('cinder')},
        'keystone_authtoken': self.keystone.authtoken_section('cinder'),
        'oslo_concurrency': {'lock_path': '$state_path/lock'},
        'ceph': {
            'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
            'rbd_pool': 'volumes',
            'rbd_user': '******',
            'rbd_ceph_conf': '/etc/ceph/ceph.conf',
            'volume_backend_name': 'ceph',
            'rbd_secret_uuid': gconf['cinder_ceph_libvirt_secret_uuid']
        }
    }
def compose(self):
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    super(Nova, self).compose()
    pv = conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    url_base = "http://" + pv

    self.keystone.register_endpoint_tri(
        region=dr,
        name='nova',
        etype='compute',
        description='OpenStack Compute Service',
        url_base=url_base + ':8774/v2.1/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='placement',
        etype='placement',
        description='OpenStack Nova Placement Service',
        url_base=url_base + ':8780')
    self.keystone.register_service_admin_user('nova')
    self.keystone.register_service_admin_user('placement')
    self.keystone.register_service_admin_user('neutron_for_nova')

    # TODO: revisit which components need what and skip the rest from the cfg
    rh = self.hosts_with_any_service({'nova-api', 'nova-compute',
                                      'nova-scheduler', 'nova-conductor',
                                      'nova-cells'})
    self.messaging.populate_peer(rh)
    n_srv = set(self.services.keys())
    novas = self.hosts_with_any_service(n_srv)
    self.sql.register_user_with_schemas(
        'nova', ['nova', 'nova_api', 'nova_cell0'])  # TODO: use the cell deps
    util.bless_with_principal(
        novas,
        [(self.keystone, 'nova@default'),
         (self.keystone, 'neutron_for_nova@default'),
         ([self, self.networking], 'neutron_nova_metadata'),
         (self.sql, 'nova'),
         (self.messaging, 'openstack')])
    util.bless_with_principal(
        novas, [(self.keystone.name, 'placement@default')])  # n-cpu uses it
    self.sql.populate_peer(rh, ['client'])  # TODO: maybe not all nodes need it
def compose(self):
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    super(Placement, self).compose()
    pv = conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    url_base = "http://" + pv

    self.keystone.register_endpoint_tri(
        region=dr,
        name='placement',
        etype='placement',
        description='OpenStack Nova Placement Service',
        url_base=url_base + ':8780')
    self.keystone.register_service_admin_user('placement')

    placements = self.hosts_with_service('placement-api')
    self.sql.register_user_with_schemas('placement', ['placement'])
    util.bless_with_principal(placements,
                              [(self.keystone, 'placement@default'),
                               (self.sql, 'placement')])
def populate_peer(self, nodes, modes):
    port = 3306
    if not self.peer_info:
        self.peer_info['cluster'] = self.get_cluster_info()
        balancer = self.get_balancer()
        if balancer:
            port = 13306  # use a different port with the vip
        hostname = addr = conf.get_vip('internal')['domain_name']
        self.peer_info['client'] = {'hostname': hostname,
                                    'addr': addr,
                                    'port': port}
    for n in nodes:
        node = self.get_node(n)
        peer_rec = node['peers'].setdefault('mariadb', {})
        if 'client' in modes:
            peer_rec['client'] = self.peer_info['client']
        if 'cluster' in modes:
            peer_rec['cluster'] = self.peer_info['cluster']
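# Sketch of the peer record shape this produces on each node (values are
# illustrative assumptions, not taken from a real inventory):
#
#   node['peers']['mariadb'] == {
#       'client': {'hostname': 'db.internal.example',
#                  'addr': 'db.internal.example',
#                  'port': 13306},   # 13306 when a balancer fronts the vip
#       'cluster': ...               # whatever get_cluster_info() returned
#   }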
def compose(self):
    super(Glance, self).compose()
    # it can consider the full inventory and config to influence the
    # facility's registered resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    glance_port = 9292
    glance_ha_port = 19292

    servers = []
    for b in self.get_beckend_list():
        servers.append(' '.join((b['hostname'],
                                 b['addr'] + ':' + str(glance_ha_port),
                                 'check')))

    gconf = conf.get_global_config()
    if 'haproxy' in gconf['global_service_flags']:
        self.haproxy.add_listener('glance', {
            'bind': '*:' + str(glance_port),
            'mode': 'http',
            'http-request': ['set-header X-Forwarded-Proto https if { ssl_fc }',
                             'set-header X-Forwarded-Proto http if !{ ssl_fc }'],
            'server': servers})

    self.keystone.register_endpoint_tri(
        region=dr,
        name='glance',
        etype='image',
        description='OpenStack Image Service',
        url_base=url_base + ':' + str(glance_port))
    # just auth or admin user ?
    self.keystone.register_service_admin_user('glance')

    g_srv = set(self.services.keys())
    glances = self.hosts_with_any_service(g_srv)
    self.sql.register_user_with_schemas('glance', ['glance'])
    self.sql.populate_peer(glances, ['client'])
    util.bless_with_principal(glances,
                              [(self.keystone.name, 'glance@default'),
                               (self.sql.name, 'glance'),
                               (self.messaging.name, 'openstack')])
def gen_tempest_conf(self, image_ref, image_ref_alt, public_network_id,
                     min_compute_nodes=1):
    pwd = util.get_keymgr()(self.keystone.name, 'admin@default')
    auth_url = ''.join(('http://', conf.get_vip('public')['domain_name'],
                        ':35357/v3'))
    gconf = conf.get_global_config()
    service_flags = gconf['global_service_flags']
    return {
        'DEFAULT': {'debug': True, 'log_file': 'tempest.log'},
        'auth': {
            'tempest_roles': 'user',
            'admin_username': '******',
            'admin_project_name': 'admin',
            'admin_domain_name': 'Default',
            'admin_password': pwd
        },
        'compute': {
            'flavor_ref': 42,
            'flavor_ref_alt': 84,
            'image_ref': image_ref,
            'image_ref_alt': image_ref_alt,
            'min_compute_nodes': min_compute_nodes,
            'max_microversion': 'latest'
        },
        'compute-feature-enabled': {'attach_encrypted_volume': False},
        'network': {
            'floating_network_name': 'public',
            'public_network_id': public_network_id
        },
        'scenario': {'img_dir': 'etc', 'img_file': 'cirros.img'},
        'validation': {'image_ssh_user': '******'},
        'object-storage': {
            'reseller_admin_role': 'admin',
            'operator_role': 'user'
        },
        'oslo-concurrency': {'lock_path': '/tmp'},
        'image': {'image_path': img_url, 'http_image': img_url},
        'identity': {'uri': auth_url, 'uri_v3': auth_url},
        'volume': {'storage_protocol': 'ceph', 'max_microversion': 'latest'},
        'service_available': {
            'horizon': 'horizon' in service_flags,
            'cinder': 'cinder-api' in service_flags,
            'nova': 'nova-api' in service_flags,
            'neutron': 'neutron-server' in service_flags,
            'glance': 'glance-api' in service_flags,
            'heat': 'heat-api' in service_flags,
            'ironic': 'ironic-api' in service_flags,
            'zaqar': 'zaqar' in service_flags,
            'swift': 'swift-proxy' in service_flags
        }
    }
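# Hypothetical call site (not in the original source; the variable name and
# all argument values are placeholders, not real IDs):
#
#   tempest_conf = runner.gen_tempest_conf(
#       image_ref='<glance-image-uuid>',
#       image_ref_alt='<alternate-glance-image-uuid>',
#       public_network_id='<neutron-public-net-uuid>',
#       min_compute_nodes=2)
#
# The returned dict is presumably rendered into tempest.conf by the same
# ini-writing layer used for the other etc_* methods above.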