def compose(self):
    super(Neutron, self).compose()
    # it can consider the full inventory and config to influence
    # the registered facility resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    self.keystone.register_endpoint_tri(
        region=dr,
        name='neutron',
        etype='network',
        description='OpenStack Network Service',
        url_base=url_base + ':9696/')
    self.keystone.register_service_admin_user('neutron')
    self.keystone.register_service_admin_user('nova_for_neutron')
    neutrons = self.hosts_with_any_service(set(self.services.keys()))
    self.messaging.populate_peer(neutrons)
    self.sql.register_user_with_schemas('neutron', ['neutron'])
    self.sql.populate_peer(neutrons, ['client'])  # TODO: maybe not all nodes need it
    secret_service = self.find_nova_comp_shared()
    util.bless_with_principal(
        neutrons,
        [(self.keystone, 'neutron@default'),
         (self.keystone, 'nova_for_neutron@default'),
         (self.sql, 'neutron'),
         (secret_service, 'neutron_nova_metadata'),
         (self.messaging.name, 'openstack')])
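
# A minimal sketch of the hosts_with_any_service contract relied on above:
# select every inventory host running at least one of the named services.
# The real implementation lives in the component base class; the inventory
# shape used here is a hypothetical stand-in.
def hosts_with_any_service_sketch(inventory, services):
    # inventory: {hostname: set of service names on that host} (assumed shape)
    return {host for host, running in inventory.items()
            if running & set(services)}
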
def compose(self):
    # it can consider the full inventory and config to influence
    # the registered facility resources
    super(Cinder, self).compose()
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinder',
        etype='volume',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v1/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinderv2',
        etype='volumev2',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v2/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='cinderv3',
        etype='volumev3',
        description='OpenStack Volume Service',
        url_base=url_base + ':8776/v3/$(tenant_id)s')
    self.keystone.register_service_admin_user('cinder')
    cins = self.hosts_with_any_service(set(self.services.keys()))
    self.sql.register_user_with_schemas('cinder', ['cinder'])
    self.sql.populate_peer(cins, ['client'])
    self.messaging.populate_peer(cins)
    util.bless_with_principal(cins, [(self.keystone.name, 'cinder@default'),
                                     (self.messaging.name, 'openstack'),
                                     (self.sql.name, 'cinder')])
def compose(self):
    super(Keystone, self).compose()
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    self.register_endpoints(region=dr,
                            name='keystone',
                            etype='identity',
                            description='OpenStack Identity',
                            eps={'admin': url_base + ':35357',
                                 'internal': url_base + ':5000',
                                 'public': url_base + ':5000'})
    self.register_project_in_domain('Default', 'admin',
                                    'members are full admins')
    self.register_user_in_domain(
        'Default', 'admin',
        password=util.get_keymgr()(self.name, 'admin@default'),
        project_roles={('Default', 'admin'): ['admin']})
    keystones = self.hosts_with_service('keystone')
    self.sql.populate_peer(keystones, ['client'])
    self.sql.register_user_with_schemas('keystone', ['keystone'])
    util.bless_with_principal(keystones, [(self.name, 'admin@default'),
                                          (self.sql.name, 'keystone')])
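
# A minimal sketch of what register_endpoint_tri is assumed to do, inferred
# from the explicit register_endpoints call above: fan a single url_base out
# to the admin, internal and public interfaces. The helper's actual
# signature may differ.
def register_endpoint_tri(self, region, name, etype, description, url_base):
    self.register_endpoints(region=region, name=name, etype=etype,
                            description=description,
                            eps={'admin': url_base,
                                 'internal': url_base,
                                 'public': url_base})
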
def compose(self):
    super(MariaDB, self).compose()
    h = self.hosts_with_service('mariadb')
    self.populate_peer(h, ['cluster'])
    ci = self.get_cluster_info()
    servers = []
    if len(h) > 1:
        check = ' check inter 3s on-marked-down shutdown-sessions port 9200'
    else:
        check = ''
    for i in ci:
        servers.append(' '.join((i['hostname'],
                                 i['addr'] + ':' + str(i['port']),
                                 'backup' + check)))
    balancer = self.get_balancer()
    if balancer:
        if util.get_distro()['family'] == 'debian':
            # the galera packages do not ship a cluster checker
            # TODO: support more mysql variants
            option = ['tcpka']
        else:
            option = ['tcpka', 'httpchk']
        balancer.add_listener('mariadb', {
            'bind': '*:13306',
            'stick': 'on dst',
            'stick-table': 'type ip size 1024',
            'option': option,
            'timeout': {'client': '128m', 'server': '128m'},
            'server': servers})
    # clustercheckuser is allowed from localhost only
    util.bless_with_principal(h, [(self.name, 'clustercheckuser')])
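
# For reference, with a multi-node cluster the server list built in
# MariaDB.compose renders into haproxy 'server' lines like the following
# (hostnames and addresses are illustrative):
#   db-0 192.0.2.10:3306 backup check inter 3s on-marked-down shutdown-sessions port 9200
#   db-1 192.0.2.11:3306 backup check inter 3s on-marked-down shutdown-sessions port 9200
# Port 9200 is the conventional galera clustercheck port, which is also why
# 'httpchk' is only enabled where a cluster checker is packaged.
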
def compose(self):
    super(Swift, self).compose()
    # it can consider the full inventory and config to influence
    # the registered facility resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    self.keystone.register_endpoint_tri(
        region=dr,
        name='swift',
        etype='object-store',
        description='Swift Storage Service',
        url_base=url_base + ':8080/v1/AUTH_$(tenant_id)s')
    self.keystone.register_service_admin_user('swift')
    sp = self.hosts_with_service('swift-proxy')
    util.bless_with_principal(sp, [(self.keystone.name, 'swift@default')])
def compose(self):
    # it can consider the full inventory and config to influence
    # the registered facility resources
    super(Nova, self).compose()
    pv = conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    url_base = "http://" + pv
    self.keystone.register_endpoint_tri(
        region=dr,
        name='nova',
        etype='compute',
        description='OpenStack Compute Service',
        url_base=url_base + ':8774/v2.1/$(tenant_id)s')
    self.keystone.register_endpoint_tri(
        region=dr,
        name='placement',
        etype='placement',
        description='OpenStack Nova Placement Service',
        url_base=url_base + ':8780')
    self.keystone.register_service_admin_user('nova')
    self.keystone.register_service_admin_user('placement')
    self.keystone.register_service_admin_user('neutron_for_nova')
    # TODO: revisit which components need what and skip it from cfg
    rh = self.hosts_with_any_service({'nova-api', 'nova-compute',
                                      'nova-scheduler', 'nova-conductor',
                                      'nova-cells'})
    self.messaging.populate_peer(rh)
    n_srv = set(self.services.keys())
    novas = self.hosts_with_any_service(n_srv)
    self.sql.register_user_with_schemas('nova',
                                        ['nova', 'nova_api', 'nova_cell0'])
    # TODO: use the cell deps
    util.bless_with_principal(
        novas,
        [(self.keystone, 'nova@default'),
         (self.keystone, 'neutron_for_nova@default'),
         ([self, self.networking], 'neutron_nova_metadata'),
         (self.sql, 'nova'),
         (self.messaging, 'openstack')])
    util.bless_with_principal(
        novas, [(self.keystone.name, 'placement@default')])  # n-cpu uses it
    self.sql.populate_peer(rh, ['client'])  # TODO: maybe not all nodes need it
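
# A rough sketch of the bless_with_principal contract as used throughout
# these compose() methods. Each (service, principal) pair names a credential
# in the key manager (compare util.get_keymgr()(self.name, 'admin@default')
# in Keystone.compose); blessing a node set makes those credentials
# available on exactly those nodes. The shapes below are assumptions, not
# the real implementation.
def bless_with_principal_sketch(nodes, pairs):
    keymgr = util.get_keymgr()
    grants = {}
    for service, principal in pairs:
        # callers pass either the component object or its name; the list
        # form used for shared secrets is not handled in this sketch
        name = service if isinstance(service, str) else service.name
        # create-or-fetch the credential for this service/principal pair
        grants[(name, principal)] = keymgr(name, principal)
    # associate the collected credentials with each blessed node
    return {node: grants for node in nodes}
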
def compose(self):
    # it can consider the full inventory and config to influence
    # the registered facility resources
    super(Placement, self).compose()
    pv = conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    url_base = "http://" + pv
    self.keystone.register_endpoint_tri(
        region=dr,
        name='placement',
        etype='placement',
        description='OpenStack Nova Placement Service',
        url_base=url_base + ':8780')
    self.keystone.register_service_admin_user('placement')
    placements = self.hosts_with_service('placement-api')
    self.sql.register_user_with_schemas('placement', ['placement'])
    util.bless_with_principal(placements,
                              [(self.keystone, 'placement@default'),
                               (self.sql, 'placement')])
def compose(self):
    super(Glance, self).compose()
    # it can consider the full inventory and config to influence
    # the registered facility resources
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    glance_port = 9292
    glance_ha_port = 19292
    servers = []
    for b in self.get_beckend_list():
        servers.append(' '.join((b['hostname'],
                                 b['addr'] + ':' + str(glance_ha_port),
                                 'check')))
    gconf = conf.get_global_config()
    if 'haproxy' in gconf['global_service_flags']:
        self.haproxy.add_listener('glance', {
            'bind': '*:' + str(glance_port),
            'mode': 'http',
            'http-request': ['set-header X-Forwarded-Proto https if { ssl_fc }',
                             'set-header X-Forwarded-Proto http if !{ ssl_fc }'],
            'server': servers})
    self.keystone.register_endpoint_tri(
        region=dr,
        name='glance',
        etype='image',
        description='OpenStack Image Service',
        url_base=url_base + ':' + str(glance_port))
    # TODO: just an auth user or an admin user?
    self.keystone.register_service_admin_user('glance')
    g_srv = set(self.services.keys())
    glances = self.hosts_with_any_service(g_srv)
    self.sql.register_user_with_schemas('glance', ['glance'])
    self.sql.populate_peer(glances, ['client'])
    util.bless_with_principal(glances,
                              [(self.keystone.name, 'glance@default'),
                               (self.sql.name, 'glance'),
                               (self.messaging.name, 'openstack')])
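
# The listener registered above is expected to render into an haproxy
# section roughly like this (illustrative; the exact output depends on the
# balancer's add_listener rendering):
#   listen glance
#       bind *:9292
#       mode http
#       http-request set-header X-Forwarded-Proto https if { ssl_fc }
#       http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
#       server <hostname> <addr>:19292 check
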
def compose(self):
    super(RabbitMQ, self).compose()
    rh = self.hosts_with_service('rabbit')
    self.populate_peer(rh)
    util.bless_with_principal(rh, [(self.name, 'openstack')])
def compose(self):
    super(Tempest, self).compose()
    tempest_nodes = self.hosts_with_component('tempest')
    util.bless_with_principal(tempest_nodes,
                              [(self.keystone.name, 'admin@default')])