def identity_changed(relation_id=None, remote_unit=None):
    notifications_checksums = {}
    notifications_endpoints = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log("identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring identity-relation "
                "updates", level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')

        settings = relation_get(rid=relation_id, unit=remote_unit)

        # If endpoint has changed, notify to units related over the
        # identity-notifications interface. We base the decision to notify on
        # whether admin_url, public_url or internal_url have changed from
        # previous notify.
        service = settings.get('service')
        if service:
            key = '%s-endpoint-changed' % service
            notifications_endpoints[key] = endpoints_dict(settings)
            notifications_checksums[key] = endpoints_checksum(settings)
        else:
            # Some services don't set their name in the 'service' key in the
            # relation, for those their name is calculated from the prefix of
            # keys. See `assemble_endpoints()` for details.
            single = {'service', 'region', 'public_url', 'admin_url',
                      'internal_url'}
            endpoints = assemble_endpoints(settings)
            for ep in endpoints.keys():
                if single.issubset(endpoints[ep]):
                    key = '%s-endpoint-changed' % ep
                    log('endpoint: %s' % ep)
                    notifications_endpoints[key] = (
                        endpoints_dict(endpoints[ep]))
                    notifications_checksums[key] = (
                        endpoints_checksum(endpoints[ep]))
    else:
        log('Deferring identity_changed() to service leader.')

    if notifications_endpoints or notifications_checksums:
        send_notifications(notifications_checksums, notifications_endpoints)
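# Illustrative sketch only: `assemble_endpoints()`, `endpoints_dict()` and
# `endpoints_checksum()` are referenced above but not shown here, so the
# implementations below are assumptions. `assemble_endpoints()` groups
# prefixed relation keys (e.g. 'ec2_public_url') into per-service dicts, and
# `endpoints_checksum()` hashes the three endpoint URLs so that a change to
# any of them yields a new checksum (the same sha256-over-URLs idea used by
# the older identity_changed() variant below).
import hashlib


def assemble_endpoints(settings):
    """Group '<prefix>_<field>' keys into {prefix: {field: value}}."""
    endpoints = {}
    for key, value in settings.items():
        prefix, _, field = key.partition('_')
        endpoints.setdefault(prefix, {})[field] = value
    return endpoints


def endpoints_dict(settings):
    """Return only the endpoint URLs for a single service."""
    return {k: settings.get(k)
            for k in ('public_url', 'admin_url', 'internal_url')}


def endpoints_checksum(settings):
    """Checksum the endpoint URLs so a change can be detected."""
    csum = hashlib.sha256()
    for k in ('public_url', 'admin_url', 'internal_url'):
        csum.update(str(settings.get(k)).encode('utf-8'))
    return csum.hexdigest()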
def identity_changed(relation_id=None, remote_unit=None):
    CONFIGS.write_all()

    notifications = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log("identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring identity-relation "
                "updates", level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')

        settings = relation_get(rid=relation_id, unit=remote_unit)
        service = settings.get('service', None)
        if service:
            # If service is known and endpoint has changed, notify service if
            # it is related with notifications interface.
            csum = hashlib.sha256()
            # We base the decision to notify on whether these parameters have
            # changed (if csum is unchanged from previous notify, relation
            # will not fire).
            csum.update(settings.get('public_url', None))
            csum.update(settings.get('admin_url', None))
            csum.update(settings.get('internal_url', None))
            notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
    else:
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('identity-service'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            # Ensure the null'd settings are unset in the relation.
            peerdb_settings = filter_null(peerdb_settings)
            if 'service_password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('Deferring identity_changed() to service leader.')

    if notifications:
        send_notifications(notifications)
def test_add_service_to_keystone_clustered_https_none_values(
        self, b64encode, _resolve_address, _get_manager):
    relation_id = 'identity-service:0'
    remote_unit = 'unit/0'
    _resolve_address.return_value = '10.10.10.10'
    self.https.return_value = True
    self.test_config.set('https-service-endpoints', 'True')
    self.test_config.set('vip', '10.10.10.10')
    self.test_config.set('admin-port', 80)
    self.test_config.set('service-port', 81)
    b64encode.return_value = 'certificate'
    self.get_requested_roles.return_value = ['role1', ]
    self.relation_get.return_value = {'service': 'keystone',
                                      'region': 'RegionOne',
                                      'public_url': 'None',
                                      'admin_url': '10.0.0.2',
                                      'internal_url': '192.168.1.2'}
    utils.add_service_to_keystone(relation_id=relation_id,
                                  remote_unit=remote_unit)
    self.assertTrue(self.https.called)
    self.assertTrue(self.create_role.called)
    relation_data = {'auth_host': '10.10.10.10',
                     'service_host': '10.10.10.10',
                     'auth_protocol': 'https',
                     'service_protocol': 'https',
                     'auth_port': 80,
                     'service_port': 81,
                     'https_keystone': 'True',
                     'ca_cert': 'certificate',
                     'region': 'RegionOne'}
    self.peer_store_and_set.assert_called_with(relation_id=relation_id,
                                               **relation_data)
def test_add_service_to_keystone_nosubset(
        self, KeystoneManager, add_endpoint, ensure_valid_service,
        ip_config):
    relation_id = 'identity-service:0'
    remote_unit = 'unit/0'
    self.relation_get.return_value = {'ec2_service': 'nova',
                                      'ec2_region': 'RegionOne',
                                      'ec2_public_url': '10.0.0.1',
                                      'ec2_admin_url': '10.0.0.2',
                                      'ec2_internal_url': '192.168.1.2'}
    self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
    KeystoneManager.resolve_tenant_id.return_value = 'tenant_id'
    utils.add_service_to_keystone(relation_id=relation_id,
                                  remote_unit=remote_unit)
    ensure_valid_service.assert_called_with('nova')
    add_endpoint.assert_called_with(region='RegionOne', service='nova',
                                    publicurl='10.0.0.1',
                                    adminurl='10.0.0.2',
                                    internalurl='192.168.1.2')
def test_add_service_to_keystone_clustered_https_none_values(
        self, b64encode, _resolve_address):
    relation_id = 'identity-service:0'
    remote_unit = 'unit/0'
    _resolve_address.return_value = '10.10.10.10'
    self.https.return_value = True
    self.test_config.set('https-service-endpoints', 'True')
    self.test_config.set('vip', '10.10.10.10')
    self.test_config.set('admin-port', 80)
    self.test_config.set('service-port', 81)
    b64encode.return_value = 'certificate'
    self.get_requested_roles.return_value = ['role1', ]
    self.relation_get.return_value = {'service': 'keystone',
                                      'region': 'RegionOne',
                                      'public_url': 'None',
                                      'admin_url': '10.0.0.2',
                                      'internal_url': '192.168.1.2'}
    utils.add_service_to_keystone(relation_id=relation_id,
                                  remote_unit=remote_unit)
    self.assertTrue(self.https.called)
    self.assertTrue(self.create_role.called)
    relation_data = {'auth_host': '10.10.10.10',
                     'service_host': '10.10.10.10',
                     'auth_protocol': 'https',
                     'service_protocol': 'https',
                     'auth_port': 80,
                     'service_port': 81,
                     'https_keystone': 'True',
                     'ca_cert': 'certificate',
                     'region': 'RegionOne'}
    self.peer_store_and_set.assert_called_with(relation_id=relation_id,
                                               **relation_data)
def test_add_service_to_keystone_no_clustered_no_https_complete_values(
        self, KeystoneManager, add_endpoint, ensure_valid_service,
        _resolve_address):
    relation_id = 'identity-service:0'
    remote_unit = 'unit/0'
    self.get_admin_token.return_value = 'token'
    self.get_service_password.return_value = 'password'
    self.test_config.set('service-tenant', 'tenant')
    self.test_config.set('admin-role', 'admin')
    self.get_requested_roles.return_value = ['role1', ]
    _resolve_address.return_value = '10.0.0.3'
    self.test_config.set('admin-port', 80)
    self.test_config.set('service-port', 81)
    self.https.return_value = False
    self.test_config.set('https-service-endpoints', 'False')
    self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
    self.relation_ids.return_value = ['cluster/0']
    mock_keystone = MagicMock()
    mock_keystone.resolve_tenant_id.return_value = 'tenant_id'
    KeystoneManager.return_value = mock_keystone
    self.relation_get.return_value = {'service': 'keystone',
                                      'region': 'RegionOne',
                                      'public_url': '10.0.0.1',
                                      'admin_url': '10.0.0.2',
                                      'internal_url': '192.168.1.2'}
    utils.add_service_to_keystone(relation_id=relation_id,
                                  remote_unit=remote_unit)
    ensure_valid_service.assert_called_with('keystone')
    add_endpoint.assert_called_with(region='RegionOne', service='keystone',
                                    publicurl='10.0.0.1',
                                    adminurl='10.0.0.2',
                                    internalurl='192.168.1.2')
    self.assertTrue(self.get_admin_token.called)
    self.get_service_password.assert_called_with('keystone')
    self.create_user.assert_called_with('keystone', 'password', 'tenant')
    self.grant_role.assert_called_with('keystone', 'admin', 'tenant')
    self.create_role.assert_called_with('role1', 'keystone', 'tenant')
    relation_data = {'auth_host': '10.0.0.3',
                     'service_host': '10.0.0.3',
                     'admin_token': 'token',
                     'service_port': 81,
                     'auth_port': 80,
                     'service_username': '******',
                     'service_password': '******',
                     'service_tenant': 'tenant',
                     'https_keystone': '__null__',
                     'ssl_cert': '__null__',
                     'ssl_key': '__null__',
                     'ca_cert': '__null__',
                     'auth_protocol': 'http',
                     'service_protocol': 'http',
                     'service_tenant_id': 'tenant_id'}
    filtered = {}
    for k, v in relation_data.iteritems():
        if v == '__null__':
            filtered[k] = None
        else:
            filtered[k] = v

    self.assertTrue(self.relation_set.called)
    self.peer_store_and_set.assert_called_with(relation_id=relation_id,
                                               **relation_data)
    self.relation_set.assert_called_with(relation_id=relation_id,
                                         **filtered)
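# Illustrative sketch only: filter_null() is called by identity_changed()
# above but its implementation is not shown here, so the version below is an
# assumption based on the preceding test, which expects peer_store_and_set()
# to receive the literal '__null__' markers while relation_set() receives
# None for those keys, so they are unset on the relation rather than stored
# as a string.
def filter_null(settings, null='__null__'):
    """Replace the '__null__' marker with None so relation_set() unsets it."""
    return {k: (None if v == null else v) for k, v in settings.items()}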