def test_ceph_related_icehouse(self):
    """CephContext on icehouse renders the legacy flat driver keys.

    Pre-ocata releases configure rbd directly in [DEFAULT], so the
    context exposes ``volume_driver`` rather than the per-backend
    ``ceph_volume_driver`` key used from ocata onwards.
    """
    self.relation_ids.return_value = ['ceph:0']
    self.os_release.return_value = 'icehouse'
    service = 'mycinder'
    self.service_name.return_value = service
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual for consistency with the sibling ceph tests.
    self.assertEqual(
        contexts.CephContext()(),
        {'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
         'rbd_pool': service,
         'rbd_user': service,
         'host': service})
def test_ceph_related_ocata(self):
    """CephContext on ocata emits the per-backend rbd keys.

    From ocata onwards the driver key is ``ceph_volume_driver`` and a
    dedicated per-charm ceph.conf path is included in the context.
    """
    backend = 'mycinder'
    self.relation_ids.return_value = ['ceph:0']
    self.os_release.return_value = 'ocata'
    self.service_name.return_value = backend
    expected = {
        'ceph_volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
        'rbd_pool': backend,
        'rbd_user': backend,
        'rbd_ceph_conf': '/var/lib/charm/mycinder/ceph.conf',
        'host': backend,
    }
    self.assertEqual(contexts.CephContext()(), expected)
_interfaces.pop('identity') return _interfaces # Map config files to hook contexts and services that will be associated # with file in restart_on_changes()'s service map. BASE_RESOURCE_MAP = OrderedDict([ (CINDER_CONF, { 'contexts': [ context.SharedDBContext(ssl_dir=CINDER_CONF_DIR), context.AMQPContext(ssl_dir=CINDER_CONF_DIR), context.ImageServiceContext(), context.OSConfigFlagContext(), context.SyslogContext(), cinder_contexts.CephContext(), cinder_contexts.HAProxyContext(), cinder_contexts.ImageServiceContext(), cinder_contexts.CinderSubordinateConfigContext( interface=['storage-backend', 'backup-backend'], service='cinder', config_file=CINDER_CONF), cinder_contexts.StorageBackendContext(), cinder_contexts.LoggingConfigContext(), context.IdentityServiceContext(service='cinder', service_user='******'), context.BindHostContext(), context.WorkerConfigContext(), cinder_contexts.RegionContext(), context.InternalEndpointContext(), cinder_contexts.VolumeUsageAuditContext(),
def test_ceph_not_related(self):
    """Without a ceph relation the context renders nothing."""
    self.relation_ids.return_value = []
    rendered = contexts.CephContext()()
    self.assertEqual({}, rendered)