def _updateDataStore(configDict):
    """
    Post-update configuration hook for making all configured paths relative to
    their respective root directories rather than the current working directory.
    """
    # RELATIVE_PATHS is a module-level sequence of (rootKey, pathKey) pairs
    # naming config entries whose path values should be anchored at rootKey.
    for root, relativePath in RELATIVE_PATHS:
        if root in configDict and relativePath in configDict:
            # Shadow keys used to remember what we previously did to this entry.
            previousAbsoluteName = ".absolute." + relativePath
            previousRelativeName = ".relative." + relativePath

            # If we previously made the name absolute, and the name in the
            # config is still the same absolute name that we made it, let's
            # change it to be the relative name again.  (This is necessary
            # because the config data is actually updated several times before
            # the config *file* has been read, so these keys will be made
            # absolute based on default values, and need to be made relative to
            # non-default values later.)  -glyph
            if previousAbsoluteName in configDict and (
                configDict[previousAbsoluteName] == configDict[relativePath]
            ):
                userSpecifiedPath = configDict[previousRelativeName]
            else:
                # The user changed the value since we last absolutized it;
                # remember the new relative form before we overwrite it below.
                userSpecifiedPath = configDict[relativePath]
                configDict[previousRelativeName] = configDict[relativePath]

            # Store the absolute path under both the public key and the
            # ".absolute." shadow key so a later pass can detect our edit.
            newAbsolutePath = fullServerPath(configDict[root], userSpecifiedPath)
            configDict[relativePath] = newAbsolutePath
            configDict[previousAbsoluteName] = newAbsolutePath
def _updatePartitions(configDict):
    """
    Post-update configuration hook: (re)configure the module-level partitioning
    subsystem from the Partitioning section of the configuration, or reset it
    entirely when partitioning is disabled.
    """
    if configDict.Partitioning.Enabled:
        partitions.setSelfPartition(configDict.Partitioning.ServerPartitionID)
        partitions.setMaxClients(configDict.Partitioning.MaxClients)
        # The partition config file path is resolved relative to ConfigRoot.
        partitions.readConfig(fullServerPath(configDict.ConfigRoot, configDict.Partitioning.PartitionConfigFile))
        partitions.installReverseProxies()
    else:
        partitions.clear()
def __init__(self, params, alwaysStat=False):
    """
    Initialize an XML-file-backed directory service.

    Resolves the XML accounts file under config.DataRoot, creates an empty
    accounts file if none exists, optionally chowns it to the configured
    service user/group, then loads the accounts.

    @param params: service parameters; unknown keys are filtered by getParams.
    @param alwaysStat: if True, stat the file on every access rather than
        honoring the cache interval.
    """
    defaults = {
        'xmlFile': None,
        'directoryBackedAddressBook': None,
        'recordTypes': (
            self.recordType_users,
            self.recordType_groups,
            self.recordType_locations,
            self.recordType_resources,
        ),
        'cacheTimeout': 30,
        'realmName': '/Search',
    }
    ignored = None
    params = self.getParams(params, defaults, ignored)

    self._recordTypes = params['recordTypes']
    self.realmName = params['realmName']

    super(XMLDirectoryService, self).__init__(params['cacheTimeout'])

    xmlFile = fullServerPath(config.DataRoot, params.get("xmlFile"))
    # A plain string path is wrapped; a FilePath may also be passed through.
    # NOTE(review): `type(...) is str` would miss unicode/str subclasses —
    # isinstance would be safer; confirm callers before changing.
    if type(xmlFile) is str:
        xmlFile = FilePath(xmlFile)

    if not xmlFile.exists():
        # Seed an empty accounts file for this realm.
        xmlFile.setContent("""<?xml version="1.0" encoding="utf-8"?>

<accounts realm="%s">
</accounts>
""" % (self.realmName,))

    # Resolve the configured service user/group; -1 means "not found/unset".
    uid = -1
    if config.UserName:
        try:
            uid = pwd.getpwnam(config.UserName).pw_uid
        except KeyError:
            self.log_error("User not found: %s" % (config.UserName,))

    gid = -1
    if config.GroupName:
        try:
            gid = grp.getgrnam(config.GroupName).gr_gid
        except KeyError:
            self.log_error("Group not found: %s" % (config.GroupName,))

    # NOTE(review): ownership is only changed when BOTH uid and gid resolved;
    # os.chown accepts -1 for "leave unchanged", so a partial chown would be
    # possible — confirm the all-or-nothing behavior is intended.
    if uid != -1 and gid != -1:
        os.chown(xmlFile.path, uid, gid)

    self.xmlFile = xmlFile
    self._fileInfo = None       # (mtime, size) of the last parse, set later
    self._lastCheck = 0         # timestamp of the last stat check
    self._alwaysStat = alwaysStat
    self.directoryBackedAddressBook = params.get('directoryBackedAddressBook')
    self._accounts()
def __init__(self, xmlFile):
    """
    Parse an XML data file located under config.DataRoot.

    On a parse failure the error is logged via log.error with
    raiseException=RuntimeError, which raises on the caller's behalf.

    @param xmlFile: path of the XML file, relative to config.DataRoot.
    """
    self.items = []
    self.xmlFile = fullServerPath(config.DataRoot, xmlFile)

    # Read in XML
    try:
        tree = ElementTree(file=self.xmlFile)
    # Fixed: `except ExpatError, e` is a syntax error under Python 3;
    # the `as` form is equivalent and works on Python 2.6+ as well.
    except ExpatError as e:
        log.error("Unable to parse file '%s' because: %s" % (self.xmlFile, e,), raiseException=RuntimeError)
def __init__(self, xmlFile):
    """
    Read and parse the proxies XML file located under config.DataRoot.

    On a parse failure the error is logged via log.error with
    raiseException=RuntimeError, which raises on the caller's behalf.

    @param xmlFile: path of the proxies XML file, relative to config.DataRoot.
    """
    self.items = []
    self.xmlFile = fullServerPath(config.DataRoot, xmlFile)

    # Read in XML
    try:
        _ignore_tree, proxies_node = readXML(self.xmlFile, ELEMENT_PROXIES)
    # Fixed: `except ValueError, e` is a syntax error under Python 3;
    # the `as` form is equivalent and works on Python 2.6+ as well.
    except ValueError as e:
        log.error("XML parse error for '%s' because: %s" % (self.xmlFile, e,), raiseException=RuntimeError)
def __init__(self, xmlFile):
    """
    Read and parse the proxies XML file located under config.DataRoot.

    @param xmlFile: path of the proxies XML file, relative to config.DataRoot.
    @raise ValueError: if the file cannot be parsed.
    """
    self.items = []
    self.xmlFile = fullServerPath(config.DataRoot, xmlFile)

    # Read in XML
    try:
        _ignore_tree, proxies_node = readXML(self.xmlFile, ELEMENT_PROXIES)
    except ValueError:
        log.failure("XML parse error for proxy data file {xmlfile}", xmlfile=self.xmlFile)
        # Fixed: log.failure only logs; falling through would hit a NameError
        # on the unbound proxies_node below.  Re-raise the original parse
        # error instead (per the old FIXME, a bare RuntimeError was unwanted).
        raise

    self._parseXML(proxies_node)
def loadConfig(self):
    """
    Load the primary configuration file, then merge in every file listed in
    its "Includes" section (paths resolved against ServerRoot/ConfigRoot).

    @return: the merged configuration dict.
    """
    if self._configFileName:
        configDict = self._parseConfigFromFile(self._configFileName)
    else:
        configDict = {}

    # Layer each include file's settings over the base configuration.
    if "Includes" in configDict:
        configRoot = os.path.join(configDict.ServerRoot, configDict.ConfigRoot)
        for include in configDict.Includes:
            includedDict = self._parseConfigFromFile(fullServerPath(configRoot, include))
            if not includedDict:
                continue
            log.info("Adding configuration from file: '%s'" % (include,))
            configDict.update(includedDict)

    return configDict
def __init__(self, xmlFiles, cacheTimeout=30):
    """
    Load augment records from the given XML files.

    @param xmlFiles: iterable of augment XML file paths, each resolved
        relative to config.DataRoot.
    @param cacheTimeout: cache lifetime, in minutes.
    @raise RuntimeError: if any augments file fails to parse.
    """
    super(AugmentXMLDB, self).__init__()
    self.xmlFiles = [fullServerPath(config.DataRoot, f) for f in xmlFiles]
    # Configured value is in minutes; we track seconds internally.
    self.cacheTimeout = 60 * cacheTimeout
    self.db = {}
    self.lastCached = 0

    try:
        self.db = self._parseXML()
    except RuntimeError:
        # A broken augments file is fatal at startup.
        log.error("Failed to parse XML augments file - fatal error on startup")
        raise

    self.lastCached = time.time()
def _loadConfig(self):
    """
    Load (or reload) the iSchedule servers XML file, caching the parsed
    result and the file's (mtime, size) at class level so all instances
    share it and re-parse only when the file changes on disk.
    """
    # First use: resolve the configured file path once.
    if IScheduleServers._servers is None:
        IScheduleServers._xmlFile = FilePath(
            fullServerPath(
                config.ConfigRoot,
                config.Scheduling[DeliveryService.serviceType_ischedule]["Servers"]
            )
        )
    IScheduleServers._xmlFile.restat()
    fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
    # Re-parse only when mtime or size differ from the last parse.
    if fileInfo != IScheduleServers._fileInfo:
        parser = IScheduleServersParser(IScheduleServers._xmlFile)
        IScheduleServers._servers = parser.servers
        self._mapDomains()
        IScheduleServers._fileInfo = fileInfo
def load(self, xmlFile=None, ignoreIPLookupFailures=False):
    """
    Load the servers configuration.  A (re)load happens on first use
    (no file loaded yet) or whenever an explicit xmlFile is supplied;
    otherwise the previously parsed data is kept.

    @param xmlFile: optional explicit path to the servers XML file; when
        omitted, the path comes from config.Servers.ConfigFile under
        config.ConfigRoot.
    @param ignoreIPLookupFailures: passed through to ServersParser.parse.
    @raise ValueError: if no parsed server entry is marked as this server.
    """
    if self._xmlFile is None or xmlFile is not None:
        self._servers = {}
        if xmlFile:
            self._xmlFile = xmlFile
        else:
            self._xmlFile = fullServerPath(
                config.ConfigRoot,
                config.Servers.ConfigFile
            )
        self._servers = ServersParser.parse(self._xmlFile, ignoreIPLookupFailures=ignoreIPLookupFailures)
        # Identify which parsed entry represents this host; for/else raises
        # when none of them does.
        for server in self._servers.values():
            if server.thisServer:
                self._thisServer = server
                break
        else:
            raise ValueError("No server in {} matches this server.".format(self._xmlFile,))
def __init__(self, xmlFiles, statSeconds=15):
    """
    Load augment records from the given XML files, tracking each file's
    (mtime, size) so changes on disk can be detected cheaply.

    @param xmlFiles: iterable of augment XML file paths, each resolved
        relative to config.DataRoot.
    @param statSeconds: minimum interval, in seconds, between stat() checks.
    @raise RuntimeError: if any augments file fails to parse.
    """
    super(AugmentXMLDB, self).__init__()
    self.xmlFiles = [fullServerPath(config.DataRoot, f) for f in xmlFiles]
    # Per-file (mtime, size) pairs; zeros force a parse on first check.
    self.xmlFileStats = dict((f, (0, 0)) for f in self.xmlFiles)
    self.statSeconds = statSeconds  # don't stat more often than this
    self.db = {}
    self.lastCached = 0

    try:
        self.db = self._parseXML()
    except RuntimeError:
        # A broken augments file is fatal at startup.
        log.error("Failed to parse XML augments file - fatal error on startup")
        raise

    self.lastCached = time.time()
    self.normalizeUUIDs()
def __init__(self, xmlFiles, statSeconds=15):
    """
    Build the augments database from XML files under config.DataRoot.

    @param xmlFiles: iterable of augment XML file paths (relative to the
        data root).
    @param statSeconds: minimum number of seconds between stat() checks.
    @raise RuntimeError: if parsing any augments file fails.
    """
    super(AugmentXMLDB, self).__init__()
    resolved = [fullServerPath(config.DataRoot, p) for p in xmlFiles]
    self.xmlFiles = resolved
    # mtime/size per file; (0, 0) guarantees the first comparison differs.
    self.xmlFileStats = {p: (0, 0) for p in resolved}
    self.statSeconds = statSeconds  # throttle for on-disk change checks
    self.lastCached = 0
    self.db = {}

    try:
        self.db = self._parseXML()
    except RuntimeError:
        log.error("Failed to parse XML augments file - fatal error on startup")
        raise  # fatal at startup

    self.lastCached = time.time()
    self.normalizeUUIDs()
def _loadConfig(self):
    """
    Load (or reload) the iSchedule remote-servers XML file, caching the
    parsed result and the file's (mtime, size) at class level.  When the
    feature is unconfigured or the file is missing, the class-level state
    is reset to empty.
    """
    if config.Scheduling.iSchedule.RemoteServers:
        # First use: resolve the configured file path once.
        if IScheduleServers._servers is None:
            IScheduleServers._xmlFile = FilePath(
                fullServerPath(
                    config.ConfigRoot,
                    config.Scheduling.iSchedule.RemoteServers,
                ))
        if IScheduleServers._xmlFile.exists():
            IScheduleServers._xmlFile.restat()
            fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
            # Re-parse only when the file changed since the last parse.
            if fileInfo != IScheduleServers._fileInfo:
                parser = IScheduleServersParser(IScheduleServers._xmlFile)
                IScheduleServers._servers = parser.servers
                self._mapDomains()
                IScheduleServers._fileInfo = fileInfo
        else:
            # Configured file vanished: clear the cached servers.
            IScheduleServers._servers = ()
            IScheduleServers._domainMap = {}
    else:
        # Feature disabled: clear the cached servers.
        IScheduleServers._servers = ()
        IScheduleServers._domainMap = {}
def _loadConfig(self):
    """
    Load (or reload) the iSchedule remote-servers XML file, caching the
    parsed result and the file's (mtime, size) at class level.  When the
    feature is unconfigured or the file is missing, the class-level state
    is reset to empty.
    """
    if config.Scheduling.iSchedule.RemoteServers:
        # First use: resolve the configured file path once.
        if IScheduleServers._servers is None:
            IScheduleServers._xmlFile = FilePath(
                fullServerPath(
                    config.ConfigRoot,
                    config.Scheduling.iSchedule.RemoteServers,
                )
            )
        if IScheduleServers._xmlFile.exists():
            IScheduleServers._xmlFile.restat()
            fileInfo = (IScheduleServers._xmlFile.getmtime(), IScheduleServers._xmlFile.getsize())
            # Re-parse only when the file changed since the last parse.
            if fileInfo != IScheduleServers._fileInfo:
                parser = IScheduleServersParser(IScheduleServers._xmlFile)
                IScheduleServers._servers = parser.servers
                self._mapDomains()
                IScheduleServers._fileInfo = fileInfo
        else:
            # Configured file vanished: clear the cached servers.
            IScheduleServers._servers = ()
            IScheduleServers._domainMap = {}
    else:
        # Feature disabled: clear the cached servers.
        IScheduleServers._servers = ()
        IScheduleServers._domainMap = {}
def buildDirectory(
    store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo,
    serversDB=None, cachingSeconds=0, filterStartsWith=False,
    lookupsBetweenPurges=0, negativeCaching=True,
):
    """
    Return a directory without using a config object; suitable for tests
    which need to have mulitple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any
        xml based services.
    @param servicesInfo:  An interable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """
    aggregatedServices = []
    cachingServices = []
    ldapService = None  # LDAP DS has extra stats (see augment.py)

    # Create one directory service per enabled service definition.
    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (DirectoryService as ODDirectoryService)
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            node = params.node
            directory = ODDirectoryService(nodeName=node, suppressSystemRecords=True)

        elif "ldap" in directoryType:
            from twext.who.ldap import (DirectoryService as LDAPDirectoryService, FieldName as LDAPFieldName, RecordTypeSchema)
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn, params.credentials.password)
            else:
                creds = None
            mapping = params.mapping
            extraFilters = params.extraFilters
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                useTLS=params.useTLS,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: mapping.uid,
                    BaseFieldName.guid: mapping.guid,
                    BaseFieldName.shortNames: mapping.shortNames,
                    BaseFieldName.fullNames: mapping.fullNames,
                    BaseFieldName.emailAddresses: mapping.emailAddresses,
                    LDAPFieldName.memberDNs: mapping.memberDNs,
                    CalFieldName.readOnlyProxy: mapping.readOnlyProxy,
                    CalFieldName.readWriteProxy: mapping.readWriteProxy,
                    CalFieldName.loginAllowed: mapping.loginAllowed,
                    CalFieldName.hasCalendars: mapping.hasCalendars,
                    CalFieldName.autoScheduleMode: mapping.autoScheduleMode,
                    CalFieldName.autoAcceptGroup: mapping.autoAcceptGroup,
                    CalFieldName.serviceNodeUID: mapping.serviceNodeUID,
                    CalFieldName.associatedAddress: mapping.associatedAddress,
                    CalFieldName.geographicLocation: mapping.geographicLocation,
                    CalFieldName.streetAddress: mapping.streetAddress,
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=params.rdnSchema.users,
                        attributes=(),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=params.rdnSchema.groups,
                        attributes=(),
                    ),
                    CalRecordType.location: RecordTypeSchema(
                        relativeDN=params.rdnSchema.locations,
                        attributes=(),
                    ),
                    CalRecordType.resource: RecordTypeSchema(
                        relativeDN=params.rdnSchema.resources,
                        attributes=(),
                    ),
                    CalRecordType.address: RecordTypeSchema(
                        relativeDN=params.rdnSchema.addresses,
                        attributes=(),
                    ),
                }),
                extraFilters={
                    RecordType.user: extraFilters.get("users", ""),
                    RecordType.group: extraFilters.get("groups", ""),
                    CalRecordType.location: extraFilters.get("locations", ""),
                    CalRecordType.resource: extraFilters.get("resources", ""),
                    CalRecordType.address: extraFilters.get("addresses", ""),
                },
                threadPoolMax=params.get("threadPoolMax", 10),
                authConnectionMax=params.get("authConnectionMax", 5),
                queryConnectionMax=params.get("queryConnectionMax", 5),
                tries=params.get("tries", 3),
                warningThresholdSeconds=params.get("warningThresholdSeconds", 5),
            )
            ldapService = directory

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        # NOTE(review): fieldNames is re-initialized on every loop iteration,
        # so only the last enabled service's fieldName survives into the
        # aggregate below — confirm this is intended.
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)

        # Optionally wrap the service in a cache.
        if cachingSeconds:
            directory = CachingDirectoryService(
                directory,
                expireSeconds=cachingSeconds,
                lookupsBetweenPurges=lookupsBetweenPurges,
                negativeCaching=negativeCaching,
            )
            cachingServices.append(directory)

        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    serviceClass = {
        "xml": "twistedcaldav.directory.augment.AugmentXMLDB",
    }

    # Ensure each configured augments file exists, seeding defaults if not.
    for augmentFile in augmentServiceInfo.params.xmlFiles:
        augmentFile = fullServerPath(dataRoot, augmentFile)
        augmentFilePath = FilePath(augmentFile)
        if not augmentFilePath.exists():
            augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

    augmentClass = namedClass(serviceClass[augmentServiceInfo.type])
    log.info("Configuring augment service of type: {augmentClass}", augmentClass=augmentClass)
    try:
        augmentService = augmentClass(**augmentServiceInfo.params)
    except IOError:
        log.error("Could not start augment service")
        raise

    # Locate the service that hosts user records; for/else raises if none.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName, store)
    # (put at front of list so we don't try to ask the actual DS services
    # about the delegate-related principals, for performance)
    aggregatedServices.insert(0, delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(
                userDirectory.realmName,
                wikiServiceInfo.EndpointDescriptor,
            ))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName, aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store, augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

        # Tell each caching service what method to use when reporting
        # times and cache stats
        for cachingService in cachingServices:
            cachingService.setTimingMethod(augmented._addTiming)

        # LDAP has additional stats to report
        augmented._ldapDS = ldapService

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    if filterStartsWith:
        augmented.setFilter(startswithFilter)

    return augmented
def buildDirectory(
    store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo,
    serversDB=None, cachingSeconds=0, filterStartsWith=False
):
    """
    Return a directory without using a config object; suitable for tests
    which need to have mulitple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any
        xml based services.
    @param servicesInfo:  An interable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """
    aggregatedServices = []
    cachingServices = []
    ldapService = None  # LDAP DS has extra stats (see augment.py)

    # Create one directory service per enabled service definition.
    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (
                DirectoryService as ODDirectoryService
            )
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            node = params.node
            directory = ODDirectoryService(nodeName=node, suppressSystemRecords=True)

        elif "ldap" in directoryType:
            from twext.who.ldap import (
                DirectoryService as LDAPDirectoryService,
                FieldName as LDAPFieldName,
                RecordTypeSchema
            )
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(
                    params.credentials.dn,
                    params.credentials.password
                )
            else:
                creds = None
            mapping = params.mapping
            extraFilters = params.extraFilters
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: mapping.uid,
                    BaseFieldName.guid: mapping.guid,
                    BaseFieldName.shortNames: mapping.shortNames,
                    BaseFieldName.fullNames: mapping.fullNames,
                    BaseFieldName.emailAddresses: mapping.emailAddresses,
                    LDAPFieldName.memberDNs: mapping.memberDNs,
                    CalFieldName.readOnlyProxy: mapping.readOnlyProxy,
                    CalFieldName.readWriteProxy: mapping.readWriteProxy,
                    CalFieldName.hasCalendars: mapping.hasCalendars,
                    CalFieldName.autoScheduleMode: mapping.autoScheduleMode,
                    CalFieldName.serviceNodeUID: mapping.serviceNodeUID,
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=params.rdnSchema.users,
                        attributes=(),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=params.rdnSchema.groups,
                        attributes=(),
                    ),
                    CalRecordType.location: RecordTypeSchema(
                        relativeDN=params.rdnSchema.locations,
                        attributes=(),
                    ),
                    CalRecordType.resource: RecordTypeSchema(
                        relativeDN=params.rdnSchema.resources,
                        attributes=(),
                    ),
                    CalRecordType.address: RecordTypeSchema(
                        relativeDN=params.rdnSchema.addresses,
                        attributes=(),
                    ),
                }),
                extraFilters={
                    RecordType.user: extraFilters.get("users", ""),
                    RecordType.group: extraFilters.get("groups", ""),
                    CalRecordType.location: extraFilters.get("locations", ""),
                    CalRecordType.resource: extraFilters.get("resources", ""),
                    CalRecordType.address: extraFilters.get("addresses", ""),
                }
            )
            ldapService = directory

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        # NOTE(review): fieldNames is re-initialized on every loop iteration,
        # so only the last enabled service's fieldName survives into the
        # aggregate below — confirm this is intended.
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName)
        )
        fieldNames.append(directory.fieldName)

        # Optionally wrap the service in a cache.
        if cachingSeconds:
            directory = CachingDirectoryService(
                directory,
                expireSeconds=cachingSeconds
            )
            cachingServices.append(directory)

        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if augmentServiceInfo.type:
        # Ensure each configured augments file exists, seeding defaults if not.
        for augmentFile in augmentServiceInfo.params.xmlFiles:
            augmentFile = fullServerPath(dataRoot, augmentFile)
            augmentFilePath = FilePath(augmentFile)
            if not augmentFilePath.exists():
                augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

        augmentClass = namedClass(augmentServiceInfo.type)
        log.info(
            "Configuring augment service of type: {augmentClass}",
            augmentClass=augmentClass
        )
        try:
            augmentService = augmentClass(**augmentServiceInfo.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    # Locate the service that hosts user records; for/else raises if none.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(
        userDirectory.realmName,
        store
    )
    # (put at front of list so we don't try to ask the actual DS services
    # about the delegate-related principals, for performance)
    aggregatedServices.insert(0, delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(
                userDirectory.realmName,
                wikiServiceInfo.EndpointDescriptor,
            )
        )

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(
        userDirectory.realmName, aggregatedServices
    )

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(
            aggregateDirectory, store, augmentService
        )
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

        # Tell each caching service what method to use when reporting
        # times and cache stats
        for cachingService in cachingServices:
            cachingService.setTimingMethod(augmented._addTiming)

        # LDAP has additional stats to report
        augmented._ldapDS = ldapService

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    if filterStartsWith:
        augmented.setFilter(startswithFilter)

    return augmented
def buildDirectory(store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo, serversDB=None):
    """
    Return a directory without using a config object; suitable for tests
    which need to have mulitple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any
        xml based services.
    @param servicesInfo:  An interable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """
    aggregatedServices = []

    # Create one directory service per enabled service definition.
    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (DirectoryService as ODDirectoryService)
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            directory = ODDirectoryService(suppressSystemRecords=True)

        elif "ldap" in directoryType:
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn, params.credentials.password)
            else:
                creds = None
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: ("apple-generateduid", ),
                    BaseFieldName.guid: ("apple-generateduid", ),
                    BaseFieldName.shortNames: (LDAPAttribute.uid.value, ),
                    BaseFieldName.fullNames: (LDAPAttribute.cn.value, ),
                    BaseFieldName.emailAddresses: (LDAPAttribute.mail.value, ),
                    BaseFieldName.password: (LDAPAttribute.userPassword.value, ),
                    LDAPFieldName.memberDNs: (LDAPAttribute.uniqueMember.value, ),
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=u"ou=People",

                        # (objectClass=inetOrgPerson)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.inetOrgPerson.value,
                        ), ),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=u"ou=Groups",

                        # (objectClass=groupOfNames)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.groupOfUniqueNames.value,
                        ), ),
                    ),
                }))

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        # NOTE(review): fieldNames is re-initialized on every loop iteration,
        # so only the last enabled service's fieldName survives into the
        # aggregate below — confirm this is intended.
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)
        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if augmentServiceInfo.type:
        # Ensure each configured augments file exists, seeding defaults if not.
        for augmentFile in augmentServiceInfo.params.xmlFiles:
            augmentFile = fullServerPath(dataRoot, augmentFile)
            augmentFilePath = FilePath(augmentFile)
            if not augmentFilePath.exists():
                augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

        augmentClass = namedClass(augmentServiceInfo.type)
        log.info("Configuring augment service of type: {augmentClass}", augmentClass=augmentClass)
        try:
            augmentService = augmentClass(**augmentServiceInfo.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    # Locate the service that hosts user records; for/else raises if none.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName, store)
    aggregatedServices.append(delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(userDirectory.realmName, wikiServiceInfo.CollabHost, wikiServiceInfo.CollabPort))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName, aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store, augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    return augmented
def buildDirectory(
    store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo,
    serversDB=None
):
    """
    Return a directory without using a config object; suitable for tests
    which need to have mulitple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any
        xml based services.
    @param servicesInfo:  An interable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """
    aggregatedServices = []

    # Create one directory service per enabled service definition.
    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (
                DirectoryService as ODDirectoryService
            )
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            directory = ODDirectoryService(suppressSystemRecords=True)

        elif "ldap" in directoryType:
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(
                    params.credentials.dn,
                    params.credentials.password
                )
            else:
                creds = None
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: ("apple-generateduid",),
                    BaseFieldName.guid: ("apple-generateduid",),
                    BaseFieldName.shortNames: (LDAPAttribute.uid.value,),
                    BaseFieldName.fullNames: (LDAPAttribute.cn.value,),
                    BaseFieldName.emailAddresses: (LDAPAttribute.mail.value,),
                    BaseFieldName.password: (LDAPAttribute.userPassword.value,),
                    LDAPFieldName.memberDNs: (LDAPAttribute.uniqueMember.value,),
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=u"ou=People",

                        # (objectClass=inetOrgPerson)
                        attributes=(
                            (
                                LDAPAttribute.objectClass.value,
                                LDAPObjectClass.inetOrgPerson.value,
                            ),
                        ),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=u"ou=Groups",

                        # (objectClass=groupOfNames)
                        attributes=(
                            (
                                LDAPAttribute.objectClass.value,
                                LDAPObjectClass.groupOfUniqueNames.value,
                            ),
                        ),
                    ),
                })
            )

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        # NOTE(review): fieldNames is re-initialized on every loop iteration,
        # so only the last enabled service's fieldName survives into the
        # aggregate below — confirm this is intended.
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName)
        )
        fieldNames.append(directory.fieldName)
        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if augmentServiceInfo.type:
        # Ensure each configured augments file exists, seeding defaults if not.
        for augmentFile in augmentServiceInfo.params.xmlFiles:
            augmentFile = fullServerPath(dataRoot, augmentFile)
            augmentFilePath = FilePath(augmentFile)
            if not augmentFilePath.exists():
                augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

        augmentClass = namedClass(augmentServiceInfo.type)
        log.info(
            "Configuring augment service of type: {augmentClass}",
            augmentClass=augmentClass
        )
        try:
            augmentService = augmentClass(**augmentServiceInfo.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    # Locate the service that hosts user records; for/else raises if none.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(
        userDirectory.realmName,
        store
    )
    aggregatedServices.append(delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(
                userDirectory.realmName,
                wikiServiceInfo.CollabHost,
                wikiServiceInfo.CollabPort
            )
        )

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(
        userDirectory.realmName, aggregatedServices
    )

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(
            aggregateDirectory, store, augmentService
        )
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    return augmented
def __init__(self, dbpath):
    """
    Open (or create) the sqlite3-backed augments database under the
    configured data root.

    @param dbpath: database file path, relative to config.DataRoot.
    """
    ADBAPISqliteMixin.__init__(self)
    absolutePath = fullServerPath(config.DataRoot, dbpath)
    AugmentADAPI.__init__(self, "Augments", "sqlite3", (absolutePath,))
def __init__(self, dbpath):
    """
    Open (or create) the sqlite3-backed proxy database under the configured
    data root, remembering the resolved path on the instance.

    @param dbpath: database file path, relative to config.DataRoot.
    """
    absolutePath = fullServerPath(config.DataRoot, dbpath)
    self.dbpath = absolutePath
    ADBAPISqliteMixin.__init__(self)
    ProxyDB.__init__(self, "Proxies", "sqlite3", (absolutePath,))
def __init__(self, dbpath):
    """
    Open (or create) the sqlite3-backed proxy database under the configured
    data root.

    @param dbpath: database file path, relative to config.DataRoot.
    """
    connectionArgs = (fullServerPath(config.DataRoot, dbpath),)
    ADBAPISqliteMixin.__init__(self)
    ProxyDB.__init__(self, "Proxies", "sqlite3", connectionArgs)
def __init__(self, dbpath):
    """
    Open (or create) the sqlite3-backed augments database under the
    configured data root.

    @param dbpath: database file path, relative to config.DataRoot.
    """
    connectionArgs = (fullServerPath(config.DataRoot, dbpath),)
    ADBAPISqliteMixin.__init__(self)
    AugmentADAPI.__init__(self, "Augments", "sqlite3", connectionArgs)