    def test_lookupByName(self):
        constants = set((
            Instruments.hammer,
            Tools.screwdriver,
            Instruments.chisel,
        ))
        container = ConstantsContainer(constants)

        self.assertEquals(
            container.lookupByName("hammer"),
            Instruments.hammer,
        )
        self.assertEquals(
            container.lookupByName("screwdriver"),
            Tools.screwdriver,
        )
        self.assertEquals(
            container.lookupByName("chisel"),
            Instruments.chisel,
        )

        self.assertRaises(
            ValueError,
            container.lookupByName, "plugh",
        )
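
A minimal sketch of the container API the test above exercises, assuming ConstantsContainer comes from twext.who.util and that Tools/Instruments are twisted.python.constants.Names subclasses (both class names are taken from the test):

    from twisted.python.constants import NamedConstant, Names
    from twext.who.util import ConstantsContainer

    class Tools(Names):
        hammer = NamedConstant()
        screwdriver = NamedConstant()

    class Instruments(Names):
        chisel = NamedConstant()

    # A container can be built from individual constants; the later examples
    # also pass whole Names classes and other containers to merge them.
    container = ConstantsContainer((Tools.hammer, Instruments.chisel))
    assert container.lookupByName("chisel") is Instruments.chisel
    # Unknown names raise ValueError, as the test asserts above.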
Example #2
class DirectoryService(BaseDirectoryService):
    """
    XML directory service with calendar and contacts data.
    """

    recordType = ConstantsContainer(
        (BaseDirectoryService.recordType, RecordType))

    # MOVE2WHO: Wilfredo had added augment fields into xml, which does make
    # some sense, but for backwards compatibility right now I will take those
    # out, and rely on a separate augment service

    # fieldName = ConstantsContainer(
    #     (BaseDirectoryService.fieldName, FieldName)
    # )

    # XML schema constants

    element = ConstantsContainer((BaseDirectoryService.element, Element))

    attribute = ConstantsContainer((BaseDirectoryService.attribute, Attribute))

    recordTypeValue = ConstantsContainer(
        (BaseDirectoryService.recordTypeValue, RecordTypeValue))

    xmlFieldOrder = BaseDirectoryService.xmlFieldOrder + (
        FieldName.capacity,
        FieldName.floor,
        FieldName.associatedAddress,
        FieldName.abbreviatedName,
        FieldName.streetAddress,
        FieldName.geographicLocation,
    )
Example #3
class DirectoryService(BaseDirectoryService):
    """
    XML directory service with calendar and contacts data.
    """

    recordType = ConstantsContainer(
        (BaseDirectoryService.recordType, RecordType)
    )

    # MOVE2WHO: Wilfredo had added augment fields into xml, which does make
    # some sense, but for backwards compatibility right now I will take those
    # out, and rely on a separate augment service

    # fieldName = ConstantsContainer(
    #     (BaseDirectoryService.fieldName, FieldName)
    # )

    # XML schema constants

    element = ConstantsContainer(
        (BaseDirectoryService.element, Element)
    )

    attribute = ConstantsContainer(
        (BaseDirectoryService.attribute, Attribute)
    )

    recordTypeValue = ConstantsContainer(
        (BaseDirectoryService.recordTypeValue, RecordTypeValue)
    )
Example #4
    def test_iterconstants(self):
        constants = set((Tools.hammer, Tools.screwdriver, Instruments.chisel))
        container = ConstantsContainer(constants)

        self.assertEquals(
            set(container.iterconstants()),
            constants,
        )
Example #5
    def service(self, **kwargs):
        svc = TestService(
            url=self.url,
            baseDN=self.baseDN,
            fieldNameToAttributesMap=TEST_FIELDNAME_MAP,
            recordTypeSchemas=MappingProxyType({
                RecordType.user:
                RecordTypeSchema(
                    relativeDN=u"cn=user",

                    # (objectClass=inetOrgPerson)
                    attributes=((
                        LDAPAttribute.objectClass.value,
                        LDAPObjectClass.inetOrgPerson.value,
                    ), ),
                ),
                RecordType.group:
                RecordTypeSchema(
                    relativeDN=u"cn=group",

                    # (objectClass=groupOfUniqueNames)
                    attributes=((
                        LDAPAttribute.objectClass.value,
                        LDAPObjectClass.groupOfUniqueNames.value,
                    ), ),
                ),
            }),
            **kwargs)
        svc.fieldName = ConstantsContainer((svc.fieldName, TestFieldName))
        return svc
Example #6
class InMemoryDirectoryService(IndexDirectoryService):
    """
    An in-memory IDirectoryService.  You must call updateRecords() if you want
    to populate this service.
    """

    recordType = ConstantsContainer(
        (RecordType.user, RecordType.group, CalRecordType.location,
         CalRecordType.resource, CalRecordType.address))

    def loadRecords(self):
        pass

    @inlineCallbacks
    def updateRecords(self, records, create=False):
        recordsByUID = dict(((record.uid, record) for record in records))
        if not create:
            # Make sure all the records already exist
            for uid, _ignore_record in recordsByUID.items():
                if uid not in self._index[self.fieldName.uid]:
                    raise NoSuchRecordError(uid)

        yield self.removeRecords(recordsByUID.keys())
        self.indexRecords(records)

    def removeRecords(self, uids):
        index = self._index
        for fieldName in self.indexedFields:
            for recordSet in index[fieldName].itervalues():
                for record in list(recordSet):
                    if record.uid in uids:
                        recordSet.remove(record)
        return succeed(None)
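
A hedged usage sketch for the in-memory service above; constructing it with no arguments is an assumption (the real signature comes from IndexDirectoryService, which is not shown), and "records" stands for an iterable of directory records:

    # inside an @inlineCallbacks function
    svc = InMemoryDirectoryService()
    # With create=True new records are indexed; with the default create=False,
    # any record whose uid is not already present raises NoSuchRecordError.
    yield svc.updateRecords(records, create=True)
    yield svc.removeRecords([record.uid for record in records])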
Example #7
class DirectoryService(BaseDirectoryService):
    recordType = ConstantsContainer(
        (BaseDirectoryService.recordType, RecordType))

    def __init__(self):
        BaseDirectoryService.__init__(self, realmName="no realm")

    def recordWithUID(self, uid, timeoutSeconds=None):
        return DirectoryRecord(self, uid)

    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
        return DirectoryRecord(self, shortName)
Example #8
class DirectoryService(BaseDirectoryService):
    """
    Duty Management System directory service.
    """

    log = Logger()

    fieldName = ConstantsContainer((BaseFieldName, IndexFieldName, FieldName))

    recordType = ConstantsContainer((
        BaseRecordType.user,
        BaseRecordType.group,
    ))

    def __init__(self, dms, masterKey=None):
        BaseDirectoryService.__init__(self, realmName=noRealmName)

        self.dms = dms
        self._personnel = None
        self._positions = None
        self._masterKey = masterKey

    @property
    def realmName(self):
        return "{}@{}".format(self.dms.database, self.dms.host)

    @realmName.setter
    def realmName(self, value):
        if value is not noRealmName:
            raise AttributeError("realmName may not be set directly")

    def loadRecords(self):
        # Getting the personnel data from DMS is async, and this API is not,
        # so we're going to call into an async method and when it's eventually
        # done, we'll have some data, but we have no way to tell the caller
        # when that is.
        self._loadRecordsFromPersonnel()

    @inlineCallbacks
    def _loadRecordsFromPersonnel(self):
        try:
            personnel = yield self.dms.personnel()
            positions = yield self.dms.positions()
        except DatabaseError as e:
            self.log.error("Unable to look up personnel data: {error}",
                           error=e)
            return

        if personnel is self._personnel and positions is self._positions:
            return

        self.flush()
        self.indexRecords(
            RangerDirectoryRecord(self, ranger) for ranger in personnel)
        self.indexRecords(
            PositionDirectoryRecord(self, position) for position in positions)

        self.log.info("DMS directory service updated.")

        self._personnel = personnel
        self._positions = positions
Example #9
class CachingDirectoryService(
    BaseDirectoryService, CalendarDirectoryServiceMixin
):
    """
    Caching directory service.

    This is a directory service that wraps an L{IDirectoryService} and caches
    directory records.
    """

    fieldName = ConstantsContainer((
        BaseFieldName,
        FieldName,
    ))

    def __init__(self, directory, expireSeconds=30, lookupsBetweenPurges=0, negativeCaching=True):
        BaseDirectoryService.__init__(self, directory.realmName)
        self._directory = directory

        # Patch the wrapped directory service's recordWithXXX to instead
        # use this cache

        directory._wrapped_recordWithUID = directory.recordWithUID
        directory.recordWithUID = self.recordWithUID

        directory._wrapped_recordWithGUID = directory.recordWithGUID
        directory.recordWithGUID = self.recordWithGUID

        directory._wrapped_recordWithShortName = directory.recordWithShortName
        directory.recordWithShortName = self.recordWithShortName

        directory._wrapped_recordsWithEmailAddress = directory.recordsWithEmailAddress
        directory.recordsWithEmailAddress = self.recordsWithEmailAddress

        self._expireSeconds = expireSeconds

        if lookupsBetweenPurges == 0:
            self._purgingEnabled = False
        else:
            self._purgingEnabled = True
            self._lookupsBetweenPurges = lookupsBetweenPurges

        self.negativeCaching = negativeCaching

        self.resetCache()

    def setTimingMethod(self, f):
        """
        Replace the default no-op timing method
        """
        self._addTiming = f

    def _addTiming(self, key, duration):
        """
        Timing won't get recorded by default -- you must call setTimingMethod()
        with a callable that takes a key (such as a method name) and a duration.
        """
        pass
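
    # A hedged sketch of wiring in timing collection via setTimingMethod();
    # "cachingService" and "timings" are hypothetical names used only here:
    #
    #     timings = []
    #     cachingService.setTimingMethod(
    #         lambda key, duration: timings.append((key, duration))
    #     )
    #     # lookupRecord() then reports keys such as "recordWithUID-hit",
    #     # "recordWithUID-miss", and "recordWithUID-expired".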

    def resetCache(self):
        """
        Clear the cache
        """

        log.debug("Resetting cache")
        self._cache = {
            IndexType.uid: {},
            IndexType.guid: {},
            IndexType.shortName: {},  # key is (recordType.name, shortName)
            IndexType.emailAddress: {},
        }
        self._negativeCache = {
            IndexType.uid: {},
            IndexType.guid: {},
            IndexType.shortName: {},  # key is (recordType.name, shortName)
            IndexType.emailAddress: {},
        }
        self._hitCount = 0
        self._requestCount = 0
        if self._purgingEnabled:
            self._lookupsUntilScan = self._lookupsBetweenPurges

        # If DPS is in use we restrict the cache to the DPSClients only, otherwise we can
        # cache in each worker process
        if config.Memcached.Pools.Default.ClientEnabled and (
            not config.DirectoryProxy.Enabled or isinstance(self._directory, DPSClientDirectoryService)
        ):
            self._memcacher = DirectoryMemcacher(
                self._expireSeconds,
                self._directory,
                self._directory.realmName,
                "a" if config.DirectoryProxy.Enabled else "b"
            )
        else:
            self._memcacher = None

    def setTestTime(self, timestamp):
        """
        Only used for unit tests to override the notion of "now"

        @param timestamp: seconds
        @type timestamp: C{float}
        """
        self._test_time = timestamp

    def cacheRecord(self, record, indexTypes, addToMemcache=True):
        """
        Store a record in the cache, within the specified indexes

        @param record: the directory record
        @param indexTypes: an iterable of L{IndexType}
        """

        if hasattr(self, "_test_time"):
            timestamp = self._test_time
        else:
            timestamp = time.time()

        cached = []
        if IndexType.uid in indexTypes:
            self._cache[IndexType.uid][record.uid] = (timestamp, record)
            cached.append((IndexType.uid, record.uid,))

        if IndexType.guid in indexTypes:
            try:
                self._cache[IndexType.guid][record.guid] = (timestamp, record)
                cached.append((IndexType.guid, record.guid,))
            except AttributeError:
                pass
        if IndexType.shortName in indexTypes:
            try:
                typeName = record.recordType.name
                for name in record.shortNames:
                    self._cache[IndexType.shortName][(typeName, name)] = (timestamp, record)
                    cached.append((IndexType.shortName, (typeName, name),))
            except AttributeError:
                pass
        if IndexType.emailAddress in indexTypes:
            try:
                for emailAddress in record.emailAddresses:
                    self._cache[IndexType.emailAddress][emailAddress] = (timestamp, record)
                    cached.append((IndexType.emailAddress, emailAddress,))
            except AttributeError:
                pass

        if addToMemcache and self._memcacher is not None:
            for indexType, key in cached:
                memcachekey = self._memcacher.generateMemcacheKey(indexType, key)
                log.debug("Memcache: storing %s" % (memcachekey,))
                try:
                    self._memcacher.memcacheSetRecord(memcachekey, record)
                except DirectoryMemcacheError:
                    log.error("Memcache: failed to store %s" % (memcachekey,))
                    pass

    def negativeCacheRecord(self, indexType, key):
        """
        Record a cache miss in the negative cache, under the specified index.

        @param indexType: an L{IndexType}
        @param key: the key to store in the specified index
        """

        if hasattr(self, "_test_time"):
            timestamp = self._test_time
        else:
            timestamp = time.time()

        self._negativeCache[indexType][key] = timestamp

        # Do memcache
        if self._memcacher is not None:

            # The only time the recordType arg matters is when indexType is
            # short-name, and in that case recordTypes will contain exactly
            # one recordType, so using recordTypes[0] here is always safe:
            memcachekey = self._memcacher.generateMemcacheKey(indexType, key)
            try:
                self._memcacher.memcacheSet("-%s" % (memcachekey,), timestamp)
            except DirectoryMemcacheError:
                log.error("Memcache: failed to store -%s" % (memcachekey,))
                pass

        log.debug(
            "Directory negative cache: {index} {key}",
            index=indexType.value,
            key=key
        )

    def purgeRecord(self, record):
        """
        Remove a record from all indices in the cache

        @param record: the directory record
        """

        if record.uid in self._cache[IndexType.uid]:
            del self._cache[IndexType.uid][record.uid]

        try:
            if record.guid in self._cache[IndexType.guid]:
                del self._cache[IndexType.guid][record.guid]
        except AttributeError:
            pass

        try:
            typeName = record.recordType.name
            for name in record.shortNames:
                key = (typeName, name)
                if key in self._cache[IndexType.shortName]:
                    del self._cache[IndexType.shortName][key]
        except AttributeError:
            pass

        try:
            for emailAddress in record.emailAddresses:
                if emailAddress in self._cache[IndexType.emailAddress]:
                    del self._cache[IndexType.emailAddress][emailAddress]
        except AttributeError:
            pass

    def purgeExpiredRecords(self):
        """
        Scans the cache for expired records and deletes them
        """
        if hasattr(self, "_test_time"):
            now = self._test_time
        else:
            now = time.time()

        for indexType in self._cache:
            for key, (cachedTime, _ignore_record) in self._cache[indexType].items():
                if now - self._expireSeconds > cachedTime:
                    del self._cache[indexType][key]

    def lookupRecord(self, indexType, key, name):
        """
        Look for a record in the specified index, under the specified key.
        purgeExpiredRecords() is called after every
        config.DirectoryCaching.LookupsBetweenPurges lookups.

        @param indexType: an index type
        @type indexType: L{IndexType}

        @param key: the key to look up in the specified index
        @type key: any valid type that can be used as a dictionary key

        @return: tuple of (the cached L{DirectoryRecord}, or L{None}) and a L{bool}
            indicating whether a query will be required (not required if a negative cache hit)
        @rtype: L{tuple}
        """

        if self._purgingEnabled:
            if self._lookupsUntilScan == 0:
                self._lookupsUntilScan = self._lookupsBetweenPurges
                self.purgeExpiredRecords()
            else:
                self._lookupsUntilScan -= 1

        if hasattr(self, "_test_time"):
            now = self._test_time
        else:
            now = time.time()

        self._requestCount += 1
        if key in self._cache[indexType]:

            cachedTime, record = self._cache[indexType].get(key, (0.0, None))
            if now - self._expireSeconds > cachedTime:
                log.debug(
                    "Directory cache miss (expired): {index} {key}",
                    index=indexType.value,
                    key=key
                )
                # This record has expired
                self.purgeRecord(record)
                self._addTiming("{}-expired".format(name), 0)

                # Fall through when the in-memory cache expires so that we check memcache
                # for a valid record BEFORE we try an ldap query and recache

            else:
                log.debug(
                    "Directory cache hit: {index} {key}",
                    index=indexType.value,
                    key=key
                )
                self._hitCount += 1
                self._addTiming("{}-hit".format(name), 0)
                return (record, False,)

        # Check negative cache (take cache entry timeout into account)
        if self.negativeCaching:
            try:
                disabledTime = self._negativeCache[indexType][key]
                if now - disabledTime < self._expireSeconds:
                    log.debug(
                        "Directory negative cache hit: {index} {key}",
                        index=indexType.value,
                        key=key
                    )
                    self._addTiming("{}-neg-hit".format(name), 0)
                    return (None, False,)
                else:
                    del self._negativeCache[indexType][key]
            except KeyError:
                pass

        # Check memcache
        if self._memcacher is not None:

            # The only time the recordType arg matters is when indexType is
            # short-name, and in that case recordTypes will contain exactly
            # one recordType, so using recordTypes[0] here is always safe:
            memcachekey = self._memcacher.generateMemcacheKey(indexType, key)

            log.debug("Memcache: checking %s" % (memcachekey,))

            try:
                record = self._memcacher.memcacheGetRecord(memcachekey)
            except DirectoryMemcacheError:
                log.error("Memcache: failed to get %s" % (memcachekey,))
                record = None

            if record is None:
                log.debug("Memcache: miss %s" % (memcachekey,))
            else:
                log.debug("Memcache: hit %s" % (memcachekey,))
                self.cacheRecord(record, (IndexType.uid, IndexType.guid, IndexType.shortName,), addToMemcache=False)
                return (record, False,)

            # Check negative memcache
            if self.negativeCaching:
                try:
                    val = self._memcacher.memcacheGet("-%s" % (memcachekey,))
                except DirectoryMemcacheError:
                    log.error("Memcache: failed to get -%s" % (memcachekey,))
                    val = None
                if val == 1:
                    log.debug("Memcache: negative hit %s" % (memcachekey,))
                    self._negativeCache[indexType][key] = now
                    return (None, False,)

        log.debug(
            "Directory cache miss: {index} {key}",
            index=indexType.value,
            key=key
        )

        self._addTiming("{}-miss".format(name), 0)
        return (None, True,)

    # Cached methods:

    @inlineCallbacks
    def recordWithUID(self, uid, timeoutSeconds=None):

        # First check our cache
        record, doQuery = self.lookupRecord(IndexType.uid, uid, "recordWithUID")
        if record is None and doQuery:
            record = yield self._directory._wrapped_recordWithUID(
                uid, timeoutSeconds=timeoutSeconds
            )
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName)
                )
            else:
                self.negativeCacheRecord(IndexType.uid, uid)

        returnValue(record)

    @inlineCallbacks
    def recordWithGUID(self, guid, timeoutSeconds=None):

        # First check our cache
        record, doQuery = self.lookupRecord(IndexType.guid, guid, "recordWithGUID")
        if record is None and doQuery:
            record = yield self._directory._wrapped_recordWithGUID(
                guid, timeoutSeconds=timeoutSeconds
            )
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName)
                )
            else:
                self.negativeCacheRecord(IndexType.guid, guid)

        returnValue(record)

    @inlineCallbacks
    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):

        # First check our cache
        record, doQuery = self.lookupRecord(
            IndexType.shortName,
            (recordType.name, shortName),
            "recordWithShortName"
        )
        if record is None and doQuery:
            record = yield self._directory._wrapped_recordWithShortName(
                recordType, shortName, timeoutSeconds=timeoutSeconds
            )
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName)
                )
            else:
                self.negativeCacheRecord(IndexType.shortName, (recordType.name, shortName))

        returnValue(record)

    @inlineCallbacks
    def recordsWithEmailAddress(
        self, emailAddress, limitResults=None, timeoutSeconds=None
    ):

        # First check our cache
        record, doQuery = self.lookupRecord(
            IndexType.emailAddress,
            emailAddress,
            "recordsWithEmailAddress"
        )
        if record is None and doQuery:
            records = yield self._directory._wrapped_recordsWithEmailAddress(
                emailAddress,
                limitResults=limitResults, timeoutSeconds=timeoutSeconds
            )
            if len(records) == 1:
                # Only cache if there was a single match (which is the most
                # common scenario).  Caching multiple records for the exact
                # same key/value complicates the data structures.
                # Also, this is the only situation where we do index a cached
                # record on email address.  Otherwise, say we had faulted in
                # on "uid" and then indexed that record on its email address,
                # the next lookup by email address would only get that record,
                # but there might be others in the directory service with that
                # same email address.
                self.cacheRecord(
                    list(records)[0],
                    (
                        IndexType.uid, IndexType.guid,
                        IndexType.shortName, IndexType.emailAddress
                    )
                )
            elif len(records) == 0:
                self.negativeCacheRecord(IndexType.emailAddress, emailAddress)
        else:
            records = [record]

        returnValue(records)

    # Uncached methods:

    @property
    def recordType(self):
        # Defer to the directory service we're caching
        return self._directory.recordType

    def recordTypes(self):
        # Defer to the directory service we're caching
        return self._directory.recordTypes()

    def recordsFromExpression(
        self, expression, recordTypes=None, records=None,
        limitResults=None, timeoutSeconds=None
    ):
        # Defer to the directory service we're caching
        return self._directory.recordsFromExpression(
            expression, recordTypes=recordTypes, records=records,
            limitResults=limitResults, timeoutSeconds=timeoutSeconds
        )

    def recordsWithFieldValue(
        self, fieldName, value,
        limitResults=None, timeoutSeconds=None
    ):
        # Defer to the directory service we're caching
        return self._directory.recordsWithFieldValue(
            fieldName, value,
            limitResults=limitResults, timeoutSeconds=timeoutSeconds
        )

    def updateRecords(self, records, create=False):
        # Defer to the directory service we're caching
        return self._directory.updateRecords(records, create=create)

    def removeRecords(self, uids):
        # Defer to the directory service we're caching
        return self._directory.removeRecords(uids)

    def recordsWithRecordType(
        self, recordType, limitResults=None, timeoutSeconds=None
    ):
        # Defer to the directory service we're caching
        return self._directory.recordsWithRecordType(
            recordType, limitResults=limitResults, timeoutSeconds=timeoutSeconds
        )

    def recordsMatchingTokens(
        self, tokens, context=None, limitResults=None, timeoutSeconds=None
    ):
        return self._directory.recordsMatchingTokens(
            tokens, context=context,
            limitResults=limitResults, timeoutSeconds=timeoutSeconds
        )

    def recordsMatchingFields(
        self, fields, operand, recordType,
        limitResults=None, timeoutSeconds=None
    ):
        return self._directory.recordsMatchingFields(
            fields, operand, recordType,
            limitResults=limitResults, timeoutSeconds=timeoutSeconds
        )

    def recordsWithDirectoryBasedDelegates(self):
        return self._directory.recordsWithDirectoryBasedDelegates()

    def recordWithCalendarUserAddress(self, cua, timeoutSeconds=None):
        # This will get cached by the underlying recordWith... call
        return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(
            self, cua, timeoutSeconds=timeoutSeconds
        )

    def serversDB(self):
        return self._directory.serversDB()

    @inlineCallbacks
    def flush(self):
        if self._memcacher is not None:
            self._memcacher.flush()
        self.resetCache()
        yield self._directory.flush()

    def stats(self):
        return self._directory.stats()
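
A hedged sketch of wrapping an existing directory service in the cache above; "underlying" stands for an already-built IDirectoryService and the values are illustrative:

    caching = CachingDirectoryService(
        underlying,
        expireSeconds=60,          # cache entries expire after one minute
        lookupsBetweenPurges=100,  # purge expired entries every 100 lookups
        negativeCaching=True,      # also remember misses
    )
    record = yield caching.recordWithUID(u"some-uid")  # inside @inlineCallbacks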
Example #10
class DirectoryService(BaseDirectoryService):
    """
    Mac OS X Server Wiki directory service.
    """

    uidPrefix = u"wiki-"

    recordType = RecordType

    fieldName = ConstantsContainer((
        BaseFieldName,
        FieldName,
    ))

    def __init__(self, realmName, endpointDescriptor):
        BaseDirectoryService.__init__(self, realmName)
        self.endpointDescriptor = endpointDescriptor
        self._recordsByName = {}

    # This directory service is rather limited in its skills.
    # We don't attempt to implement any expression handling (i.e.
    # recordsFromNonCompoundExpression), and only support a couple of the
    # recordWith* convenience methods.

    def _recordWithName(self, name):
        record = self._recordsByName.get(name)

        if record is not None:
            return succeed(record)

        # FIXME: RPC to the wiki and check for existence of a wiki with the
        # given name...
        #
        # NOTE: Don't use the config module here; pass whatever info we need to
        # __init__().
        wikiExists = True

        if wikiExists:
            record = DirectoryRecord(
                self, {
                    self.fieldName.uid: u"{}{}".format(self.uidPrefix, name),
                    self.fieldName.recordType: RecordType.macOSXServerWiki,
                    self.fieldName.shortNames: [name],
                    self.fieldName.fullNames: [u"Wiki: {}".format(name)],
                })
            self._recordsByName[name] = record
            return succeed(record)

        return succeed(None)

    def recordWithUID(self, uid, timeoutSeconds=None):
        if uid.startswith(self.uidPrefix):
            return self._recordWithName(uid[len(self.uidPrefix):])
        return succeed(None)

    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
        if recordType is RecordType.macOSXServerWiki:
            return self._recordWithName(shortName)
        return succeed(None)

    def recordsFromExpression(self,
                              expression,
                              recordTypes=None,
                              records=None,
                              limitResults=None,
                              timeoutSeconds=None):
        return succeed(())
Example #11
def buildDirectory(store,
                   dataRoot,
                   servicesInfo,
                   augmentServiceInfo,
                   wikiServiceInfo,
                   serversDB=None):
    """
    Return a directory without using a config object; suitable for tests
    which need to have multiple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any xml
        based services.
    @param servicesInfo:  An iterable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService section
        of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """

    aggregatedServices = []

    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (DirectoryService as
                                                 ODDirectoryService)
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            directory = ODDirectoryService(suppressSystemRecords=True)

        elif "ldap" in directoryType:
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn,
                                         params.credentials.password)
            else:
                creds = None
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: ("apple-generateduid", ),
                    BaseFieldName.guid: ("apple-generateduid", ),
                    BaseFieldName.shortNames: (LDAPAttribute.uid.value, ),
                    BaseFieldName.fullNames: (LDAPAttribute.cn.value, ),
                    BaseFieldName.emailAddresses: (LDAPAttribute.mail.value, ),
                    BaseFieldName.password:
                    (LDAPAttribute.userPassword.value, ),
                    LDAPFieldName.memberDNs:
                    (LDAPAttribute.uniqueMember.value, ),
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user:
                    RecordTypeSchema(
                        relativeDN=u"ou=People",

                        # (objectClass=inetOrgPerson)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.inetOrgPerson.value,
                        ), ),
                    ),
                    RecordType.group:
                    RecordTypeSchema(
                        relativeDN=u"ou=Groups",

                        # (objectClass=groupOfUniqueNames)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.groupOfUniqueNames.value,
                        ), ),
                    ),
                }))

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)
        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if augmentServiceInfo.type:
        for augmentFile in augmentServiceInfo.params.xmlFiles:
            augmentFile = fullServerPath(dataRoot, augmentFile)
            augmentFilePath = FilePath(augmentFile)
            if not augmentFilePath.exists():
                augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

        augmentClass = namedClass(augmentServiceInfo.type)
        log.info("Configuring augment service of type: {augmentClass}",
                 augmentClass=augmentClass)
        try:
            augmentService = augmentClass(**augmentServiceInfo.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName,
                                                 store)
    aggregatedServices.append(delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(userDirectory.realmName,
                                 wikiServiceInfo.CollabHost,
                                 wikiServiceInfo.CollabPort))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName,
                                                   aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store,
                                              augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    return augmented
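
A hedged sketch of calling buildDirectory() from a test; it assumes ConfigDict accepts a plain mapping and fills in only the fields the function reads above (Enabled, type, params.xmlFile, params.recordTypes). All values are illustrative, and store, dataRoot, augmentServiceInfo, and wikiServiceInfo are assumed to be built elsewhere:

    servicesInfo = [
        ConfigDict({
            "Enabled": True,
            "type": "xml",
            "params": ConfigDict({
                "xmlFile": "accounts.xml",
                "recordTypes": ("users", "groups"),
            }),
        }),
    ]
    directory = buildDirectory(
        store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo,
    )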
Example #12
class DirectoryService(BaseDirectoryService, CalendarDirectoryServiceMixin):
    """
    Client side of directory proxy
    """

    # FIXME: somehow these should come from the actual directory:

    recordType = ConstantsContainer(
        (twext.who.idirectory.RecordType, txdav.who.idirectory.RecordType,
         txdav.who.delegates.RecordType, txdav.who.wiki.RecordType))

    fieldName = ConstantsContainer(
        (twext.who.idirectory.FieldName, txdav.who.idirectory.FieldName,
         txdav.who.augment.FieldName))

    def _dictToRecord(self, serializedFields):
        """
        Turn a dictionary of fields sent from the server into a directory
        record
        """
        if not serializedFields:
            return None

        # print("FIELDS", serializedFields)

        fields = {}
        for fieldName, value in serializedFields.iteritems():
            try:
                field = self.fieldName.lookupByName(fieldName)
            except ValueError:
                # unknown field
                pass
            else:
                valueType = self.fieldName.valueType(field)
                if valueType in (unicode, bool):
                    fields[field] = value
                elif valueType is uuid.UUID:
                    fields[field] = uuid.UUID(value)
                elif issubclass(valueType, Names):
                    if value is not None:
                        fields[field] = field.valueType.lookupByName(value)
                    else:
                        fields[field] = None
                elif issubclass(valueType, NamedConstant):
                    if fieldName == "recordType":  # Is there a better way?
                        fields[field] = self.recordType.lookupByName(value)

        # print("AFTER:", fields)
        return DirectoryRecord(self, fields)

    def _processSingleRecord(self, result):
        """
        Takes a dictionary with a "fields" key whose value is a pickled
        dictionary of a record's fields, and returns a record.
        """
        serializedFields = pickle.loads(result['fields'])
        return self._dictToRecord(serializedFields)

    def _processMultipleRecords(self, result):
        """
        Takes a dictionary with a "items" key whose value is an iterable
        of pickled dictionaries (of records' fields), and returns a list of
        records.
        """
        serializedFieldsList = []
        for fields in result["items"]:
            fields = pickle.loads(fields)
            serializedFieldsList.append(fields)
        results = []
        for serializedFields in serializedFieldsList:
            record = self._dictToRecord(serializedFields)
            if record is not None:
                results.append(record)
        return results

    @inlineCallbacks
    def _getConnection(self):

        from twistedcaldav.config import config
        path = config.DirectoryProxy.SocketPath
        if getattr(self, "_connection", None) is None:
            log.debug("Creating connection")
            connection = (yield ClientCreator(reactor,
                                              amp.AMP).connectUNIX(path))
            self._connection = connection
        returnValue(self._connection)

    @inlineCallbacks
    def _sendCommand(self, command, **kwds):
        """
        Execute a remote AMP command, first making the connection to the peer.
        Any kwds are passed on to the AMP command.

        @param command: the AMP command to call
        @type command: L{twisted.protocols.amp.Command}
        """
        ampProto = (yield self._getConnection())
        try:
            results = (yield ampProto.callRemote(command, **kwds))
        except Exception as e:
            log.error("Failed AMP command", error=e)
            #  FIXME: is there a way to hook into ConnectionLost?
            self._connection = None
            raise
        returnValue(results)
Example #13
class CachingDirectoryService(BaseDirectoryService,
                              CalendarDirectoryServiceMixin):
    """
    Caching directory service.

    This is a directory service that wraps an L{IDirectoryService} and caches
    directory records.
    """

    fieldName = ConstantsContainer((
        BaseFieldName,
        FieldName,
    ))

    def __init__(self, directory, expireSeconds=30):
        BaseDirectoryService.__init__(self, directory.realmName)
        self._directory = directory

        # Patch the wrapped directory service's recordWithXXX to instead
        # use this cache

        directory._wrapped_recordWithUID = directory.recordWithUID
        directory.recordWithUID = self.recordWithUID

        directory._wrapped_recordWithGUID = directory.recordWithGUID
        directory.recordWithGUID = self.recordWithGUID

        directory._wrapped_recordWithShortName = directory.recordWithShortName
        directory.recordWithShortName = self.recordWithShortName

        directory._wrapped_recordsWithEmailAddress = directory.recordsWithEmailAddress
        directory.recordsWithEmailAddress = self.recordsWithEmailAddress

        self._expireSeconds = expireSeconds
        self.resetCache()

    def setTimingMethod(self, f):
        """
        Replace the default no-op timing method
        """
        self._addTiming = f

    def _addTiming(self, key, duration):
        """
        Timing won't get recorded by default -- you must call setTimingMethod()
        with a callable that takes a key (such as a method name) and a duration.
        """
        pass

    def resetCache(self):
        """
        Clear the cache
        """
        self._cache = {
            IndexType.uid: {},
            IndexType.guid: {},
            IndexType.shortName: {},  # key is (recordType.name, shortName)
            IndexType.emailAddress: {},
        }
        self._hitCount = 0
        self._requestCount = 0
        self._lookupsUntilScan = SCAN_AFTER_LOOKUP_COUNT

    def setTestTime(self, timestamp):
        """
        Only used for unit tests to override the notion of "now"

        @param timestamp: seconds
        @type timestamp: C{float}
        """
        self._test_time = timestamp

    def cacheRecord(self, record, indexTypes):
        """
        Store a record in the cache, within the specified indexes

        @param record: the directory record
        @param indexTypes: an iterable of L{IndexType}
        """

        if hasattr(self, "_test_time"):
            timestamp = self._test_time
        else:
            timestamp = time.time()

        if IndexType.uid in indexTypes:
            self._cache[IndexType.uid][record.uid] = (timestamp, record)

        if IndexType.guid in indexTypes:
            try:
                self._cache[IndexType.guid][record.guid] = (timestamp, record)
            except AttributeError:
                pass
        if IndexType.shortName in indexTypes:
            try:
                typeName = record.recordType.name
                for name in record.shortNames:
                    self._cache[IndexType.shortName][(typeName,
                                                      name)] = (timestamp,
                                                                record)
            except AttributeError:
                pass
        if IndexType.emailAddress in indexTypes:
            try:
                for emailAddress in record.emailAddresses:
                    self._cache[IndexType.emailAddress][emailAddress] = (
                        timestamp, record)
            except AttributeError:
                pass

    def purgeRecord(self, record):
        """
        Remove a record from all indices in the cache

        @param record: the directory record
        """

        if record.uid in self._cache[IndexType.uid]:
            del self._cache[IndexType.uid][record.uid]

        try:
            if record.guid in self._cache[IndexType.guid]:
                del self._cache[IndexType.guid][record.guid]
        except AttributeError:
            pass

        try:
            typeName = record.recordType.name
            for name in record.shortNames:
                key = (typeName, name)
                if key in self._cache[IndexType.shortName]:
                    del self._cache[IndexType.shortName][key]
        except AttributeError:
            pass

        try:
            for emailAddress in record.emailAddresses:
                if emailAddress in self._cache[IndexType.emailAddress]:
                    del self._cache[IndexType.emailAddress][emailAddress]
        except AttributeError:
            pass

    def purgeExpiredRecords(self):
        """
        Scans the cache for expired records and deletes them
        """
        if hasattr(self, "_test_time"):
            now = self._test_time
        else:
            now = time.time()

        for indexType in self._cache:
            for key, (cachedTime,
                      _ignore_record) in self._cache[indexType].items():
                if now - self._expireSeconds > cachedTime:
                    del self._cache[indexType][key]

    def lookupRecord(self, indexType, key, name):
        """
        Look for a record in the specified index, under the specified key.
        purgeExpiredRecords() is called after every SCAN_AFTER_LOOKUP_COUNT
        lookups.

        @param indexType: an index type
        @type indexType: L{IndexType}

        @param key: the key to look up in the specified index
        @type key: any valid type that can be used as a dictionary key

        @return: the cached directory record, or None
        @rtype: L{DirectoryRecord}
        """

        if self._lookupsUntilScan == 0:
            self._lookupsUntilScan = SCAN_AFTER_LOOKUP_COUNT
            self.purgeExpiredRecords()
        else:
            self._lookupsUntilScan -= 1

        self._requestCount += 1
        if key in self._cache[indexType]:

            if hasattr(self, "_test_time"):
                now = self._test_time
            else:
                now = time.time()

            cachedTime, record = self._cache[indexType].get(key, (0.0, None))
            if now - self._expireSeconds > cachedTime:
                log.debug("Directory cache miss (expired): {index} {key}",
                          index=indexType.value,
                          key=key)
                # This record has expired
                self.purgeRecord(record)
                self._addTiming("{}-expired".format(name), 0)
                return None

            log.debug("Directory cache hit: {index} {key}",
                      index=indexType.value,
                      key=key)
            self._hitCount += 1
            self._addTiming("{}-hit".format(name), 0)
            return record
        else:
            log.debug("Directory cache miss: {index} {key}",
                      index=indexType.value,
                      key=key)

        self._addTiming("{}-miss".format(name), 0)
        return None

    # Cached methods:

    @inlineCallbacks
    def recordWithUID(self, uid, timeoutSeconds=None):

        # First check our cache
        record = self.lookupRecord(IndexType.uid, uid, "recordWithUID")
        if record is None:
            record = yield self._directory._wrapped_recordWithUID(
                uid, timeoutSeconds=timeoutSeconds)
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName))

        returnValue(record)

    @inlineCallbacks
    def recordWithGUID(self, guid, timeoutSeconds=None):

        # First check our cache
        record = self.lookupRecord(IndexType.guid, guid, "recordWithGUID")
        if record is None:
            record = yield self._directory._wrapped_recordWithGUID(
                guid, timeoutSeconds=timeoutSeconds)
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName))

        returnValue(record)

    @inlineCallbacks
    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):

        # First check our cache
        record = self.lookupRecord(IndexType.shortName,
                                   (recordType.name, shortName),
                                   "recordWithShortName")
        if record is None:
            record = yield self._directory._wrapped_recordWithShortName(
                recordType, shortName, timeoutSeconds=timeoutSeconds)
            if record is not None:
                # Note we do not index on email address; see below.
                self.cacheRecord(
                    record,
                    (IndexType.uid, IndexType.guid, IndexType.shortName))

        returnValue(record)

    @inlineCallbacks
    def recordsWithEmailAddress(self,
                                emailAddress,
                                limitResults=None,
                                timeoutSeconds=None):

        # First check our cache
        record = self.lookupRecord(IndexType.emailAddress, emailAddress,
                                   "recordsWithEmailAddress")
        if record is None:
            records = yield self._directory._wrapped_recordsWithEmailAddress(
                emailAddress,
                limitResults=limitResults,
                timeoutSeconds=timeoutSeconds)
            if len(records) == 1:
                # Only cache if there was a single match (which is the most
                # common scenario).  Caching multiple records for the exact
                # same key/value complicates the data structures.
                # Also, this is the only situation where we do index a cached
                # record on email address.  Otherwise, say we had faulted in
                # on "uid" and then indexed that record on its email address,
                # the next lookup by email address would only get that record,
                # but there might be others in the directory service with that
                # same email address.
                self.cacheRecord(
                    list(records)[0],
                    (IndexType.uid, IndexType.guid, IndexType.shortName,
                     IndexType.emailAddress))
        else:
            records = [record]

        returnValue(records)

    # Uncached methods:

    @property
    def recordType(self):
        # Defer to the directory service we're caching
        return self._directory.recordType

    def recordTypes(self):
        # Defer to the directory service we're caching
        return self._directory.recordTypes()

    def recordsFromExpression(self,
                              expression,
                              recordTypes=None,
                              records=None,
                              limitResults=None,
                              timeoutSeconds=None):
        # Defer to the directory service we're caching
        return self._directory.recordsFromExpression(
            expression,
            recordTypes=recordTypes,
            records=records,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    def recordsWithFieldValue(self,
                              fieldName,
                              value,
                              limitResults=None,
                              timeoutSeconds=None):
        # Defer to the directory service we're caching
        return self._directory.recordsWithFieldValue(
            fieldName,
            value,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    def updateRecords(self, records, create=False):
        # Defer to the directory service we're caching
        return self._directory.updateRecords(records, create=create)

    def removeRecords(self, uids):
        # Defer to the directory service we're caching
        return self._directory.removeRecords(uids)

    def recordsWithRecordType(self,
                              recordType,
                              limitResults=None,
                              timeoutSeconds=None):
        # Defer to the directory service we're caching
        return self._directory.recordsWithRecordType(
            recordType,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    def recordsMatchingTokens(self,
                              tokens,
                              context=None,
                              limitResults=None,
                              timeoutSeconds=None):
        return self._directory.recordsMatchingTokens(
            tokens,
            context=context,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    def recordsMatchingFields(self,
                              fields,
                              operand,
                              recordType,
                              limitResults=None,
                              timeoutSeconds=None):
        return self._directory.recordsMatchingFields(
            fields,
            operand,
            recordType,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    def recordsWithDirectoryBasedDelegates(self):
        return self._directory.recordsWithDirectoryBasedDelegates()

    def recordWithCalendarUserAddress(self, cua, timeoutSeconds=None):
        # This will get cached by the underlying recordWith... call
        return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(
            self, cua, timeoutSeconds=timeoutSeconds)

    def serversDB(self):
        return self._directory.serversDB()

    @inlineCallbacks
    def flush(self):
        self.resetCache()
        yield self._directory.flush()

    def stats(self):
        return self._directory.stats()
Example #14
class AugmentedDirectoryService(BaseDirectoryService,
                                CalendarDirectoryServiceMixin):
    """
    Augmented directory service.

    This is a directory service that wraps an L{IDirectoryService} and augments
    directory records with additional or modified fields.
    """

    fieldName = ConstantsContainer((
        BaseFieldName,
        FieldName,
    ))

    _timings = {}

    def __init__(self, directory, store, augmentDB):
        BaseDirectoryService.__init__(self, directory.realmName)
        self._directory = directory
        self._store = store
        self._augmentDB = augmentDB

        # An LDAP DS has extra info to expose via the dashboard
        # This is assigned in buildDirectory()
        self._ldapDS = None

    @classmethod
    def _addTiming(cls, key, duration):
        if key not in cls._timings:
            cls._timings[key] = (0, 0.0)
        count, timeSpent = cls._timings[key]
        count += 1
        timeSpent += duration
        cls._timings[key] = (count, timeSpent)

    def flush(self):
        return self._directory.flush()

    def stats(self):
        results = {}
        results.update(self._timings)

        # An LDAP DS has extra info to expose via the dashboard
        if self._ldapDS is not None:
            results.update(self._ldapDS.poolStats)

        return succeed(results)

    @property
    def recordType(self):
        # Defer to the directory service we're augmenting
        return self._directory.recordType

    def recordTypes(self):
        # Defer to the directory service we're augmenting
        return self._directory.recordTypes()

    @inlineCallbacks
    def recordsFromExpression(self,
                              expression,
                              recordTypes=None,
                              limitResults=None,
                              timeoutSeconds=None):
        records = yield self._directory.recordsFromExpression(
            expression,
            recordTypes=recordTypes,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @inlineCallbacks
    def recordsWithFieldValue(self,
                              fieldName,
                              value,
                              limitResults=None,
                              timeoutSeconds=None):
        records = yield self._directory.recordsWithFieldValue(
            fieldName,
            value,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    @inlineCallbacks
    def recordWithUID(self, uid, timeoutSeconds=None):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(uid, unicode):
            # log.warn("Need to change uid to unicode")
            uid = uid.decode("utf-8")

        record = yield self._directory.recordWithUID(
            uid, timeoutSeconds=timeoutSeconds)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordWithGUID(self, guid, timeoutSeconds=None):
        record = yield self._directory.recordWithGUID(
            guid, timeoutSeconds=timeoutSeconds)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordsWithRecordType(self,
                              recordType,
                              limitResults=None,
                              timeoutSeconds=None):
        records = yield self._directory.recordsWithRecordType(
            recordType,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    @inlineCallbacks
    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(shortName, unicode):
            # log.warn("Need to change shortName to unicode")
            shortName = shortName.decode("utf-8")

        record = yield self._directory.recordWithShortName(
            recordType, shortName, timeoutSeconds=timeoutSeconds)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordsWithEmailAddress(self,
                                emailAddress,
                                limitResults=None,
                                timeoutSeconds=None):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(emailAddress, unicode):
            # log.warn("Need to change emailAddress to unicode")
            emailAddress = emailAddress.decode("utf-8")

        records = yield self._directory.recordsWithEmailAddress(
            emailAddress,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    def recordWithCalendarUserAddress(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(
            self, *args, **kwds)

    @timed
    def recordsMatchingTokens(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordsMatchingTokens(
            self, *args, **kwds)

    @timed
    def recordsMatchingFields(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordsMatchingFields(
            self, *args, **kwds)

    @timed
    @inlineCallbacks
    def updateRecords(self, records, create=False):
        """
        Pull out the augmented fields from each record, apply those to the
        augments database, then update the base records.
        """

        baseRecords = []
        augmentRecords = []

        for record in records:

            # Split out the base fields from the augment fields
            baseFields, augmentFields = self._splitFields(record)

            # Ignore groups for now
            if augmentFields and record.recordType != RecordType.group:
                # Create an AugmentRecord
                autoScheduleMode = {
                    AutoScheduleMode.none: "none",
                    AutoScheduleMode.accept: "accept-always",
                    AutoScheduleMode.decline: "decline-always",
                    AutoScheduleMode.acceptIfFree: "accept-if-free",
                    AutoScheduleMode.declineIfBusy: "decline-if-busy",
                    AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
                }.get(augmentFields.get(FieldName.autoScheduleMode, None),
                      None)

                kwargs = {
                    "uid": record.uid,
                    "autoScheduleMode": autoScheduleMode,
                }
                if FieldName.hasCalendars in augmentFields:
                    kwargs["enabledForCalendaring"] = augmentFields[
                        FieldName.hasCalendars]
                if FieldName.hasContacts in augmentFields:
                    kwargs["enabledForAddressBooks"] = augmentFields[
                        FieldName.hasContacts]
                if FieldName.loginAllowed in augmentFields:
                    kwargs["enabledForLogin"] = augmentFields[
                        FieldName.loginAllowed]
                if FieldName.autoAcceptGroup in augmentFields:
                    kwargs["autoAcceptGroup"] = augmentFields[
                        FieldName.autoAcceptGroup]
                if FieldName.serviceNodeUID in augmentFields:
                    kwargs["serverID"] = augmentFields[
                        FieldName.serviceNodeUID]
                augmentRecord = AugmentRecord(**kwargs)

                augmentRecords.append(augmentRecord)

            # Create new base records:
            baseRecords.append(
                DirectoryRecord(
                    self._directory,
                    record._baseRecord.fields
                    if hasattr(record, "_baseRecord")
                    else baseFields
                )
            )

        # Apply the augment records
        if augmentRecords:
            yield self._augmentDB.addAugmentRecords(augmentRecords)

        # Apply the base records
        if baseRecords:
            try:
                yield self._directory.updateRecords(baseRecords, create=create)
            except NotAllowedError:
                pass

    def _splitFields(self, record):
        """
        Returns a tuple of two dictionaries; the first contains all the
        non-augment fields, and the second contains all the augment fields.
        """
        if record is None:
            return None

        augmentFields = {}
        baseFields = record.fields.copy()
        for field in (FieldName.loginAllowed, FieldName.hasCalendars,
                      FieldName.hasContacts, FieldName.autoScheduleMode,
                      FieldName.autoAcceptGroup, FieldName.serviceNodeUID):
            if field in baseFields:
                augmentFields[field] = baseFields[field]
                del baseFields[field]

        return (baseFields, augmentFields)

    @inlineCallbacks
    def removeRecords(self, uids):
        yield self._augmentDB.removeAugmentRecords(uids)
        yield self._directory.removeRecords(uids)

    def _assignToField(self, fields, name, value):
        """
        Assign a value to a field only if not already present in fields.
        """
        field = self.fieldName.lookupByName(name)
        if field not in fields:
            fields[field] = value

    @inlineCallbacks
    def _augment(self, record):
        if record is None:
            returnValue(None)

        augmentRecord = yield self._augmentDB.getAugmentRecord(
            record.uid, self.recordTypeToOldName(record.recordType))
        if augmentRecord is None:
            # Augments does not know about this record type, so return
            # the original record
            returnValue(record)

        fields = record.fields.copy()

        if augmentRecord:

            if record.recordType == RecordType.group:
                self._assignToField(fields, "hasCalendars", False)
                self._assignToField(fields, "hasContacts", False)
            else:
                self._assignToField(fields, "hasCalendars",
                                    augmentRecord.enabledForCalendaring)

                self._assignToField(fields, "hasContacts",
                                    augmentRecord.enabledForAddressBooks)

            # In the case of XML augments, a missing auto-schedule-mode
            # element has the same meaning as an element with a value of
            # "default", in which case augmentRecord.autoScheduleMode is
            # "default".  On the record we're augmenting, "default" mode means
            # autoScheduleMode gets set to None (distinct from
            # AutoScheduleMode.none!), which gets swapped for
            # config.Scheduling.Options.AutoSchedule.DefaultMode in
            # checkAttendeeAutoReply(), except for locations and resources,
            # which default to automatic.
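            # For example (hypothetical values): an augment entry of
            # "accept-if-free" becomes AutoScheduleMode.acceptIfFree below,
            # while "default" (or a missing element) maps to None; None is
            # kept for users and groups but replaced with
            # acceptIfFreeDeclineIfBusy for locations and resources.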

            autoScheduleMode = {
                "none": AutoScheduleMode.none,
                "accept-always": AutoScheduleMode.accept,
                "decline-always": AutoScheduleMode.decline,
                "accept-if-free": AutoScheduleMode.acceptIfFree,
                "decline-if-busy": AutoScheduleMode.declineIfBusy,
                "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
            }.get(augmentRecord.autoScheduleMode, None)

            # Resources/Locations default to automatic
            if record.recordType in (CalRecordType.location,
                                     CalRecordType.resource):
                if autoScheduleMode is None:
                    autoScheduleMode = AutoScheduleMode.acceptIfFreeDeclineIfBusy

            self._assignToField(fields, "autoScheduleMode", autoScheduleMode)

            if augmentRecord.autoAcceptGroup is not None:
                self._assignToField(
                    fields, "autoAcceptGroup",
                    augmentRecord.autoAcceptGroup.decode("utf-8"))

            self._assignToField(fields, "loginAllowed",
                                augmentRecord.enabledForLogin)

            self._assignToField(fields, "serviceNodeUID",
                                augmentRecord.serverID.decode("utf-8"))

        else:
            self._assignToField(fields, "hasCalendars", False)
            self._assignToField(fields, "hasContacts", False)
            self._assignToField(fields, "loginAllowed", False)

        # print("Augmented fields", fields)

        # Clone to a new record with the augmented fields
        augmentedRecord = AugmentedDirectoryRecord(self, record, fields)

        returnValue(augmentedRecord)

    @inlineCallbacks
    def setAutoScheduleMode(self, record, autoScheduleMode):
        augmentRecord = yield self._augmentDB.getAugmentRecord(
            record.uid, self.recordTypeToOldName(record.recordType))
        if augmentRecord is not None:
            autoScheduleMode = {
                AutoScheduleMode.none: "none",
                AutoScheduleMode.accept: "accept-always",
                AutoScheduleMode.decline: "decline-always",
                AutoScheduleMode.acceptIfFree: "accept-if-free",
                AutoScheduleMode.declineIfBusy: "decline-if-busy",
                AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
            }.get(autoScheduleMode)

            augmentRecord.autoScheduleMode = autoScheduleMode
            yield self._augmentDB.addAugmentRecords([augmentRecord])
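
# A minimal usage sketch for the service above (names are illustrative; in a
# real deployment the wiring is done by buildDirectory(), shown in Example #16
# below):
#
#     augmented = AugmentedDirectoryService(baseDirectory, store, augmentDB)
#     record = yield augmented.recordWithUID(u"some-uid")
#     # `record` is an AugmentedDirectoryRecord whose hasCalendars,
#     # hasContacts, loginAllowed and autoScheduleMode fields were filled in
#     # from augmentDB.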
Example #15
class EnhancedDirectoryService(OpenDirectoryService):
    recordType = ConstantsContainer((
        BaseRecordType.user,
        BaseRecordType.group,
        _CSRecordType.location,
        _CSRecordType.resource,
    ))
Example #16
def buildDirectory(
    store,
    dataRoot,
    servicesInfo,
    augmentServiceInfo,
    wikiServiceInfo,
    serversDB=None,
    cachingSeconds=0,
    filterStartsWith=False,
    lookupsBetweenPurges=0,
    negativeCaching=True,
):
    """
    Return a directory without using a config object; suitable for tests
    which need to have multiple directory instances.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for any xml
        based services.
    @param servicesInfo: An iterable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService section
        of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """

    aggregatedServices = []
    cachingServices = []
    ldapService = None  # LDAP DS has extra stats (see augment.py)

    for serviceValue in servicesInfo:

        if not serviceValue.Enabled:
            continue

        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (
                DirectoryService as ODDirectoryService
            )
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            node = params.node
            directory = ODDirectoryService(nodeName=node,
                                           suppressSystemRecords=True)

        elif "ldap" in directoryType:
            from twext.who.ldap import (
                DirectoryService as LDAPDirectoryService,
                FieldName as LDAPFieldName,
                RecordTypeSchema,
            )

            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn,
                                         params.credentials.password)
            else:
                creds = None
            mapping = params.mapping
            extraFilters = params.extraFilters
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                useTLS=params.useTLS,
                credentials=creds,
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: mapping.uid,
                    BaseFieldName.guid: mapping.guid,
                    BaseFieldName.shortNames: mapping.shortNames,
                    BaseFieldName.fullNames: mapping.fullNames,
                    BaseFieldName.emailAddresses: mapping.emailAddresses,
                    LDAPFieldName.memberDNs: mapping.memberDNs,
                    CalFieldName.readOnlyProxy: mapping.readOnlyProxy,
                    CalFieldName.readWriteProxy: mapping.readWriteProxy,
                    CalFieldName.loginAllowed: mapping.loginAllowed,
                    CalFieldName.hasCalendars: mapping.hasCalendars,
                    CalFieldName.autoScheduleMode: mapping.autoScheduleMode,
                    CalFieldName.autoAcceptGroup: mapping.autoAcceptGroup,
                    CalFieldName.serviceNodeUID: mapping.serviceNodeUID,
                    CalFieldName.associatedAddress: mapping.associatedAddress,
                    CalFieldName.geographicLocation: mapping.geographicLocation,
                    CalFieldName.streetAddress: mapping.streetAddress,
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=params.rdnSchema.users,
                        attributes=(),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=params.rdnSchema.groups,
                        attributes=(),
                    ),
                    CalRecordType.location: RecordTypeSchema(
                        relativeDN=params.rdnSchema.locations,
                        attributes=(),
                    ),
                    CalRecordType.resource: RecordTypeSchema(
                        relativeDN=params.rdnSchema.resources,
                        attributes=(),
                    ),
                    CalRecordType.address: RecordTypeSchema(
                        relativeDN=params.rdnSchema.addresses,
                        attributes=(),
                    ),
                }),
                extraFilters={
                    RecordType.user: extraFilters.get("users", ""),
                    RecordType.group: extraFilters.get("groups", ""),
                    CalRecordType.location: extraFilters.get("locations", ""),
                    CalRecordType.resource: extraFilters.get("resources", ""),
                    CalRecordType.address: extraFilters.get("addresses", ""),
                },
                threadPoolMax=params.get("threadPoolMax", 10),
                authConnectionMax=params.get("authConnectionMax", 5),
                queryConnectionMax=params.get("queryConnectionMax", 5),
                tries=params.get("tries", 3),
                warningThresholdSeconds=params.get("warningThresholdSeconds",
                                                   5),
            )
            ldapService = directory

        elif "inmemory" in directoryType:
            from txdav.who.test.support import CalendarInMemoryDirectoryService
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        types = []
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)

            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            if recordType in types:
                log.error("Duplicate Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError

            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)

        if cachingSeconds:
            directory = CachingDirectoryService(
                directory,
                expireSeconds=cachingSeconds,
                lookupsBetweenPurges=lookupsBetweenPurges,
                negativeCaching=negativeCaching,
            )
            cachingServices.append(directory)

        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    serviceClass = {
        "xml": "twistedcaldav.directory.augment.AugmentXMLDB",
    }

    for augmentFile in augmentServiceInfo.params.xmlFiles:
        augmentFile = fullServerPath(dataRoot, augmentFile)
        augmentFilePath = FilePath(augmentFile)
        if not augmentFilePath.exists():
            augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

    augmentClass = namedClass(serviceClass[augmentServiceInfo.type])
    log.info("Configuring augment service of type: {augmentClass}",
             augmentClass=augmentClass)
    try:
        augmentService = augmentClass(**augmentServiceInfo.params)
    except IOError:
        log.error("Could not start augment service")
        raise

    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName,
                                                 store)
    # (put at front of list so we don't try to ask the actual DS services
    # about the delegate-related principals, for performance)
    aggregatedServices.insert(0, delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(
                userDirectory.realmName,
                wikiServiceInfo.EndpointDescriptor,
            ))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName,
                                                   aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store,
                                              augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

        # Tell each caching service what method to use when reporting
        # times and cache stats
        for cachingService in cachingServices:
            cachingService.setTimingMethod(augmented._addTiming)

        # LDAP has additional stats to report
        augmented._ldapDS = ldapService

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    if filterStartsWith:
        augmented.setFilter(startswithFilter)

    return augmented
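
# Sketch of how a test might call this builder (the ConfigDict arguments are
# illustrative placeholders mirroring the corresponding stdconfig sections):
#
#     directory = buildDirectory(
#         store,
#         dataRoot,
#         [directoryServiceConfig, resourceServiceConfig],
#         augmentServiceConfig,
#         wikiServiceConfig,
#         cachingSeconds=60,
#     )
#     record = yield directory.recordWithUID(u"some-uid")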
Example #17
class AugmentedDirectoryService(BaseDirectoryService,
                                CalendarDirectoryServiceMixin):
    """
    Augmented directory service.

    This is a directory service that wraps an L{IDirectoryService} and augments
    directory records with additional or modified fields.
    """

    fieldName = ConstantsContainer((
        BaseFieldName,
        FieldName,
    ))

    _timings = {}

    def __init__(self, directory, store, augmentDB):
        BaseDirectoryService.__init__(self, directory.realmName)
        self._directory = directory
        self._store = store
        self._augmentDB = augmentDB

    def stats(self):
        return self._timings

    @property
    def recordType(self):
        # Defer to the directory service we're augmenting
        return self._directory.recordType

    def recordTypes(self):
        # Defer to the directory service we're augmenting
        return self._directory.recordTypes()

    @inlineCallbacks
    def recordsFromExpression(self, expression, recordTypes=None):
        records = yield self._directory.recordsFromExpression(
            expression, recordTypes=recordTypes)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @inlineCallbacks
    def recordsWithFieldValue(self, fieldName, value):
        records = yield self._directory.recordsWithFieldValue(fieldName, value)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    @inlineCallbacks
    def recordWithUID(self, uid):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(uid, unicode):
            # log.warn("Need to change uid to unicode")
            uid = uid.decode("utf-8")

        record = yield self._directory.recordWithUID(uid)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordWithGUID(self, guid):
        record = yield self._directory.recordWithGUID(guid)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordsWithRecordType(self, recordType):
        records = yield self._directory.recordsWithRecordType(recordType)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    @inlineCallbacks
    def recordWithShortName(self, recordType, shortName):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(shortName, unicode):
            # log.warn("Need to change shortName to unicode")
            shortName = shortName.decode("utf-8")

        record = yield self._directory.recordWithShortName(
            recordType, shortName)
        record = yield self._augment(record)
        returnValue(record)

    @timed
    @inlineCallbacks
    def recordsWithEmailAddress(self, emailAddress):
        # MOVE2WHO, REMOVE THIS:
        if not isinstance(emailAddress, unicode):
            # log.warn("Need to change emailAddress to unicode")
            emailAddress = emailAddress.decode("utf-8")

        records = yield self._directory.recordsWithEmailAddress(emailAddress)
        augmented = []
        for record in records:
            record = yield self._augment(record)
            augmented.append(record)
        returnValue(augmented)

    @timed
    def recordWithCalendarUserAddress(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordWithCalendarUserAddress(
            self, *args, **kwds)

    @timed
    def recordsMatchingTokens(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordsMatchingTokens(
            self, *args, **kwds)

    @timed
    def recordsMatchingFields(self, *args, **kwds):
        return CalendarDirectoryServiceMixin.recordsMatchingFields(
            self, *args, **kwds)

    @timed
    @inlineCallbacks
    def updateRecords(self, records, create=False):
        """
        Pull out the augmented fields from each record, apply those to the
        augments database, then update the base records.
        """

        baseRecords = []
        augmentRecords = []

        for record in records:

            # Split out the base fields from the augment fields
            baseFields, augmentFields = self._splitFields(record)

            if augmentFields:
                # Create an AugmentRecord
                autoScheduleMode = {
                    AutoScheduleMode.none: "none",
                    AutoScheduleMode.accept: "accept-always",
                    AutoScheduleMode.decline: "decline-always",
                    AutoScheduleMode.acceptIfFree: "accept-if-free",
                    AutoScheduleMode.declineIfBusy: "decline-if-busy",
                    AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic",
                }.get(augmentFields.get(FieldName.autoScheduleMode, None),
                      None)

                kwargs = {
                    "uid": record.uid,
                    "autoScheduleMode": autoScheduleMode,
                }
                if FieldName.hasCalendars in augmentFields:
                    kwargs["enabledForCalendaring"] = augmentFields[
                        FieldName.hasCalendars]
                if FieldName.hasContacts in augmentFields:
                    kwargs["enabledForAddressBooks"] = augmentFields[
                        FieldName.hasContacts]
                if FieldName.loginAllowed in augmentFields:
                    kwargs["enabledForLogin"] = augmentFields[
                        FieldName.loginAllowed]
                if FieldName.autoAcceptGroup in augmentFields:
                    kwargs["autoAcceptGroup"] = augmentFields[
                        FieldName.autoAcceptGroup]
                if FieldName.serviceNodeUID in augmentFields:
                    kwargs["serverID"] = augmentFields[
                        FieldName.serviceNodeUID]
                augmentRecord = AugmentRecord(**kwargs)

                augmentRecords.append(augmentRecord)

            # Create new base records:
            baseRecords.append(DirectoryRecord(self._directory, baseFields))

        # Apply the augment records
        if augmentRecords:
            yield self._augmentDB.addAugmentRecords(augmentRecords)

        # Apply the base records
        if baseRecords:
            yield self._directory.updateRecords(baseRecords, create=create)

    def _splitFields(self, record):
        """
        Returns a tuple of two dictionaries; the first contains all the
        non-augment fields, and the second contains all the augment fields.
        """
        if record is None:
            return None

        augmentFields = {}
        baseFields = record.fields.copy()
        for field in (FieldName.loginAllowed, FieldName.hasCalendars,
                      FieldName.hasContacts, FieldName.autoScheduleMode,
                      FieldName.autoAcceptGroup, FieldName.serviceNodeUID):
            if field in baseFields:
                augmentFields[field] = baseFields[field]
                del baseFields[field]

        return (baseFields, augmentFields)

    def removeRecords(self, uids):
        self._augmentDB.removeAugmentRecords(uids)
        return self._directory.removeRecords(uids)

    def _assignToField(self, fields, name, value):
        field = self.fieldName.lookupByName(name)
        fields[field] = value
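    # Note: unlike _assignToField in the AugmentedDirectoryService of
    # Example #14, which only fills in a field when it is not already present,
    # this variant overwrites any existing value unconditionally.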

    @inlineCallbacks
    def _augment(self, record):
        if record is None:
            returnValue(None)

        augmentRecord = yield self._augmentDB.getAugmentRecord(
            record.uid, self.recordTypeToOldName(record.recordType))
        if augmentRecord is None:
            # Augments does not know about this record type, so return
            # the original record
            returnValue(record)

        fields = record.fields.copy()

        # print("Got augment record", augmentRecord)

        if augmentRecord:

            self._assignToField(fields, "hasCalendars",
                                augmentRecord.enabledForCalendaring)

            self._assignToField(fields, "hasContacts",
                                augmentRecord.enabledForAddressBooks)

            autoScheduleMode = {
                "none": AutoScheduleMode.none,
                "accept-always": AutoScheduleMode.accept,
                "decline-always": AutoScheduleMode.decline,
                "accept-if-free": AutoScheduleMode.acceptIfFree,
                "decline-if-busy": AutoScheduleMode.declineIfBusy,
                "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
            }.get(augmentRecord.autoScheduleMode, None)

            # Resources/Locations default to automatic
            if record.recordType in (CalRecordType.location,
                                     CalRecordType.resource):
                if autoScheduleMode is None:
                    autoScheduleMode = AutoScheduleMode.acceptIfFreeDeclineIfBusy

            self._assignToField(fields, "autoScheduleMode", autoScheduleMode)

            if augmentRecord.autoAcceptGroup is not None:
                self._assignToField(
                    fields, "autoAcceptGroup",
                    augmentRecord.autoAcceptGroup.decode("utf-8"))

            self._assignToField(fields, "loginAllowed",
                                augmentRecord.enabledForLogin)

            self._assignToField(fields, "serviceNodeUID",
                                augmentRecord.serverID.decode("utf-8"))

            if ((fields.get(self.fieldName.lookupByName("hasCalendars"), False)
                 or fields.get(self.fieldName.lookupByName("hasContacts"),
                               False))
                    and record.recordType == RecordType.group):
                self._assignToField(fields, "hasCalendars", False)
                self._assignToField(fields, "hasContacts", False)

                # For augment records cloned from the Default augment record,
                # don't emit this message:
                if not augmentRecord.clonedFromDefault:
                    log.error(
                        "Group {record} cannot be enabled for "
                        "calendaring or address books",
                        record=record)

        else:
            self._assignToField(fields, "hasCalendars", False)
            self._assignToField(fields, "hasContacts", False)
            self._assignToField(fields, "loginAllowed", False)

        # print("Augmented fields", fields)

        # Clone to a new record with the augmented fields
        returnValue(AugmentedDirectoryRecord(self, record, fields))