def startService(self):
    """
    Start the service: load XML proxy assignments, prime the group
    membership cache, fix up sqlite ownership, process legacy inbox items,
    and migrate iMIP mail tokens into the store.  Runs as an
    inlineCallbacks sequence; each step must complete before the next.
    """
    directory = directoryFromConfig(self.config)

    # Load proxy assignments from XML if specified
    if self.config.ProxyLoadFromFile:
        proxydbClass = namedClass(self.config.ProxyDBService.type)
        calendaruserproxy.ProxyDBService = proxydbClass(
            **self.config.ProxyDBService.params)
        loader = XMLCalendarUserProxyLoader(self.config.ProxyLoadFromFile)
        yield loader.updateProxyDB()

    # Populate the group membership cache
    if (self.config.GroupCaching.Enabled and
        self.config.GroupCaching.EnableUpdater):
        proxydb = calendaruserproxy.ProxyDBService
        if proxydb is None:
            # No proxy DB was created above; build one from config now.
            proxydbClass = namedClass(self.config.ProxyDBService.type)
            proxydb = proxydbClass(**self.config.ProxyDBService.params)

        updater = GroupMembershipCacheUpdater(proxydb,
            directory,
            self.config.GroupCaching.UpdateSeconds,
            self.config.GroupCaching.ExpireSeconds,
            self.config.GroupCaching.LockSeconds,
            namespace=self.config.GroupCaching.MemcachedPool,
            useExternalProxies=self.config.GroupCaching.UseExternalProxies)
        yield updater.updateCache(fast=True)

        # Set in motion the work queue based updates:
        yield scheduleNextGroupCachingUpdate(self.store, 0)

    # NOTE(review): chown placement relative to the GroupCaching block is
    # inferred from the mangled source — confirm against upstream.
    uid, gid = getCalendarServerIDs(self.config)
    dbPath = os.path.join(self.config.DataRoot, "proxies.sqlite")
    if os.path.exists(dbPath):
        os.chown(dbPath, uid, gid)

    # Process old inbox items
    self.store.setMigrating(True)
    yield self.processInboxItems()
    self.store.setMigrating(False)

    # Migrate mail tokens from sqlite to store
    yield migrateTokensToStore(self.config.DataRoot, self.store)

    # Set mail polling in motion
    if self.config.Scheduling.iMIP.Enabled:
        yield scheduleNextMailPoll(self.store, 0)
def _simple_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
    """
    A simple send operation that returns a value.

    @param actionName: name of the action.
    @type actionName: C{str}
    @param shareeView: sharee resource being operated on.
    @type shareeView: L{CommonHomeChildExternal}
    @param objectResource: the resource being operated on, or C{None} for classmethod.
    @type objectResource: L{CommonObjectResourceExternal}
    @param transform: a function used to convert the JSON result into return values.
    @type transform: C{callable}
    @param args: list of optional arguments.
    @type args: C{list}
    @param kwargs: optional keyword arguments.
    @type kwargs: C{dict}
    """
    action, recipient = self._send(actionName, shareeView, objectResource)
    if args is not None:
        action["arguments"] = args
    if kwargs is not None:
        action["keywords"] = kwargs
    result = yield self.sendRequest(shareeView._txn, recipient, action)
    if result["result"] == "ok":
        # Optionally convert the raw JSON value into richer return values.
        returnValue(result["value"] if transform is None else transform(result["value"], shareeView, objectResource))
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def test_namedClassLookup(self):
    """
    L{namedClass} should return the class object for the name it is
    passed.
    """
    lookedUp = reflect.namedClass("twisted.python.reflect.Summer")
    self.assertIdentical(lookedUp, reflect.Summer)
def makeService(self, options):
    """
    Build the notification service tree: a memcached client pool, one
    child service per enabled notifier plugin, and the internal TCP
    notification listener, all under a single MultiService.
    """
    # The memcached client pool must be installed before anything uses it.
    memcachepool.installPools(
        config.Memcached.Pools,
        config.Memcached.MaxClients,
    )

    root = service.MultiService()

    # Instantiate each enabled notification backend by dotted class name.
    notifiers = []
    for key, settings in config.Notifications.Services.iteritems():
        if not settings["Enabled"]:
            continue
        notifier = namedClass(settings["Service"])(settings)
        notifier.setServiceParent(root)
        notifiers.append(notifier)

    factory = InternalNotificationFactory(
        notifiers,
        delaySeconds=config.Notifications.CoalesceSeconds,
    )
    listener = internet.TCPServer(
        config.Notifications.InternalNotificationPort,
        factory,
        interface=config.Notifications.BindAddress,
    )
    listener.setServiceParent(root)

    return root
def makeConfigurable(self, cfgInfo, container, name):
    """Create a new configurable to a container, based on input from web form."""
    # cfgInfo is "<command> <argument>"; command is "new" or "dis".
    cmd, args = string.split(cfgInfo, ' ', 1)
    if cmd == "new":
        # create
        obj = coil.createConfigurable(reflect.namedClass(args), container, name)
    elif cmd == "dis":
        # dispense
        methodHash = int(args)
        # Work out which interface the dispensed object must satisfy.
        if components.implements(container, coil.IConfigurator) and container.getType(name):
            interface = container.getType(name)
        elif components.implements(container, coil.IConfigCollection):
            interface = container.entityType
        else:
            interface = None
        # Find the dispenser whose hash matches and invoke it.
        for t in self.dispensers.getDispensers(interface):
            obj, methodName, desc = t
            if hash(t) == methodHash:
                cfg = coil.getConfigurator(obj)
                obj = getattr(cfg, methodName)()
                print "created %s from dispenser" % obj
                break
    else:
        raise ValueError, "Unrecognized command %r in cfgInfo %r" % (cmd, cfgInfo)
    self.dispensers.addObject(obj)
    return obj
def main():
    """
    Command-line entry point: parse options, create the described item in
    the object store, then run the reactor until the operation completes.
    """
    # Setup the option parser
    parser = optparse.OptionParser()
    parser.add_option('-d', '--database', type="string", dest='database',
                      help='database name')
    parser.add_option('-u', '--user', type="string", dest='user',
                      help='database user')
    parser.add_option('-t', '--type', type="string", dest='type',
                      help='item type')
    parser.add_option('-e', '--execfile', type="string", dest='execfile',
                      help='code to exec to get attrs')

    # Parse the command line options
    (options, args) = parser.parse_args()
    if options.database is None or options.type is None:
        parser.print_help()
        return -1

    # Extract the item type and turn the positional args into a dict of
    # attrs.  Renamed from "type"/"args" so the builtin and the optparse
    # result are no longer shadowed.
    itemType = reflect.namedClass(options.type)
    itemArgs = dict(arg.split('=', 1) for arg in args)

    g = {}
    execfile(options.execfile, g)
    attrs = g['attrs']

    # Create the object store
    store = makeStore(database=options.database, user=options.user)

    # Add the item and shutdown the reactor when it's complete
    d = store.runInSession(createItem, itemType, itemArgs, attrs)
    d.addCallbacks(itemCreated, error)
    d.addBoth(lambda ignore: reactor.stop())

    # Off we go
    reactor.run()
    # NOTE(review): exitCode is not defined in this function — presumably a
    # module-level global set by itemCreated/error; confirm, otherwise this
    # raises NameError after the reactor stops.
    sys.exit(exitCode)
def test_namedClassLookup(self):
    """
    L{namedClass} should return the class object for the name it is
    passed.
    """
    lookedUp = reflect.namedClass("twisted.test.test_reflect.Summer")
    self.assertIs(lookedUp, Summer)
def unjellyFromDOM_1(self, unjellier, element):
    """
    Restore this object's state (schema version 1) from the attributes of
    a DOM element.
    """
    from twisted.python.reflect import namedClass
    attr = element.getAttribute
    self.integer = int(attr("integer"))
    self.instance = namedClass(attr("instance"))()
    self.name = attr("name")
    # just give us any ol' list
    self.sequence = [self.instance, self.instance]
def send_add_attachment(self, objectResource, rids, content_type, filename, stream):
    """
    Issue a cross-pod "add-attachment" managed-attachment request for the
    given object resource and return the remote result value.

    @param objectResource: child resource having an attachment added
    @type objectResource: L{CalendarObject}
    @param rids: list of recurrence ids
    @type rids: C{list}
    @param content_type: content type of attachment data
    @type content_type: L{MimeType}
    @param filename: name of attachment
    @type filename: C{str}
    @param stream: attachment data stream
    @type stream: L{IStream}
    """
    parent = objectResource._parentCollection
    action, recipient = self._send("add-attachment", parent, objectResource)
    action["rids"] = rids
    action["filename"] = filename
    result = yield self.sendRequest(parent._txn, recipient, action, stream, content_type)
    if result["result"] == "ok":
        returnValue(result["value"])
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def send_update_attachment(self, objectResource, managed_id, content_type, filename, stream):
    """
    Issue a cross-pod "update-attachment" managed-attachment request for
    the given object resource and return the remote result value.

    @param objectResource: child resource whose attachment is updated
    @type objectResource: L{CalendarObject}
    @param managed_id: managed-id to update
    @type managed_id: C{str}
    @param content_type: content type of attachment data
    @type content_type: L{MimeType}
    @param filename: name of attachment
    @type filename: C{str}
    @param stream: attachment data stream
    @type stream: L{IStream}
    """
    parent = objectResource._parentCollection
    action, recipient = self._send("update-attachment", parent, objectResource)
    action["managedID"] = managed_id
    action["filename"] = filename
    result = yield self.sendRequest(parent._txn, recipient, action, stream, content_type)
    if result["result"] == "ok":
        returnValue(result["value"])
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def raise_error():
    """
    Re-raise the exception described by the enclosing scope's C{failure},
    reconstructing the original exception type when possible.
    """
    # failure.parents[-1] will be the exception class for local
    # failures and the string name of the exception class
    # for remote failures (which might not exist in our
    # namespace)
    #
    # failure.value will be the tuple of arguments to the
    # exception in the local case, or a string
    # representation of that in the remote case (see
    # pb.CopyableFailure.getStateToCopy()).
    #
    # we can only reproduce a remote exception if the
    # exception class is in our namespace, and it only takes
    # one string argument. if either condition is not true,
    # we wrap the strings in a default Exception.
    k, v = failure.parents[-1], failure.value
    try:
        if isinstance(k, str):
            # Remote failure: resolve the class name in our namespace.
            k = reflect.namedClass(k)
        if isinstance(v, tuple):
            e = k(*v)
        else:
            e = k(v)
    except Exception:
        # Could not rebuild the original type; use a generic wrapper.
        e = Exception('%s: %r' % (failure.type, v))
    raise e
def send_freebusy(self, calresource, timerange, matchtotal, excludeuid, organizer, organizerPrincipal, same_calendar_user, servertoserver, event_details,):
    """
    Issue a cross-pod "freebusy" request for C{calresource} and return the
    remote free-busy results together with the updated match total.
    """
    action, recipient = self._send("freebusy", calresource)
    fields = (
        ("timerange", [timerange.start.getText(), timerange.end.getText()]),
        ("matchtotal", matchtotal),
        ("excludeuid", excludeuid),
        ("organizer", organizer),
        ("organizerPrincipal", organizerPrincipal),
        ("same_calendar_user", same_calendar_user),
        ("servertoserver", servertoserver),
        ("event_details", event_details),
    )
    for key, value in fields:
        action[key] = value
    result = yield self.sendRequest(calresource._txn, recipient, action)
    if result["result"] == "ok":
        returnValue((result["fbresults"], result["matchtotal"],))
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def opt_class(self, className):
    """A class that will be used to serve the root resource. Must implement
    hack.web2.iweb.IResource and take no arguments.
    """
    if self['root']:
        raise usage.UsageError("You may only have one root resource.")
    rootClass = reflect.namedClass(className)
    self['root'] = iweb.IResource(rootClass())
def opt_processor(self, proc):
    """`ext=class' where `class' is added as a Processor for files ending
    with `ext'.
    """
    if not isinstance(self['root'], static.File):
        raise usage.UsageError("You can only use --processor after --path.")
    extension, className = proc.split('=', 1)
    self['root'].processors[extension] = reflect.namedClass(className)
def tasks(self):
    """
    @return: list of class objects for each task defined in the current
        configuration, excluding any listed in excludeTasks.
    """
    excluded = self.get('excludeTasks').split(',')
    taskClasses = []
    for taskName in self.get('allTasks').split(','):
        if taskName not in excluded:
            taskClasses.append(namedClass(taskName))
    return taskClasses
def main(argv=None):
    """
    Entry point: log to child.log and run the manhole protocol class named
    by the first argument (default: ConsoleManhole).
    """
    # open() instead of the deprecated Python 2 file() builtin; this also
    # matches the open()-based variant of this entry point elsewhere.
    log.startLogging(open('child.log', 'w'))
    if argv is None:
        argv = sys.argv[1:]
    if argv:
        # A dotted class name was supplied on the command line.
        klass = reflect.namedClass(argv[0])
    else:
        klass = ConsoleManhole
    runWithProtocol(klass)
def opt_processor(self, proc):
    """
    `ext=class' where `class' is added as a Processor for files ending
    with `ext'.
    """
    root = self['root']
    if not isinstance(root, static.File):
        raise usage.UsageError(
            "You can only use --processor after --path.")
    extension, className = proc.split('=', 1)
    root.processors[extension] = reflect.namedClass(className)
def main(argv=None):
    """
    Entry point: log to child.log and run the manhole protocol class named
    by the first command-line argument (default: ConsoleManhole).
    """
    log.startLogging(open('child.log', 'w'))
    argv = sys.argv[1:] if argv is None else argv
    klass = reflect.namedClass(argv[0]) if argv else ConsoleManhole
    runWithProtocol(klass)
def stepWithResult(self, result):
    """
    Post-import upgrade step: load XML proxy assignments, refresh the
    group membership cache, fix sqlite ownership, process legacy inbox
    items, and migrate iMIP mail tokens.  Runs as an inlineCallbacks
    sequence; each sub-step must complete before the next.
    """
    if self.doPostImport:
        directory = directoryFromConfig(self.config)

        # Load proxy assignments from XML if specified
        if self.config.ProxyLoadFromFile:
            proxydbClass = namedClass(self.config.ProxyDBService.type)
            calendaruserproxy.ProxyDBService = proxydbClass(
                **self.config.ProxyDBService.params)
            loader = XMLCalendarUserProxyLoader(self.config.ProxyLoadFromFile)
            yield loader.updateProxyDB()

        # Populate the group membership cache
        if (self.config.GroupCaching.Enabled and
            self.config.GroupCaching.EnableUpdater):
            proxydb = calendaruserproxy.ProxyDBService
            if proxydb is None:
                # No proxy DB was created above; build one from config now.
                proxydbClass = namedClass(self.config.ProxyDBService.type)
                proxydb = proxydbClass(**self.config.ProxyDBService.params)

            updater = GroupMembershipCacheUpdater(proxydb,
                directory,
                self.config.GroupCaching.UpdateSeconds,
                self.config.GroupCaching.ExpireSeconds,
                self.config.GroupCaching.LockSeconds,
                namespace=self.config.GroupCaching.MemcachedPool,
                useExternalProxies=self.config.GroupCaching.UseExternalProxies)
            yield updater.updateCache(fast=True)

        # NOTE(review): chown placement relative to the GroupCaching block
        # is inferred from the mangled source — confirm against upstream.
        uid, gid = getCalendarServerIDs(self.config)
        dbPath = os.path.join(self.config.DataRoot, "proxies.sqlite")
        if os.path.exists(dbPath):
            os.chown(dbPath, uid, gid)

        # Process old inbox items
        self.store.setMigrating(True)
        yield self.processInboxItems()
        self.store.setMigrating(False)

        # Migrate mail tokens from sqlite to store
        yield migrateTokensToStore(self.config.DataRoot, self.store)
def deadProperties(self):
    """
    Lazily create and cache the dead-property store for this resource,
    wrapping xattr-backed stores with an in-memory cache.
    """
    if not hasattr(self, "_dead_properties"):
        # Get the property store from super
        store = namedClass(config.RootResourcePropStoreClass)(self)
        if isinstance(store, xattrPropertyStore):
            # Wrap the property store in a memory store
            store = CachingPropertyStore(store)
        self._dead_properties = store
    return self._dead_properties
def deadProperties(self):
    """
    Return the dead-property store for this resource, creating it on first
    access; xattr-backed stores get an in-memory caching wrapper.
    """
    if not hasattr(self, "_dead_properties"):
        # Get the property store from super
        propStoreClass = namedClass(config.RootResourcePropStoreClass)
        props = propStoreClass(self)
        # Wrap the property store in a memory store
        if isinstance(props, xattrPropertyStore):
            props = CachingPropertyStore(props)
        self._dead_properties = props
    return self._dead_properties
def directoryService(self):
    """
    Get an appropriate directory service for this L{DBInspectService}'s
    configuration, creating one first if necessary.
    """
    if self._directory is not None:
        return self._directory
    self._directory = directoryFromConfig(self.config)
    proxydbClass = namedClass(config.ProxyDBService.type)
    try:
        calendaruserproxy.ProxyDBService = proxydbClass(**config.ProxyDBService.params)
    except IOError:
        print("Could not start proxydb service")
    return self._directory
def sendRequestToServer(self, txn, server, data, stream=None, streamType=None):
    """
    Send a cross-pod request to C{server} and return the response value,
    translating a remote exception back into a local one.
    """
    request = self.conduitRequestClass(server, data, stream, streamType)
    try:
        response = (yield request.doRequest(txn))
    except Exception as e:
        raise FailedCrossPodRequestError("Failed cross-pod request: {}".format(e))
    outcome = response["result"]
    if outcome == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(response["class"])(response["details"])
    if outcome != "ok":
        raise FailedCrossPodRequestError("Cross-pod request failed: {}".format(response))
    returnValue(response.get("value"))
def get_plugins(self):
    """
    Instantiate the configured sysinfo plugins, honouring the include and
    exclude lists from the configuration.
    """
    if self.sysinfo_plugins is None:
        include = ALL_PLUGINS
    else:
        include = self.get_plugin_names(self.sysinfo_plugins)
    if self.exclude_sysinfo_plugins is None:
        exclude = []
    else:
        exclude = self.get_plugin_names(self.exclude_sysinfo_plugins)
    instances = []
    for plugin_name in include:
        if plugin_name in exclude:
            continue
        plugin_class = namedClass("landscape.sysinfo.%s.%s"
                                  % (plugin_name.lower(), plugin_name))
        instances.append(plugin_class())
    return instances
def makeService(self, options):
    """Construct a server using MLLPFactory."""
    from twisted.internet import reactor
    from twistedhl7.mllp import IHL7Receiver, MLLPFactory
    receiver_name = options['receiver']
    receiver_class = reflect.namedClass(receiver_name)
    # Fail fast if the named class doesn't provide the receiver interface.
    verifyClass(IHL7Receiver, receiver_class)
    factory = MLLPFactory(receiver_class())
    endpoint = endpoints.serverFromString(reactor, options['endpoint'])
    svc = internet.StreamServerEndpointService(endpoint, factory)
    svc.setName(u"mllp-{0}".format(receiver_name))
    return svc
def __init__(self, dirname):
    """
    Open (or create) an on-disk object database rooted at C{dirname},
    loading class/oid metadata tables, pickled type mappers, and one
    Table per known class.
    """
    self.dirname = dirname
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    # Weak maps between live objects and their stored UIDs.
    self.identityToUID = hashless.HashlessWeakKeyDictionary()
    self.uidToIdentity = weakref.WeakValueDictionary()
    # Fixed-layout metadata tables backing the store.
    self.classes = self.structured("classes",
                                   (int, "version"),
                                   (FixedSizeString(512), "classname"))
    self.oidsFile = self.structured("objects",
                                    (int, "hash"),
                                    (bool, "root"),
                                    (int, "refcount"),
                                    (int, "offset"),
                                    (int, "classId"))
    # Seed both tables with a sentinel row on first creation.
    if len(self.oidsFile) == 0:
        self.oidsFile.append((0, 0, 0, 0, 0))
    if len(self.classes) == 0:
        self.classes.append((0, ''))
    self.classToClassId = {}
    self.tables = []
    mapFile = opj(self.dirname, "mappers")
    if os.path.exists(mapFile):
        # Restore pickled type mappers and rebuild the reverse index.
        self.typeMapperKeyToMapper = cPickle.load(open(mapFile))
        self.typeMapperTupToKey = {}
        for k, v in self.typeMapperKeyToMapper.iteritems():
            self.typeMapperTupToKey[v.toTuple()] = k
    else:
        self.typeMapperTupToKey = {}
        self.typeMapperKeyToMapper = {}
    c = 0
    self.tables.append(None)
    for version, cn in self.classes:
        #classname = cn.strip('\x00')
        # strip(arg) is in 2.2.2, but not 2.2.0 or 2.2.1
        classname = cn
        # Trim NUL padding from the fixed-size classname field.
        while len(classname) and classname[-1] == '\x00':
            classname = classname[:-1]
        if classname:
            currentClass = reflect.namedClass(classname)
            currentVersion = getattr(currentClass, "schemaVersion", 1)
            if currentVersion == version:
                self.tables.append(Table(self, classname))
                self.classToClassId[classname] = c
            elif currentVersion < version:
                assert False, "I can't open this database because it has newer data than I know how to read."
            else:
                # Stored data is older than the live class; keep it at the
                # stored schema version for upgrade.
                self.tables.append(Table(self, classname, version))
        # NOTE(review): classId counter placement inferred from the mangled
        # source — confirm it increments once per classes row.
        c += 1
def makeService(self, options):
    """Construct a server using MLLPFactory.

    :rtype: :py:class:`twisted.application.internet.StreamServerEndpointService`
    """
    from twisted.internet import reactor
    from txHL7.mllp import IHL7Receiver, MLLPFactory
    receiverName = options['receiver']
    receiverClass = reflect.namedClass(receiverName)
    # Fail fast if the named class doesn't provide the receiver interface.
    verifyClass(IHL7Receiver, receiverClass)
    mllpFactory = MLLPFactory(receiverClass())
    hl7Endpoint = endpoints.serverFromString(reactor, options['endpoint'])
    endpointService = internet.StreamServerEndpointService(hl7Endpoint, mllpFactory)
    endpointService.setName(u"mllp-{0}".format(receiverName))
    return endpointService
def resourcePlugger(name, *args, **kwargs):
    """
    Instantiate the IResource plugin registered under C{name}, falling back
    to the C{defaultResource} keyword (or a NoPlugin placeholder) when no
    plugin matches.
    """
    matched = None
    for plugin in getPlugins(iweb.IResource):
        if plugin.name == name:
            matched = namedClass(plugin.className)
            break
    if matched is None:
        # pop() both reads and removes the fallback so it is not passed on
        # to the resource constructor.
        matched = kwargs.pop('defaultResource', None)
        if matched is None:
            return NoPlugin(name)
    return matched(*args, **kwargs)
def main(argv=None, reactor=None):
    """
    Entry point: run the manhole protocol with an audio device name
    (default 'coreaudio') taken from the first command-line argument.
    """
    # open() instead of the deprecated Python 2 file() builtin.
    log.startLogging(open('child.log', 'w'))
    audioDev = 'coreaudio'
    if argv is None:
        argv = sys.argv[1:]
    if argv:
        audioDev = argv[0]
        # NOTE(review): this drops argv[1] as well as argv[0] — confirm the
        # second argument is intentionally ignored.
        argv = argv[2:]
    if argv:
        klass = reflect.namedClass(argv[0])
    else:
        klass = ConsoleManhole
    log.msg('audio dev: %s' % audioDev)
    runWithProtocol(klass, audioDev)
def provisionDirectory(self):
    """
    Lazily create the directory-address-book backend named by the config
    and kick off its cache population; if already provisioned, return an
    already-fired Deferred.
    """
    if self.directory is None:
        directoryClass = namedClass(config.DirectoryAddressBook.type)

        log.info("Configuring: %s:%r"
                 % (config.DirectoryAddressBook.type, config.DirectoryAddressBook.params))

        #add self as "directoryBackedAddressBook" parameter
        params = config.DirectoryAddressBook.params.copy()
        params["directoryBackedAddressBook"] = self

        self.directory = directoryClass(params)
        return self.directory.createCache()
        #print ("DirectoryBackedAddressBookResource.provisionDirectory: provisioned")

    return succeed(None)
def provisionDirectory(self):
    """
    Lazily create the directory-address-book backend named by the config
    and kick off its cache population.  An ImportError from the backend is
    logged and treated as "no directory address book".
    """
    if self.directory is None:
        directoryClass = namedClass(config.DirectoryAddressBook.type)

        log.info("Configuring: %s:%r"
                 % (config.DirectoryAddressBook.type, config.DirectoryAddressBook.params))

        #add self as "directoryBackedAddressBook" parameter
        params = config.DirectoryAddressBook.params.copy()
        params["directoryBackedAddressBook"] = self

        try:
            self.directory = directoryClass(params)
        except ImportError, e:
            # Backend module unavailable: log and continue without a
            # directory address book rather than failing startup.
            log.error("Unable to set up directory address book: %s" % (e,))
            return succeed(None)

    # NOTE(review): nesting of this return relative to the "if" above is
    # inferred from the mangled source — confirm against upstream.
    return self.directory.createCache()
def opt_vhost_class(self, virtualHost):
    """Specify a virtual host in the form of domain=class, where class can
    be adapted to an iweb.IResource and has a zero-argument constructor.
    """
    root = self['root']
    if root and not isinstance(root, vhost.NameVirtualHost):
        raise usage.UsageError("You can not use --vhost-class with "
                               "--path or --class.")
    domain, className = virtualHost.split('=', 1)
    if not root:
        self['root'] = vhost.NameVirtualHost()
    hostClass = reflect.namedClass(className)
    self['root'].addHost(domain, iweb.IResource(hostClass()))
def makeService(self, options):
    """Construct a server using MLLPFactory.

    :rtype: :py:class:`twisted.application.internet.StreamServerEndpointService`
    """
    from twisted.internet import reactor
    from txHL7.mllp import IHL7Receiver, MLLPFactory
    receiverName = options['receiver']
    receiverClass = reflect.namedClass(receiverName)
    # Fail fast if the named class doesn't provide the receiver interface.
    verifyClass(IHL7Receiver, receiverClass)
    factory = MLLPFactory(receiverClass())
    services = MultiService()
    # One shared factory serving every configured port.
    for portNumber in PORTS:
        description = "tcp:interface={0}:port={1}".format(HOST, portNumber,)
        endpoint = endpoints.serverFromString(reactor, description)
        child = internet.StreamServerEndpointService(endpoint, factory)
        child.setName(u"mllp-{0}-{1}".format(receiverName, portNumber))
        services.addService(child)
    return services
def migrateAutoSchedule(config, directory):
    """
    Fetch the autoSchedule assignments from resourceinfo.sqlite and store
    the values in augments.  Runs as an inlineCallbacks sequence; a
    failure to start the augment service skips the migration.
    """
    augmentService = None
    serviceClass = {
        "xml": "twistedcaldav.directory.augment.AugmentXMLDB",
    }
    augmentClass = namedClass(serviceClass[config.AugmentService.type])
    try:
        augmentService = augmentClass(**config.AugmentService.params)
    except Exception:
        # Was a bare "except:"; narrowed to Exception so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.  The
        # migration is still best-effort and proceeds without augments.
        log.error("Could not start augment service")

    if augmentService:
        augmentRecords = []
        dbPath = os.path.join(config.DataRoot, ResourceInfoDatabase.dbFilename)
        if os.path.exists(dbPath):
            log.warn("Migrating auto-schedule settings")
            resourceInfoDatabase = ResourceInfoDatabase(config.DataRoot)
            results = resourceInfoDatabase._db_execute(
                "select GUID, AUTOSCHEDULE from RESOURCEINFO"
            )
            for uid, autoSchedule in results:
                if uid is not None:
                    record = yield directory.recordWithUID(uid)
                    if record is not None:
                        # Carry the sqlite flag over onto the augment record.
                        augmentRecord = (
                            yield augmentService.getAugmentRecord(
                                uid,
                                directory.recordTypeToOldName(record.recordType)
                            )
                        )
                        augmentRecord.autoScheduleMode = (
                            "automatic" if autoSchedule else "default"
                        )
                        augmentRecords.append(augmentRecord)

            if augmentRecords:
                yield augmentService.addAugmentRecords(augmentRecords)
            log.warn("Migrated {len} auto-schedule settings",
                     len=len(augmentRecords))
def sendRequestToServer(self, txn, server, data, stream=None, streamType=None, writeStream=None):
    """
    Send a cross-pod request to C{server} and return the response value,
    translating a remote exception back into a local one.
    """
    request = self.conduitRequestClass(
        server, data, stream, streamType, writeStream
    )
    try:
        response = (yield request.doRequest(txn))
    except Exception as e:
        raise FailedCrossPodRequestError(
            "Failed cross-pod request: {}".format(e)
        )
    outcome = response["result"]
    if outcome == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(response["class"])(response["details"])
    if outcome != "ok":
        raise FailedCrossPodRequestError(
            "Cross-pod request failed: {}".format(response)
        )
    returnValue(response.get("value"))
def migrateResourceInfo(config, directory, uid, gid):
    """
    Retrieve delegate assignments and auto-schedule flag from the directory
    service, because in "v1" that's where this info lived.
    """
    log.warn("Fetching delegate assignments and auto-schedule settings from directory")
    resourceInfo = directory.getResourceInfo()
    if len(resourceInfo) == 0:
        # Nothing to migrate, or else not appleopendirectory
        log.warn("No resource info found in directory")
        return

    log.warn("Found info for %d resources and locations in directory; applying settings" % (len(resourceInfo),))

    resourceInfoDatabase = ResourceInfoDatabase(config.DataRoot)
    proxydbClass = namedClass(config.ProxyDBService.type)
    calendarUserProxyDatabase = proxydbClass(**config.ProxyDBService.params)

    for guid, autoSchedule, proxy, readOnlyProxy in resourceInfo:
        resourceInfoDatabase.setAutoScheduleInDatabase(guid, autoSchedule)
        # Write and read-only proxies become proxy-group memberships.
        if proxy:
            yield calendarUserProxyDatabase.setGroupMembersInDatabase(
                "%s#calendar-proxy-write" % (guid,),
                [proxy]
            )
        if readOnlyProxy:
            yield calendarUserProxyDatabase.setGroupMembersInDatabase(
                "%s#calendar-proxy-read" % (guid,),
                [readOnlyProxy]
            )

    # Restore ownership on the sqlite files the migration touched.
    dbPath = os.path.join(config.DataRoot, ResourceInfoDatabase.dbFilename)
    if os.path.exists(dbPath):
        os.chown(dbPath, uid, gid)

    dbPath = os.path.join(config.DataRoot, "proxies.sqlite")
    if os.path.exists(dbPath):
        os.chown(dbPath, uid, gid)
def send_remove_attachment(self, objectResource, rids, managed_id):
    """
    Issue a cross-pod "remove-attachment" managed-attachment request for
    the given object resource and return the remote result value.

    @param objectResource: child resource whose attachment is removed
    @type objectResource: L{CalendarObject}
    @param rids: list of recurrence ids
    @type rids: C{list}
    @param managed_id: managed-id to update
    @type managed_id: C{str}
    """
    parent = objectResource._parentCollection
    action, recipient = self._send("remove-attachment", parent, objectResource)
    action["rids"] = rids
    action["managedID"] = managed_id
    result = yield self.sendRequest(parent._txn, recipient, action)
    if result["result"] == "ok":
        returnValue(result["value"])
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def send_remove_attachment(self, objectResource, rids, managed_id):
    """
    Issue a cross-pod "remove-attachment" managed-attachment request for
    the given object resource and return the remote result value.

    @param objectResource: child resource whose attachment is removed
    @type objectResource: L{CalendarObject}
    @param rids: list of recurrence ids
    @type rids: C{list}
    @param managed_id: managed-id to update
    @type managed_id: C{str}
    """
    parent = objectResource._parentCollection
    action, recipient = yield self._send("remove-attachment", parent, objectResource)
    action["rids"] = rids
    action["managedID"] = managed_id
    result = yield self.sendRequest(parent._txn, recipient, action)
    if result["result"] == "ok":
        returnValue(result["value"])
    elif result["result"] == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def makeService(self, options):
    """Construct a server using MLLPFactory.

    :rtype: :py:class:`twisted.application.internet.StreamServerEndpointService`
    """
    from twisted.internet import reactor
    from txHL7.mllp import IHL7Receiver, MLLPFactory
    name = options['receiver']
    receiverClass = reflect.namedClass(name)
    # Fail fast if the named class doesn't provide the receiver interface.
    verifyClass(IHL7Receiver, receiverClass)
    factory = MLLPFactory(receiverClass())
    parent = MultiService()
    # One shared factory serving every configured port.
    for portNumber in PORTS:
        description = "tcp:interface={0}:port={1}".format(
            HOST,
            portNumber,
        )
        endpoint = endpoints.serverFromString(reactor, description)
        child = internet.StreamServerEndpointService(endpoint, factory)
        child.setName(u"mllp-{0}-{1}".format(name, portNumber))
        parent.addService(child)
    return parent
def _simple_send(self, actionName, shareeView, objectResource=None, transform=None, args=None, kwargs=None):
    """
    A simple send operation that returns a value.

    @param actionName: name of the action.
    @type actionName: C{str}
    @param shareeView: sharee resource being operated on.
    @type shareeView: L{CommonHomeChildExternal}
    @param objectResource: the resource being operated on, or C{None} for classmethod.
    @type objectResource: L{CommonObjectResourceExternal}
    @param transform: a function used to convert the JSON result into return values.
    @type transform: C{callable}
    @param args: list of optional arguments.
    @type args: C{list}
    @param kwargs: optional keyword arguments.
    @type kwargs: C{dict}
    """
    action, recipient = yield self._send(actionName, shareeView, objectResource)
    if args is not None:
        action["arguments"] = args
    if kwargs is not None:
        action["keywords"] = kwargs
    result = yield self.sendRequest(shareeView._txn, recipient, action)
    status = result["result"]
    if status == "ok":
        value = result["value"]
        if transform is not None:
            # Convert the raw JSON value into richer return values.
            value = transform(value, shareeView, objectResource)
        returnValue(value)
    elif status == "exception":
        # Re-raise the remote exception type locally.
        raise namedClass(result["class"])(result["message"])
def testClassLookup(self):
    """
    L{namedClass} returns the class object for the fully-qualified name it
    is given.
    """
    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual(reflect.namedClass("twisted.python.reflect.Summer"),
                     reflect.Summer)
def unjellyNode(self, node):
    """
    Reconstruct a Python object from a single DOM node of a jellied XML
    document, dispatching on the node's tag name, and register it under
    the node's "reference" attribute if present.
    """
    if node.tagName.lower() == "none":
        retval = None
    elif node.tagName == "string":
        # XXX FIXME this is obviously insecure
        # if you doubt:
        # >>> unjellyFromXML('''<string value="h"+str(__import__("sys"))+"i" />''')
        # "h<module 'sys' (built-in)>i"
        # XXX Freevo changes:
        # wrap this around 'Unicode()'
        retval = Unicode(str(eval('"%s"' % node.getAttribute("value"))))
    elif node.tagName == "int":
        retval = int(node.getAttribute("value"))
    elif node.tagName == "float":
        retval = float(node.getAttribute("value"))
    elif node.tagName == "longint":
        retval = long(node.getAttribute("value"))
    elif node.tagName == "bool":
        # Stored as 0/1; normalize to a real bool.
        retval = int(node.getAttribute("value"))
        if retval:
            retval = True
        else:
            retval = False
    elif node.tagName == "module":
        retval = namedModule(str(node.getAttribute("name")))
    elif node.tagName == "class":
        retval = namedClass(str(node.getAttribute("name")))
    elif node.tagName == "unicode":
        retval = unicode(str(node.getAttribute("value")).replace("\\n", "\n").replace("\\t", "\t"), "raw_unicode_escape")
    elif node.tagName == "function":
        retval = namedObject(str(node.getAttribute("name")))
    elif node.tagName == "method":
        # Rebuild a bound/unbound method; the receiver may itself be a
        # forward reference (NotKnown) resolved later.
        im_name = node.getAttribute("name")
        im_class = namedClass(node.getAttribute("class"))
        im_self = self.unjellyNode(getValueElement(node))
        if im_class.__dict__.has_key(im_name):
            if im_self is None:
                retval = getattr(im_class, im_name)
            elif isinstance(im_self, NotKnown):
                retval = _InstanceMethod(im_name, im_self, im_class)
            else:
                retval = instancemethod(im_class.__dict__[im_name], im_self, im_class)
        else:
            raise "instance method changed"
    elif node.tagName == "tuple":
        # Build as a list; fall back to a lazy _Tuple if any element is a
        # forward reference.
        l = []
        tupFunc = tuple
        for subnode in node.childNodes:
            if isinstance(subnode, Element):
                l.append(None)
                if isinstance(self.unjellyInto(l, len(l) - 1, subnode), NotKnown):
                    tupFunc = _Tuple
        retval = tupFunc(l)
    elif node.tagName == "list":
        l = []
        finished = 1
        for subnode in node.childNodes:
            if isinstance(subnode, Element):
                l.append(None)
                self.unjellyInto(l, len(l) - 1, subnode)
        retval = l
    elif node.tagName == "dictionary":
        # Children alternate key/value; _DictKeyAndValue commits each pair
        # into d once both halves are known.
        d = {}
        keyMode = 1
        for subnode in node.childNodes:
            if isinstance(subnode, Element):
                if keyMode:
                    kvd = _DictKeyAndValue(d)
                    if not subnode.getAttribute("role") == "key":
                        raise "Unjellying Error: key role not set"
                    self.unjellyInto(kvd, 0, subnode)
                else:
                    self.unjellyInto(kvd, 1, subnode)
                keyMode = not keyMode
        retval = d
    elif node.tagName == "instance":
        className = node.getAttribute("class")
        clasz = namedClass(className)
        if issubclass(clasz, DOMJellyable):
            # Class handles its own DOM restoration.
            retval = instance(clasz, {})
            retval.unjellyFromDOM(self, node)
        else:
            # Generic path: rebuild state and apply via __setstate__ or
            # directly as the instance dict.
            state = self.unjellyNode(getValueElement(node))
            if hasattr(clasz, "__setstate__"):
                inst = instance(clasz, {})
                inst.__setstate__(state)
            else:
                inst = instance(clasz, state)
            retval = inst
    elif node.tagName == "reference":
        # Back-reference: may point at an object not yet unjellied, in
        # which case a _Dereference placeholder is recorded.
        refkey = node.getAttribute("key")
        retval = self.references.get(refkey)
        if retval is None:
            der = _Dereference(refkey)
            self.references[refkey] = der
            retval = der
    elif node.tagName == "copyreg":
        nodefunc = namedObject(node.getAttribute("loadfunc"))
        loaddef = self.unjellyLater(getValueElement(node)).addCallback(
            lambda result, _l: apply(_l, result), nodefunc)
        retval = loaddef
    else:
        raise "Unsupported Node Type: %s" % str(node.tagName)
    # Register this object under its reference id, resolving any
    # placeholder that earlier back-references created.
    if node.hasAttribute("reference"):
        refkey = node.getAttribute("reference")
        ref = self.references.get(refkey)
        if ref is None:
            self.references[refkey] = retval
        elif isinstance(ref, NotKnown):
            ref.resolveDependants(retval)
            self.references[refkey] = retval
        else:
            assert 0, "Multiple references with the same ID!"
    return retval
def opt_class(self, className):
    """Create a Resource subclass with a zero-argument constructor.
    """
    self['root'] = reflect.namedClass(className)()
def buildDirectory(
    store, dataRoot, servicesInfo, augmentServiceInfo, wikiServiceInfo,
    serversDB=None, cachingSeconds=0, filterStartsWith=False,
    lookupsBetweenPurges=0, negativeCaching=True,
):
    """
    Return a directory without using a config object; suitable for tests
    which need to have multiple directory instances.

    Builds each configured base directory service (XML / OpenDirectory /
    LDAP / in-memory), optionally wraps each in a caching layer, then
    stacks the delegate, wiki, aggregate and augment services on top.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for
        any xml based services.
    @param servicesInfo: An iterable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    @param cachingSeconds: if non-zero, wrap each base service in a
        CachingDirectoryService with this expiration (seconds)
    @param filterStartsWith: if true, install startswithFilter on the
        resulting augmented service
    @param lookupsBetweenPurges: passed through to the caching layer
    @param negativeCaching: passed through to the caching layer
    """
    aggregatedServices = []
    cachingServices = []
    ldapService = None  # LDAP DS has extra stats (see augment.py)

    # Build one base directory service per enabled config section.
    for serviceValue in servicesInfo:
        if not serviceValue.Enabled:
            continue
        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            # XML-file-backed service; create a default file if missing.
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (
                DirectoryService as ODDirectoryService
            )
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            node = params.node
            directory = ODDirectoryService(
                nodeName=node, suppressSystemRecords=True)

        elif "ldap" in directoryType:
            from twext.who.ldap import (
                DirectoryService as LDAPDirectoryService,
                FieldName as LDAPFieldName,
                RecordTypeSchema
            )
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn,
                                         params.credentials.password)
            else:
                creds = None
            mapping = params.mapping
            extraFilters = params.extraFilters
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                useTLS=params.useTLS,
                credentials=creds,
                # Map our directory field names to configured LDAP
                # attribute names.
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: mapping.uid,
                    BaseFieldName.guid: mapping.guid,
                    BaseFieldName.shortNames: mapping.shortNames,
                    BaseFieldName.fullNames: mapping.fullNames,
                    BaseFieldName.emailAddresses: mapping.emailAddresses,
                    LDAPFieldName.memberDNs: mapping.memberDNs,
                    CalFieldName.readOnlyProxy: mapping.readOnlyProxy,
                    CalFieldName.readWriteProxy: mapping.readWriteProxy,
                    CalFieldName.loginAllowed: mapping.loginAllowed,
                    CalFieldName.hasCalendars: mapping.hasCalendars,
                    CalFieldName.autoScheduleMode: mapping.autoScheduleMode,
                    CalFieldName.autoAcceptGroup: mapping.autoAcceptGroup,
                    CalFieldName.serviceNodeUID: mapping.serviceNodeUID,
                    CalFieldName.associatedAddress:
                        mapping.associatedAddress,
                    CalFieldName.geographicLocation:
                        mapping.geographicLocation,
                    CalFieldName.streetAddress: mapping.streetAddress,
                }),
                # One relative-DN subtree per record type.
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=params.rdnSchema.users,
                        attributes=(),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=params.rdnSchema.groups,
                        attributes=(),
                    ),
                    CalRecordType.location: RecordTypeSchema(
                        relativeDN=params.rdnSchema.locations,
                        attributes=(),
                    ),
                    CalRecordType.resource: RecordTypeSchema(
                        relativeDN=params.rdnSchema.resources,
                        attributes=(),
                    ),
                    CalRecordType.address: RecordTypeSchema(
                        relativeDN=params.rdnSchema.addresses,
                        attributes=(),
                    ),
                }),
                extraFilters={
                    RecordType.user: extraFilters.get("users", ""),
                    RecordType.group: extraFilters.get("groups", ""),
                    CalRecordType.location:
                        extraFilters.get("locations", ""),
                    CalRecordType.resource:
                        extraFilters.get("resources", ""),
                    CalRecordType.address:
                        extraFilters.get("addresses", ""),
                },
                threadPoolMax=params.get("threadPoolMax", 10),
                authConnectionMax=params.get("authConnectionMax", 5),
                queryConnectionMax=params.get("queryConnectionMax", 5),
                tries=params.get("tries", 3),
                warningThresholdSeconds=params.get(
                    "warningThresholdSeconds", 5),
            )
            # Remembered so the augmented service can report LDAP stats.
            ldapService = directory

        elif "inmemory" in directoryType:
            from txdav.who.test.support import (
                CalendarInMemoryDirectoryService
            )
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        # NOTE(review): fieldNames is re-initialized on every loop
        # iteration, so only the last service's fieldName survives for the
        # augment step below -- confirm this is intended.
        types = []
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)
            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError
            if recordType in types:
                log.error("Duplicate Record Type: {rt}",
                          rt=recordTypeName)
                raise DirectoryConfigurationError
            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)

        # Optionally wrap the base service in a caching layer.
        if cachingSeconds:
            directory = CachingDirectoryService(
                directory,
                expireSeconds=cachingSeconds,
                lookupsBetweenPurges=lookupsBetweenPurges,
                negativeCaching=negativeCaching,
            )
            cachingServices.append(directory)

        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    serviceClass = {
        "xml": "twistedcaldav.directory.augment.AugmentXMLDB",
    }

    # Ensure every configured augment xml file exists (seed defaults).
    for augmentFile in augmentServiceInfo.params.xmlFiles:
        augmentFile = fullServerPath(dataRoot, augmentFile)
        augmentFilePath = FilePath(augmentFile)
        if not augmentFilePath.exists():
            augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

    augmentClass = namedClass(serviceClass[augmentServiceInfo.type])
    log.info("Configuring augment service of type: {augmentClass}",
             augmentClass=augmentClass)
    try:
        augmentService = augmentClass(**augmentServiceInfo.params)
    except IOError:
        log.error("Could not start augment service")
        raise

    # Find the service that serves user records; its realm name is reused
    # for all the wrapper services below.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName,
                                                 store)
    # (put at front of list so we don't try to ask the actual DS services
    # about the delegate-related principals, for performance)
    aggregatedServices.insert(0, delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(
                userDirectory.realmName,
                wikiServiceInfo.EndpointDescriptor,
            ))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName,
                                                   aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store,
                                              augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

        # Tell each caching service what method to use when reporting
        # times and cache stats
        for cachingService in cachingServices:
            cachingService.setTimingMethod(augmented._addTiming)

        # LDAP has additional stats to report
        augmented._ldapDS = ldapService

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    if filterStartsWith:
        augmented.setFilter(startswithFilter)

    return augmented
def test_namedClassLookup(self):
    """
    L{namedClass} should return the class object for the name it is
    passed.
    """
    looked_up = reflect.namedClass("twisted.test.test_reflect.Summer")
    self.assertIdentical(looked_up, Summer)
def makeService(self, options):
    """
    Lazily resolve and instantiate the configured service-maker class,
    then delegate service construction to it.
    """
    if self._serviceMaker is None:
        makerClass = reflect.namedClass(self.serviceMakerClass)
        self._serviceMaker = makerClass()
    return self._serviceMaker.makeService(options)
def getProperty(self):
    """
    Resolve the service-maker class by name and read the attribute named
    by the enclosing ``propname`` from it.
    """
    makerClass = reflect.namedClass(self.serviceMakerClass)
    return getattr(makerClass, propname)
def buildDirectory(store, dataRoot, servicesInfo, augmentServiceInfo,
                   wikiServiceInfo, serversDB=None):
    """
    Return a directory without using a config object; suitable for tests
    which need to have multiple directory instances.

    Builds each enabled base directory service (XML / OpenDirectory /
    LDAP / in-memory), then stacks the delegate, wiki, aggregate and
    augment services on top of them.

    @param store: The store.
    @param dataRoot: The path to the directory containing xml files for
        any xml based services.
    @param servicesInfo: An iterable of ConfigDicts mirroring the
        DirectoryService and ResourceService sections of stdconfig
    @param augmentServiceInfo: A ConfigDict mirroring the AugmentService
        section of stdconfig
    @param wikiServiceInfo: A ConfigDict mirroring the Wiki section of
        stdconfig
    @param serversDB: A ServersDB object to assign to the directory
    """
    aggregatedServices = []

    # Build one base directory service per enabled config section.
    for serviceValue in servicesInfo:
        if not serviceValue.Enabled:
            continue
        directoryType = serviceValue.type.lower()
        params = serviceValue.params

        if "xml" in directoryType:
            # XML-file-backed service; create a default file if missing.
            xmlFile = params.xmlFile
            xmlFile = fullServerPath(dataRoot, xmlFile)
            fp = FilePath(xmlFile)
            if not fp.exists():
                fp.setContent(DEFAULT_XML_CONTENT)
            directory = XMLDirectoryService(fp)

        elif "opendirectory" in directoryType:
            from txdav.who.opendirectory import (
                DirectoryService as ODDirectoryService
            )
            # We don't want system accounts returned in lookups, so tell
            # the service to suppress them.
            directory = ODDirectoryService(suppressSystemRecords=True)

        elif "ldap" in directoryType:
            if params.credentials.dn and params.credentials.password:
                creds = UsernamePassword(params.credentials.dn,
                                         params.credentials.password)
            else:
                creds = None
            directory = LDAPDirectoryService(
                params.uri,
                params.rdnSchema.base,
                credentials=creds,
                # Fixed mapping of directory fields to LDAP attributes.
                fieldNameToAttributesMap=MappingProxyType({
                    BaseFieldName.uid: ("apple-generateduid", ),
                    BaseFieldName.guid: ("apple-generateduid", ),
                    BaseFieldName.shortNames: (LDAPAttribute.uid.value, ),
                    BaseFieldName.fullNames: (LDAPAttribute.cn.value, ),
                    BaseFieldName.emailAddresses:
                        (LDAPAttribute.mail.value, ),
                    BaseFieldName.password:
                        (LDAPAttribute.userPassword.value, ),
                    LDAPFieldName.memberDNs:
                        (LDAPAttribute.uniqueMember.value, ),
                }),
                recordTypeSchemas=MappingProxyType({
                    RecordType.user: RecordTypeSchema(
                        relativeDN=u"ou=People",
                        # (objectClass=inetOrgPerson)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.inetOrgPerson.value,
                        ), ),
                    ),
                    RecordType.group: RecordTypeSchema(
                        relativeDN=u"ou=Groups",
                        # (objectClass=groupOfNames)
                        attributes=((
                            LDAPAttribute.objectClass.value,
                            LDAPObjectClass.groupOfUniqueNames.value,
                        ), ),
                    ),
                }))

        elif "inmemory" in directoryType:
            from txdav.who.test.support import (
                CalendarInMemoryDirectoryService
            )
            directory = CalendarInMemoryDirectoryService()

        else:
            log.error("Invalid DirectoryType: {dt}", dt=directoryType)
            raise DirectoryConfigurationError

        # Set the appropriate record types on each service
        # NOTE(review): fieldNames is re-initialized on every loop
        # iteration, so only the last service's fieldName survives for the
        # augment step below -- confirm this is intended.
        types = []
        fieldNames = []
        for recordTypeName in params.recordTypes:
            recordType = {
                "users": RecordType.user,
                "groups": RecordType.group,
                "locations": CalRecordType.location,
                "resources": CalRecordType.resource,
                "addresses": CalRecordType.address,
            }.get(recordTypeName, None)
            if recordType is None:
                log.error("Invalid Record Type: {rt}", rt=recordTypeName)
                raise DirectoryConfigurationError
            if recordType in types:
                log.error("Duplicate Record Type: {rt}",
                          rt=recordTypeName)
                raise DirectoryConfigurationError
            types.append(recordType)

        directory.recordType = ConstantsContainer(types)
        directory.fieldName = ConstantsContainer(
            (directory.fieldName, CalFieldName))
        fieldNames.append(directory.fieldName)
        aggregatedServices.append(directory)

    #
    # Setup the Augment Service
    #
    if augmentServiceInfo.type:
        # Ensure every configured augment xml file exists (seed defaults).
        for augmentFile in augmentServiceInfo.params.xmlFiles:
            augmentFile = fullServerPath(dataRoot, augmentFile)
            augmentFilePath = FilePath(augmentFile)
            if not augmentFilePath.exists():
                augmentFilePath.setContent(DEFAULT_AUGMENT_CONTENT)

        augmentClass = namedClass(augmentServiceInfo.type)
        log.info("Configuring augment service of type: {augmentClass}",
                 augmentClass=augmentClass)
        try:
            augmentService = augmentClass(**augmentServiceInfo.params)
        except IOError:
            log.error("Could not start augment service")
            raise
    else:
        augmentService = None

    # Find the service that serves user records; its realm name is reused
    # for all the wrapper services below.
    userDirectory = None
    for directory in aggregatedServices:
        if RecordType.user in directory.recordTypes():
            userDirectory = directory
            break
    else:
        log.error("No directory service set up for users")
        raise DirectoryConfigurationError

    # Delegate service
    delegateDirectory = DelegateDirectoryService(userDirectory.realmName,
                                                 store)
    aggregatedServices.append(delegateDirectory)

    # Wiki service
    if wikiServiceInfo.Enabled:
        aggregatedServices.append(
            WikiDirectoryService(userDirectory.realmName,
                                 wikiServiceInfo.CollabHost,
                                 wikiServiceInfo.CollabPort))

    # Aggregate service
    aggregateDirectory = AggregateDirectoryService(userDirectory.realmName,
                                                   aggregatedServices)

    # Augment service
    try:
        fieldNames.append(CalFieldName)
        augmented = AugmentedDirectoryService(aggregateDirectory, store,
                                              augmentService)
        augmented.fieldName = ConstantsContainer(fieldNames)

        # The delegate directory needs a way to look up user/group records
        # so hand it a reference to the augmented directory.
        # FIXME: is there a better pattern to use here?
        delegateDirectory.setMasterDirectory(augmented)

    except Exception as e:
        log.error("Could not create directory service", error=e)
        raise

    if serversDB is not None:
        augmented.setServersDB(serversDB)

    return augmented
def get_plugins(self):
    """Return instances of all the plugins enabled in the configuration."""
    plugins = []
    for plugin_name in self.config.plugin_factories:
        # Module name is the lowercased plugin name; class name matches
        # the plugin name exactly.
        dotted_name = "landscape.client.manager.%s.%s" % (
            plugin_name.lower(), plugin_name)
        plugins.append(namedClass(dotted_name)())
    return plugins
def get_plugins(self):
    """Instantiate every monitor plugin named in the configuration."""
    instances = []
    for name in self.config.plugin_factories:
        # Module name is the lowercased plugin name; class name matches
        # the plugin name exactly.
        cls = namedClass("landscape.monitor.%s.%s" % (name.lower(), name))
        instances.append(cls())
    return instances