def test_product(self):
    self.product = Product(store=self.siteStore)
    self.product.types = [
        n.decode('ascii') for n in [qual(Foo), qual(Baz)]]
    self.product.installProductOn(self.userStore)
    i = self.userStore.findUnique(Installation)
    self.assertEqual(i.types, self.product.types)
def startFactory(self):
    # disable coredumps
    if resource:
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
    else:
        log.msg('INSECURE: unable to disable core dumps.')
    if not hasattr(self, 'publicKeys'):
        self.publicKeys = self.getPublicKeys()
    for keyType, value in self.publicKeys.items():
        if isinstance(value, str):
            warnings.warn("Returning a mapping from strings to "
                          "strings from getPublicKeys()/publicKeys (in %s) "
                          "is deprecated. Return a mapping from "
                          "strings to Key objects instead." % (
                              qual(self.__class__)),
                          DeprecationWarning, stacklevel=1)
            self.publicKeys[keyType] = keys.Key.fromString(value)
    if not hasattr(self, 'privateKeys'):
        self.privateKeys = self.getPrivateKeys()
    for keyType, value in self.privateKeys.items():
        if not isinstance(value, keys.Key):
            warnings.warn("Returning a mapping from strings to "
                          "PyCrypto key objects from "
                          "getPrivateKeys()/privateKeys (in %s) "
                          "is deprecated. Return a mapping from "
                          "strings to Key objects instead." % (
                              qual(self.__class__),),
                          DeprecationWarning, stacklevel=1)
            self.privateKeys[keyType] = keys.Key(value)
    if not self.publicKeys or not self.privateKeys:
        raise error.ConchError('no host keys, failing')
    if not hasattr(self, 'primes'):
        self.primes = self.getPrimes()
def startFactory(self):
    """
    Check for public and private keys.
    """
    if not hasattr(self, "publicKeys"):
        self.publicKeys = self.getPublicKeys()
    for keyType, value in self.publicKeys.items():
        if isinstance(value, str):
            warnings.warn(
                "Returning a mapping from strings to "
                "strings from getPublicKeys()/publicKeys (in %s) "
                "is deprecated. Return a mapping from "
                "strings to Key objects instead." % (qual(self.__class__)),
                DeprecationWarning,
                stacklevel=1,
            )
            self.publicKeys[keyType] = keys.Key.fromString(value)
    if not hasattr(self, "privateKeys"):
        self.privateKeys = self.getPrivateKeys()
    for keyType, value in self.privateKeys.items():
        if not isinstance(value, keys.Key):
            warnings.warn(
                "Returning a mapping from strings to "
                "PyCrypto key objects from "
                "getPrivateKeys()/privateKeys (in %s) "
                "is deprecated. Return a mapping from "
                "strings to Key objects instead." % (qual(self.__class__),),
                DeprecationWarning,
                stacklevel=1,
            )
            self.privateKeys[keyType] = keys.Key(value)
    if not self.publicKeys or not self.privateKeys:
        raise error.ConchError("no host keys, failing")
    if not hasattr(self, "primes"):
        self.primes = self.getPrimes()
def test_deprecatedPreservesName(self):
    """
    The decorated function has the same name as the original.
    """
    version = Version('Twisted', 8, 0, 0)
    dummy = deprecated(version)(dummyCallable)
    self.assertEqual(dummyCallable.__name__, dummy.__name__)
    self.assertEqual(qual(dummyCallable), qual(dummy))
def test_createProduct(self):
    """
    Verify that L{ProductConfiguration.createProduct} creates a correctly
    configured L{Product} and returns it.
    """
    conf = ProductConfiguration(store=self.userStore)
    product = conf.createProduct([Foo, Baz])
    self.assertEqual(product.types, [qual(Foo), qual(Baz)])
def reconfigServiceSlaves(self, new_config):
    timer = metrics.Timer("BotMaster.reconfigServiceSlaves")
    timer.start()

    # arrange slaves by name
    old_by_name = dict([(s.slavename, s)
                        for s in list(self)
                        if interfaces.IBuildSlave.providedBy(s)])
    old_set = set(old_by_name.iterkeys())
    new_by_name = dict([(s.slavename, s) for s in new_config.slaves])
    new_set = set(new_by_name.iterkeys())

    # calculate new slaves, by name, and removed slaves
    removed_names, added_names = util.diffSets(old_set, new_set)

    # find any slaves for which the fully qualified class name has
    # changed, and treat those as an add and remove
    for n in old_set & new_set:
        old = old_by_name[n]
        new = new_by_name[n]
        # detect changed class name
        if reflect.qual(old.__class__) != reflect.qual(new.__class__):
            removed_names.add(n)
            added_names.add(n)

    if removed_names or added_names:
        log.msg("adding %d new slaves, removing %d" %
                (len(added_names), len(removed_names)))

        for n in removed_names:
            slave = old_by_name[n]
            del self.slaves[n]
            slave.master = None
            slave.botmaster = None

            wfd = defer.waitForDeferred(
                defer.maybeDeferred(lambda: slave.disownServiceParent()))
            yield wfd
            wfd.getResult()

        for n in added_names:
            slave = new_by_name[n]
            slave.setServiceParent(self)

            slave.botmaster = self
            slave.master = self.master
            self.slaves[n] = slave

    metrics.MetricCountEvent.log("num_slaves",
                                 len(self.slaves), absolute=True)

    timer.stop()
def _checkRoundTrip(self, obj):
    """
    Make sure that an object will properly round-trip through 'qual' and
    'namedAny'.

    Raise a L{RuntimeError} if it won't.
    """
    tripped = reflect.namedAny(reflect.qual(obj))
    if tripped is not obj:
        raise RuntimeError("importing %r is not the same as %r" %
                           (reflect.qual(obj), obj))
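# The round trip being checked, in isolation: qual() renders a dotted name
# and namedAny() imports it back.  (builtins.ValueError assumes Python 3;
# on Python 2 the module name differs.)
from twisted.python.reflect import namedAny, qual

assert namedAny(qual(ValueError)) is ValueError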
def makeConfigMenu(self, interface):
    """Make a menu for adding a new object to a collection."""
    l = []
    for realClass in coil.getImplementors(interface):
        cfgClass = coil.getConfiguratorClass(realClass)
        nm = getattr(cfgClass, 'configName', None) or reflect.qual(realClass)
        l.append(['new ' + reflect.qual(realClass), 'new ' + nm])
    for t in self.dispensers.getDispensers(interface):
        obj, methodName, desc = t
        l.append(['dis %d' % hash(t), desc])
    return l
def spewer(frame, s, ignored):
    """A trace function for sys.settrace that prints every method call."""
    from twisted.python import reflect
    if frame.f_locals.has_key('self'):
        se = frame.f_locals['self']
        if hasattr(se, '__class__'):
            k = reflect.qual(se.__class__)
        else:
            k = reflect.qual(type(se))
        print 'method %s of %s at %s' % (
            frame.f_code.co_name, k, id(se))
def versionUpgrade(self):
    """(internal) Do a version upgrade.
    """
    bases = _aybabtu(self.__class__)
    # put the bases in order so superclasses' persistenceVersion methods
    # will be called first.
    bases.reverse()
    bases.append(self.__class__)  # don't forget me!!
    # first let's look for old-skool versioned's
    if self.__dict__.has_key("persistenceVersion"):

        # Hacky heuristic: if more than one class subclasses Versioned,
        # we'll assume that the higher version number wins for the older
        # class, so we'll consider the attribute the version of the older
        # class.  There are obviously possible times when this will
        # eventually be an incorrect assumption, but hopefully old-school
        # persistenceVersion stuff won't make it that far into multiple
        # classes inheriting from Versioned.

        pver = self.__dict__["persistenceVersion"]
        del self.__dict__["persistenceVersion"]
        highestVersion = 0
        highestBase = None
        for base in bases:
            if not base.__dict__.has_key("persistenceVersion"):
                continue
            if base.persistenceVersion > highestVersion:
                highestBase = base
                highestVersion = base.persistenceVersion
        if highestBase:
            self.__dict__["%s.persistenceVersion" %
                          reflect.qual(highestBase)] = pver
    for base in bases:
        # ugly hack, but it's what the user expects, really
        if (Versioned not in base.__bases__ and
            not base.__dict__.has_key("persistenceVersion")):
            continue
        currentVers = base.persistenceVersion
        pverName = "%s.persistenceVersion" % reflect.qual(base)
        persistVers = self.__dict__.get(pverName) or 0
        if persistVers:
            del self.__dict__[pverName]
        assert persistVers <= currentVers, "Sorry, can't go backwards in time."
        while persistVers < currentVers:
            persistVers = persistVers + 1
            method = base.__dict__.get("upgradeToVersion%s" % persistVers, None)
            if method:
                log.msg("Upgrading %s (of %s @ %s) to version %s" %
                        (reflect.qual(base), reflect.qual(self.__class__),
                         id(self), persistVers))
                method(self)
            else:
                log.msg("Warning: cannot upgrade %s to version %s" %
                        (base, persistVers))
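# A hedged sketch of what versionUpgrade() consumes: a Versioned subclass
# declares a persistenceVersion class attribute plus upgradeToVersionN
# hooks.  Record and its attribute names are illustrative, not part of
# Twisted.
from twisted.persisted.styles import Versioned

class Record(Versioned):
    persistenceVersion = 2

    def upgradeToVersion2(self):
        # invoked once when an instance persisted at version 1 is loaded
        self.renamedField = self.__dict__.pop('oldField', None)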
def reconfigServiceWithBuildbotConfig(self, new_config):
    # arrange children by name
    old_by_name = self.namedServices
    old_set = set(old_by_name.iterkeys())
    new_config_attr = getattr(new_config, self.config_attr)
    if isinstance(new_config_attr, list):
        new_by_name = dict([(s.name, s) for s in new_config_attr])
    elif isinstance(new_config_attr, dict):
        new_by_name = new_config_attr
    else:
        raise TypeError("config.%s should be a list or dictionary" %
                        (self.config_attr,))
    new_set = set(new_by_name.iterkeys())

    # calculate new children, by name, and removed children
    removed_names, added_names = util.diffSets(old_set, new_set)

    # find any children for which the fully qualified class name has
    # changed, and treat those as an add and remove
    for n in old_set & new_set:
        old = old_by_name[n]
        new = new_by_name[n]
        # detect changed class name
        if reflect.qual(old.__class__) != reflect.qual(new.__class__):
            removed_names.add(n)
            added_names.add(n)

    if removed_names or added_names:
        log.msg("adding %d new %s, removing %d" %
                (len(added_names), self.config_attr, len(removed_names)))

        for n in removed_names:
            child = old_by_name[n]
            yield child.disownServiceParent()

        for n in added_names:
            child = new_by_name[n]
            yield child.setServiceParent(self)

    # get a list of child services to reconfigure
    reconfigurable_services = [svc for svc in self]

    # sort by priority
    reconfigurable_services.sort(key=lambda svc: -svc.reconfig_priority)

    for svc in reconfigurable_services:
        if not svc.name:
            raise ValueError("%r: child %r should have a defined name "
                             "attribute" % (self, svc))
        config_sibling = new_by_name.get(svc.name)
        yield svc.reconfigServiceWithSibling(config_sibling)
def jellyToAO(self, obj):
    """I turn an object into an AOT and return it."""
    objType = type(obj)
    self.stack.append(repr(obj))

    if objType in _SIMPLE_BUILTINS:
        retval = obj
    elif objType is types.MethodType:
        retval = InstanceMethod(obj.im_func.__name__,
                                reflect.qual(obj.im_class),
                                self.jellyToAO(obj.im_self))
    elif objType is types.ModuleType:
        retval = Module(obj.__name__)
    elif objType is types.ClassType:
        retval = Class(reflect.qual(obj))
    elif issubclass(objType, type):
        retval = Class(reflect.qual(obj))
    elif objType is types.FunctionType:
        retval = Function(reflect.fullFuncName(obj))
    else:  # mutable! gotta watch for refs.
        if self.prepared.has_key(id(obj)):
            oldRef = self.prepared[id(obj)]
            if oldRef.refnum:
                key = oldRef.refnum
            else:
                self._ref_id = self._ref_id + 1
                key = self._ref_id
                oldRef.setRef(key)
            return Deref(key)
        retval = Ref()
        self.prepareForRef(retval, obj)
        if objType is types.ListType:
            retval.setObj(map(self.jellyToAO, obj))  # hah!
        elif objType is types.TupleType:
            retval.setObj(tuple(map(self.jellyToAO, obj)))
        elif objType is types.DictionaryType:
            d = {}
            for k, v in obj.items():
                d[self.jellyToAO(k)] = self.jellyToAO(v)
            retval.setObj(d)
        elif objType is types.InstanceType:
            if hasattr(obj, "__getstate__"):
                state = self.jellyToAO(obj.__getstate__())
            else:
                state = self.jellyToAO(obj.__dict__)
            retval.setObj(Instance(reflect.qual(obj.__class__), state))
        elif copy_reg.dispatch_table.has_key(objType):
            unpickleFunc, state = copy_reg.dispatch_table[objType](obj)
            retval.setObj(Copyreg(reflect.fullFuncName(unpickleFunc),
                                  self.jellyToAO(state)))
        else:
            # raising a bare string is a long-removed misfeature; raise a
            # real exception instead
            raise TypeError("Unsupported type: %s" % objType.__name__)
    del self.stack[-1]
    return retval
def spewer(frame, s, ignored):
    """A trace function for sys.settrace that prints every function or
    method call."""
    from twisted.python import reflect
    if frame.f_locals.has_key("self"):
        se = frame.f_locals["self"]
        if hasattr(se, "__class__"):
            k = reflect.qual(se.__class__)
        else:
            k = reflect.qual(type(se))
        print "method %s of %s at %s" % (frame.f_code.co_name, k, id(se))
    else:
        print "function %s in %s, line %s" % (frame.f_code.co_name,
                                              frame.f_code.co_filename,
                                              frame.f_lineno)
def reconfigServiceSlaves(self, new_config):
    timer = metrics.Timer("BuildSlaveManager.reconfigServiceSlaves")
    timer.start()

    # first we deconfigure everything to let the slaves register again
    yield self.master.data.updates.deconfigureAllBuidslavesForMaster(
        self.master.masterid)

    # arrange slaves by name
    old_by_name = dict([(s.slavename, s)
                        for s in list(self)
                        if IBuildSlave.providedBy(s)])
    old_set = set(old_by_name.iterkeys())
    new_by_name = dict([(s.slavename, s) for s in new_config.slaves])
    new_set = set(new_by_name.iterkeys())

    # calculate new slaves, by name, and removed slaves
    removed_names, added_names = util.diffSets(old_set, new_set)

    # find any slaves for which the fully qualified class name has
    # changed, and treat those as an add and remove
    for n in old_set & new_set:
        old = old_by_name[n]
        new = new_by_name[n]
        # detect changed class name
        if reflect.qual(old.__class__) != reflect.qual(new.__class__):
            removed_names.add(n)
            added_names.add(n)

    if removed_names or added_names:
        log.msg("adding %d new slaves, removing %d" %
                (len(added_names), len(removed_names)))

        for n in removed_names:
            slave = old_by_name[n]
            del self.slaves[n]
            slave.master = None
            yield slave.disownServiceParent()

        for n in added_names:
            slave = new_by_name[n]
            yield slave.setServiceParent(self)
            self.slaves[n] = slave

    metrics.MetricCountEvent.log("num_slaves",
                                 len(self.slaves), absolute=True)

    timer.stop()
def display(self, request):
    """Implement me to represent your widget.

    I must return a list of strings and twisted.internet.defer.Deferred
    instances.
    """
    raise NotImplementedError("%s.display" % reflect.qual(self.__class__))
def __repr__(self):
    factoryName = reflect.qual(self.factory.__class__)
    if hasattr(self, 'socket'):
        return '<%s on %r>' % (
            factoryName, _coerceToFilesystemEncoding('', self.port))
    else:
        return '<%s (not listening)>' % (factoryName,)
def proxyForInterface(iface, originalAttribute='original'):
    """
    Create a class which proxies all method calls which adhere to an
    interface to another provider of that interface.

    This function is intended for creating specialized proxies.  The typical
    way to use it is by subclassing the result::

        class MySpecializedProxy(proxyForInterface(IFoo)):
            def someInterfaceMethod(self, arg):
                if arg == 3:
                    return 3
                return self.original.someInterfaceMethod(arg)

    @param iface: The Interface to which the resulting object will conform,
        and which the wrapped object must provide.

    @param originalAttribute: name of the attribute used to save the original
        object in the resulting class.  Defaults to C{original}.
    @type originalAttribute: C{str}

    @return: A class whose constructor takes the original object as its only
        argument.  Constructing the class creates the proxy.
    """
    def __init__(self, original):
        setattr(self, originalAttribute, original)
    contents = {"__init__": __init__}
    for name in iface:
        contents[name] = _ProxyDescriptor(name, originalAttribute)
    proxy = type("(Proxy for %s)" % (reflect.qual(iface),), (object,),
                 contents)
    directlyProvides(proxy, iface)
    return proxy
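# Usage sketch for proxyForInterface (the shipping version lives in
# twisted.python.components); IFoo and Foo are made-up names for
# illustration only.
from zope.interface import Interface, implementer
from twisted.python.components import proxyForInterface

class IFoo(Interface):
    def bar():
        """Return a number."""

@implementer(IFoo)
class Foo(object):
    def bar(self):
        return 42

FooProxy = proxyForInterface(IFoo)
assert FooProxy(Foo()).bar() == 42  # delegated to the wrapped Foo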
def _setUpStore(self):
    """
    Set up a store, install a L{HyperbolaPublicPresence} and its
    dependencies, and create a role.
    """
    self.siteStore = Store(filesdir=self.mktemp())
    Mantissa().installSite(
        self.siteStore, u"localhost", u"", generateCert=False)

    # Make it standard so there's no port number in the generated URL.
    # This kind of sucks.  I don't want people assuming SSLPorts are
    # created by Mantissa().installSite().  Oh right, I should add a better
    # API for initializing a Mantissa server. -exarkun
    site = self.siteStore.findUnique(SiteConfiguration)
    ssls = list(site.store.query(SSLPort))
    ssls[0].portNumber = 443

    self.loginSystem = self.siteStore.findUnique(LoginSystem)
    product = Product(store=self.siteStore,
                      types=[qual(HyperbolaPublicPresence)])
    acct = self.loginSystem.addAccount(
        u'user', u'localhost', u'asdf', internal=True)
    self.userStore = acct.avatars.open()
    product.installProductOn(self.userStore)
    self.publicPresence = self.userStore.findUnique(
        HyperbolaPublicPresence)
    self.role = sharing.Role(
        store=self.userStore,
        externalID=u'foo@host', description=u'foo')
def _makeContext(self):
    ctx = SSL.Context(self.method)
    ctx.set_app_data(_SSLApplicationData())

    if self.certificate is not None and self.privateKey is not None:
        ctx.use_certificate(self.certificate)
        ctx.use_privatekey(self.privateKey)
        # Sanity check
        ctx.check_privatekey()

    verifyFlags = SSL.VERIFY_NONE
    if self.verify:
        verifyFlags = SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT
        if self.caCerts:
            store = ctx.get_cert_store()
            for cert in self.caCerts:
                store.add_cert(cert)

    def _trackVerificationProblems(conn, cert, errno, depth, preverify_ok):
        return True
    ctx.set_verify(verifyFlags, _trackVerificationProblems)

    if self.enableSessions:
        sessionName = md5.md5("%s-%d" % (reflect.qual(self.__class__),
                                         _sessionCounter())).hexdigest()
        ctx.set_session_id(sessionName)

    return ctx
def spawn(self, hereProto, thereProto, childFDs=None):
    """
    Spawn a subprocess with a connected pair of protocol objects, one in
    the current process, one in the subprocess.

    @param hereProto: a L{Protocol} instance to listen in this process.

    @param thereProto: a top-level class or function that will be imported
        and called in the spawned subprocess.

    @param childFDs: File descriptors to share with the subprocess; same
        format as L{IReactorProcess.spawnProcess}.

    @return: a L{Deferred} that fires when C{hereProto} is ready.
    """
    if not self.running:
        self.pendingSpawns.append((hereProto, thereProto))
        return
    name = qual(thereProto)
    argv = [sys.executable, '-u', '-m', __name__, name]
    self.reactor.spawnProcess(
        BridgeProtocol(self, hereProto), sys.executable, argv,
        os.environ, childFDs=childFDs)
    return succeed(hereProto)
def rendered(req):
    self.assertIn(qual(ThemedSubclass), req.v)
    self.assertIn(
        'specified no <code>fragmentName</code> attribute.', req.v)
def jellyFor(self, jellier):
    """
    @see: L{twisted.spread.interfaces.IJellyable.jellyFor}
    """
    sxp = jellier.prepare(self)
    sxp.extend([
        qual(self.__class__),
        jellier.jelly(self.getStateFor(jellier))])
    return jellier.preserve(self, sxp)
def fullyQualifiedName(self):
    # XXX: this is an example of silly redundancy, this really ought to be
    # refactored to work like any other attribute (including being
    # explicitly covered in the schema, which has other good qualities like
    # allowing tables to be VACUUM'd without destroying oid stability and
    # every storeID reference ever. --glyph
    return qual(self.type) + ".storeID"
def powerupsFor(self, interface):
    """
    Returns powerups installed using C{powerUp}, in order of descending
    priority.

    Powerups found to have been deleted, either during the course of this
    powerupsFor iteration, during an upgrader, or previously, will not be
    returned.
    """
    name = unicode(qual(interface), "ascii")
    for cable in self.store.query(
            _PowerupConnector,
            AND(_PowerupConnector.interface == name,
                _PowerupConnector.item == self),
            sort=_PowerupConnector.priority.descending):
        pup = cable.powerup
        if pup is None:
            # this powerup was probably deleted during an upgrader.
            cable.deleteFromStore()
        else:
            indirector = IPowerupIndirector(pup, None)
            if indirector is not None:
                yield indirector.indirect(interface)
            else:
                yield pup
def setUp(self):
    """
    Create an account and log in using it.
    """
    IntegrationTestsMixin.setUp(self)

    # Make an account to be already logged in.
    self.userAccount = self.login.addAccount(
        self.username, self.domain, u"password", internal=True)
    self.userStore = self.userAccount.avatars.open()

    # Make a product that includes PrivateApplication.  This is probably
    # the minimum requirement for web access.
    web = Product(store=self.store, types=[qual(PrivateApplication)])

    # Give it to Alice.
    web.installProductOn(self.userStore)

    # Log in to the web as Alice.
    login = getWithSession(
        self.factory, 3,
        "/__login__?username=%s@%s&password=%s" % (
            self.username.encode("ascii"), self.domain.encode("ascii"),
            "password"),
        {"host": self.domain.encode("ascii")})
    def loggedIn(request):
        self.cookies = request.cookies
    login.addCallback(loggedIn)
    return login
def doRead(self):
    """Called when data is available for reading.

    Subclasses must override this method.  The result will be interpreted
    in the same way as a result of doWrite().
    """
    raise NotImplementedError("%s does not implement doRead" %
                              reflect.qual(self.__class__))
def operationError(self, error):
    """Example callback for database operation failure.

    Override this, and/or define your own callbacks.
    """
    log.msg("%s Operation Failed: %s" %
            (reflect.qual(self.__class__), error))
    log.err(error)
def format(self, form, write, request):
    """I display an HTML FORM according to the result of
    self.getFormFields.
    """
    write('<form ENCTYPE="multipart/form-data" METHOD="post" ACTION="%s">\n'
          '<table BORDER="0">\n' % (self.actionURI or request.uri))
    for field in form:
        if len(field) == 5:
            inputType, displayName, inputName, inputValue, description = field
        else:
            inputType, displayName, inputName, inputValue = field
            description = ""
        write('<tr>\n<td ALIGN="right" VALIGN="top"><B>%s</B></td>\n'
              '<td VALIGN="%s">\n' %
              (displayName, ((inputType == 'text') and 'top') or 'middle'))
        self.formGen[inputType](write, inputName, inputValue)
        write('\n<br />\n<font size="-1">%s</font></td>\n</tr>\n' %
              description)

    write('<tr><td></td><td ALIGN="left"><hr />\n')
    for submitName in self.submitNames:
        write('<INPUT TYPE="submit" NAME="submit" VALUE="%s" />\n' %
              submitName)
    write('</td></tr>\n</table>\n'
          '<INPUT TYPE="hidden" NAME="__formtype__" VALUE="%s" />\n' %
          (reflect.qual(self.__class__)))
    fid = self.getFormID()
    if fid:
        write('<INPUT TYPE="hidden" NAME="__formid__" VALUE="%s" />\n' % fid)
    write("</form>\n")
def setUp(self):
    self.runners = []
    self.config = trial.Options()
    # whitebox hack a reporter in, because plugins are CACHED and will
    # only reload if the FILE gets changed.
    parts = reflect.qual(CapturingReporter).split('.')
    package = '.'.join(parts[:-1])
    klass = parts[-1]
    plugins = [twisted_trial._Reporter(
        "Test Helper Reporter",
        package,
        description="Utility for unit testing.",
        longOpt="capturing",
        shortOpt=None,
        klass=klass)]

    # XXX There should really be a general way to hook the plugin system
    # for tests.
    def getPlugins(iface, *a, **kw):
        self.assertEqual(iface, IReporter)
        return plugins + list(self.original(iface, *a, **kw))

    self.original = plugin.getPlugins
    plugin.getPlugins = getPlugins

    self.standardReport = ['startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest',
                           'startTest', 'addSuccess', 'stopTest']
def getComponent(self, interface, default=None):
    """Create or retrieve an adapter for the given interface.

    If such an adapter has already been created, retrieve it from the cache
    that this instance keeps of all its adapters.  Adapters created through
    this mechanism may safely store system-specific state.

    If you want to register an adapter that will be created through
    getComponent, but you don't require (or don't want) your adapter to be
    cached and kept alive for the lifetime of this Componentized object,
    set the attribute 'temporaryAdapter' to True on your adapter class.

    If you want to automatically register an adapter for all appropriate
    interfaces (with addComponent), set the attribute 'multiComponent' to
    True on your adapter class.
    """
    k = reflect.qual(interface)
    if self._adapterCache.has_key(k):
        return self._adapterCache[k]
    else:
        adapter = interface.__adapt__(self)
        if adapter is not None and not (
                hasattr(adapter, "temporaryAdapter") and
                adapter.temporaryAdapter):
            self._adapterCache[k] = adapter
            if (hasattr(adapter, "multiComponent") and
                    adapter.multiComponent):
                self.addComponent(adapter)
        if adapter is None:
            return default
        return adapter
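# Sketch of the caching contract described in the docstring above, using
# Twisted's global adapter registry; IFoo, FooAdapter, and Thing are
# made-up illustration names.
from zope.interface import Interface, implementer
from twisted.python.components import Componentized, registerAdapter

class IFoo(Interface):
    pass

@implementer(IFoo)
class FooAdapter(object):
    def __init__(self, original):
        self.original = original

class Thing(Componentized):
    pass

registerAdapter(FooAdapter, Thing, IFoo)

t = Thing()
# the second lookup returns the adapter cached by the first
assert t.getComponent(IFoo) is t.getComponent(IFoo)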
def run_command_version(options, reactor=None, **kwargs):
    """
    Subcommand "crossbar version".
    """
    log = make_logger()

    # Python
    py_ver = '.'.join([str(x) for x in list(sys.version_info[:3])])
    py_ver_string = "[%s]" % sys.version.replace('\n', ' ')

    if 'pypy_version_info' in sys.__dict__:
        py_ver_detail = "{}-{}".format(
            platform.python_implementation(),
            '.'.join(str(x) for x in sys.pypy_version_info[:3]))
    else:
        py_ver_detail = platform.python_implementation()

    # Twisted / Reactor
    tx_ver = "%s-%s" % (pkg_resources.require("Twisted")[0].version,
                        reactor.__class__.__name__)
    tx_loc = "[%s]" % qual(reactor.__class__)

    # txaio
    txaio_ver = '%s' % pkg_resources.require("txaio")[0].version

    # Autobahn
    ab_ver = pkg_resources.require("autobahn")[0].version
    ab_loc = "[%s]" % qual(WebSocketProtocol)

    # UTF8 Validator
    s = qual(Utf8Validator)
    if 'wsaccel' in s:
        utf8_ver = 'wsaccel-%s' % pkg_resources.require('wsaccel')[0].version
    elif s.startswith('autobahn'):
        utf8_ver = 'autobahn'
    else:
        # could not detect UTF8 validator type/version
        utf8_ver = '?'
    utf8_loc = "[%s]" % qual(Utf8Validator)

    # XOR Masker
    s = qual(XorMaskerNull)
    if 'wsaccel' in s:
        xor_ver = 'wsaccel-%s' % pkg_resources.require('wsaccel')[0].version
    elif s.startswith('autobahn'):
        xor_ver = 'autobahn'
    else:
        # could not detect XOR masker type/version
        xor_ver = '?'
    xor_loc = "[%s]" % qual(XorMaskerNull)

    # JSON Serializer
    supported_serializers = ['JSON']
    from autobahn.wamp.serializer import JsonObjectSerializer
    json_ver = JsonObjectSerializer.JSON_MODULE.__name__

    # If it's just 'json' then it's the stdlib one...
    if json_ver == 'json':
        json_ver = 'stdlib'
    else:
        json_ver = (json_ver + "-%s") % \
            pkg_resources.require(json_ver)[0].version

    # MsgPack Serializer
    try:
        import umsgpack  # noqa
        msgpack_ver = 'u-msgpack-python-%s' % \
            pkg_resources.require('u-msgpack-python')[0].version
        supported_serializers.append('MessagePack')
    except ImportError:
        msgpack_ver = '-'

    # CBOR Serializer
    try:
        import cbor  # noqa
        cbor_ver = 'cbor-%s' % pkg_resources.require('cbor')[0].version
        supported_serializers.append('CBOR')
    except ImportError:
        cbor_ver = '-'

    # UBJSON Serializer
    try:
        import ubjson  # noqa
        ubjson_ver = 'ubjson-%s' % \
            pkg_resources.require('py-ubjson')[0].version
        supported_serializers.append('UBJSON')
    except ImportError:
        ubjson_ver = '-'

    # LMDB
    try:
        import lmdb  # noqa
        lmdb_lib_ver = '.'.join([str(x) for x in lmdb.version()])
        lmdb_ver = '{}/lmdb-{}'.format(
            pkg_resources.require('lmdb')[0].version, lmdb_lib_ver)
    except ImportError:
        lmdb_ver = '-'

    # Release Public Key
    release_pubkey = _read_release_pubkey()

    def decorate(text):
        return click.style(text, fg='yellow', bold=True)

    Node = node_classes[options.personality][u'class']

    for line in Node.BANNER.splitlines():
        log.info(decorate("{:>40}".format(line)))

    pad = " " * 22

    log.info(" Crossbar.io : {ver} ({personality})",
             ver=decorate(crossbar.__version__),
             personality=Node.PERSONALITY)
    log.info(" Autobahn : {ver} (with {serializers})",
             ver=decorate(ab_ver),
             serializers=', '.join(supported_serializers))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(ab_loc))
    log.debug(" txaio : {ver}", ver=decorate(txaio_ver))
    log.debug(" UTF8 Validator : {ver}", ver=decorate(utf8_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(utf8_loc))
    log.debug(" XOR Masker : {ver}", ver=decorate(xor_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(xor_loc))
    log.debug(" JSON Codec : {ver}", ver=decorate(json_ver))
    log.debug(" MessagePack Codec : {ver}", ver=decorate(msgpack_ver))
    log.debug(" CBOR Codec : {ver}", ver=decorate(cbor_ver))
    log.debug(" UBJSON Codec : {ver}", ver=decorate(ubjson_ver))
    log.info(" Twisted : {ver}", ver=decorate(tx_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(tx_loc))
    log.info(" LMDB : {ver}", ver=decorate(lmdb_ver))
    log.info(" Python : {ver}/{impl}",
             ver=decorate(py_ver), impl=decorate(py_ver_detail))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(py_ver_string))
    log.info(" OS : {ver}", ver=decorate(platform.platform()))
    log.info(" Machine : {ver}", ver=decorate(platform.machine()))
    log.info(" Release key : {release_pubkey}",
             release_pubkey=decorate(release_pubkey[u'base64']))
    log.info("")
def __repr__(self):
    protocolName = reflect.qual(self.protocol.__class__)
    if hasattr(self, 'socket'):
        return '<%s on %r>' % (protocolName, self.port)
    else:
        return '<%s (not listening)>' % (protocolName,)
def __repr__(self):
    factoryName = reflect.qual(self.factory.__class__)
    if hasattr(self, 'socket'):
        return '<%s on %r>' % (factoryName, self.port)
    else:
        return '<%s (not listening)>' % (factoryName,)
def jellyToDOM_1(self, jellier, element):
    from twisted.python.reflect import qual
    element.setAttribute("integer", str(self.integer))
    element.setAttribute("instance",
                         qual(self.instance.__class__))  # not l33t enough
    element.setAttribute("name", str(self.name))
def addReader(self, reader):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement addReader")
def jelly(self, obj):
    if isinstance(obj, Jellyable):
        preRef = self._checkMutable(obj)
        if preRef:
            return preRef
        return obj.jellyFor(self)
    objType = type(obj)
    if self.taster.isTypeAllowed(qual(objType).encode("utf-8")):
        # "Immutable" Types
        if objType in (bytes, int, float):
            return obj
        elif isinstance(obj, types.MethodType):
            aSelf = obj.__self__
            aFunc = obj.__func__
            aClass = aSelf.__class__
            return [
                b"method",
                aFunc.__name__,
                self.jelly(aSelf),
                self.jelly(aClass),
            ]
        elif objType is str:
            return [b"unicode", obj.encode("UTF-8")]
        elif isinstance(obj, type(None)):
            return [b"None"]
        elif isinstance(obj, types.FunctionType):
            return [b"function", obj.__module__ + "." + obj.__qualname__]
        elif isinstance(obj, types.ModuleType):
            return [b"module", obj.__name__]
        elif objType is bool:
            return [b"boolean", obj and b"true" or b"false"]
        elif objType is datetime.datetime:
            if obj.tzinfo:
                raise NotImplementedError(
                    "Currently can't jelly datetime objects with tzinfo"
                )
            return [
                b"datetime",
                " ".join(
                    [
                        str(x)
                        for x in (
                            obj.year,
                            obj.month,
                            obj.day,
                            obj.hour,
                            obj.minute,
                            obj.second,
                            obj.microsecond,
                        )
                    ]
                ).encode("utf-8"),
            ]
        elif objType is datetime.time:
            if obj.tzinfo:
                raise NotImplementedError(
                    "Currently can't jelly datetime objects with tzinfo"
                )
            return [
                b"time",
                " ".join(
                    [
                        str(x)
                        for x in (obj.hour, obj.minute, obj.second,
                                  obj.microsecond)
                    ]
                ).encode("utf-8"),
            ]
        elif objType is datetime.date:
            return [
                b"date",
                " ".join([str(x) for x in (obj.year, obj.month, obj.day)]).encode(
                    "utf-8"
                ),
            ]
        elif objType is datetime.timedelta:
            return [
                b"timedelta",
                " ".join(
                    [str(x) for x in (obj.days, obj.seconds, obj.microseconds)]
                ).encode("utf-8"),
            ]
        elif issubclass(objType, type):
            return [b"class", qual(obj).encode("utf-8")]
        elif objType is decimal.Decimal:
            return self.jelly_decimal(obj)
        else:
            preRef = self._checkMutable(obj)
            if preRef:
                return preRef
            # "Mutable" Types
            sxp = self.prepare(obj)
            if objType is list:
                sxp.extend(self._jellyIterable(list_atom, obj))
            elif objType is tuple:
                sxp.extend(self._jellyIterable(tuple_atom, obj))
            elif objType in DictTypes:
                sxp.append(dictionary_atom)
                for key, val in obj.items():
                    sxp.append([self.jelly(key), self.jelly(val)])
            elif objType in _SetTypes:
                sxp.extend(self._jellyIterable(set_atom, obj))
            elif objType in _ImmutableSetTypes:
                sxp.extend(self._jellyIterable(frozenset_atom, obj))
            else:
                className = qual(obj.__class__).encode("utf-8")
                persistent = None
                if self.persistentStore:
                    persistent = self.persistentStore(obj, self)
                if persistent is not None:
                    sxp.append(persistent_atom)
                    sxp.append(persistent)
                elif self.taster.isClassAllowed(obj.__class__):
                    sxp.append(className)
                    if hasattr(obj, "__getstate__"):
                        state = obj.__getstate__()
                    else:
                        state = obj.__dict__
                    sxp.append(self.jelly(state))
                else:
                    self.unpersistable(
                        "instance of class %s deemed insecure"
                        % qual(obj.__class__),
                        sxp,
                    )
            return self.preserve(obj, sxp)
    else:
        raise InsecureJelly("Type not allowed for object: %s %s" %
                            (objType, obj))
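# Round-trip sketch using the module-level helpers that wrap this method:
# jelly() emits the s-expression form built above, and unjelly() reverses
# it for the default (basic-types) security profile.
from twisted.spread.jelly import jelly, unjelly

assert unjelly(jelly([1, 2.0, u"three"])) == [1, 2.0, u"three"]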
def doWrite(self):
    """Raises a RuntimeError"""
    raise RuntimeError(
        "doWrite called on a %s" % reflect.qual(self.__class__))
def installWaker(self):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement installWaker")
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
    """
    Assert that L{printDetailedTraceback} produces and prints a detailed
    traceback.

    The detailed traceback consists of a header::

      *--- Failure #20 ---

    The body contains the stacktrace::

      /twisted/trial/_synctest.py:1180: _run(...)
      /twisted/python/util.py:1076: runWithWarningsSuppressed(...)
      --- <exception caught here> ---
      /twisted/test/test_failure.py:39: getDivisionFailure(...)

    If C{captureVars} is enabled the body also includes a list of
    globals and locals::

      [ Locals ]
        exampleLocalVar : 'xyz'
        ...
      ( Globals )
        ...

    Or when C{captureVars} is disabled::

      [Capture of Locals and Globals disabled (use captureVars=True)]

    When C{cleanFailure} is enabled references to other objects are removed
    and replaced with strings.

    And finally the footer with the L{Failure}'s value::

      exceptions.ZeroDivisionError: float division
      *--- End of Failure #20 ---

    @param captureVars: Enables L{Failure.captureVars}.
    @type captureVars: C{bool}

    @param cleanFailure: Enables L{Failure.cleanFailure}.
    @type cleanFailure: C{bool}
    """
    if captureVars:
        exampleLocalVar = 'xyz'
        # Silence the linter as this variable is checked via
        # the traceback.
        exampleLocalVar

    f = getDivisionFailure(captureVars=captureVars)
    out = NativeStringIO()
    if cleanFailure:
        f.cleanFailure()
    f.printDetailedTraceback(out)

    tb = out.getvalue()
    start = "*--- Failure #%d%s---\n" % (
        f.count, (f.pickled and ' (pickled) ') or ' ')
    end = "%s: %s\n*--- End of Failure #%s ---\n" % (
        reflect.qual(f.type), reflect.safe_str(f.value), f.count)
    self.assertTracebackFormat(tb, start, end)

    # Variables are printed on lines with 2 leading spaces.
    linesWithVars = [line for line in tb.splitlines()
                     if line.startswith('  ')]

    if captureVars:
        self.assertNotEqual([], linesWithVars)
        if cleanFailure:
            line = '  exampleLocalVar : "\'xyz\'"'
        else:
            line = "  exampleLocalVar : 'xyz'"
        self.assertIn(line, linesWithVars)
    else:
        self.assertEqual([], linesWithVars)
        self.assertIn(' [Capture of Locals and Globals disabled (use '
                      'captureVars=True)]\n', tb)
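# A quick way to eyeball the format asserted above (the exact output
# varies across Twisted versions):
import sys
from twisted.python.failure import Failure

try:
    1 / 0
except ZeroDivisionError:
    f = Failure(captureVars=True)
f.printDetailedTraceback(sys.stdout)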
def jellyToAO(self, obj):
    """I turn an object into an AOT and return it."""
    objType = type(obj)
    self.stack.append(repr(obj))

    # immutable: We don't care if these have multiple refs!
    if objType in _SIMPLE_BUILTINS:
        retval = obj

    elif issubclass(objType, types.MethodType):
        # TODO: make methods 'prefer' not to jelly the object internally,
        # so that the object will show up where it's referenced first NOT
        # by a method.
        retval = InstanceMethod(
            _funcOfMethod(obj).__name__,
            reflect.qual(_classOfMethod(obj)),
            self.jellyToAO(_selfOfMethod(obj)),
        )

    elif issubclass(objType, types.ModuleType):
        retval = Module(obj.__name__)

    elif issubclass(objType, type):
        retval = Class(reflect.qual(obj))

    elif objType is types.FunctionType:
        retval = Function(reflect.fullFuncName(obj))

    else:  # mutable! gotta watch for refs.
        # Marmalade had the nicety of being able to just stick a 'reference'
        # attribute on any Node object that was referenced, but in AOT, the
        # referenced object is *inside* of a Ref call (Ref(num, obj) instead
        # of <objtype ... reference="1">).  The problem is, especially for
        # built-in types, I can't just assign some attribute to them to give
        # them a refnum.  So, I have to "wrap" a Ref(..) around them later --
        # that's why I put *everything* that's mutable inside one.  The Ref()
        # class will only print the "Ref(..)" around an object if it has a
        # Reference explicitly attached.
        if id(obj) in self.prepared:
            oldRef = self.prepared[id(obj)]
            if oldRef.refnum:
                # it's been referenced already
                key = oldRef.refnum
            else:
                # it hasn't been referenced yet
                self._ref_id = self._ref_id + 1
                key = self._ref_id
                oldRef.setRef(key)
            return Deref(key)

        retval = Ref()

        def _stateFrom(state):
            retval.setObj(Instance(reflect.qual(obj.__class__),
                                   self.jellyToAO(state)))

        self.prepareForRef(retval, obj)

        if objType is list:
            retval.setObj([self.jellyToAO(o) for o in obj])  # hah!

        elif objType is tuple:
            retval.setObj(tuple(map(self.jellyToAO, obj)))

        elif objType is dict:
            d = {}
            for k, v in obj.items():
                d[self.jellyToAO(k)] = self.jellyToAO(v)
            retval.setObj(d)

        elif objType in copy_reg.dispatch_table:
            unpickleFunc, state = copy_reg.dispatch_table[objType](obj)
            retval.setObj(Copyreg(reflect.fullFuncName(unpickleFunc),
                                  self.jellyToAO(state)))

        elif hasattr(obj, "__getstate__"):
            _stateFrom(obj.__getstate__())
        elif hasattr(obj, "__dict__"):
            _stateFrom(obj.__dict__)
        else:
            raise TypeError("Unsupported type: %s" % objType.__name__)

    del self.stack[-1]
    return retval
def __init__(self, original):
    self.original = original

    key = reflect.qual(original.__class__)
    count = _counters.get(key, 0)
    _counters[key] = count + 1
    self._logFile = open(key + '-' + str(count), 'w')
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # make sure logging to something else than stdio is setup _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('--loglevel',
                        default="info",
                        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
                        help='Initial log level.')

    parser.add_argument('-c', '--cbdir',
                        type=six.text_type,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-r', '--realm',
                        type=six.text_type,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-k', '--klass',
                        type=six.text_type,
                        help='Crossbar.io worker class (required).')

    parser.add_argument('-n', '--node',
                        type=six.text_type,
                        help='Crossbar.io node ID (required).')

    parser.add_argument('-w', '--worker',
                        type=six.text_type,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('--title',
                        type=six.text_type,
                        default=None,
                        help='Worker process title to set (optional).')

    parser.add_argument('--expose_controller',
                        type=bool,
                        default=False,
                        help='Expose node controller session to all components (this feature requires Crossbar.io Fabric extension).')

    parser.add_argument('--expose_shared',
                        type=bool,
                        default=False,
                        help='Expose a shared object to all components (this feature requires Crossbar.io Fabric extension).')

    parser.add_argument('--shutdown',
                        type=six.text_type,
                        default=None,
                        help='Shutdown method')

    options = parser.parse_args()

    # actually begin logging
    start_logging(None, options.loglevel)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # eg: crossbar.worker.container.ContainerWorkerSession
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Started {worker_title} worker "{worker_id}" on node "{node_id}" [{klass} / {python}-{reactor}]',
        worker_title=klass.WORKER_TITLE,
        klass=options.klass,
        node_id=options.node,
        worker_id=options.worker,
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle(
                'crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down
            # orderly or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}",
                             reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned", and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown',
                                              os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = klass

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory,
                                                       u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
def jellyFor(self, jellier):
    qual = reflect.qual(PBMind)
    if isinstance(qual, unicode):
        qual = qual.encode("utf-8")
    return qual, jellier.invoker.registerReference(self)
def run_command_start(options, reactor=None):
    """
    Subcommand "crossbar start".
    """
    assert reactor

    # do not allow to run more than one Crossbar.io instance
    # from the same Crossbar.io node directory
    #
    pid_data = check_is_running(options.cbdir)
    if pid_data:
        print("Crossbar.io is already running from node directory {} (PID {}).".format(
            options.cbdir, pid_data['pid']))
        sys.exit(1)
    else:
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        with open(fp, 'wb') as fd:
            argv = options.argv
            options_dump = vars(options)
            pid_data = {
                'pid': os.getpid(),
                'argv': argv,
                'options': {x: y for x, y in options_dump.items()
                            if x not in ["func", "argv"]}
            }
            fd.write("{}\n".format(
                json.dumps(pid_data,
                           sort_keys=False,
                           indent=4,
                           separators=(', ', ': '),
                           ensure_ascii=False)).encode('utf8'))

    # remove node PID file when reactor exits
    #
    def remove_pid_file():
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        if os.path.isfile(fp):
            os.remove(fp)
    reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file)

    log = make_logger()

    # represents the running Crossbar.io node
    #
    Node = node_classes[options.personality][u'class']
    node = Node(options.cbdir, reactor=reactor)

    # possibly generate new node key
    #
    pubkey = node.maybe_generate_key(options.cbdir)

    # Print the banner.
    #
    for line in Node.BANNER.splitlines():
        log.info(click.style(("{:>40}").format(line), fg='yellow', bold=True))

    bannerFormat = "{:<12} {:<24}"
    log.info(bannerFormat.format(
        "Version:",
        click.style('{} {}'.format(node.PERSONALITY, crossbar.__version__),
                    fg='yellow', bold=True)))
    if pubkey:
        log.info(bannerFormat.format(
            "Public Key:", click.style(pubkey, fg='yellow', bold=True)))
    log.info()

    log.info('Node created with personality "{node_personality}" [{node_class}]',
             node_personality=options.personality,
             node_class='{}.{}'.format(Node.__module__, Node.__name__))
    log.info('Running from node directory "{cbdir}"', cbdir=options.cbdir)

    # check and load the node configuration
    #
    try:
        node.load(options.config)
    except InvalidConfigException as e:
        log.error("Invalid node configuration")
        log.error("{e!s}", e=e)
        sys.exit(1)
    except:
        raise

    log.info("Controller process starting ({python}-{reactor}) ..",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    # now actually start the node ..
    #
    def start_crossbar():
        d = node.start()

        def on_error(err):
            log.error("{e!s}", e=err.value)
            log.error("Could not start node: {err}", err=err)
            if reactor.running:
                reactor.stop()
        d.addErrback(on_error)
    reactor.callWhenRunning(start_crossbar)

    # enter event loop
    #
    try:
        reactor.run()
    except Exception:
        log.failure("Could not start reactor - {log_failure.value}")
def reconfigServiceWithBuildbotConfig(self, new_config):
    # arrange children by name
    old_by_name = self.namedServices
    old_set = set(old_by_name)
    new_config_attr = getattr(new_config, self.config_attr)
    if isinstance(new_config_attr, list):
        new_by_name = {s.name: s for s in new_config_attr}
    elif isinstance(new_config_attr, dict):
        new_by_name = new_config_attr
    else:
        raise TypeError("config.{} should be a list or dictionary".format(
            self.config_attr))
    new_set = set(new_by_name)

    # calculate new children, by name, and removed children
    removed_names, added_names = util.diffSets(old_set, new_set)

    # find any children for which the fully qualified class name has
    # changed, and treat those as an add and remove.
    # While we're at it, find any services that don't know how to reconfig
    # and, if they have changed, add them to both removed and added, so
    # that we run the new version.
    for n in old_set & new_set:
        old = old_by_name[n]
        new = new_by_name[n]
        # detect changed class name
        if reflect.qual(old.__class__) != reflect.qual(new.__class__):
            removed_names.add(n)
            added_names.add(n)
        # compare using ComparableMixin if they don't support reconfig
        elif not hasattr(old, 'reconfigServiceWithBuildbotConfig'):
            if old != new:
                removed_names.add(n)
                added_names.add(n)

    if removed_names or added_names:
        log.msg("adding {} new {}, removing {}".format(
            len(added_names), self.config_attr, len(removed_names)))

        for n in removed_names:
            child = old_by_name[n]
            # disownServiceParent calls stopService after removing the
            # relationship; as the child might use self.master.data to stop
            # itself, it's better to stop it first (this is related to the
            # fact that self.master is found by recursively looking at
            # self.parent for a master)
            yield child.stopService()
            # it has already been called, so do not call it again
            child.stopService = lambda: None
            yield child.disownServiceParent()

        for n in added_names:
            child = new_by_name[n]
            # setup service's objectid
            if hasattr(child, 'objectid'):
                class_name = '{}.{}'.format(child.__class__.__module__,
                                            child.__class__.__name__)
                objectid = yield self.master.db.state.getObjectId(
                    child.name, class_name)
                child.objectid = objectid
            yield defer.maybeDeferred(child.setServiceParent, self)

    # As the services that were just added got reconfigServiceWithSibling
    # called by setServiceParent->startService, we avoid calling it again
    # by selecting in reconfigurable_services only the services that were
    # not added just now.
    reconfigurable_services = [svc for svc in self
                               if svc.name not in added_names]

    # sort by priority
    reconfigurable_services.sort(key=lambda svc: -svc.reconfig_priority)

    for svc in reconfigurable_services:
        if not svc.name:
            raise ValueError(
                "{}: child {} should have a defined name attribute".format(
                    self, svc))
        config_sibling = new_by_name.get(svc.name)
        try:
            yield svc.reconfigServiceWithSibling(config_sibling)
        except NotImplementedError:
            # legacy support: it's too painful to transition old code to the
            # new Service life cycle, so we swap in the new child when the
            # service raises NotImplementedError.  Note this means that self
            # will stop and the sibling will take ownership, so there is a
            # small window where the service is unavailable.
            yield svc.disownServiceParent()
            config_sibling.objectid = svc.objectid
            yield config_sibling.setServiceParent(self)
def rebuild(module, doLog=1):
    """
    Reload a module and do as much as possible to replace its references.
    """
    global lastRebuild
    lastRebuild = time.time()
    if hasattr(module, 'ALLOW_TWISTED_REBUILD'):
        # Is this module allowed to be rebuilt?
        if not module.ALLOW_TWISTED_REBUILD:
            raise RuntimeError("I am not allowed to be rebuilt.")
    if doLog:
        log.msg('Rebuilding {}...'.format(str(module.__name__)))

    # Safely handle adapter re-registration
    from twisted.python import components
    components.ALLOW_DUPLICATES = True

    d = module.__dict__
    _modDictIDMap[id(d)] = module
    newclasses = {}
    classes = {}
    functions = {}
    values = {}
    if doLog:
        log.msg(' (scanning {}): '.format(str(module.__name__)))
    for k, v in d.items():
        if _isClassType(type(v)):
            # ClassType exists on Python 2.x and earlier.
            # Failure condition -- instances of classes with buggy
            # __hash__/__cmp__ methods referenced at the module level...
            if v.__module__ == module.__name__:
                classes[v] = 1
                if doLog:
                    log.logfile.write("c")
                    log.logfile.flush()
        elif type(v) == types.FunctionType:
            if v.__globals__ is module.__dict__:
                functions[v] = 1
                if doLog:
                    log.logfile.write("f")
                    log.logfile.flush()
        elif isinstance(v, type):
            if v.__module__ == module.__name__:
                newclasses[v] = 1
                if doLog:
                    log.logfile.write("o")
                    log.logfile.flush()

    values.update(classes)
    values.update(functions)
    fromOldModule = values.__contains__
    newclasses = newclasses.keys()
    classes = classes.keys()
    functions = functions.keys()

    if doLog:
        log.msg('')
        log.msg(' (reload {})'.format(str(module.__name__)))

    # Boom.
    reload(module)
    # Make sure that my traceback printing will at least be recent...
    linecache.clearcache()

    if doLog:
        log.msg(' (cleaning {}): '.format(str(module.__name__)))

    for clazz in classes:
        if getattr(module, clazz.__name__) is clazz:
            log.msg("WARNING: class {} not replaced by reload!".format(
                reflect.qual(clazz)))
        else:
            if doLog:
                log.logfile.write("x")
                log.logfile.flush()
            clazz.__bases__ = ()
            clazz.__dict__.clear()
            clazz.__getattr__ = __injectedgetattr__
            clazz.__module__ = module.__name__
    if newclasses:
        import gc
    for nclass in newclasses:
        ga = getattr(module, nclass.__name__)
        if ga is nclass:
            log.msg("WARNING: new-class {} not replaced by reload!".format(
                reflect.qual(nclass)))
        else:
            for r in gc.get_referrers(nclass):
                if getattr(r, '__class__', None) is nclass:
                    r.__class__ = ga
    if doLog:
        log.msg('')
        log.msg(' (fixing {}): '.format(str(module.__name__)))
    modcount = 0
    for mk, mod in sys.modules.items():
        modcount = modcount + 1
        if mod == module or mod is None:
            continue

        if not hasattr(mod, '__file__'):
            # It's a builtin module; nothing to replace here.
            continue

        if hasattr(mod, '__bundle__'):
            # PyObjC has a few buggy objects which segfault if you hash()
            # them.  It doesn't make sense to try rebuilding extension
            # modules like this anyway, so don't try.
            continue

        changed = 0

        for k, v in mod.__dict__.items():
            try:
                hash(v)
            except Exception:
                continue
            if fromOldModule(v):
                if _isClassType(type(v)):
                    if doLog:
                        log.logfile.write("c")
                        log.logfile.flush()
                    nv = latestClass(v)
                else:
                    if doLog:
                        log.logfile.write("f")
                        log.logfile.flush()
                    nv = latestFunction(v)
                changed = 1
                setattr(mod, k, nv)
            else:
                # Replace bases of non-module classes just to be sure.
                if _isClassType(type(v)):
                    for base in v.__bases__:
                        if fromOldModule(base):
                            latestClass(v)
        if doLog and not changed and ((modcount % 10) == 0):
            log.logfile.write(".")
            log.logfile.flush()

    components.ALLOW_DUPLICATES = False
    if doLog:
        log.msg('')
        log.msg(' Rebuilt {}.'.format(str(module.__name__)))
    return module
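# Typical interactive use of rebuild(), e.g. from a manhole session;
# "mymodule" is a hypothetical placeholder for whatever module you have
# edited on disk, not a real importable name.
from twisted.python.rebuild import rebuild
import mymodule  # hypothetical module under development

mymodule = rebuild(mymodule)  # reload and patch live references in place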
def removeAll(self):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement removeAll")
def install_optimal_reactor(verbose=False):
    """
    Try to install the optimal Twisted reactor for platform.

    :param verbose: If ``True``, print what happens.
    :type verbose: bool
    """
    import sys
    from twisted.python import reflect

    ## determine currently installed reactor, if any
    ##
    if 'twisted.internet.reactor' in sys.modules:
        current_reactor = reflect.qual(
            sys.modules['twisted.internet.reactor'].__class__).split('.')[-1]
    else:
        current_reactor = None

    ## depending on platform, install optimal reactor
    ##
    if 'bsd' in sys.platform or sys.platform.startswith('darwin'):

        ## *BSD and MacOSX
        ##
        if current_reactor != 'KQueueReactor':
            try:
                v = sys.version_info
                if v[0] == 1 or (v[0] == 2 and v[1] < 6) or \
                   (v[0] == 2 and v[1] == 6 and v[2] < 5):
                    raise Exception(
                        "Python version too old ({0}) to use kqueue reactor".format(
                            sys.version))
                from twisted.internet import kqreactor
                kqreactor.install()
            except Exception as e:
                print("WARNING: Running on *BSD or MacOSX, but cannot install kqueue Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    print("Running on *BSD or MacOSX and optimal reactor (kqueue) was installed.")
        else:
            if verbose:
                print("Running on *BSD or MacOSX and optimal reactor (kqueue) already installed.")

    elif sys.platform in ['win32']:

        ## Windows
        ##
        if current_reactor != 'IOCPReactor':
            try:
                from twisted.internet.iocpreactor import reactor as iocpreactor
                iocpreactor.install()
            except Exception as e:
                print("WARNING: Running on Windows, but cannot install IOCP Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    print("Running on Windows and optimal reactor (IOCP) was installed.")
        else:
            if verbose:
                print("Running on Windows and optimal reactor (IOCP) already installed.")

    elif sys.platform.startswith('linux'):

        ## Linux
        ##
        if current_reactor != 'EPollReactor':
            try:
                from twisted.internet import epollreactor
                epollreactor.install()
            except Exception as e:
                print("WARNING: Running on Linux, but cannot install Epoll Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    print("Running on Linux and optimal reactor (epoll) was installed.")
        else:
            if verbose:
                print("Running on Linux and optimal reactor (epoll) already installed.")

    else:
        try:
            from twisted.internet import default as defaultreactor
            defaultreactor.install()
        except Exception as e:
            print("WARNING: Could not install default Twisted reactor for this platform ({0}).".format(e))
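# Usage sketch: call this before anything imports twisted.internet.reactor,
# since an installed reactor cannot be replaced afterwards.
install_optimal_reactor(verbose=True)
from twisted.internet import reactor  # now the platform-optimal reactor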
def jelly(self, obj):
    if isinstance(obj, Jellyable):
        preRef = self._checkMutable(obj)
        if preRef:
            return preRef
        return obj.jellyFor(self)
    objType = type(obj)
    if self.taster.isTypeAllowed(qual(objType)):
        # "Immutable" Types
        if ((objType is StringType) or
            (objType is IntType) or
            (objType is LongType) or
            (objType is FloatType)):
            return obj
        elif objType is MethodType:
            return ["method",
                    obj.im_func.__name__,
                    self.jelly(obj.im_self),
                    self.jelly(obj.im_class)]
        elif UnicodeType and objType is UnicodeType:
            return ['unicode', obj.encode('UTF-8')]
        elif objType is NoneType:
            return ['None']
        elif objType is FunctionType:
            name = obj.__name__
            return ['function',
                    str(pickle.whichmodule(obj, obj.__name__)) + '.' + name]
        elif objType is ModuleType:
            return ['module', obj.__name__]
        elif objType is BooleanType:
            return ['boolean', obj and 'true' or 'false']
        elif objType is datetime.datetime:
            if obj.tzinfo:
                raise NotImplementedError(
                    "Currently can't jelly datetime objects with tzinfo")
            return ['datetime', '%s %s %s %s %s %s %s' % (
                obj.year, obj.month, obj.day, obj.hour,
                obj.minute, obj.second, obj.microsecond)]
        elif objType is datetime.time:
            if obj.tzinfo:
                raise NotImplementedError(
                    "Currently can't jelly datetime objects with tzinfo")
            return ['time', '%s %s %s %s' % (obj.hour, obj.minute,
                                             obj.second, obj.microsecond)]
        elif objType is datetime.date:
            return ['date', '%s %s %s' % (obj.year, obj.month, obj.day)]
        elif objType is datetime.timedelta:
            return ['timedelta', '%s %s %s' % (obj.days, obj.seconds,
                                               obj.microseconds)]
        elif objType is ClassType or issubclass(objType, type):
            return ['class', qual(obj)]
        elif decimal is not None and objType is decimal.Decimal:
            return self.jelly_decimal(obj)
        else:
            preRef = self._checkMutable(obj)
            if preRef:
                return preRef
            # "Mutable" Types
            sxp = self.prepare(obj)
            if objType is ListType:
                sxp.extend(self._jellyIterable(list_atom, obj))
            elif objType is TupleType:
                sxp.extend(self._jellyIterable(tuple_atom, obj))
            elif objType in DictTypes:
                sxp.append(dictionary_atom)
                for key, val in obj.items():
                    sxp.append([self.jelly(key), self.jelly(val)])
            elif (_set is not None and objType is set or
                  objType is _sets.Set):
                sxp.extend(self._jellyIterable(set_atom, obj))
            elif (_set is not None and objType is frozenset or
                  objType is _sets.ImmutableSet):
                sxp.extend(self._jellyIterable(frozenset_atom, obj))
            else:
                className = qual(obj.__class__)
                persistent = None
                if self.persistentStore:
                    persistent = self.persistentStore(obj, self)
                if persistent is not None:
                    sxp.append(persistent_atom)
                    sxp.append(persistent)
                elif self.taster.isClassAllowed(obj.__class__):
                    sxp.append(className)
                    if hasattr(obj, "__getstate__"):
                        state = obj.__getstate__()
                    else:
                        state = obj.__dict__
                    sxp.append(self.jelly(state))
                else:
                    self.unpersistable(
                        "instance of class %s deemed insecure" %
                        qual(obj.__class__), sxp)
            return self.preserve(obj, sxp)
    else:
        if objType is InstanceType:
            raise InsecureJelly("Class not allowed for instance: %s %s" %
                                (obj.__class__, obj))
        raise InsecureJelly("Type not allowed for object: %s %s" %
                            (objType, obj))
def getWriters(self):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement getWriters")
def __repr__(self): return "<%s instance at 0x%x %s %s>" % (reflect.qual( self.__class__), id(self), self.state, self.getDestination())
def removeWriter(self, writer):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement removeWriter")
def __repr__(self):
    return ('<%s run=%d errors=%d failures=%d todos=%d dones=%d skips=%d>'
            % (reflect.qual(self.__class__), self.testsRun,
               len(self.errors), len(self.failures),
               len(self.expectedFailures), len(self.skips),
               len(self.unexpectedSuccesses)))
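# Illustrative sketch (Widget is a hypothetical class): reflect.qual()
# returns the fully qualified dotted name, which is why the __repr__
# implementations above identify the concrete class rather than a base class.
from twisted.python import reflect

class Widget(object):
    pass

print(reflect.qual(Widget))  # -> "__main__.Widget" (module-dependent)
w = Widget()
print("<%s instance at 0x%x>" % (reflect.qual(w.__class__), id(w)))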
def getDestination(self):
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement getDestination")
def logPrefix(self):
    """
    Returns the name of my class, to prefix log entries with.
    """
    return reflect.qual(self.factory.__class__)
def reconfigService(self, new_config):
    timer = metrics.Timer("SchedulerManager.reconfigService")
    timer.start()

    old_by_name = dict((sch.name, sch) for sch in self)
    old_set = set(old_by_name.iterkeys())
    new_by_name = new_config.schedulers
    new_set = set(new_by_name.iterkeys())

    removed_names, added_names = util.diffSets(old_set, new_set)

    # find any schedulers that don't know how to reconfig, and, if they
    # have changed, add them to both removed and added, so that we run
    # the new version.  While we're at it, find any schedulers whose
    # fully qualified class name has changed, and consider those a
    # removal and re-add as well.
    for n in old_set & new_set:
        old = old_by_name[n]
        new = new_by_name[n]
        # detect changed class name
        if reflect.qual(old.__class__) != reflect.qual(new.__class__):
            removed_names.add(n)
            added_names.add(n)
        # compare using ComparableMixin if they don't support reconfig
        elif not hasattr(old, 'reconfigService'):
            if old != new:
                removed_names.add(n)
                added_names.add(n)

    # removals first
    for sch_name in removed_names:
        log.msg("removing scheduler '%s'" % (sch_name,))
        sch = old_by_name[sch_name]
        yield defer.maybeDeferred(lambda: sch.disownServiceParent())
        sch.master = None

    # .. then additions
    for sch_name in added_names:
        log.msg("adding scheduler '%s'" % (sch_name,))
        sch = new_by_name[sch_name]

        # get the scheduler's objectid
        class_name = '%s.%s' % (sch.__class__.__module__,
                                sch.__class__.__name__)
        objectid = yield self.master.db.state.getObjectId(
            sch.name, class_name)

        # set up the scheduler
        sch.objectid = objectid
        sch.master = self.master

        # *then* attach and start it
        sch.setServiceParent(self)

    metrics.MetricCountEvent.log("num_schedulers", len(list(self)),
                                 absolute=True)

    # reconfig any newly-added schedulers, as well as existing ones
    yield config.ReconfigurableServiceMixin.reconfigService(self,
                                                            new_config)

    timer.stop()
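# Equivalent set arithmetic for the diff step above, assuming (as the call
# site implies) that util.diffSets(old, new) returns (removed, added);
# the scheduler names are made up for illustration.
old_set = {'nightly', 'try'}
new_set = {'try', 'force'}

removed_names = old_set - new_set  # {'nightly'} -- dropped from the new config
added_names = new_set - old_set    # {'force'}   -- introduced by the new config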
def _start_native_worker(self, worker_type, worker_id, worker_options=None, details=None):

    # prohibit starting a worker twice
    #
    if worker_id in self._workers:
        emsg = "Could not start worker: a worker with ID '{}' is already running (or starting)".format(worker_id)
        self.log.error(emsg)
        raise ApplicationError('crossbar.error.worker_already_running', emsg)

    # check worker options
    #
    options = worker_options or {}
    if 'extra' in options:
        worker_options_extra = binascii.b2a_hex(cbor2.dumps(dict(options['extra'])))
    else:
        worker_options_extra = None

    try:
        if worker_type in self._node._native_workers:
            if self._node._native_workers[worker_type]['checkconfig_options']:
                self._node._native_workers[worker_type]['checkconfig_options'](self.personality, options)
            else:
                raise Exception('No checkconfig_options for worker type "{worker_type}" implemented!'.format(worker_type=worker_type))
        else:
            raise Exception('invalid worker type "{}"'.format(worker_type))
    except Exception as e:
        emsg = "Could not start native worker: invalid configuration ({})".format(e)
        self.log.error(emsg)
        raise ApplicationError('crossbar.error.invalid_configuration', emsg)

    # the fully qualified worker class as a string
    worker_class = qual(self._node._native_workers[worker_type]['worker_class'])

    # allow override Python executable from options
    #
    if 'python' in options:
        exe = options['python']

        # the executable must be an absolute path, e.g. /home/oberstet/pypy-2.2.1-linux64/bin/pypy
        #
        if not os.path.isabs(exe):
            emsg = "Invalid worker configuration: python executable '{}' must be an absolute path".format(exe)
            self.log.error(emsg)
            raise ApplicationError('crossbar.error.invalid_configuration', emsg)

        # of course the path must exist and actually be executable
        #
        if not (os.path.isfile(exe) and os.access(exe, os.X_OK)):
            emsg = "Invalid worker configuration: python executable '{}' does not exist or isn't an executable".format(exe)
            self.log.error(emsg)
            raise ApplicationError('crossbar.error.invalid_configuration', emsg)
    else:
        exe = sys.executable

    # allow override default Python module search paths from options
    #
    if 'pythonpath' in options:
        pythonpaths_to_add = [os.path.abspath(os.path.join(self._node._cbdir, p))
                              for p in options.get('pythonpath', [])]
    else:
        pythonpaths_to_add = []

    # assemble command line for forking the worker
    #
    # all native workers (routers and containers for now) start
    # from the same script in crossbar/worker/process.py or
    # from the command "crossbar _exec_worker" when crossbar is
    # running from a frozen executable (single-file, pyinstaller, etc)
    #
    if getattr(sys, 'frozen', False):
        # if we are inside a frozen crossbar executable, we need to invoke
        # the crossbar executable with a command ("_exec_worker")
        args = [exe, self._node.personality.NAME, "_exec_worker"]
    else:
        # we are invoking via "-m" so that .pyc files, __pycache__
        # etc work properly; this works everywhere except from frozen
        # executables, which are handled above
        args = [exe, "-u", "-m", "crossbar.worker.main"]

    args.extend(["--cbdir", self._node._cbdir])
    args.extend(["--node", str(self._node._node_id)])
    args.extend(["--worker", str(worker_id)])
    args.extend(["--realm", self._realm])
    args.extend(["--personality", class_name(self._node.personality)])
    args.extend(["--klass", worker_class])
    args.extend(["--loglevel", get_global_log_level()])
    if self._node.options.debug_lifecycle:
        args.append("--debug-lifecycle")
    if self._node.options.debug_programflow:
        args.append("--debug-programflow")
    if self._node.options.enable_vmprof:
        args.append("--vmprof")
    if "shutdown" in options:
        args.extend(["--shutdown", options["shutdown"]])
    if "restart" in options:
        args.extend(["--restart", options["restart"]])
    if worker_options_extra:
        args.extend(["--extra", worker_options_extra])

    # Node-level callback to inject worker arguments
    #
    self._node._extend_worker_args(args, options)

    # allow override worker process title from options
    #
    if options.get('title', None):
        args.extend(['--title', options['title']])

    # forward explicit reactor selection
    #
    if 'reactor' in options and sys.platform in options['reactor']:
        args.extend(['--reactor', options['reactor'][sys.platform]])
    # FIXME
    # elif self._node.options.reactor:
    #     args.extend(['--reactor', self._node.options.reactor])

    # create worker process environment
    #
    worker_env = create_process_env(options)

    # We need to use the same PYTHONPATH we were started with, so we can
    # find the Crossbar we're working with -- it may not be the same as the
    # one on the default path
    worker_env["PYTHONPATH"] = os.pathsep.join(pythonpaths_to_add + sys.path)

    # log name of worker
    #
    worker_logname = self._node._native_workers[worker_type]['logname']

    # each worker is run under its own dedicated WAMP auth role
    #
    worker_auth_role = 'crossbar.worker.{}'.format(worker_id)

    # topic URIs used (later)
    #
    starting_topic = self._node._native_workers[worker_type]['topics']['starting']
    started_topic = self._node._native_workers[worker_type]['topics']['started']

    # add worker tracking instance to the worker map ..
    #
    WORKER = self._node._native_workers[worker_type]['class']
    worker = WORKER(self, worker_id, details.caller, keeplog=options.get('traceback', None))
    self._workers[worker_id] = worker

    # create a (custom) process endpoint.
    #
    if platform.isWindows():
        childFDs = None  # Use the default Twisted ones
    else:
        # The communication between controller and container workers is
        # using WAMP running over 2 pipes.
        # For controller->native-worker traffic this runs over FD 0 (`stdin`)
        # and for the native-worker->controller traffic, this runs over FD 3.
        #
        # Note: We use FD 3, not FD 1 (`stdout`) or FD 2 (`stderr`) for
        # container->controller traffic, so that components running in the
        # container which happen to write to `stdout` or `stderr` do not
        # interfere with the container-controller communication.
        childFDs = {0: "w", 1: "r", 2: "r", 3: "r"}

    ep = WorkerProcessEndpoint(self._node._reactor, exe, args, env=worker_env,
                               worker=worker, childFDs=childFDs)

    # ready handling
    #
    def on_ready_success(worker_id):
        self.log.debug('{worker_type} worker "{worker_id}" process {pid} started',
                       worker_type=worker_logname, worker_id=worker.id, pid=worker.pid)
        self._node._reactor.addSystemEventTrigger(
            'before',
            'shutdown',
            self._cleanup_worker,
            self._node._reactor,
            worker,
        )
        worker.on_worker_started()

        started_info = {
            'id': worker.id,
            'status': worker.status,
            'started': utcstr(worker.started),
            'who': worker.who,
            'pid': worker.pid,
            'startup_time': (worker.started - worker.created).total_seconds() if worker.started else None
        }

        # FIXME: make start of stats printer dependent on log level ..
        if False:
            worker.log_stats(5.)

        self.publish(started_topic, started_info,
                     options=PublishOptions(exclude=details.caller))

        return started_info

    def on_ready_error(err):
        del self._workers[worker.id]
        emsg = 'Failed to start native worker: {}'.format(err.value)
        self.log.error(emsg)
        raise ApplicationError("crossbar.error.cannot_start", emsg, worker.getlog())

    worker.ready.addCallbacks(on_ready_success, on_ready_error)

    def on_exit_success(_):
        self.log.info("Node worker {worker.id} ended successfully", worker=worker)

        # clear worker log
        worker.log_stats(0)

        # remove the dedicated node router authrole we dynamically
        # added for the worker
        self._node._drop_worker_role(worker_auth_role)

        # remove our metadata tracking for the worker
        del self._workers[worker.id]

        # indicate that the worker exited successfully
        return True

    def on_exit_error(err):
        self.log.info("Node worker {worker.id} ended with error ({err})",
                      worker=worker, err=err)

        # clear worker log
        worker.log_stats(0)

        # remove the dedicated node router authrole we dynamically
        # added for the worker
        self._node._drop_worker_role(worker_auth_role)

        # remove our metadata tracking for the worker
        del self._workers[worker.id]

        # indicate that the worker exited with error
        return False

    def check_for_shutdown(was_successful):
        self.log.info('Checking for node shutdown: worker_exit_success={worker_exit_success}, shutdown_requested={shutdown_requested}, node_shutdown_triggers={node_shutdown_triggers}',
                      worker_exit_success=was_successful,
                      shutdown_requested=self._shutdown_requested,
                      node_shutdown_triggers=self._node._node_shutdown_triggers)

        shutdown = self._shutdown_requested

        # automatically shutdown node whenever a worker ended (successfully, or with error)
        #
        if NODE_SHUTDOWN_ON_WORKER_EXIT in self._node._node_shutdown_triggers:
            self.log.info("Node worker ended, and trigger '{trigger}' is active: will shutdown node ..",
                          trigger=NODE_SHUTDOWN_ON_WORKER_EXIT)
            term_print('CROSSBAR:NODE_SHUTDOWN_ON_WORKER_EXIT')
            shutdown = True

        # automatically shutdown node when worker ended with error
        #
        elif not was_successful and NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR in self._node._node_shutdown_triggers:
            self.log.info("Node worker ended with error, and trigger '{trigger}' is active: will shutdown node ..",
                          trigger=NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR)
            term_print('CROSSBAR:NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR')
            shutdown = True

        # automatically shutdown node when no more workers are left
        #
        elif len(self._workers) == 0 and NODE_SHUTDOWN_ON_LAST_WORKER_EXIT in self._node._node_shutdown_triggers:
            self.log.info("No more node workers running, and trigger '{trigger}' is active: will shutdown node ..",
                          trigger=NODE_SHUTDOWN_ON_LAST_WORKER_EXIT)
            term_print('CROSSBAR:NODE_SHUTDOWN_ON_LAST_WORKER_EXIT')
            shutdown = True

        # initiate shutdown (but only if we are not already shutting down)
        #
        if shutdown:
            self.shutdown()
        else:
            self.log.info('Node will continue to run!')

    d_on_exit = worker.exit.addCallbacks(on_exit_success, on_exit_error)
    d_on_exit.addBoth(check_for_shutdown)

    # create a transport factory for talking WAMP to the native worker
    #
    transport_factory = create_native_worker_client_factory(
        self._node._router_session_factory, worker_auth_role, worker.ready, worker.exit)
    transport_factory.noisy = False
    self._workers[worker_id].factory = transport_factory

    # now (immediately before actually forking) signal the starting of the worker
    #
    starting_info = {
        'id': worker_id,
        'status': worker.status,
        'created': utcstr(worker.created),
        'who': worker.who,
    }

    # the caller gets a progressive result ..
    if details.progress:
        details.progress(starting_info)

    # .. while all others get an event
    self.publish(starting_topic, starting_info,
                 options=PublishOptions(exclude=details.caller))

    # only the following line will actually exec a new worker process - everything
    # before is just setup for this moment:
    self.log.debug('Starting new managed worker process for {worker_logname} worker "{worker_id}" using {exe} with args {args}',
                   worker_id=worker_id, worker_logname=worker_logname, exe=exe, args=args)
    d = ep.connect(transport_factory)

    def on_connect_success(proto):
        # this seems to be called immediately when the child process
        # has been forked. even if it then immediately fails because
        # e.g. the executable doesn't even exist. in other words,
        # I'm not sure under what conditions the deferred will errback ..
        self.log.debug('Native worker "{worker_id}" connected', worker_id=worker_id)

        worker.on_worker_connected(proto)

        # dynamically add a dedicated authrole to the router
        # for the worker we've just started
        self._node._add_worker_role(worker_auth_role, options)

    def on_connect_error(err):
        # not sure when this errback is triggered at all ..
        self.log.error("Internal error: connection to forked native worker failed ({err})", err=err)

        # in any case, forward the error ..
        worker.ready.errback(err)

    d.addCallbacks(on_connect_success, on_connect_error)

    return worker.ready
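# Standalone sketch (POSIX only, assuming "python" is on PATH) of the pipe
# layout used above: with childFDs={0: "w", 1: "r", 2: "r", 3: "r"} the parent
# writes to the child's stdin and reads a dedicated FD 3, so the child's
# stdout/stderr remain free for ordinary logging.
from twisted.internet import protocol, reactor

class FDLogger(protocol.ProcessProtocol):
    def childDataReceived(self, childFD, data):
        # data from FD 1 (stdout), FD 2 (stderr) and FD 3 arrives here
        print("fd %d: %r" % (childFD, data))

    def processEnded(self, reason):
        reactor.stop()

reactor.spawnProcess(
    FDLogger(), "python",
    ["python", "-c", "import os; os.write(3, b'over fd 3'); print('over stdout')"],
    env=None, childFDs={0: "w", 1: "r", 2: "r", 3: "r"})
reactor.run()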
def setLogStr(self):
    self.logstr = reflect.qual(self.protocol.__class__) + " (TUNTAP)"
def _stateFrom(state):
    retval.setObj(Instance(reflect.qual(obj.__class__),
                           self.jellyToAO(state)))
def doIteration(self, delay):
    """
    Do one iteration over the readers and writers which have been added.
    """
    raise NotImplementedError(
        reflect.qual(self.__class__) + " did not implement doIteration")
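# Sketch of the abstract-stub pattern shared by getWriters, removeWriter,
# getDestination and doIteration above (Base/MyReactor are hypothetical):
# the NotImplementedError names the *concrete* subclass via reflect.qual.
from twisted.python import reflect

class Base(object):
    def doIteration(self, delay):
        raise NotImplementedError(
            reflect.qual(self.__class__) + " did not implement doIteration")

class MyReactor(Base):
    pass

try:
    MyReactor().doIteration(0)
except NotImplementedError as e:
    print(e)  # -> "__main__.MyReactor did not implement doIteration"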