def test_resourceFDImplementation(self):
    """
    L{_FDDetector._fallbackFDImplementation} uses the L{resource} module if
    it is available, returning a range of integers from 0 to the minimum of
    C{1024} and the hard I{NOFILE} limit.
    """
    # When the resource module is here, use its value.
    self.revealResourceModule(512)
    self.assertEqual(
        list(range(512)),
        list(self.detector._fallbackFDImplementation()))

    # But limit its value to the arbitrarily selected value 1024.
    self.revealResourceModule(2048)
    self.assertEqual(
        list(range(1024)),
        list(self.detector._fallbackFDImplementation()))
def test_singleThread(self):
    """
    The submission of a new job to a thread pool in response to the
    C{onResult} callback does not cause a new thread to be added to the
    thread pool.

    This requires the thread which calls C{onResult} to have first marked
    itself as available so that when the new job is queued, that thread
    may be considered to run it.  This is desirable so that when only N
    jobs are ever being executed in the thread pool at once, only N
    threads will ever be created.
    """
    # Ensure no threads running
    self.assertEqual(self.threadpool.workers, 0)

    event = threading.Event()
    event.clear()

    def onResult(success, counter):
        event.set()

    for i in range(10):
        self.threadpool.callInThreadWithCallback(
            onResult, lambda: None)
        event.wait(10)
        event.clear()

    self.assertEqual(self.threadpool.workers, 1)
def test_synchronization(self):
    """
    If multiple threads are waiting on an event (via blocking on something
    in a callable passed to L{threadpool.ThreadPool.callInThread}), and
    there is spare capacity in the threadpool, sending to
    L{threadpool.ThreadPool.callInThread} another callable which will cause
    those to un-block will reliably run that callable and un-block the
    blocked threads promptly.

    @note: This is not really a unit test, it is a stress-test.  You may
        need to run it with C{trial -u} to fail reliably if there is a
        problem.  It is very hard to regression-test for this particular
        bug - one where the thread pool may consider itself as having
        "enough capacity" when it really needs to spin up a new thread if
        it possibly can - in a deterministic way, since the bug can only
        be provoked by subtle race conditions.
    """
    timeout = self.getTimeout()
    self.threadpool.callInThread(self.event.set)
    self.event.wait(timeout)
    self.event.clear()
    for i in range(3):
        self.threadpool.callInThread(self.event.wait)
    self.threadpool.callInThread(self.event.set)
    self.event.wait(timeout)
    if not self.event.isSet():
        self.event.set()
        self.fail(
            "'set' did not run in thread; timed out waiting on 'wait'."
        )
def _threadpoolTest(self, method):
    """
    Test synchronization of calls made with C{method}, which should be
    one of the mechanisms of the threadpool to execute work in threads.
    """
    # This is a schizophrenic test: it seems to be trying to test
    # both the callInThread()/dispatch() behavior of the ThreadPool as well
    # as the serialization behavior of threadable.synchronize().  It
    # would probably make more sense as two much simpler tests.
    N = 10

    tp = threadpool.ThreadPool()
    tp.start()
    self.addCleanup(tp.stop)

    waiting = threading.Lock()
    waiting.acquire()
    actor = Synchronization(N, waiting)

    for i in range(N):
        method(tp, actor)

    self._waitForLock(waiting)

    self.assertFalse(actor.failures,
                     "run() re-entered %d times" % (actor.failures,))
def test_producer(self):
    """
    Verify that the transport of a protocol connected to L{StandardIO}
    is a working L{IProducer} provider.
    """
    p = StandardIOTestProcessProtocol()
    d = p.onCompletion

    written = []
    toWrite = list(range(100))

    def connectionMade(ign):
        if toWrite:
            written.append(intToBytes(toWrite.pop()) + b"\n")
            proc.write(written[-1])
            reactor.callLater(0.01, connectionMade, None)

    proc = self._spawnProcess(p, b'stdio_test_producer')
    p.onConnection.addCallback(connectionMade)

    def processEnded(reason):
        self.assertEqual(p.data[1], b''.join(written))
        self.assertFalse(
            toWrite,
            "Connection lost with %d writes left to go." % (len(toWrite),))
        reason.trap(error.ProcessDone)

    return self._requireFailure(d, processEnded)
def _parseAttributes(self, data):
    """
    Parse an SFTP attributes block: a C{!L} flags word followed by the
    fields which the flags declare as present.  Return a C{(attrs,
    remaining data)} tuple.
    """
    (flags,) = struct.unpack('!L', data[:4])
    attrs = {}
    data = data[4:]
    if flags & FILEXFER_ATTR_SIZE == FILEXFER_ATTR_SIZE:
        (size,) = struct.unpack('!Q', data[:8])
        attrs['size'] = size
        data = data[8:]
    if flags & FILEXFER_ATTR_OWNERGROUP == FILEXFER_ATTR_OWNERGROUP:
        uid, gid = struct.unpack('!2L', data[:8])
        attrs['uid'] = uid
        attrs['gid'] = gid
        data = data[8:]
    if flags & FILEXFER_ATTR_PERMISSIONS == FILEXFER_ATTR_PERMISSIONS:
        (perms,) = struct.unpack('!L', data[:4])
        attrs['permissions'] = perms
        data = data[4:]
    if flags & FILEXFER_ATTR_ACMODTIME == FILEXFER_ATTR_ACMODTIME:
        atime, mtime = struct.unpack('!2L', data[:8])
        attrs['atime'] = atime
        attrs['mtime'] = mtime
        data = data[8:]
    if flags & FILEXFER_ATTR_EXTENDED == FILEXFER_ATTR_EXTENDED:
        (extended_count,) = struct.unpack('!L', data[:4])
        data = data[4:]
        for i in range(extended_count):
            extended_type, data = getNS(data)
            extended_data, data = getNS(data)
            attrs['ext_%s' % nativeString(extended_type)] = extended_data
    return attrs, data
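# A hedged sketch (not part of the original module) of the inverse of
# _parseAttributes, handy for constructing test input for it.  Only the size
# and permissions fields are shown; the flag values below follow the SFTP
# version 3 draft and are assumed to match the module's FILEXFER_ATTR_*
# constants.
import struct

FILEXFER_ATTR_SIZE = 0x00000001
FILEXFER_ATTR_PERMISSIONS = 0x00000004

def packSizeAndPermissions(size, permissions):
    # Flags word first, then the fields in the order the parser reads them:
    # a 64-bit size followed by a 32-bit permissions word.
    flags = FILEXFER_ATTR_SIZE | FILEXFER_ATTR_PERMISSIONS
    return (struct.pack('!L', flags)
            + struct.pack('!Q', size)
            + struct.pack('!L', permissions))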
def _waitForLock(self, lock):
    items = range(1000000)
    for i in items:
        if lock.acquire(False):
            break
        time.sleep(1e-5)
    else:
        self.fail("A long time passed without succeeding")
def test_fallbackFDImplementation(self):
    """
    L{_FDDetector._fallbackFDImplementation}, the implementation of last
    resort, succeeds with a fixed range of integers from 0 to 1024 when
    the L{resource} module is not importable.
    """
    self.hideResourceModule()
    self.assertEqual(list(range(1024)),
                     list(self.detector._fallbackFDImplementation()))
def gotTagEnd(self, name):
    # print ' '*self.indentlevel, 'end tag',name
    # self.indentlevel -= 1
    if not self.elementstack:
        if self.beExtremelyLenient:
            return
        raise MismatchedTags(*((self.filename, "NOTHING", name)
                               + self.saveMark() + (0, 0)))
    el = self.elementstack.pop()
    pfxdix = self.nsstack[-1][2]
    if self.nsstack[-1][1] is el:
        nstuple = self.nsstack.pop()
    else:
        nstuple = None
    if self.caseInsensitive:
        tn = el.tagName.lower()
        cname = name.lower()
    else:
        tn = el.tagName
        cname = name

    nsplit = name.split(':', 1)
    if len(nsplit) == 2:
        pfx, newname = nsplit
        ns = pfxdix.get(pfx, None)
        if ns is not None:
            if el.namespace != ns:
                if not self.beExtremelyLenient:
                    raise MismatchedTags(*((self.filename, el.tagName, name)
                                           + self.saveMark() + el._markpos))
    if not (tn == cname):
        if self.beExtremelyLenient:
            if self.elementstack:
                lastEl = self.elementstack[0]
                for idx in range(len(self.elementstack)):
                    if self.elementstack[-(idx + 1)].tagName == cname:
                        self.elementstack[-(idx + 1)].endTag(name)
                        break
                else:
                    # this was a garbage close tag; wait for a real one
                    self.elementstack.append(el)
                    if nstuple is not None:
                        self.nsstack.append(nstuple)
                    return
                del self.elementstack[-(idx + 1):]
                if not self.elementstack:
                    self.documents.append(lastEl)
                    return
        else:
            raise MismatchedTags(*((self.filename, el.tagName, name)
                                   + self.saveMark() + el._markpos))
    el.endTag(name)
    if not self.elementstack:
        self.documents.append(el)
    if self.beExtremelyLenient and el.tagName == "script":
        self._fixScriptElement(el)
def __init__(self, l, containerType):
    """
    @param l: The list of objects which may contain some not-yet-referenced
        objects.

    @param containerType: A type of container objects (e.g., C{tuple} or
        C{set}).
    """
    NotKnown.__init__(self)
    self.containerType = containerType
    self.l = l
    self.locs = list(range(len(l)))
    for idx in range(len(l)):
        if not isinstance(l[idx], NotKnown):
            self.locs.remove(idx)
        else:
            l[idx].addDependant(self, idx)
    if not self.locs:
        self.resolveDependants(self.containerType(self.l))
def threadedFunction():
    # Hopefully a hundred thousand queued calls is enough to
    # trigger the error condition
    for i in range(100000):
        try:
            reactor.callFromThread(lambda: None)
        except:
            self.failure = failure.Failure()
            break
    waiter.set()
def _resetSignalDisposition(self):
    # The Python interpreter ignores some signals, and our child
    # process will inherit that behaviour.  To have a child process
    # that responds to signals normally, we need to reset our
    # child process's signal handling (just) after we fork and
    # before we execvpe.
    for signalnum in range(1, signal.NSIG):
        if signal.getsignal(signalnum) == signal.SIG_IGN:
            # Reset signal handling to the default
            signal.signal(signalnum, signal.SIG_DFL)
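# A small illustration (not from the original module) of the behaviour that
# _resetSignalDisposition compensates for: CPython installs SIG_IGN for
# SIGPIPE at startup, and an ignored disposition survives exec in the child
# unless it is reset to SIG_DFL first.
import signal

def ignoredSignals():
    # Return the signal numbers the current interpreter is ignoring.
    return [n for n in range(1, signal.NSIG)
            if signal.getsignal(n) == signal.SIG_IGN]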
def packet_NAME(self, data):
    d, data = self._parseRequest(data)
    count, = struct.unpack('!L', data[:4])
    data = data[4:]
    files = []
    for i in range(count):
        filename, data = getNS(data)
        longname, data = getNS(data)
        attrs, data = self._parseAttributes(data)
        files.append((filename, longname, attrs))
    d.callback(files)
def openfile(self, fname, mode):
    """
    This is a mock for L{open}.  It keeps track of opened files so extra
    descriptors can be returned from the mock for L{os.listdir} when used
    on one of the list-of-filedescriptors directories.

    A L{FakeFile} is returned which can be closed to remove the new
    descriptor from the open list.
    """
    # Find the smallest unused file descriptor and give it to the new file.
    f = FakeFile(self, min(set(range(1024)) - set(self._files)))
    self._files.append(f.fd)
    return f
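# A tiny, hypothetical illustration of the allocation trick used above: the
# smallest unused descriptor is the minimum of the candidate range minus the
# set of descriptors already in use.
inUse = {0, 1, 2, 4}
smallestFree = min(set(range(1024)) - inUse)
assert smallestFree == 3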
def test_tooMuchWorkBeforeStarting(self):
    """
    If the amount of work before starting exceeds the maximum number of
    threads allowed to the threadpool, only the maximum count will be
    started.
    """
    helper = PoolHelper(self, 0, 10)
    n = 50
    for x in range(n):
        helper.threadpool.callInThread(lambda: None)
    helper.performAllCoordination()
    self.assertEqual(helper.workers, [])
    helper.threadpool.start()
    helper.performAllCoordination()
    self.assertEqual(len(helper.workers), helper.threadpool.max)
def test_workBeforeStarting(self):
    """
    If a threadpool is told to do work before starting, then upon starting
    up, it will start enough workers to handle all of the enqueued work
    that it's been given.
    """
    helper = PoolHelper(self, 0, 10)
    n = 5
    for x in range(n):
        helper.threadpool.callInThread(lambda: None)
    helper.performAllCoordination()
    self.assertEqual(helper.workers, [])
    helper.threadpool.start()
    helper.performAllCoordination()
    self.assertEqual(len(helper.workers), n)
def test_callMultiple(self):
    """
    L{threads.callMultipleInThread} calls multiple functions in a thread.
    """
    L = []
    N = 10
    d = defer.Deferred()

    def finished():
        self.assertEqual(L, list(range(N)))
        d.callback(None)

    threads.callMultipleInThread(
        [(L.append, (i,), {}) for i in range(N)]
        + [(reactor.callFromThread, (finished,), {})])
    return d
def _setupChild(self, masterfd, slavefd):
    """
    Set up child process after C{fork()} but before C{exec()}.

    This involves:

        - closing C{masterfd}, since it is not used in the subprocess

        - creating a new session with C{os.setsid}

        - changing the controlling terminal of the process (and the new
          session) to point at C{slavefd}

        - duplicating C{slavefd} to standard input, output, and error

        - closing all other open file descriptors (according to
          L{_listOpenFDs})

        - re-setting all signal handlers to C{SIG_DFL}

    @param masterfd: The master end of a PTY file descriptor pair opened
        with C{openpty}.
    @type masterfd: L{int}

    @param slavefd: The slave end of a PTY opened with C{openpty}.
    @type slavefd: L{int}
    """
    os.close(masterfd)
    os.setsid()
    fcntl.ioctl(slavefd, termios.TIOCSCTTY, '')

    for fd in range(3):
        if fd != slavefd:
            os.close(fd)

    os.dup2(slavefd, 0)  # stdin
    os.dup2(slavefd, 1)  # stdout
    os.dup2(slavefd, 2)  # stderr

    for fd in _listOpenFDs():
        if fd > 2:
            try:
                os.close(fd)
            except:
                pass

    self._resetSignalDisposition()
def instantiateAddCallbacksAfterResult(n):
    """
    Create a deferred, fire it, and then add a trivial
    callback/errback/both to it the given number of times.  The result is
    processed through the callbacks as they are added.
    """
    d = defer.Deferred()
    def f(result):
        return result
    d.callback(1)
    for i in range(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f)
def benchmark(chunkSize, lineLength, numLines):
    bytes = (b'x' * lineLength + b'\r\n') * numLines
    chunkCount = len(bytes) // chunkSize + 1
    chunks = []
    for n in range(chunkCount):
        chunks.append(bytes[n * chunkSize:(n + 1) * chunkSize])
    assert b''.join(chunks) == bytes, (chunks, bytes)
    p = CollectingLineReceiver()

    before = time.clock()
    deliver(p, chunks)
    after = time.clock()

    assert bytes.splitlines() == p.lines, (bytes.splitlines(), p.lines)

    print('chunkSize:', chunkSize, end=' ')
    print('lineLength:', lineLength, end=' ')
    print('numLines:', numLines, end=' ')
    print('CPU Time: ', after - before)
def _fallbackFDImplementation(self):
    """
    Fallback implementation where either the resource module can inform us
    about the upper bound of how many FDs to expect, or where we just guess
    a constant maximum if there is no resource module.

    All possible file descriptors from 0 to that upper bound are returned
    with no attempt to exclude invalid file descriptor values.
    """
    try:
        import resource
    except ImportError:
        maxfds = 1024
    else:
        # OS-X reports 9223372036854775808. That's a lot of fds to close.
        # OS-X should get the /dev/fd implementation instead, so mostly
        # this check probably isn't necessary.
        maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])

    return range(maxfds)
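# A short hedged example (POSIX only) of the resource call used above:
# getrlimit returns a (soft, hard) pair, and the fallback caps the hard
# RLIMIT_NOFILE value at the arbitrary ceiling of 1024.
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
maxfds = min(1024, hard)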
def pauseUnpause(n):
    """
    Adds the given number of callbacks/errbacks/both to a deferred while
    it is paused, and unpauses it, triggering the processing of the value
    through the callbacks.
    """
    d = defer.Deferred()
    def f(result):
        return result
    d.callback(1)
    d.pause()
    for i in range(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f)
    d.unpause()
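# A minimal driver sketch, not part of the original benchmark module, showing
# how helpers such as pauseUnpause and instantiateAddCallbacksAfterResult
# might be timed; timeit and the call counts here are assumptions rather than
# the original harness.
import timeit

def timeBenchmark(func, n=1000, repeat=5):
    # Run func(n) several times and report the best wall-clock time observed.
    best = min(timeit.repeat(lambda: func(n), number=1, repeat=repeat))
    print('%s(%d): %.6f seconds' % (func.__name__, n, best))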
def test_processCommandLineArguments(self):
    """
    Arguments given to spawnProcess are passed to the child process as
    originally intended.
    """
    us = b"twisted.internet.test.process_cli"
    args = [b'hello', b'"', b' \t|<>^&', br'"\\"hello\\"',
            br'"foo\ bar baz\""']
    # Ensure that all non-NUL characters can be passed too.
    allChars = "".join(map(chr, range(1, 255)))
    if isinstance(allChars, unicode):
        allChars.encode("utf-8")

    reactor = self.buildReactor()

    def processFinished(finishedArgs):
        output, err, code = finishedArgs
        output = output.split(b'\0')
        # Drop the trailing \0.
        output.pop()
        self.assertEqual(args, output)

    def shutdown(result):
        reactor.stop()
        return result

    def spawnChild():
        d = succeed(None)
        d.addCallback(lambda dummy: utils.getProcessOutputAndValue(
            pyExe, [b"-m", us] + args, env=properEnv, reactor=reactor))
        d.addCallback(processFinished)
        d.addBoth(shutdown)

    reactor.callWhenRunning(spawnChild)
    self.runReactor(reactor)
def finished():
    self.assertEqual(L, list(range(N)))
    d.callback(None)
def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
    """
    Fork and then exec sub-process.

    @param path: the path where to run the new process.
    @type path: L{bytes} or L{unicode}

    @param uid: if defined, the uid used to run the new process.
    @type uid: L{int}

    @param gid: if defined, the gid used to run the new process.
    @type gid: L{int}

    @param executable: the executable to run in a new process.
    @type executable: L{str}

    @param args: arguments used to create the new process.
    @type args: L{list}.

    @param environment: environment used for the new process.
    @type environment: L{dict}.

    @param kwargs: keyword arguments to L{_setupChild} method.
    """
    collectorEnabled = gc.isenabled()
    gc.disable()
    try:
        self.pid = os.fork()
    except:
        # Still in the parent process
        if collectorEnabled:
            gc.enable()
        raise
    else:
        if self.pid == 0:
            # A return value of 0 from fork() indicates that we are now
            # executing in the child process.

            # Do not put *ANY* code outside the try block. The child
            # process must either exec or _exit. If it gets outside this
            # block (due to an exception that is not handled here, but
            # which might be handled higher up), there will be two copies
            # of the parent running in parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # are no exit paths.
            try:
                # Stop debugging. If I am, I don't care anymore.
                sys.settrace(None)
                self._setupChild(**kwargs)
                self._execChild(path, uid, gid, executable, args,
                                environment)
            except:
                # If there are errors, try to write something descriptive
                # to stderr before exiting.

                # The parent's stderr isn't *necessarily* fd 2 anymore, or
                # even still available; however, even libc assumes that
                # write(2, err) is a useful thing to attempt.
                try:
                    # On Python 3, print_exc takes a text stream, but
                    # on Python 2 it still takes a byte stream.  So on
                    # Python 3 we will wrap up the byte stream returned
                    # by os.fdopen using TextIOWrapper.

                    # We hard-code UTF-8 as the encoding here, rather
                    # than looking at something like
                    # getfilesystemencoding() or sys.stderr.encoding,
                    # because we want an encoding that will be able to
                    # encode the full range of code points.  We are
                    # (most likely) talking to the parent process on
                    # the other end of this pipe and not the filesystem
                    # or the original sys.stderr, so there's no point
                    # in trying to match the encoding of one of those
                    # objects.
                    stderr = io.TextIOWrapper(os.fdopen(2, 'wb'),
                                              encoding="utf-8")
                    msg = ("Upon execvpe {0} {1} in environment id {2}"
                           "\n:").format(executable, str(args),
                                         id(environment))
                    stderr.write(msg)
                    traceback.print_exc(file=stderr)
                    stderr.flush()

                    for fd in range(3):
                        os.close(fd)
                except:
                    # Handle all errors during the error-reporting process
                    # silently to ensure that the child terminates.
                    pass

            # See comment above about making sure that we reach this line
            # of code.
            os._exit(1)

    # we are now in parent process
    if collectorEnabled:
        gc.enable()
    self.status = -1  # this records the exit status of the child
def _junkPath(self):
    junkPath = self.mktemp()
    with open(junkPath, 'wb') as junkFile:
        for i in range(1024):
            junkFile.write(intToBytes(i) + b'\n')
    return junkPath
def cbLost(reason):
    self.assertEqual(next(count), howMany + 1)
    self.assertEqual(
        path.getContent(),
        b''.join(map(intToBytes, range(howMany))))
def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
    """
    Fork and then exec sub-process.

    @param path: the path where to run the new process.
    @type path: L{bytes} or L{unicode}

    @param uid: if defined, the uid used to run the new process.
    @type uid: L{int}

    @param gid: if defined, the gid used to run the new process.
    @type gid: L{int}

    @param executable: the executable to run in a new process.
    @type executable: L{str}

    @param args: arguments used to create the new process.
    @type args: L{list}.

    @param environment: environment used for the new process.
    @type environment: L{dict}.

    @param kwargs: keyword arguments to L{_setupChild} method.
    """
    collectorEnabled = gc.isenabled()
    gc.disable()
    try:
        self.pid = os.fork()
    except:
        # Still in the parent process
        if collectorEnabled:
            gc.enable()
        raise
    else:
        if self.pid == 0:
            # A return value of 0 from fork() indicates that we are now
            # executing in the child process.

            # Do not put *ANY* code outside the try block. The child
            # process must either exec or _exit. If it gets outside this
            # block (due to an exception that is not handled here, but
            # which might be handled higher up), there will be two copies
            # of the parent running in parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # are no exit paths.
            try:
                # Stop debugging. If I am, I don't care anymore.
                sys.settrace(None)
                self._setupChild(**kwargs)
                self._execChild(path, uid, gid, executable, args,
                                environment)
            except:
                # If there are errors, try to write something descriptive
                # to stderr before exiting.

                # The parent's stderr isn't *necessarily* fd 2 anymore, or
                # even still available; however, even libc assumes that
                # write(2, err) is a useful thing to attempt.
                try:
                    stderr = os.fdopen(2, 'wb')
                    msg = ("Upon execvpe {0} {1} in environment id {2}"
                           "\n:").format(executable, str(args),
                                         id(environment))

                    if _PY3:
                        # On Python 3, print_exc takes a text stream, but
                        # on Python 2 it still takes a byte stream.  So on
                        # Python 3 we will wrap up the byte stream returned
                        # by os.fdopen using TextIOWrapper.

                        # We hard-code UTF-8 as the encoding here, rather
                        # than looking at something like
                        # getfilesystemencoding() or sys.stderr.encoding,
                        # because we want an encoding that will be able to
                        # encode the full range of code points.  We are
                        # (most likely) talking to the parent process on
                        # the other end of this pipe and not the filesystem
                        # or the original sys.stderr, so there's no point
                        # in trying to match the encoding of one of those
                        # objects.
                        stderr = io.TextIOWrapper(stderr, encoding="utf-8")

                    stderr.write(msg)
                    traceback.print_exc(file=stderr)
                    stderr.flush()
                    for fd in range(3):
                        os.close(fd)
                except:
                    # Handle all errors during the error-reporting process
                    # silently to ensure that the child terminates.
                    pass

            # See comment above about making sure that we reach this line
            # of code.
            os._exit(1)

    # we are now in parent process
    if collectorEnabled:
        gc.enable()
    self.status = -1  # this records the exit status of the child
def shouldPreserveSpace(self):
    for edx in range(len(self.elementstack)):
        el = self.elementstack[-edx]
        if el.tagName == 'pre' or el.getAttribute("xml:space", '') == 'preserve':
            return 1
    return 0