    def _runWithCallback(self, callback, errback, func, args, kwargs):
        try:
            result = apply(func, args, kwargs)
        except Exception, e:
            task.schedule(errback, e)
        else:
            task.schedule(callback, result)

    def dispatchWithCallback(self, owner, callback, errback, func, *args, **kw):
        """Dispatch a function, returning the result to a callback function.

        The callback function will be called in the main event loop thread.
        """
        self.dispatchApply(owner, callback, errback, func, args, kw)

    def dispatchApply(self, owner, callback, errback, func, args, kw):
        self.dispatch(owner, self._runWithCallback,
                      callback, errback, func, args, kw)

theDispatcher = ThreadDispatcher(0)

def dispatchApply(callback, errback, func, args, kw):
    theDispatcher.dispatchApply(log.logOwner.owner(),
                                callback, errback, func, args, kw)

def dispatch(callback, errback, func, *args, **kw):
    dispatchApply(callback, errback, func, args, kw)

main.addShutdown(theDispatcher.stop)

threadable.synchronize(ThreadDispatcher)
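# For comparison, a minimal sketch of what this legacy dispatcher provides:
# run a function in a worker thread and deliver the result (or failure) back
# in the main event loop thread. twisted.internet.threads.deferToThread does
# exactly that today; compute/onResult are illustrative names.
from twisted.internet.threads import deferToThread

def compute(x, y):
    # Runs in a reactor thread-pool thread.
    return x + y

def onResult(result):
    # Runs back in the main event loop thread.
    print("result: %r" % (result,))

d = deferToThread(compute, 2, 3)
d.addCallbacks(onResult, lambda failure: failure.printTraceback())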
                               # code is broken.
            self.lock.release()
        else:
            self.failures += 1

        # This is just the only way I can think of to wake up the test
        # method.  It doesn't actually have anything to do with the
        # test.
        self.lock.acquire()
        self.runs.append(None)
        if len(self.runs) == self.N:
            self.waiting.release()
        self.lock.release()

    synchronized = ["run"]

threadable.synchronize(Synchronization)


class ThreadPoolTestCase(unittest.SynchronousTestCase):
    """
    Test threadpools.
    """
    def getTimeout(self):
        """
        Return number of seconds to wait before giving up.
        """
        return 5  # Really should be order of magnitude less
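# A minimal sketch of the pattern this test exercises, assuming only the
# public threadable API: list method names in a `synchronized` class
# attribute, then pass the class to threadable.synchronize(). Calls to the
# listed methods on one instance are serialized by a per-instance lock.
from twisted.python import threadable

class Tally:
    synchronized = ["bump"]     # methods to serialize per instance

    def __init__(self):
        self.count = 0

    def bump(self):
        self.count += 1         # no longer racy across threads

threadable.synchronize(Tally)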
        except StopIteration:
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None
        else:
            self.request.write(chunk)

    def pauseProducing(self):
        pass

    def stopProducing(self):
        self.request = None

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(QuixoteProducer)


def run(create_publisher, host='', port=80):
    """Runs a Twisted HTTP server that publishes a Quixote application."""
    publisher = create_publisher()
    factory = QuixoteFactory(publisher)
    reactor.listenTCP(port, factory, interface=host)
    reactor.run()


if __name__ == '__main__':
    from quixote.server.util import main
    main(run)
        self.request = None

    # Remotely relay producer interface.
    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(MPEGTSTransfer)


class DynamTSTransfer(pb.Viewable):
    def __init__(self, path, pmt, *pids):
        self.path = path
        #log.msg("DynamTSTransfer: pmt: %s, pids: %s" % (pmt, pids))
        self.pmt = pmt
        self.pids = pids
        self.didpat = False

    def resumeProducing(self):
        if not self.request:
            return
        repcnt = 0
        data = self.fp.read(min(abstract.FileDescriptor.bufferSize,
from twisted.python import threadable


class TestObject:
    synchronized = ['aMethod']

    x = -1
    y = 1

    def aMethod(self):
        for i in range(10):
            self.x, self.y = self.y, self.x
            self.z = self.x + self.y
            assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)

threadable.synchronize(TestObject)


class SynchronizationTests(unittest.SynchronousTestCase):
    def setUp(self):
        """
        Reduce the CPython check interval so that thread switches happen much
        more often, hopefully exercising more possible race conditions.  Also,
        delay actual test startup until the reactor has been started.
        """
        if _PY3:
            if getattr(sys, 'getswitchinterval', None) is not None:
                self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
                sys.setswitchinterval(0.0000001)
        else:
            if getattr(sys, 'getcheckinterval', None) is not None:
                self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
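# A hypothetical driver (not part of the test file) showing the race
# TestObject is built to expose: without synchronize(), two threads can
# interleave inside aMethod between the swap and the addition, so z can be
# observed as nonzero and the assertion fires.
import threading

def hammer(obj, n=1000):
    for _ in range(n):
        obj.aMethod()

obj = TestObject()
workers = [threading.Thread(target=hammer, args=(obj,)) for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()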
    def resumeProducing(self):
        if not self.request:
            return
        self.request.write(self.file.read(abstract.FileDescriptor.bufferSize))
        if self.file.tell() == self.size:
            self.request.finish()
            self.request = None

    def pauseProducing(self):
        pass

    def stopProducing(self):
        self.file.close()
        self.request = None

    # Remotely relay producer interface.
    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(FileTransfer)
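# A hedged usage sketch: FileTransfer is a pull producer, so the transport
# calls resumeProducing() each time it wants another chunk. The constructor
# arguments are assumed, and some variants of this class register themselves
# with the request in __init__; the registration is shown explicitly here.
transfer = FileTransfer(fileObj, size, request)
request.registerProducer(transfer, False)   # False marks a pull producer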
        self.streamIter = None

    # Remotely relay producer interface.
    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(TWProducer)


class QuixoteFactory(http.HTTPFactory):
    def __init__(self, publisher):
        self.publisher = publisher
        http.HTTPFactory.__init__(self, None)

    def buildProtocol(self, addr):
        p = http.HTTPFactory.buildProtocol(self, addr)
        p.requestFactory = QuixoteTWRequest
        return p
import pickle, time

from twisted.trial import unittest
from twisted.python import log, threadable, threadpool
from twisted.internet import reactor, interfaces


class Counter(log.Logger):
    index = 0

    def add(self):
        self.index = self.index + 1

    synchronized = ["add"]

threadable.synchronize(Counter)


class ThreadPoolTestCase(unittest.TestCase):
    """Test threadpools."""

    def testPersistence(self):
        tp = threadpool.ThreadPool(7, 20)
        tp.start()
        time.sleep(0.1)
        self.assertEquals(len(tp.threads), 7)
        self.assertEquals(tp.min, 7)
        self.assertEquals(tp.max, 20)

        # check that unpickled threadpool has same number of threads
        s = pickle.dumps(tp)
        tp2 = pickle.loads(s)
        for name in glob.glob("%s.*" % self.path):
            try:
                counter = int(name.split('.')[-1])
                if counter:
                    result.append(counter)
            except ValueError:
                pass
        result.sort()
        return result

    def __getstate__(self):
        state = BaseLogFile.__getstate__(self)
        del state["lastDate"]
        return state

threadable.synchronize(WaderLogFile)


def _get_application():
    """
    Factory function that returns an Application object.

    If the object does not exist then it creates a new Application object.
    (Internal use only).
    """
    global _application
    if _application is not None:
        return _application

    _application = Application(consts.APP_NAME)
    logfile = WaderLogFile(consts.LOG_NAME, consts.LOG_DIR,
                           maxRotatedFiles=consts.LOG_NUMBER)
                                   changeid, t)
        from buildbot.changes.changes import Change
        c = Change(who=who, files=files, comments=comments, isdir=isdir,
                   links=links, revision=revision, when=when, branch=branch,
                   category=category, revlink=revlink, repository=repository,
                   project=project)
        c.properties.updateFromProperties(p)
        c.number = changeid
        return c

    def doCleanup(self):
        """
        Perform any periodic database cleanup tasks.

        @returns: Deferred
        """
        d = self.changes.pruneChanges(self.changeHorizon)
        d.addErrback(log.err, 'while pruning changes')
        return d

threadable.synchronize(DBConnector)
    def add(self, id, thing):
        if id in self._cache:
            self._cached_ids.remove(id)
            self._cached_ids.append(id)
            return
        while len(self._cached_ids) >= self._max_size:
            del self._cache[self._cached_ids.pop(0)]
        self._cache[id] = thing
        self._cached_ids.append(id)
    __setitem__ = add

    def setMaxSize(self, max_size):
        self._max_size = max_size

threadable.synchronize(LRUCache)


def none_or_str(x):
    """Cast X to a str if it is not None"""
    if x is not None and not isinstance(x, str):
        return str(x)
    return x

# place a working json module at 'buildbot.util.json'. Code is from
# Paul Wise <*****@*****.**>:
# http://lists.debian.org/debian-python/2010/02/msg00016.html
try:
    import json  # python 2.6
    assert json
except ImportError:
        raise NotImplementedError, failure

    def fetch_genres(self):
        if self.havegenre:
            return self.genre
        if not self.fetchinggenre:
            # Need to start fetching
            getPage(self.genre_url.encode('ascii')) \
                .addCallbacks(self.gotGenre, self.errGenre)
            self.fetchinggenre = defer.Deferred()
        # Always raise this if we are waiting.
        raise self.fetchinggenre

    synchronized = ['fetch_genres', 'gotGenre', ]

threadable.synchronize(GenreFeedAsync)


class ShoutcastFeedAsync(feeds.ShoutcastFeed):
    def __init__(self, *args, **kwargs):
        feeds.ShoutcastFeed.__init__(self, *args, **kwargs)
        self.shout_url = \
            'http://www.shoutcast.com/sbin/newxml.phtml?genre=' + \
            self.genre
        self.havestations = False
        self.fetchingstations = None

    def gotStations(self, page):
        self.stations = page
        self.havestations = True
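# A caller-side sketch (assumed; the snippet does not show its callers).
# On Python 2 with old Twisted, Deferred instances could be raised, so a
# caller catches the in-flight Deferred and retries once the fetch fires:
def try_fetch(feed):
    try:
        return feed.fetch_genres()
    except defer.Deferred, d:
        # Not ready yet; chain a retry onto the pending fetch.
        d.addCallback(lambda _: feed.fetch_genres())
        return d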
        self.request = None

    # Remotely relay producer interface.
    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(IterTransfer)


class IterGenResource(resource.Resource):
    isLeaf = True

    def __init__(self, itergen):
        resource.Resource.__init__(self)
        self.itergen = itergen

    def render(self, request):
        request.setHeader('content-type', 'video/mpeg')
        if request.method == 'HEAD':
            return ''
        'table_clear',
        'table_delete',
        'table_modify',
        'table_set_default',
        'table_set_timeout',
    ],
    'SimpleSwitchAPI': [
        'mirroring_add',
    ],
}

asyncified_flat = set(itertools.chain.from_iterable(asyncified.values()))

# synchronize all of those methods, because RuntimeAPI is *not* threadsafe
for cls in RuntimeAPI, SimpleSwitchAPI:
    cls.synchronized = asyncified[cls.__name__]
    threadable.synchronize(cls)


class SimpleSwitchAPIAsyncWrapper(object):
    """
    All non-dunder methods are deferred to a thread and return a Deferred.

    Note that the underlying thing is NOT thread-safe, so things actually
    won't happen in parallel even if you try.
    """
    def __init__(self, *args, **kwargs):
        self._switch_api = SimpleSwitchAPI(*args, **kwargs)

    def __getattr__(self, name):
        f = getattr(self._switch_api, name)
        if name not in asyncified_flat:
            return f
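# A standalone sketch of the wrapping idea this class implements (the
# snippet is cut off before the wrapping branch, and this is not its
# literal continuation): push each call to the reactor thread pool and
# hand back a Deferred.
from twisted.internet.threads import deferToThread

def asyncify(method):
    def call(*args, **kwargs):
        return deferToThread(method, *args, **kwargs)
    return call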
" (closing connection):\n") self.consumer.write( "\n\n\nXXXXXX Internal error in DaCHS software.\n" "If you are seeing this, please notify [email protected]\n" "with as many details (like a URL) as possible.\n" "Also, the following traceback may help people there figure out\n" "the problem:\n" + utils.getTracebackAsString()) # All producing is done in the thread, so when no one's writing any # more, we should have delivered everything to the consumer finally: reactor.callFromThread(self.cleanup) synchronized = ['resumeProducing', 'pauseProducing', 'stopProducing'] threadable.synchronize(DataStreamer) def streamOut(writeStreamTo, request): """sets up the thread to have writeStreamTo write to request from a thread. For convenience, this function returns request.deferred, you you can write things like return streamOut(foo, request) in your renderHTTP (or analoguous). """ t = DataStreamer(writeStreamTo, request) t.start() return request.deferred
    def write(self, data):
        """
        Write some data to the file.
        """
        # remove and write footer
        if self.size > 0:
            self._file.seek(self.size - len(self.footer))
        logfile.DailyLogFile.write(self, data + self.footer)
        self.size = self._file.tell()

# https://stackoverflow.com/questions/17074330/python-why-a-method-from-super-class-not-seen
DailyLogger.__dict__ = dict(
    DailyLogger.__dict__.items() + logfile.DailyLogFile.__dict__.items()
)

threadable.synchronize(DailyLogger)


class LogPublisher(log.LogPublisher):
    pass


class PgSQLLogger:
    """
    A postgresql log data collector for muc.
    """

    TYPES = {'join': 0,
             'leave': 1,
             'message': 3,
             'topic': 4,
             'error': 5,
             }
        DailyLogFile._openFile(self)

    def fix_path(self):
        file_name = '%04d-%02d-%02d' % (self.toDate())
        self.path = os.path.join(self.directory,
                                 "%s-%s" % (self.name, file_name))

    def rotate(self):
        if not os.access(self.directory, os.W_OK):
            return
        next_date = self.toDate(os.stat(self.path)[8])
        print 'Close Old LogFile(%s)' % self.path
        self._file.close()
        self._openFile()
        self.lastDate = next_date
        print 'Open New LogFile(%s)' % self.path

threadable.synchronize(EveryDayLogFile)


SERVER_NOTE = logging.FATAL + 10

log_inited = False
_log = None
_stdout = sys.stdout
_stderr = sys.stderr
AFTER_LOG_OPER = None
LOG_TAG = None


def init_log_path(dir_name, name_pre):
    global log_inited
    global _log
        for name in glob.glob("%s.*" % self.path):
            try:
                counter = int(name.split('.')[-1])
                if counter:
                    result.append(counter)
            except ValueError:
                pass
        result.sort()
        return result

    def __getstate__(self):
        state = BaseLogFile.__getstate__(self)
        del state["size"]
        return state

threadable.synchronize(LogFile)


class DailyLogFile(BaseLogFile):
    """A log file that is rotated daily (at or after midnight localtime)
    """
    def _openFile(self):
        BaseLogFile._openFile(self)
        self.lastDate = self.toDate(os.stat(self.path)[8])

    def shouldRotate(self):
        """Rotate when the date has changed since last write"""
        return self.toDate() > self.lastDate

    def toDate(self, *args):
        """Convert a unixtime to (year, month, day) localtime tuple,
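# Usage sketch (paths are illustrative): BaseLogFile.write consults
# shouldRotate() before each write, so the first write on a new day closes
# the old file and opens a fresh one.
logFile = DailyLogFile("app.log", "/var/log/myapp")
logFile.write("service started\n")
logFile.close()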
        return list(bsids)

    def get_buildset_info(self, bsid):
        bset_obj = rpc.RpcProxy('software_dev.commit')
        res = bset_obj.read(bsid, ['external_idstring', 'reason',
                                   'complete', 'results'])
        if res:
            external_idstring = res['external_idstring'] or None
            reason = res['reason'] or None
            complete = bool(res['complete'])
            return (external_idstring, reason, bsid, complete, res['results'])
        return None  # shouldn't happen

    def get_pending_brids_for_builder(self, buildername):
        breq_obj = rpc.RpcProxy('software_dev.commit')
        bids = breq_obj.search([('buildername', '=', buildername),
                                ('complete', '=', False),
                                ('claimed_at', '=', False)])
        return list(bids)

    # test/debug methods

    def has_pending_operations(self):
        return bool(self._pending_operation_count)

    def setChangeCacheSize(self, max_size):
        self._change_cache.setMaxSize(max_size)

threadable.synchronize(OERPConnector)

#eof
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None

    def pauseProducing(self):
        pass

    def stopProducing(self):
        self.file.close()
        self.request = None

    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()

    synchronized = ['resumeProducing', 'stopProducing']

threadable.synchronize(FileTransfer)


"""I contain AsIsProcessor, which serves files 'As Is'

Inspired by Apache's mod_asis
"""

class ASISProcessor:
    __implements__ = inevow.IResource

    def __init__(self, path, registry=None):
        self.path = path
        self.registry = registry or Registry()

    def renderHTTP(self, ctx):
        request = inevow.IRequest(ctx)
        request.startedWriting = 1
        return File(self.path, registry=self.registry)

    def locateChild(self, ctx, segments):
        return NotFound
            (external, reason, ssid, complete, results) = res[0]
            external_idstring = str_or_none(external)
            reason = str_or_none(reason)
            complete = bool(complete)
            return (external_idstring, reason, ssid, complete, results)
        return None  # shouldn't happen

    def get_pending_brids_for_builder(self, buildername):
        return self.runInteractionNow(self._txn_get_pending_brids_for_builder,
                                      buildername)

    def _txn_get_pending_brids_for_builder(self, t, buildername):
        # "pending" means unclaimed and incomplete. When a build is returned
        # to the pool (self.resubmit_buildrequests), the claimed_at= field is
        # reset to zero.
        t.execute(self.quoteq("SELECT id FROM buildrequests"
                              " WHERE buildername=? AND"
                              " complete=0 AND claimed_at=0"),
                  (buildername,))
        return [brid for (brid,) in t.fetchall()]

    # test/debug methods

    def has_pending_operations(self):
        return bool(self._pending_operation_count)

    def setChangeCacheSize(self, max_size):
        self._change_cache.setMaxSize(max_size)

threadable.synchronize(DBConnector)
        If C{file} is C{None}, the default behaviour is to emit the warning to
        the log system, otherwise the original L{warnings.showwarning} Python
        function is called.
        """
        if file is None:
            self.msg(warning=message, category=reflect.qual(category),
                     filename=filename, lineno=lineno,
                     format="%(filename)s:%(lineno)s: %(category)s: %(warning)s")
        else:
            if sys.version_info < (2, 6):
                _oldshowwarning(message, category, filename, lineno, file)
            else:
                _oldshowwarning(message, category, filename, lineno, file, line)

synchronize(LogPublisher)


try:
    theLogPublisher
except NameError:
    theLogPublisher = LogPublisher()
    addObserver = theLogPublisher.addObserver
    removeObserver = theLogPublisher.removeObserver
    msg = theLogPublisher.msg
    showwarning = theLogPublisher.showwarning


def _safeFormat(fmtString, fmtDict):
    """
    Try to format the string C{fmtString} using C{fmtDict} arguments,
    swallowing all errors to always return a string.
        self._cached_ids = []  # = [LRU .. MRU]

    def get(self, id):
        thing = self._cache.get(id, None)
        if thing is not None:
            self._cached_ids.remove(id)
            self._cached_ids.append(id)
        return thing
    __getitem__ = get

    def add(self, id, thing):
        if id in self._cache:
            self._cached_ids.remove(id)
            self._cached_ids.append(id)
            return
        while len(self._cached_ids) >= self._max_size:
            del self._cache[self._cached_ids.pop(0)]
        self._cache[id] = thing
        self._cached_ids.append(id)
    __setitem__ = add

threadable.synchronize(LRUCache)


def none_or_str(x):
    """Cast X to a str if it is not None"""
    if x is not None and not isinstance(x, str):
        return str(x)
    return x
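# Usage sketch (the constructor is not shown above; it is assumed to set
# _cache, _cached_ids and _max_size): get() refreshes an entry's recency,
# and add() evicts the least recently used id once the cache is full.
cache = LRUCache()
cache.add(1, "one")
cache[2] = "two"        # __setitem__ is an alias for add
cache.get(1)            # marks id 1 as most recently used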
        func(*args)
        self.finished(args)

    def finished(self, args):
        if self.debug:
            print "Finished with", args
        self.jobsfinished += 1

    def getjobstodo(self):
        return self.jobstodo

    def addjob(self, func, *arglist):
        self.jobstodo += 1
        self.tp.callInThread(self._run, func, *arglist)

    synchronized = ["finished", "getjobstodo", "addjob"]

threadable.synchronize(ThreadedJobs)


if __name__ == "__main__":
    def sleeper(amount, bogusarg):
        print "sleeping for", amount
        time.sleep(amount)
        print "done sleeping for", amount

    times = [(t, 2) for t in range(20)]
    random.shuffle(times)

    tj = ThreadedJobs(sleeper, times)
    tj.start()
log.msg("Storing cookie: %s" % binascii.hexlify(cookie)) return cookie def pop(self, challenge, attempt): """Find a cookie, and pop it's value from the container. Check if the given attempt, based on the given challenge, matches any of the cookies. If it does, the cookie is removed from the container, and the value returned.""" # The client encrypts the challenge with the cookie. We can't deduce the cookie # from the encrypted challenge, so we'll just have to try with each and see if one matches. for cookie, item in self.cookies.iteritems(): aesobj = AES.new(cookie, AES.MODE_ECB) expected = aesobj.encrypt(challenge) if attempt == expected: value, timer = item del self.cookies[cookie] timer.cancel() log.msg("Retrieved cookie: %s" % binascii.hexlify(cookie)) return cookie, value threadable.synchronize(RFBCookieContainer) cookies = None def init(reactor): global cookies cookies = RFBCookieContainer(reactor)