def _handle_event(self, event_type, args=(), respond_to=False):
    """Dispatch every registered handler for *event_type*.

    When the bot is configured as threaded, each handler runs in a
    worker thread; otherwise it runs synchronously wrapped in a
    Deferred so failures are captured uniformly.
    """
    use_threads = self.config['threaded']
    for handler in self.events[event_type]:
        if use_threads:
            threads.deferToThread(self._event, handler, args, respond_to)
        else:
            defer.execute(self._event, handler, args, respond_to)
def pullSerialized(self, *keys):
    """Pull one or more objects from the shell, serialized.

    :returns: for a single key, a Deferred firing with the serialized
        value; for several keys, a Deferred firing with a dict mapping
        key -> serialized value.
    """
    if len(keys) > 1:
        pulledDeferreds = []
        for key in keys:
            d = defer.execute(self.shell.get, key)
            pulledDeferreds.append(d)
        # This will fire on the first failure and log the rest.
        dList = gatherBoth(pulledDeferreds,
                           fireOnOneErrback=1,
                           logErrors=0,
                           consumeErrors=1)
        @dList.addCallback
        def packThemUp(values):
            serials = []
            for v in values:
                try:
                    # BUG FIX: accumulate each serialized value; the
                    # original assignment overwrote `serials` each pass.
                    serials.append(newserialized.serialize(v))
                except:
                    return defer.fail(failure.Failure())
            # BUG FIX: zip against the serialized values, not the raw
            # `values`, so callers actually receive serialized objects.
            return dict(zip(keys, serials))
        return packThemUp
    else:
        key = keys[0]
        d = defer.execute(self.shell.get, key)
        d.addCallback(newserialized.serialize)
        return d
def handle_line(self, line): """Deal with a line of input.""" # TODO this should be part of debug output in the irc shell, too, and # not baked in here log.msg("received: " + repr(line)) from twisted.internet import defer # XXX this is here cause it allows exceptions to actually be caught; be more careful with that in general defer.execute(self._handle_line, line)
def pull(self, *keys):
    """Fetch the named objects from the shell.

    Returns a Deferred firing with a list of values when several keys
    are given, or with the single value for exactly one key.
    """
    if len(keys) > 1:
        fetches = [defer.execute(self.shell.get, k) for k in keys]
        return gatherBoth(fetches)
    return defer.execute(self.shell.get, keys[0])
def _start_tasks(self):
    """Start every registered periodic task.

    Threaded bots run each task in a worker thread; otherwise the task
    is executed synchronously wrapped in a Deferred.
    """
    if not self.tasks:
        return
    for task_name in self.tasks:
        func, repeat, scale = self.tasks[task_name]
        if self.config['threaded']:
            threads.deferToThread(self._task, task_name, func, repeat, scale)
        else:
            defer.execute(self._task, task_name, func, repeat, scale)
def _handle_command(self, channel, nick, host, msg):
    """handles command dispatch"""
    words = msg.split()
    # Strip the command prefix character and look the command up.
    command = self.commands.get(words[0][1:].lower(), None)
    if command is None:
        return
    if not self._check_permission(command._restrict, nick, host):
        return
    if self.config['threaded']:
        threads.deferToThread(self._command, command, channel, nick,
                              host, words)
    else:
        defer.execute(self._command, command, channel, nick, host, words)
def pull(self, *keys):
    """Fetch the named objects; gather multiple pulls into one Deferred."""
    if len(keys) > 1:
        fetches = [defer.execute(self.shell.get, k) for k in keys]
        # This will fire on the first failure and log the rest.
        return gatherBoth(fetches, fireOnOneErrback=1,
                          logErrors=1, consumeErrors=1)
    return defer.execute(self.shell.get, keys[0])
def test_assertFailure():
    """
    Does assertFailure catch failures, and also fail when failures don't
    fail?
    """
    # A genuine failure of the right type is caught.
    d = defer.execute(lambda: 1 / 0)
    yield conftest.assertFailure(d, ZeroDivisionError)

    # assertFailure with wrong exception type is a failed assertFailure
    d = defer.execute(lambda: 1 / 0)
    wrong_type = conftest.assertFailure(d, TypeError)
    yield conftest.assertFailure(wrong_type, AssertionError)

    # assertFailure without a failure is a failed assertFailure
    no_failure = conftest.assertFailure(defer.succeed(None), TypeError)
    yield conftest.assertFailure(no_failure, AssertionError)
def wrapper(*args, **kwargs):
    """Proxy the call through defer.execute; keyword args are rejected."""
    # Empty-dict check expressed via truthiness; same condition as
    # len(kwargs) == 0.
    assert not kwargs, (
        "The Provisioning API is meant to be used via XML-RPC, "
        "for now, so its methods are prevented from use with "
        "keyword arguments, which XML-RPC does not support.")
    # TODO: Convert exceptions into Faults.
    return defer.execute(func, *args)
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        d = defer.execute(self.nbc.disconnectUser, userID)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen on a TCP socket
    """
    start = self._reactor.listenTCP
    return defer.execute(start, self._port, protocolFactory,
                         backlog=self._backlog,
                         interface=self._interface)
def wrapper(*args, **kwargs):
    """Run *fun* inside a transaction; threaded except under test."""
    if _testing:
        # No threading during testing
        return defer.execute(run_in_tx, fun, *args, **kwargs)
    return deferToThreadPool(reactor, _threadpool, run_in_tx,
                             fun, *args, **kwargs)
def wrapper(*args, **kwargs):
    """Forward positional args to *func* wrapped in a Deferred."""
    assert len(kwargs) == 0, (
        "The Provisioning API is meant to be used via XML-RPC, "
        "for now, so its methods are prevented from use with "
        "keyword arguments, which XML-RPC does not support.")
    # TODO: Convert exceptions into Faults.
    d = defer.execute(func, *args)
    return d
def loadModel(self, path, request):
    """Load a model, for the given path and request.

    @rtype: L{Deferred}
    """
    from twisted.internet import defer
    # Synchronous load wrapped in a Deferred so callers can treat it
    # uniformly with asynchronous implementations.
    return defer.execute(self.loadModelNow, path, request)
def pullSerialized(self, *keys):
    """Pull objects from the shell and serialize each.

    Errors on any individual pull are routed through
    handlePullProblems.
    """
    def fetch(key):
        # One get + serialize pipeline per key.
        d = defer.execute(self.shell.get, key)
        d.addCallback(serialized.serialize, key)
        d.addErrback(self.handlePullProblems)
        return d

    if len(keys) > 1:
        return gatherBoth([fetch(k) for k in keys])
    return fetch(keys[0])
def loadModel(self, path, request):
    """Load a model, for the given path and request.

    @rtype: L{Deferred}
    """
    from twisted.internet.defer import execute
    d = execute(self.loadModelNow, path, request)
    return d
def wrapper(*args, **kwargs):
    """Dispatch run_in_tx to the thread pool, or inline when testing."""
    if not _testing:
        return deferToThreadPool(reactor, _threadpool, run_in_tx,
                                 fun, *args, **kwargs)
    # No threading during testing
    return defer.execute(run_in_tx, fun, *args, **kwargs)
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        title = request.args['title'][0]
        d = defer.execute(self.nbc.addNotebook, userID, title)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        nbID = int(request.args['notebookID'][0])
        d = defer.execute(self.nbc.dropNotebook, userID, nbID)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def listen(self, protocolFactory):
    """Start a TLS listener on the configured reactor and port."""
    return defer.execute(
        twisted.listenTLS, self._reactor, self._port, protocolFactory,
        self._credentials, backlog=self._backlog,
        interface=self._interface)
def listen(self, protocolFactory):
    """Wrap twisted.listenTLS in a Deferred for this endpoint."""
    start_tls = twisted.listenTLS
    d = defer.execute(start_tls, self._reactor, self._port,
                      protocolFactory, self._credentials,
                      backlog=self._backlog,
                      interface=self._interface)
    return d
def pushSerialized(self, **sNamespace):
    """Unpack serialized values and push them into the shell namespace.

    :returns: Deferred from updating the shell, or an immediate failure
        if any value cannot be unserialized.
    """
    ns = {}
    for k, v in sNamespace.iteritems():
        try:
            unserialized = newserialized.IUnSerialized(v)
            ns[k] = unserialized.getObject()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed into the Deferred.
            return defer.fail()
    return defer.execute(self.shell.update, ns)
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
    """
    start = self._reactor.listenUNIX
    return defer.execute(start, self._address, protocolFactory,
                         backlog=self._backlog, mode=self._mode,
                         wantPID=self._wantPID)
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
    """
    d = defer.execute(
        self._reactor.listenUNIX,
        self._address,
        protocolFactory,
        backlog=self._backlog,
        mode=self._mode,
        wantPID=self._wantPID,
    )
    return d
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen on a TCP socket
    """
    d = defer.execute(self._reactor.listenTCP,
                      self._port,
                      protocolFactory,
                      backlog=self._backlog,
                      interface=self._interface)
    return d
def listen(self, factory):
    """Implement IStreamServerEndpoint.listen to listen on TCP.

    Optionally configuring TLS behind the HAProxy protocol.
    """
    wrapped = factory
    if self._ssl_cf:
        # Terminate TLS in memory before handing bytes to the factory.
        wrapped = TLSMemoryBIOFactory(self._ssl_cf, False, wrapped)
    proxy_factory = self.wrapper_factory(wrapped)
    return defer.execute(self._listen, self._port, proxy_factory,
                         **self._kwargs)
def callRemote(self, method, *args, **kw):
    """Invoke *method* locally if the interface class declares it.

    Unknown methods either fail (failWhenNotImplemented) or succeed
    with None.
    """
    if hasattr(self.interfaceClass, method):
        return defer.execute(self._callMethod, method, *args, **kw)
    if self.failWhenNotImplemented:
        return defer.fail(
            Failure(NotImplementedError,
                    "No Such Method in Interface: %s" % method))
    return defer.succeed(None)
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
    TCP socket.
    """
    return defer.execute(
        self._reactor.listenSSL, self._port, protocolFactory,
        contextFactory=self._sslContextFactory,
        backlog=self._backlog, interface=self._interface)
def listen(self, protocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
    TCP socket.
    """
    start_ssl = self._reactor.listenSSL
    d = defer.execute(start_ssl, self._port, protocolFactory,
                      contextFactory=self._sslContextFactory,
                      backlog=self._backlog,
                      interface=self._interface)
    return d
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        uname = request.args['username'][0]
        email = request.args['email'][0]
        # Normalize an empty email string to None for connectUser.
        if not email:
            email = None
        d = defer.execute(self.nbc.connectUser, uname, email)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        nodeID = int(request.args['nodeID'][0])
        parentID = int(request.args['parentID'][0])
        index = int(request.args['index'][0])
        d = defer.execute(self.nbc.moveNode, userID, nodeID, parentID,
                          index)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def get_state(self):
    """
    Reads state from the file.

    :returns: `Deferred` that fires with the current state, or ``None``
        when the state file does not exist.
    """
    def _read():
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        if os.path.exists(self.filename):
            with open(self.filename) as f:
                return f.read()
    return defer.execute(_read)
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Collect optional integer flags recognized by getNotebook.
        flags = {}
        for k, v in request.args.iteritems():
            if k in ['notebookID']:
                flags[k] = int(v[0])
        userID = int(request.args['userID'][0])
        d = defer.execute(self.nbc.getNotebook, userID, **flags)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        nodeID = int(request.args['nodeID'][0])
        # Attach every supplied tag to the node before re-fetching it.
        for tag in request.args['tags']:
            self.nbc.addTag(userID, nodeID, tag)
        d = defer.execute(self.nbc.getNode, userID, nodeID=nodeID)
        # getNode apparently returns a sequence; unwrap the first item.
        d.addCallback(lambda _: _[0])
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def pushSerialized(self, **sNamespace):
    """Unpack any Serialized values and push the namespace to the shell.

    Fails immediately if unpickling any Serialized value raises.
    """
    ns = {}
    for name, value in sNamespace.iteritems():
        if not isinstance(value, serialized.Serialized):
            ns[name] = value
            continue
        try:
            ns[name] = value.unpack()
        except pickle.PickleError:
            return defer.fail()
    return defer.execute(self.shell.update, ns)
def renderHTTP(self, request):
    # Debug trace of the incoming request; left in by the author.
    print request, request.args
    try:
        # Query args arrive as {name: [values]}; take the first value.
        userID = int(request.args['userID'][0])
        xmlstr = request.args['xmlstr'][0]
        justme = request.args.get('justme')
        if justme:
            justme = justme[0]
        # NOTE(review): `justme` is computed but never passed to
        # loadNotebookFromXML -- possibly dead code; confirm.
        d = defer.execute(self.nbc.loadNotebookFromXML, userID, xmlstr)
    except Exception, e:
        # Bad/missing parameters are reported as a packaged failure.
        return self.packageFailure(failure.Failure(e))
    # NOTE(review): `d` is not returned here -- the success path
    # presumably continues beyond this excerpt; confirm in full source.
def lookupAddress(self, name, timeout=None):
    """
    The twisted function which is called when an A record lookup is
    requested.

    :param name: The domain name being queried for (e.g. example.org).
    :param timeout: Time in seconds to wait for the query response.
        (optional, default: None)
    :return: A DNS response for the record query.
    """
    log.msg("Query for %s" % name)
    return defer.execute(queryAddress, name)
def renderHTTP(self, request): print request, request.args try: flags = {} for k, v in request.args.iteritems(): if k in ['textData', 'input', 'nodeType' 'output', 'comment']: flags[k] = v[0] elif k in ['nodeID', 'parentID', 'nextID', 'previousID']: flags[k] = int(v[0]) userID = int(request.args['userID'][0]) d = defer.execute(self.nbc.getNode, userID, **flags) except Exception, e: return self.packageFailure(failure.Failure(e))
def freeze_commit(self, events):
    """Unfreezes the frozen path, sending received events if not dirty.

    If events for that path happened:
        - return True
    else:
        - push the here received events, return False
    """
    if self._processor.frozen_path is None:
        raise ValueError("Committing with nothing frozen!")
    return defer.execute(self._processor.freeze_commit, events)
def connectionMade(self):
    """On connect, lazily build the retrieved-file queue and scan once."""
    factory = self.factory
    if not factory.retrieved_file_queue:
        # Create once, queue persists in the factory
        factory.retrieved_file_queue = collections.deque('', SEARCH_SIZE)
    if len(factory.retrieved_file_queue) == 0:
        # Only need to scan the local directory during startup
        d = defer.execute(self._process_local_files)
        d.addErrback(self._process_local_files_errback)
        # suppress the returned result
        d.addCallback(lambda ignore: self._change_directory())
    else:
        self._change_directory()
def run(self):
    """Run the callback chain once, then reschedule the next run."""
    d = defer.execute(self.__callbacks[0], *self.__args)
    # Fall back to the default error handler the first time through.
    if not self.__err_back:
        self.__err_back = self.on_error
    d.addErrback(self.__err_back)
    for callback in self.__callbacks[1:]:
        d.addCallback(callback)
    self.__reactor.callLater(self.__interval, self.run)
    if not self.__reactor.running:
        self.__reactor.run()
def get(self, *args, **kwargs):
    """HTTP GET

    Return a list of known channelIDs for a given UAID
    """
    uaid = self.valid_input['uaid']
    self.add_header("Content-Type", "application/json")
    d = defer.execute(self._check_uaid, uaid)

    def _fetch_channels(_):
        # Channel lookup hits storage, so run it in a thread.
        return deferToThread(self.ap_settings.message.all_channels,
                             str(uaid))

    d.addCallback(_fetch_channels)
    d.addCallback(self._write_channels, uaid)
    d.addErrback(self._uaid_not_found_err)
    d.addErrback(self._response_err)
    return d
def execute_db_retriever(engine, database, **kwargs):
    """Build a DatabaseRetriever for *engine*/*database* and run it.

    :returns: Deferred firing with
        ``{"headerRow": <configured columns>, "dataRows": <rows>}``.
    """
    log.msg("Get db data is called with %s:%s" % (engine, database))

    def callback():
        # Dead code removed: the original assigned sample `headerRow`
        # and `dataRows` literals that were never used (the header comes
        # from stream.configs and dataRows was overwritten below).
        stream = Stream()
        stream.configs = dict(kwargs)
        stream.configs["engine"] = engine
        stream.configs["database"] = database
        db = DatabaseRetriever(stream)
        dataRows = db.execute()
        result = {"headerRow": stream.configs["columns"],
                  "dataRows": dataRows}
        return result

    return defer.execute(callback)
def setup_display(self, result=None):
    """Initialise the pygame surfaces, then start the display loop."""
    def init_surfaces(result=None):
        # Main surface and a same-size black background buffer.
        self.surface = pygame.display.set_mode(DISPLAY_SIZE)
        self.surface.fill((0, 0, 0))
        self.background = pygame.Surface(DISPLAY_SIZE)
        self.background.fill((0, 0, 0))

    def render_once(result=None):
        return threads.deferToThread(self.handle_display)

    def start_loop(result=None):
        loop = task.LoopingCall(render_once)
        loop.start(FPS, now=False)

    d = defer.execute(init_surfaces)
    d.addCallback(start_loop)
    return d
def post(self, *args, **kwargs):
    """HTTP POST

    Endpoint generation and optionally router type/data registration.
    """
    self.add_header("Content-Type", "application/json")
    uaid = self.valid_input['uaid']
    router = self.valid_input["router"]
    router_type = self.valid_input["router_type"]
    router_token = self.valid_input.get("router_token")
    router_data = self.valid_input['router_data']
    # If the client didn't provide a CHID, make one up.
    # Note, valid_input may explicitly set "chid" to None
    # THIS VALUE MUST MATCH WHAT'S SPECIFIED IN THE BRIDGE CONNECTIONS.
    # currently hex formatted.
    chid = router_data["channelID"] = (self.valid_input["chid"] or
                                       uuid.uuid4().hex)
    self.ap_settings.metrics.increment("updates.client.register",
                                       tags=self.base_tags())
    if not uaid:
        # New client: register with the router bridge first, then
        # create the user and channel records in a worker thread.
        uaid = uuid.uuid4()
        d = defer.execute(router.register, uaid.hex,
                          router_data=router_data, app_id=router_token,
                          uri=self.request.uri)
        d.addCallback(lambda _: deferToThread(
            self._register_user_and_channel, uaid, chid, router,
            router_type, router_data))
        d.addCallback(self._write_endpoint, uaid, chid, router,
                      router_data)
        d.addErrback(self._router_fail_err)
        d.addErrback(self._response_err)
    else:
        # Existing client: only register the new channel for it.
        d = deferToThread(self._register_channel, uaid, chid,
                          router_data.get("key"))
        d.addCallback(self._write_endpoint, uaid, chid)
        d.addErrback(self._response_err)
    return d
def callRemote(self, methname, *args, **kwargs):
    """
    Call the given method on the wrapped object, passing the given
    arguments.  Arguments are checked for conformance to the remote
    interface but the return value is not (because I don't know how
    -exarkun).

    :return Deferred: The result of the call on the wrapped object.
    """
    schema = self._referenceable.getInterface()[methname]
    if self.check_args:
        schema.checkAllArgs(args, kwargs, inbound=False)
    # TODO: Figure out how to call checkResults on the result.
    return execute(self._referenceable.doRemoteCall,
                   methname, args, kwargs)
def get_buddyicon(flickr, data, size=48):
    """Lookup the buddyicon from the data in @data using @flickr and
    resize it to @size pixels."""
    global __buddy_cache
    if __buddy_cache is None:
        folder = os.path.join(get_cache_path(), "postr")
        if not os.path.exists(folder):
            os.makedirs(folder)
        path = os.path.join(folder, "buddyicons")
        try:
            __buddy_cache = bsddb3.hashopen(path, "c")
        except bsddb3.db.DBInvalidArgError:
            # The database needs upgrading, so delete it
            os.remove(path)
            __buddy_cache = bsddb3.hashopen(path, "c")

    def load_thumb(page, size):
        # Decode the raw JPEG bytes into a pixbuf scaled to size x size.
        loader = GdkPixbuf.PixbufLoader()
        loader.set_size(size, size)
        loader.write(page)
        loader.close()
        return loader.get_pixbuf()

    def got_data(page, url, size):
        # Cache the raw bytes before decoding them.
        __buddy_cache[url] = page
        return load_thumb(page, size)

    if int(data.get("iconfarm")) > 0:
        url = "http://farm%s.static.flickr.com/%s/buddyicons/%s.jpg" % (
            data.get("iconfarm"), data.get("iconserver"), data.get("nsid"))
    else:
        url = "http://www.flickr.com/images/buddyicon.jpg"

    # FIX: dict.has_key() is deprecated (removed in Python 3); the `in`
    # operator is the supported membership test.
    if url in __buddy_cache:
        return defer.execute(load_thumb, __buddy_cache[url], size)
    else:
        deferred = getPage(url)
        deferred.addCallback(got_data, url, size)
        deferred.addErrback(log.err)
        return deferred
def get_buddyicon(flickr, data, size=48):
    """Lookup the buddyicon from the data in @data using @flickr and
    resize it to @size pixels."""
    global __buddy_cache
    if __buddy_cache is None:
        folder = os.path.join(get_cache_path(), "postr")
        if not os.path.exists(folder):
            os.makedirs(folder)
        path = os.path.join(folder, "buddyicons")
        try:
            __buddy_cache = bsddb3.hashopen(path, "c")
        except bsddb3.db.DBInvalidArgError:
            # The database needs upgrading, so delete it
            os.remove(path)
            __buddy_cache = bsddb3.hashopen(path, "c")

    def load_thumb(page, size):
        # Decode the raw JPEG bytes into a pixbuf scaled to size x size.
        loader = GdkPixbuf.PixbufLoader()
        loader.set_size(size, size)
        loader.write(page)
        loader.close()
        return loader.get_pixbuf()

    def got_data(page, url, size):
        # Cache the raw bytes before decoding them.
        __buddy_cache[url] = page
        return load_thumb(page, size)

    if int(data.get("iconfarm")) > 0:
        url = "http://farm%s.static.flickr.com/%s/buddyicons/%s.jpg" % (
            data.get("iconfarm"), data.get("iconserver"), data.get("nsid"))
    else:
        url = "http://www.flickr.com/images/buddyicon.jpg"

    # FIX: dict.has_key() is deprecated (removed in Python 3); the `in`
    # operator is the supported membership test.  Also normalized the
    # PEP 8-violating `join (` call spacing.
    if url in __buddy_cache:
        return defer.execute(load_thumb, __buddy_cache[url], size)
    else:
        deferred = getPage(url)
        deferred.addCallback(got_data, url, size)
        deferred.addErrback(log.err)
        return deferred
def abort(self, taskid):
    """
    Remove a task from the queue if it has not been run already.
    """
    if not isinstance(taskid, int):
        return defer.fail(failure.Failure(
            TypeError("an integer task id expected: %r" % taskid)))
    try:
        self.scheduler.pop_task(taskid)
    except IndexError as e:
        # Not in the scheduler: work out why from the bookkeeping.
        if taskid in list(self.finishedResults.keys()):
            return defer.fail(IndexError("Task Already Completed"))
        if taskid in self.abortPending:
            return defer.fail(IndexError("Task Already Aborted"))
        if taskid in self._pendingTaskIDs():
            # task is pending
            self.abortPending.append(taskid)
            return defer.succeed(None)
        return defer.fail(e)
    return defer.execute(self._doAbort, taskid)
def sendmail(server, from_addr, to_addrs, msg, port):
    """Receives outgoing messages produced by the server."""
    # NOTE: `self`, `validate` and `self.config` come from the enclosing
    # test scope; this is a closure used as a sendmail stub.
    def assertions():
        """Validates the outgoing messages."""
        # Check compliance with outgoing message configuration in
        # self.config
        self.assertEqual(server, self.config.get('outgoing', 'server'))
        self.assertEqual(from_addr,
                         self.config.get('outgoing', 'envelope_sender'))
        self.assertEqual(port, self.config.getint('outgoing', 'port'))
        # Parse message headers and content
        msg_parser = email.parser.FeedParser()
        msg_parser.feed(msg)
        parsed_msg = msg_parser.close()
        self.assertEqual('body\n', parsed_msg.get_payload())
        # Call user-supplied function for further validation
        if validate:
            validate(to_addrs, parsed_msg)
    # Defer assertions so the reactor reaches a clean state even if
    # assertions fail
    return defer.execute(assertions)
def func():
    """Poll pygame's event queue and hand the events off for handling."""
    # addCallback returns the same Deferred, so this is equivalent to
    # building `d` and returning it separately.
    return defer.execute(pygame.event.get).addCallback(self.handle_events)
def _scan_one_dir(self, scan_info):
    """Gets one dir and compares with fsm.

    Adds an inotify watch, freezes the path, scans its on-disk content
    against the file-system metadata, and commits (or rolls back) the
    frozen state.  Runs under inlineCallbacks-style semantics
    (yield / defer.returnValue).
    """
    share, dirpath, udf_mode = scan_info
    log_debug("Adding watch to %r", dirpath)
    yield self.eq.add_watch(dirpath)
    to_later = []
    # Freeze events for this path while we scan it on disk.
    self.eq.freeze_begin(dirpath)

    def scan():
        """The scan, really."""
        log_debug("scanning the dir %r", dirpath)
        dircontent = listdir(dirpath)

        # get the info from disk
        dnames = []
        fnames = []
        for something in dircontent:
            fullname = os.path.join(dirpath, something)
            stat_result = get_stat(fullname)
            if stat_result is None:
                # gone between the listdir and now
                continue
            if is_link(fullname):
                log_info("Ignoring path as it's a symlink: %r", fullname)
                continue
            if not is_valid_name(fullname):
                m = "Ignoring path because it's invalid (non utf8): %r"
                log_info(m, fullname)
                continue
            if not access(fullname):
                log_warning("Ignoring path as we don't have enough "
                            "permissions to track it: %r", fullname)
                continue

            if stat.S_ISDIR(stat_result.st_mode):
                dnames.append(something)
            elif stat.S_ISREG(stat_result.st_mode):
                fnames.append(something)
            else:
                log_warning("Path: %r isn't a dir, file or symlink.",
                            fullname)

        events, to_scan_later = self._compare(dirpath, dnames, fnames,
                                              share)
        to_later.extend(to_scan_later)
        return events

    delete_events = []

    def control(dirty):
        """controls that everything was ok"""
        if dirty:
            self.eq.freeze_rollback()
            raise ScanTransactionDirty("dirty!")

        # delete metadata for the filtered delete_events
        fsm = self.fsm
        for evtname, path in delete_events:
            parentpath = os.path.dirname(path)
            log_info("UDF mode! Resetting hashes to dir %r", parentpath)
            self.fsm.set_by_path(parentpath, local_hash="",
                                 server_hash="")
            if evtname == "FS_DIR_DELETE":
                log_info("UDF mode! Removing metadata from dir %r", path)
                tree = fsm.get_paths_starting_with(path,
                                                   include_base=True)
                for p, is_dir in tree:
                    fsm.delete_metadata(p)
            elif evtname == "FS_FILE_DELETE":
                log_info("UDF mode! Removing metadata from file %r",
                         path)
                fsm.delete_metadata(path)
            else:
                # NOTE(review): this message was split across a line
                # boundary in the extracted source; reconstructed as a
                # single-line literal -- confirm against the original.
                raise ValueError("Bad delete event! got %s (on %r)"
                                 % (evtname, path))
        return to_later

    def filter_delete_events(events):
        """Separate the delete events if it was an UDF."""
        for evt in events[:]:
            if evt[0] in ("FS_DIR_DELETE", "FS_FILE_DELETE"):
                events.remove(evt)
                delete_events.append(evt)
        return events

    d = defer.execute(scan)
    if udf_mode:
        d.addCallback(filter_delete_events)
    d.addCallback(self.eq.freeze_commit)
    d.addCallback(control)
    result = yield d
    defer.returnValue(result)
if not isinstance(taskid, int): return defer.fail(failure.Failure(TypeError("an integer task id expected: %r" % taskid))) try: self.scheduler.pop_task(taskid) except IndexError, e: if taskid in self.finishedResults.keys(): d = defer.fail(IndexError("Task Already Completed")) elif taskid in self.abortPending: d = defer.fail(IndexError("Task Already Aborted")) elif taskid in self._pendingTaskIDs():# task is pending self.abortPending.append(taskid) d = defer.succeed(None) else: d = defer.fail(e) else: d = defer.execute(self._doAbort, taskid) return d def barrier(self, taskids): dList = [] if isinstance(taskids, int): taskids = [taskids] for id in taskids: d = self.get_task_result(id, block=True) dList.append(d) d = DeferredList(dList, consumeErrors=1) d.addCallbacks(lambda r: None) return d def spin(self):
def connectionLost(self, reason):
    """Fire the completion Deferred with the body, or with the failure."""
    if reason.check(ResponseDone):
        self._done.callback("".join(self._data))
        return

    def _failed():
        raise Exception(reason.getErrorMessage())
    # Re-raise inside defer.execute so the errback receives a Failure.
    self._done.errback(defer.execute(_failed))
def loadOID(self, oid):
    """Implement me to return a Deferred if you want to implement
    asynchronous loading.
    """
    d = defer.execute(self.loadOIDNow, oid)
    return d
def _wrapper(*args, **kwargs):
    """Invoke ``f`` via defer.execute so exceptions become Failures."""
    return defer.execute(f, *args, **kwargs)
def put(path, entry):
    """Store *entry* at *path* asynchronously via the private _put."""
    d = defer.execute(_put, path, entry)
    return d
def listen(self, stdioProtocolFactory):
    """
    Implement L{IStreamServerEndpoint.listen} to listen on stdin/stdout
    """
    protocol = stdioProtocolFactory.buildProtocol(PipeAddress())
    return defer.execute(stdio.StandardIO, protocol)