def getRemoteResult(self, jobid, mapid, partition_number, put):
    """Request one result partition from the remote key/value peer.

    A SimplePageCollector receives the pages and fires ``d`` when done;
    ``self.ok``/``self.nok`` are already chained on ``d`` before the
    remote call is issued so no result can be missed.

    :param jobid: identifier of the job whose result is wanted
    :param mapid: identifier of the map task
    :param partition_number: which partition of the result to fetch
    :param put: sink handed to the collector for incoming pages
    :return: Deferred firing with ``self.ok``/``self.nok`` outcome
    """
    d = Deferred()
    collector = SimplePageCollector(d, put)
    d.addCallbacks(self.ok, self.nok)
    # callRemote's own Deferred is deliberately ignored; completion is
    # signalled through the collector firing ``d`` instead.
    #return self.kvreferenceable.callRemote("getResult", collector, jobid, mapid, partition_number)
    self.kvreferenceable.callRemote("getResult", collector, jobid, mapid, partition_number)
    return d
def exchange(ls, receivers):
    """Send BeDOZa shares (value + per-receiver MAC) to every receiver,
    then, if this player is itself a receiver, collect the corresponding
    messages from all players and recombine them.

    NOTE(review): closure — relies on ``self``, ``recombine_value`` and
    ``shares`` from the enclosing scope; confirm against the enclosing
    method before changing.
    """
    # Send share to all receivers.
    pc = tuple(self.program_counter)
    keyLists = []
    for other_id in receivers:
        message_string = ""
        for inx, beDOZaContents in enumerate(ls):
            keyLists.append(beDOZaContents.get_keys())
            # Wire format: "value:mac;value:mac;..." — one pair per share.
            message_string += "%s:%s;" % \
                (beDOZaContents.get_value().value,
                 beDOZaContents.get_mac(other_id - 1).value)
        self.protocols[other_id].sendData(pc, TEXT, message_string)
    if self.id in receivers:
        def deserialize(s):
            # Invert the wire format above back into field elements.
            def field_long(x):
                return field(long(x))
            xs = s[0:-1].split(';')
            ys = [x.split(':') for x in xs]
            return [map(field_long, xs) for xs in ys]
        num_players = len(self.players.keys())
        values = num_players * [None]
        # One Deferred per player; each deserializes its own message.
        for inx, other_id in enumerate(self.players.keys()):
            d = Deferred()
            d.addCallbacks(deserialize, self.error_handler)
            self._expect_data(other_id, TEXT, d)
            values[inx] = d
        result = gatherResults(values)
        result.addCallbacks(recombine_value, self.error_handler,
                            callbackArgs=(keyLists, len(shares)))
        return result
def command_received(self, cmd_data): LOG_MSG('Poll command got response. Response: {0}.'.format(cmd_data)) for cmd in cmd_data: # Obtain only new commands next time cmd_date = parse_date(cmd['timestamp']) if self.owner.timestamp is not None: self.owner.timestamp = max(self.owner.timestamp, cmd_date) else: self.owner.timestamp = cmd_date # device-application will use this deferred object to notify me about the command progress. thiscmd = cmd def ok(result): self.command_done(thiscmd, result) def err(reason): self.command_failed(thiscmd, reason) defer = Deferred() defer.addCallbacks(ok, err) try: LOG_MSG('Executing command {0} handler.'.format(cmd)) self.owner.run_command(cmd, defer) except Exception, err: LOG_ERR( 'Failed to execute device-delegate on_command. Reason: <{0}>.' .format(err)) self.command_failed(cmd, err)
def on_api_finished(response, url):
    """Success callback when an API response is received

    Args:
        response: The twisted.web.client.Response for the HTTP request
        url: The url for this API request

    Returns:
        A twisted.internet.defer.Deferred that is fired when the API
        body is processed
        A twisted.python.failure.Failure on error
    """
    # Any non-2xx status is a failure.  FIX: use floor division — under
    # Python 3 ``response.code / 100`` is true division, so a successful
    # 201 gives 2.01 != 2 and would wrongly be treated as an HTTP error.
    if response.code // 100 != 2:
        return self._create_http_failure(url, response)
    api_finished_deferred = Deferred()

    def on_api_body_read(api_json_body):
        """Success callback for reading the body of an API

        Args:
            api_json_body: The JSON body for the API

        Returns:
            api_json_body
        """
        write_resource_file(api_json_body)
        return api_json_body

    api_finished_deferred.addCallbacks(on_api_body_read, on_error)
    response.deliverBody(JSONBodyReceiver(api_finished_deferred))
    return api_finished_deferred
def on_timeout(self):
    """Handle expiry of a half-block signature request.

    Retries (by re-sending the block and re-registering the cache entry)
    up to 360 times; after that the sign Deferred is failed.
    """
    if self.sign_deferred.called:
        # The signature arrived between timeout scheduling and firing.
        self._logger.debug(
            "Race condition encountered with timeout/removal of HalfBlockSignCache, recovering."
        )
        return
    self._logger.info(
        "Timeout for sign request for half block %s, note that it can still arrive!",
        self.half_block)
    if self.timeouts < 360:
        self.community.send_block(self.half_block, address=self.socket_address)

        def add_later(_):
            # Re-register a fresh cache entry with an incremented
            # timeout counter so the retry budget is tracked.
            self.community.request_cache.add(
                HalfBlockSignCache(self.community, self.half_block,
                                   self.sign_deferred, self.socket_address,
                                   self.timeouts + 1))
        # Re-add via an anonymous task so it happens outside this
        # timeout callback, avoiding cache re-entrancy.
        later = Deferred()
        self.community.request_cache.register_anonymous_task("add-later", later, delay=0.0)
        later.addCallbacks(
            add_later, lambda _: None)  # If the re-add is cancelled, just exit.
    else:
        self.sign_deferred.errback(
            Failure(RuntimeError("Signature request timeout")))
def test(): d = Deferred() d.addCallbacks(gotPoets, gotFailed) # traceback.print_stack() # d.callback('tips poetm short') d.errback(Exception('tips poetm shorts')) print "....end....."
class Manager(object):
    """The manager object receives all the events and forwards them to the
       interested objects.

    Events are serialized through a single Deferred whose callback chain
    acts as a FIFO job queue.
    """
    def __init__(self):
        # Weak keys so listeners are dropped automatically when the
        # rest of the program releases them.
        self.listeners = WeakKeyDictionary()
        self.eventQueue = Deferred()  # Uses a deferred object to handle the
                                      # queueing of events
        self.eventQueue.callback(1)   # kick-off the queue on initialization
                                      # so it attempts to run the next job
                                      # whenever it can

    def register(self, listener):
        """Register *listener* (must be a Listener) for event delivery."""
        assert isinstance(listener, Listener), "Only classes inheriting from \
the Listener base-class can register with the manager as a listener"
        self.listeners[listener] = 1

    def queueEvent(self, event):
        """Append *event* to the Deferred-backed queue for delivery."""
        self.eventQueue.addCallbacks(self.post, self.printError, (event, ))

    def post(self, __, event):
        """This method is designed to be queued in a deferred object, so its
        first parameter is ignored, and its return in None, because the return
        of a callback in a deferred is passed in as the first parameter of the
        next callback method, so posts can be chained together in the
        eventQueue deferred indefinitely."""
        for listener in self.listeners.keys():
            listener.getEvent(event)
        return 1

    def printError(self, fail):
        """Errback: print the traceback and swallow RuntimeError only."""
        print fail.getTraceback()
        fail.trap(RuntimeError)
def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False):
    """Wrap *deferred* so multiple observers can await its result.

    Uses ``object.__setattr__`` throughout because the class blocks
    normal attribute assignment.

    :param deferred: the underlying Deferred whose result is shared
    :param consumeErrors: when True the wrapped errback swallows the
        failure after notifying observers instead of propagating it
    """
    object.__setattr__(self, "_deferred", deferred)
    object.__setattr__(self, "_result", None)
    object.__setattr__(self, "_observers", set())

    def callback(r):
        # Record the success, fan it out to every observer, then pass
        # the result through unchanged.
        object.__setattr__(self, "_result", (True, r))
        while self._observers:
            try:
                # TODO: Handle errors here.
                self._observers.pop().callback(r)
            except Exception:
                pass
        return r

    def errback(f):
        object.__setattr__(self, "_result", (False, f))
        while self._observers:
            # This is a little bit of magic to correctly propagate stack
            # traces when we `await` on one of the observer deferreds.
            f.value.__failure__ = f
            try:
                # TODO: Handle errors here.
                self._observers.pop().errback(f)
            except Exception:
                pass
        if consumeErrors:
            return None
        else:
            return f

    deferred.addCallbacks(callback, errback)
def _startDTrace(self, pid):
    """Spawn a dtrace process targeting *pid* running ``self._dScript``.

    :return: ``(started, stopped)`` — Deferreds firing when dtrace is
        ready and when it exits, respectively.  Startup failures from
        DTraceBug are retried recursively.
    """
    started = Deferred()
    stopped = Deferred()
    process = reactor.spawnProcess(
        IOMeasureConsumer(started, stopped, _DTraceParser(self)),
        "/usr/sbin/dtrace",
        ["/usr/sbin/dtrace",
         # process preprocessor macros
         "-C",
         # search for include targets in the source directory containing this file
         "-I", dirname(__file__),
         # suppress most implicitly generated output (which would mess up our parser)
         "-q",
         # make this pid the target
         "-p", str(pid),
         # load this script
         "-s", self._dScript])

    def eintr(reason):
        # Known flaky startup condition: retry the whole launch.
        reason.trap(DTraceBug)
        print 'Dtrace startup failed (', reason.getErrorMessage().strip(), '), retrying.'
        return self._startDTrace(pid)

    def ready(passthrough):
        # Once the dtrace process is ready, save the state and
        # have the stopped Deferred deal with the results.  We
        # don't want to do either of these for failed dtrace
        # processes.
        self.dtraces[pid] = process
        stopped.addCallback(self._cleanup, pid)
        return passthrough

    started.addCallbacks(ready, eintr)
    return started, stopped
def success(self, response):
    """Handle an HTTP response from a command poll.

    2xx responses are parsed as JSON and forwarded to
    ``command_received``; anything else is read as text and reported
    through ``self.failure``.
    """
    LOG_MSG(
        'Got command poll response from the server for device {0}.'.format(
            self.owner.info))
    if response.code in [200, 201]:
        def err(reason):
            LOG_ERR(
                'Failed to parse command request response. Reason: <{0}>.'.
                format(reason))
            self.failure(reason)
        result = Deferred()
        result.addCallbacks(self.command_received, err)
        response.deliverBody(JsonDataConsumer(result))
    else:
        def on_get_response_text(error_text):
            LOG_ERR(
                'Invalid response has been received during command polling. Reason: {0}.'
                .format(error_text))
            self.failure(DhError(error_text))
        d = Deferred()
        # addBoth: the body text is the error detail whether the read
        # succeeds or fails.
        d.addBoth(on_get_response_text)
        response.deliverBody(TextDataConsumer(d))
def join(self, _):
    """Try to join an existing overlay network, falling back to becoming
    coordinator when no configured node answers.

    The errback chain is (ab)used as a retry loop: ``d.errback(0)``
    starts it, and each ``send`` errback attempts the next node.
    """
    def send(_, node):
        # Attempt a TCP join against one configured node; resolves to
        # the node dict on success.
        factory = NodeClientFactory(OverlayService(self), {"command" : \
            "join","node":self.my_node})
        reactor.connectTCP(node["host"], node["tcp_port"], factory)
        factory.deferred.addCallback(lambda _: node)
        def sendcallback(_):
            return node
        def senderrback(_):
            raise Exception()
        factory.deferred.addCallbacks(sendcallback,senderrback)
        return factory.deferred
    def success(node):
        # NOTE(review): this binds a *local* name and discards it —
        # presumably ``self.coordinator = node`` was intended; confirm.
        coordinator = node
    def error(e):
        # All nodes failed: promote ourselves to coordinator.
        self.is_coordinator = True
        self.my_node["id"] = 0
        self.nextid = 1
        self.members[self.my_node["host"]] = self.my_node
        send_log("Notice", "I am coordinator")
        return e
    # search for running loadbalancers and join the overlay network
    # NOTE(review): ``initialized`` is never read — dead local?
    initialized = False
    d = Deferred()
    for node in self.config["nodes"]:
        d.addErrback(send, node)
    d.addCallbacks(success, error)
    d.errback(0)
    return d
def new_question(self):
    """Advance to the next question, reshuffling or ending the game when
    the question list is exhausted, then wait for the streamer to be
    ready before continuing to ``new_question_1``.
    """
    # forward question index
    self.question_idx += 1
    if self.question_idx >= len(self.questions):
        if self.loop:
            random.shuffle(self.questions)
            self.question_idx = 0
        else:
            self.end_of_game()
            return
    self.question = self.questions[self.question_idx]
    self.qid = self.question['qid']
    self.question_text = self.question['text'].split()
    self.question_length = len(self.question_text)
    self.position = 0
    self.start_streamer()

    def callback(x):
        self.new_question_1()

    def errback(x):
        logger.warning('[new question] streamer timed out')

    # Proceed immediately if the streamer already reports this qid;
    # otherwise park a 3-second-timeout Deferred on the watch list.
    condition = partial(self._check_streamer, 'qid', self.qid)
    if condition():
        callback(None)
    else:
        deferred = Deferred()
        deferred.addTimeout(3, reactor)
        deferred.addCallbacks(callback, errback)
        self._deferreds.append((deferred, condition, 'streamer check'))
class Manager(object):
    """The manager object receives all the events and forwards them to the
       interested objects.

    A single fired Deferred serves as a FIFO event queue: each queued
    event becomes the next link in its callback chain.
    """
    def __init__(self):
        # Weak keys: listeners unregister themselves by being collected.
        self.listeners = WeakKeyDictionary()
        self.eventQueue = Deferred()  # Uses a deferred object to handle the
                                      # queueing of events
        self.eventQueue.callback(1)   # kick-off the queue on initialization
                                      # so it attempts to run the next job
                                      # whenever it can

    def register(self, listener):
        """Register *listener* (must derive from Listener)."""
        assert isinstance(listener, Listener), "Only classes inheriting from \
the Listener base-class can register with the manager as a listener"
        self.listeners[listener] = 1

    def queueEvent(self, event):
        """Chain delivery of *event* onto the event queue."""
        self.eventQueue.addCallbacks(self.post, self.printError, (event,) )

    def post(self, __, event):
        """This method is designed to be queued in a deferred object, so its
        first parameter is ignored, and its return in None, because the return
        of a callback in a deferred is passed in as the first parameter of the
        next callback method, so posts can be chained together in the
        eventQueue deferred indefinitely."""
        for listener in self.listeners.keys():
            listener.getEvent(event)
        return 1

    def printError(self, fail):
        """Errback: dump the traceback, re-raise anything but RuntimeError."""
        print fail.getTraceback()
        fail.trap(RuntimeError)
def upload(self, fd, bucket, object_name, content_type=None,
           metadata=None, parts_generator=None, part_handler=None,
           amz_headers=None, on_part_generated=None):
    """Start a multipart upload of *fd* to ``bucket/object_name``.

    FIX: ``metadata`` and ``amz_headers`` previously defaulted to ``{}``
    — a mutable default shared across calls; they now default to None
    and a fresh dict is created per call (backward compatible).

    :param fd: file-like object providing the data
    :param bucket: destination S3 bucket name
    :param object_name: destination key
    :param content_type: optional Content-Type for the object
    :param metadata: optional user metadata dict
    :param parts_generator: IPartsGenerator; defaults to file IO parts
    :param part_handler: part uploader; defaults to single-process
    :param amz_headers: optional extra x-amz-* headers
    :param on_part_generated: optional per-part hook
    :return: Deferred firing when the upload completes
    """
    if metadata is None:
        metadata = {}
    if amz_headers is None:
        amz_headers = {}
    self.log.msg('Beginning upload to bucket=%s,key=%s' % (
        bucket, object_name))
    client = self.region.get_s3_client()
    if parts_generator is None:
        parts_generator = FileIOPartsGenerator()
    if part_handler is None:
        part_handler = SingleProcessPartUploader()
    # TODO - probably need some pluggable strategy for getting the parts
    # count (if desired) or not (optimization) - maybe parts_count()
    # method on IPartsGenerator.
    # Or maybe this whole counter idea is just plain wrong.
    counter = self.counter_factory(parts_generator.count_parts(fd))
    counter.context = '[object_name=%s] ' % object_name
    part_handler.client = client
    d = Deferred()
    task = MultipartUpload(client, fd, parts_generator, part_handler,
                           counter, d, self.log)
    task.on_part_generated = on_part_generated
    task.throughput_counter = self.throughput_counter
    # Track the task until completion; _remove_upload runs either way.
    self.uploads.add(task)
    d.addCallbacks(self._completed_upload, self.log.err)\
        .addBoth(self._remove_upload, task)
    task.upload(bucket, object_name, content_type, metadata, amz_headers)
    return d
def goingLive(self, ctx, client):
    """Called each time the page is served/refreshed"""
    # Cancel a pending close: the client has come back.
    if self.closing:
        self.closing.cancel()
        self.closing = None
    inevow.IRequest(ctx).setHeader('content-type', 'application/vnd.mozilla.xul+xml')
    def setUpHandler(func, name, *args, **kwargs):
        """Convenience function: link funcs to handler ids and store them."""
        kwargs['identifier'] = name
        hndlr = handler(func, *args, **kwargs)
        hndlr(ctx, client)  # Stores it
    setUpHandler(self.handleIsPackageDirty, 'isPackageDirty')
    setUpHandler(self.handlePackageFileName, 'getPackageFileName')
    setUpHandler(self.handleSavePackage, 'savePackage')
    setUpHandler(self.handleLoadPackage, 'loadPackage')
    setUpHandler(self.handleLoadRecent, 'loadRecent')
    setUpHandler(self.handleExport, 'exportPackage')
    setUpHandler(self.handleQuit, 'quit')
    setUpHandler(self.handleRegister, 'register')
    setUpHandler(self.handleReportIssue, 'reportIssue')
    setUpHandler(self.handleInsertPackage, 'insertPackage')
    setUpHandler(self.handleExtractPackage, 'extractPackage')
    setUpHandler(self.outlinePane.handleSetTreeSelection, 'setTreeSelection')
    self.idevicePane.client = client
    # onClose handles both outcomes when the client disconnects.
    d = Deferred()
    d.addCallbacks(self.onClose, self.onClose)
    client.closeNotifications.append(d)
    # NOTE(review): this builds a tuple ("'", id, "'"), not a quoted
    # string — looks like string concatenation was intended; confirm.
    handleId = "'", client.handleId, "'"
def receive_shares(player_id):
    """Collect everything expected from *player_id*: three shares
    (xi, rho1, rho2) plus a serialized commitment, which is
    deserialized as it arrives.

    :return: Deferred (gatherResults) firing with [xi, rho1, rho2, Cx]
    """
    Cx = Deferred()
    xi = self._expect_share(player_id, field)
    rho1 = self._expect_share(player_id, field)
    rho2 = self._expect_share(player_id, field)
    self._expect_data(player_id, TEXT, Cx)
    Cx.addCallbacks(commitment.deserialize, self.error_handler)
    return gatherResults([xi, rho1, rho2, Cx])
def test1(): d = Deferred() # add a callback/errback pair to the chain d.addCallbacks(successHandle, failedHandle) # fire the chain with a normal result d.callback('Successful') print "Finished test1"
def handle_buzzing(self, end_of_question=False):
    """Pick one eligible buzzing player at random, notify everyone, and
    wait (with timeout) for that player's answer.

    :param end_of_question: when True, every player who has not yet
        buzzed is eligible regardless of whether they requested to buzz
    """
    buzzing_inds = []
    for i, user in enumerate(self.users):
        # Each player may buzz at most once per question.
        if self.buzzed[user.peer]:
            continue
        if end_of_question:
            buzzing_inds.append(i)
            continue
        if user.peer in self.user_responses and \
                'type' in self.user_responses[user.peer]:
            rsp_type = self.user_responses[user.peer]['type']
            if rsp_type == MSG_TYPE_BUZZING_REQUEST:
                buzzing_inds.append(i)
    if len(buzzing_inds) == 0:
        if end_of_question:
            self.end_of_question()
        return
    # Break ties randomly among simultaneous buzzers.
    random.shuffle(buzzing_inds)
    buzzing_idx = buzzing_inds[0]
    logger.info('[buzzing] Player {} answering'.format(buzzing_idx))
    # Everyone else sees red (locked out)...
    red_msg = {
        'type': MSG_TYPE_BUZZING_RED,
        'qid': self.qid,
        'uid': buzzing_idx,
        'length': 8
    }
    red_users = self.users[:buzzing_idx] + self.users[buzzing_idx + 1:]
    self.broadcast(red_users, red_msg)
    # ...while the chosen player sees green (may answer).
    green_msg = {
        'type': MSG_TYPE_BUZZING_GREEN,
        'qid': self.qid,
        'length': 8
    }
    green_user = self.users[buzzing_idx]
    green_user.sendMessage(json.dumps(green_msg).encode('utf-8'))
    self.buzzed[green_user.peer] = True
    condition = partial(self._check_user, green_user.peer, 'type',
                        MSG_TYPE_BUZZING_ANSWER)
    callback = partial(self.handle_buzzing_ok, buzzing_idx, end_of_question)
    errback = partial(self.handle_buzzing_timeout, buzzing_idx,
                      end_of_question)
    # Answer already present: handle immediately; otherwise wait with
    # a timeout via the deferred watch list.
    if condition():
        callback(None)
    else:
        deferred = Deferred()
        deferred.addTimeout(ANSWER_TIME_OUT, reactor)
        deferred.addCallbacks(callback, errback)
        self._deferreds.append(
            (deferred, condition, 'wait for user answer'))
def onResponse(self, resp):
    """Stream the response body through wsapiResponse and hand the
    collected result to the JSON parser / error handler.

    :return: Deferred firing when the body has been fully delivered
    """
    log.debug('entered wsapi:wsApiServer: onResponse')
    # NOTE(review): logging the raw response at ERROR level looks like a
    # debugging leftover — consider downgrading to debug; confirm.
    log.error(resp)
    headers = list(resp.headers.getAllRawHeaders())
    log.debug(headers)
    finished = Deferred()
    resp.deliverBody(wsapiResponse(finished))
    finished.addCallbacks(self.getJsonResult,self.onError)
    return finished
def connectBox(self):
    """Show the connection dialog and, if accepted, open a TCP game
    connection; connectSuccess/connectFailed receive the outcome.

    No-op when a protocol connection already exists.
    """
    if not self.proto:
        cb = ConnectBox(self)
        # exec_() is truthy only when the user accepted the dialog.
        if cb.exec_():
            (self.host, self.port) = cb.getValues()
            d = Deferred()
            d.addCallbacks(self.connectSuccess, self.connectFailed)
            from twisted.internet import reactor
            reactor.connectTCP(self.host, int(self.port), GameClientFactory(d))
def request_deferred(request):
    """Wrap a request inside a Deferred.

    The returned Deferred's first callback/errback pair is taken from
    the request (when a callback is set), and the request's
    callback/errback are re-pointed at the Deferred so that firing the
    request fires the whole chain.
    """
    wrapper = Deferred()
    if request.callback:
        wrapper.addCallbacks(request.callback, request.errback)
    request.callback = wrapper.callback
    request.errback = wrapper.errback
    return wrapper
def api_received(self, response):
    """Dispatch an API HTTP response: parse JSON on 200, otherwise read
    the body text and report it as a failure."""
    if response.code == 200 :
        result_proto = Deferred()
        result_proto.addCallbacks(self.api_succeed, self.api_failed)
        response.deliverBody(JsonDataConsumer(result_proto))
    else :
        def get_response_text(reason):
            self.api_failed(reason)
        response_defer = Deferred()
        # addBoth: the body text is the failure detail either way.
        response_defer.addBoth(get_response_text)
        response.deliverBody(TextDataConsumer(response_defer))
def _startDTrace(self, script, pid):
    """
    Launch a dtrace process.

    @param script: A C{str} giving the path to the dtrace program to
        run.
    @param pid: A C{int} to target dtrace at a particular process, or
        C{None} not to.

    @return: A two-tuple of L{Deferred}s.  The first will fire when the
        dtrace process is ready to go, the second will fire when it
        exits.
    """
    started = Deferred()
    stopped = Deferred()
    proto = IOMeasureConsumer(started, stopped, _DTraceParser(self))
    command = [
        "/usr/sbin/dtrace",
        # process preprocessor macros
        "-C",
        # search for include targets in the source directory containing this file
        "-I", dirname(__file__),
        # suppress most implicitly generated output (which would mess up our parser)
        "-q",
        # load this script
        "-s", script
    ]
    if pid is not None:
        # make this pid the target
        command.extend(["-p", str(pid)])
    process = reactor.spawnProcess(proto, command[0], command)

    def eintr(reason):
        # Known flaky startup condition: retry the whole launch.
        reason.trap(DTraceBug)
        msg('Dtrace startup failed (%s), retrying.' % (
            reason.getErrorMessage().strip(), ))
        return self._startDTrace(script, pid)

    def ready(passthrough):
        # Once the dtrace process is ready, save the state and
        # have the stopped Deferred deal with the results.  We
        # don't want to do either of these for failed dtrace
        # processes.
        msg("dtrace tracking pid=%s" % (pid, ))
        self.dtraces[pid] = (process, proto)
        stopped.addCallback(self._cleanup, pid)
        return passthrough

    started.addCallbacks(ready, eintr)
    return started, stopped
def on_command_insert(self, cmd, info):
    """
    @type cmd: C{object}
    @param cmd: object which implements C{ICommand}

    @type info: C{object}
    @param info: C{IDeviceInfo} object which is receiving the command
    """
    LOG_MSG('Command {0} has been received for device {1}.'.format(
        cmd, info))

    def on_ok(result):
        # Copy status/result onto the command and push the update back.
        LOG_MSG('The command "{0}" successfully processed. Result: {1}.'.
                format(cmd, result))
        if isinstance(result, CommandResult):
            cmd.status = result.status
            cmd.result = result.result
        else:
            cmd.status = 'Success'
            cmd.result = result
        self.update_command(cmd, device_id=info.id, device_key=info.key)

    #
    def on_err(reason):
        # ``reason`` may be a plain Exception or a twisted Failure
        # (whose .value holds the wrapped result/exception).
        # NOTE(review): ``.message`` is Python-2-only on exceptions.
        LOG_ERR('Failed to process command "{0}". Reason: {1}.'.format(
            cmd, reason))
        if isinstance(reason, Exception):
            cmd.status = 'Failed'
            cmd.result = reason.message
        elif hasattr(reason, 'value'):
            if isinstance(reason.value, CommandResult):
                cmd.status = reason.value.status
                cmd.result = reason.value.result
            elif isinstance(reason.value, Exception):
                cmd.status = 'Failed'
                cmd.result = reason.value.message
            else:
                cmd.status = 'Failed'
                cmd.result = reason.value
        else:
            cmd.status = 'Failed'
            cmd.result = 'Unhandled Exception'
        self.update_command(cmd, device_id=info.id, device_key=info.key)

    #
    finished = Deferred()
    finished.addCallbacks(on_ok, on_err)
    try:
        self.handler.on_command(info.id, cmd, finished)
    except Exception as ex:
        err = DhError('Failed to invoke command {0}. Reason: {1}.'.format(
            cmd, ex.message))
        LOG_ERR(err.message)
        on_err(err)
def main(reactor, masterIP):
    """Ask the master for a UID, print the reply (success or failure),
    then stop the reactor."""
    uid_deferred = Deferred()

    def report_and_stop(msg):
        # Same handler on both sides of the chain: print and shut down.
        print(msg)
        reactor.stop()

    uid_deferred.addCallbacks(report_and_stop, report_and_stop)
    reactor.connectTCP(masterIP, MASTER_UID_PORT, UIDClient(uid_deferred))
    reactor.run()
def on_timeout(self):
    """Reschedule the retry if the circuit is still usable.

    Does nothing when no candidates remain or the circuit is closing;
    otherwise defers the retry through the request cache so it runs
    outside this timeout callback.
    """
    if self.candidates and self.circuit.state != CIRCUIT_STATE_CLOSING:
        def reschedule(_):
            self.retry_func(self.circuit, self.candidates)

        retry_deferred = Deferred()
        self.community.request_cache.register_anonymous_task("retry-later", retry_deferred, delay=0.0)
        # Cancellation of the re-registration is silently accepted.
        retry_deferred.addCallbacks(reschedule, lambda _: None)
def request_deferred(request):
    """Wrap a request inside a Deferred.

    This returns a Deferred whose first pair of callbacks are the request
    callback and errback. The Deferred also triggers when the request
    callback/errback is executed (ie. when the request is downloaded)
    """
    d = Deferred()
    if request.callback:
        d.addCallbacks(request.callback, request.errback)
    # Redirect the request's own callbacks into the Deferred so firing
    # the request fires the whole chain.
    request.callback, request.errback = d.callback, d.errback
    return d
def server_setup(self): """ Setup the web server. """ # TODO set up twisted to use gzip compression # Disable directory listings class FileNoDirectoryListings(File): def directoryListing(self): return ForbiddenResource() # root handler is a static web server self.root = FileNoDirectoryListings( os.path.abspath(self.config['server']["html_dir"])) self.root.processors = {'.rpy': script.ResourceScript} self.root.ignoreExt('.rpy') # TODO: config rdflib some day # register("json-ld", Serializer, "rdfliblocal.jsonld", "JsonLDSerializer") ## initialize handlers [handler(self) for handler in handlers.HANDLERS] ## start boxes self.register_boxes(self.root) self.appshandler = AppsMetaHandler(self) self.root.putChild('apps', self.appshandler) self.start() # load a web browser once the server has started def on_start(arg): logging.debug("Server started successfully.") try: reactor.callInThread( lambda empty: self.start_syncing(), None) # separately do indx syncing in a twisted thread if not self.config['no_browser']: import webbrowser webbrowser.open(self.server_url) except Exception as e: logging.debug("Couldn't load webbrowser: {0}".format(e)) def start_failed(arg): logging.debug("start_failed: " + str(arg)) # calls the web browser opening function above when the reactor has finished starting up d = Deferred() d.addCallbacks(on_start, start_failed) reactor.callWhenRunning(d.callback, "INDX HTTP startup") #@UndefinedVariable reactor.addSystemEventTrigger( "during", "shutdown", lambda *x: self.shutdown()) #@UndefinedVariable
def api_received(self, response):
    """Dispatch an API HTTP response: parse JSON on 200, otherwise read
    the body as text and report it via ``api_failed``."""
    if response.code == 200:
        result_proto = Deferred()
        result_proto.addCallbacks(self.api_succeed, self.api_failed)
        response.deliverBody(JsonDataConsumer(result_proto))
    else:
        def get_response_text(reason):
            self.api_failed(reason)
        response_defer = Deferred()
        # addBoth: the body text is the failure detail either way.
        response_defer.addBoth(get_response_text)
        response.deliverBody(TextDataConsumer(response_defer))
def _(d: Deferred) -> Future: f = Future() # type: Future[Any] def errback(failure: failure.Failure) -> None: try: failure.raiseException() # Should never happen, but just in case raise Exception("errback called without error") except: future_set_exc_info(f, sys.exc_info()) d.addCallbacks(f.set_result, errback) return f
def process_chain_both(callbacks: Iterable[Callable], errbacks: Iterable[Callable], input, *a, **kw) -> Deferred:
    """Return a Deferred built by chaining the given callbacks and errbacks.

    Callbacks and errbacks are paired positionally via ``zip`` (extra
    items in the longer iterable are ignored).  The chain is fired
    immediately: with ``errback`` when *input* is a Failure, otherwise
    with ``callback``.
    """
    d = Deferred()
    for cb, eb in zip(callbacks, errbacks):
        d.addCallbacks(
            callback=cb,
            errback=eb,
            callbackArgs=a,
            callbackKeywords=kw,
            errbackArgs=a,
            errbackKeywords=kw,
        )
    if isinstance(input, failure.Failure):
        d.errback(input)
    else:
        d.callback(input)
    return d
def success(self, response):
    """Handle an HTTP response from a command poll.

    2xx responses are parsed as JSON and forwarded to
    ``command_received``; anything else has its body read as text and
    reported through ``self.failure``.
    """
    LOG_MSG('Got command poll response from the server for device {0}.'.format(self.owner.info))
    if response.code in [200, 201] :
        def err(reason):
            LOG_ERR('Failed to parse command request response. Reason: <{0}>.'.format(reason))
            self.failure(reason)
        result = Deferred()
        result.addCallbacks(self.command_received, err)
        response.deliverBody(JsonDataConsumer(result))
    else :
        def txterrf(errtxt):
            LOG_ERR('Invalid response has been received during command polling. Reason: {0}.'.format(errtxt))
            self.failure(DhError(errtxt))
        self.get_response_text(response, txterrf)
def findNode(self, id, callback, errback=None): """ returns the contact info for node, or the k closest nodes, from the global table """ # get K nodes out of local table/cache, or the node we want nodes = self.table.findNodes(id) d = Deferred() if errback: d.addCallbacks(callback, errback) else: d.addCallback(callback) if len(nodes) == 1 and nodes[0].id == id: d.callback(nodes) else: # create our search state state = FindNode(self, id, d.callback) reactor.callFromThread(state.goWithNodes, nodes)
def findNode(self, id, callback, errback=None): """ returns the contact info for node, or the k closest nodes, from the global table """ # get K nodes out of local table/cache, or the node we want nodes = self.table.findNodes(id) d = Deferred() if errback: d.addCallbacks(callback, errback) else: d.addCallback(callback) if len(nodes) == 1 and nodes[0].id == id : d.callback(nodes) else: # create our search state state = FindNode(self, id, d.callback) reactor.callFromThread(state.goWithNodes, nodes)
def setup_read_queue(self, exchange, routing_key=None, callback=None,
                     queue_name=None, exclusive=False, durable=False,
                     auto_delete=True, no_ack=True, requeue_on_error=True,
                     requeue_timeout=120, read_error_handler=None,
                     autodeclare=True, autobind=True):
    '''
    if you need read queue support, you should call this method

    Stores the queue configuration on self, then starts the read loop
    once the connection is (or already was) established.  Returns the
    Deferred that fires when the loop is running.
    '''
    self.autodeclare = autodeclare
    self.autobind = autobind
    self.rq_enabled = True
    self.rq_exchange = exchange
    # Without an explicit routing key the route is chosen dynamically.
    if routing_key:
        self.rq_rk = routing_key
        self.rq_dynamic_route = False
    else:
        self.rq_dynamic_route = True
    # Without an explicit queue name a dynamic name is generated.
    if queue_name:
        self.rq_name = queue_name
    else:
        self.rq_dynamic = True
        self.change_rq_name()
    self.rq_exclusive = exclusive
    self.rq_durable = durable
    self.rq_auto_delete = auto_delete
    self.no_ack = no_ack
    self.rq_callback = callback
    self.requeue_on_error = requeue_on_error
    self.requeue_timeout = requeue_timeout
    self.read_error_handler = read_error_handler
    if not self.consumer_tag:
        self.consumer_tag = self.rq_name

    def _add_cb(_none):
        # Connection ready: kick off the message read loop.
        ret = self.client.on_read_loop_started()
        ret.addCallback(self.read_message_loop)
        ret.addErrback(self._error)
        return ret

    # Use the pending connection Deferred, or an already-fired stand-in
    # when the connection is established.
    if not self.connected.called:
        c = self.connected
    else:
        c = Deferred()
        c.callback(True)
    self.client.start_read_loop()
    c.addCallbacks(_add_cb, self._error)
    return c
def server_setup(self): """ Setup the web server. """ # TODO set up twisted to use gzip compression # Disable directory listings class FileNoDirectoryListings(File): def directoryListing(self): return ForbiddenResource() # root handler is a static web server self.root = FileNoDirectoryListings(os.path.abspath(self.config['server']["html_dir"])) #self.root.processors = {'.rpy': script.ResourceScript} #self.root.ignoreExt('.rpy') # TODO: config rdflib some day # register("json-ld", Serializer, "rdfliblocal.jsonld", "JsonLDSerializer") ## initialize handlers [handler(self) for handler in handlers.HANDLERS] ## start boxes self.register_boxes(self.root) ## XXX TODO temporaily remove for debugging self.appshandler = AppsMetaHandler(self) self.root.putChild('apps', self.appshandler) self.start() # load a web browser once the server has started def on_start(arg): logging.debug("Server started successfully.") try: reactor.callInThread(lambda empty: self.start_syncing(), None) # separately do indx syncing in a twisted thread if not self.config['no_browser']: import webbrowser webbrowser.open(self.server_url) except Exception as e: logging.debug("Couldn't load webbrowser: {0}".format(e)) def start_failed(arg): logging.debug("start_failed: "+str(arg)) # calls the web browser opening function above when the reactor has finished starting up d = Deferred() d.addCallbacks(on_start, start_failed) reactor.callWhenRunning(d.callback, "INDX HTTP startup") #@UndefinedVariable reactor.addSystemEventTrigger("during", "shutdown", lambda *x: self.shutdown()) #@UndefinedVariable
def on_timeout(self):
    """Handle a retry-request timeout: give up (removing the circuit)
    when closing, out of candidates, or out of tries; otherwise
    reschedule the retry outside this callback."""
    if self.circuit.state == CIRCUIT_STATE_CLOSING:
        return
    if not self.candidates or self.max_tries < 1:
        reason = 'timeout on RetryRequestCache (tries left: %d)' % self.max_tries
        self.community.remove_circuit(self.circuit.circuit_id, reason)
        return

    def retry_later(_):
        self.retry_func(self.circuit, self.candidates, self.max_tries)

    # Run the retry via an anonymous task so it executes outside the
    # timeout context; cancellation is silently accepted.
    later = Deferred()
    self.community.request_cache.register_anonymous_task("retry-later", later, delay=0.0)
    later.addCallbacks(retry_later, lambda _: None)
def remote_make_request(self, request_from_browser):
    """Inject a browser-originated request into the running Scrapy
    engine and return a Deferred firing with the processed response.

    :raises ConnectionAborted: when the engine is stopping or the
        spider has been closed
    """
    engine = self.crawler.engine
    if not engine.running:
        raise ConnectionAborted("Scrapy engine stopping")
    spider = self.crawler.spider
    if spider not in engine.open_spiders:
        raise ConnectionAborted("Spider closed")
    dfd = Deferred()
    dfd.addCallbacks(self.process_response, self.process_failure)
    # The scrapy request fires our Deferred on completion.
    scrapy_req = self._make_scrapy_request(request_from_browser,
                                           dfd.callback, dfd.errback)
    # Injected requests must bypass the dupe filter.
    assert scrapy_req.dont_filter
    engine.crawl(scrapy_req, spider)
    return dfd
def _startDTrace(self, script, pid):
    """
    Launch a dtrace process.

    @param script: A C{str} giving the path to the dtrace program to
        run.
    @param pid: A C{int} to target dtrace at a particular process, or
        C{None} not to.

    @return: A two-tuple of L{Deferred}s.  The first will fire when the
        dtrace process is ready to go, the second will fire when it
        exits.
    """
    started = Deferred()
    stopped = Deferred()
    proto = IOMeasureConsumer(started, stopped, _DTraceParser(self))
    command = [
        "/usr/sbin/dtrace",
        # process preprocessor macros
        "-C",
        # search for include targets in the source directory containing this file
        "-I", dirname(__file__),
        # suppress most implicitly generated output (which would mess up our parser)
        "-q",
        # load this script
        "-s", script]
    if pid is not None:
        # make this pid the target
        command.extend(["-p", str(pid)])
    process = reactor.spawnProcess(proto, command[0], command)

    def eintr(reason):
        # Known flaky startup condition: retry the whole launch.
        reason.trap(DTraceBug)
        msg('Dtrace startup failed (%s), retrying.' % (
            reason.getErrorMessage().strip(),))
        return self._startDTrace(script, pid)

    def ready(passthrough):
        # Once the dtrace process is ready, save the state and
        # have the stopped Deferred deal with the results.  We
        # don't want to do either of these for failed dtrace
        # processes.
        msg("dtrace tracking pid=%s" % (pid,))
        self.dtraces[pid] = (process, proto)
        stopped.addCallback(self._cleanup, pid)
        return passthrough

    started.addCallbacks(ready, eintr)
    return started, stopped
def on_command_insert(self, cmd, info):
    """
    @type cmd: C{object}
    @param cmd: object which implements C{ICommand}

    @type info: C{object}
    @param info: C{IDeviceInfo} object which is receiving the command
    """
    LOG_MSG('Command {0} has been received for device {1}.'.format(cmd, info))

    def on_ok(result):
        # Copy status/result onto the command and push the update back.
        LOG_MSG('The command "{0}" successfully processed. Result: {1}.'.format(cmd, result))
        if isinstance(result, CommandResult) :
            cmd.status = result.status
            cmd.result = result.result
        else :
            cmd.status = 'Success'
            cmd.result = result
        self.update_command(cmd, device_id = info.id, device_key = info.key)

    #
    def on_err(reason):
        # ``reason`` may be a plain Exception or a twisted Failure
        # (whose .value holds the wrapped result/exception).
        # NOTE(review): ``.message`` is Python-2-only on exceptions.
        LOG_ERR('Failed to process command "{0}". Reason: {1}.'.format(cmd, reason))
        if isinstance(reason, Exception) :
            cmd.status = 'Failed'
            cmd.result = reason.message
        elif hasattr(reason, 'value') :
            if isinstance(reason.value, CommandResult) :
                cmd.status = reason.value.status
                cmd.result = reason.value.result
            elif isinstance(reason.value, Exception) :
                cmd.status = 'Failed'
                cmd.result = reason.value.message
            else :
                cmd.status = 'Failed'
                cmd.result = reason.value
        else :
            cmd.status = 'Failed'
            cmd.result = 'Unhandled Exception'
        self.update_command(cmd, device_id = info.id, device_key = info.key)

    #
    finished = Deferred()
    finished.addCallbacks(on_ok, on_err)
    try :
        self.handler.on_command(info.id, cmd, finished)
    except Exception as ex:
        err = DhError('Failed to invoke command {0}. Reason: {1}.'.format(cmd, ex.message))
        LOG_ERR(err.message)
        on_err(err)
class ConsumerQueue(object):
    """Sequentially consume items from a DeferredQueue.

    Subclasses implement ``consume`` (which may return a Deferred to
    pause the loop until the item is handled) and ``error``.
    """
    def __init__(self, stop_on_error=False, empty=None):
        # stop_on_error: halt the loop when an item fails
        # empty: optional zero-arg callable invoked when the queue drains
        self.stop_on_error = stop_on_error
        self.empty = empty
        self.queue = DeferredQueue()
        self.size = 0
        self.running = True
        # Placeholder so stop() can always cancel something.
        self._deferred = Deferred()

    def _consume_next(self, *args):
        """Wait for the next item unless the queue has been stopped."""
        if not self.running:
            return
        self._deferred = self.queue.get()
        self._deferred.addCallbacks(self._consumer, self._error)

    def _consumer(self, item):
        """Handle one item, then schedule the next iteration — after the
        item's own Deferred fires, if consume returned one."""
        self.size -= 1
        r = self.consume(item)
        if self.size == 0 and self.empty is not None:
            self.empty()
        if isinstance(r, Deferred):
            r.addCallbacks(self._consume_next, self._consume_next)
        else:
            self._consume_next()

    def _error(self, fail):
        """Report the failure; keep consuming unless stop_on_error."""
        self.error(fail)
        if not self.stop_on_error:
            self._consume_next()

    def add(self, item):
        """Enqueue *item* for consumption."""
        self.size += 1
        self.queue.put(item)

    def consume(self, item):
        raise NotImplementedError

    def error(self, fail):
        raise NotImplementedError

    def start(self):
        """Begin (or resume) the consume loop."""
        self.running = True
        self._consume_next()

    def stop(self):
        """Halt the loop and cancel the outstanding queue.get()."""
        self.running = False
        self._deferred.cancel()
def test_complex(self, runtime):
    """Broadcast a triple of values to all receivers and assert every
    receiver sees ['7', '9', '13'].

    NOTE(review): uses Python-2-only tuple parameter unpacking in
    ``exchange``.
    """
    def check(ls):
        for x, v in ls:
            self.assertEquals(runtime.list_str(v), ['7', '9', '13'])

    receivers = [1, 2, 3]

    def exchange((xi, rhoi1, rhoi2)):
        # Send share to all receivers.
        ds = runtime.broadcast(receivers, receivers,
                               str((str(xi), str(rhoi1), str(rhoi2))))
        dls = DeferredList(ds)
        dls.addCallbacks(check, runtime.error_handler)
        return dls

    # Fire the chain synchronously with the test triple.
    result = Deferred()
    result.addCallbacks(exchange, runtime.error_handler)
    result.callback((7, 9, 13))
    return result
def parseTransferRequest(self, request):
    """Parse an inbound transfer request and return a Deferred firing
    with a JSON status string ('ok' or 'error')."""

    def onParsed(data):
        # Queue the parsed request and acknowledge success.
        self._inboundRequests.append(data)
        return sortedDump({'error': None, 'status': 'ok'})

    def onFailure(fail):
        log.msg(fail)
        return sortedDump({'error': 'error parsing request', 'status': 'error'})

    d = Deferred()
    d.addCallback(self._fileRequestParser)
    d.addCallbacks(onParsed, onFailure)
    # Kick the chain off synchronously with the request payload.
    d.callback((request.args, self._downloadTo,))
    # was returning not done yet
    return d
def __collect_resource(self, response):
    """Collect the body of `response` and route it to __success/__error.

    Returns the Deferred that fires once the body is fully delivered, so
    callers can chain on completion (previously the Deferred was created
    but unreachable from outside this method).
    """

    class ResourceCollector(Protocol):
        # Minimal body-collecting protocol: buffer every chunk and fire
        # the supplied Deferred when the connection closes.
        def __init__(self, finished_inner):
            self.finished = finished_inner
            self.response = b""

        def dataReceived(self, data):
            self.response += data

        def connectionLost(self, reason):
            # NOTE(review): fires the callback regardless of `reason`, so
            # a truncated body is delivered as success — confirm intended.
            self.finished.callback(self.response)

    finished = Deferred()
    finished.addCallbacks(self.__success, self.__error)
    response.deliverBody(ResourceCollector(finished))
    # Bug fix: return the Deferred so callers can observe completion.
    return finished
def get_node_info(self):
    """Fetch node info over XML-RPC, cache the sample, and return a Deferred.

    The Deferred fires with the info dict (handled by self.success) or
    errbacks with the exception (handled by self.failed).  Returns None
    when the node is inactive (self.status is falsy).
    """
    if self.status:
        d = Deferred()
        d.addCallbacks(self.success, self.failed)
        try:
            # NOTE(review): xmlrpclib calls are synchronous and will block
            # the reactor thread while the RPC is in flight.
            rpc_server = xmlrpclib.Server("http://{}".format(
                self.rpc_address))
            rpc_info = rpc_server.get_node_info()
            rpc_info['name'] = self.name
            cache = self.factory.OnlineProtocol.cache
            # Keep at most `length` samples per node: drop the oldest
            # entry before appending once the window is full.
            if cache.get(self.name) and \
                    len(cache.get(self.name)) == self.factory.OnlineProtocol.length:
                cache.get(self.name).pop(0)
            cache.get(self.name).append(rpc_info)
            d.callback(rpc_info)
        except Exception as e:
            d.errback(e)
        # Bug fix: the Deferred was built and fired but never returned,
        # so callers had no way to observe the result.
        return d
def _find_nearest(cars):
    """Return an already-fired Deferred carrying the `count` nearest cars.

    Each result entry is a (car, distance) pair; `count`, `location` and
    `request` come from the enclosing scope.
    """
    # Compute each distance exactly once (the original evaluated
    # car.distance(location) twice per selected car), then keep the
    # `count` smallest pairs by distance.  nsmallest is stable, so the
    # selection and ordering match the original.
    pairs = [(car, car.distance(location)) for car in cars]
    result = nsmallest(count, pairs, key=lambda pair: pair[1])
    # One Report instance serves both outcomes (only one callback runs).
    reporter = Report(request)
    d = Deferred()
    d.callback(result)
    return d.addCallbacks(reporter.report_nearest_cars, reporter.report_error)
def _gotResponse(self, response, message, uri):
    """Inspect an HTTP response: HTML bodies are collected asynchronously
    and handed to _successResult; anything else is reported immediately
    via _prepareResult."""
    if response.headers.hasHeader('content-type'):
        # Strip parameters (e.g. "; charset=utf-8") and normalise case.
        mimeType = response.headers.getRawHeaders('content-type')[0].split(';')[0].strip().lower()
        if mimeType == 'text/html':
            d = Deferred()
            # NOTE(review): no errback is attached — a failure while
            # collecting the body would go unhandled; confirm intended.
            d.addCallbacks(
                callback = self._successResult,
                callbackArgs = (message, uri)
            )
            response.deliverBody(_BodyCollector(d))
            return
        self._prepareResult(uri, mimeType, message)
        return
    # No content-type header at all: report with an 'Unknown' type.
    self._prepareResult(uri, 'Unknown', message)
def _buzzing(self, buzzing_ids, end_of_question):
    """Pick one buzzing player at random, notify all players, and wait
    up to ANSWER_TIME_OUT for that player's answer."""
    # Random tie-break among simultaneous buzzes.
    random.shuffle(buzzing_ids)
    buzzing_id = buzzing_ids[0]
    green_player = self.players[buzzing_id]
    logger.info('[buzzing] player {} answering'.format(green_player.name))
    self.info_text += NEW_LINE + BADGE_BUZZ
    self.info_text += ' {}: '.format(bodify(green_player.name))
    self.bell_positions.append(self.position_map[self.position])
    msg = {'qid': self.qid,
           'length': ANSWER_TIME_OUT,
           'info_text': self.info_text}
    # The buzzing player gets the "green" variant of the message...
    msg['type'] = MSG_TYPE_BUZZING_GREEN
    green_player.sendMessage(msg)
    green_player.buzzed = True
    green_player.position_buzz = self.position
    # ...every other player gets the same payload flagged "red".
    msg['type'] = MSG_TYPE_BUZZING_RED
    for player in self.players.values():
        if player.uid != buzzing_id:
            player.sendMessage(msg)
    self.latest_buzzing_msg = msg
    # True once the green player's answer message has arrived.
    condition = partial(self.check_player_response,
                        green_player, 'type', MSG_TYPE_BUZZING_ANSWER)

    def callback(x):
        self._buzzing_after(buzzing_id, end_of_question, timed_out=False)

    def errback(x):
        logger.info('[buzzing] player {} answer time out'.format(
            green_player.name))
        self._buzzing_after(buzzing_id, end_of_question, timed_out=True)

    if condition():
        # Answer already arrived; proceed without waiting.
        callback(None)
    else:
        # Otherwise wait with a timeout; the deferred is presumably fired
        # elsewhere when `condition` becomes true — see self.deferreds users.
        deferred = Deferred()
        deferred.addTimeout(ANSWER_TIME_OUT, reactor)
        deferred.addCallbacks(callback, errback)
        self.deferreds.append((deferred, condition))
def main():
    """Connect to a running bitcoin node and dump the best block chain."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]\n"+
                                   "Connects to a running bitcoin node and "+
                                   "prints all or part of the best-block-chain.")
    parser.add_option("--host", dest="host", default="127.0.0.1",
                      help="IP/hostname to connect to (default: %default)")
    parser.add_option("--port", dest="port", default="8333", type="int",
                      help="port to connect to (default: %default)")
    parser.add_option("--testnet", dest="testnet", action="store_true", default=False,
                      help="Speak testnet protocol")
    parser.add_option("--n", dest="n_blocks", type="int", default=99999,
                      help="Dump this many blocks (default: all)")
    parser.add_option("--verbose", dest="verbose", action="store_true", default=False,
                      help="Print all messages sent/received")
    parser.add_option("--version", dest="version", default="0.4.0",
                      help="Version of the protocol to speak")
    (options, args) = parser.parse_args()

    # Convert string like "1.0" or "0.3.24.0" to integer version where 1.0.0.0 == 1000000
    version = sum([int(j)*(100**(3-i))
                   for (i, j) in enumerate(options.version.split("."))])

    def print_blocks(blocks):
        for b in blocks:
            # print(x) with a single argument behaves identically in
            # Python 2 and 3; the bare `print b` statement was py2-only.
            print(b)

    def start_dumper(connection):
        d = Deferred()
        d.addCallback(print_blocks)
        d.addCallback(lambda x: reactor.stop())
        connection.getBlocks(None, options.n_blocks, d)

    def connection_failed(reason):
        # Bug fix: `"Connection failed: "+reason` raised TypeError because
        # `reason` is a Failure object, not a string; format it instead.
        print("Connection failed: {0}".format(reason))
        reactor.stop()

    d = Deferred()
    d.addCallbacks(start_dumper, connection_failed)
    factory = BitcoinClientFactory(options.testnet, version, d)
    reactor.connectTCP(options.host, options.port, factory)
    reactor.run()
def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False):
    """Observe `deferred`, recording its result and fanning it out to
    every observer Deferred held in `self._observers`.

    object.__setattr__ is used for every attribute write — presumably the
    enclosing class makes normal assignment fail; confirm against its
    __setattr__ definition.
    """
    object.__setattr__(self, "_deferred", deferred)
    object.__setattr__(self, "_result", None)
    object.__setattr__(self, "_observers", set())

    def callback(r):
        # Record the result *before* draining observers so late
        # subscribers can read self._result directly.
        object.__setattr__(self, "_result", (True, r))

        while self._observers:
            observer = self._observers.pop()
            try:
                observer.callback(r)
            except Exception as e:
                # One misbehaving observer must not starve the others.
                logger.exception(
                    "%r threw an exception on .callback(%r), ignoring...",
                    observer,
                    r,
                    exc_info=e,
                )
        # Pass the value through unchanged down the wrapped chain.
        return r

    def errback(f):
        object.__setattr__(self, "_result", (False, f))

        while self._observers:
            # This is a little bit of magic to correctly propagate stack
            # traces when we `await` on one of the observer deferreds.
            f.value.__failure__ = f
            observer = self._observers.pop()
            try:
                observer.errback(f)
            except Exception as e:
                logger.exception(
                    "%r threw an exception on .errback(%r), ignoring...",
                    observer,
                    f,
                    exc_info=e,
                )

        if consumeErrors:
            # Swallow the failure so it is not reported as unhandled.
            return None
        else:
            # Propagate the failure down the wrapped deferred's chain.
            return f

    deferred.addCallbacks(callback, errback)
def parseTransferRequest(self, request):
    """Run an inbound request through the parser chain; the returned
    Deferred fires with a JSON status string."""

    def accepted(parsed):
        # Record the parsed request, then acknowledge.
        self._inboundRequests.append(parsed)
        return sortedDump({'error': None, 'status': 'ok'})

    def rejected(failure):
        log.msg(failure)
        return sortedDump({
            'error': 'error parsing request',
            'status': 'error'
        })

    chain = Deferred()
    chain.addCallback(self._fileRequestParser)
    chain.addCallbacks(accepted, rejected)
    # Fire the chain immediately with the request payload.
    chain.callback((
        request.args,
        self._downloadTo,
    ))
    # was returning not done yet
    return chain
def _getNotificationReferral(self):
    """Connect to the MSN dispatch server to obtain a notification-server
    referral, guarded by a 30-second timeout."""
    dispatchFactory = msn.DispatchFactory()
    dispatchFactory.userHandle = self.username
    dispatchFactory.protocol = DispatchClient

    d = Deferred()
    dispatchFactory.d = d
    d.addCallbacks(self._gotNotificationReferral, self.connectionFailed)

    def timeout():
        # Give up on the referral: errback the pending Deferred (unless it
        # already fired) and tear the session down.
        self.timeout = None
        dispatchFactory.d = None
        if not d.called:
            d.errback(Exception("Timeout"))
            self.logOut()  # Clean up everything

    self.timeout = reactor.callLater(30, timeout)

    self.connectors.append(
        reactor.connectTCP("messenger.hotmail.com", 1863, dispatchFactory,
                           bindAddress=(MSNConnection.BINDADDRESS, 0)))
    LogEvent(INFO, self.ident)
class RemoteAmarokServer(LineReceiver):
    """Line-based TCP server that forwards known Amarok commands to a
    worker thread and writes the result back to the client."""
    # Sentinel line that closes the connection.
    end = 'quit'

    def __init__(self):
        # Deferred armed for the next incoming line; set in createDeferred.
        self.d = None

    def connectionMade(self):
        self.client_ip = self.transport.getPeer()
        log.msg('Connection from: %s' % self.client_ip)
        self.createDeferred()

    def lineReceived(self, line):
        # NOTE(review): if a second line arrives before createDeferred()
        # re-arms self.d, this callback raises AlreadyCalledError — confirm
        # clients send one command at a time.
        log.msg('Line received from: %s - %s' % (self.client_ip, line))
        self.d.callback(line)

    def process_received(self, data):
        # Dispatch one received command; falsy data is ignored.
        print 'Processing received:', data
        if data:
            if data == self.end:
                print 'Quit'
                self.transport.loseConnection()
                return
            if data in AM_COMMANDS:
                # Run the command off the reactor thread.
                d = threads.deferToThread(run_amarok_cmd, data)
                d.addCallback(self.send_result)
            else:
                log.msg('Error in cmd: %s' % data)
                self.sendLine('Error: No such command \'%s\'' % data)
        # Re-arm for the next line (also after empty/unknown input).
        self.createDeferred()

    def send_result(self, result):
        # Only truthy results are echoed back to the client.
        if result:
            self.sendLine(str(result))

    def err_handler(self, failure):
        # Swallow AttributeError; any other failure propagates.
        failure.trap(AttributeError)

    def createDeferred(self):
        # Fresh Deferred wired to the command processor / error handler.
        self.d = Deferred()
        self.d.addCallbacks(self.process_received, self.err_handler)