def OnTimeout(self):
    """On a timeout condition, raise an error"""
    if not self.finished:
        self.finished = 1
        self.result = defer.TimeoutError('SNMP request timed out')
        self.success = 0
        reactor.crash()
def gotOutput(self, node, sender, data):
    global success
    if sender == "retriever":
        return
    if self.expected.get(node) == None:
        self.expected[node] = 0
    if (self.expected.get(node, int(data)) != int(data)):
        print "Got " + data.rstrip() + " instead of " + \
            str(self.expected[node]) + " from " + node.name
        success = False
        reactor.crash()
    if not sender in node.peers:
        print "Sender " + sender + " not in node peers"
        success = False
        reactor.crash()
    self.expected[node] = int(data) + 1
    if self.expected[node] == 10:
        self.done += 1
def gotOutput(self, node, sender, data):
    global success
    if self.expected.get(node) == None:
        self.expected[node] = 0
    if (self.expected.get(node, int(data)) != int(data)):
        print "Got " + data.rstrip() + " instead of " + \
            str(self.expected[node]) + " from " + node.name
        success = False
        reactor.crash()
    if not sender in node.peers:
        print "Sender " + sender + " not in node peers"
        success = False
        reactor.crash()
    self.expected[node] = int(data) + 1
    if self.expected[node] == 10:
        self.done += 1
        if self.done == NUMNODES - 2:
            for x in self.nodes:
                x.stats()
            self.nodes[-2].disconnect()
def tearDownClass(cls):
    for p in cls.processes:
        p.terminate()
    cls.network.stop()
    reactor.crash()
    simulaqron_settings.default_settings()
def requestResource(self):
    request = coap.Message(code=coap.GET)
    # Send request to "coap://coap.me:5683/test, raspberry 168.188.124.196"
    global result
    result = fire.get('/control_HW/ONOFF', None)
    if result == "alert":
        print 'check alert'
        request.opt.uri_path = ('counter', )
        # Danger = clientPUT.Agent(self)
        # Concrete handling for the Alert case is still being worked out;
        # for now the motor-rotation path is used.
    elif result == "ON" or result == "on":
        # Led on
        print "check on"
        reactor.crash()
    elif result == "OFF" or result == "off":
        # Led off
        print "check off"
    request.opt.uri_path = ('counter', )
    request.opt.observe = 0
    request.remote = (ip_address("168.188.124.196"), coap.COAP_PORT)
    d = protocol.request(request, observeCallback=self.printResponse)
    d.addCallback(self.printResponse)
    d.addErrback(self.noResponse)
def gotOutput(self, node, sender, data):
    global success
    if self.expected.get(node) == None:
        self.expected[node] = 0
    if (self.expected.get(node, int(data)) != int(data)):
        print "Got " + data.rstrip() + " instead of " + \
            str(self.expected[node]) + " from " + node.name
        success = False
        reactor.crash()
    if not sender in node.peers:
        print "Sender " + sender + " not in node peers"
        success = False
        reactor.crash()
    self.expected[node] = int(data) + 1
    if self.expected[node] == 10:
        self.done += 1
        if self.done == NUMNODES - 1:
            for x in self.nodes:
                x.stats()
            self.nodes[-1].disconnect()
def receiveError(self, reasonCode, desc):
    global theTest
    reactor.crash()
    self.expectedLoseConnection = 1
    if not self.allowedToError:
        theTest.fail('got disconnect for %s: reason %s, desc: %s' %
                     (self, reasonCode, desc))
def OnTimeout( self ):
    """On a timeout condition, raise an error"""
    if not self.finished:
        self.finished = 1
        self.result = defer.TimeoutError('SNMP request timed out')
        self.success = 0
        reactor.crash()
def command_received(self, clm, cmd):
    try:
        self.file_out.write(cmd + '\n')
        self.file_out.flush()
    except:
        self.rval = 1
        reactor.crash()
        return
def results_received(results):
    global external_command
    external_command.shutdown()
    # Give threads time to finish
    sleep(0.05)
    if not results == ('foo', 'bar'):
        exit(1)
    reactor.crash()
def _failTest():
    fac.proto.transport.loseConnection()
    cfac.client.transport.loseConnection()
    reactor.iterate(0.1)
    reactor.iterate(0.1)
    reactor.iterate(0.1)
    reactor.crash()
    self.fail('test took too long')
def subtest_done(self, subtest):
    self.nsubtests_running -= 1
    if subtest.rval == 0:
        self.rval -= 1
    if self.nsubtests_running == 0:
        if self.rval == 1:
            self.rval = 0
        reactor.crash()
def _timer(self):
    self.print_update()
    if reactor._stopped:  # pylint: disable=no-member
        reactor.crash()  # pylint: disable=no-member
    elif self.pending_count + self.running_count > 0:
        deferLater(reactor, 0.10, self._timer)
    else:
        reactor.stop()  # pylint: disable=no-member
def getChannel(self, ct, ws, mp, d):
    if ct != 'session':
        global theTest
        theTest.fail('should not get %s as a channel type' % ct)
        reactor.crash()
    return SSHTestServerSession(remoteWindow = ws,
                                remoteMaxPacket = mp,
                                conn = self)
def exec_test_deferred (funs, params, protocol=None, timeout=None):
    colourer = None

    if sys.stdout.isatty():
        colourer = install_colourer()

    queue = servicetest.IteratingEventQueue(timeout)
    queue.verbose = (
        os.environ.get('CHECK_TWISTED_VERBOSE', '') != ''
        or '-v' in sys.argv)

    bus = dbus.SessionBus()

    sim = Simulator()

    bus.add_signal_receiver(
        lambda *args, **kw: queue.append(
            Event('dbus-signal',
                  path=unwrap(kw['path']),
                  signal=kw['member'],
                  args=map(unwrap, args),
                  interface=kw['interface'])),
        None,       # signal name
        None,       # interface
        None,
        path_keyword='path',
        member_keyword='member',
        interface_keyword='interface',
        byte_arrays=True
        )

    try:
        for f in funs:
            conn = make_connection(bus, queue.append, params)
            f(queue, bus, conn)
    except Exception:
        import traceback
        traceback.print_exc()

    try:
        if colourer:
            sys.stdout = colourer.fh

        reactor.crash()

        # force Disconnect in case the test crashed and didn't disconnect
        # properly. We need to call this async because the BaseIRCServer
        # class must do something in response to the Disconnect call and if we
        # call it synchronously, we're blocking ourself from responding to the
        # quit method.
        servicetest.call_async(queue, conn, 'Disconnect')

        if 'RING_TEST_REFDBG' in os.environ:
            # we have to wait for the timeout so the process is properly
            # exited and refdbg can generate its report
            time.sleep(5.5)
    except dbus.DBusException:
        pass
def _free(x):
    wd.tasks.discard(tid)
    del self.deferreds[tid]
    if wd.exclusive and wd.tasks:
        log.critical('FATAL: impossible happened: worker was '
                     'exclusive, but still has tasks left. Aborting.')
        reactor.crash()
    wd.exclusive = False
    return x
def buildProtocol(self, addr):
    if hasattr(self, 'proto'):
        global theTest
        reactor.crash()
        theTest.fail('connected twice to factory')
    self.proto = SSHTestServer()
    self.proto.supportedPublicKeys = self.privateKeys.keys()
    self.proto.factory = self
    return self.proto
def writeSomething(self):
    if self.disconnected:
        if not self.finished:
            self.fail()
        else:
            reactor.crash()
    if not self.disconnected:
        self.transport.write('foo')
        reactor.callLater(1, self.writeSomething)
def exec_test_deferred(funs, params, protocol=None, timeout=None):
    colourer = None

    if sys.stdout.isatty():
        colourer = install_colourer()

    queue = servicetest.IteratingEventQueue(timeout)
    queue.verbose = (os.environ.get('CHECK_TWISTED_VERBOSE', '') != ''
                     or '-v' in sys.argv)

    bus = dbus.SessionBus()

    sim = Simulator()

    bus.add_signal_receiver(
        lambda *args, **kw: queue.append(
            Event('dbus-signal',
                  path=unwrap(kw['path']),
                  signal=kw['member'],
                  args=map(unwrap, args),
                  interface=kw['interface'])),
        None,       # signal name
        None,       # interface
        None,
        path_keyword='path',
        member_keyword='member',
        interface_keyword='interface',
        byte_arrays=True)

    try:
        for f in funs:
            conn = make_connection(bus, queue.append, params)
            f(queue, bus, conn)
    except Exception:
        import traceback
        traceback.print_exc()

    try:
        if colourer:
            sys.stdout = colourer.fh

        reactor.crash()

        # force Disconnect in case the test crashed and didn't disconnect
        # properly. We need to call this async because the BaseIRCServer
        # class must do something in response to the Disconnect call and if we
        # call it synchronously, we're blocking ourself from responding to the
        # quit method.
        servicetest.call_async(queue, conn, 'Disconnect')

        if 'RING_TEST_REFDBG' in os.environ:
            # we have to wait for the timeout so the process is properly
            # exited and refdbg can generate its report
            time.sleep(5.5)
    except dbus.DBusException:
        pass
def stop(ignored=None, connector=None):
    if isinstance(ignored, Exception):
        raise ignored
    if reactor.running:
        if getattr(reactor, 'threadpool', None) is not None:
            reactor.threadpool.stop()
            reactor.threadpool = None
        reactor.crash()
    if connector:
        connector.disconnect()
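# A minimal usage sketch (an assumption, not taken from the project the
# stop() helper above belongs to): such a teardown helper is typically
# attached with addBoth so it runs whether the test deferred succeeds or
# fails; the extra positional argument is forwarded as the connector.
from twisted.internet import reactor, task

def run_example():
    # hypothetical deferred that fires after one second
    d = task.deferLater(reactor, 1.0, lambda: "done")
    # addBoth forwards extra args, so stop(result, connector) is called here
    d.addBoth(stop, None)
    reactor.run()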
def _free(x):
    wd.tasks.discard(tid)
    del self.deferreds[tid]
    if wd.is_running_cpu_exec and wd.tasks:
        log.critical(
            'FATAL: impossible happened: worker was running '
            'cpu-exec job, but still has tasks left. Aborting.')
        reactor.crash()
    wd.is_running_cpu_exec = False
    return x
def timedOut(self):
    """I'm called when the safety timer expires indicating test
    probably won't complete"""
    # FIXME: how do we cancel this test and cleanup remaining deferreds?
    self.pending_deferreds = []
    if debug:
        print "**** timedOut callback, test did not complete"
    self.fail("Safety timeout callback ... test did not complete")
    reactor.crash()
def start_if_not_running(running_daemons):
    if running_daemons:
        error("ERROR: The following daemons are already running: %s"
              % (", ".join(x.program for x in running_daemons)))
        self.exit_code = 1
        reactor.crash()  # so stopService isn't called.
        return
    self._daemonize()
    info("Watchdog watching for daemons.")
    return self.watchdog.start()
def gotReply(reply):
    if reply != [b"REP: REQ1"]:
        print("Unexpected reply: %r" % (reply, ))
        global exitCode
        exitCode = 1
        reactor.crash()
        return
    print("OK")
    reactor.crash()
def got_result(self, result):
    if self.fout != None:
        try:
            self.fout.write('%s\n' % result)
            self.fout.flush()
        except:
            self.rval = 1
            reactor.crash()
            return
    self.responses.append(result)
    self.issue_next_cmd()
def got_result(self, result):
    if self.fout != None:
        try:
            self.fout.write("%s\n" % result)
            self.fout.flush()
        except:
            self.rval = 1
            reactor.crash()
            return
    self.responses.append(result)
    self.issue_next_cmd()
def issue_next_cmd(self):
    if self.commands != None:
        if len(self.commands) == 0:
            reactor.crash()
            return
        command = self.commands.pop(0)
    else:
        command = self.fin.readline()
        if command == None or len(command) == 0:
            reactor.crash()
            return
    self.rc.send_command(command, self.got_result)
def stop(self):
    self._log.debug( 'stop' )
    from twisted.internet import reactor
    reactor.callFromThread( reactor.stop )
    maxSleep = 15
    while self.__running and maxSleep > 0:
        time.sleep(1)
        maxSleep = maxSleep - 1
    reactor.crash()
    self._tlog.stop()
    self._tlog = None
def on_timeout(d):
    e = defer.TimeoutError("(%s) still running at %s secs"
                           % (method.__name__, timeout))
    f = failure.Failure(e)
    try:
        d.errback(f)
    except defer.AlreadyCalledError:
        # if the deferred has been called already but the *back chain
        # is still unfinished, crash the reactor and report timeout
        # error ourself.
        reactor.crash()
        raise
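# A hedged sketch of the wiring usually found around a timeout callback like
# on_timeout above (the scheduling shown here is an assumption, not code from
# that project): the callback is armed with reactor.callLater and the timer
# is cancelled if the deferred fires first.
from twisted.internet import reactor, defer

def add_timeout(d, timeout):
    def on_timeout(d):
        try:
            d.errback(defer.TimeoutError("still running at %s secs" % timeout))
        except defer.AlreadyCalledError:
            # the deferred fired already but its callback chain never finished
            reactor.crash()

    delayed_call = reactor.callLater(timeout, on_timeout, d)

    def cancel_timer(result):
        # disarm the timer when the deferred completes before the timeout
        if delayed_call.active():
            delayed_call.cancel()
        return result

    d.addBoth(cancel_timer)
    return d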
def newNode(self, data):
    MeshNode.newNode(self, data)
    print data + " joined"
    if (data == failnode.name):
        m.removeMeshNode(failnode)
        n = MeshNode("joinnode", m)
        m.addMeshNode(n)
        m.connect_duplex(self, n, 1024, 50, 0.30)
    if (data == "joinnode"):
        global success
        success = True
        reactor.crash()
def quit(sig, frame):
    print "Received signal %s, quitting" % sig
    global masterProcess
    global slaveProcesses
    for process in [masterProcess] + slaveProcesses:
        process.terminate()
    try:
        reactor.crash()
    except:
        pass
def _poll(self):
    """
    even boards with no inputs need some polling to see if they're still ok
    """
    try:
        self._pollWork()
    except serial.SerialException:
        reactor.crash()
        raise
    except Exception as e:
        import traceback
        traceback.print_exc()
        log.warn("poll: %r" % e)
def request_exit_status(self, status):
    status = struct.unpack('>L', status)[0]
    if status == 0:
        global theTest
        theTest.fail('false exit status was 0')
        reactor.crash()
    self.conn.results += 1
    log.msg('finished false')
    if self.conn.results == 3:  # all tests run
        self.conn.transport.expectedLoseConnection = 1
        theTest.fac.proto.expectedLoseConnection = 1
        self.loseConnection()
        reactor.crash()
    return 1
def _failTest():
    try:
        os.kill(p.transport.pid, 9)
    except OSError:
        pass
    try:
        fac.proto.transport.loseConnection()
    except AttributeError:
        pass
    reactor.iterate(0.1)
    reactor.iterate(0.1)
    reactor.iterate(0.1)
    reactor.crash()
    p.done = 1
    self.fail('test took too long')
def signal_callback(self, signum, frame):
    try:
        try:
            self.unload()
        except Exception:
            self.logger.exception(_("Error while unloading!"))
        try:
            reactor.stop()
        except Exception:
            try:
                reactor.crash()
            except Exception:
                pass
    except Exception:
        exit(0)
def _panic(self, entity):
    print "\x1b[41m  ____ __  __       _      _            _           _ \x1b[0m"
    print "\x1b[41m / ___|  \/  |   __| | ___| |_ ___  ___| |_ ___  __| |\x1b[0m"
    print "\x1b[41m| |  _| |\/| |  / _` |/ _ \ __/ _ \/ __| __/ _ \/ _` |\x1b[0m"
    print "\x1b[41m| |_| | |  | | | (_| |  __/ ||  __/ (__| ||  __/ (_| |\x1b[0m"
    print "\x1b[41m \____|_|  |_|  \__,_|\___|\__\___|\___|\__\___|\__,_|\x1b[0m"
    all_entities = self._client.world.entities
    while True:
        print entity
        if not isinstance(entity, Item):
            break
        parent = entity.parent_serial
        if parent is None or parent not in all_entities:
            break
        entity = all_entities[parent]
    reactor.crash()
def _panic(self, entity):
    print("\x1b[41m  ____ __  __       _      _            _           _ \x1b[0m")
    print("\x1b[41m / ___|  \/  |   __| | ___| |_ ___  ___| |_ ___  __| |\x1b[0m")
    print("\x1b[41m| |  _| |\/| |  / _` |/ _ \ __/ _ \/ __| __/ _ \/ _` |\x1b[0m")
    print("\x1b[41m| |_| | |  | | | (_| |  __/ ||  __/ (__| ||  __/ (_| |\x1b[0m")
    print("\x1b[41m \____|_|  |_|  \__,_|\___|\__\___|\___|\__\___|\__,_|\x1b[0m")
    all_entities = self._client.world.entities
    while True:
        print(entity)
        if not isinstance(entity, Item):
            break
        parent = entity.parent_serial
        if parent is None or parent not in all_entities:
            break
        entity = all_entities[parent]
    reactor.crash()
def printResponse(self, response):
    print 'First result: ' + response.payload
    global sensordatas
    sensordatas = response.payload
    global check
    check = sensordatas.split()
    global humi
    global temp
    global o2
    global co2
    # humidity, temperature, o2 (rainwater), co2 (water level)
    humi = float(check[1])
    temp = float(check[3])
    o2 = float(check[5])
    co2 = float(check[7])
    reactor.crash()
def onTimeout(d):
    e = defer.TimeoutError("%r (%s) still running at %s secs"
                           % (self, methodName, timeout))
    f = failure.Failure(e)
    # try to errback the deferred that the test returns (for no gorram
    # reason) (see issue1005 and test_errorPropagation in test_deferred)
    try:
        d.errback(f)
    except defer.AlreadyCalledError:
        # if the deferred has been called already but the *back chain is
        # still unfinished, crash the reactor and report timeout error
        # ourself.
        reactor.crash()
        self._timedOut = True  # see self._wait
        todo = self.getTodo()
        if todo is not None and todo.expected(f):
            result.addExpectedFailure(self, f, todo)
        else:
            result.addError(self, f)
def gotOutput(self, node, sender, data):
    global success
    value = int(data.rstrip())
    if (node in self.nodes[0:3]):
        if (self.nodes.index(node) == (value + 1) % 3):
            reactor.callLater(0.1, (lambda: node.pushInput(
                str(value + 1) + "\n")))
    else:
        print node.name + " - " + sender + " - " + data.rstrip()
        if self.expected == None:
            self.expected = value
        if self.expected > 2 and self.expected != value:
            print "Expected: " + str(self.expected) + " But got: " + str(value)
            success = False
            reactor.crash()
        self.expected = value + 1
        if self.expected > 50:
            reactor.crash()
def runOneDeferred(d):
    L = []
    d.addBoth(L.append)
    reactor.callLater(0, d.addCallback, lambda ign: reactor.crash())
    reactor.run()
    if L:
        if isinstance(L[0], failure.Failure):
            L[0].trap()
        return L[0]
    raise unittest.FailTest("Keyboard Interrupt")
def _scrapy_job(self):
    logg.info("Scrapy Start000")
    dfs = set()
    runner = CrawlerRunner(get_project_settings())
    for i in [KuaidailiSpider, S31fSpider, XicidailiSpider, S89ipSpider, YqieSpider]:
        dfs.add(runner.crawl(i))
    defer.DeferredList(dfs).addBoth(lambda _: reactor.crash())
    reactor.run(installSignalHandlers=0)
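# For comparison, a minimal sketch (an assumption, not from the project above)
# of the idiom the Scrapy docs use for running several crawls in one process;
# it differs from _scrapy_job mainly in calling reactor.stop() rather than
# reactor.crash(). ExampleSpider is a placeholder defined only for the sketch.
import scrapy
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings

class ExampleSpider(scrapy.Spider):
    # trivial placeholder spider so the sketch is self-contained
    name = "example"
    start_urls = ["https://example.com"]

    def parse(self, response):
        yield {"url": response.url}

def run_all_spiders():
    runner = CrawlerRunner(get_project_settings())
    # each crawl() returns a Deferred that fires when that spider closes
    crawls = [runner.crawl(ExampleSpider) for _ in range(2)]
    defer.DeferredList(crawls).addBoth(lambda _: reactor.stop())
    reactor.run()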
def leftNode (self, data):
    global observers_done
    global retriever_receiving
    global success
    MeshNode.leftNode (self, data)
    print self.name + " => " + data + " left"
    if (data != "node0"):
        print "Wrong node left!"
        success = False
        reactor.crash()
        return
    if (self.mesh.done < observers_done):
        print "Observer done before getting all info"
        success = False
        reactor.crash()
        return
    observers_done += 1
    if (observers_done == NUMOBSERVERS - 1):
        retriever_receiving = True
    if (observers_done == NUMOBSERVERS):
        reactor.crash()
    retriever.pushInput("blaat\n")
def leftNode(self, data):
    global observers_done
    global retriever_receiving
    global success
    MeshNode.leftNode(self, data)
    print self.name + " => " + data + " left"
    if (data != "node0"):
        print "Wrong node left!"
        success = False
        reactor.crash()
        return
    if (self.mesh.done < observers_done):
        print "Observer done before getting all info"
        success = False
        reactor.crash()
        return
    observers_done += 1
    if (observers_done == NUMOBSERVERS - 1):
        retriever_receiving = True
    if (observers_done == NUMOBSERVERS):
        reactor.crash()
    retriever.pushInput("blaat\n")
def poll(self):
    ms = get_idle_time()
    ctx = DEV['xidle/%s' % host]
    subj = URIRef("http://bigasterisk.com/host/%s/xidle" % host)
    lastMinActive = ms < 60 * 1000
    now = int(time.time())
    nextGraphUpdate = self.lastGraphSentTime + min(10, ms / 1000 / 2)
    if self.lastGraphSent != lastMinActive or now > nextGraphUpdate:
        masterGraph.patchObject(ctx, subj, ROOM['idleTimeMs'], Literal(ms))
        masterGraph.patchObject(ctx, subj, ROOM['idleTimeMinutes'],
                                Literal(round(ms / 1000 / 60, 2)))
        self.lastGraphSent = lastMinActive
        self.lastGraphSentTime = now
    if self.lastSent != lastMinActive or now > self.lastSentTime + 3600:
        self.points.append({
            "measurement": "presence",
            "tags": {"host": host, "sensor": "xidle"},
            "fields": {"value": 1 if lastMinActive else 0},
            "time": now,
        })
        self.lastSent = lastMinActive
        self.lastSentTime = now
        try:
            client.write_points(self.points, time_precision='s')
        except influxdb.exceptions.InfluxDBServerError as e:
            log.error(repr(e))
            reactor.crash()
        self.points = []