def _recv_command(self):
    timeout = Timeout(self.command_timeout)
    timeout.start()
    try:
        return self.io.recv_command()
    finally:
        timeout.cancel()
def execute(self, name, args, func_type='PY'):
    module_path = settings['MODULE_PATH']  # "c:/mtp/mabotech/maboss1.1"
    info = "[%s]%s:%s" % (module_path, func_type, name)
    log.debug(info)
    t = time.time()
    if name == "time":  # for reconnection testing
        return t
    # Sync Code Here !!!
    timeout = Timeout(5, Exception)
    timeout.start()
    try:
        # ...
        # exception will be raised here, after *seconds* passed since start() call
        rtn = "OK"
        rtn = py_executor.execute(name, args, module_path)
        # gevent.sleep(0.02)
        pf_log.debug("%10.5f,%s,%s" % (time.time() - t, func_type, name))
        return rtn
    except Exception, e:
        log.error(e.message)
def sample():
    timeout = Timeout(5)
    timeout.start()
    try:
        gevent.spawn(wait).join()
    except Timeout:
        print 'Could not complete'
def __init__(self, url):
    self.url = url
    self.protocol, self.domain = self.url.split("://")  # e.g. news.bbc.co.uk
    self.domain = self.domain.split('/')[0]
    self.site_data = sites[self.domain]
    self.total_words = {}
    timeout = Timeout(30, TimeoutError)
    timeout.start()
    try:
        self.html = self.read_url()
    except TimeoutError:
        print url + " timed out"
        return
    finally:
        timeout.cancel()
    self.text = self.boiler_extract()
    self.soup = BeautifulSoup(self.html, 'lxml')
    self.article = self.is_article()
    if self.article:
        self.calc_total_words()
        articles.put(self)
        self.find_links()
def func1():
    utc = arrow.utcnow()
    local = utc.to('Asia/Shanghai')
    ts = local.timestamp
    print arrow.get(ts)
    # print local.format('YYYY-MM-DD HH:mm:ss ZZ')
    """function and heartbeat"""
    ex = TimeoutException("timeout ex")
    # gevent timeout
    timeout = Timeout(6, ex)
    # start
    timeout.start()
    try:
        # exception will be raised here, after *seconds*
        # passed since start() call
        gevent.sleep(3 * random.randint(1, 4))
        # print "f1 heart beat"
        heartbeat("f1")
    except TimeoutException as ex:
        print ex
    finally:
        # cancel timeout
        timeout.cancel()
def initial_test(self, address):
    try:
        timeout = Timeout(self.TIMEOUT, TestTimeout('The server timed out on the first command.'))
        timeout.start()
        TestClient(address).put('key', 'value')
    finally:
        timeout.cancel()
def make_conn(url, timelimit):
    global SUCCESS_RECORD, FAIL_RECORD, TOTAL_SIZE, REQTIME_ARR
    time_start = time.time()
    if timelimit:
        timeout = Timeout(timelimit)
        timeout.start()
    try:
        f = urlopen(url)
        if f.getcode() == 200:
            time_end = time.time()
            server_info = f.info()
            content_type = server_info['content-type'].split(";")[0]
            if content_type == "text/html":
                data = f.read()
                size = int(server_info['content-length'])
            else:
                size = int(server_info['content-length'])
            REQTIME_ARR.append((time_end - time_start) * 1000)
            TOTAL_SIZE = TOTAL_SIZE + size
            SUCCESS_RECORD += 1
        else:
            FAIL_RECORD += 1
    except Timeout:
        FAIL_RECORD += 1
        return
    except Exception, e:
        FAIL_RECORD += 1
        return
def _get_message_data(self):
    max_size = self.extensions.getparam('SIZE', filter=int)
    reader = DataReader(self.io, max_size)
    err = None
    timeout = Timeout(self.data_timeout)
    timeout.start()
    try:
        data = reader.recv()
    except ConnectionLost:
        raise
    except SmtpError as e:
        data = None
        err = e
    finally:
        timeout.cancel()
    reply = Reply('250', '2.6.0 Message Accepted for Delivery')
    self._call_custom_handler('HAVE_DATA', reply, data, err)
    self.io.send_reply(reply)
    self.io.flush_send()
    self.have_mailfrom = None
    self.have_rcptto = None
def read_url(url):
    timeout = Timeout(10)
    timeout.start()
    try:
        response = urllib2.urlopen(url)
        reason, other = response.getcode(), response.msg
    except Timeout, t:
        reason, other = 'gevent timeout', 0
def run_cmd(args, timeout=None):
    _init()
    args = list(args)
    for i, x in enumerate(args):
        if isinstance(x, unicode):
            args[i] = x.encode("utf-8")
    sp = socket.socketpair()
    pid = os.fork()
    if pid == 0:
        # client
        try:
            os.dup2(sp[1].fileno(), 1)
            os.dup2(sp[1].fileno(), 2)
            sp[0].close()
            sp[1].close()
            os.execvp(args[0], args)
        except:
            stderr = os.fdopen(2, "w", 0)
            os.write(2, "failed to exec child process: %r\nPATH=%r" % (args, os.environ.get('PATH')))
            traceback.print_exc(file=stderr)
        finally:
            os._exit(97)
    pid2status[pid] = event.AsyncResult()
    if not _nochild:
        def cb():
            pid2status[pid].set(child_watcher.rstatus)
            child_watcher.stop()
        child_watcher = get_hub().loop.child(pid)
        child_watcher.start(cb)
    sp[1].close()
    chunks = []
    # prevent loopexit. see test_run_cmd_trigger_loopexit in test_proc.py
    if timeout is None:
        timeout = 2 ** 30
    timeout = Timeout(timeout)
    timeout.start()
    try:
        while 1:
            chunk = sp[0].recv(4096)
            if not chunk:
                break
            chunks.append(chunk)
        st = pid2status[pid].get()
        del pid2status[pid]
        return st, "".join(chunks)
    except Timeout, t:
        if t is not timeout:
            raise
def reposts_crawler():
    '''
    greenlet reposts crawler
    '''
    while not reposts_fetch_queue.empty():
        IS_NEED_REFETCH = False
        # when a timeout or error occurs, put the url back into the task queue
        # and make sure the task is not set to done!
        try:
            wait_time = Timeout(MAX_WAIT_TIME)
            wait_time.start()
            url = reposts_fetch_queue.get()
            gevent.sleep(0.0)
            reposts_time = _http_call(url)
            for status in reposts_time['reposts']:
                if not status.get('deleted'):
                    weibo_created_at = datetime.strptime(status.get('created_at'), '%a %b %d %H:%M:%S +0800 %Y')
                    user_created_at = datetime.strptime(status.get('user').get('created_at'), '%a %b %d %H:%M:%S +0800 %Y')
                    reposts_status_id = -1
                    if status.get('retweeted_status') is not None:
                        reposts_status = status['retweeted_status']
                        reposts_status_id = reposts_status['id']
                    weibo_params = (
                        status['id'], status['user']['id'], status['text'],
                        status['source'], weibo_created_at, reposts_status_id)
                    user_params = (
                        status['user']['id'], status['user']['screen_name'],
                        status['user']['name'], status['user']['province'],
                        status['user']['city'], status['user']['location'],
                        status['user']['description'], status['user']['profile_image_url'],
                        status['user']['domain'], status['user']['gender'],
                        status['user']['followers_count'], status['user']['friends_count'],
                        status['user']['statuses_count'], status['user']['favourites_count'],
                        user_created_at, status['user']['verified'],
                        status['user']['verified_type'], status['user']['verified_reason'],
                        status['user']['bi_followers_count'])
                    cursor.execute(REPOSTS_WEIBO_INSERT_SQL, weibo_params)
                    cursor.execute(REPOSTS_USER_INSERT_SQL, user_params)
        except Timeout as t:
            if t is wait_time:
                # print 'Processing timed out, waiting to refetch!'
                # put the timed-out url back into the task queue
                IS_NEED_REFETCH = True
        except Exception as e:
            IS_NEED_REFETCH = True
            logger.error(traceback.format_exc())
        finally:
            wait_time.cancel()
            if IS_NEED_REFETCH is not True:
                reposts_fetch_queue.task_done()
                # print url + ' fetch complete --- reposts'
            else:
                reposts_fetch_queue.put(url)
                print status
                print url + ' fetch failed --- reposts'
def serve_for_test(self):
    timeout = Timeout(10)
    timeout.start()
    try:
        while self.is_connected():
            if len(self.re_schedule_events) == 10 and len(self.heartbeat_events) == 10:
                break
            gevent.sleep(0.01)
    finally:
        timeout.cancel()
def timeout_wrapper(*args, **kwargs):
    t = Timeout(seconds, TestTimeout('Timed out after %d seconds' % seconds))
    t.start()
    try:
        ret = func(*args, **kwargs)
    finally:
        t.cancel()
    return ret
def requestGet(self, url):
    wait = random.random() * (wait_time[1] - wait_time[0])
    sleep(wait)
    timeout = Timeout(request_timeout)
    timeout.start()
    try:
        req = requests.get(url=url, verify=True, headers=headers, proxies=proxies)
    except IncompleteRead:
        pass  # todo: unknown error, not yet fully investigated
    timeout.cancel()
    return req
def foo1():
    timeout = Timeout(seconds)
    timeout.start()

    def wait():
        gevent.sleep(10)

    try:
        gevent.spawn(wait).join()
    except Timeout:
        print('Could not complete')
    else:
        print('Complete!')
def query_documents_with_timeout(*args, **kwargs):
    timeout = Timeout(30)
    timeout.start()
    try:
        gevent.sleep(0.0001)
        return query_documents(*args, **kwargs)
    except:
        return [[], 0]
    finally:
        timeout.cancel()
def _get_message_data(self):
    max_size = self.extensions.getparam('SIZE', filter=int)
    reader = DataReader(self.io, max_size)
    err = None
    timeout = Timeout(self.data_timeout)
    timeout.start()
    try:
        data = reader.recv()
    except ConnectionLost:
        raise
    except SmtpError, e:
        data = None
        err = e
def get_commits():
    print('Start - {0}'.format(datetime.datetime.now()))
    timeout = Timeout(10)
    timeout.start()
    try:
        # gevent.spawn takes the callable and its arguments separately;
        # spawn(download(url)) would call download eagerly instead of in a greenlet
        job_stack = [gevent.spawn(download, url) for url in urls]
        gevent.joinall(job_stack)
    except Timeout:
        pass
    finally:
        timeout.cancel()
    cntx = OrderedDict(sorted(result.items()))
    print('End - {0}'.format(datetime.datetime.now()))
    return render_template('start.html', cntx=cntx)
def generate():
    result = None
    while result is None:
        try:
            timeout = Timeout(25)
            timeout.start()
            result = json.dumps(client.get_events(
                queue_id=queue_id, last_event_id=last_event_id))
            logging.debug('got a response')
        except Timeout:
            pass
        finally:
            timeout.cancel()
        yield result or ' '
def _exec_pipe(the_pipe):
    logging.info('----Begin to process batch----')
    startTime = datetime.now()
    timeout = Timeout(10, False)
    error_code = S_OK
    pipe_result = []
    timeout.start()
    try:
        pipe_result = the_pipe.execute()
    except Timeout, t:
        if t is not timeout:
            raise  # not my timeout
        logging.error('unable to execute the_pipe (possibly gevent.Timeout)')
        error_code = S_ERR
def wrapper(*args, **kwargs):
    timeout = Timeout(650)
    timeout.start()
    try:
        token = kwargs.get("token", None)
        result = func(*args, **kwargs)
        worker_db.Update_Token_DB(token, result, "success")
    except Timeout:
        LOG.exception('%s: Gevent task %s TIMEOUT!!' % (func.__name__, token))
        worker_db.Update_Token_DB(token, "token timed out!!", "failed")
    except Exception, e:
        LOG.exception('%s: api_call exception: %s' % (func.__name__, str(e)))
        worker_db.Update_Token_DB(token, str(e), "failed")
def __init__(self, server, sock, address):
    self._rip, self._rport = address
    self._lip, self._lport = sock.getsockname()
    self._server = server
    self._config = server.config
    self._socket = sock
    self._file = sock.makefile()
    self._rhost = socket.getfqdn(self._rip)
    self._lhost = socket.getfqdn(self._lip)
    self._timeout = Timeout(30, error.TimeoutError)
    self._hello = None
    self._hello_host = ''
    self._relay_client = False
    self._connected = True
    self._transaction = None

    # Generate a unique identifier for this connection
    sha_hash = hashlib.sha1(self._rip)
    sha_hash.update(str(time.time()))
    sha_hash.update(str(random.getrandbits(64)))
    self._cid = sha_hash.hexdigest()
    log.connection_id = self._cid[:7]

    # Add all the command controller methods
    self._commands = dict([(c, getattr(self, c)) for c in dir(self)
                           if getattr(getattr(self, c), '_is_command', False)])
def wait(self, timeout=None):
    if self.ready():
        return self.value
    else:
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout.start_new(timeout) if timeout is not None else None
            try:
                getattr(getcurrent(), 'awaiting_batch', lambda: None)()
                result = get_hub().switch()
                assert result is self, 'Invalid switch into AsyncResult.wait(): %r' % (result, )
            finally:
                if timer is not None:
                    timer.cancel()
        except Timeout as exc:
            self.unlink(switch)
            if exc is not timer:
                raise
        except:
            self.unlink(switch)
            raise
        # not calling unlink() in non-exception case, because if switch()
        # finished normally, link was already removed in _notify_links
    return self.value
def foo3():
    # gevent also accepts timeout arguments on its Greenlet- and
    # data-structure-related calls
    def wait():
        gevent.sleep(2)

    timer = Timeout(5).start()  # raise Timeout if not finished within 5 s
    thread1 = gevent.spawn(wait)
    try:
        thread1.join(timeout=timer)
    except Timeout:
        print('Thread 1 timed out')
    else:
        print('Thread 1 complete')

    timer = Timeout.start_new(1)
    thread2 = gevent.spawn(wait)
    try:
        thread2.get(timeout=timer)
    except Timeout:
        print('Thread 2 timed out')

    try:
        gevent.with_timeout(1, wait)
    except Timeout:
        print('Thread 3 timed out')
def curl(ip):
    url = 'http://' + ip
    request = urllib2.Request(url=url)
    reason, other = None, 0
    timeout = Timeout(CONNECT_TIMEOUT + DATA_TIMEOUT)
    timeout.start()
    try:
        rsp = urllib2.urlopen(request)
        print rsp.read()
        reason, other = rsp.getcode(), rsp.msg
    except Timeout, t:
        if t is timeout:
            reason, other = 'gevent timeout', 0
        else:
            reason, other = 'gevent timeout 2', 0
def main():
    timer = Timeout(1).start()
    thread1 = gevent.spawn(wait)
    try:
        thread1.join(timeout=timer)
    except Timeout:
        print('Thread 1 timed out')

    # --
    timer = Timeout.start_new(1)
    thread2 = gevent.spawn(wait)
    try:
        thread2.get(timeout=timer)
    except Timeout:
        print('Thread 2 timed out')

    # --
    try:
        gevent.with_timeout(1, wait)
    except Timeout:
        print('Thread 3 timed out')
def read(self, nbytes):
    if self.timeout is None:
        timeout = None
    else:
        timeout = Timeout(self.timeout)
        timeout.start()
    try:
        buf = fd.read(self.fd, nbytes)
    except Timeout as e:
        if e is not timeout:
            raise
        raise TIMEOUT('Timeout reading from fd')
    else:
        if timeout is not None:
            timeout.cancel()
        return buf
def handle(self, body):
    t = int((self.timestamp + self.expiration) - time.time())
    worker = self.get_worker(self.routing_key)
    log.debug("Running {0} with timeout {1} sec.".format(self.w_name, t))
    timeout = Timeout(t, TimeoutError)
    timeout.start()
    try:
        res = worker(body)
        log.debug('Task finished.')
        return res
    except Exception as e:
        log.debug(traceback.format_exc())
        log.error('Task error: {0}'.format(unicode(e)))
        return e
    finally:
        timeout.cancel()
def _read_result():
    timeout = Timeout(self._read_timeout, Timeout)
    timeout.start()
    try:
        result = self._read_result(cmd)
        result_channel.put(result)
    except Timeout:
        raise
    except:
        self.log.exception("read error in defer_command")
        result_channel.put((MemcacheResult.ERROR, error_value))
        self.log.warn("Error communicating with Memcache %s, disconnecting", self._address)
        self.disconnect()
    finally:
        timeout.cancel()
def __init__(
    self,
    concurrent_num=20,
    crawl_tags=[],
    depth=3,
    max_url_num=300,
    internal_timeout=60,
    spider_timeout=6 * 3600,
    crawler_mode=0,
    same_origin=True,
    dynamic_parse=False,
):
    """
    concurrent_num   : number of concurrent crawlers and fetchers
    crawl_tags       : list of tags whose URLs are collected while crawling
    depth            : crawl depth limit
    max_url_num      : maximum number of URLs to collect
    internal_timeout : timeout for internal calls
    spider_timeout   : overall spider timeout
    crawler_mode     : crawler model (0: thread pool, 1: gevent pool)
    same_origin      : whether to restrict crawling to the same origin
    dynamic_parse    : whether to use WebKit for dynamic parsing
    """
    self.logger.setLevel(logging.DEBUG)
    hd = logging.StreamHandler()
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    hd.setFormatter(formatter)
    self.logger.addHandler(hd)

    self.stopped = event.Event()
    self.internal_timer = Timeout(internal_timeout)

    self.crawler_mode = crawler_mode  # crawler model
    self.concurrent_num = concurrent_num
    self.fetcher_pool = pool.Pool(self.concurrent_num)
    if self.crawler_mode == 0:
        self.crawler_pool = threadpool.ThreadPool(min(50, self.concurrent_num))
    else:
        self.crawler_pool = pool.Pool(self.concurrent_num)

    # self.fetcher_queue = queue.JoinableQueue(maxsize=self.concurrent_num*100)
    self.fetcher_queue = threadpool.Queue(maxsize=self.concurrent_num * 100)
    self.crawler_queue = threadpool.Queue(maxsize=self.concurrent_num * 100)

    self.fetcher_cache = UrlCache()
    self.crawler_cache = UrlCache()

    self.default_crawl_tags = ["a", "base", "iframe", "frame", "object"]
    self.ignore_ext = ["js", "css", "png", "jpg", "gif", "bmp", "svg", "exif", "jpeg", "exe", "rar", "zip"]
    self.crawl_tags = list(set(self.default_crawl_tags) | set(crawl_tags))
    self.same_origin = same_origin
    self.depth = depth
    self.max_url_num = max_url_num
    self.dynamic_parse = dynamic_parse
    if self.dynamic_parse:
        self.webkit = WebKit()

    self.crawler_stopped = event.Event()
def delete_object(obj):
    """This function is the worker that will delete obj from the object list"""
    with Timeout(5, False):
        try:
            cont.delete_object(obj)
            log.info("successfully deleted [%s]" % obj)
        except Exception as e:
            # We will maintain a list of errors on the screen
            log.error("Failed to delete %s" % obj)
def _wait_hardware(self, value, timeout=None):
    """Wait timeout seconds till hardware in place.

    Args:
        value (str, int): value to be tested.
        timeout (float): Timeout [s]. None means infinite timeout.
    """
    with Timeout(timeout, RuntimeError("Timeout waiting for hardware")):
        while self.value_channel.get_value() != value:
            sleep(0.5)
def test_blocks_until_eof(self):
    stream = self.stream
    data = binary_data()
    stream.feed(data)
    # no EOF mark was fed
    with self.assertRaises(Timeout):
        with Timeout(2):
            stream.read()
def _wait_ready(self, timeout=None):
    """Wait timeout seconds till status is ready.

    Args:
        timeout (float): Timeout [s]. None means infinite timeout.
    """
    with Timeout(timeout, RuntimeError("Timeout waiting for status ready")):
        while self.get_state() != self.STATES.READY:
            sleep(0.5)
def connect(self):
    with Timeout(self.timeout):
        ftp = _FTP(self.host)
        ftp.login(self.user, self.password)
        try:
            yield ftp
        finally:
            ftp.close()
def scout(self, leader, timeout=2):
    scout = self._zmq.socket(zmq.REQ)
    scout.connect("tcp://{}:{}".format(leader, self.greeter_port))
    scout.send('HELLO')
    response = []
    with Timeout(timeout, False):
        response = scout.recv_multipart()
    scout.close()
    return response
def wait_for_room_with_address(transport: MatrixTransport, address: Address, timeout: int = 10):
    with Timeout(timeout):
        while True:
            room = transport._get_room_for_address(address)
            if room is not None:
                break
            gevent.sleep(0.1)
def _authenticate(self):
    try:
        credentials = self.credentials()
    except TypeError:
        credentials = self.credentials
    with Timeout(self.command_timeout):
        auth = self.client.auth(*credentials)
    if auth.is_error():
        raise SmtpRelayError.factory(auth)
def foo2():
    # The Timeout class can also be used as a context manager
    time_to_wait = 5

    class TooLong(Exception):
        pass

    with Timeout(time_to_wait, TooLong):
        gevent.sleep(10)
def inner(tag):
    with Timeout(timeout):
        res = con._get_response()
        if res:
            log.debug('received: %r', res.decode())
        bad = con.tagged_commands[tag]
        if bad:
            raise Error(bad)
        match()
def _check_server_timeout(self):
    try:
        if self.client.has_reply_waiting():
            with Timeout(self.command_timeout):
                self.client.get_reply()
            return True
    except SmtpError:
        return True
    return False
def _ehlo(self):
    try:
        ehlo_as = self.ehlo_as(self.address)
    except TypeError:
        ehlo_as = self.ehlo_as
    with Timeout(self.command_timeout):
        ehlo = self.client.ehlo(ehlo_as)
    if ehlo.is_error():
        raise SmtpRelayError.factory(ehlo)
def _disconnect(self):
    try:
        with Timeout(self.command_timeout):
            self.client.quit()
    except (Timeout, Exception):
        pass
    finally:
        if self.client:
            self.client.io.close()
def _send_message_data(self, envelope):
    header_data, message_data = envelope.flatten()
    with Timeout(self.data_timeout):
        send_data = self.client.send_data(
            header_data.encode('ascii'), message_data)
        self.client._flush_pipeline()
    if isinstance(send_data, Reply) and send_data.is_error():
        raise SmtpRelayError.factory(send_data)
    return send_data
def test_matrix_invite_private_room_unhappy_case1(matrix_transports, expected_join_rule0, expected_join_rule1):
    raiden_service0 = MockRaidenService(None)
    raiden_service1 = MockRaidenService(None)

    transport0, transport1 = matrix_transports

    transport0.start(raiden_service0, raiden_service0.message_handler, None)
    transport1.start(raiden_service1, raiden_service1.message_handler, None)

    transport0.start_health_check(raiden_service1.address)
    transport1.start_health_check(raiden_service0.address)

    room_id = transport0._get_room_for_address(raiden_service1.address).room_id

    with Timeout(40):
        while True:
            try:
                room_state0 = transport0._client.api.get_room_state(room_id)
                break
            except MatrixRequestError:
                gevent.sleep(0.1)

    join_rule0 = [
        event["content"].get("join_rule")
        for event in room_state0
        if event["type"] == "m.room.join_rules"
    ][0]

    assert join_rule0 == expected_join_rule0

    with Timeout(40):
        while True:
            try:
                room_state1 = transport1._client.api.get_room_state(room_id)
                break
            except MatrixRequestError:
                gevent.sleep(0.1)

    join_rule1 = [
        event["content"].get("join_rule")
        for event in room_state1
        if event["type"] == "m.room.join_rules"
    ][0]

    assert join_rule1 == expected_join_rule1
def execute(self, call):
    """
    Calls a method for an RPC call (part of ``ConnectionHandler``'s
    ``call_handler`` interface).
    """
    callable = self.get_call_callable(call)

    timeout = None
    if self.call_timeout is not None:
        timeout = Timeout(getattr(callable, "_timeout", self.call_timeout))

    call_semaphore = self._get_call_semaphore(call)
    if call_semaphore.locked():
        log.warning("too many concurrent callers (%r); call %r will block",
                    self.max_concurrent_calls, call)
    call_semaphore.acquire()

    def finished_callback(is_error):
        self.active_calls.remove(call)
        self.call_stats["completed"] += 1
        if is_error:
            self.call_stats["errors"] += 1
        call_semaphore.release()
        if timeout is not None:
            timeout.cancel()

    got_err = True
    result_is_generator = False
    try:
        if timeout is not None:
            timeout.start()
        time_in_queue = time.time() - call.meta.get("time_received", 0)
        call.meta["time_in_queue"] = time_in_queue
        self.active_calls.append(call)
        result = callable(*call.args, **call.kwargs)
        if isiter(result):
            result = self.wrap_generator_result(call, result, finished_callback)
            result_is_generator = True
        got_err = False
    finally:
        if not result_is_generator:
            finished_callback(is_error=got_err)
    return result
def _get_ip_from_dns_server(self, dns_server, ip_result_set, timeout_value=5):
    try:
        with Timeout(timeout_value):
            resolver = dns.resolver.Resolver()
            resolver.nameservers = [dns_server]
            answers = resolver.query(self.host, 'A')
            for answer in answers:
                ip_result_set.add(answer.to_text())
    except (timeout.Timeout, dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        pass
def test_runforever_exit(self):
    """
    Tests that stopping a server from one greenlet causes serve_forever()
    to return.
    """
    server = QdbServer(client_port=0, tracer_port=0)
    with Timeout(1, False):
        spawn_later(0.3, server.stop)  # Stop the server in 0.3 seconds.
        server.serve_forever()
    self.assertFalse(server.is_running)
def receive(self, timeout=0.1):
    """
    Try receiving data from client. Ignore garbage data.

    :param timeout: Amount of time before giving up
    """
    with Timeout(timeout, False):
        try:
            return json.loads(self.websocket.receive())
        except json.JSONDecodeError:
            pass
def expect_num_warnings(n, message=None, timeout=None):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        yield
        with Timeout(timeout, exception=False):
            while len(w) < n:
                idle()
        eq_(
            len(w), n,
            message or "expected %s warnings but found %s: %s" % (n, len(w), ', '.join(map(str, w))))
def wait_motor_move(self, timeout=20):
    """Wait until the move has ended, using the motor state.

    Args:
        timeout (float): Timeout [s]. Default value is 20 s.

    Raises:
        RuntimeError: Execution timeout.
    """
    with Timeout(timeout, RuntimeError("Execution timeout")):
        while self.get_state() != self.STATES.READY:
            sleep(0.01)
def _wait_ready(self, timeout=3):
    """Wait for the state to be "Ready".

    Args:
        timeout (float): waiting time [s].

    Raises:
        RuntimeError: Execution timeout.
    """
    with Timeout(timeout, RuntimeError("Execution timeout")):
        while not self._ready():
            sleep(0.01)
def connect_to_xenserver():
    for host in settings.XEN:
        with Timeout(1.0):
            try:
                proxy = xmlrpclib.ServerProxy("http://" + host[0])
                result = proxy.session.login_with_password(host[1], host[2])
                session_id = result['Value']
                global_xenserver_conn[host[0]] = session_id
            except Exception, e:
                logger.exception(e)
def curl(ip):
    '''
    Use urllib2 to probe whether the IP is reachable, and extract the
    response code or the failure reason.
    '''
    url = 'http://' + ip
    request = urllib2.Request(url=url)
    reason, other = None, 0
    timeout = Timeout(CONNECT_TIMEOUT + DATA_TIMEOUT)
    timeout.start()
    try:
        rsp = urllib2.urlopen(request)
        reason, other = rsp.getcode(), rsp.msg
    except Timeout, t:
        if t is timeout:
            reason, other = 'gevent timeout', 0
        else:
            reason, other = 'gevent timeout 2', 0
def probe_proxy_ip(proxy_ip):
    """Probe whether the proxy works."""
    proxy = urllib2.ProxyHandler(proxy_ip)
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
    timeout = Timeout(30)
    timeout.start()
    try:
        html = urllib2.urlopen('http://1212.ip138.com/ic.asp')
        if html:
            return True
        else:
            return False
    except Exception as e:
        print 'URLopen error'
        return False
    except Timeout:
        print 'Timeout'
        return False
def test_matrix_invitee_receives_invite_on_restart(matrix_transports):
    raiden_service0 = MockRaidenService(None)
    raiden_service1 = MockRaidenService(None)
    transport0, transport1 = matrix_transports

    room_creator_address = my_place_or_yours(raiden_service0.address, raiden_service1.address)
    if room_creator_address == raiden_service0.address:
        inviter_service = raiden_service0
        invitee_service = raiden_service1
        inviter_transport = transport0
        invitee_transport = transport1
    else:
        inviter_service = raiden_service1
        invitee_service = raiden_service0
        inviter_transport = transport1
        invitee_transport = transport0

    inviter_transport.start(inviter_service, [], None)
    invitee_transport.start(invitee_service, [], None)
    inviter_transport.start_health_check(invitee_service.address)
    invitee_transport.start_health_check(inviter_service.address)

    wait_for_peer_reachable(inviter_transport, invitee_service.address)
    wait_for_peer_reachable(invitee_transport, inviter_service.address)

    assert is_reachable(invitee_transport, inviter_service.address)
    assert is_reachable(inviter_transport, invitee_service.address)

    invitee_transport.stop()
    wait_for_peer_unreachable(inviter_transport, invitee_service.address)

    assert not is_reachable(inviter_transport, invitee_service.address)

    room_id = inviter_transport._get_room_for_address(invitee_service.address).room_id

    invitee_transport.start(invitee_service, [], None)
    invitee_transport.start_health_check(inviter_service.address)

    inviter_transport.stop()

    with Timeout(TIMEOUT_MESSAGE_RECEIVE):
        while True:
            try:
                room_state1 = invitee_transport._client.api.get_room_state(room_id)
                break
            except MatrixRequestError:
                gevent.sleep(0.1)

    assert room_state1 is not None
def thread_network(self, ref, transport, req, res):
    try:
        log_network_request(req)
        try:
            timeout_time = req['timeout'] or 31536000
            with Timeout(
                    timeout_time,
                    OperationTimeoutError(
                        'Timed out while reading response',
                        Timeout(timeout_time),
                    )):
                if isinstance(req, CallbackRequest):
                    req['network_callback'](req, res)
                else:
                    transport.request(req, res)
        except OperationTimeoutError as ex:
            # logging.error(ex)
            error = ex
        except (req.retry_errors or (NetworkError, DataNotValid)) as ex:
            # logging.error(ex)
            error = ex
        except Exception as ex:
            # logging.error(ex)
            raise
        else:
            error = None
        if isinstance(req, CallbackRequest):
            res.error = error
        else:
            transport.prepare_response(req, res, error, raise_network_error=False)
        self.resultq.put({
            'request': req,
            'response': res,
        })
    except Exception as ex:
        ctx = collect_error_context(req)
        self.fatalq.put((sys.exc_info(), ctx))
    finally:
        self.free_handler(ref)
def handle_client(self, environ, start_response):
    path = environ['PATH_INFO']
    ws = environ['wsgi.websocket']
    addr = environ['REMOTE_ADDR']
    try:
        match = self.route.match(path)
        if not match:
            # This did not match our route.
            return
        log.info('Client request from %s' % addr)
        uuid = match.group(1)

        start_event = None
        with Timeout(self.auth_timeout, False):
            start_event = self.get_event(ws)

        failed = False
        message = ''
        # Fall through the various ways to fail to generate a more helpful
        # error message.
        if not start_event:
            message = 'No start event received'
            failed = True
        elif start_event['e'] != 'start':
            message = "First event must be of type: 'start'"
            failed = True
        elif not self.auth_fn(start_event.get('p', '')):
            log.warn('Client %s failed to authenticate' % addr)
            message = 'Authentication failed'
            failed = True

        if failed:
            try:
                self.send_error(ws, 'auth', message)
                ws.send(fmt_msg('disable', serial=json.dumps))
            except WebSocketError:
                # We are unable to send the disable message for some
                # reason; however, they already failed auth so suppress
                # it and close.
                pass
            return

        if not self.session_store.attach_client(uuid, ws):
            # We are attaching to a client that does not exist.
            return

        self.session_store.send_to_tracer(uuid, event=start_event)
        for event in self.get_events(ws):
            self.session_store.send_to_tracer(uuid, event=event)
    finally:
        log.info('Closing websocket to client %s' % addr)
        ws.close()
def download(name, keyword, save_to):
    search_url = SEARCH_BASE_URL % ((name + ' ' + keyword).replace(' ', '+'))
    downloaded_number = 0
    next_paged = False
    while downloaded_number < DOWNLOAD_NUMBER:
        # Search result page; the user-agent from config yields a static page
        # whose search results do not need JavaScript to load.
        resp = requests.get(search_url, headers={'User-Agent': USER_AGENT})
        search_result = pq(resp.content)
        # Each result has class "image"; its href is the detail page of that result.
        detail_pages = [d.attrib['href'] for d in search_result('.image')]
        # Fetch the detail page of every result.
        detail_requests = (grequests.get(u, headers={'User-Agent': USER_AGENT}) for u in detail_pages)
        detail_resps = grequests.map(detail_requests)
        # On the detail page, the link of the <a> tag at index 3 is the original image.
        images_urls = []
        for r in detail_resps:
            try:
                images_urls.append(pq(r.content)('a')[4].attrib['href'])
            except Exception as e:
                logger.error('download get image url exception, url: %s, raw: %s' % (search_url, e))
        download_requests = (grequests.get('http://images.google.com/' + u, stream=True, timeout=3)
                             for u in images_urls)
        images = grequests.map(download_requests)
        # Save the images.
        for i in range(len(images_urls)):
            img = images[i]
            if img is not None:
                success = False
                with Timeout(2, False):
                    with open(path.join(save_to, '%d.jpg' % downloaded_number), 'wb') as f:
                        # Written as the requests docs recommend; unsure whether
                        # grequests should be used the same way.
                        for chunk in img.iter_content(4096):
                            f.write(chunk)
                    downloaded_number += 1
                    success = True
                if success is False:
                    logger.info('download timed out, image: %s' % images_urls[i])
        # Next page of search results.
        prev_next_buttons = search_result('#navbar').children()
        if len(prev_next_buttons) == 0 or len(prev_next_buttons) == 1 and next_paged:
            logger.info('%s reached end of search result. break' % name)
            break
        search_url = 'http://www.google.com' + prev_next_buttons[-1].attrib['href']
        logger.info('downloaded %s %d/%d' % (name, downloaded_number, DOWNLOAD_NUMBER))
        next_paged = True
def _connect(self):
    try:
        with Timeout(self.connect_timeout):
            self.socket = self.socket_creator(self.address)
    except socket_error:
        reply = Reply('451', '4.3.0 Connection failed', command=self.current_command)
        raise SmtpRelayError.factory(reply)
    else:
        log.connect(self.socket, self.address)
        self.client = self._client_class(self.socket, self.tls_wrapper)