def get_html_text_with_params(self, url, payload):
    # Send a GET request with query-string parameters to the given URL.
    # `payload` is a dictionary of key/value pairs.
    html_file = None
    hdr = GetSource.set_header()
    try:
        with eventlet.Timeout(10, False):
            html_file = requests.get(url, headers=hdr, params=payload,
                                     verify=False)
            self.base_url = html_file.url
            html_file.raise_for_status()
    except requests.exceptions.SSLError:  # check for https
        print "SSLError exception caught"
    except requests.exceptions.ConnectionError:
        print "No Internet Connection!\nWaiting for connection"
        self.wait_for_connection()
        raise
    if html_file is None:
        return None  # the request timed out or failed with an SSLError
    return html_file.text
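# A minimal sketch (not from any of the snippets here) contrasting the
# eventlet.Timeout idioms these examples rely on. Passing False as the second
# argument makes the timeout silent: the with-block is simply abandoned and
# execution resumes after it. The one-argument form raises eventlet.Timeout
# instead. `do_work` is a hypothetical placeholder.
import eventlet


def timeout_patterns():
    # silent: no exception escapes; anything assigned inside the block
    # may be only partially complete if the deadline hits
    with eventlet.Timeout(10, False):
        do_work()

    # raising: eventlet.Timeout propagates if the block overruns
    try:
        with eventlet.Timeout(10):
            do_work()
    except eventlet.Timeout:
        pass  # handle the expiry

    # manual: a bare timer must be cancelled once the work finishes
    timer = eventlet.Timeout(10)
    try:
        do_work()
    finally:
        timer.cancel()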
def test_dependency_call_lifecycle_errors(container_factory, rabbit_config,
                                          method_name):
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    dependency = get_extension(container, EventDispatcher)
    with patch.object(dependency, method_name, autospec=True) as method:
        err = "error in {}".format(method_name)
        method.side_effect = Exception(err)

        # use a standalone rpc proxy to call exampleservice.task();
        # the call errors remotely and never gets a reply, so dispatch
        # it asynchronously rather than blocking on a result
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            proxy.task.call_async()

        # verify that the error bubbles up to container.wait()
        with eventlet.Timeout(1):
            with pytest.raises(Exception) as exc_info:
                container.wait()
            assert str(exc_info.value) == err
def test_socket_file_read_non_int():
    listen_socket = eventlet.listen(('localhost', 0))

    def server():
        conn, _ = listen_socket.accept()
        conn.recv(1)
        conn.sendall(b'response')
        conn.close()

    eventlet.spawn(server)
    sock = eventlet.connect(listen_socket.getsockname())
    fd = sock.makefile('rwb')
    fd.write(b'?')
    fd.flush()
    with eventlet.Timeout(1):
        try:
            fd.read("This shouldn't work")  # non-int size must raise
            assert False
        except TypeError:
            pass
def obj_Get(obj, BEGIN_URL):
    getURLProcess_mark = True
    tryConnectLoop_mark = True
    while tryConnectLoop_mark:
        try:
            eventlet.monkey_patch()
            while getURLProcess_mark:
                with eventlet.Timeout(10, False):
                    obj.get(BEGIN_URL)
                    getURLProcess_mark = False  # so the inner loop exits
                    tryConnectLoop_mark = False  # so the outermost loop exits
        except Exception:
            tryConnectLoop_mark = True
            print("---obj_Get Error !---sleep 3 seconds !---")
            time.sleep(3)
    return obj
def tara(ip):
    try:
        with eventlet.Timeout(3):
            address = 'http://' + ip + ':8545'
            data = {"jsonrpc": "2.0", "method": "web3_clientVersion",
                    "params": [], "id": 1}
            headers = {'Content-type': 'application/json',
                       'Accept': 'text/plain'}
            r = requests.post(address, data=json.dumps(data),
                              headers=headers, timeout=3)
            return r.text
    except:  # bare on purpose: eventlet.Timeout subclasses BaseException
        pass
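# A hedged aside on the bare `except:` above: eventlet documents Timeout as a
# BaseException subclass, precisely so blanket `except Exception:` handlers
# don't swallow it. Narrowing the clause to `except Exception:` here would
# therefore let the Timeout escape. A small sketch of the distinction:
import eventlet


def demonstrate_timeout_hierarchy():
    try:
        with eventlet.Timeout(0.1):
            eventlet.sleep(1)
    except Exception:
        print("not reached: Timeout is not an Exception subclass")
    except eventlet.Timeout:
        print("caught here instead")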
def web_ReturnHTML(lead_detail):
    # note the parentheses: without them, a None lead_detail[9] would still
    # reach .startswith() via the `or` branches and raise AttributeError
    if lead_detail[9] and (lead_detail[9].startswith('www.')
                           or lead_detail[9].startswith('http://')
                           or lead_detail[9].startswith('https://')):
        try:
            page_url = lead_detail[9]
            if lead_detail[9].startswith('www.'):
                page_url = 'http://' + lead_detail[9]
            page = ''
            print "Entered URL ", page_url
            with eventlet.Timeout(60, False):
                try:
                    page = urllib2.urlopen(page_url).read()
                except (HTTPException, urllib2.URLError) as e:
                    return web_ReturnHTML(lead_detail)
            return (lead_detail[0], page)
        except Exception as msg:
            time.sleep(10)
            print "UrlError"
            print msg
            return web_ReturnHTML(lead_detail)
def _update_max_enrollment_for_crns(term, crns):
    """
    Query the registrar for max enrollment on each crn in `crns` and
    update the courses table accordingly.

    ScheduleBuilder provides all course information except max enrollment,
    which is exclusive to the Registrar and Sisweb data sources.
    """
    r = Registrar()
    for crn in crns:
        course = None
        while not course:
            with eventlet.Timeout(const.QUERY_TIMEOUT, False):
                print('fetching max enrollment for crn {}'.format(crn))
                course = r.course_detail(term, crn)
        if course.max_enrollment:
            cur.execute(
                """UPDATE courses SET max_enrollment = %s
                   WHERE term_year = %s AND term_month = %s AND crn = %s""",
                (course.max_enrollment, int(term.year),
                 int(term.session.value), crn))
def request_with_proxy(url_body, ip, port, kind, retry_max=req_retry_max,
                       timeout=3):
    kind = kind.lower()
    cur_url = url_body.format(kind)
    cur_proxy = {kind: kind_D[kind].format(ip, port)}
    retry_cnt = 0
    while retry_cnt < retry_max:  # use the parameter, not the global default
        try:
            with eventlet.Timeout(timeout):
                r = requests.get(cur_url, headers=headers(),
                                 proxies=cur_proxy)
                status_code = r.status_code
                break
        except:  # bare on purpose: eventlet.Timeout subclasses BaseException
            retry_cnt += 1
    return retry_cnt
def getDataCenterSpeed(dataCenterUrl):
    timeBeginForError = time.time()
    dataPrev = 0
    speedLast = "0KB/s"
    dataNow = 0
    withError = False
    try:
        response = getValidResponse(dataCenterUrl)
        contentSize = int(response.headers['content-length'])
        with eventlet.Timeout(downloadTimeout + 2):
            timeBegin = time.time() * downloadSpeedRefreshRate
            timePrev = timeBegin
            timeNow = timeBegin
            data = True
            while data and timeNow - timeBegin <= downloadTimeout * downloadSpeedRefreshRate:
                data = response.read(downloadChunkSize)
                timeNow = time.time() * downloadSpeedRefreshRate
                dataNow += len(data)
                downloadProgress = (dataNow / contentSize) * 100
                if timeNow - timePrev > 1:
                    [timePrev, dataPrev, speedLast] = getNewSpeed(
                        timePrev, dataPrev, dataNow, timeNow)
                print("\rDownload progress: %d%% - %s " % (downloadProgress, speedLast), end=" ")
            if not data:
                print("\rDownload progress: 100%% - %s " % (speedLast), end=" ")
        print('\n\n')
        return [(timeNow - timeBegin) / downloadSpeedRefreshRate, dataNow, withError]
    except eventlet.Timeout:
        print('\n')
        print("Timed out; testing the next data center.")
    except Exception as e:
        print('\n')
        print("Error occurred:", str(e))
        withError = True
    print('\n')
    return [time.time() - timeBeginForError, dataNow, withError]
def get_authors():
    if os.path.exists(path_df_authors):
        df_authors = pd.read_csv(path_df_authors)
    else:
        df_authors = pd.DataFrame()
    df_links = pd.read_csv(path_df_links)
    count_to_spider = df_links[df_links['is_spider'] == False].shape[0]
    while count_to_spider:
        for i in range(df_links.shape[0]):
            href = df_links.loc[i, 'href']
            is_spider = df_links.loc[i, 'is_spider']
            if is_spider:
                continue
            print(href, is_spider)
            with eventlet.Timeout(10, False):  # set a 10-second timeout
                df_tmp = parse_v(href)
                df_authors = df_authors.append(df_tmp)
                df_links.loc[i, 'is_spider'] = True
                print(href, 'success\n')
        df_authors.to_csv(path_df_authors, index=False, encoding='utf8')
        count_to_spider = df_links[df_links['is_spider'] == False].shape[0]
    df_links.to_csv(path_df_links, index=False)
def test_error_stops_consumer_thread():
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with eventlet.Timeout(TIMEOUT):
        with patch.object(Connection, 'drain_events',
                          autospec=True) as drain_events:
            drain_events.side_effect = Exception('test')
            queue_consumer.start()
            with pytest.raises(Exception) as exc_info:
                queue_consumer._gt.wait()
            assert exc_info.value.args == ('test', )
def create(self):
    '''
    Create the stack and all of the resources.

    Creation will fail if it exceeds the specified timeout. The default
    is 60 minutes, set in the constructor.
    '''
    self.state_set(self.CREATE_IN_PROGRESS, 'Stack creation started')

    stack_status = self.CREATE_COMPLETE
    reason = 'Stack successfully created'
    res = None

    with eventlet.Timeout(self.timeout_mins * 60) as tmo:
        try:
            for res in self:
                if stack_status != self.CREATE_FAILED:
                    result = res.create()
                    if result:
                        stack_status = self.CREATE_FAILED
                        reason = 'Resource %s failed with: %s' % (str(res),
                                                                  result)
                else:
                    res.state_set(res.CREATE_FAILED,
                                  'Stack creation aborted')
        except eventlet.Timeout as t:
            if t is tmo:
                stack_status = self.CREATE_FAILED
                reason = 'Timed out waiting for %s' % str(res)
            else:
                # not my timeout
                raise

    self.state_set(stack_status, reason)

    if stack_status == self.CREATE_FAILED and not self.disable_rollback:
        self.delete(action=self.ROLLBACK)
def check_cloudDiskLoad_or_unload_ok(response, vmid, volumeid, typee=1):
    username = "******"
    password = "******"
    ip = "172.16.130.254"
    cmd = 'kubectl get vm {0} -o yaml|grep {1}|wc -l'.format(vmid, volumeid)
    flag = False
    eventlet.monkey_patch()
    with eventlet.Timeout(180, False):
        while True:
            time.sleep(0.5)
            ret = sshClient.tunction(ip=ip, username=username,
                                     password=password, cmd=cmd)
            if typee:
                if int(ret):  # volume appears in the vm yaml: attached
                    flag = True
                    break
            else:
                if not int(ret):  # volume no longer listed: detached
                    flag = True
                    break
    assert flag
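# Several of these snippets call eventlet.monkey_patch() before entering the
# timeout. The reason (a hedged note, not taken from the code above):
# eventlet.Timeout can only fire when the greenthread yields to the hub, so
# blocking calls such as time.sleep or socket reads must first be made
# cooperative. `slow_fetch` below is a hypothetical blocking helper.
import eventlet

eventlet.monkey_patch()  # patch socket/time so Timeout can actually preempt


def fetch_with_deadline(url):
    with eventlet.Timeout(5, False):  # give up silently after 5 seconds
        return slow_fetch(url)
    return None  # reached only if the timeout fired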
def check_changeBootSequence_ok(response, vmid, bootSeq, hostip):
    assert response.json().get("id")
    username = "******"
    password = "******"
    ip = hostip
    cmd = "kubectl get vm {0} -o yaml|grep order|cut -d: -f 2".format(vmid)
    flag = False
    eventlet.monkey_patch()
    with eventlet.Timeout(180, False):
        while True:
            time.sleep(0.5)
            ret = sshClient.tunction(ip=ip, username=username,
                                     password=password, cmd=cmd)
            ret = ret.decode("utf-8").replace("\n", "").replace(" ", "")
            if bootSeq == 1:
                if ret == "12":
                    flag = True
                    break
            elif bootSeq == 2:
                if ret == "21":
                    flag = True
                    break
    assert flag
def run(self):
    acceptors = []
    for sock in self.sockets:
        s = GreenSocket(family_or_realsock=sock)
        s.setblocking(1)
        hfun = partial(self.handle, s)
        acceptor = eventlet.spawn(eventlet.serve, s, hfun,
                                  self.worker_connections)
        acceptors.append(acceptor)

    while self.alive:
        self.notify()
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            break
        eventlet.sleep(1.0)

    self.notify()
    with eventlet.Timeout(self.cfg.graceful_timeout, False):
        [eventlet.kill(a, eventlet.StopServe) for a in acceptors]
def check_cloudHost_add_ok(response):
    body = response.json()
    print(body)
    assert body.get("code") == 1
    id = body.get("id")
    id_len = len(id.split(","))
    id = id.replace(",", "|")
    username = "******"
    password = "******"
    ip = "172.16.130.254"
    cmd = 'kubectl get vm|grep -E "{0}"|wc -l'.format(id)
    flag = False
    eventlet.monkey_patch()
    with eventlet.Timeout(180, False):
        while True:
            time.sleep(0.5)
            ret = sshClient.tunction(ip=ip, username=username,
                                     password=password, cmd=cmd)
            if int(ret) == id_len:
                flag = True
                break
    assert flag
def fetch(url):
    body = str()  # body = binary_type()
    timeout = eventlet.Timeout(timedelta(minutes=1).seconds)
    goturl = ""
    try:
        u = urlparse(url)
        if "ftp" in u.scheme:
            response = urllib2.urlopen(url)
            body = response.read()
            goturl = response.geturl()
        else:
            r = requests.get(url, stream=True, headers=headers)
            body = r.content
            goturl = r.url
    except eventlet.Timeout as t:
        if t is not timeout:
            raise  # not my timeout
    finally:
        timeout.cancel()
    return body, goturl
def test_timeout_and_final_write(self):
    # This test verifies that a write on a socket that we've
    # stopped listening for doesn't result in an incorrect switch
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('127.0.0.1', 0))
    server.listen(50)
    bound_port = server.getsockname()[1]

    def sender(evt):
        s2, addr = server.accept()
        wrap_wfile = s2.makefile('wb')
        eventlet.sleep(0.02)
        wrap_wfile.write(b'hi')
        s2.close()
        evt.send(b'sent via event')

    evt = event.Event()
    eventlet.spawn(sender, evt)
    # lets the socket enter accept mode, which
    # is necessary for connect to succeed on windows
    eventlet.sleep(0)
    try:
        # try and get some data off of this pipe
        # but bail before any is sent
        eventlet.Timeout(0.01)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', bound_port))
        wrap_rfile = client.makefile()
        wrap_rfile.read(1)
        self.fail()
    except eventlet.Timeout:
        pass

    result = evt.wait()
    self.assertEqual(result, b'sent via event')
    server.close()
    client.close()
def test_putting_to_queue(self):
    timer = eventlet.Timeout(0.1)
    try:
        size = 2
        self.pool = IntPool(min_size=0, max_size=size)
        queue = Queue()
        results = []

        def just_put(pool_item, index):
            self.pool.put(pool_item)
            queue.put(index)

        for index in six.moves.range(size + 1):
            pool_item = self.pool.get()
            eventlet.spawn(just_put, pool_item, index)

        for _ in six.moves.range(size + 1):
            x = queue.get()
            results.append(x)
        self.assertEqual(sorted(results),
                         list(six.moves.range(size + 1)))
    finally:
        timer.cancel()
def connect():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("127.0.0.1", 38297))
    s.listen(5)
    print("Listening for TCP connection on localhost:38297")
    conn, addr = s.accept()
    print("Connected with: ", addr)
    while True:
        command = input("pty> ")
        if len(command) == 0:
            continue
        if "exit" in command:
            conn.send(encode("exit"))
            conn.close()
            break
        else:
            conn.send(encode(command))
            with eventlet.Timeout(3, False):
                print(str(conn.recv(1024), encoding="utf-8"))
def test_parent(self):
    """ Checks that a terminating greenthread whose parent was a
    previous, now-defunct hub greenlet returns execution to the hub
    runloop and not the hub greenlet's parent. """
    hub = hubs.get_hub()

    def dummyproc():
        pass

    g = eventlet.spawn(dummyproc)
    assert hub.greenlet.parent == eventlet.greenthread.getcurrent()
    self.assertRaises(KeyboardInterrupt, hub.greenlet.throw,
                      KeyboardInterrupt())

    assert not g.dead  # check dummyproc hasn't completed
    with eventlet.Timeout(0.5, self.CustomException()):
        # switching to the hub allows dummyproc to complete; execution
        # should then return to the runloop rather than to this greenlet,
        # otherwise hub.switch() would block indefinitely
        self.assertRaises(self.CustomException, hub.switch)

    assert g.dead  # sanity check that dummyproc has completed
def check_delete_all_resource_ok(response, flag="vm"):
    username = "******"
    password = "******"
    ip = "172.16.130.254"
    ids = response.json().get("id")
    ids_list = ids.split(",")
    for id in ids_list:
        if flag == "vm":
            cmd = "kubectl get vm|grep {0}|wc -l".format(id)
        else:
            cmd = "kubectl get vmp|grep {0}|wc -l".format(id)
        # track the result under its own name so the `flag` parameter that
        # selects vm/vmp is not clobbered on later iterations
        deleted = False
        eventlet.monkey_patch()
        with eventlet.Timeout(30, False):
            while True:
                time.sleep(0.5)
                ret = sshClient.tunction(ip=ip, username=username,
                                         password=password, cmd=cmd)
                if not int(ret):
                    deleted = True
                    break
        assert deleted
def get_cset_diff(self, timeout=50):
    '''Fetch and parse changeset diff'''
    csets = {}
    url = self._data_url()
    logger.debug('Fetching url {}'.format(url))
    with eventlet.Timeout(timeout):
        req = requests.get(url)
        if req.status_code != 200:
            raise OsmDiffException('Error fetching URL {}: {}:{}'.format(
                url, req.status_code, req.text))
        resp = req.content
    dfile = gzip.GzipFile(fileobj=StringIO(resp))
    logging.debug('Parsing diff {}, url {} (logger {})'.format(
        self.sequenceno, url, logger))
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    for event, element in etree.iterparse(dfile):
        if element.tag == 'changeset':
            cid = int(element.attrib['id'])
            if 'min_lon' in element.attrib:
                bbox = poly.BBox(float(element.attrib['min_lon']),
                                 float(element.attrib['min_lat']),
                                 float(element.attrib['max_lon']),
                                 float(element.attrib['max_lat']))
            else:
                bbox = poly.BBox()
            csets[cid] = {
                'cid': cid,
                'uid': int(element.attrib['uid']),
                'user': element.attrib['user'],
                'open': element.attrib['open'],
                'comments_count': int(element.attrib['comments_count']),
                'source': {
                    'type': self.type,
                    'sequenceno': self.sequenceno,
                    'observed': now
                },
                'bbox': bbox
            }
    return csets
def delete_stack_user(self, user_id):

    user = self.client_v2.users.get(user_id)

    # FIXME (shardy) : need to test, do we still need this retry logic?
    # Copied from user.py, but seems like something we really shouldn't
    # need to do, no bug reference in the original comment (below)...
    # temporary hack to work around an openstack bug.
    # seems you can't delete a user first time - you have to try
    # a couple of times - go figure!
    tmo = eventlet.Timeout(10)
    status = 'WAITING'
    reason = 'Timed out trying to delete user'
    try:
        while status == 'WAITING':
            try:
                user.delete()
                status = 'DELETED'
            except Exception as ce:
                reason = str(ce)
                logger.warning(_("Problem deleting user %(user)s: "
                                 "%(reason)s") % {'user': user_id,
                                                  'reason': reason})
                eventlet.sleep(1)
    except eventlet.Timeout as t:
        if t is not tmo:
            # not my timeout
            raise
        else:
            status = 'TIMEDOUT'
    finally:
        tmo.cancel()

    if status != 'DELETED':
        raise exception.Error(reason)
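# The `t is not tmo` identity check above (also used in fetch() and
# _send_msg() in this collection) distinguishes "my" timeout from one raised
# by nested code. A minimal sketch of the pattern, with a hypothetical `func`:
import eventlet


def guarded_call(seconds, func):
    tmo = eventlet.Timeout(seconds)
    try:
        return func()
    except eventlet.Timeout as t:
        if t is not tmo:
            raise  # a nested timeout that isn't ours; let it propagate
        return None  # our deadline expired
    finally:
        tmo.cancel()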
def _send_msg(self, msg, reply_cls=None, reply_multi=False,
              active_bundle=None):
    timeout_sec = cfg.CONF.OVS.of_request_timeout
    timeout = eventlet.Timeout(seconds=timeout_sec)
    if active_bundle is not None:
        (dp, ofp, ofpp) = self._get_dp()
        msg = ofpp.ONFBundleAddMsg(dp, active_bundle['id'],
                                   active_bundle['bundle_flags'], msg, [])
    try:
        result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
    except ryu_exc.RyuException as e:
        m = _("ofctl request %(request)s error %(error)s") % {
            "request": msg,
            "error": e,
        }
        LOG.error(m)
        # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
        raise RuntimeError(m)
    except eventlet.Timeout as e:
        with excutils.save_and_reraise_exception() as ctx:
            if e is timeout:
                ctx.reraise = False
        m = _("ofctl request %(request)s timed out") % {
            "request": msg,
        }
        LOG.error(m)
        # NOTE(yamamoto): use RuntimeError for compat with ovs_lib
        raise RuntimeError(m)
    finally:
        timeout.cancel()
    LOG.debug("ofctl request %(request)s result %(result)s",
              {"request": msg, "result": result})
    return result
def list_articles(self, data):
    '''Receive raw JSON data, parse it into a list of articles, and
    prompt the user for which ones to download.'''
    articles_list = []
    for i in range(len(data)):
        article = Article(data[i])
        article.print_info(i)
        articles_list.append(article)

    down_input = input('Input num to start downloading: [type exit to end]')
    if down_input == 'exit':
        self.end = True
    elif down_input == 'no':
        return
    elif not down_input:
        return
    else:
        try:
            positions = map(int, down_input.strip().split(' '))
            for i in positions:
                article = articles_list[i - 1]  # positions are 1-based
                try:
                    print('Start downloading article [' + str(i) + ']')
                    with eventlet.Timeout(60):
                        self.download_pdf(article)
                    print(Back.GREEN + '[' + str(i) + ']' + ' complete!'
                          + Style.RESET_ALL)
                    if self.ris_down:
                        self.download_ris(article['ar_num'],
                                          article.filename_ris)
                except:  # bare: also catches eventlet.Timeout (BaseException)
                    print("Download fail")
        except ValueError:
            print('Input Error')
    return
def get_data(self, params, url):
    r = None
    data = {}
    self.t = time.time()
    params['client_id'] = self.client_id
    while True:
        try:
            with eventlet.Timeout(10):
                r = requests.get(url, params=params,
                                 proxies={'https': self.proxy_address,
                                          'http': self.proxy_address})
            # the request succeeded, so no retry is needed
            try:
                data = json.loads(json.dumps(r.json()))
            except ValueError as detail:
                # some responses cannot be parsed; skip them
                data = r.text.split(' ')
                print 'in get_data, req module'
                print data
                break
            if 'error' in data and data['error']['code'] == 1017:
                # request rate exceeded the limit: switch client_id
                self.client_id = self.client.nextClientId()
                params['client_id'] = self.client_id
                print 'switching client_id', self.client_id
                continue
            return data
        # if the API responds too slowly, youku has started blocking this
        # IP, so pick another proxy IP
        except (eventlet.timeout.Timeout, requests.exceptions.ProxyError,
                requests.exceptions.ConnectionError) as detail:
            self.proxy_address = self.proxy.nextProxy()
        except Exception as detail:
            self.proxy_address = self.proxy.nextProxy()
            print 'get wrong, try again.'
            print detail
def start_simulators(data_dir, *ports):
    sim_map = {}
    with open(os.devnull, 'w') as null:
        for port in ports:
            args = ['java', '-jar', JAR_PATH, str(port),
                    os.path.join(data_dir, str(port)), str(port + 443)]
            sim_map[port] = subprocess.Popen(args, stdout=null, stderr=null)
    time.sleep(1)
    connected = []
    backoff = 0.1
    timeout = time.time() + 3
    while len(connected) < len(sim_map) and time.time() < timeout:
        for port in sim_map:
            if port in connected:
                continue
            sock = socket.socket()
            try:
                sock.connect(('localhost', port))
            except socket.error:
                time.sleep(backoff)
                backoff *= 2
            else:
                connected.append(port)
            sock.close()
    if len(connected) < len(sim_map):
        teardown_simulators(sim_map)
        raise Exception('only able to connect to %r out of %r' %
                        (connected, sim_map))
    for port, process in sim_map.items():
        with eventlet.Timeout(1, exception=False):
            if process.wait() is not None:
                raise Exception('%d has terminated, you may have ghost '
                                'simulators running...' % process.pid)
    return sim_map
def test_graceful_stop_on_one_container_error(runner_factory, rabbit_config):
    runner = runner_factory(rabbit_config, ExampleService, SecondService)
    runner.start()

    container = get_container(runner, ExampleService)
    second_container = get_container(runner, SecondService)
    original_stop = second_container.stop

    with patch.object(second_container, 'stop', autospec=True,
                      wraps=original_stop) as stop:
        rpc_consumer = get_dependency(container, RpcConsumer)
        with patch.object(rpc_consumer, 'handle_result',
                          autospec=True) as handle_result:
            exception = Exception("error")
            handle_result.side_effect = exception

            # use a standalone rpc proxy to call exampleservice.task()
            with RpcProxy("exampleservice", rabbit_config) as proxy:
                # proxy.task() will hang forever because it generates an
                # error in the remote container (so never receives a
                # response). generate and then swallow a timeout as soon
                # as the thread yields
                try:
                    with eventlet.Timeout(0):
                        proxy.task()
                except eventlet.Timeout:
                    pass

            # verify that the error bubbles up to runner.wait()
            with pytest.raises(Exception) as exc_info:
                runner.wait()
            assert exc_info.value == exception

            # Check that the second service was stopped due to the first
            # service being killed
            stop.assert_called_once_with()
def test_prefetch_throughput(container_factory, rabbit_config):
    """Make sure even max_workers=1 can consume faster than 1 msg/second

    Regression test for https://github.com/nameko/nameko/issues/417
    """
    class Service(object):
        name = "service"

        @rpc
        def method(self):
            pass

    rabbit_config[MAX_WORKERS_CONFIG_KEY] = 1
    container = container_factory(Service, rabbit_config)
    container.start()

    replies = []
    with ServiceRpcProxy("service", rabbit_config) as proxy:
        for _ in range(5):
            replies.append(proxy.method.call_async())

        with eventlet.Timeout(1):
            [reply.result() for reply in replies]
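# A recurring pattern in the test snippets above: eventlet.Timeout as a test
# watchdog, failing the test if an awaited call never returns. A minimal
# pytest-style sketch; `wait_for_result` is a hypothetical call under test.
import eventlet


def test_completes_within_deadline():
    with eventlet.Timeout(1):  # raises eventlet.Timeout if the call hangs
        result = wait_for_result()
    assert result is not None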