def get_image_url_list(self, pattern, url):
    """Fetch ``url`` and return all image URLs matching ``pattern``.

    Retries up to 3 times while the HTTP status is >= 300, stopping early
    when the page yields no matches at all.  Backslashes are stripped from
    the matched URLs (they arrive JSON-escaped).  Returns [] on any error.
    """
    logger.info("[url]\t" + url)
    time.sleep(int(get_conf.find(("tieba",))["time_wait"]))
    try:
        http_response = requests.get(url, headers=self.http_header, timeout=10)
        http_response_status = http_response.status_code
        # BUG FIX: the first response was kept as raw bytes, which broke both
        # the text-mode dump below and re.findall() with a str pattern; the
        # retry loop already decoded — now the first fetch does too.
        http_response = http_response.content.decode('utf-8')
        with open("http_response.txt", "w+") as f:
            f.write(http_response)
        try_cnt = 3
        null_flag = False
        while (http_response_status >= 300 and try_cnt > 0 and not null_flag):
            time.sleep(int(get_conf.find(("tieba",))["time_wait"]))
            http_response = requests.get(url, headers=self.http_header, timeout=10)
            http_response_status = http_response.status_code
            http_response = http_response.content.decode('utf-8')
            try_cnt -= 1
            if len(re.findall(pattern, http_response)) == 0:
                null_flag = True
        if http_response_status >= 300:
            logger.warn(url)
        return [obj_url.replace("\\", "") for obj_url in re.findall(pattern, http_response)]
    except Exception as e:
        logger.error(e)
        return []
def gettitle(url):
    """Fetch ``url`` and return its decoded <title> text.

    Can't process gzip compress.  Reads the page in 64-byte chunks (at most
    ~19 KB) until both a <title> tag and a charset declaration are seen,
    then decodes the title with the detected charset.
    """
    # BUG FIX: compile with re.IGNORECASE.  The old code passed the flag as
    # the *pos* argument of pattern.search(), silently skipping 2 bytes and
    # never actually matching case-insensitively.
    tre = re.compile(b'<title[^>]*>([^<]*)<', re.IGNORECASE)
    content = b''
    title = b''
    try:
        with request.urlopen(url) as r:
            for i in range(300):
                content += r.read(64)
                if len(content) < 64:
                    break
                m = tre.search(content)
                # Some sites declare the charset *after* the title.
                n = re.search(b'charset=.+?>', content, re.IGNORECASE)
                if m and n:
                    title = m.group(1)
                    break
    except Exception:
        logger.warn('url time out')
    upper = content.upper()
    if upper.find(b'UTF-8') != -1:
        charset = 'utf-8'
    # BUG FIX: the GBK test lacked "!= -1"; bytes.find() returns -1 (truthy)
    # on a miss, so this branch fired for nearly every non-UTF-8 page.
    elif upper.find(b'GB2312') != -1 or upper.find(b'GBK') != -1:
        charset = 'gbk'
    elif upper.find(b'BIG5') != -1:
        charset = 'big5'
    else:
        charset = 'utf-8'
    try:
        title = title.decode(charset).replace('\n', '')
        title = title.strip()
    except UnboundLocalError:
        # G.F.W
        title = 'G.F.W我恨你'
    return title
def _measure_lags(self, src_pool, dst_pool):
    """Measure one XDCR replication lag sample, in milliseconds.

    Writes a unique key on the source cluster, then polls the destination
    (with a gently growing interval) until the key shows up or TIMEOUT
    elapses.
    """
    key = "xdcr_{}".format(uhex())

    src_client = src_pool.get_client()
    dst_client = dst_pool.get_client()

    interval = self.INITIAL_POLLING_INTERVAL
    src_client.set(key, key)

    start = time()
    while time() - start < self.TIMEOUT:
        if dst_client.get(key).value:
            break
        sleep(interval)
        interval *= 1.1  # back off by 10% per poll
    else:
        logger.warn('XDCR sampling timed out after {} seconds'
                    .format(self.TIMEOUT))
    end = time()

    src_client.delete(key)
    src_pool.release_client(src_client)
    dst_pool.release_client(dst_client)

    return {"xdcr_lag": (end - start) * 1000}  # s -> ms
def __call__(self, test, benckmark):
    """Connect to the ShowFast databases; log and bail out on failure."""
    try:
        self.cbb = Couchbase.connect(bucket='benchmarks', **SHOWFAST)
        self.cbf = Couchbase.connect(bucket='feed', **SHOWFAST)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to connect to database, {}'.format(e))
        return
def added(self, id = None, page=0, play='false', f = None, date = None, dir = None):
    """Render the 'recently added' movies page, optionally marking one watched.

    Web handler.  ``id`` is a movie id, ``play`` ('true'/'false') requests
    playback tracking, ``f`` is a serialized filter, ``date`` is the client's
    request timestamp, ``dir`` signals reverse navigation.
    NOTE(review): ``id`` and ``dir`` shadow builtins, but they are part of the
    URL-parameter interface and cannot be renamed safely.
    """
    # Tolerate non-numeric page parameters from the client.
    try:
        page = int(page)
    except:
        page = 0
    limit = 17  # movies per page
    if play == 'true' and id is not None:
        if date is None:
            # A play request without a timestamp is ignored.
            id = None
            play = 'false'
        else:
            try:
                callingDate = datetime.strptime(date, "%a %b %d %H:%M:%S %Y %Z")
                # Requests older than 60 seconds are treated as stale replays.
                currentDate = datetime.utcnow() - timedelta(seconds = 60)
                # Sat Feb 23 19:35:57 2013 GMT popcorn hour example call
                #callingDate = datetime.utcfromtimestamp(date)
                if callingDate < currentDate:
                    id = None
                    play = 'false'
                    logger.warn('added', 'Play request time < now - 60 (%s < %s).', str(callingDate), str(currentDate))
            except:
                id = None
                play = 'false'
                logger.error('added', 'Movie.py', 'Error converting UTC Javascript date for %s.', date)
    if id is not None:
        # A surviving id means a valid, fresh play request: mark as watched.
        self.content.update_media_watched("movie",id)
    filters = ()
    if f is not None:
        pFilter = Filter.ParseFilter(f)
        if pFilter is not None:
            filters += pFilter,
    movies=self.content.get_movies(page, limit, filters, 'm.added DESC')
    total = self.content.get_total_items("movie m", "m.id", None, Filter("movie", {"movie": "m"}, filters))
    pPage = self.content.get_prev_page(page, limit, "movie", total)
    nPage = self.content.get_next_page(page, limit, "movie", total)
    filterLetters = self.content.get_available_letters("movie m", "m.file_name", None, None)
    tmpl = lookup.get_template("movies/movies.html")
    # Pick the element that receives initial focus in the rendered page.
    startOn = 'moviename_0'
    if id is not None and play == 'true':
        startOn = 'movieid_' + id
    elif dir is not None:
        startOn = 'moviename_' + str(len(movies)-1)
    # NOTE(review): int(ceil(total/limit)) only rounds up if "/" is true
    # division (py3); under py2 integer division the ceil is a no-op — confirm
    # the intended interpreter.
    return tmpl.render(movies = movies
        , prevPage = pPage
        , nextPage = nPage
        , totalPages = int(ceil(total/limit))
        , page = page
        , play = play
        , selected = Filter.getFilterValue(Filter.FILTER_LETTER, filters)
        , filterUrl = "f=" + (f if f is not None else '')
        , filterLetters = filterLetters
        , pageName = 'added'
        , id = id
        , startOn = startOn)
def post_to_dailyp(self, metrics):
    """Publish ``metrics`` for the current test run to the Dailyp service."""
    test_title = self.test.test_config.test_case.title

    # Sanitize the title into an identifier-ish test name.
    test_name = test_title.replace(', ', '_')
    for ch in ", =/.`\\":
        test_name = test_name.replace(ch, "_")

    base = "http://{}/reports/html/?snapshot=".\
        format(StatsSettings.CBMONITOR)
    snapshot_links = [base + snapshot
                      for snapshot in self.test.cbagent.snapshots]

    sub_category = self.test.test_config.test_case.sub_category
    category = self.test.test_config.test_case.category
    if sub_category:
        category_full_name = "{}-{}".format(category, sub_category)
    else:
        category_full_name = category

    post_body = {
        "category": category_full_name,
        "subcategory": sub_category,
        "test_title": test_title,
        "datetime": datetime.now(timezone('US/Pacific')).strftime("%Y_%m_%d-%H:%M"),
        "build": self.test.build,
        "test": test_name,
        "metrics": metrics,
        "snapshots": snapshot_links,
    }
    if self._upload_test_run_dailyp(post_body):
        logger.info("Successfully posted to Dailyp {}".format(post_body))
    else:
        logger.warn("Failed to post to Dailyp {}".format(post_body))
def runPart(self):
    """Consume one tweet from the iterator, queueing its URLs for resolution.

    Raises NothingToDo when the iterator is exhausted; silently returns when
    the resolver queue is Full (the pending element is kept for the next run).
    """
    try:
        # Re-use a previously fetched element if the last run hit a Full queue.
        s = self.__elem or next(self.__iter)  # FIX: py2 ".next()" -> next()
        self.__elem = s
        if u"text" in s:
            try:
                retweeted = (
                    TweetText(s[u"retweeted_status"], self.__urlBuilder, self.__userBuilder, None)
                    if u"retweeted_status" in s  # FIX: dict.has_key() is py2-only
                    else None
                )
                if retweeted:
                    for url in retweeted.urls():
                        self.__urlResolver.addUrlToQueue(url)
                tweet = TweetText(s, self.__urlBuilder, self.__userBuilder,
                                  retweeted.id() if retweeted else None)
                for url in tweet.urls():
                    self.__urlResolver.addUrlToQueue(url)
            except UrlException as e:
                logger.warn(u"Cannot build url: " + str(e))
        self._doSmthElse()
        self.__elem = None
    except Full:
        return
    except StopIteration:
        raise NothingToDo()
def authenticate(*, email, passwd):
    """Authenticate a user by email + password and return a cookie-bearing
    JSON response.

    NOTE(review): the ``'******'`` runs below are credential-scrubbing
    artifacts — the original logging/comparison statements were redacted at
    publication time and this block is NOT valid Python as it stands.  The
    missing logic (per the surrounding code) compared ``browser_sha1`` with
    the stored ``user.passwd`` digest and raised APIValueError on mismatch.
    Restore from the upstream source before use.
    """
    if not email:
        raise APIValueError('email', 'Invalid email.')
    if not passwd:
        raise APIValueError('passwd', 'Invalid password.')
    users = yield from User.findAll('email=?', [email])
    if len(users) == 0:
        raise APIValueError('email', 'Email not exist.')
    user = users[0]
    # check passwd:
    # sha1 = hashlib.sha1()
    # sha1.update(user.id.encode('utf-8'))
    # sha1.update(b':')
    # sha1.update(passwd.encode('utf-8'))
    browser_sha1_passwd = '%s:%s' % (user.id, passwd)
    browser_sha1 = hashlib.sha1(browser_sha1_passwd.encode('utf-8'))
    logger.warn('user password: '******'server password: '******'passwd', 'Invalid password.')
    # authenticate ok, set cookie:
    r = web.Response()
    r.set_cookie(COOKIE_NAME, user2cookie(user, 86400), max_age=86400, httponly=True)
    # Never echo the password hash back to the client.
    user.passwd = '******'
    r.content_type = 'application/json'
    r.body = json.dumps(user, ensure_ascii=False).encode('utf-8')
    return r
def runTweetSchedule(newGenericPosts):
    """Tweet the first new, length-safe post when the schedule allows it."""
    timerKey = 'lastTweetTime'
    # FIX: dropped the "== True" / "== False" comparisons for plain truth
    # tests, and built the candidate list with a comprehension.
    if scheduler.isTimeToPost(timerKey, twitterConfig['tweetTimes']):
        potentialTweets = [post for post in newGenericPosts
                           if not twitterUtils.isOldTweet(post.link)]
        random.shuffle(potentialTweets)
        for genericPost in potentialTweets:
            tweet = genericPost.generateTweet()
            if (tweet.getLength() <= 140):
                scheduler.setTimeInDataStore(timerKey)
                logger.info('tweeting : {}'.format(tweet.title))
                twitterUtils.saveTweet(tweet)
                tweet.tweet()
                break
        if len(potentialTweets) == 0:
            logger.warn('Twitter - No new content')
    else:
        logger.debug('Tweet worker : waiting')
def render(self):
    """Render every unit-test collection and copy static unit-test files."""
    if self.spec.unit_tests is None:
        return

    # Render one target file per unit test collection.
    for coll in self.spec.unit_tests:
        data = {
            'info': self.spec.info,
            'class': coll.klass,
            'tests': coll.tests,
        }
        pattern = coll.klass.name
        if self.settings.resource_modules_lowercase:
            pattern = pattern.lower()
        target_name = self.settings.tpl_unittest_target_ptrn.format(pattern)
        target_path = os.path.join(self.settings.tpl_unittest_target, target_name)
        self.do_render(data, self.settings.tpl_unittest_source, target_path)

    # Copy verbatim unit test files, if configured.
    if self.settings.unittest_copyfiles is not None:
        for origfile in self.settings.unittest_copyfiles:
            utfile = os.path.join(*origfile.split('/'))
            if not os.path.exists(utfile):
                logger.warn("Unit test file \"{}\" configured in `unittest_copyfiles` does not exist"
                            .format(utfile))
                continue
            target = os.path.join(self.settings.tpl_unittest_target,
                                  os.path.basename(utfile))
            logger.info('Copying unittest file {} to {}'.format(os.path.basename(utfile), target))
            shutil.copyfile(utfile, target)
def _add_cluster(self):
    """Store the cluster spec parameters in the 'clusters' bucket."""
    cluster = self.test.cluster_spec.name
    params = self.test.cluster_spec.parameters
    try:
        cb = Couchbase.connect(bucket='clusters', **SHOWFAST)
        cb.set(cluster, params)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to add cluster, {}'.format(e))
def onAnswer(self, id, caller, callee):
    """Record an ANSWER event for a previously received call."""
    now = datetime.datetime.now()
    logger.msg("Invoked onAnswer handler")
    # FIX: dict.has_key() is Python-2-only; use the "in" operator.
    if id not in self.ids:
        logger.warn("Something strange happened: answered a non-received call.")
    else:
        self.insertCallHistory(self.ids[id], self.callStatuses['ANSWER'], now)
def collect(self):
    """Sample forever at ``self.interval`` seconds; Ctrl-C exits the process."""
    while True:
        try:
            self.sample()
            time.sleep(self.interval)
        except KeyboardInterrupt:
            sys.exit()
        except Exception as err:
            logger.warn(err)  # keep collecting despite sampling failures
def __call__(self, test, benckmark):
    """Connect to the configured ShowFast databases; log and bail on failure."""
    showfast = test.test_config.stats_settings.showfast
    cbmonitor = test.test_config.stats_settings.cbmonitor
    try:
        self.cbb = Couchbase.connect(bucket='benchmarks', **showfast)
        self.cbf = Couchbase.connect(bucket='feed', **showfast)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to connect to database, {}'.format(e))
        return
def sample(self):
    """Endlessly measure per-bucket lags and push them to the store."""
    while True:
        try:
            for bucket, pool in self.pools:
                stats, sleep_time = self._measure_lags(pool)
                self.store.append(stats,
                                  cluster=self.cluster,
                                  bucket=bucket,
                                  collector=self.COLLECTOR)
                sleep(sleep_time)
        except Exception as err:
            logger.warn(err)  # never let one bad sample stop the loop
def _add_cluster(self):
    """Store the cluster spec parameters in the 'clusters' bucket."""
    cluster = self.test.cluster_spec.name
    params = self.test.cluster_spec.parameters
    showfast = self.test.test_config.stats_settings.showfast
    try:
        cb = Couchbase.connect(bucket='clusters', **showfast)
        cb.set(cluster, params)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to add cluster, {}'.format(e))
def GET_static(self):
    """Serve a file from ./resources/, refusing paths that escape it."""
    dirRes = os.path.abspath(os.path.join(os.getcwd(), "./resources/"))
    filename = os.path.join(dirRes, self.path[1:])
    filename = os.path.abspath(filename)
    # SECURITY FIX: a bare startswith(dirRes) also accepted sibling paths
    # such as ".../resources_evil"; compare resolved path components instead.
    if os.path.commonpath([dirRes, filename]) == dirRes:
        self.__sendFile(filename)
    else:
        logger.warn(u"Cannot send file " + filename + u": must be in " + dirRes)
        self.send_response(404)
def _post_benckmark(self, metric, value):
    """Store a benchmark document, obsoleting prior results for the metric."""
    key, benckmark = self._prepare_data(metric, value)
    try:
        cb = Couchbase.connect(bucket='benchmarks', **SHOWFAST)
        self._mark_previous_as_obsolete(cb, benckmark)
        cb.set(key, benckmark)
        Comparator()(test=self.test, benckmark=benckmark)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to post results, {}'.format(e))
def _get_list_of_servers(self):
    """Periodically refresh ``self.server_nodes`` from the REST API."""
    while True:
        try:
            nodes = self.session.get(self.nodes_url).json()
        except Exception as e:
            logger.warn('Failed to get list of servers: {}'.format(e))
            # FIX: back off before retrying; the old bare "continue"
            # hammered the endpoint in a tight loop while it was down.
            sleep(self.NODES_UPDATE_INTERVAL)
            continue
        self.server_nodes = [n['hostname'] for n in nodes['servers']]
        sleep(self.NODES_UPDATE_INTERVAL)
def mc_iterator(self):
    """Yield an authenticated memcached client per (host, bucket) pair."""
    _, password = self.cluster_spec.rest_credentials
    for hostname in self.cluster_spec.yield_hostnames():
        for bucket in self.test_config.buckets:
            client = MemcachedClient(host=hostname, port=11210)
            try:
                client.sasl_auth_plain(user=bucket, password=password)
                yield client
            except MemcachedError:
                logger.warn('Auth failure')  # skip this bucket, keep going
def set_index_settings(self, host: str, settings: dict):
    """Apply known indexer settings to ``host``; skip unrecognized options."""
    api = 'http://{}:9102/settings'.format(host)
    curr_settings = self.get_index_settings(host)
    for option, value in settings.items():
        if option not in curr_settings:
            logger.warn('Skipping unknown option: {}'.format(option))
            continue
        logger.info('Changing {} to {}'.format(option, value))
        self.post(url=api, data=json.dumps({option: value}))
def collect(self):
    """Sample in a loop, logging (but surviving) unexpected exceptions."""
    while True:
        try:
            self.sample()
            time.sleep(self.interval)
        except KeyboardInterrupt:
            sys.exit()
        except Exception as err:
            logger.warn("Unexpected exception in {}: {}"
                        .format(self.__class__.__name__, err))
def find_previous(self, new_build):
    """Find previous build within current release or latest build
    from previous release"""
    all_builds = sorted(self.snapshots_by_build.keys(), reverse=True)
    try:
        position = all_builds.index(new_build)
        return all_builds[position + 1:][0]
    except IndexError:
        return  # new_build is already the oldest known build
    except ValueError:
        logger.warn('Didn\'t find {} in {}'.format(new_build, all_builds))
def _post_benckmark(self, metric, value):
    """Store a benchmark document, obsoleting prior results for the metric."""
    key, benckmark = self._prepare_data(metric, value)
    showfast = self.test.test_config.stats_settings.showfast
    try:
        cb = Couchbase.connect(bucket='benchmarks', **showfast)
        self._mark_previous_as_obsolete(cb, benckmark)
        cb.set(key, benckmark)
        Comparator()(test=self.test, benckmark=benckmark)
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Failed to post results, {} : {}'.format(e, benckmark))
def error(self, err, cb, i):
    """Log a worker connection failure, then reconnect and resume the batch."""
    logger.warn('Connection problem with worker-{} thread-{}: {}'.format(
        self.sid, i, err)
    )
    cb.client._close()
    time.sleep(15)  # give the remote side a moment before reconnecting
    deferred = cb.client.connect()
    deferred.addCallback(self.do_batch, cb, i)
    deferred.addErrback(self.error, cb, i)  # retry again on repeat failure
def __exit__(self, exc_type, exc_val, exc_tb):
    """Tear down the test; swallow Ctrl-C, escalate recorded failures."""
    failure = self.check_failures()
    self.tear_down()
    # FIX: identity comparison is the idiomatic check for an exception class.
    if exc_type is KeyboardInterrupt:
        logger.warn('The test was interrupted')
        return True
    if failure:
        logger.interrupt(failure)
def create_thumb(self):
    """Create a JPEG thumbnail strip from the first 8 pages of the PDF."""
    logger.info("create thumbnail")
    pdf_file = "data/" + self.idx + ".pdf"
    jpg_file = pdf_file[:-4] + ".jpg"
    if not os.path.isfile(jpg_file):
        # SECURITY/ROBUSTNESS FIX: pass an argv list with shell=False so that
        # self.idx cannot be interpreted by the shell and "[0-7]" cannot be
        # glob-expanded; montage receives the page range literally.
        cmd = ["montage", pdf_file + "[0-7]",
               "-mode", "Concatenate", "-tile", "x1",
               "-quality", "80", "-resize", "x330", "-trim", jpg_file]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        (out, err) = proc.communicate()
        logger.info("... done")
    else:
        logger.warn("file %s already exists" % jpg_file)
def sample(self):
    """Continuously sample XDCR lag for every replicated bucket."""
    while True:
        try:
            for bucket, src_pool, dst_pool in self.pools:
                lags = self._measure_lags(src_pool, dst_pool)
                self.store.append(lags,
                                  cluster=self.cluster,
                                  bucket=bucket,
                                  collector=self.COLLECTOR)
        except Exception as err:
            logger.warn(err)  # log and keep sampling
def seller_check(chromeDriver):
    """Return True when the buybox seller/shipper is amazon.com.tr."""
    l.info("Checking shipper...")
    element = chromeDriver.find_element_by_id("tabular-buybox-truncate-0").text
    if element.lower().find('amazon.com.tr') == -1:
        l.warn("Amazon is not the seller/shipper")
        return False
    l.info(f"Successfully verified shipper as: {element}")
    return True
def do_request(id_unique, page, followers):
    #process page info **all network request**
    """Fetch one page of followers for ``id_unique`` and extend ``followers``.

    users
    :return: json
    """
    # NOTE: mutates the module-level ``headers`` dict shared by all requests.
    headers['Referer'] = "https://xueqiu.com/" + str(id_unique)
    try:
        paging = session.get(url.format(id_unique, page, getRandom()), headers=headers)
        followers.extend(paging.json()["users"])
    # FIX: the bare "except:" also swallowed SystemExit/KeyboardInterrupt;
    # catching Exception lets interrupts propagate.
    except Exception:
        logger.warn("{} request failed".format(id_unique))
async def save(self):
    """INSERT this model instance into its table.

    Collects a value (or default) for every non-primary-key field, appends
    the primary key last to match the placeholder order of ``__insert__``,
    and logs a warning unless exactly one row was affected.
    """
    args = [self.getValueOrDefault(field) for field in self.__fields__]
    args.append(self.getValueOrDefault(self.__primary_key__))
    rows = await execute(self.__insert__, args)
    if rows != 1:
        logger.warn('failed to insert record:affected rows: %s' % rows)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Tear down the test; swallow Ctrl-C, escalate recorded failures."""
    failure = self.debug()
    self.tear_down()
    # FIX: identity comparison is the idiomatic check for an exception class.
    if exc_type is KeyboardInterrupt:
        logger.warn('The test was interrupted')
        return True
    if failure:
        logger.interrupt(failure)
def connect_to_xenserver():
    """Open (and cache) a XenAPI session for every configured Xen host."""
    for host in XEN:
        if host[0] not in global_xenserver_conn:
            try:
                transport = TimeoutTransport()
                session = XenAPI.Session("http://" + host[0], transport)
                session.login_with_password(host[1], host[2])
                global_xenserver_conn[host[0]] = session
                # NOTE(review): success logged at WARN level; consider INFO —
                # left as-is to preserve behavior.
                logger.warn("Connect to XenServer: {0} are success(with timeout).".format(host[0]))
            # FIX: Python 3-compatible exception syntax.
            except Exception as e:
                logger.exception(e)
def render(self):
    """Render each unit-test collection through the template and copy any
    configured static unit-test files, substituting release placeholders in
    fixtures.py."""
    if not self.spec.unit_tests:
        return
    # render all unit test collections
    for coll in self.spec.unit_tests:
        tests = coll.tests
        # R4's Bundle examples include two files excluded from testing.
        if (self.settings.CURRENT_RELEASE_NAME == "R4"
                and coll.klass.name == "Bundle"):
            tests = [
                t for t in tests
                if t.filename not in ("profiles-types.json", "extension-definitions.json")
            ]
        data = {
            "info": self.spec.info,
            "class": coll.klass,
            "tests": tests,
            "profile": self.spec.profiles[coll.klass.name.lower()],
            "release_name": self.settings.CURRENT_RELEASE_NAME,
        }
        file_pattern = coll.klass.name
        if self.settings.RESOURCE_MODULE_LOWERCASE:
            file_pattern = file_pattern.lower()
        file_name = self.settings.UNITTEST_TARGET_FILE_NAME_PATTERN.format(
            file_pattern)
        # UNITTEST_TARGET_DIRECTORY supports "/": presumably a pathlib.Path.
        file_path = self.settings.UNITTEST_TARGET_DIRECTORY / file_name
        self.do_render(data, self.settings.UNITTEST_SOURCE_TEMPLATE, file_path)
    # copy unit test files, if any
    if self.settings.UNITTEST_COPY_FILES is not None:
        for filepath in self.settings.UNITTEST_COPY_FILES:
            if filepath.exists():
                target = self.settings.UNITTEST_TARGET_DIRECTORY / filepath.name
                logger.info("Copying unittest file {} to {}".format(
                    filepath.name, target))
                # fixtures.py is templated: inject release name and version.
                if filepath.name == "fixtures.py":
                    with open(filepath, "r") as fp:
                        contents = fp.read()
                        contents = contents.replace(
                            "{{release}}",
                            self.settings.CURRENT_RELEASE_NAME).replace(
                                "{{fhir_version}}", self.spec.info.version)
                    with open(target, "w") as fp:
                        fp.write(contents)
                else:
                    # All other files are copied verbatim.
                    shutil.copyfile(filepath, target)
            else:
                logger.warn(
                    'Unit test file "{0}" configured in `UNITTEST_COPY_FILES` does not exist'
                    .format(filepath))
def parse_aircraft(self, response):
    """Follow an aircraft /index page to its /specs page."""
    # extracting aircraft specifications rows
    aircraft_url = response.url
    if not aircraft_url.endswith('/index'):
        logger.warn(f'Aircraft url={aircraft_url} has wrong format.')
        return
    # FIX: replaced the obfuscated f'{...}{"specs"}' interpolation of a
    # constant with plain slicing + concatenation: swap "index" for "specs".
    aircraft_url = aircraft_url[:-5] + 'specs'
    request = scrapy.Request(aircraft_url, callback=self.parse_aircraft_specs)
    yield request
def _get_self_ip(self):
    """Return this host's public IP as reported by ``self.target``, or ''."""
    # (fetch own external IP)
    try:
        r = requests.get(self.target, timeout=5)
        if r.ok:
            # FIX: use r.text so we log/return str, not bytes, on Python 3.
            ip = r.text.strip()
            logger.info('Get self ip success: %s' % ip)
            return ip
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Get self ip fail, %s' % e)
    return ''
def play(self) -> 'DataFramePlayer':
    """Apply the loaded Cassette's processing to the DataFrame.

    :return: self, with ``self.df`` replaced by the processed DataFrame
    :raises EmptyCassetteError: when no cassette is loaded
    """
    if self.cassette is None:
        logger.warn('カセットが刺さっていません')
        raise EmptyCassetteError
    self.df = self.cassette.to_process(self.df)
    return self
def mc_iterator(self):
    """Yield an authenticated memcached client for every server/bucket pair."""
    password = self.test_config.bucket.password
    for host_port in self.cluster_spec.yield_servers():
        host = host_port.split(':')[0]
        memcached_port = self.rest.get_memcached_port(host_port)
        for bucket in self.test_config.buckets:
            client = MemcachedClient(host=host, port=memcached_port)
            try:
                client.sasl_auth_plain(user=bucket, password=password)
                yield client
            except MemcachedError:
                logger.warn('Auth failure')  # skip this bucket, keep going
def _get_self_ip(self):
    """Scrape this host's public IP out of ``self.target``'s HTML, or ''."""
    # (fetch own external IP)
    try:
        r = requests.get(self.target, timeout=5)
        if r.ok:
            pattern = re.compile(r'IP:port</td>\n?\s*<td.*?>([\d.]*?)(?::\d*)</td>', re.I)
            # FIX: search the decoded text; matching a str pattern against
            # r.content (bytes) raises TypeError on Python 3.
            ip = pattern.search(r.text).group(1)
            logger.info('Get self ip success: %s' % ip)
            return ip
    # FIX: Python 3-compatible exception syntax ("except E, e" is py2-only).
    except Exception as e:
        logger.warn('Get self ip fail, %s' % e)
    return ''
def download_all_regionNames(maxNReq=2, pause=3):
    """
    download the list of the supported area.

    Parameters
    ----------
    maxNReq: int
        maximum number of retries after the first attempt. (default: 2)
    pause: int
        sleep time in seconds between queries. (default: 3)
    """
    db = virusDB(DBFILE)
    db.db_connect()

    reqCount = 0
    isSuccess = False

    # retrieve names, retrying up to maxNReq extra times
    while reqCount <= maxNReq and (not isSuccess):
        try:
            reqCount = reqCount + 1
            logger.info('Start to download the region names.')
            regionNamesRes = requests.get('{0}/provinceName'.format(API_URI), timeout=10)
            # FIX: json.loads() no longer accepts an "encoding" keyword on
            # Python 3.9+ (it was ignored since 3.1); .text is already str.
            regionNames = json.loads(regionNamesRes.text)
            isSuccess = True
        except Exception as e:
            if reqCount <= maxNReq:
                logger.warn('Failed in {0} try.'.format(reqCount))
                logger.error(e)
                time.sleep(pause)
            else:
                logger.warn('Failed in {0} tries, exit!'.format(maxNReq))
                regionNamesRes.raise_for_status()

    logger.info('{0:5d} region names were retrieved.'.format(
        len(regionNames['results'])))

    # save the region names to the database
    logger.info('Start to save the region names to the database.')
    db.db_create_regionname_table()
    for regionname in regionNames['results']:
        entry = {'name': regionname}
        db.db_insert_regionname_entry(entry)

    db.db_clean()
    db.db_close()
    logger.info('Finish successfully!')

    return regionNames
def set_index_settings(self, host: str, settings: dict):
    """Push recognized indexer settings to ``host``, one option at a time."""
    logger.info('Changing indexer settings for {}'.format(host))
    api = 'http://{}:9102/settings'.format(host)
    curr_settings = self.get(url=api).json()
    for option, value in settings.items():
        if option not in curr_settings:
            logger.warn('Skipping unknown option: {}'.format(option))
            continue
        logger.info('Changing {} to {}'.format(option, value))
        self.post(url=api, data=json.dumps({option: value}))
def map(cls, src_path, drive_letter=None, force=False):
    """Map the remote ``src_path`` to a drive letter, optionally forcing a
    cancel-and-remap of an existing mapping.

    Returns SUCCESS, NOT_REMOTE_PATH_ERROR, or MAP_DRIVE_FAILURE; raises
    CustomError when no letter is available or mapping finally fails.
    """
    # Not remote path, do not need map
    if not is_remote_path(src_path):
        logger.warn(src_path + " is not a remote path, do not need to map.")
        return NOT_REMOTE_PATH_ERROR
    # Disable the logger if choose to force mapping
    if force:
        logger.disable()
    try:
        # Use the available drive letter
        if drive_letter is None:
            if cls.has_been_mapped():
                drive_letter = cls.__cur_drive_letter
            else:
                drive_letter = cls._obtain_available_letter()
                if drive_letter is None:
                    raise CustomError("No available drive letter!", NO_AVAILABLE_DRIVE_LETTER)
        # Try to map the drive if it has not been mapped by me
        result = cls.try_to_map(
            src_path, drive_letter) if not cls.has_been_mapped() else MAP_DRIVE_FAILURE
        # If failed, try to cancel the current drive mapping
        if result != SUCCESS:
            confirm = True
            if not force:
                confirm = call_user_confirm(
                    "Mapping drive " + drive_letter + " failed, do you wanna try to force cancelling it?")
            if confirm:
                result = cls.try_to_cancel_mapping(drive_letter)
            else:
                return MAP_DRIVE_FAILURE
            # If succeed to cancel the mapping, try to map the drive again
            if result == SUCCESS:
                result = cls.try_to_map(src_path, drive_letter)
            # If still failed, raise an error
            if result != SUCCESS:
                raise CustomError("Mapping drive " + drive_letter + " failed!", MAP_DRIVE_FAILURE)
        return SUCCESS
    finally:
        # BUG FIX: re-enable logging on *every* exit path; previously an
        # early return or a raised CustomError left the logger disabled.
        if force:
            logger.enable()
def sample(self):
    """Measure every metric for every bucket pool, forever."""
    while True:
        for bucket, pool in self.pools:
            for metric in self.METRICS:
                try:
                    stats, sleep_time = self.endure(pool, metric)
                    self.store.append(stats,
                                      cluster=self.cluster,
                                      bucket=bucket,
                                      collector=self.COLLECTOR)
                    sleep(sleep_time)
                except Exception as err:
                    logger.warn(err)  # skip this metric, keep sampling
def tweet(self, msg: str) -> bool:
    """Post ``msg`` as a status update; return True on success."""
    if not msg:
        logger.warn('Tweet failed: Empty status message')
        return False
    try:
        self.api.update_status(msg)
        # FIX: plain literal; the old f-string had no placeholders.
        logger.info('Tweet successful')
        return True
    # NOTE(review): tweepy.TweepError was removed in tweepy 4.x in favor of
    # tweepy.errors.TweepyException — confirm the pinned tweepy version.
    except tweepy.TweepError as e:
        logger.error(f'Tweet failed: {e.reason}')
        return False
def post_http(self, path, server=None, port=8091):
    """POST ``path`` to the REST API; fall back to ``self.retry`` on errors."""
    server = server or self.master_node
    url = "http://{}:{}{}".format(server, port, path)
    try:
        response = self.session.post(url=url, auth=self.auth)
        if response.status_code not in (200, 201, 202):
            logger.warn("Bad response: {}".format(url))
            return self.retry(path, server, port)
        return response.json()
    except requests.ConnectionError:
        logger.warn("Connection error: {}".format(url))
        return self.retry(path, server, port)
def _parse(self, response):
    """Parse an arXiv feed response into (id, pdf_url, paper_dict).

    Returns ("", "", []) when parsing fails for any reason.
    """
    try:
        feed = feedparser.parse(response)
        # FIX: removed the leftover debug statement "print feed.entries"
        # (py2 print-statement syntax, invalid on Python 3).
        entry = feed.entries[0]
        paper = parse_arxiv_entry(entry)
        paper['search_scope'] = "arxiv"
        paper['tags'] = "1,7"
        remote_pdf = "http://arxiv.org/pdf/%s.pdf" % paper['id']
        return paper['id'], remote_pdf, paper
    except Exception as e:
        logger.warn(e)
        return "", "", []
def _get_self_ip(self): # 获取自身外网ip try: #r = requests.get(self.http_target, headers=self.headers, timeout=5) #if r.ok: #pattern = re.compile(r'IP:port</td>\n?\s*<td.*?>([\d.]*?)(?::\d*)</td>', re.I) #ip = pattern.search(r.content).group(1) #logger.info('Get self ip success: %s' % ip) ip = '115.159.190.214' # 没用到啊 return ip except Exception, e: logger.warn('Get self ip fail, %s' % e) return ''
def lblsave(filename, lbl):
    """Save an integer label map ``lbl`` as a palette-mode ("P") PNG.

    NOTE(review): the putpalette() call is commented out, so the PNG is
    written without an explicit colormap — confirm downstream consumers
    rely on raw indices only.
    """
    # if osp.splitext(filename)[1] != '.png':
    #     filename += '.png'
    # Assume label ranges [-1, 254] for int32,
    # and [0, 255] for uint8 as VOC.
    if lbl.min() >= -1 and lbl.max() < 255:
        lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')
        # colormap = label_colormap(255)
        # lbl_pil.putpalette((colormap * 255).astype(np.uint8).flatten())
        lbl_pil.save(filename)
    else:
        # Values outside the uint8 range cannot be stored losslessly in PNG.
        logger.warn('[%s] Cannot save the pixel-wise class label as PNG, '
                    'so please use the npy file.' % filename)
def read_valuesets(self):
    """Load ValueSet and CodeSystem resources from valuesets.json."""
    resources = self.read_bundle_resources('valuesets.json')
    for resource in resources:
        resource_type = resource['resourceType']
        if resource_type == 'ValueSet':
            assert 'url' in resource
            self.valuesets[resource['url']] = FHIRValueSet(self, resource)
        elif resource_type == 'CodeSystem':
            assert 'url' in resource
            # Only code systems that actually enumerate concepts are kept.
            if 'content' in resource and 'concept' in resource:
                self.codesystems[resource['url']] = FHIRCodeSystem(self, resource)
            else:
                logger.warn("CodeSystem with no concepts: {}".format(resource['url']))
    logger.info("Found {} ValueSets and {} CodeSystems".format(len(self.valuesets), len(self.codesystems)))
def _get_query_connections(self):
    """Build a urllib3 connection pool for every node running n1ql."""
    conns = []
    try:
        nodes = self.session.get(self.query_url).json()
        for node in nodes['nodes']:
            if 'n1ql' not in node['services']:
                continue
            # The query service listens on 8093 rather than the admin port.
            url = node['hostname'].replace('8091', '8093')
            conns.append(urllib3.connection_from_url(url))
    except Exception as e:
        logger.warn('Failed to get list of servers: {}'.format(e))
        raise
    return conns
def db_connect(self):
    """
    Connect/create the SQLite3 database and store the handle on self.conn.
    """
    conn = None
    try:
        conn = db.connect(self.dbFile)
    except db.Error as e:
        logger.warn(e)
        # FIX: bare "raise" re-raises with the original traceback intact;
        # "raise e" reset the traceback to this line.
        raise
    self.conn = conn
def add(self, cassette: ConversionCassette) -> 'DataFramePlayer':
    """Insert a Cassette into the player.

    :param cassette: ConversionCassette
    :return: self
    :raises OverCassetteError: when a cassette is already inserted
    """
    if self.cassette is not None:
        logger.warn('カセットがすでに刺さっています')
        # TODO: may not be working as intended
        raise OverCassetteError
    self.cassette = cassette
    return self
def get_http(self, path, server=None, port=8091, json=True):
    """GET ``path`` from the REST API, returning decoded JSON or raw text.

    Falls back to ``refresh_nodes_and_retry`` on bad status codes or
    connection errors.
    """
    server = server or self.master_node
    url = "http://{}:{}{}".format(server, port, path)
    try:
        r = self.session.get(url=url, auth=self.auth)
        if r.status_code in (200, 201, 202):
            # FIX: the "json and r.json() or r.text" idiom returned r.text
            # whenever the decoded JSON was falsy ({}, [], 0, null).
            return r.json() if json else r.text
        logger.warn("Bad response: {}".format(url))
        # BUG FIX: propagate the caller's "json" flag; it was dropped on
        # this path, silently forcing JSON decoding after a retry.
        return self.refresh_nodes_and_retry(path, server, port, json)
    except requests.ConnectionError:
        logger.warn("Connection error: {}".format(url))
        return self.refresh_nodes_and_retry(path, server, port, json)
def connect_to_xenserver():
    """Open (and cache) a XenAPI session for every configured Xen host."""
    for host in XEN:
        if host[0] not in global_xenserver_conn:
            try:
                transport = TimeoutTransport()
                session = XenAPI.Session("http://" + host[0], transport)
                session.login_with_password(host[1], host[2])
                global_xenserver_conn[host[0]] = session
                # NOTE(review): success logged at WARN level; consider INFO —
                # left as-is to preserve behavior.
                logger.warn(
                    "Connect to XenServer: {0} are success(with timeout).".
                    format(host[0]))
            # FIX: Python 3-compatible exception syntax.
            except Exception as e:
                logger.exception(e)
def run(self):
    """Consume until a connection error (reconnect) or a fatal error (stop)."""
    try:
        self._connection = pika.BlockingConnection(pika.URLParameters(self._url))
        self.start_consuming()
    except (AMQPChannelError, ConnectionClosed, NoFreeChannels) as exc:
        logger.warn('Connection error ({}, {}: {})! Reconnecting in about {} seconds'
                    .format(time.time(), exc.__class__, repr(exc), self.RECONNECT_TIMEOUT))
        # Jitter the sleep a little so workers do not reconnect in lockstep.
        time.sleep(self.RECONNECT_TIMEOUT + randint(-2, 2))
        self.cleanup_maybe_reconnect()
    except Exception:
        logger.error('Consumer Error that does not look like connection failure! See the traceback below.')
        logger.error(traceback.format_exc())
        self.cleanup_maybe_reconnect(False)
def start_turn(self):
    """Begin this player's turn: grant start resources and advance phases."""
    if self._phase is not PHASE_NOTMYTURN:
        # (double space preserved from the original string concatenation)
        logger.warn('Player %s starts his turn,  but his phase was %s'
                    % (self.name, self._phase))
    self._phase = PHASE_STARTING
    opening_resources = Resource(action=1, buy=1)
    self.set_resources_no_broadcast(opening_resources)
    logger.info('turn of: %s' % self.name)
    self._game.bc_start_phase(self, opening_resources)
    # TODO: check for start phase actions and bonuses
    self._phase = PHASE_ACTION
    self._game.bc_action_phase(self)
def get_default_config():
    """Load the bundled default config, seeding ~/.labelmerc on first run."""
    config_file = osp.join(here, "default_config.yaml")
    with open(config_file) as f:
        config = yaml.safe_load(f)

    # save default config to ~/.labelmerc
    user_config_file = osp.join(osp.expanduser("~"), ".labelmerc")
    if not osp.exists(user_config_file):
        try:
            shutil.copy(config_file, user_config_file)
        except Exception:
            # Best effort only; a read-only home dir shouldn't be fatal.
            logger.warn("Failed to save config: {}".format(user_config_file))

    return config
def collect(self):
    """Sample on a fixed cadence, subtracting the time each sample took."""
    while True:
        try:
            started = time.time()
            self.sample()
            elapsed = time.time() - started
            # Only sleep off the remainder of the interval; if sampling ran
            # long, start the next sample immediately.
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)
        except KeyboardInterrupt:
            sys.exit()
        except Exception as err:
            logger.warn("Unexpected exception in {}: {}".format(
                self.__class__.__name__, err))