def wrap(self, availWidth, availHeight):
    """If we need more width than we have, complain, keep a scale"""
    self.pad = self.border_padding(True, 0.1)
    maxWidth = float(min(
        styles.adjustUnits(self.maxWidth, availWidth) or availWidth,
        availWidth))
    self.maxWidth = maxWidth
    maxWidth -= (self.pad[1] + self.pad[3])
    self.width, self.height = _listWrapOn(self.content, maxWidth, None)
    if self.width > maxWidth:
        if self.mode <> 'shrink':
            self.scale = 1.0
            log.warning("BoundByWidth too wide to fit in frame (%s > %s): %s",
                        self.width, maxWidth, self.identity())
        if self.mode == 'shrink' and not self.scale:
            self.scale = (maxWidth + self.pad[1] + self.pad[3]) / \
                         (self.width + self.pad[1] + self.pad[3])
    else:
        self.scale = 1.0
    self.height *= self.scale
    self.width *= self.scale
    return self.width, self.height + (self.pad[0] + self.pad[2]) * self.scale
def handle_mode(self, numeric, command, args):
    # <- :unreal.midnight.vpn MODE #endlessvoid +bb test!*@* *!*@bad.net
    # <- :unreal.midnight.vpn MODE #endlessvoid +q GL 1444361345
    # <- :unreal.midnight.vpn MODE #endlessvoid +ntCo GL 1444361345
    # <- :unreal.midnight.vpn MODE #endlessvoid +mntClfo 5 [10t]:5 GL 1444361345
    # <- :GL MODE #services +v GL

    # This looks inconsistent: why do some commands have a TS at the end while
    # others don't? Answer: the first syntax (MODE sent by a SERVER) is used
    # for channel bursts. According to the Unreal 3.2 docs, the last argument
    # should be interpreted as a timestamp ONLY if it is a number and the
    # sender is a server. Ban bursting does not give any TS, nor do normal
    # users setting modes. SAMODE is special though: it sends 0 as a TS
    # argument, which should be ignored unless breaking the internal channel
    # TS is desired. (A standalone sketch of this rule follows the function.)

    # Also, we need to get rid of that extra space following the +f argument. :|
    if utils.isChannel(args[0]):
        channel = utils.toLower(self.irc, args[0])
        oldobj = self.irc.channels[channel].deepcopy()
        modes = list(filter(None, args[1:]))  # normalize whitespace
        parsedmodes = utils.parseModes(self.irc, channel, modes)
        if parsedmodes:
            utils.applyModes(self.irc, channel, parsedmodes)
        if numeric in self.irc.servers and args[-1].isdigit():
            # Sender is a server AND the last arg is a number: do TS updates.
            their_ts = int(args[-1])
            if their_ts > 0:
                self.updateTS(channel, their_ts)
        return {'target': channel, 'modes': parsedmodes, 'oldchan': oldobj}
    else:
        log.warning("(%s) received MODE for non-channel target: %r",
                    self.irc.name, args)
        raise NotImplementedError
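# A minimal, self-contained sketch of the timestamp rule described in the
# comments above. This helper is hypothetical (not part of PyLink); it only
# illustrates the rule: the trailing MODE argument is a channel TS only when
# the sender is a server and the argument is numeric, and a TS of 0 (as sent
# by SAMODE) is ignored.
def extract_mode_ts(sender_is_server, args):
    """Return the channel TS from a MODE argument list, or None."""
    if sender_is_server and args and args[-1].isdigit():
        ts = int(args[-1])
        if ts > 0:
            return ts
    return None

assert extract_mode_ts(True, ['+ntCo', 'GL', '1444361345']) == 1444361345
assert extract_mode_ts(True, ['+v', 'GL', '0']) is None   # SAMODE-style TS of 0
assert extract_mode_ts(False, ['+b', '*!*@bad.net']) is None  # normal user, no TS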
def start_job(s):
    s.cancel_job()
    log.info("PR %s: queueing build of commit %s", s.url, s.head)
    env = {
        "CI_PULL_COMMIT": s.head,
        "CI_PULL_REPO": s.repo,
        "CI_PULL_BRANCH": s.branch,
        "CI_PULL_NR": str(s.nr),
        "CI_PULL_URL": s.url,
        "CI_PULL_TITLE": s.title,
        "CI_PULL_USER": s.user,
        "CI_BASE_REPO": s.base_repo,
        "CI_BASE_BRANCH": s.base_branch,
        "CI_BASE_COMMIT": s.base_commit,
        "CI_SCRIPTS_DIR": config.scripts_dir,
        "CI_PULL_LABELS": ";".join(sorted(s.labels)),
        "CI_BUILD_HTTP_ROOT": os.path.join(config.http_root, s.base_full_name,
                                           str(s.nr), s.head),
    }
    if s.mergeable:
        env["CI_MERGE_COMMIT"] = s.merge_commit
    for key, value in env.items():
        if not value:
            log.warning("PR %s: env %s has NoneType!", s.url, key)
            return s
    s.current_job = Job(s.get_job_path(s.head),
                        os.path.join(config.scripts_dir, "build.sh"),
                        env, s.job_hook, s.head)
    s.jobs.append(s.current_job)
    queue.put(s.current_job)
    s.current_job.set_state(JobState.queued)
    return s
def findStyle(self, fn):
    """Find the absolute file name for a given style filename.

    Given a style filename, searches for it in StyleSearchPath
    and returns the real file name.
    """
    def innerFind(path, fn):
        if os.path.isabs(fn):
            if os.path.isfile(fn):
                return fn
        else:
            for D in path:
                tfn = os.path.join(D, fn)
                if os.path.isfile(tfn):
                    return tfn
        return None
    for ext in ['', '.style', '.json']:
        result = innerFind(self.StyleSearchPath, fn + ext)
        if result:
            break
    if result is None:
        log.warning("Can't find stylesheet %s" % fn)
    return result
def get_ipo_limit(self, stock_code):
    """Query the subscription quota / purchase limit for a new-share (IPO) subscription.

    :param stock_code: subscription code ID
    :return:
    """
    log.warning('目前仅在 佣金宝子类 中实现, 其余券商需要补充')
def close(data):
    pr = PullRequest.get(data, False)
    if pr:
        pr.cancel_job()
        log.info("PR %s: closed.", pr.url)
    else:
        log.warning("PR %s unknown, but tried to close!",
                    data["_links"]["html"]["href"])
def identify(irc, source, args):
    """<username> <password>

    Logs in to PyLink using the configured administrator account."""
    if utils.isChannel(irc.called_by):
        irc.reply('Error: This command must be sent in private. '
                  '(Would you really type a password inside a channel?)')
        return
    try:
        username, password = args[0], args[1]
    except IndexError:
        irc.msg(source, 'Error: Not enough arguments.')
        return
    # Usernames are case-insensitive, passwords are NOT.
    if username.lower() == irc.conf['login']['user'].lower() and \
            password == irc.conf['login']['password']:
        realuser = irc.conf['login']['user']
        irc.users[source].identified = realuser
        irc.msg(source, 'Successfully logged in as %s.' % realuser)
        log.info("(%s) Successful login to %r by %s",
                 irc.name, username, utils.getHostmask(irc, source))
    else:
        irc.msg(source, 'Error: Incorrect credentials.')
        u = irc.users[source]
        log.warning("(%s) Failed login to %r from %s",
                    irc.name, username, utils.getHostmask(irc, source))
def handle_squit(self, numeric, command, args):
    """Handles incoming SQUITs (netsplits)."""
    # :70M SQUIT 1ML :Server quit by GL!gl@0::1
    log.debug('handle_squit args: %s', args)
    split_server = args[0]
    affected_users = []
    log.debug('(%s) Splitting server %s (reason: %s)',
              self.irc.name, split_server, args[-1])
    if split_server not in self.irc.servers:
        log.warning("(%s) Tried to split a server (%s) that didn't exist!",
                    self.irc.name, split_server)
        return
    # Prevent "RuntimeError: dictionary changed size during iteration"
    old_servers = self.irc.servers.copy()
    for sid, data in old_servers.items():
        if data.uplink == split_server:
            log.debug('Server %s also hosts server %s, removing those users too...',
                      split_server, sid)
            args = self.handle_squit(sid, 'SQUIT', [
                sid, "PyLink: Automatically splitting leaf servers of %s" % sid])
            affected_users += args['users']
    for user in self.irc.servers[split_server].users.copy():
        affected_users.append(user)
        log.debug('Removing client %s (%s)', user, self.irc.users[user].nick)
        self.removeClient(user)
    sname = self.irc.servers[split_server].name
    del self.irc.servers[split_server]
    log.debug('(%s) Netsplit affected users: %s', self.irc.name, affected_users)
    return {'target': split_server, 'users': affected_users, 'name': sname}
def parsingShowBgpForPrefix(self, output, command, threeoctect, sourceIPs):
    try:
        if output:
            longestPrefixAddress = ""
            list1 = str(output).split('\r\n')
            first = 0
            for line in list1:
                match = re.match(
                    r'.(>i|i|)((\d+\.\d+\.\d+\.)\d+\/(\d+)).*',
                    str(line), re.M | re.I | re.DOTALL)
                if match:
                    matchip = match.group(3)
                    if str(threeoctect).strip() == str(matchip).strip():
                        if IPAddress(str(sourceIPs)) in IPNetwork(str(match.group(2))):
                            if first == 0:
                                longestPrefixAddress = match.group(2)
                                mask = int(match.group(4))
                                first = 1
                            else:
                                if mask < int(match.group(4)):
                                    longestPrefixAddress = match.group(2)
            return longestPrefixAddress
    except Exception as e:
        log.warning(
            'Exception occurred while parsing \'%s\' command output : %s',
            str(command), str(output))
        return "ErrorParser"
def __before_click(self, node_uid: ElementUid):
    # check whether this node has already been clicked
    if node_uid.uid in self.seen:
        # log.info('element {} is seen, skip it.'.format(node_uid.uid))
        return 0
    # check whether the node is in the white-list "seen" set;
    # this prevents always clicking white-listed elements.
    if node_uid.uid in self.__white_element_seen:
        # generate a random number to decide whether to remove this node from seen.
        if random() <= 0.3:
            self.__white_element_seen.remove(node_uid.uid)
            log.info('Remove an element from white seen.')
        return 0
    # check whether the node is in the selected list.
    if not self.__is_selected_element(node_uid.uid):
        # log.warning("Current element not in selected list, not click.\n{}".format(node_uid.uid))
        self.seen.add(node_uid.uid)
        return 0
    # check whether the node is in the black list.
    if self.__is_black_element(node_uid.uid):
        log.warning("Current element in black list, not click. {}.".format(node_uid.uid))
        self.seen.add(node_uid.uid)
        return 0
    return 1
def set_clef(self, clef):
    if clef.startswith('clef='):
        clef = clef[5:]
    self.key_type = Key.clef_type.get(clef.lower(), Key.clef_type['treble'])
    # if '-' in clef:
    #     ptr = clef.find('-')
    # if '+' in clef:
    #     ptr = clef.find('+')
    for c in Key.clef_type.keys():
        if clef.startswith(c):
            if clef.endswith('+8'):
                self.add_pitch = +7
            elif clef.endswith('-8'):
                self.add_pitch = -7
            elif clef.endswith('+0') or clef.endswith('-0'):
                self.add_pitch = 0
            elif clef.endswith('+16'):
                self.add_pitch = +14
            elif clef.endswith('-16'):
                self.add_pitch = -14
            else:
                log.warning(f'unknown octave modifier in clef: {clef}')
            return True
    if self.parse_tab_key(clef):
        return True
    return False
def test(r):
    if isinstance(r, basestring):
        r = resource.load(r)
    log.debug('Trying {}'.format(r.name))
    script_path = os.path.join(r.db_obj.base_path, 'test.py')
    if not os.path.exists(script_path):
        log.warning('resource {} has no tests'.format(r.name))
        return {}
    log.debug('File {} found'.format(script_path))
    with open(script_path) as f:
        module = imp.load_module(
            '{}_test'.format(r.name),
            f,
            script_path,
            ('', 'r', imp.PY_SOURCE)
        )
    try:
        module.test(r)
        return {
            r.name: {
                'status': 'ok',
            },
        }
    except Exception:
        return {
            r.name: {
                'status': 'error',
                'message': traceback.format_exc(),
            },
        }
def gather_elements(self, client, node, style):
    # Based on the graphviz extension
    global graphviz_warn
    try:
        # Is vectorpdf enabled?
        if hasattr(VectorPdf, "load_xobj"):
            # Yes, we have vectorpdf
            fname, outfn = sphinx.ext.graphviz.render_dot(
                node["builder"], node["code"], node["options"], "pdf")
        else:
            # Use bitmap
            if not graphviz_warn:
                log.warning(
                    "Using graphviz with PNG output. You get much better "
                    "results if you enable the vectorpdf extension.")
                graphviz_warn = True
            fname, outfn = sphinx.ext.graphviz.render_dot(
                node["builder"], node["code"], node["options"], "png")
        if outfn:
            client.to_unlink.append(outfn)
            client.to_unlink.append(outfn + ".map")
        else:
            # Something went very wrong with graphviz, and
            # sphinx should have given an error already
            return []
    except sphinx.ext.graphviz.GraphvizError, exc:
        log.error("dot code %r: " % node["code"] + str(exc))
        return [Paragraph(node["code"], client.styles["code"])]
    return [MyImage(filename=outfn, client=client)]
def run_ohlc_websocket(interval: int = 0, pair: str = "XBT/EUR"):
    if interval == 0:
        log.warning("No interval window given, using 1 minute")
        interval = 1
    kraken_rest_api_to_psql(interval=interval)
    log.info("Setting up OHLC websocket")
    client = kraken_client.WssClient()
    client.subscribe_public(
        subscription={"name": "ohlc", "interval": interval},
        pair=[pair],
        callback=store2psql)
    log.info("⇩ = Insert new OHLC record for this interval window.")
    log.info("↺ = Update existing record for this interval window.")
    log.info("♥ = Websocket heartbeat.")
    log.info("Starting websocket client")
    client.start()
def checkSoureRouter(self, output, command):
    try:
        if output:
            match = re.match(r'.*directly\s*connected.*', str(output), re.DOTALL)
            if match:
                return "Yes"
            else:
                match = re.match(
                    r'.*Local.*0\.0\.0\.0\s*from\s*0\.0\.0\.0.*best',
                    str(output), re.DOTALL)
                if match:
                    return "Yes"
                else:
                    match = re.match(r'.*from\s*0\.0\.0\.0.*best',
                                     str(output), re.DOTALL)
                    if match:
                        return "Yes"
                    else:
                        return "No"
    except Exception as e:
        log.warning(
            'Exception occurred while parsing \'%s\' command output : %s',
            str(command), str(output))
        return "ErrorParser"
def process_text_block(fp_in, fp, job: bool) -> None:
    add_final_nl = False
    if job == OBEYLINES:
        add_final_nl = True
    music.output_music(fp)
    buffer.buffer_eob(fp)
    common.cfmt.textfont.set_font(fp, False)
    common.words_of_text = ""
    for i in range(100):
        ln = fp_in.read()
        if ln == '':
            log.error("EOF reached scanning text block")
        common.linenum += 1
        log.warning(f"{common.linenum:3d} {ln} \n")
        if ln.startswith('%%'):
            ln = ln[2:]  # strings do not support del; slice off the leading '%%'
        if ln == "endtext":
            break
        if job != SKIP:
            if not ln:
                subs.write_text_block(fp, job)
                common.words_of_text = ''
            else:
                subs.add_to_text_block(ln, add_final_nl)
    if job != SKIP:
        subs.write_text_block(fp, job)
def handle_pull_request(request):
    with handle_pull_request_lock:
        data = json.loads(request.body.decode("utf-8"))
        pr_data = data["pull_request"]
        repo = pr_data["base"]["repo"]["full_name"]
        if not repo in config.repos:
            log.warning("ignoring PR for repo %s", repo)
            return
        #print(json.dumps(data, sort_keys=False, indent=4))
        action = data["action"]
        pr_url = pr_data["_links"]["html"]["href"]
        log.info("PR %s hook action %s", pr_url, action)
        if not action in known_actions:
            log.warning("PR %s unknown action %s", pr_url, action)
            log.debug(json.dumps(data, sort_keys=False, indent=4))
        if action in {"closed"}:
            PullRequest.close(pr_data)
            return
        pr = PullRequest.get(pr_data).update()
        if action == "unlabeled":
            pr.remove_label(data["label"]["name"])
        elif action == "labeled":
            pr.add_label(data["label"]["name"])
        elif action in {"created", "opened", "reopened"} and \
                not config.ci_ready_label in pr.labels:
            status = {
                "description": "\"%s\" label not set" % config.ci_ready_label,
                "target_url": config.http_root,
            }
            pr.set_status(pr_data["head"]["sha"], **status)
def test(r):
    if isinstance(r, basestring):
        r = resource.load(r)
    log.debug('Trying {}'.format(r.name))
    script_path = os.path.join(r.db_obj.base_path, 'test.py')
    if not os.path.exists(script_path):
        log.warning('resource {} has no tests'.format(r.name))
        return {}
    log.debug('File {} found'.format(script_path))
    with open(script_path) as f:
        module = imp.load_module('{}_test'.format(r.name),
                                 f,
                                 script_path,
                                 ('', 'r', imp.PY_SOURCE))
    try:
        module.test(r)
        return {
            r.name: {
                'status': 'ok',
            },
        }
    except Exception:
        return {
            r.name: {
                'status': 'error',
                'message': traceback.format_exc(),
            },
        }
def wrap(self, availWidth, availHeight):
    """If we need more width than we have, complain, keep a scale"""
    if self.style:
        bp = self.style.__dict__.get("borderPadding", 0)
        bw = self.style.__dict__.get("borderWidth", 0)
        if isinstance(bp, list):
            self.pad = [bp[0] + bw + .1,
                        bp[1] + bw + .1,
                        bp[2] + bw + .1,
                        bp[3] + bw + .1]
        else:
            self.pad = [bp + bw + .1,
                        bp + bw + .1,
                        bp + bw + .1,
                        bp + bw + .1]
    else:
        self.pad = [0, 0, 0, 0]
    maxWidth = float(min(
        styles.adjustUnits(self.maxWidth, availWidth) or availWidth,
        availWidth))
    self.maxWidth = maxWidth
    maxWidth -= (self.pad[1] + self.pad[3])
    self.width, self.height = _listWrapOn(self.content, maxWidth, None)
    self.scale = 1.0
    if self.width > maxWidth:
        if self.mode <> 'shrink':
            log.warning("BoundByWidth too wide to fit in frame (%s > %s): %s",
                        self.width, maxWidth, self.identity())
        if self.mode == 'shrink':
            self.scale = (maxWidth + self.pad[1] + self.pad[3]) / \
                         (self.width + self.pad[1] + self.pad[3])
    self.height *= self.scale
    return self.width, self.height + (self.pad[0] + self.pad[2]) * self.scale
def raster(self, filename, client):
    """Takes a filename and converts it to a raster image reportlab can process"""
    if not os.path.exists(filename):
        log.error("Missing image file: %s", filename)
        return missing
    try:
        # First try to rasterize using the suggested backend
        backend = self.get_backend(filename, client)[1]
        return backend.raster(filename, client)
    except:
        pass
    # Last resort: try everything
    if sys.platform[0:4] == 'java':
        try:
            from javax.imageio import ImageIO
            from java.io import File
            iis = ImageIO.createImageInputStream(File(filename))
            readers = ImageIO.getImageReaders(iis)
            if readers.hasNext():
                reader = readers.next()
                reader.setInput(iis, True)
                metadata = reader.getImageMetadata(0)
                # this means imageio can read it
                return filename
            else:
                log.warning("Java's ImageIO cannot read the file [%s]", filename)
        except Exception, error:
            log.warning("Could not use Java's ImageIO to read the file [%s]: %s",
                        filename, str(error))
def crawl_items(self):
    try:
        for item in self.items_crawler.items():
            self.crawl_buyers_for_item(item)
    except Exception as e:
        log.warning('FATAL: %s' % e)
        traceback.print_exc()
def handler(self, data):
    if type(data) is dict:
        pass
    if type(data) is list:
        channel_name = data[-2]
        if channel_name == "trade":
            if not self.valid_api_key(api_name="trade"):
                self.websocket.stop()
            from trades import trade_handler
            trade_handler(data)
        elif "book-" in channel_name:
            if not self.valid_api_key(api_name="book"):
                self.websocket.stop()
            from book import book_handler
            book_handler(data)
        else:
            log.warning("Could not determine channel")
            log.debug(data)
def crawle(self, keywords, pages, start_page, search_type, sort_type):
    '''
    search_type: 0 = items (宝贝), 1 = articles (文章), 3 = albums (专辑), 4 = group-buys (团购)
    sort_type: 0 = default order, 3 = newest first, 4 = most popular
    '''
    all_data = []
    page = start_page
    end_page = page + pages
    referer = self.base_url
    retry_count = 0
    while page < end_page:
        try:
            log.info(f'爬取第{page}页')
            html, url = self.crawle_page(keywords, referer, page - 1,
                                         search_type, sort_type)
            log.debug(f'解析第{page}页')
            data, next = self.parse_page(html)
            log.info('解析出{}条商品信息'.format(len(data)))
            all_data.extend(data)
            if next:
                retry_count = 0
                page += 1
                referer = url
            else:
                break
        except Exception as err:
            log.warning(f'爬取第{page}页失败', exc_info=True)
            if retry_count < 3:
                retry_count += 1
            else:
                retry_count = 0
                page += 1
    dataset = pandas.DataFrame(all_data)
    return dataset
def gather_elements(self, client, node, style):
    # Based on the graphviz extension
    global graphviz_warn
    try:
        # Is vectorpdf enabled?
        if hasattr(VectorPdf, 'load_xobj'):
            # Yes, we have vectorpdf
            fname, outfn = sphinx.ext.graphviz.render_dot(
                node['builder'], node['code'], node['options'], 'pdf')
        else:
            # Use bitmap
            if not graphviz_warn:
                log.warning(
                    'Using graphviz with PNG output. You get much better '
                    'results if you enable the vectorpdf extension.')
                graphviz_warn = True
            fname, outfn = sphinx.ext.graphviz.render_dot(
                node['builder'], node['code'], node['options'], 'png')
        if outfn:
            client.to_unlink.append(outfn)
            client.to_unlink.append(outfn + '.map')
        else:
            # Something went very wrong with graphviz, and
            # sphinx should have given an error already
            return []
    except sphinx.ext.graphviz.GraphvizError as exc:
        log.error('dot code %r: ' % node['code'] + str(exc))
        return [Paragraph(node['code'], client.styles['code'])]
    return [MyImage(filename=outfn, client=client)]
def graceful_kill(process):
    try:
        os.killpg(process.pid, signal.SIGTERM)
        process.wait(config.sigterm_timeout)
    except subprocess.TimeoutExpired:
        log.warning("ShellWorker: killing process")
        process.kill()
        process.wait()
def init_window():
    log.info('Initializing the raspberry pi pins')
    log.warning('Initializing the raspberry pi pins')
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(WINDOW_OPEN_PIN, GPIO.OUT)
    GPIO.setup(WINDOW_CLOSE_PIN, GPIO.OUT)
    GPIO.output(WINDOW_OPEN_PIN, GPIO.LOW)
    GPIO.output(WINDOW_CLOSE_PIN, GPIO.LOW)
def parse_date(string):
    """Return a datetime with a best guess of the supplied string, using dateutil"""
    from lib.dateutil import parser
    try:
        dt = parser.parse(string)
    except ValueError, e:
        log.warning(e)
        dt = None
    return dt
def read_config(self, path):
    try:
        self.account_config = helpers.file2dict(path)
    except ValueError:
        log.error("配置文件格式有误,请勿使用记事本编辑,推荐 sublime text")
    for value in self.account_config:
        if isinstance(value, int):
            log.warning("配置文件的值最好使用双引号包裹,使用字符串,否则可能导致不可知问题")
def get_exchangebill(self, start_date, end_date):
    """Query settlement (exchange bill) records within the given date range.

    :param start_date: 20160211
    :param end_date: 20160211
    :return:
    """
    log.warning('目前仅在 华泰子类 中实现, 其余券商需要补充')
def __call__(self, line: str, body: bool) -> None:
    Voice.body = body
    max_vc = 5
    if len(voices) >= max_vc:
        log.error(f"Too many voices; use -maxv to increase limit. "
                  f"It's now {max_vc}")
    self.switch_voice(line)
    log.warning(f'Make new v with id "{self.label}"')
def __signal_handler_terminate(self, signalnum, frame):
    """Signal handler for terminate signals.

    Signal handler for the "signal.SIGTERM" signal. Raises a
    "KeyboardInterrupt" exception so the main loop can react to the signal.
    """
    log.warning("Terminating on signal %(signalnum)r" % locals())
    raise KeyboardInterrupt
def get_page_base(url, headers=header):
    resp = requests.get(url=url, headers=headers, verify=False)
    if str(resp.status_code)[0] == '2':
        log.info('下载成功,状态为%s,url:%s:。' % (resp.status_code, url))
        return resp
    else:
        log.warning('下载不成功,状态为%s,url:%s:。' % (resp.status_code, url))
        return resp
async def __call__(self, request):
    kw = None
    if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
        if request.method == 'POST':
            if not request.content_type:
                return web.HTTPBadRequest(reason='Missing Content-Type.')
            ct = request.content_type.lower()
            if ct.startswith('application/json'):
                params = await request.json()
                if not isinstance(params, dict):
                    return web.HTTPBadRequest(reason='JSON body must be object.')
                kw = params
            elif ct.startswith('application/x-www-form-urlencoded') or \
                    ct.startswith('multipart/form-data'):
                params = await request.post()
                kw = dict(**params)
            else:
                return web.HTTPBadRequest(
                    reason='Unsupported Content-Type: %s' % request.content_type)
        if request.method == 'GET':
            qs = request.query_string
            if qs:
                kw = dict()
                for k, v in parse.parse_qs(qs, True).items():
                    kw[k] = v[0]
    if kw is None:
        kw = dict(**request.match_info)
    else:
        if not self._has_var_kw_arg and self._named_kw_args:
            # remove all unnamed kw:
            copy = dict()
            for name in self._named_kw_args:
                if name in kw:
                    copy[name] = kw[name]
            kw = copy
        # check named arg:
        for k, v in request.match_info.items():
            if k in kw:
                log.warning('Duplicate arg name in named arg and kw args: %s' % k)
            kw[k] = v
    if self._has_request_arg:
        kw['request'] = request
    # check required kw:
    if self._required_kw_args:
        for name in self._required_kw_args:
            if not name in kw:
                return web.HTTPBadRequest(reason='Missing argument: %s' % name)
    log.info('call with args: %s' % str(kw))
    # Note: the SHA1-hashed password is written to the log here, which is not secure.
    try:
        r = await self._func(**kw)
        return r
    except APIError as e:
        return dict(error=e.error, data=e.data, message=e.message)
def main(irc=None):
    # This is a global sanity check, to make sure the protocol module is doing
    # its job.
    if irc and not irc.connected.wait(2):
        log.warning('(%s) IRC network %s (protocol %s) has not set '
                    'irc.connected state after 2 seconds - this may be a bug '
                    'in the protocol module, and will cause plugins like '
                    'relay to not work correctly!',
                    irc.name, irc.name, irc.protoname)
def post(s):
    s.write("ok")
    hook_type = s.request.headers.get('X-Github-Event')
    handler = s.handler.get(hook_type)
    if handler:
        handler(s.request)
    else:
        log.warning("unhandled github event: %s", hook_type)
def add_label(s, label):
    log.info("PR %s added label: %s", s.url, label)
    if label in s.labels:
        log.warning("PR %s label already present.", s.url)
        return
    s.labels.add(label)
    if label == config.ci_ready_label:
        s.start_job()
    return s
def exposed_close_window(self):
    log.warning('close window from server')
    global is_busy
    is_busy = True
    # log.warning('~~ is_busy: %s', is_busy)
    try:
        thread.start_new_thread(close_window, (connection,))
    except Exception as err:
        log.error('thread did not start: %s', err)
    return True
def md5_encryption(*args):
    try:
        input = ''.join([str(i) if i is not None else '' for i in args])
        input += config.server["cvkey"]
        md5 = hashlib.md5()
        md5.update(input)
        return md5.hexdigest()
    except Exception, e:
        log.warning(e)
        return None
def log_unknown(self, node, during):
    if not hasattr(self, "unkn_node"):
        self.unkn_node = set()
    cln = self.getclassname(node)
    if not cln in self.unkn_node:
        self.unkn_node.add(cln)
        log.warning("Unkn. node (self.%s): %s [%s]", during, cln, nodeid(node))
        try:
            log.debug(node)
        except (UnicodeDecodeError, UnicodeEncodeError):
            log.debug(repr(node))
def get_state(s):
    code, result = github.repos[s.base_full_name].statuses[s.head].get()
    if code == 200:
        for data in result:
            if data["context"] == config.context:
                if data["description"] == "The build has been canceled.":
                    return "canceled"
                else:
                    return data["state"]
    else:
        log.warning("PullRequest: couldn't get statuses: code %s", code)
    return "unknown"
def safeuni(s):
    if isinstance(s, unicode):
        return s
    if not isinstance(s, basestring):
        if hasattr(s, '__unicode__'):
            return unicode(s)
        else:
            return str(s).decode('utf-8')
    try:
        # unicode() expects a utf-8 bytestring (unicode itself is not utf-8 or anything else)
        s = unicode(s, errors='strict', encoding='utf-8')
    except UnicodeDecodeError, e:
        log.warning(e)
        # drop anything that doesn't make sense in utf-8
        s = unicode(s, errors='ignore', encoding='utf-8')
    return s
def strategy(self, players):
    bomb_before = False
    bomb_after = False
    move = []
    self.map.update_boost(self.boost_remain, self.boost_renew)
    if not self.target or (self.cur != self.last_want):
        self.target, self.path = self.find_path()
        if not self.target:
            self.map.add_player(self.cur)
            return bomb_before, move, bomb_after
    if not self.path:
        # has reached target
        if self.map.need_bomb(self.cur):
            # self.map.add_bomb(self.cur, 3)
            bomb_before = True
        # if self.target == self.cur:
        #     # do not need move
        #     return bomb_before, move, bomb_after
        self.target, self.path = self.find_path()
        if not self.target:
            self.map.add_player(self.cur)
            return bomb_before, move, bomb_after
    if self.path:
        # walk to target
        first = self.path[0]
        if self.map.is_safe(first, 1):
            move.append(move_to(self.cur, first))
            self.last_want = first
            self.path.pop(0)
            if self.path and self.is_boost:
                second = self.path[0]
                if self.map.is_safe(second, 2):
                    move.append(move_to(first, second))
                    self.last_want = second
                    self.path.pop(0)
            if self.map.need_bomb(self.last_want, True):
                # self.map.add_bomb(self.last_want, 3)
                bomb_after = True
        else:
            self.target, self.path = self.find_path()
            if not self.target:
                self.map.add_player(self.cur)
                return bomb_before, move, bomb_after
            log.warning('decide strategy again')
            return self.strategy(players)
    return bomb_before, move, bomb_after
def open_window(conn):
    """
    Opens the window.
    @return: True or False
    """
    global status
    log.warning('window opens')
    print 'window started to open'
    GPIO.output(WINDOW_CLOSE_PIN, GPIO.LOW)
    GPIO.output(WINDOW_OPEN_PIN, GPIO.HIGH)
    time.sleep(7)
    status = True
    GPIO.output(WINDOW_OPEN_PIN, GPIO.LOW)
    global is_busy
    is_busy = False
    print 'window opened'
    conn.root.action_finished(device_sn, 'open')
def close_window(conn):
    """
    Closes the window.
    @return: True or False
    """
    global status
    log.warning('window closes')
    print 'window started to close'
    GPIO.output(WINDOW_OPEN_PIN, GPIO.LOW)
    GPIO.output(WINDOW_CLOSE_PIN, GPIO.HIGH)
    time.sleep(7)
    status = False
    GPIO.output(WINDOW_CLOSE_PIN, GPIO.LOW)
    global is_busy
    is_busy = False
    print 'window closed'
    conn.root.action_finished(device_sn, 'close')
def handleInboundMessages(self):
    """Handle all inbound messages in a loop.

    Called only by the inbound thread in Gateway.__init__."""
    if self.name is None:
        ## TODO: make more robust
        m = self.messenger.recvMessage()  ## wait for login message
        self.setName(LoginMessage.fromstr(m).name)
    while True:
        m = self.messenger.recvMessage()
        log.info("%s.handleInboundMessages() got message '%s'" % (self.name, m))
        if m is None:
            self.close()
            break
        else:
            try:
                m = GatewayMessage.fromstr(m)
            except ValueError as e:
                log.warning("conversion problem parsing message '%s'" % m)
            else:
                self.inboundQueue.put(m)
def __getitem__(self, key):
    # This 'normalizes' the key.
    # For example, if the key is todo_node (like sphinx uses), it will be
    # converted to 'todo-node', which is a valid docutils class name.
    if not re.match("^[a-z](-?[a-z0-9]+)*$", key):
        key = docutils.nodes.make_id(key)
    if self.StyleSheet.has_key(key):
        return self.StyleSheet[key]
    else:
        if key.startswith("pygments"):
            log.info("Using undefined style '%s'"
                     ", aliased to style 'code'." % key)
            newst = copy(self.StyleSheet["code"])
        else:
            log.warning("Using undefined style '%s'"
                        ", aliased to style 'normal'." % key)
            newst = copy(self.StyleSheet["normal"])
        newst.name = key
        self.StyleSheet.add(newst)
        return newst
def wrap(self, availWidth, availHeight):
    if self.__kind == 'percentage_of_container':
        w, h = self.__width, self.__height
        if not w:
            log.warning('Scaling image as % of container with w unset. '
                        'This should not happen, setting to 100')
            w = 100
        scale = w / 100.
        w = availWidth * scale
        h = w / self.__ratio
        self.image.drawWidth, self.image.drawHeight = w, h
        return w, h
    else:
        if self.image.drawHeight > availHeight:
            if not getattr(self, '_atTop', True):
                return self.image.wrap(availWidth, availHeight)
            else:
                # It's the first thing in the frame, probably.
                # Wrapping it will not make it work, so we
                # adjust by height.
                # FIXME: get rst file info (line number)
                # here for better error message
                log.warning('image %s is too tall for the '
                            'frame, rescaling' % self.filename)
                self.image.drawHeight = availHeight
                self.image.drawWidth = availHeight * self.__ratio
        elif self.image.drawWidth > availWidth:
            log.warning('image %s is too wide for the frame, rescaling' %
                        self.filename)
            self.image.drawWidth = availWidth
            self.image.drawHeight = availWidth / self.__ratio
        return self.image.wrap(availWidth, availHeight)
def wrap(self, availWidth, availHeight):
    """If we need more width than we have, complain, keep a scale"""
    self.pad = self.border_padding(True, 0.1)
    maxWidth = float(min(
        styles.adjustUnits(self.maxWidth, availWidth) or availWidth,
        availWidth))
    self.maxWidth = maxWidth
    maxWidth -= (self.pad[1] + self.pad[3])
    self.width, self.height = _listWrapOn(self.content, maxWidth, None)
    if self.width > maxWidth:
        if self.mode <> 'shrink':
            self.scale = 1.0
            log.warning("BoundByWidth too wide to fit in frame (%s > %s): %s",
                        self.width, maxWidth, self.identity())
        if self.mode == 'shrink' and not self.scale:
            self.scale = (maxWidth + self.pad[1] + self.pad[3]) / \
                         (self.width + self.pad[1] + self.pad[3])
    else:
        self.scale = 1.0
    self.height *= self.scale
    self.width *= self.scale
    return self.width, self.height + (self.pad[0] + self.pad[2]) * self.scale
def test_all():
    results = {}
    conn_graph = signals.detailed_connection_graph()
    #srt = nx.topological_sort(conn_graph)
    for name in conn_graph:
        log.debug('Trying {}'.format(name))
        r = resource.load(name)
        script_path = os.path.join(r.metadata['base_path'], 'test.py')
        if not os.path.exists(script_path):
            log.warning('resource {} has no tests'.format(name))
            continue
        log.debug('File {} found'.format(script_path))
        with open(script_path) as f:
            module = imp.load_module(
                '{}_test'.format(name),
                f,
                script_path,
                ('', 'r', imp.PY_SOURCE)
            )
        try:
            module.test(r)
            results[name] = {
                'status': 'ok',
            }
        except Exception:
            results[name] = {
                'status': 'error',
                'message': traceback.format_exc(),
            }
    return results
def POST(self):
    '''
    Method: post
    '''
    try:
        #@Step 1: Parse commit info
        try:
            data = web.data()
            commit_info = utils.json_load_to_str(data)
            commit_info = opts.FormatChangeSet(commit_info)
            log.debug("commit info: %s" % commit_info)
        except Exception, e:
            log.error(e)
            raise UnableParsePostDataError
        #@Step 2: Check author info
        if not opts.CheckAuthorInfo(commit_info["author"]):
            return HTTPAdaptor.format_response("pass", "001", "No authors available.")
            # log.error(NoAuthorFoundError.message)
            # raise NoAuthorFoundError
        #@Step 3: Check which jobs need to be triggered
        jobs = opts.GetJobs(commit_info)
        if not jobs:
            log.warning(NoValidJobFoundError().message)
            return HTTPAdaptor.format_response("pass", "001", "No tests can be executed.")
        #@Step 4: Insert commit info
        dbopts = SQLAdaptor()
        dbopts.InsertCommitInfo(commit_info)
        dbopts.InsertJobInfo(jobs)
        log.debug("set status id to :%s" % 7)
        #@Step 5: Trigger jobs
        opts.TriggerJobs(jobs)
        return HTTPAdaptor.format_response("ok", "003",
                                           "Jobs are triggered. Please wait for the response.")
def size_for_node(self, node, client):
    '''Given a docutils image node, returns the size the image should have
    in the PDF document, and what 'kind' of size that is.
    That involves lots of guesswork.'''
    uri = str(node.get("uri"))
    if uri.split("://")[0].lower() not in ('http', 'ftp', 'https'):
        uri = os.path.join(client.basedir, uri)
    else:
        uri, _ = urllib.urlretrieve(uri)
        client.to_unlink.append(uri)
    srcinfo = client, uri
    # Extract all the information from the URI
    imgname, extension, options = self.split_uri(uri)
    if not os.path.isfile(imgname):
        imgname = missing
    scale = float(node.get('scale', 100)) / 100
    size_known = False
    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple: adjustUnits
    # will return it in points and we're done.
    # However, often the unit will be "%" (especially if it's meant for
    # HTML originally). In that case, we will use a percentage of
    # the containing frame. (A standalone sketch of that rule follows
    # this function.)

    # Find the image size in pixels:
    kind = 'direct'
    xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi
    extension = imgname.split('.')[-1].lower()
    if extension in ['svg', 'svgz'] and SVGImage.available():
        iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension in ["ai", "ccx", "cdr", "cgm", "cmx", "sk1", "sk",
                       "xml", "wmf", "fig"] and VectorImage.available():
        iw, ih = VectorImage(imgname, srcinfo=srcinfo).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == 'pdf':
        if VectorPdf is not None:
            box = VectorPdf.load_xobj(srcinfo).BBox
        else:
            pdf = LazyImports.pdfinfo
            if pdf is None:
                log.warning('PDF images are not supported without pyPdf or pdfrw [%s]',
                            nodeid(node))
                return 0, 0, 'direct'
            reader = pdf.PdfFileReader(open(imgname, 'rb'))
            box = [float(x) for x in reader.getPage(0)['/MediaBox']]
        x1, y1, x2, y2 = box
        # These are in pt, so convert to px
        iw = float((x2 - x1) * xdpi / 72)
        ih = float((y2 - y1) * ydpi / 72)
        size_known = True  # Assume size from original PDF is OK
    else:
        keeptrying = True
        if sys.platform[0:4] == 'java':
            try:
                from javax.imageio import ImageIO
                from java.io import File
                iis = ImageIO.createImageInputStream(File(imgname))
                readers = ImageIO.getImageReaders(iis)
                if readers.hasNext():
                    reader = readers.next()
                    reader.setInput(iis, True)
                    metadata = reader.getImageMetadata(0)
                    iw = reader.getWidth(0)
                    ih = reader.getHeight(0)
                    xdpi = None
                    ydpi = None
                    if metadata.getNativeMetadataFormatName() == 'javax_imageio_png_1.0':
                        png_metadata = metadata.getAsTree(metadata.getNativeMetadataFormatName())
                        if png_metadata is not None:
                            phys_metadata = png_metadata.getElementsByTagName('pHYs')
                            if phys_metadata.getLength() == 1:
                                xdpi = phys_metadata.item(0).getAttribute('pixelsPerUnitXAxis')
                                if xdpi is not None:
                                    xdpi = float(xdpi)
                                ydpi = phys_metadata.item(0).getAttribute('pixelsPerUnitYAxis')
                                if ydpi is not None:
                                    ydpi = float(ydpi)
                    elif metadata.getNativeMetadataFormatName() == 'javax_imageio_jpeg_image_1.0':
                        jpg_metadata = metadata.getAsTree(metadata.getNativeMetadataFormatName())
                        if jpg_metadata is not None:
                            jfif_metadata = jpg_metadata.getElementsByTagName('app0JFIF')
                            if jfif_metadata.getLength() == 1:
                                # 0 = plain aspect ratio, 1 = dots per inch, 2 = dots per cm
                                resUnits = jfif_metadata.item(0).getAttribute('resUnits')
                                if resUnits is not None and (resUnits == '1' or resUnits == '2'):
                                    xdpi = jfif_metadata.item(0).getAttribute('pixelsPerUnitXAxis')
                                    if xdpi is not None:
                                        xdpi = float(xdpi)
                                        if resUnits == '2':
                                            xdpi = xdpi * 2.54
                                    ydpi = jfif_metadata.item(0).getAttribute('pixelsPerUnitYAxis')
                                    if ydpi is not None:
                                        ydpi = float(ydpi)
                                        if resUnits == '2':
                                            ydpi = ydpi * 2.54
                    if xdpi is None:
                        xdpi = 300
                    if ydpi is None:
                        ydpi = 300
                    keeptrying = False
            except Exception, err:
                log.error('Error %s', str(err))
                pass
        if LazyImports.PILImage:
            try:
                img = LazyImports.PILImage.open(imgname)
                img.load()
                iw, ih = img.size
                xdpi, ydpi = img.info.get('dpi', (xdpi, ydpi))
                keeptrying = False
            except IOError:
                # PIL throws this when it's a broken/unknown image
                pass
        if keeptrying and LazyImports.PMImage:
            img = LazyImports.PMImage(imgname)
            iw = img.size().width()
            ih = img.size().height()
            density = img.density()
            # The density is in pixels per centimeter (!?)
            xdpi = density.width() * 2.54
            ydpi = density.height() * 2.54
            keeptrying = False
        if keeptrying:
            if extension not in ['jpg', 'jpeg']:
                log.error("The image (%s, %s) is broken or in an unknown format",
                          imgname, nodeid(node))
                raise ValueError
            else:
                # Can be handled by reportlab
                log.warning("Can't figure out size of the image (%s, %s). "
                            "Install PIL for better results.",
                            imgname, nodeid(node))
                iw = 1000
                ih = 1000
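# A minimal sketch (with hypothetical numbers, not rst2pdf code) of the
# "% of container" sizing rule described in the comments inside size_for_node
# above: a width given as a percentage is resolved against the containing
# frame, and the height follows from the image's aspect ratio.
def percent_of_container(width_percent, frame_width, aspect_ratio):
    """Return (draw_width, draw_height) in points for a percentage width."""
    w = frame_width * (width_percent / 100.0)
    return w, w / aspect_ratio

assert percent_of_container(50, 400, 2.0) == (200.0, 100.0)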
def support_warning(cls):
    if cls.warned or LazyImports.PILImage:
        return
    cls.warned = True
    log.warning("Support for images other than JPG"
                " is now limited. Please install PIL.")
def file_is(fpath, c=False, ftype='f', fname=None):
    """
    Check if a file exists (and try to create it if it does not).

    :param fpath: full path to a file
    :param c: if set to True, create file if it does not exist yet
    :param ftype: type; defaults to file
    :param fname: file name; used for recursive use of the function (when
                  directory creation has to precede file creation)
    :return: file path if file/dir exists, otherwise None
    """
    fp = None
    # if the path name ends in / it is a directory
    if fpath[-1] == '/' or ftype == 'd' or ftype == 'dir':
        this_type = 'directory'
    else:
        this_type = 'file'
    # check if file exists
    if exists(fpath):
        # check if file is writeable
        if not access(fpath, W_OK):
            log.warning("{} {} exists but is not writeable."
                        .format(this_type.title(), fpath))
        else:
            log.info("{} {} exists and is writeable."
                     .format(this_type.title(), fpath))
            fp = fpath
    else:
        if not c:
            log.error("{} {} does not exist."
                      .format(this_type.title(), fpath))
        else:
            # try to create a file or directory (depending)
            try:
                if this_type == 'file':
                    open(fpath, 'w')
                    fp = fpath
                    log.info("File created.")
                else:
                    makedirs(fpath, mode=0o777)
                    fp = fpath
                    log.info("Successfully created directory.")
                    # if the intention was to create a file as well as
                    # its containing directory (indicated by setting the
                    # ftype flag to 'd' or 'dir' as well as providing fname
                    # for the actual file name), call the function again
                    if fname and (ftype == 'd' or ftype == 'dir'):
                        file_is(join(fpath, fname))
            except PermissionError:
                log.error("Permission Error: {} {} does not exist or "
                          "cannot be accessed.".format(this_type.title(), fpath))
            except FileNotFoundError:
                if this_type == 'file':
                    file_dir = dirname(fpath)
                    log.error("File not found. Checking for existence "
                              "of parent directory {}.".format(file_dir))
                    file_is(file_dir, ftype='d', fname=basename(fpath))
            except NotADirectoryError:
                log.error("File {} exists but is not a directory "
                          "(leave off trailing slash).".format(fpath))
            # unforeseen exception
            except Exception as e:
                line_no = sys.exc_info()[2].tb_lineno
                log.exception("An unexpected error occurred on line {} while "
                              "checking for existence of file {}: {}"
                              .format(line_no, fpath, e))
    return fp