def __init__(self, ipfile, recordfile):
    if not isfile(ipfile):
        logger.warning("can't find ip data file: %s" % ipfile)
        # deliberately return a value so __init__ fails and the program exits
        return 1
    self.ipfile = ipfile
    if not isfile(recordfile):
        logger.warning("can't find A record file: %s" % recordfile)
        return 2
    self.recordfile = recordfile
    # init iplist, used for binary search
    self.iplist = []
    # init iphash, used to look up detailed info
    self.iphash = {}
    # init storage for the a.yaml config
    self.record = {}
    # store per-domain mapping from region to ip info
    self.locmapip = {}
    # load record data
    self.LoadRecord()
    # load ip data
    self.LoadIP()
    print 'Init IP pool finished !'
def add_property(self, prop):
    """ Add a property to the receiver.

    :param FHIRClassProperty prop: A FHIRClassProperty instance
    """
    assert isinstance(prop, FHIRClassProperty)

    # do we already have a property with this name?
    # if we do and it's a specific reference, make it a reference to a
    # generic resource
    for existing in self.properties:
        if existing.name == prop.name:
            if 0 == len(existing.reference_to_names):
                logger.warning('Already have property "{}" on "{}", which is only allowed for references'.format(prop.name, self.name))
            else:
                existing.reference_to_names.extend(prop.reference_to_names)
            return

    self.properties.append(prop)
    self.properties = sorted(self.properties, key=lambda x: x.name)

    if prop.nonoptional and prop.one_of_many is not None:
        if prop.one_of_many in self.expanded_nonoptionals:
            self.expanded_nonoptionals[prop.one_of_many].append(prop)
        else:
            self.expanded_nonoptionals[prop.one_of_many] = [prop]
def sendrequest(self):
    """Sends a request to the server based on init above"""
    ## TODO: Switch to using requests module instead of urllib
    while self.errorcounter <= 3:
        try:
            self.ratelimitcheck()
            f = urllib2.urlopen(self.url)
            jsondata = f.read()
            apidata = json.loads(jsondata)
            self.data = apidata
            break
        except:
            if self.errorcounter == 3:
                self.errorcounter = 0
                raise IOError('Unknown error. Cannot retrieve apidata after 3 attempts')
            else:
                self.errorcounter += 1
                logger.warning('Could not retrieve apidata, retrying (attempt #%d)', self.errorcounter)
                time.sleep(5)
    ## Check the status of the data returned for error codes:
    if 'status' in self.data and self.data['status']['status_code'] != 200:
        self.statuscheck()
    elif self.errorcounter > 0:
        logger.info('Retry succeeded')
        self.errorcounter = 0
def FindIP(self, ip, name):
    logger.info("[IPPool] FindIP with ip: [%s] and name: [%s]." % (ip, name))
    i, j, ipnum = self.SearchLocation(ip)
    if i in self.iphash:
        ipstart = self.iphash[i][0]
        ipend = self.iphash[i][1]
        country = self.iphash[i][2]
        province = self.iphash[i][3]
        city = self.iphash[i][4]
        sp = self.iphash[i][5]
        if ipstart <= ipnum <= ipend:
            ip_list = [tmp_ip for tmp_ip in re.split(ur',|\s+', self.iphash[i][6][name][0])
                       if not re.search(ur'[^0-9.]', tmp_ip)]
            logger.info("userip:[%s] domain:[%s] section:[%s-%s] location:[%s,%s,%s,%s] ip_list:%s" %
                        (ip, name, long2ip(ipstart), long2ip(ipend), country, province, city, sp, ip_list))
            return ip_list
        else:
            # the ip may not be covered by the ip list; fall back to the default address
            ip_list = [tmp_ip for tmp_ip in re.split(ur',|\s+', self.record[name]['default'])
                       if not re.search(ur'[^0-9.]', tmp_ip)]
            logger.warning("userip:[%s] domain:[%s] ip-section:[%s-%s] range:[(%d-%d)-%d-%d] ip_list:%s" %
                           (ip, name, long2ip(ipstart), long2ip(ipend), ipstart, ipend, ipnum, j, ip_list))
            return ip_list
    else:
        # maybe something wrong
        ip_list = [tmp_ip for tmp_ip in re.split(ur',|\s+', self.record[name]['default'])
                   if not re.search(ur'[^0-9.]', tmp_ip)]
        logger.warning("can't find ip in iphash, ip:[%s] domain:[%s] ip_list:%s" % (ip, name, ip_list))
        return ip_list
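# SearchLocation is referenced above but not shown. Below is a minimal sketch of
# how a bisect-based lookup over the sorted self.iplist might work, assuming it
# returns (matched ipstart key, list index, numeric ip). This is an illustration
# under those assumptions, not the original implementation.
import bisect
import socket
import struct

def SearchLocation(self, ip):
    # convert dotted-quad string to an integer (ip2long)
    ipnum = struct.unpack("!L", socket.inet_aton(ip))[0]
    # index of the rightmost iplist entry whose start value is <= ipnum
    j = bisect.bisect_right(self.iplist, ipnum) - 1
    i = self.iplist[j] if j >= 0 else -1
    return i, j, ipnum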
def download_tree(self, from_dir, to_dir, overwrite=True):
    logger.info("DOWNLOAD_TREE " + from_dir + " " + to_dir)

    @self.safe
    def f():
        if overwrite:
            if os.path.isdir(to_dir):
                logger.debug("DEL: " + to_dir)
                shutil.rmtree(to_dir)
            logger.debug("MKDIR: " + to_dir)
            os.mkdir(to_dir)
        for root, dirs, files in self.ftp_host.walk(from_dir, topdown=True, followlinks=False):
            t_dir = to_dir + root[len(from_dir):] + "/"
            s_dir = root + "/"
            for d in dirs:
                logger.debug("MKDIR: " + t_dir + d)
                self.ftp_host.mkdir(t_dir + d)
            for f in files:
                logger.debug(s_dir + f + " -> " + t_dir + f)
                self.ftp_host.download(s_dir + f, t_dir + f)
        return True

    try:
        return f()
    except self.err:
        logger.warning("download_tree failed!")
        return False
def get_self_info(self):
    """
    Get our own account info and store it in self._self_info.

    get_self_info2
    {"retcode":0,"result":{"birthday":{"month":1,"year":1989,"day":30},"face":555,"phone":"","occupation":"","allow":1,"college":"","uin":2609717081,"blood":0,"constel":1,"lnick":"","vfwebqq":"68b5ff5e862ac589de4fc69ee58f3a5a9709180367cba3122a7d5194cfd43781ada3ac814868b474","homepage":"","vip_info":0,"city":"青岛","country":"中国","personal":"","shengxiao":5,"nick":"要有光","email":"","province":"山东","account":2609717081,"gender":"male","mobile":""}}
    :return: dict
    """
    try_times = 0
    while len(self._self_info) == 0:
        url = "http://s.web2.qq.com/api/get_self_info2?t={}".format(time.time())
        response = self.client.get(url)
        logger.debug("get_self_info2 response:{}".format(response))
        rsp_json = json.loads(response)
        if rsp_json["retcode"] != 0:
            try_times += 1
            logger.warning("get_self_info2 fail. {}".format(try_times))
            if try_times >= 5:
                return {}
        try:
            self._self_info = rsp_json["result"]
        except KeyError:
            logger.warning("get_self_info2 failed. Retrying.")
            continue
    return self._self_info
def __getattr__(self, name):
    item = AbstractProxy.__getattr__(self, name)
    result = item
    if not self.autoload:
        return result
    if not item._initialized:
        typeName = self._getTypeName(item)
        if typeName is None:
            return result
        href = getattr(item, 'href', None)
        if not self.usehref or href is None:
            serviceEndPoint = getattr(agility, typeName[0].lower() + typeName[1:])
            getserviceName = lookup(typeName, action=lookup.ACTION.GET)
            getservice = getattr(serviceEndPoint, getserviceName, None)
            if getservice is None:
                logger.warning('No such service [%s] for asset [%s]', getserviceName, typeName)
                return result
            result = getservice(item.id)
        else:
            logger.info('loading proxy using url %s', href)
            result = agility.tools.xml.parse(agility.cfg.conn.request(href).read(), assetType=typeName)
        if self.recursive:
            result = proxy(result, useid=self.useid)
        result._initialized = True
        self._attrs[name] = result
    return result
def client_config_generator(self, settings=None):
    default_config = zip(self.cluster_spec.workers, self.cluster_spec.yield_masters())
    if not hasattr(settings, 'clients_per_cluster') or settings.clients_per_cluster == 'null':
        return default_config
    clients_per_cluster = [int(clients) for clients in settings.clients_per_cluster.split()]
    if len(clients_per_cluster) != len(self.cluster_spec.config.items('clusters')):
        logger.warning("The number of clusters and clusters specified "
                       "in the clients_per_cluster test config do not match - "
                       "disabling multiple clients per cluster.")
        return default_config
    if sum(clients_per_cluster) > len(self.cluster_spec.workers):
        logger.warning("More clients_per_cluster specified in test config "
                       "than are available - disabling multiple clients "
                       "per cluster.")
        return default_config
    cluster_config = []
    workers_not_assigned = self.cluster_spec.workers
    for master, number_of_clients in zip(self.cluster_spec.yield_masters(), clients_per_cluster):
        for x in range(number_of_clients):
            cluster_config.append((workers_not_assigned.pop(0), master))
    return cluster_config
def get_online_friends_list(self):
    """
    Get the list of online friends.

    get_online_buddies2
    :return: list
    """
    logger.info("RUNTIMELOG Requesting the online buddies.")
    response = self.client.get(
        'http://d1.web2.qq.com/channel/get_online_buddies2?vfwebqq={0}&clientid={1}&psessionid={2}&t={3}'.format(
            self.vfwebqq,
            self.client_id,
            self.psessionid,
            self.client.get_timestamp(),
        )
    )
    # {"result":[],"retcode":0}
    logger.debug("RESPONSE get_online_buddies2 html:{}".format(response))
    try:
        online_buddies = json.loads(response)
    except ValueError:
        logger.warning("get_online_buddies2 response decode as json fail.")
        return None
    if online_buddies['retcode'] != 0:
        logger.warning('get_online_buddies2 retcode is not 0. returning.')
        return None
    online_buddies = online_buddies['result']
    return online_buddies
def notify(event, *args, **kargs):
    if event in events:
        for b in events[event]:
            try:
                b(*args, **kargs)
            except Exception as e:
                logger.warning(e)
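# A minimal usage sketch for the notify() dispatcher above, assuming `events`
# is a module-level dict mapping event names to lists of callbacks (its
# definition is not shown in the original); `subscribe` is a hypothetical helper.
events = {}

def subscribe(event, callback):
    events.setdefault(event, []).append(callback)

subscribe("game_finished", lambda game_id: logger.info("finished: %s" % game_id))
notify("game_finished", 42)  # calls every subscriber; exceptions are only logged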
def get_friend_info(self, tuin):
    """
    Get detailed info about a friend.

    get_friend_info
    {"retcode":0,"result":{"face":0,"birthday":{"month":1,"year":1989,"day":30},"occupation":"","phone":"","allow":1,"college":"","uin":3964575484,"constel":1,"blood":3,"homepage":"http://blog.lovewinne.com","stat":20,"vip_info":0,"country":"中国","city":"","personal":"","nick":" 信","shengxiao":5,"email":"*****@*****.**","province":"山东","gender":"male","mobile":"158********"}}
    :return: dict
    """
    uin = str(tuin)
    if uin not in self.friend_uin_list:
        logger.info("RUNTIMELOG Requesting the account info by uin: {}".format(uin))
        info = json.loads(self.client.get(
            'http://s.web2.qq.com/api/get_friend_info2?tuin={0}&vfwebqq={1}&clientid={2}&psessionid={3}&t={4}'.format(
                uin,
                self.vfwebqq,
                self.client_id,
                self.psessionid,
                self.client.get_timestamp()
            )
        ))
        logger.debug("get_friend_info2 html: {}".format(str(info)))
        if info['retcode'] != 0:
            logger.warning('get_friend_info2 retcode unknown: {}'.format(info))
            return None
        info = info['result']
        info['account'] = self.uin_to_account(uin)
        info['longnick'] = self.get_friend_longnick(uin)
        self.friend_uin_list[uin] = info
    try:
        return self.friend_uin_list[uin]
    except KeyError:
        logger.warning("RUNTIMELOG get_friend_info return fail.")
        logger.debug("RUNTIMELOG now uin list: " + str(self.friend_uin_list))
def listen(self):
    while 1:
        if not self.connected:
            self.connect()
        if self.connected:
            self.sleep_time = 5
            r = self.sock.recv(1024 * 1024 * 10).decode("utf-8")
            if r == '':
                self.connected = False
                logger.warning("connection to backend died")
                time.sleep(1)
                continue
            ## fragmented blocks?
            for d in r.split("\n"):
                if d == '':
                    continue
                if d == '\n':
                    pass
                else:
                    try:
                        self.parse(json.loads(d))
                    except Exception as e:
                        logger.exception("failed to parse backend message")
        else:
            self.sleep_time = min(self.sleep_time * 3, 60 * 60 * 3)
            if not self.suppress_connection_warnings:
                logger.debug("No connection to Backend; sleeping " + str(self.sleep_time) + " seconds.")
            time.sleep(self.sleep_time)
def get_group_member_info_list(self, group_code):
    """
    Get member info for the given group.

    :group_code: int, can be a "true" or "fake" group_code
    {"retcode":0,"result":{"stats":[],"minfo":[{"nick":" 信","province":"山东","gender":"male","uin":3964575484,"country":"中国","city":""},{"nick":"崔震","province":"","gender":"unknown","uin":2081397472,"country":"","city":""},{"nick":"云端的猫","province":"山东","gender":"male","uin":3123065696,"country":"中国","city":"青岛"},{"nick":"要有光","province":"山东","gender":"male","uin":2609717081,"country":"中国","city":"青岛"},{"nick":"小莎机器人","province":"广东","gender":"female","uin":495456232,"country":"中国","city":"深圳"}],"ginfo":{"face":0,"memo":"http://hujj009.ys168.com\r\n0086+区(没0)+电话\r\n0086+手机\r\nhttp://john123951.xinwen365.net/qq/index.htm","class":395,"fingermemo":"","code":3943922314,"createtime":1079268574,"flag":16778241,"level":0,"name":"ぁQQぁ","gid":3931577475,"owner":3964575484,"members":[{"muin":3964575484,"mflag":192},{"muin":2081397472,"mflag":65},{"muin":3123065696,"mflag":128},{"muin":2609717081,"mflag":0},{"muin":495456232,"mflag":0}],"option":2},"cards":[{"muin":3964575484,"card":"●s.Εx2(22222)□"},{"muin":495456232,"card":"小莎机器人"}],"vipinfo":[{"vip_level":0,"u":3964575484,"is_vip":0},{"vip_level":0,"u":2081397472,"is_vip":0},{"vip_level":0,"u":3123065696,"is_vip":0},{"vip_level":0,"u":2609717081,"is_vip":0},{"vip_level":0,"u":495456232,"is_vip":0}]}}
    :return: dict
    """
    if group_code == 0:
        return
    try:
        url = "http://s.web2.qq.com/api/get_group_member_info_ext2?gcode=%s&vfwebqq=%s&t=%s" % (
            group_code, self.vfwebqq, int(time.time() * 100))
        response = self.client.get(url)
        rsp_json = json.loads(response)
        logger.debug("get_group_member_info_ext2 info response: {}".format(rsp_json))
        retcode = rsp_json["retcode"]
        if retcode == 0:
            result = rsp_json["result"]
        elif retcode == 6:
            logger.debug("get_group_member_info_ext2 retcode is 6, trying to get true code.")
            result = self.get_group_member_info_list(self.get_true_group_code(group_code))
        else:
            logger.warning("group_code error.")
            return
        # cache member info; both true and fake group_codes end up in the cache
        self.group_member_info[str(group_code)] = result
        return result
    except Exception as ex:
        logger.warning("RUNTIMELOG get_group_member_info_ext2. Error: " + str(ex))
        return
def parse(self, d):
    if "requestid" not in d:
        logger.warning("Invalid Response!")
        pprint(d)
        return
    reqid = d["requestid"]
    if reqid not in self.requests:
        logger.warning("Request id isn't known ({})".format(reqid))
        pprint(d)
        return
    # logger.info("Backend [{}]: {}".format(reqid, d))
    if hasattr(env, 'BACKEND_PPRINT'):
        pprint(d)
    if self.requests[reqid]["action"] == "tournament":
        self.handleTournament(self.requests[reqid], d)
        return
    self.requests[reqid].update(d)
    if self.handleGame(self.requests[reqid], d):
        return
    if "queue" in self.requests[reqid]:
        self.requests[reqid]["queue"].put(d)
    if "queues" in self.requests[reqid]:
        for q in self.requests[reqid]["queues"]:
            q.put(d)
def send_friend_msg(self, reply_content, uin, msg_id, fail_times=0):
    fix_content = str(reply_content.replace("\\", "\\\\\\\\").replace("\n", "\\\\n").replace("\t", "\\\\t"))
    rsp = ""
    try:
        req_url = "http://d1.web2.qq.com/channel/send_buddy_msg2"
        data = (
            ('r', '{{"to":{0}, "face":594, "content":"[\\"{4}\\", [\\"font\\", {{\\"name\\":\\"Arial\\", \\"size\\":\\"10\\", \\"style\\":[0, 0, 0], \\"color\\":\\"000000\\"}}]]", "clientid":{1}, "msg_id":{2}, "psessionid":"{3}"}}'.format(
                uin, self.client_id, msg_id, self.psessionid, fix_content)),
            ('clientid', self.client_id),
            ('psessionid', self.psessionid)
        )
        rsp = self.client.post(req_url, data, self.smart_qq_refer)
        rsp_json = json.loads(rsp)
        if 'errCode' in rsp_json and rsp_json['errCode'] != 0:
            raise ValueError("reply pmchat error " + str(rsp_json['retcode']))
        logger.info("RUNTIMELOG Reply successfully.")
        logger.debug("RESPONSE Reply response: " + str(rsp))
        return rsp_json
    except:
        if fail_times < 5:
            logger.warning("RUNTIMELOG Response Error. Waiting 2s and retrying. " + str(fail_times))
            logger.debug("RESPONSE " + str(rsp))
            time.sleep(2)
            return self.send_friend_msg(reply_content, uin, msg_id, fail_times + 1)
        else:
            logger.warning("RUNTIMELOG Response Error over 5 times. Exit. reply content: " + str(reply_content))
            return False
def restart_analytics_cluster(self, analytics_node: str):
    logger.info('Restarting analytics cluster')
    api = 'http://{}:{}/analytics/cluster/restart'.format(analytics_node, ANALYTICS_PORT)
    r = self.post(url=api)
    if r.status_code not in (200, 202):
        logger.warning('Unexpected request status code {}'.format(r.status_code))
def check_device_name_and_version(self):
    try:
        self.connect()
        name = self.control_board.name()
        hardware_version = utility.Version.fromstring(
            self.control_board.hardware_version()
        )
        if name != "Arduino DMF Controller":
            raise Exception("Device is not an Arduino DMF Controller")
        host_software_version = self.control_board.host_software_version()
        remote_software_version = self.control_board.software_version()
        # reflash the firmware if it is not the right version
        if host_software_version != remote_software_version:
            response = yesno("The control board firmware version (%s) does not "
                             "match the driver version (%s). Update firmware?" %
                             (remote_software_version, host_software_version))
            if response == gtk.RESPONSE_YES:
                self.on_flash_firmware()
    except Exception, why:
        logger.warning("%s" % why)
def handle_tweet(self, data):
    """
    This method is called when data is received through the Streaming endpoint.
    """
    # self.nb_tweet += 1
    data = data.decode('utf-8')
    self.buffer += data
    if data.endswith('\r\n') and self.buffer.strip():
        try:
            message = json.loads(self.buffer)
            self.buffer = ''
            if message.get('limit'):
                pass
                # logger.warning('Rate limiting caused us to miss %s tweets' % (message['limit'].get('track')))
            elif message.get('disconnect'):
                logger.warning('Got disconnected: {}'.format(message['disconnect'].get('reason')))
                raise Exception('Got disconnect: %s' % message['disconnect'].get('reason'))
            elif message.get('warning'):
                logger.warning('Got warning: {}'.format(message['warning'].get('message')))
            else:
                print(message)
                # self.process_tweet(message)
        except ValueError:
            pass
    if datetime.datetime.now() - self.last_run > datetime.timedelta(seconds=120):
        raise TimeOutTweet()
def register(self, model, file=':memory:'):
    try:
        self.conn[model]
        logger.warning('ModelError: Model already exists.')
    except KeyError:
        self.conn[model] = sqlite3.connect(file)
        self.lock[model] = False
def whitelist_from_file(filename):
    """
    Reads a whitelist from the specified file and returns a mapping from
    PDB ID to chains in the whitelist.
    """
    whitelist = {}
    with open(filename, 'r') as whitelist_file:
        # iterate over each entry in the whitelist, which is a PDB ID, followed
        # by a colon, followed by one or more chain IDs (like A, B, C, etc.)
        # separated by commas to include for training
        for line in whitelist_file:
            line = line.strip()
            try:
                # skip blank lines and comments
                if len(line) == 0 or line[0] == '#':
                    continue
                # get the pdbid and the chain ids from the line
                pdbid, chains = line.split(':')
                # ensure pdbid is lowercase, chains are uppercase
                pdbid = pdbid.lower()
                chains = chains.upper()
                # update or create a new set containing each chain id
                if pdbid in whitelist:
                    whitelist[pdbid].update(chain_id for chain_id in chains)
                else:
                    whitelist[pdbid] = set(chain_id for chain_id in chains)
            except ValueError:
                logger.warning('Incorrectly formatted whitelist line: ' + line)
    return whitelist
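# A small usage sketch for whitelist_from_file(). Note that the parser iterates
# over the individual characters after the colon, so chain IDs are written
# back-to-back here; the file contents below are an invented example.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('# training whitelist\n1abc:AB\n2xyz:C\n')

print(whitelist_from_file(tmp.name))
# -> {'1abc': {'A', 'B'}, '2xyz': {'C'}}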
def _merge_tiles(self):
    dx, dy = self._get_delta()
    self.log('Merging tiles...')
    filename = '%s%s' % (self.file_name_prefix, self.tile_format)
    tiles = []
    imx = 0
    imy = 0
    for x in range(dx):
        imy = 0
        width = 0
        for y in reversed(range(dy)):
            tile_file = os.path.join(self.tile_dir, "%s_%s%s" % (x, y, self.tile_format))
            try:
                tile = Image.open(tile_file)
                tiles.append((tile, (imx, imy)))
                imy += tile.height  # advance down the current column
                if tile.width > width:
                    width = tile.width
            except Exception as er:
                logger.warning(er)
        imx += width  # advance to the next column
    path = os.path.join(self.output_dir, filename)
    self.real_width = imx
    self.real_height = imy
    out = Image.new('RGB', (self.real_width, self.real_height))
    for t in tiles:
        out.paste(t[0], t[1])
    out.save(path)
    return path
def from_inprogress(cls, d):
    if "exception" in d:
        logger.warning("Game Exception! " + str(d["exception"]))
        return
    ais = [d["ai0"], d["ai1"]]
    g = Game(type=ais[0].type)
    g.log = d["states"]
    g.crashes = d["crashes"]
    if "reason" in d:
        g.reason = d["reason"]
    db.session.add(g)
    db.session.commit()
    g.ai_assocs = [AI_Game_Assoc(game_id=g.id, ai_id=ai.id) for ai in ais]
    db.session.add(g)
    db.session.commit()
    for ai, score in d["scores"].items():
        ai = AI.query.get(int(ai.split("v")[0]))
        AI_Game_Assoc.query.filter(AI_Game_Assoc.game == g).filter(AI_Game_Assoc.ai == ai).one().score = score
    for ai, position in d["position"].items():
        ai = AI.query.get(int(ai.split("v")[0]))
        AI_Game_Assoc.query.filter(AI_Game_Assoc.game == g).filter(AI_Game_Assoc.ai == ai).one().position = position
    g.update_ai_elo()
    db.session.add(g)
    db.session.commit()
    logger.info("new game " + str(g))
    return g
def getgamesmongo(matchIds):
    """Pass a list of matchIds to add each match to mongodb"""
    fullcounter = 0
    parsedcounter = 0
    for matchId in matchIds:
        m = match(matchId)
        try:
            m.fetchdata()
        except IOError as e:
            logger.warning('%s. Cannot get match data, skipping to next', str(e))
            continue
        try:
            m.fetchparsed()
        except IOError as e:
            logger.warning('%s. Cannot get parsed data, will add full data if possible', str(e))
        except AssertionError as e:
            ## If there is no timeline data, skip this
            logger.warning('%s. Skipping to next', str(e))
            continue
        ## Try to add to mongodb, will output error if it already is there
        try:
            m.addtomongo('full')
            fullcounter += 1
        except IOError as e:
            logger.warning('%s. Cannot add full data, will add parsed data if possible', str(e))
        try:
            m.addtomongo('parsed')
            parsedcounter += 1
        except IOError as e:
            logger.warning('%s. Cannot add parsed data, moving on to next match', str(e))
        if fullcounter % 50 == 0:
            logger.info('Added %d games to MongoDB full collection and %d games to MongoDB parsed collection so far this session', fullcounter, parsedcounter)
    logger.info('Operation completed successfully, added %d games to MongoDB full collection and %d games to MongoDB parsed collection', fullcounter, parsedcounter)
def copy_tree(self, from_dir, to_dir, overwrite=True):
    logger.info("COPY_TREE " + from_dir + " " + to_dir + " " + str(overwrite))

    @self.safe
    def f():
        if overwrite:
            if self.ftp_host.path.isdir(to_dir):
                logger.debug("DEL: " + to_dir)
                self.ftp_host.rmtree(to_dir)
            logger.debug("MKDIR: " + to_dir)
            self.ftp_host.mkdir(to_dir)
        for root, dirs, files in self.ftp_host.walk(from_dir, topdown=True, followlinks=False):
            t_dir = to_dir + root[len(from_dir):] + "/"
            s_dir = root + "/"
            for d in dirs:
                logger.debug("MKDIR: " + t_dir + d)
                self.ftp_host.mkdir(t_dir + d)
            for f in files:
                logger.debug(s_dir + f + " -> " + t_dir + f)
                with self.ftp_host.open(s_dir + f, "r", encoding="utf-8") as source:
                    with self.ftp_host.open(t_dir + f, "w", encoding="utf-8") as target:
                        target.write(source.read())
        return True

    try:
        return f()
    except self.err:
        logger.warning("copy_tree failed!")
        return False
def populate(self, data):
    if type(data) == str:
        try:
            data = json.loads(data)
        except ValueError:
            logger.error("Couldn't parse JSON string:")
            logger.error(data, False)
            logger.error("Aborting SecureMessage population", False)
            raise PopulationError
    elif type(data) != dict:
        logger.error("I can't populate a message with a '%s'", type(data))
        raise PopulationError
    if 'message' not in data:
        logger.warning("No message in data, don't have anything to populate")
        return self
    self.message = data.get('message', None)
    if 'sender' in data:
        self.sender = data['sender']
    else:
        logger.warning("No sender set for populated message")
    if 'action' in data:
        self.action = data['action']
    if 'signature' in data:
        self.signature = data['signature']
        self.signed = True
    else:
        self.signed = False
    return self
def start_game():
    if 'ai[]' not in request.form:
        return CommonErrors.INVALID_ID
    ais = request.form.getlist("ai[]")
    logger.debug(ais)
    for i1, ai1 in enumerate(ais):
        for i2, ai2 in enumerate(ais):
            if i1 != i2 and ai1 == ai2:
                logger.warning("Game requested against the same AIs")
                logger.warning(str(ais))
                return {"error": "No duplicate AIs allowed."}, 400
    ais = [AI.query.get(ai) for ai in ais]
    logger.debug(ais)
    if not all(ais):
        return CommonErrors.INVALID_ID
    if not any([current_user.can_access(ai) for ai in ais]):
        return CommonErrors.NO_ACCESS
    if not all([ai.latest_frozen_version() for ai in ais]):
        return {"error": "All AIs have to be frozen"}, 400
    return {"error": False, "inprogress_id": backend.request_game(ais)}
def ai_qualify_blocking(id):
    ai = AI.query.get(id)
    if not ai:
        return (CommonErrors.INVALID_ID[0]["error"], "error")
    if not current_user.can_access(ai):
        return (CommonErrors.NO_ACCESS[0]["error"], "error")
    if not ai.latest_version().compiled:
        return ("AI_Version isn't compiled.", "error")
    if ai.latest_version().frozen:
        return ("AI_Version is frozen.", "error")
    reqid = backend.request_qualify(ai)
    for data, event in backend.inprogress_log(reqid):
        if event == "success":
            d = backend.request(reqid)
            if "position" in d:
                if d["position"][str(ai.id) + "v" + str(ai.latest_version().version_id)] > d["position"][str(-ai.type.id) + "v1"]:
                    yield "", "qualified"
                    ai.latest_version().compiled = True
                    ai.latest_version().qualified = True
                else:
                    logger.info("AI " + str(ai.id) + " '" + str(ai.name) + "' failed its qualification")
                    yield "", "failed"
                    ai.latest_version().qualified = False
            else:
                logger.warning("no position in finished ai_qualify_blocking")
            db.session.commit()
        elif event == "crash":
            return ("ai crashed", "error")
def make_request_with_proxy(url):
    proxies = proxy_handling.load_proxies()
    if not proxies:
        proxy_handling.update_proxies()
        proxies = proxy_handling.load_proxies_from_file()
    tries = 1  # number of tries for each proxy
    for proxy in reversed(proxies):
        for i in range(1, tries + 1):  # how many tries for each proxy
            try:
                # print('%i iteration of proxy %s' % (i, proxy), end="")
                proxy_handler = urllib2.ProxyHandler({'http': proxy, 'https': proxy})
                opener = urllib2.build_opener(proxy_handler)
                urllib2.install_opener(opener)
                headers = {
                    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
                    'referer': 'https://www.google.com/'}
                request = urllib2.Request(url, headers=headers)
                f = urllib2.urlopen(request)
                read = f.read()
                if read.find('400 Bad Request') == -1:
                    return read
            except Exception as er:
                logger.warning(er)
                if i == tries:
                    proxies.remove(proxy)
                    proxy_handling.dump_proxies_to_file(proxies)
    # if here, the result is not received
    # try with the new proxy list
    return make_request_with_proxy(url)
def load_svg(cls, svg_path):
    with warnings.catch_warnings(record=True) as warning_list:
        path_group = PathGroup.load_svg(svg_path, on_error=parse_warning)
    if warning_list:
        logger.warning('The following paths could not be parsed properly '
                       'and have been ignored:\n%s' %
                       '\n'.join([str(w.message) for w in warning_list]))
    # Assign the color blue to all paths that have no colour assigned
    for p in path_group.paths.values():
        if p.color is None:
            p.color = (0, 0, 255)
        # If the first and last vertices in a loop are too close together,
        # it can cause tessellation to fail (Ticket # 106).
        for loop in p.loops:
            # distance between first and last point in a loop
            d = sqrt((loop.verts[0][0] - loop.verts[-1][0])**2 +
                     (loop.verts[0][1] - loop.verts[-1][1])**2)
            # diagonal across device bounding box
            device_diag = sqrt(path_group.get_bounding_box()[2]**2 +
                               path_group.get_bounding_box()[3]**2)
            # If the distance between the vertices is below a threshold,
            # remove the last vertex (the threshold is scaled by the device
            # diagonal so that we are insensitive to device size).
            if d / device_diag < 1e-3:
                loop.verts.pop()
    dmf_device = DmfDevice()
    dmf_device.add_path_group(path_group)
    dmf_device.init_body_group()
    return dmf_device
def __call__(self, environ, start_response):
    """
    If the wsgi PATH_INFO starts with the static contents location,
    it will be returned. Otherwise the wrapped application will be called.
    """
    if environ['REQUEST_METHOD'] == 'GET' and environ['PATH_INFO'].startswith('/%s/' % self._location):
        logger.info('GET from %s: %s' % (environ.get('REMOTE_ADDR', 'unknown'), environ['PATH_INFO']))
        prefix = "/usr/share/skdrepo/"
        path = prefix + environ['PATH_INFO'][1:]
        try:
            f = open(path, 'rb')
            data = f.read()
            f.close()
            (mime, encoding) = guess_type(path)
            status = '200 OK'
            response_headers = [('Content-Type', mime)]
            response_body = [data]
        except IOError, e:
            logger.warning('failed to open file: %s' % path)
            status = '404 Not Found'
            response_headers = [('Content-Type', 'text/plain')]
            response_body = ['404 Not Found - \'%s\'' % path]
        start_response(status, response_headers)
        logger.debug('response to %s: %s, %s' % (environ['REMOTE_ADDR'], status, str(response_headers)))
        return response_body
def require_cmd(cls, data):
    agent_id = data['agent_id']
    agent_host = data['agent_host']
    agent_port = data['agent_port']
    cmd_spec = {'cmd': 'terminate'}
    if agent_id in cls.pending_cmds:
        cmd_spec = cls.pending_cmds[agent_id]
        cmd_spec['logfile'] = path.join(cls.out_dir, 'cmds', agent_id)
        del cls.pending_cmds[agent_id]
        if agent_id in cls.reused_agents:
            cls.reused_agents.remove(agent_id)
        cls.running_cmds[agent_id] = cmd_spec
    out_data = marshal.dumps(cmd_spec)
    re_try = 10
    while True:
        try:
            s = socket.socket()
            s.connect((agent_host, agent_port))
            try:
                s.send(out_data)
            finally:
                s.close()
            break
        except Exception as e:
            re_try -= 1
            logger.warning(str(e))
            if re_try <= 0:
                raise e
    if agent_id not in cls.running_cmds and agent_id not in cls.reused_agents:
        if agent_id in cls.idle_agents:
            cls.idle_agents.remove(agent_id)
        if agent_id in cls.agent_visitor:
            GCFEngine.kill_agent(agent_id)
            v = cls.agent_visitor[agent_id]
            del cls.visitor_agent[v]
            del cls.agent_visitor[agent_id]
            del cls.agent_status[agent_id]
def get_merged_is_img(self, class_merge_map: Dict[str, str]):
    check_file_exists(self.is_img_path)
    is_img_orig = cv2.imread(self.is_img_path)
    working_is_img = is_img_orig.copy()
    for src_class, dst_class in class_merge_map.items():
        # Get Color Map
        src_bgr, dst_bgr = None, None
        for ann_obj in self.ndds_ann.objects:
            if ann_obj.class_name == src_class:
                if src_bgr is not None:
                    raise Exception
                src_bgr = ann_obj.get_color_from_id()
                continue
            if ann_obj.class_name == dst_class:
                if dst_bgr is not None:
                    raise Exception
                dst_bgr = ann_obj.get_color_from_id()
                continue
            if src_bgr is not None and dst_bgr is not None:
                break
        obj_class_names = []
        for ann_obj in self.ndds_ann.objects:
            if ann_obj.class_name not in obj_class_names:
                obj_class_names.append(ann_obj.class_name)
        if src_bgr is None or dst_bgr is None:
            logger.warning(f"Couldn't find either src_bgr or dst_bgr.")
            class_name_list = [ann_obj.class_name for ann_obj in self.ndds_ann.objects]
            logger.warning(f'Available class_name list: {class_name_list}')
            logger.warning(f'src_class: {src_class}, dst_class: {dst_class}')
            logger.warning(f"src_bgr: {src_bgr}, dst_bgr: {dst_bgr}")
            continue
        working_is_img = self.__replace_color(img=working_is_img, src_bgr=src_bgr, dst_bgr=dst_bgr)
    # # Debug
    # logger.yellow(f'self.is_img_path: {self.is_img_path}')
    # is_img_compare = cv2.hconcat([is_img_orig, working_is_img])
    # from common_utils.cv_drawing_utils import cv_simple_image_viewer
    # quit_flag = cv_simple_image_viewer(img=is_img_compare, preview_width=1000, window_name=f'Class Map Merge')
    # if quit_flag:
    #     import sys
    #     sys.exit()
    return working_is_img
def move(self):
    img_src_pathlist, ann_src_pathlist, img_dst_pathlist, ann_dst_pathlist = self.get_pathlists()
    nonempty = self.is_nonempty(self.img_src, img_src_pathlist, self.ann_src, ann_src_pathlist)
    if nonempty:
        self.check_dst_size(img_dst_pathlist, ann_dst_pathlist)
        self.check_all_ann_exists_in_img(img_src_pathlist, ann_src_pathlist, img_dst_pathlist, ann_dst_pathlist)
        if not self.assume_labelme:
            self.move_all_src_to_dst(img_src_pathlist, ann_src_pathlist)
        else:
            self.labelme_move_all_src_to_dst(img_dst_pathlist, ann_src_pathlist)
        self.post_move_check()
    else:
        logger.warning(f"Since the src directory is empty, there is nothing to be done.")
def invite_users(self):
    '''read users from student roster and invite them to new channels'''
    logger.info('--- Invite users from student roster to new channels')
    for sc, scl, unique_id in zip(self.ccs['Slack Channel Name'],
                                  self.ccs[SLACK_CHANNEL_ID_COLUMN],
                                  self.ccs['Unique ID']):
        users = []
        for course, addr in zip(self.sr['COURSE'], self.sr['ADDRESS']):
            if unique_id in course:
                users.append(addr)
        if not scl:
            scl = self.look_channel_from_name(sc)['id']
        try:
            users_ids = self.ids_from_emails(users)
            if users_ids:
                res = self.slack_client.conversations_invite(channel=scl, users=users_ids)
                if not res['ok']:
                    logger.warning(res['error'])
        except Exception as E:
            logger.warning(str(E))
def db_get_job_data_from_h(h_id: int, h_msg_id: int) -> dict:
    """
    Reads the job details by its helper id (user id of helper) and message id
    in the helpers chat from the entry_point table.

    :param h_id: user id of the helper
    :param h_msg_id: message id in the helpers chat
    :return: Database row as a dictionary; if no row matches the query, all values are None
    """
    try:
        with PSCON as con:
            with con.cursor() as c:
                sql_str = """SELECT * FROM entry_point WHERE h_id=%s AND h_msg_id=%s"""
                c.execute(sql_str, (h_id, h_msg_id))
                ls = c.fetchone()
                usr = dict(zip(EKH_ROWS, ls))
        logger.info("Job-data of job-id %s fetched from database", usr["h_id"])
        return usr
    except Exception as e:
        print(e)
        logger.warning("unregistered user-id %s tried to get data", h_id)
        return dict.fromkeys(EKH_ROWS)
def parse_fn(fn: str, train_mode: str) -> int:
    if "png" not in fn:
        logger.warning("ignore file: %s", fn)
        return -1
    if train_mode == TRAIN_MODE.PRICE:
        ind = int(fn.split(".", 1)[0].split("_")[-1]) - 3
        labelstr = fn.split(".", 1)[0].split("_")[-2]
        if ind < 0 or ind >= len(labelstr):
            return -1
        if labelstr[ind] > '9' or labelstr[ind] < '0':
            return -1
        return int(labelstr[ind])
    elif train_mode == TRAIN_MODE.PRINT_DATASET:
        fn = fn.split('-')[-2][-2:]
        return int(fn) - 1
    elif train_mode == TRAIN_MODE.BARCODE:
        ind = int(fn.split(".", 1)[0].split("_")[-1])
        labelstr = fn.split(".", 1)[0].split("_")[-2]
        if ind < 0 or ind >= len(labelstr):
            return -1
        if labelstr[ind] > '9' or labelstr[ind] < '0':
            return -1
        return int(labelstr[ind])
    else:
        raise ValueError("UNKNOWN TRAIN_MODE, NO LABEL AVAILABLE")
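# Usage sketch for parse_fn(), with invented file names that match the
# "<labelstr>_<index>.png" pattern the PRICE and BARCODE branches expect.
print(parse_fn("1234_5.png", TRAIN_MODE.PRICE))    # ind = 5-3 = 2, labelstr "1234" -> 3
print(parse_fn("0421_1.png", TRAIN_MODE.BARCODE))  # ind = 1, labelstr "0421" -> 4
print(parse_fn("notes.txt", TRAIN_MODE.PRICE))     # not a png -> -1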
def ExecuteOnMasterOrLeaf(leaf_id, command):
    assert -1 <= leaf_id < args.num_leaves
    if leaf_id >= 0:
        node_name = 'leaf%d' % leaf_id
    else:
        node_name = 'master'
    retry_cnt = 0
    cmd = 'gcloud beta compute --project edgect-1155 ssh --zone %s indigo-%s-%s -- "%s"' % (
        args.zone, args.run_id, node_name, command)
    while True:
        exit_code = os.system(cmd)
        if exit_code == 65280:
            # ssh fails with some small probability; just retry in this case
            retry_cnt += 1
            logger.warning('***WARN*** ssh failed, retrying.. leaf_id = %d, cnt = %d' % (leaf_id, retry_cnt))
            if retry_cnt > 10:
                break
            time.sleep(retry_cnt)
        else:
            break
    return exit_code
def check(self) -> None:
    checks = CheckClass.get_all_checks()
    issues = 0
    for cls in checks:
        if cls.only_startup:
            continue
        try:
            cls(self).check()
        except DatabaseWarning as ex:
            issues += 1
            logger.warning(f"Warning \"{ex.name}\"\n{ex.description}")
        except DatabaseError as ex:
            logger.error(f"Error \"{ex.name}\"\n{ex.description}")
            raise ex
    if not issues:
        logger.debug("Database check found no problems")
    else:
        logger.warning(f"Database check found {issues} problem(s).")
def load_shapes(self):
    self.shape_handler = ShapeHandler()
    for shape in self.shapes:
        label = shape['label']
        line_color = shape['line_color'] if 'line_color' in shape else None  # removed as of version 4.2.5
        fill_color = shape['fill_color'] if 'fill_color' in shape else None  # removed as of version 4.2.5
        points = shape['points']
        group_id = shape['group_id'] if 'group_id' in shape else None  # added as of version 4.2.5
        shape_type = shape['shape_type']
        flags = shape['flags'] if 'flags' in shape else None
        if line_color is not None or fill_color is not None:
            logger.warning(
                f"As of version 4.2.5, the 'line_color' and 'fill_color' keys in the shape dictionary are deprecated."
            )
        if flags is None:
            logger.warning(f"'flags' field not found in {self.annotation_path}")
            logger.warning(f"Ignoring flags field. Modify the json if you want to use flags.")
        shape_object = Shape(  # TODO: Finish fixing
            label=label,
            group_id=group_id,
            points=points,
            shape_type=shape_type,
            flags=flags)
        self.shape_handler.add(shape_object)
def save_pickle(data, filename, filepath, overwrite=False):
    """
    Saves a python object as a pickle file.

    :param data:
    :param filename:
    :param filepath:
    :param overwrite:
    :return:
    """
    # logger.debug("Method: save_pickle(data, filename, filepath, overwrite=False)")
    logger.info("Writing to pickle file: [{}]".format(join(filepath, filename + ".pkl")))
    if not overwrite and exists(join(filepath, filename + ".pkl")):
        logger.warning("File [{}] already exists and Overwrite == False.".format(
            join(filepath, filename + ".pkl")))
        return True
    try:
        if isfile(join(filepath, filename + ".pkl")):
            logger.info("Overwriting on pickle file: [{}]".format(
                join(filepath, filename + ".pkl")))
        with sopen(join(filepath, filename + ".pkl"), 'wb') as pkl_file:
            pk.dump(data, pkl_file)
        return True
    except Exception as e:
        logger.warning("Could not write to pickle file: [{}]".format(
            join(filepath, filename + ".pkl")))
        logger.warning("Failure reason: [{}]".format(e))
        return False
def __parse_static_file(self, parent_file_path, url):
    path = self.__gen_path(parent_file_path, url)
    if path is None:
        return
    if not os.path.exists(path):
        if self.__resource_map.get(path) is None:
            logger.warning("NOT FOUND:%s <- %s" % (path.decode(self.__charset),
                                                   parent_file_path.decode(self.__charset)))
            self.statistics["not_found_count"] += 1
        return
    (url_without_ext, file_ext) = os.path.splitext(url)
    if file_ext in self.__binary_file_exts:
        if self.__resource_map.get(path) is None:
            name_with_md5 = self.__rename_with_md5(path)
            self.__resource_map[path] = name_with_md5
            self.__resource_map[self.__gen_md5_path(path, name_with_md5)] = name_with_md5
            logger.debug('Add Res:%s <- %s' % (name_with_md5, path))
        return
    elif file_ext in self.__text_file_exts:
        logger.info("PATH:%s" % path)
        with open(path, 'r') as staticfile:
            content = staticfile.read().decode(self.__charset)
        targets_matched = self.__get_handler(path)(content)
        for target in targets_matched:
            static_file_url = target
            if not self.__is_a_link(static_file_url):
                logger.debug("%s <- %s" % (static_file_url.decode(self.__charset),
                                           url.decode(self.__charset)))
                self.__parse_static_file(path, static_file_url)
                content = self.__replace_with_cdnurl(path, static_file_url, content)
        with open(path, 'w') as staticfile:
            staticfile.write(content.encode(self.__charset))
        if self.__resource_map.get(path) is None:
            path_with_md5 = self.__rename_with_md5(path)
            if path_with_md5 is not None:
                self.__resource_map[path] = path_with_md5
                self.__resource_map[self.__gen_md5_path(path, path_with_md5)] = path_with_md5
def main():
    logger.warning(txt.START_LOG.format(time=time.strftime('%d %b %Y %H:%M:%S')) + '\n')
    if SONARR_URL is None:
        logger.warning(txt.SONARR_URL_ERROR_LOG + '\n')
    else:
        logger.info(txt.SONARR_URL_CHECK_LOG.format(sonar_url=SONARR_URL) + '\n')
    if API_KEY is None:
        logger.warning(txt.API_KEY_ERROR_LOG + '\n')
    else:
        logger.info(txt.API_KEY_CHECK_LOG.format(api_key=API_KEY) + '\n')
    if CHAT_ID is None:
        logger.debug(txt.CHAT_ID_ERROR_LOG + '\n')
    else:
        logger.info(txt.CHAT_ID_CHECK_LOG.format(chat_id=CHAT_ID) + '\n')
    if BOT_TOKEN is None:
        logger.debug(txt.BOT_TOKEN_ERROR_LOG + '\n')
    else:
        logger.info(txt.BOT_TOKEN_CHECK_LOG.format(bot_token=BOT_TOKEN) + '\n')
    if None not in (SONARR_URL, API_KEY):
        logger.info('\n' + txt.AMBIENT_VARS_CHECK_LOG + '\n')
        logger.info('\n' + txt.SCAN_DELAY_LOG.format(delay=SETTINGS['ScanDelay']) + '\n')
        logger.info('\n' + txt.START_SERVER_LOG + '\n')
        job_thread = threading.Thread(target=server)
        job_thread.start()
        job()  # run once immediately, then schedule it to repeat periodically
        schedule.every(SETTINGS['ScanDelay']).minutes.do(job)
def save_npz(data, filename, filepath='', overwrite=False):
    """
    Saves numpy objects to file.

    :param data:
    :param filename:
    :param filepath:
    :param overwrite:
    :return:
    """
    logger.info("Saving NPZ file: [{}]".format(join(filepath, filename + ".npz")))
    if not overwrite and exists(join(filepath, filename + ".npz")):
        logger.warning("File [{}] already exists and Overwrite == False.".format(
            join(filepath, filename + ".npz")))
        return True
    try:
        sparse.save_npz(join(filepath, filename + ".npz"), data)
        return True
    except Exception as e:
        logger.warning("Could not write to npz file: [{}]".format(
            join(filepath, filename + ".npz")))
        logger.warning("Failure reason: [{}]".format(e))
        return False
def extract_stations_to_csv(net_cdf_file_path):
    if not os.path.exists(net_cdf_file_path):
        logger.warning('no netcdf')
        print('no netcdf')
    else:
        # netcdf station extraction
        fid = Dataset(net_cdf_file_path, mode='r')
        lats = fid.variables['XLAT'][0, :, 0]
        lons = fid.variables['XLONG'][0, 0, :]
        lon_min = lons[0].item()
        lat_min = lats[0].item()
        lon_max = lons[-1].item()
        lat_max = lats[-1].item()
        print('[lon_min, lat_min, lon_max, lat_max] :', [lon_min, lat_min, lon_max, lat_max])
        lat_inds = np.where((lats >= lat_min) & (lats <= lat_max))
        lon_inds = np.where((lons >= lon_min) & (lons <= lon_max))
        fid.close()
        width = len(lons)
        height = len(lats)
        stations = [['latitude', 'longitude']]
        for y in range(height):
            for x in range(width):
                lat = float(lats[y])
                lon = float(lons[x])
                stations.append([lat, lon])
        create_csv('d03__A.csv', stations)
def splitline(imgleft):
    lh, lw = imgleft.shape
    lineinfo = []
    maxline = lw * 255
    for i in range(lh):
        linesum = np.sum(imgleft[i])
        if linesum >= maxline * 0.95:
            lineinfo.append(i)
    # print(lineinfo)
    lineinfo_unique = []
    last = -1
    temp = []
    temp2 = []
    for i in lineinfo:
        if i == last + 1:
            temp.append(i)
        else:
            if temp is not None and len(temp) > 0:
                temp2.append(temp)
            temp = [i]
        last = i
    if temp is not None and len(temp) > 0:
        temp2.append(temp)
    # print(temp2)
    # the first line encountered is the thick top line of the first box
    flip = -1
    for i in temp2:
        # flip == 0: take the top edge, shifted up
        # flip == -1: take the bottom edge, shifted down
        if flip == 0:
            i[flip] -= 3
        elif flip == -1:
            i[flip] += 3
        lineinfo_unique.append(i[flip])
        flip ^= -1
    if len(lineinfo_unique) % 2 != 0:
        logger.warning("warning: lineinfo is not a multiple of 2")
    logger.info(f"split into {len(lineinfo_unique)/2} parts")
    return lineinfo_unique
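# A tiny synthetic check for splitline(), assuming a binarized grayscale image
# where separator rows are fully white (255); the image below is invented.
import numpy as np

img = np.zeros((20, 10), dtype=np.uint8)
img[0:2] = 255    # thick top line of the first box
img[10:12] = 255  # bottom separator line
print(splitline(img))  # -> [4, 7]: the row span between the two separators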
def LoadIP(self):
    logger.info("[IPPool] LoadIP.")
    f = open(self.ipfile, 'r')
    logger.warning("before load: %s" % (time.time()))
    for eachline in f:
        ipstart, ipend, country, province, city, sp = eachline.strip().split(',')
        ipstart = long(ipstart)
        ipend = long(ipend)
        # skip entries whose ip address is 0
        if 0 == ipstart:
            continue
        self.iplist.append(ipstart)
        if ipstart in self.iphash:
            # print "maybe has same ipstart"
            pass
        else:
            # ipstart, ipend, country, province, city, sp, domain-ip hash
            self.iphash[ipstart] = [ipstart, ipend, country, province, city, sp, {}]
        # ideally merge first, then compute
        self.JoinIP(ipstart)
    f.close()
    logger.warning("after load: %s" % (time.time()))
    self.iplist.sort()
    logger.warning("after sort: %s" % (time.time()))
def terminate_test():
    if 'GITHUB_TOKEN' not in os.environ:
        logger.warning('Not terminating stress test, no github token provided')
        return
    if 'TFID' not in os.environ:
        logger.warning('Not terminating stress test, no tfid provided')
        return
    if 'STRESS_TEST_TEST_NAME' not in os.environ:
        logger.warning('Not terminating stress test, no test name provided')
        return
    logger.info(
        f'Terminating stress test {os.environ["TFID"]} with type {os.environ["STRESS_TEST_TEST_NAME"]}'
    )
    resp = requests.post(
        'https://api.github.com/repos/jina-ai/jina-terraform/dispatches',
        headers={
            'Accept': 'application/vnd.github.v3+json',
            'Authorization': f'token {os.environ["GITHUB_TOKEN"]}'
        },
        json={
            'event_type': 'terminate-stress-test',
            'client_payload': {
                'tfid': os.environ["TFID"],
                'test-name': os.environ["STRESS_TEST_TEST_NAME"]
            }
        })
    logger.info(f'Terminating stress test response is {resp.status_code}')
def graph_stats(G):
    """
    Generates and returns graph related statistics.

    :param G: Graph in Networkx format.
    :return: dict
    """
    G_stats = OrderedDict()
    G_stats["info"] = nx.info(G)
    logger.debug("info: [{0}]".format(G_stats["info"]))
    G_stats["degree_sequence"] = sorted([d for n, d in G.degree()], reverse=True)
    # logger.debug("degree_sequence: {0}".format(G_stats["degree_sequence"]))
    G_stats["dmax"] = max(G_stats["degree_sequence"])
    logger.debug("dmax: [{0}]".format(G_stats["dmax"]))
    G_stats["dmin"] = min(G_stats["degree_sequence"])
    logger.debug("dmin: [{0}]".format(G_stats["dmin"]))
    G_stats["node_count"] = nx.number_of_nodes(G)
    # logger.debug("node_count: [{0}]".format(G_stats["node_count"]))
    G_stats["edge_count"] = nx.number_of_edges(G)
    # logger.debug("edge_count: [{0}]".format(G_stats["edge_count"]))
    G_stats["density"] = nx.density(G)
    logger.debug("density: [{0}]".format(G_stats["density"]))
    if nx.is_connected(G):
        G_stats["radius"] = nx.radius(G)
        logger.debug("radius: [{0}]".format(G_stats["radius"]))
        G_stats["diameter"] = nx.diameter(G)
        logger.debug("diameter: [{0}]".format(G_stats["diameter"]))
        G_stats["eccentricity"] = nx.eccentricity(G)
        logger.debug("eccentricity: [{0}]".format(G_stats["eccentricity"]))
        G_stats["center"] = nx.center(G)
        logger.debug("center: [{0}]".format(G_stats["center"]))
        G_stats["periphery"] = nx.periphery(G)
        logger.debug("periphery: [{0}]".format(G_stats["periphery"]))
    else:
        logger.warning("The graph is not connected.")
        G_comps = nx.connected_components(G)
        logger.debug([len(c) for c in sorted(G_comps, key=len, reverse=True)])
    return G_stats
def load_pickle(filename, filepath):
    """
    Loads a pickle file from disk.

    :param filename:
    :param filepath:
    :return:
    """
    # logger.debug("Method: load_pickle(pkl_file)")
    logger.info("Reading from pickle file: [{}]".format(join(filepath, filename + ".pkl")))
    if exists(join(filepath, filename + ".pkl")):
        try:
            logger.info("Reading pickle file: [{}]".format(join(filepath, filename + ".pkl")))
            with sopen(join(filepath, filename + ".pkl"), 'rb') as pkl_file:
                loaded = pk.load(pkl_file)
            return loaded
        except Exception as e:
            logger.warning("Could not open file: [{}]".format(join(filepath, filename + ".pkl")))
            logger.warning("Failure reason: [{}]".format(e))
            return False
    else:
        logger.warning("File not found at: [{}]".format(join(filepath, filename + ".pkl")))
def load_image_from_url(storage_path, photo_url, details=None):
    # Sometimes the URL fails to be retrieved so we handle that case here.
    if photo_url is not None and photo_url != 'none':
        logger.info('Fetching image from URL: {}.'.format(photo_url))
        image_name = photo_url.split('_')[1]
        if details is None:
            image_path = os.path.join(storage_path, image_name)
        else:
            image_folder = os.path.join(storage_path, details)
            pathlib.Path(image_folder).mkdir(exist_ok=True)
            image_path = os.path.join(image_folder, image_name)
        try:
            image_data = requests.get(photo_url)
            if image_data.status_code == 200:
                with open(image_path, 'wb') as file:
                    file.write(image_data.content)
                return {'details': details, 'image_name': image_name}
            else:
                logger.warning('Could not fetch image from URL: {}, skipping.'.format(photo_url))
                return None
        except requests.exceptions.ConnectionError:
            logger.warning('Max retries exceeded with photo {}. Temporary failure in name resolution'.format(photo_url))
            return None
    else:
        logger.warning('A URL for the current photo has not been provided. Skipping.')
        return None
async def handle_pending_guild_message(self, message: Message):
    if message.author.bot:
        return
    if message.guild.id not in self._pending_guild:
        return
    if not self._pending_guild[message.guild.id]:
        return
    if not self._pending_guild[message.guild.id]["waiting_for_password"]:
        return
    self._pending_guild[message.guild.id]["waiting_for_password"] = False
    if message.content == PASSWORD_REMOVE_BOT:
        logger.warning(f"Removing bot from its guilds: {self._bot.guilds}")
        msg = await message.channel.send("Removing bot from other guilds....")
        for guild_id in list(self.keys()):
            await self.remove_guild(guild_id)
        await msg.edit(content="The bot was removed from other guilds. You can try again!")
    elif message.content == PASSWORD_KICK_BOT:
        logger.warning(f"Kicking bot from its guilds: {self._bot.guilds}")
        msg = await message.channel.send("Kicking bot from other guilds....")
        for guild_id in list(self.keys()):
            await self.remove_guild(guild_id, kick=True)
        await msg.edit(content="The bot was kicked from other guilds. You can try again!")
    else:
        logger.warning(f"Bad password to kick bot from its guilds! User: {message.author} in guild {message.guild}")
        await message.channel.send("Bad password! Try again (button must be pressed again)")
async def _move_roles(self, origin_text_channel) -> bool:
    # You must have the move_members permission to use this.
    members = self._singer_role_description.object_reference.members
    for member in members:
        try:
            await member.edit(mute=False,
                              voice_channel=self._voice_channel_description.object_reference)
        except (Forbidden, HTTPException) as err:
            msg = self._messages["MEMBERS_IMPOSSIBLE_TO_MOVE"].format(member=member.mention, err=err)
            logger.warning(msg)
            await self._master_channel_description.object_reference.send(msg)
            await self._send_actions_for_master()
            await origin_text_channel.send(self._messages["WAITING_FOR_SINGER"])
            self._channels[origin_text_channel].waiting = True
            return False
    self._channels[origin_text_channel].waiting = False
    await self._send_rules(origin_text_channel)
    return True
def _save_epoch(self, epoch: int, train_loss: float, val_loss: float,
                train_acc: float, val_acc: float):
    """Writes training and validation scores to a csv, and stores a model to disk."""
    if not self.run_folder:
        logger.warning(
            "`--run_folder` could not be found. "
            "The program will continue, but won't save anything. "
            "Double-check if --run_folder is configured.")
        return
    # Write epoch metrics
    path_to_results = f"results/{self.run_folder}/training_results.csv"
    with open(path_to_results, "a") as wf:
        wf.write(f"{epoch}, {train_loss}, {val_loss}, {train_acc}, {val_acc}\n")
    # Write model to disk
    path_to_model = f"results/{self.run_folder}/model.pt"
    torch.save(self.model.state_dict(), path_to_model)
    logger.save(f"Stored model and results at results/{self.run_folder}")
def static(server):
    url = server.path
    if url == '/':
        url = '/index.html'
    staticfiles = Path(Path(__file__).parent, 'static').absolute()
    filename = str(staticfiles) + url
    if not Path(filename).exists():
        logger.info('file not found!')
        server.send_response(NOTFOUND)
        server.end_headers()
        return
    # check url using pathlib: it has to be a child of staticfiles;
    # prevents going up in the directory tree using '..' in the url
    if not str(Path(filename).resolve()).startswith(str(staticfiles)):
        # malicious request for a file outside of staticfiles
        logger.warning('malicious request!')
        # make sure to answer the same way as 404 requests
        # so that the existence of files cannot be checked using get requests
        server.send_response(NOTFOUND)
        server.end_headers()
        return
    logger.info('file request for {}'.format(filename))
    # send css and js files upon request
    contents = b''
    with open(filename, 'rb') as f:
        contents = f.read()
    # send ok
    content_type = allowed_types[url[url.rfind('.'):]]
    server.send_response(OK)
    server.send_header('Content-type', content_type)
    server.end_headers()
    # send file
    server.wfile.write(contents)
def get_mask(event):
    if event.message.type == 'location':
        try:
            positions = open(data_folder + 'csv/positions.csv', 'r')
            logger.info('open positions.csv => Success')
        except:
            logger.warning('open positions.csv => Failed')
            return []  # nothing to search without the csv
        radius = 1.0  # km
        result = []
        mask_data = csv.reader(positions)
        lat1 = float(event.message.latitude)
        lng1 = float(event.message.longitude)
        for data in mask_data:
            try:
                lat2 = float(data[7])
                lng2 = float(data[8])
            except:
                continue
            if distance(lat1, lng1, lat2, lng2) <= radius:
                result.append(data)
        result = sorted(result, key=lambda x: (int(x[4]) + int(x[5])) * -1)
        return result[:10]  # 10 maximum
def check_status(self):
    """
    Check whether each client is online; remove those that are not.
    :return:
    """
    while True:
        time.sleep(5)
        for i in range(len(self._slaves['ip'])):
            try:
                res = self.request.request('get', self._slaves['ip'][i], self._slaves['port'][i], 'checkStatus')
                if res.status_code == 200:
                    logger.info(f"Client {self._slaves['ip'][i]} is healthy")
                    continue
                else:
                    ip = self._slaves['ip'].pop(i)
                    self._slaves['port'].pop(i)
                    self._slaves['system'].pop(i)
                    self._slaves['cpu'].pop(i)
                    self._slaves['mem'].pop(i)
                    self._slaves['time'].pop(i)
                    self._slaves['disk'].pop(i)
                    self._slaves['nic'].pop(i)
                    logger.warning(f"Client {ip} is in an abnormal state and has been taken offline")
                    break
            except Exception as err:
                logger.error(err)
                ip = self._slaves['ip'].pop(i)
                self._slaves['port'].pop(i)
                self._slaves['system'].pop(i)
                self._slaves['cpu'].pop(i)
                self._slaves['mem'].pop(i)
                self._slaves['time'].pop(i)
                self._slaves['disk'].pop(i)
                self._slaves['nic'].pop(i)
                logger.warning(f"Client {ip} is in an abnormal state and has been taken offline")
                break
def verify_slack_signature(self, event):
    if not self.verify:  # pragma: no cover
        logger.warning('VERIFICATION DISABLED')
        return True

    # 403 FORBIDDEN if message is older than 5min
    now = datetime.utcnow().timestamp()
    ts = event.headers.get('x-slack-request-timestamp')
    delta = int(now) - int(ts or '0')
    if delta > 5 * 60:
        raise Forbidden('Request too old')

    # 403 FORBIDDEN if signatures do not match
    data = f'{ self.signing_version }:{ ts }:{ event.body }'.encode()
    secret = self.signing_secret.encode()
    hex = hmac.new(secret, data, hashlib.sha256).hexdigest()
    ret = f'{ self.signing_version }={ hex }'
    exp = event.headers.get('x-slack-signature')
    if ret != exp:
        raise Forbidden('Signatures do not match')
    return True
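# A standalone sketch of the signing scheme the method above checks, following
# Slack's documented "v0:{timestamp}:{body}" HMAC-SHA256 recipe; the secret,
# timestamp, and body below are example values, not real credentials.
import hashlib
import hmac

signing_secret = b'8f742231b10e8888abcd99yyyzzz85a5'  # example value
timestamp = '1531420618'
body = 'token=xyzz0&team_id=T1DC2JH3J'

basestring = f'v0:{timestamp}:{body}'.encode()
signature = 'v0=' + hmac.new(signing_secret, basestring, hashlib.sha256).hexdigest()
print(signature)  # compare against the x-slack-signature header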
def __init__(self, *args, **kwargs):
    super(AI, self).__init__(*args, **kwargs)
    db.session.add(self)
    db.session.commit()
    self.latest_version()
    self.updated(True)

    @ftp.safe
    def f():
        path = "AIs/" + str(self.id) + "/icon.png"
        if not ftp.ftp_host.path.isfile(path):
            return
        ftp.ftp_host.remove(path)

    try:
        f()
    except ftp.err:
        logger.warning("Icon reset failed")
    self.copy_example_code()
    db_obj_init_msg(self)
def db_get_job_data(ch_id, ch_msg_id):
    """
    Reads the job details by its channel id and message id from the entry_point table.

    :rtype: dict
    :param int ch_id: Channel ID of the channel the job was posted in
    :param int ch_msg_id: ID of the message the job was posted in
    :return: Database row as a dictionary; if no row matches the query, all values are None
    """
    try:
        with PSCON as con:
            with con.cursor() as c:
                sql_str = """SELECT * FROM entry_point WHERE ch_id=%s AND ch_msg_id=%s"""
                c.execute(sql_str, (ch_id, ch_msg_id))
                ls = c.fetchone()
                usr = dict(zip(EKH_ROWS, ls))
        logger.info("Job-data of job-id %s fetched from database", usr["id"])
        return usr
    except Exception as e:
        print(e)
        logger.warning("unregistered user-id %s tried to get data", ch_id)
        return dict.fromkeys(EKH_ROWS)
def get_userinfo(self, fworkid):
    """
    Get the information of the user.

    :param fworkid:
    :return:
    """
    if not fworkid:
        logger.warning("the workid is None")
        return {}
    redis_key = self.product_redis_key('userinfo', fworkid)
    data = self.redis_db.hgetall(redis_key)
    if not data:
        sql = 'select fcname, fwork_id, fdept_id, flevel_id from userinfo where fwork_id={fworkid}'.format(fworkid=fworkid)
        data = self.mysql_db.query_one_dict(sql)
        if data:
            self.redis_db.hmset(redis_key, data)
            self.redis_db.expire(redis_key, self.redis_time)
    return data