def start_service(self):
    """Start the service: schedule the periodic update job, advertise the
    service (unless running as the service manager), start port checkers,
    spawn the service threads and block until they all finish.

    Exits the process if startup fails.
    :return: None (or never, via sys.exit on failure)
    """
    args = ArgumentParser().args
    try:
        # Periodic self-update job.
        Jobs().add_interval_job(UPDATE_INTERVAL, self.update)
        if not self.is_sm:
            # Advertise tcp plus http or https, depending on configuration.
            port = {"tcp": args.tcp_port}
            port.update({"https": args.http_port}
                        if args.is_https else {"http": args.http_port})
            self.adv = ServiceAdvertiser(self.service_type, port,
                                         self.get_jid(), self.service_version)
            self.adv.advertise()
        checker_ls = []
        self.add_port_checker(args, checker_ls)
        PortChecker(checker_ls).start()
        self.services(args, self.thread_ls)
        self.__sys_services(args, self.thread_ls)
        logger.warn("start services for %s, args:%s" % (self.service_type, args))
        gevent.joinall([thread.start() for thread in self.thread_ls])
    except Exception:  # BUG FIX: was a bare except (caught SystemExit too)
        logger.error(traceback.format_exc())
        sys.exit(1)  # BUG FIX: was sys.exit(0) — a failed start must not report success
def resolve_lang(mcc=None, api_key=None, pid=None):
    """Resolve the translation language for a device/account.

    Strategy:
      1. device mcc takes priority;
      2. otherwise fall back to the account api_key;
      3. otherwise derive the mcc from the device (pid).

    :param mcc: mobile country code
    :param api_key: account API key
    :param pid: device id
    :raises ValueError: when all three arguments are None
    """
    if mcc is not None:
        return _mcc_map.get(mcc, DEFAULT_LANG)
    if api_key is not None:
        if api_key in beiqi_keys:
            return DEFAULT_LANG
        entry = oem_accounts.get(api_key)
        if entry is None:
            logger.warn('tr key {0} not found'.format(api_key))
            return DEFAULT_LANG
        return entry.get('tr') or DEFAULT_LANG
    if pid is not None:
        etc = dev_filter.send_cmd(*get_dev_imeiimsi_etc(pid))
        if not etc:
            return DEFAULT_LANG
        query_mcc = etc.split(':')[0]
        if not is_num(query_mcc):
            return DEFAULT_LANG
        return resolve_lang(mcc=int(query_mcc))
    raise ValueError('at least one arg required')
def test_warn(capsys):
    """Check that WARN output carries the WARNING prefix on stderr."""
    logger.warn("test")
    out_err = capsys.readouterr()
    assert re.search("WARNING: test", out_err.err)
def retrieve_data(self, link, result, operation, paras='', start_message='', end_message=''):
    """Queue a 'Retrieve' instruction on the simulator side of *link*.

    :param link: 'src--dst' NE pair string.
    :param result: expected retrieve result passed to the composer.
    :param operation: retrieve operation name (used in log messages).
    :param paras: optional parameters for the composed message.
    :param start_message: optional framing start message.
    :param end_message: optional framing end message.
    """
    if self.msg_buff is None:
        self._init_msg_buff()
    src, dst = link.split('--')
    ne_link = self._topology.link(src, dst)
    if ne_link is None:
        logger.warn('No link between NEs to send message. '
                    '%s--%s: Retrieve %s' % (src, dst, operation))
        return
    # The link must pair a simulator with a dut (in either direction).
    if ((not (ne_link.simulator == src and ne_link.dut == dst)) and
            (not (ne_link.simulator == dst and ne_link.dut == src))):
        logger.warn('Retrieve data on unassigned link. '
                    '%s--%s: Retrieve %s' % (src, dst, operation))
        return
    msg_str = self._compose_retrieve_data(result, operation, paras,
                                          start_message, end_message)
    msg_buff = self._get_msg_buff(ne_link.names[0])
    msg_buff.append(msg_str)
    logger.info('"Retrieve" added to buffer of %s' % ne_link.names[0])
def _parse_lexique(self, lexique_path: str, parser_type: str) -> None:
    """Parse the given lexique file and populate the internal hash tables.

    :param lexique_path: path to the lexique file.
    :param parser_type: 'pandas_csv' to use pandas; anything else
        (including 'csv') uses the built-in csv parser.
    :return: None
    """
    try:
        if parser_type == 'pandas_csv':
            df = pd.read_csv(lexique_path, delimiter='\t')
            content = (list(row) for row in df.values)
        else:
            # 'csv' and any unknown value both use the built-in parser
            # (the original had two identical branches for this).
            content = self._parse_csv(lexique_path)
    except UnicodeDecodeError:
        logger.warn(
            f"there was an issue while parsing the file {lexique_path}."
            f" Trying again with built-in csv parser")
        content = self._parse_csv(lexique_path)
    self._create_db(content)
    if self.value_errors:
        self._save_errors(self.value_errors, _VALUE_ERRORS_PATH)
    if self.length_errors:
        self._save_errors(self.length_errors, _LENGTH_ERRORS_PATH)
    return
def start_services():
    """Bring up the service manager, wait long enough for it to detect
    dead services, then start the logic services."""
    start_service_mgr()
    wait_s = HEARTBEAT_EXPIRE_TIME + 10
    logger.warn("sleep %ss for service_mgr check dead service!!!" % wait_s)
    time.sleep(wait_s)
    start_logic_services()
def web_unpick(self, data_ls):
    """Deserialize service rows coming from the web UI.

    :param data_ls: list of dicts with JSON-encoded 'port'/'params' fields.
    :return: list of validated, decoded dicts.
    :raises: re-raises any decode error after logging it.
    """
    unpick_ls = copy.deepcopy(data_ls)
    v_unpick_data_ls = []
    for data_dic in unpick_ls:
        try:
            data_dic['port'] = ujson.loads(data_dic['port']) if data_dic['port'] else {}
            data_dic['params'] = ujson.loads(data_dic['params']) if data_dic['params'] else {}
            # BUG FIX: the original wrote
            #   not id or not grp or not match(ip) if ip else False
            # which parses as (not id or not grp or not match(ip)) if ip
            # else False, so rows without an ip skipped ALL validation.
            bad_ip = (not IP_REGEX.match(data_dic['ip'])) if data_dic['ip'] else False
            if not data_dic['id'] \
                    or not data_dic['service_group'] \
                    or bad_ip:
                logger.warn("TPServiceMgr::web_unpick invalid params:%s" % data_dic)
                continue
        except:
            logger.warn("TPServiceMgr::web_unpick invalid params:%s %s"
                        % (data_dic, traceback.format_exc()))
            raise
        v_unpick_data_ls.append(data_dic)
    return v_unpick_data_ls
def start(self):
    """Begin serving; blocks forever in the MQTT network loop."""
    endpoint = (self.protocol, self.port)
    logger.warn("start listen on %s:%s" % endpoint)
    self._mqttc.loop_forever()
def start_service(self):
    """Start the service: schedule the periodic update job, advertise the
    service (unless running as the service manager), start port checkers,
    spawn the service threads and block until they all finish.

    NOTE(review): near-duplicate of another ``start_service`` in this
    file; consider consolidating.
    """
    args = ArgumentParser().args
    try:
        Jobs().add_interval_job(UPDATE_INTERVAL, self.update)
        if not self.is_sm:
            # Advertise tcp plus http or https, depending on configuration.
            port = {"tcp": args.tcp_port}
            port.update({"https": args.http_port}
                        if args.is_https else {"http": args.http_port})
            self.adv = ServiceAdvertiser(self.service_type, port,
                                         self.get_jid(), self.service_version)
            self.adv.advertise()
        checker_ls = []
        self.add_port_checker(args, checker_ls)
        PortChecker(checker_ls).start()
        self.services(args, self.thread_ls)
        self.__sys_services(args, self.thread_ls)
        logger.warn("start services for %s, args:%s" % (self.service_type, args))
        gevent.joinall([thread.start() for thread in self.thread_ls])
    except:  # NOTE(review): bare except; also exits 0 even on failure
        logger.error(traceback.format_exc())
        sys.exit(0)
def start_service(self):
    """Start the service: schedule the update job, advertise (unless this
    is the service manager), start a port checker for the configured
    ports, then spawn all service threads and wait for them.

    NOTE(review): near-duplicate of other ``start_service`` variants in
    this file; consider consolidating.
    """
    args = ArgumentParser().args
    try:
        Jobs().add_interval_job(UPDATE_INTERVAL, self.update)
        if not self.is_sm:
            self.adv = ServiceAdvertiser(args.service_id, self.service_version, args.port)
            self.adv.advertise()
        check_ports = {}
        if "tcp_port" in args:
            check_ports['tcp'] = args.tcp_port
        if "http_port" in args:
            # http and https share the same port number; the scheme is
            # chosen by the is_https flag.
            if not args.is_https:
                check_ports['http'] = args.http_port
            else:
                check_ports['https'] = args.http_port
        PortChecker(check_ports).start()
        self.thread_ls.extend([Jobs()])
        self.services(args, self.thread_ls)
        logger.warn("start services for %s, args:%s" % (self.service_type, args))
        gevent.joinall([thread.start() for thread in self.thread_ls])
    except:  # NOTE(review): bare except; exits 0 even on failure
        logger.error(traceback.format_exc())
        sys.exit(0)
def post(self, *args, **kwargs):
    """Persist one service group's rows posted from the web UI.

    Splits the currently-known services into the posted group vs. the
    rest, writes only the posted group's diff to the DB, then re-inits
    the manager with the merged data; rolls back in memory on failure.
    """
    try:
        new_grp_data_ls = ServiceMgr().web_unpick(kwargs['js_data'])
        grp = kwargs['grp']
        assert grp
    except:
        logger.warn(
            "SaveServiceData::post error!!!, js_data:%s traceback:%s"
            % (kwargs['js_data'], traceback.format_exc()))
        return
    old_all_data_ls = ServiceMgr().get_init_data_ls()
    old_not_grp_data_ls = []
    old_grp_data_ls = []
    # Partition existing services by whether they belong to the posted group.
    for service_dic in old_all_data_ls:
        if service_dic['service_group'] != grp:
            old_not_grp_data_ls.append(service_dic)
        else:
            old_grp_data_ls.append(service_dic)
    new_all_data_ls = new_grp_data_ls + old_not_grp_data_ls
    try:
        # Only the posted group's rows are diffed against the DB.
        DBServiceInst.update_diff(ServiceMgr().db_pick(old_grp_data_ls),
                                  ServiceMgr().db_pick(new_grp_data_ls))
        ServiceMgr().init(new_all_data_ls)
    except:
        logger.warn(
            "SaveServiceData::post error!!!, data_ls:%s traceback:%s"
            % (new_grp_data_ls, traceback.format_exc()))
        # Roll back the in-memory state (the DB diff may be partial).
        ServiceMgr().init(old_all_data_ls)
def post(self, account, *args, **kwargs):
    """Request a registration verification SMS for *account*.

    :param account: email-shaped account whose local part is the mobile number.
    :return: dict with 'status': 0 ok, 1 bad account, 2 registration
        failed, 3 global send-rate exceeded, 4 already sent recently.
    """
    if not is_email(account):
        logger.warn('account:%s illegal' % account)
        return {'status': 1}
    mobile = account.split('@')[0]
    sms_speed = GDevRdsInts.send_cmd(*get_sms_speed())
    if sms_speed is None:
        # NOTE(review): combine_redis_cmds is applied twice here, unlike
        # the single application below — confirm this is intentional.
        GDevRdsInts.send_multi_cmd(*combine_redis_cmds(
            *combine_redis_cmds(init_sms_speed())))
    elif sms_speed >= SMS_SPEED_MAX:
        logger.debug('sms speed max, mobile={0}, {1}'.format(
            mobile, datetime.now().isoformat()))
        return {'status': 3}
    else:
        GDevRdsInts.send_cmd(*incr_sms_speed())
    ts = GDevRdsInts.send_cmd(*get_user_veri_sms_time(mobile))
    if ts is not None:
        # A verification SMS was already sent recently.
        logger.debug('veri sms, ts={0}'.format(ts))
        return {'status': 4}
    else:
        GDevRdsInts.send_multi_cmd(*combine_redis_cmds(
            set_user_veri_sms_time(mobile, time.time())))
    if not reg_via_mobile(account, None):
        return {'status': 2}
    return {'status': 0}
async def delete(sid: str, data: Dict[str, Any]):
    """Delete a label owned by the requesting player and broadcast the
    removal to the game room."""
    pr: PlayerRoom = game_state.get(sid)
    label = Label.get_or_none(uuid=data)
    if label is None:
        logger.warn(f"{pr.player.name} tried to delete a non-existing label.")
        return
    if label.user != pr.player:
        logger.warn(f"{pr.player.name} tried to delete another user's label.")
        return
    label.delete_instance(True)
    payload = {"user": pr.player.name, "uuid": data}
    await sio.emit(
        "Label.Delete",
        payload,
        room=sid,
        skip_sid=sid,
        namespace=GAME_NS,
    )
def _batch_commit(self, tasks):
    """Execute a batch of metanate tasks inside a single transaction.

    All tasks are assumed to share the same account and trans_type
    (taken from the first task). Duplicate (func, parameters) pairs
    within the batch are executed only once.

    :param tasks: non-empty list of dicts with keys 'account',
        'trans_type', 'func' and 'parameters'.
    """
    account = tasks[0]['account']
    trans_type = tasks[0]['trans_type']
    with transaction.begin(account, trans_type) as tx:
        metanate_object = None
        if trans_type == transaction.TRANS_USER:
            metanate_object = MetanateUser(account, tx)
        else:
            metanate_object = MetanateGroup(account, tx)
        logger.info('do transaction in %s for account: %s, trans_type: %s',
                    self.name, account, trans_type)
        executed_funcs = []
        for task in tasks:
            logger.info('%s, function name: %s, parameters: %s',
                        self.name, task['func'], task['parameters'])
            if 'objectguid' in task['parameters'] and task['parameters']['objectguid']:
                # objectguid arrives base64-encoded; fall back to the raw
                # value if decoding fails.
                try:
                    task['parameters']['objectguid'] = base64.b64decode(
                        task['parameters']['objectguid'])
                except Exception:
                    logger.warn('objectguid is not base64 encoded')
            func_params = (task['func'], task['parameters'])
            if func_params in executed_funcs:
                logger.info('the same request has executed and ignore this time')
                continue
            func = getattr(metanate_object, task['func'])
            func(**task['parameters'])
            logger.info('submitted metanate request')
            executed_funcs.append(func_params)
        # END for
        tx.commit(metanate_object)
        logger.info('metanate transaction is done')
def wait(self, link, tm):
    """Queue a 'wait' instruction on the simulator side of *link*.

    :param link: 'src--dst' NE pair string.
    :param tm: time to wait, in milliseconds.
    """
    if self.msg_buff is None:
        self._init_msg_buff()
    src, dst = link.split('--')
    ne_link = self._topology.link(src, dst)
    if ne_link is None:
        logger.warn('No link between NEs to send message. '
                    '%s->%s: Wait' % (src, dst))
        return
    # The link must pair a simulator with a dut (in either direction).
    if ((not (ne_link.simulator == src and ne_link.dut == dst)) and
            (not (ne_link.simulator == dst and ne_link.dut == src))):
        # NOTE(review): message says 'Stop' — likely copy-pasted from finish().
        logger.warn('Stop on unassigned link. '
                    '%s-%s: Stop' % (src, dst))
        return
    msg_str = self._compose_wait(tm)
    msg_buff = self._get_msg_buff(ne_link.names[0])
    msg_buff.append(msg_str)
    logger.info('"wait %s" added to buffer %s' % (tm, ne_link.names[0]))
def deal_with_text(data_list, mode='full'):
    """Build (or load from cache) the normalized text list for the datasets.

    :param data_list: 1-3 datasets, each an iterable of dicts with a 'text' key.
    :param mode: 'train' | 'dev' | 'mix' | 'full'; must match len(data_list).
    :return: list of simplified-Chinese text strings.
    :raises ValueError: if (len(data_list), mode) is not supported.
    """
    if len(data_list) == 1 and mode == 'train':
        cache_text = get_config_values('cache', 'text_train')
    elif len(data_list) == 1 and mode == 'dev':
        cache_text = get_config_values('cache', 'text_dev')
    elif len(data_list) == 2 and mode == 'mix':
        cache_text = get_config_values('cache', 'text_mix')
    elif len(data_list) == 3 and mode == 'full':
        cache_text = get_config_values('cache', 'text_full')
    else:
        # BUG FIX: the original only logged here and then crashed with a
        # NameError on the undefined cache_text; fail fast instead.
        logger.warn('Found data format wrong when dealing with text...')
        raise ValueError('unsupported data_list/mode combination: '
                         '%d/%s' % (len(data_list), mode))
    if not os.path.exists(cache_text):
        logger.info("dealing with text...")
        text = []
        for dataset in tqdm(data_list):
            text.extend([Converter('zh-hans').convert(line['text'])
                         for line in dataset])
        save_pickle(cache_text, text)
    else:
        logger.info("loading with text...")
        text = load_pickle(cache_text)
    logger.info("text total num: {0}".format(len(text)))
    return text
def _connect_to_tools(self):
    """Open TCP connections to every distinct tool address and start the
    sender/receiver worker threads.

    NOTE(review): on connect failure the broken link is reported but the
    dead socket is still stored in self._socks (there is no ``continue``)
    — confirm this is intentional.
    """
    addrs = set(self._addresses.values())
    self._socks = dict.fromkeys(addrs, None)
    self._recv_buff = dict.fromkeys(addrs, '')
    self._sock_queue = Queue.Queue()  # Python 2 'Queue' module
    for addr in addrs:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            logger.info('Connecting to tool at %s...' % str(addr))
            sock.connect(addr)
        except socket.error:
            logger.warn('Cannot connect to tool %s' % str(addr))
            self._report_link_broken(addr)
        self._socks[addr] = sock
        self._recv_buff[addr] = ''
    logger.info('All tools connected.')
    # Sender & receiver live in separate threads; they communicate with
    # the main thread through queues.
    self.sender = threading.Thread(target=self._send_handler)
    self.reciever = threading.Thread(target=self._receive_handler)
    self.sender.daemon = True  # thread dies after main thread exit
    self.reciever.daemon = True  # thread dies after main thread exit
    self.sender.start()
    self.reciever.start()
def deal_with_postag(data_list, mode='full'):
    """Build (or load from cache) the postag list for the datasets.

    Each entry is a list of [simplified word, pos tag, original length]
    triples for one line.

    :param data_list: 1-3 datasets, each an iterable of dicts with a 'postag' key.
    :param mode: 'train' | 'dev' | 'mix' | 'full'; must match len(data_list).
    :return: list of per-line postag triples.
    :raises ValueError: if (len(data_list), mode) is not supported.
    """
    if len(data_list) == 1 and mode == 'train':
        cache_postag = get_config_values('cache', 'postag_train')
    elif len(data_list) == 1 and mode == 'dev':
        cache_postag = get_config_values('cache', 'postag_dev')
    elif len(data_list) == 2 and mode == 'mix':
        cache_postag = get_config_values('cache', 'postag_mix')
    elif len(data_list) == 3 and mode == 'full':
        cache_postag = get_config_values('cache', 'postag_full')
    else:
        # BUG FIX: the original only logged here and then crashed with a
        # NameError on the undefined cache_postag; fail fast instead.
        logger.warn('Found data format wrong when dealing with postag...')
        raise ValueError('unsupported data_list/mode combination: '
                         '%d/%s' % (len(data_list), mode))
    if not os.path.exists(cache_postag):
        logger.info("dealing with postag...")
        postag = []
        for dataset in tqdm(data_list):
            for line in dataset:
                postag.append([[
                    Converter('zh-hans').convert(
                        word['word'].strip().replace(' ', '')),
                    word['pos'],
                    len(word['word'])
                ] for word in line['postag']])
        save_pickle(cache_postag, postag)
    else:
        logger.info("loading with postag...")
        postag = load_pickle(cache_postag)
    logger.info("postag total num: {0}".format(len(postag)))
    logger.info("postag 5: {0}".format(postag[:5]))
    return postag
def web_unpick(self, data_ls):
    """Deserialize service rows coming from the web UI.

    :param data_ls: list of dicts with JSON-encoded 'port'/'params' fields.
    :return: list of validated, decoded dicts.
    :raises: re-raises any decode error after logging it.
    """
    unpick_ls = copy.deepcopy(data_ls)
    v_unpick_data_ls = []
    for data_dic in unpick_ls:
        try:
            data_dic['port'] = ujson.loads(
                data_dic['port']) if data_dic['port'] else {}
            data_dic['params'] = ujson.loads(
                data_dic['params']) if data_dic['params'] else {}
            # BUG FIX: the original wrote
            #   not id or not grp or not match(ip) if ip else False
            # which parses as (not id or not grp or not match(ip)) if ip
            # else False, so rows without an ip skipped ALL validation.
            bad_ip = (not IP_REGEX.match(data_dic['ip'])) if data_dic['ip'] else False
            if not data_dic['id'] \
                    or not data_dic['service_group'] \
                    or bad_ip:
                logger.warn("TPServiceMgr::web_unpick invalid params:%s"
                            % data_dic)
                continue
        except:
            logger.warn("TPServiceMgr::web_unpick invalid params:%s %s"
                        % (data_dic, traceback.format_exc()))
            raise
        v_unpick_data_ls.append(data_dic)
    return v_unpick_data_ls
def handler_exception(engine, e):
    """Close the writer and terminate the engine; swallow a post-start
    KeyboardInterrupt, re-raise everything else."""
    writer.close()
    engine.terminate()
    graceful = isinstance(e, KeyboardInterrupt) and engine.state.iteration > 1
    if not graceful:
        raise e
    logger.warn('KeyboardInterrupt caught. Exiting gracefully.')
def io_get_account_info(self):
    '''Fetch futures account information and refresh the cached balance,
    margin and profit fields on this instance.

    The result object exposes (among others): canDeposit, canWithdraw,
    feeTier, maxWithdrawAmount, totalInitialMargin, totalMaintMargin,
    totalMarginBalance, totalOpenOrderInitialMargin,
    totalPositionInitialMargin, totalUnrealizedProfit,
    totalWalletBalance, updateTime, assets, positions.

    :return: 0 on completion
    '''
    result = self.request_client.get_account_information()
    self.get_account_information_result = result
    if self.origin_balance == 0:
        # Initial margin, captured once on the first refresh.
        self.origin_balance = result.totalWalletBalance
    self.total_balance = result.totalWalletBalance
    # Currently available margin.
    self.margin_balance = result.totalMarginBalance
    self.total_unrealised_profit = result.totalUnrealizedProfit
    # Margin ratio.
    # NOTE(review): computed as 1 - wallet/margin — confirm this is the
    # intended formula.
    self.total_margin_ratio = 1 - (float(result.totalWalletBalance) /
                                   float(result.totalMarginBalance))
    self.profit = (self.total_balance - self.origin_balance)
    self.profit_ratio = (self.total_balance - self.origin_balance) / self.origin_balance
    if self.old_total_balance != self.total_balance:
        # NOTE(review): print-style multi-argument calls; stdlib logging
        # would treat the extra args as %-format args — confirm this
        # logger accepts print-style arguments.
        logger.warn('@@ Now status: total_balance:', round(self.total_balance, 6),
                    ',total_unrealised_profit:', round(self.total_unrealised_profit, 6))
        logger.warn('@@ Now status: total_margin_ratio:', round(self.total_margin_ratio * 100, 6),
                    '%, profit:', round(self.profit, 6),
                    ', profit_ratio:', round(self.profit_ratio * 100, 6), '%')
        self.old_total_balance = self.total_balance
    self.assets = result.assets
    self.positions = result.positions
    return 0
def finish(self, link):
    """Queue the test-case stop point on the simulator side of *link*.

    At least one stop point must exist in each test case. The test tool
    sends the stop signal to the dispatcher, which notifies all other
    tools to stop; after that the dispatcher forwards the test reports
    to the verification module.

    :param link: 'src--dst' NE pair string.
    """
    if self.msg_buff is None:
        self._init_msg_buff()
    src, dst = link.split('--')
    ne_link = self._topology.link(src, dst)
    if ne_link is None:
        logger.warn('No link between NEs to send message. '
                    '%s--%s: Stop' % (src, dst))
        return
    # The link must pair a simulator with a dut (in either direction).
    if ((not (ne_link.simulator == src and ne_link.dut == dst)) and
            (not (ne_link.simulator == dst and ne_link.dut == src))):
        logger.warn('Finish on unassigned link. '
                    '%s--%s: Stop' % (src, dst))
        return
    msg_str = self._compose_finish()
    msg_buff = self._get_msg_buff(ne_link.names[0])
    msg_buff.append(msg_str)
    logger.info('"Finish" added to buffer of %s' % ne_link.names[0])
def dispatch(self, **kwargs):
    """Route a view request to the matching service handler.

    Keyword Args:
        service: one of
            'ADMINISTRATOR' - administrator service
            'SLAVE'         - slave service
            'DETAIL'        - detail-record service
            'GET_FEE'       - fee query service
            'INVOICE'       - invoice service
            'POWER'         - slave power on/off service
            'REPORT'        - report service

    Raises:
        RuntimeError: if the service type is not supported.
    """
    handlers = {
        'ADMINISTRATOR': self.__dispatch_administrator_service,
        'SLAVE': self.__dispatch_slave_service,
        'DETAIL': self.__dispatch_detail_service,
        'GET_FEE': self.__dispatch_get_fee_service,
        'INVOICE': self.__dispatch_invoice_service,
        'POWER': self.__dispatch_power_service,
        'REPORT': self.__dispatch_report_service,
    }
    handler = handlers.get(kwargs.get('service'))
    if handler is None:
        logger.warn('不支持的service')
        raise RuntimeError('不支持的service')
    return handler(**kwargs)
def on_recieve(self, msg, src, dst, paras='', paras_to_save='', index=0):
    """Begin a sub-procedure that reacts to *msg* received from the dut.

    The sub-procedure extends until the next on_recieve. The reaction set
    for a message is only valid for one test case.

    :param msg: message name that triggers the reaction.
    :param src: dut NE name (sender).
    :param dst: simulator NE name (receiver).
    :param paras: parameters for the composed message.
    :param paras_to_save: parameters to capture from the message.
    :param index: occurrence index of the message.
    """
    if self.msg_buff is None:
        self._init_msg_buff()
    ne_link = self._topology.link(src, dst)
    if ne_link is None:
        logger.warn('No link between NEs to send message. '
                    '%s<-%s: %s' % (dst, src, msg))
        return
    if not (ne_link.simulator == dst and ne_link.dut == src):
        logger.warn('Not a message from dut to simulator. '
                    '%s<-%s: %s' % (dst, src, msg))
        return
    msg_str = self._compose_on_recv(msg, paras, paras_to_save, index)
    msg_buff = self._get_msg_buff(ne_link.names[0])
    msg_buff.append(msg_str)
    # BUG FIX: the log message said "send" — copy-pasted from send().
    logger.info('"on_recieve %s" added to buffer of %s'
                % (msg, ne_link.names[0]))
def post(self, *args, **kwargs):
    """Re-initialize the service manager with one group's rows from the
    web UI, keeping other groups' rows unchanged.

    NOTE(review): unlike the sibling handler that calls
    DBServiceInst.update_diff, this variant never writes the diff to the
    DB — confirm that is intentional.
    """
    try:
        new_grp_data_ls = ServiceMgr().web_unpick(kwargs['js_data'])
        grp = kwargs['grp']
        assert grp
    except:
        logger.warn("SaveServiceData::post error!!!, js_data:%s traceback:%s"
                    % (kwargs['js_data'], traceback.format_exc()))
        return
    old_all_data_ls = ServiceMgr().get_init_data_ls()
    old_not_grp_data_ls = []
    old_grp_data_ls = []
    # Partition existing rows by group membership.
    for service_dic in old_all_data_ls:
        if service_dic['service_group'] != grp:
            old_not_grp_data_ls.append(service_dic)
        else:
            old_grp_data_ls.append(service_dic)
    new_all_data_ls = new_grp_data_ls + old_not_grp_data_ls
    try:
        ServiceMgr().init(new_all_data_ls)
    except:
        logger.warn("SaveServiceData::post error!!!, data_ls:%s traceback:%s"
                    % (new_grp_data_ls, traceback.format_exc()))
        # Roll back to the previous in-memory state.
        ServiceMgr().init(old_all_data_ls)
def send(self, msg, src, dst, paras='', paras_to_save='', delay=0):
    """Queue a message to be sent from the simulator *src* to the dut *dst*.

    Also records the first sender as the test trigger when no on_recieve
    has been seen yet.

    :param msg: message name.
    :param src: simulator NE name (sender).
    :param dst: dut NE name (receiver).
    :param paras: parameters for the composed message.
    :param paras_to_save: parameters to capture from the message.
    :param delay: send delay.
    """
    if self.msg_buff is None:
        self._init_msg_buff()
    ne_link = self._topology.link(src, dst)
    if ne_link is None:
        logger.warn('No link between NEs to send message. '
                    '%s->%s: %s' % (src, dst, msg))
        return
    if not (ne_link.simulator == src and ne_link.dut == dst):
        logger.warn('Not a message from simulator to dut. '
                    '%s->%s: %s' % (src, dst, msg))
        return
    msg_str = self._compose_send(msg, paras, paras_to_save, delay)
    msg_buff = self._get_msg_buff(ne_link.names[0])
    msg_buff.append(msg_str)
    logger.info('"send %s" added to buffer of %s' % (msg, ne_link.names[0]))
    if self.trigger is None and not self._on_recv_found:
        # NOTE(review): the trigger is set to the buffer's NE name but the
        # log prints src — confirm they always coincide.
        self.trigger = msg_buff.ne_name
        logger.info('Found trigger: %s' % src)
def web_unpick(self, data_ls):
    """Deserialize service rows posted from the web UI.

    :param data_ls: list of row dicts with JSON-encoded fields.
    :return: list of validated, decoded row dicts.
    :raises: re-raises any decode error after logging it.
    """
    rows = copy.deepcopy(data_ls)
    valid_rows = []
    for row in rows:
        try:
            row["port"] = ujson.loads(row["port"]) if row["port"] else {}
            row["params"] = ujson.loads(row["params"]) if row["params"] else {}
            row["state"] = SS_RUNNING if row["state"] == "连接" else SS_FREE
            if not row["service_group"] or not int(row["state"]) in SERVICE_STATE:
                logger.warn("ServiceMgr::web_unpick invalid params:%s" % row)
                continue
            # Drop transient, display-only fields.
            del row["process_name"]
            del row["service_version"]
            del row["current_load"]
            del row["heartbeat_time"]
        except:
            logger.warn("ServiceMgr::web_unpick invalid params:%s %s"
                        % (row, traceback.format_exc()))
            raise
        valid_rows.append(row)
    return valid_rows
def load_state(self, fname, sess=None):
    """Deprecated: restore TF variables from *fname*; use load_variables.

    :param fname: checkpoint path.
    :param sess: session to restore into (defaults to the current one).
    :return: result of Saver.restore().
    """
    from utils import logger
    logger.warn('load_state method is deprecated, please use '
                'load_variables instead')
    sess = sess or self.get_session()
    saver = tf.train.Saver()
    # BUG FIX: the explicit `sess` argument was computed but then ignored;
    # the restore always used self.get_session().
    return saver.restore(sess, fname)
def post(self, account, apply_sorder, pay_msg="", file_path="", file_name="", **kwargs):
    """Nginx upload callback: record a payment for a help sub-order.

    :param account: uploading account (must own the apply order).
    :param apply_sorder: help sub-order id being paid.
    :param pay_msg: payment message.
    :param file_path: nginx temp path of the uploaded file ('' if none).
    :param file_name: original name of the uploaded file.
    :param kwargs: unused extra parameters.
    :return: dict with 'result' and, on success, the sub-order state.
    """
    apply_help_pay_dic = ApplyHelpPayMgr().get_data_by_sorder(apply_sorder)
    apply_help_dic = ApplyHelpMgr().get_data_by_order(
        apply_help_pay_dic['apply_order'])
    # Only the applicant may upload payment proof for this order.
    if apply_help_dic['apply_uid'] != account.id:
        logger.info(
            "apply_help_paid ERROR_LOGIC, apply_uid not valid, apply_uid:%s, id:%s"
            % (apply_help_dic['apply_uid'], account.id))
        self.set_status(error_code.ERROR_LOGIC, 'Parameter Error')
        return {"result": error_code.ERROR_LOGIC}
    if apply_help_pay_dic['apply_pstat'] != APYS_PAY_WAIT:
        logger.warn(
            "apply_help_paid ERROR_LOGIC, apply stat is not APYS_PAY_WAIT, apply_uid:%s, id:%s"
            % (apply_help_dic['apply_uid'], account.id))
        self.set_status(error_code.ERROR_LOGIC, 'Parameter Error')
        return {"result": error_code.ERROR_LOGIC}
    if file_path:
        # NOTE(review): split(".")[1] breaks on names with multiple dots;
        # rsplit would be safer — confirm upstream guarantees.
        suffix = file_name.split(".")[1]
        new_file_name = "%s_%s.%s" % (
            account.id,
            urllib2.quote(time.strftime("%Y_%m_%d_%H_%M_%S",
                                        time.localtime(time.time()))),
            suffix)
        # Move the nginx temp upload into the permanent store.
        mv_pay_pic(file_path, new_file_name, ArgumentParser().args.pic_store_path)
        cur_piture_link_path = ArgumentParser().args.pic_download_path + new_file_name
    else:
        cur_piture_link_path = ""
    ApplyHelpPayMgr().do_pay(apply_sorder, cur_piture_link_path, pay_msg)
    return {
        "result": error_code.ERROR_SUCCESS,
        "apply_help": {
            "apply_sorder": apply_sorder,
            "apply_pstat": apply_help_pay_dic['apply_pstat']
        }
    }
def wait_for_auth(self):
    """Block until the XMPP login has been authorized.

    :return: None
    """
    while not self.is_auth:
        # BUG FIX: the log said "sleep 1s" but the code sleeps 5s.
        logger.warn("XMPPClient::wait_for_auth, sleep 5s to wait for auth")
        gevent.sleep(5)
def save_state(self, fname, sess=None):
    """Deprecated: save TF variables to *fname*; use save_variables.

    :param fname: checkpoint path (parent directories are created).
    :param sess: session to save from (defaults to the current one).
    """
    from utils import logger
    # BUG FIX: the message was copy-pasted from load_state.
    logger.warn('save_state method is deprecated, please use '
                'save_variables instead')
    sess = sess or self.get_session()
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    saver = tf.train.Saver()
    # BUG FIX: honor the explicit `sess` argument (it was ignored).
    saver.save(sess, fname)
def enqueue_request(self, request):
    """Queue *request* unless the duplicate filter has already seen it."""
    seen = (not request.dont_filter
            and self.request_filter.request_seen(request))
    if seen:
        logger.warn("ignore %s", request.url)
        return
    self.queue.put(request)
async def _send_heartbeat_msg(self, *args, **kwargs):
    """Send a heartbeat (pong) frame to the server over the websocket."""
    if not self.ws:
        logger.warn("websocket connection not connected yet!", caller=self)
        return
    payload = {"pong": int(time.time() * 1000)}
    await self.ws.send_json(payload)
def start(self):
    """Start serving requests; blocks in serve_forever."""
    assert self.server
    endpoint = (self.protocol, self.port)
    logger.warn("start listen on %s:%s" % endpoint)
    self.server.serve_forever()
def handle(self, data, address):
    """Process one heartbeat datagram and forward it to the service."""
    (service_id, process_name, service_version,
     port, current_load, running) = ujson.loads(data)
    service_obj = ServiceMgr().get_service_by_id(int(service_id))
    if not service_obj:
        logger.warn("HeartbeatApp:handle invalid service_id:%s" % service_id)
        return
    service_obj.heart_beat(process_name, service_version, port,
                           int(current_load), bool(running))
def restart(self):
    """Restart the XMPP client (stop, then start again).

    :return: None
    """
    # SECURITY FIX: the previous log line included self.password.
    logger.warn("XMPPClient::restart listen on %s:%s" % (self.protocol, self.JID))
    self.stop()
    self.start()
def start_logic_services():
    """Launch the logic service process and give it time to heartbeat."""
    launch_args = ("--apply_req second_1 --match second_11 --apply_pay second_21"
                   " --accept_req second_31 --day_seconds 1 --force_mtype user"
                   " --enable_active False --enable_pay_check False"
                   " --use_system_balance False")
    start_process(mmm_da_start_path, launch_args)
    logger.warn("sleep %ss for service_mgr heartbeat:%s!!!"
                % (BEAT_INTERVAL, mmm_da_start_path))
    time.sleep(BEAT_INTERVAL)
def init(self, data_ls):
    """Initialize this manager from the first row of *data_ls*.

    :param data_ls: non-empty list of dicts; the first row becomes the
        instance __dict__ wholesale.
    """
    assert data_ls
    # NOTE(review): replaces the entire instance dict with the row.
    self.__dict__ = data_ls[0]
    self.ddp = DirtyDictProcess([])
    # Reward dicts are stored JSON-encoded; decode in place.
    self.pay_reward_dic = ujson.loads(self.pay_reward_dic)
    self.cfmd_reward_dic = ujson.loads(self.cfmd_reward_dic)
    logger.warn("ServerInfoMgr::init __dict__:%s" % sub_dict(self.__dict__, KEY_SET))
def init_checks(self):
    """Build a checker for each configured port, skipping unsupported
    protocols, and seed the last-connect timestamp per checker."""
    for port_dic in self.port_ls:
        checker = self._get_checker(port_dic['type'], port_dic['port'])
        if checker:
            self.checks.append(checker)
            self.last_connect_dic.setdefault(checker.port, time.time())
        else:
            logger.warn("PortChecker::init_checks protocol:%s, port:%s not checker!!!"
                        % (port_dic['type'], port_dic['port']))
def get_chrom_size(genome_2bit, chrom_size, circ_chrom):
    """Generate the chrom.size file and identify the available chromosomes
    in the 2Bit file.

    Parameters
    ----------
    genome_2bit : str
        Location of the 2bit genome file
    chrom_size : str
        Location to save the chrom.size file to
    circ_chrom : list
        List of chromosomes that are known to be circular

    Returns
    -------
    If successful 2 lists:
        [0] : List of the linear chromosomes in the 2bit file
        [1] : List of circular chromosomes in the 2bit file
    Returns (False, False) if there is an IOError
    """
    command_line_chrom_size = "twoBitInfo " + genome_2bit + " stdout | sort -k2rn"
    sub_proc_1 = None
    try:
        logger.info("twoBitInfo ...")
        with open(chrom_size, "w") as f_out:
            # NOTE(review): shell=True with an interpolated path; safe
            # only if genome_2bit is trusted.
            sub_proc_1 = subprocess.Popen(command_line_chrom_size, shell=True,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
            # NOTE(review): wait() before communicate() with PIPEs can
            # deadlock on large output — confirm output stays small.
            sub_proc_1.wait()
            out, err = sub_proc_1.communicate()
            f_out.write(out)
            logger.info(out)
            logger.warn(err)
    except (IOError, OSError) as msg:
        logger.fatal("I/O error({0} - twoBitInfo): {1}\n{2}".format(
            msg.errno, msg.strerror, command_line_chrom_size))
        # BUG FIX: the original unconditionally called
        # sub_proc_1.communicate() here, raising NameError when Popen
        # itself (or the open) failed.
        if sub_proc_1 is not None:
            out, err = sub_proc_1.communicate()
            logger.info(out)
            logger.warn(err)
        return (False, False)
    chrom_seq_list = []
    chrom_circ_list = []
    with open(chrom_size, "r") as f_in:
        for line in f_in:
            line = line.split("\t")
            # Circular chromosomes are matched by substring against the
            # configured names.
            if any(cc in line[0] for cc in circ_chrom):
                chrom_circ_list.append(line[0])
            else:
                chrom_seq_list.append(line[0])
    return (chrom_seq_list, chrom_circ_list)
def start(self):
    """Start the XMPP client and run its main loop.

    :return: None
    """
    # SECURITY FIX: the previous log line included self.password.
    logger.warn("XMPPClient::start listen on %s:%s" % (self.protocol, self.JID))
    self.xmpp_client = Client(self.JID, [self], self.settings)
    self.xmpp_client.connect()
    self.xmpp_client.run()
    # The run loop has exited; mark the session as no longer authorized.
    self.is_auth = False
def sell(self, price, eth=None, jpy=None):
    """Place a sell order at *price* for either an ETH amount or a JPY
    amount (converted to ETH at *price*); logs a warning if neither is
    given."""
    detail = ' price: ' + str(price) + ', amount ' + str(eth) + ' ether or ' + str(jpy) + ' JPY '
    logger.warn('selling with' + detail)
    if eth is not None:
        self.sell_by_eth(eth)
        return
    if jpy is not None:
        self.sell_by_eth(float(jpy) / float(price))
        return
    logger.warn('selling failed, eth and jpy both empty.')
def stop(self):
    """Request disconnection, then let the main loop run for 2 more
    seconds so the disconnect completes gracefully.

    :return: None
    """
    # SECURITY FIX: the previous log line included self.password.
    logger.warn("XMPPClient::stop listen on %s:%s" % (self.protocol, self.JID))
    assert self.xmpp_client
    self.xmpp_client.disconnect()
    self.xmpp_client.run(timeout=2)
    self.is_auth = False
def normalize_tags(self, hashed):
    """Normalize tag names, de-duplicate values, and cast donor_age to an
    int when possible.

    :param hashed: dict of raw tag name -> value(s).
    :return: dict of normalized tag name -> de-duplicated value.
    """
    fix_tag_names = {self.normalize(k): v for k, v in hashed.items()}
    uniq_values = {k: cmn.tryuniq(v) for k, v in fix_tag_names.items()}
    try:
        if 'donor_age' in uniq_values:
            age = int(uniq_values['donor_age'])
            uniq_values['donor_age'] = age
    except Exception as err:
        # BUG FIX: the original format string had no placeholder, so
        # `err` was silently dropped from the message.
        logger.warn(
            '#__warning: failed to cast donor age to number ({0})\n'.format(err)
            + str(uniq_values))
    return uniq_values
def del_service(self, service_obj):
    """Remove a service from the cluster's consistent-hash ring; logs and
    returns if the node is not present."""
    cur_node = service_obj.hash_key()
    if not self.__hash_ring.has_node(cur_node):
        logger.warn("ServiceCluster::del_service, node not exist!!!, service_obj:%s cur_node:%s"
                    % (service_obj, cur_node))
        return
    self.__hash_ring.remove_node(service_obj.hash_key())
    logger.warn("ServiceCluster::del_service success, service_obj:%s cur_node:%s"
                % (service_obj, cur_node))
def test_factor(self):
    """Smoke-test the logger at each level.

    NOTE(review): Python 2 syntax (bare ``print`` statement and
    ``except Exception, e``); this file mixes Python 2 and 3 fragments.
    """
    try:
        print
        logger.name = "test"
        logger.level = logging.DEBUG
        logger.info("test info")
        logger.debug("test debug")
        logger.warn("test warn")
        logger.error("test error")
    except Exception, e:
        print(e)
        raise AssertionError()
def mqtt_onmessage_param_decorator(self, mqttc, userdata, msg):
    """Wrapped on_message handler: optionally XXTEA-decrypt and
    JSON-decode msg.payload before delegating to the wrapped handler.

    NOTE(review): relies on closure names (``fun``, ``xxtea_key``,
    ``use_json_loads``) defined in the enclosing decorator scope.
    """
    try:
        # Decrypt first (if a key is configured), then JSON-decode.
        msg.payload = XXTEACrypto.instance(xxtea_key).decrypt(msg.payload) \
            if xxtea_key and msg.payload \
            else msg.payload
        msg.payload = ujson.loads(msg.payload) \
            if use_json_loads and msg.payload \
            else msg.payload
    except:
        # A malformed payload is dropped, not propagated.
        logger.warn("mqtt_onmessage_decorator Error!!! topic:%s payload:%s"
                    % (msg.topic, msg.payload))
        return
    return fun(self, mqttc, userdata, msg)
def validate_semantics(self, attrs):
    """Validate that the smRNA-Seq experiment type and the miRNA-Seq
    library strategy occur together (if-and-only-if relation).

    :param attrs: dict with an 'attributes' sub-dict.
    :return: True when consistent, False on mismatch or missing keys.
    """
    try:
        attributes = attrs['attributes']
        is_mirna_exp = attributes['experiment_type'] in ['smRNA-Seq']
        is_mirna_lib = attributes['library_strategy'] in ['miRNA-Seq']
        ok = is_mirna_lib if is_mirna_exp else not is_mirna_lib
        if not ok:
            logger.warn('#warn: __semantic_validation_failed__: smRNA-Seq library strategy if and only if miRNA-Seq experiment type\n')
        return ok
    except KeyError as e:
        logger.warn('#warn keyerror in validate_semantics, probably is not even syntactically valid\n')
        return False
def __on_ping_timeout(self, checker):
    """Handle a failed ping on one of the checked ports, escalating by
    how long the port has been unreachable."""
    self.timing_out = True
    since_connected = int(time.time() - self.last_connect_dic[checker.port])
    if since_connected <= TIMEOUT_GRACE:
        # Still inside the grace period: just note it and retry later.
        logger.warn('Unable to connect to my port:%s. Checking again later' % checker.port)
        return
    logger.error('Unable to connect to my port:%s for %s' % (checker.port, since_connected))
    if since_connected > 2 * TIMEOUT_GRACE:
        self._on_disconnected(checker, since_connected)
def send_alert(post_data):
    """POST a push notification to the Appcelerator Cloud notify endpoint.

    :param post_data: JSON-serializable notification payload.
    """
    url = "https://api.cloud.appcelerator.com/v1/push_notification/notify.json?key=%s" % ACS_KEY
    req = urllib2.Request(url)
    req.add_header('Content-Type', 'application/json')
    req.add_header('Cookie', '_session_id=%s' % ACS_SESSION_ID)
    try:
        response = urllib2.urlopen(req, json.dumps(post_data))
        parsed_response = json.loads(response.read())
        logger.debug(
            (u"succesfully sent push notification no %s"
             % parsed_response['response']['push_notification']['id']).encode("utf-8")
        )
    except urllib2.URLError as e:
        # NOTE(review): assumes the error has .read()/.code and a JSON
        # body — true for HTTPError, not for every URLError; confirm.
        error_response = json.loads(e.read())
        logger.warn(
            ("push notification failed with error code %s and message %s"
             % (e.code, error_response['meta']['message'])).encode('utf8')
        )
def web_unpick(self, data_ls):
    """Deserialize group rows from the web UI, dropping rows without an id.

    :param data_ls: list of row dicts.
    :return: deep-copied list of rows whose 'id' is truthy.
    """
    rows = copy.deepcopy(data_ls)
    valid = []
    for row in rows:
        if row['id']:
            valid.append(row)
        else:
            logger.warn("ServiceGrpMgr::web_unpick invalid params:%s" % row)
    return valid
def tp_redirect(request_hdl, service_type, url):
    """Redirect an HTTP request to a third-party service instance.

    :param request_hdl: request handler object (must support redirect()).
    :param service_type: service type to look up in the cacher.
    :param url: path to append to the service base address.
    :return: result of request_hdl.redirect().
    """
    redis_dic = ServiceMgrCacher.find_tp_service(service_type)
    ip = redis_dic['ip']
    is_https = 'https' in redis_dic['port']
    port = redis_dic['port']['https'] if is_https else redis_dic['port']['http']
    # BUG FIX: the original expression
    #   "https" if is_https else "http" + "://%s:%s/%s" % ...
    # parsed as ("https") if is_https else ("http" + "://..."), so
    # https redirects went to the literal URL "https".
    scheme = "https" if is_https else "http"
    full_url = scheme + "://%s:%s/%s" % (ip, port, url)
    logger.warn("ServiceMgrCacher::redirect full_url:%s!!!" % full_url)
    return request_hdl.redirect(full_url)
def post(self, *args, **kwargs):
    """Persist wechat config rows posted from the web UI.

    Decodes the rows, diffs them against the current data in the DB, and
    rolls the in-memory manager back to the previous data on failure.
    """
    try:
        data_ls = WechatMgr().web_unpick(kwargs['js_data'])
    except:
        logger.warn("SaveWechatData::post error!!!, js_data:%s traceback:%s"
                    % (kwargs['js_data'], traceback.format_exc()))
        return
    last_data_ls = WechatMgr().get_init_data_ls()
    try:
        DBWechatInst.update_diff(last_data_ls, data_ls)
        WechatMgr().init(data_ls)
    except:
        logger.warn("SaveWechatData::post error!!!, data_ls:%s traceback:%s"
                    % (data_ls, traceback.format_exc()))
        # Roll back to the previous in-memory state.
        WechatMgr().init(last_data_ls)
def redirect(request_hdl, service_type, url, rdm_type=RT_CPU_USAGE_RDM, rdm_param=1, protocol=PT_HTTPS):
    """Redirect an HTTP request to a service instance chosen by the cacher.

    :param request_hdl: request handler object (must support redirect()).
    :param service_type: service type to look up.
    :param url: path to append to the service base address.
    :param rdm_type: selection type; 0 = lowest cpu usage, 1 = consistent hash.
    :param rdm_param: int count when rdm_type is 0; hash-key list when it is 1.
    :param protocol: PT_HTTPS or the http alternative.
    :return: result of request_hdl.redirect().
    """
    ip, port = ServiceMgrCacher.find_port(service_type, rdm_type, rdm_param, protocol)
    # BUG FIX: the original expression
    #   "https" if protocol == PT_HTTPS else "http" + "://%s:%s/%s" % ...
    # parsed as ("https") if https else ("http" + "://..."), so https
    # redirects went to the literal URL "https".
    scheme = "https" if protocol == PT_HTTPS else "http"
    full_url = scheme + "://%s:%s/%s" % (ip, port, url)
    logger.warn("ServiceMgrCacher::redirect full_url:%s!!!" % (full_url))
    return request_hdl.redirect(full_url)