def _initByData(svrid, sc, rc, glc, conf):
    """Initialize this service process from serialized configuration blobs.

    Args:
        svrid: this server's id string, also used as the log file stem.
        sc: JSON string with the server map.
        rc: JSON string with the db (redis/mysql) configuration.
        glc: JSON string merged into the module-level global_config.
        conf: connection info triple logged below and passed to the pool
              initializers (exact semantics defined by those helpers).
    """
    global global_config
    global_config.update(json.loads(glc))
    # init log system...
    log_file = "%s.log" % svrid
    log_path = global_config["log_path"]
    ftlog.trace_stdout("FreeTime service(%s) up, log to %s/%s..." % (svrid, log_path, log_file))
    log_level = int(global_config.get("log_level", 0))
    # skip twisted noisy in no-debug level...
    if log_level > 0:
        Factory.noisy = False
    # FIX: the original branched on ide_debug() but both arms called
    # ftlog.initLog with identical arguments (the stdout variant was
    # commented out), so the conditional was dead code — collapsed it.
    ftlog.initLog(log_file, log_path, log_level)
    # init maps...
    ftlog.info("init from redis:", conf[0], conf[1], conf[2])
    _initServerMap(json.loads(sc))
    dbs = json.loads(rc)
    global_config['freetime:db'] = dbs
    if "redis" in dbs:
        _initRedisPoolMap(svrid, conf, dbs["redis"])
    if "mysql" in dbs:
        _initMysqlPoolMap(svrid, conf, dbs["mysql"])
def query(dst, data, userheader1='', userheader2='', timeout=2, notimeoutex=0, returnDeffer=0):
    """Send a query to server *dst* through our agent and wait for the reply.

    Args:
        dst: destination server id.
        data: payload string (JSON text, judging by the debug pretty-print).
        userheader1, userheader2: opaque routing headers.
        timeout: seconds the underlying protocol waits for a response.
        notimeoutex: forwarded to protocol.query (suppresses timeout
            exception when truthy — semantics owned by the protocol).
        returnDeffer: when 1, return the Deferred immediately instead of
            blocking the current stackless task on it.

    Returns:
        The reply message, or the raw Deferred when returnDeffer == 1.
    """
    src = ftcon.global_config["server_id"]
    server_conf = ftcon.getServerConf(src)
    agent_id = server_conf['agent']
    protocol = ftcon.serverid_protocol_map[agent_id]
    if ide_debug():
        ide_print_pack("Query[%s]" % dst.split("-")[0], json.loads(data))
    d = protocol.query(src, dst, userheader1, userheader2, data, timeout, notimeoutex=notimeoutex)
    if returnDeffer == 1:
        return d
    # block the current stackless tasklet until the deferred fires
    res = stackless.getcurrent()._fttask.waitDefer(d)
    # FIX: removed the needless `global _QUERY_SLOW_TIME` — the module
    # global is only read here, never assigned, so the declaration was noise.
    if isinstance(res, tuple):
        # timing-instrumented reply: (msg, write_time, receive_time)
        msg = res[0]
        query_write_time = res[1]
        query_recive_time = res[2]
        ct = time()
        if ct - query_write_time > _QUERY_SLOW_TIME:
            if performance.PERFORMANCE_NET:
                msg = performance.linkMsgTime('DO', msg)
            ftlog.warn('QUERY REPLAY SLOW ! reply=%0.6f' % (query_recive_time - query_write_time),
                       'schedule=%0.6f' % (ct - query_recive_time),
                       'total=%0.6f' % (ct - query_write_time),
                       'dst=', dst, 'request=', data, 'response=', msg)
        return msg
    return res
def send(dst, data, userheader1='', userheader2=''):
    """Fire-and-forget delivery of *data* to server *dst* via our agent link.

    Unlike query(), no query id is attached (empty string), so no reply
    is expected or waited for.
    """
    local_id = ftcon.global_config["server_id"]
    local_conf = ftcon.getServerConf(local_id)
    agent = local_conf['agent']
    if ide_debug():
        ide_print_pack("Send_[%s]" % dst.split("-")[0], json.loads(data))
    packed = agentmsg.packstr(local_id, dst, '', userheader1, userheader2, data)
    link = ftcon.serverid_protocol_map[agent]
    ftlog.debug('transport.write', packed)
    link.transport.write(packed)
def lineReceived(self, data):
    """Dispatch one framed message arriving on the server<->agent link.

    The first frame from a peer is treated as its registration (records
    peer_id -> protocol in the shared map). Subsequent frames are routed
    by query_id: empty = plain send; a foreign query_id = a query this
    service must answer; our own query_id = a response for a pending
    query issued from this process.
    """
    if performance.PERFORMANCE_NET:
        data = performance.linkMsgTime('LR', data)
    ftlog.debug('S2AProtocol->lineReceived', FTTasklet.concurrent_task_count, time(), data)
    src, dst, query_id, userheader1, userheader2, msg = agentmsg.unpack(data)
    if ide_debug():
        ide_print_pack("S2AP Recv [%5s]" % src.split("-")[0], json.loads(msg))
    if src is None or dst is None:
        ftlog.info("ERROR, recive a error format message")
        return
    if self.peer_id == 0:
        # first frame on this connection registers the peer
        self.peer_id = src
        ftcon.serverid_protocol_map[self.peer_id] = self
        ftlog.info('receive register, agentid=', self.peer_id)
        return
    _countProtocolPack(1, self)
    # data delivered via send() — no reply expected
    if query_id == '':
        self._runTasklet(data=msg, src=src, dst=dst, userheader1=userheader1,
                         userheader2=userheader2, time_recv=time())
    else:
        querysrc, _ = query_id.split('.')
        server_id = ftcon.global_config["server_id"]
        # a query addressed to this service: answer it in a tasklet
        if querysrc != server_id:
            self._runTasklet(data=msg, src=src, dst=dst, query_id=query_id,
                             userheader1=userheader1, userheader2=userheader2,
                             time_recv=time())
        # a response coming back for a query this process issued
        else:
            if userheader1 == 'RQ':
                # in-process async query to another message interface of this process
                self._runTasklet(data=msg, src=src, dst=dst, query_id=query_id,
                                 userheader1=userheader1, userheader2=userheader2,
                                 time_recv=time())
            else:
                # d = pending Deferred, c = its timeout canceller, t = write timestamp
                d, c, t = None, None, 0
                # ftlog.debug('lineReceived', query_id, id(_LIVE_MESSAGES), id(self))
                if query_id in _LIVE_MESSAGES:
                    d, c, t = _LIVE_MESSAGES[query_id]
                    del _LIVE_MESSAGES[query_id]
                else:
                    if query_id in _FAILED_MESSAGES:
                        # the reply arrived after the query already timed out
                        del _FAILED_MESSAGES[query_id]
                        ftlog.warn('QUERY TOO SLOW !!', query_id, msg)
                        if len(_FAILED_MESSAGES) > 100:
                            _FAILED_MESSAGES.clear()
                    else:
                        ftlog.warn('NOT KNOW of query_id->', query_id, msg)
                if d and c:
                    try:
                        # cancel the timeout, then wake the waiting tasklet
                        c.cancel()
                        d.callback((msg, t, time()))
                    except:
                        ftlog.error(msg)
def writeEncodeMsg(self, msg):
    """Serialize *msg* with the struct protocol, encrypt it, and write it
    to the user's client connection."""
    ftlog.debug('======== SEND TCP->', self.clientAddress, self.userId, msg)
    wire = structProtocolHelper.encode(msg)
    if len(wire) and ide_debug():
        ide_print_pack("SEND TCP_", json.loads(wire))
    wire = self._encode(wire)
    self.transport.write(wire)
def action(options):
    """Copy each source project's etc/src/src-robot/webroot into the build
    output directory, overlaying them in the order of the configured
    project list.

    Args:
        options: build options object; reads .env path map and
            .projectlist, and stores the copied file list on ._pyfiles.

    Returns:
        1 on success (build-step convention).
    """
    # ensure output directories exist (False = do not clean existing content)
    for mp in (options.env['log_path'], options.env['webroot_path'], options.env['backup_path']):
        fsutils.makePath(mp, False)
    # bin_path is recreated from scratch (True = clean)
    fsutils.makePath(options.env['bin_path'], True)
    # FIX: os.environ values are strings, so the original truthy test
    # `os.environ.get('RUN_IN_DOCKER', 0)` treated RUN_IN_DOCKER=0 as
    # enabled. Only a non-empty value other than "0" enables docker mode.
    if os.environ.get('RUN_IN_DOCKER', '0') not in ('', '0'):
        # in dev-docker mode webroot is symlinked, no copy needed
        copySourceInDocker(options)
        actlog.log('docker mode use original project src path !')
        return 1
    paths = []
    for proj in options.projectlist:
        src = fsutils.appendPath(proj['path'], 'src')
        if fsutils.dirExists(src):
            paths.append({'path': src, 'include': [], 'exclude': [".*\\.svn\\.*", ".*pyc"]})
        else:
            # freetime project layout: take only the freetime/ subtree
            paths.append({'path': proj['path'],
                          "include": ["^/freetime/.*"],
                          "exclude": [".*\\.svn\\.*", ".*pyc", ".*\\logserver\\.*", ".*\\cold-data-server\\.*"]})
    # in IDE debug mode link instead of copying to speed up iteration
    copier = fsutils.linkTree if ide_debug() else fsutils.copyTree
    _, copy_files = copier(paths, options.env['bin_path'], logfun=actlog.log)
    setattr(options, '_pyfiles', copy_files)
    return 1
def _response(dst, data, queryid, userheader1, userheader2):
    """Route a response message toward *dst* through the correct agent link.

    Resolution order for the outgoing connection: our own configured
    agent, else a direct connection to *dst*, else the destination's
    configured agent, else *dst* itself.
    """
    src = ftcon.global_config["server_id"]
    own_conf = ftcon.getServerConf(src)
    if 'agent' in own_conf:
        agent_id = own_conf['agent']
    elif dst in ftcon.serverid_protocol_map:
        # we hold a direct connection to the destination
        agent_id = dst
    else:
        dst_conf = ftcon.getServerConf(dst)
        agent_id = dst_conf['agent'] if 'agent' in dst_conf else dst
    message = agentmsg.packstr(src, dst, queryid, userheader1, userheader2, data)
    protocol = ftcon.serverid_protocol_map[agent_id]
    if performance.PERFORMANCE_NET:
        message = performance.linkMsgTime('LW', message)
    ftlog.debug('transport.write', message)
    if ide_debug():
        ide_print_pack("Resp [%5s]" % dst.split("-")[0], data)
    protocol.transport.write(message)
def doClientTcpMsg(self):
    """Handle one TCP message received from a user client.

    Decodes the frame, applies cheap sanity checks on the JSON text,
    then appends it to a per-user serial queue so that messages from the
    same user are processed strictly one at a time.
    """
    msgstr = ftsvr.getTaskPack()
    if not msgstr:
        return
    ftlog.debug('======== RECEIVE TCP->', self.clientAddress, 'pack=|', repr(msgstr), '|')
    msgstr = structProtocolHelper.decode(msgstr)
    if not gdata.initializeOk():
        ftlog.warn(
            'COTCPProto-doClientTcpMsg not initialize ok, ignore this message :',
            msgstr)
        return
    if len(msgstr) < 11:  # shortest plausible message: {"cmd":"a"}
        ftlog.warn('simple json format check too short ! [' + repr(msgstr) + ']')
        return
    if msgstr[0] == '[':
        # proxy machine verification message
        _proxyCheck(msgstr, self)
        return
    if msgstr[0] != '{':
        ftlog.warn('simple json format check not start with { ! [' + repr(msgstr[0:10]) + '...]')
        return
    # the trailing bytes may be }\n\0 or }\n or }\0 or } or }\r\n —
    # accept a '}' anywhere in the last five characters
    if msgstr[-1] == '}':
        pass
    elif msgstr[-2] == '}':
        pass  # msgstr = msgstr[0:-1]
    elif msgstr[-3] == '}':
        pass  # msgstr = msgstr[0:-2]
    elif msgstr[-4] == '}':
        pass  # msgstr = msgstr[0:-3]
    elif msgstr[-5] == '}':
        pass  # msgstr = msgstr[0:-4]
    else:
        ftlog.warn('simple json format check not end with } ! [...' + repr(msgstr[-10:]) + ']')
        return
    if ide_debug():
        ide_print_pack("RECV TCP_", json.loads(msgstr))
    userId = strutil.getJsonInt(msgstr, 'userId', 0)
    if userId <= 0:
        ftlog.warn(
            'COTCPProto-doClientTcpMsg the userId error, ignore this message :',
            userId, msgstr)
        return
    # per-user serial queue: first writer processes, later writers just enqueue
    msgqueue = _MSG_QUEUES.get(userId, None)
    if msgqueue == None:
        msgqueue = []
        _MSG_QUEUES[userId] = msgqueue
        ftlog.debug('creat user msgqueue !', userId)
    # conn_lost must always get through; everything else is dropped when
    # the queue backs up. TODO(review): how was this empirical limit chosen?
    if len(msgqueue) > _MSG_QUEUES_MAX_LEN and msgstr.find(
            '"conn_lost"') < 0:
        ftlog.warn(
            'the user msgqueue queue too large !!, ignore this message :',
            userId)
        return
    msgqueue.append(msgstr)
    if len(msgqueue) > 1:
        # another invocation is already draining this user's queue
        ftlog.debug('the user msgqueue is process, wait ...', userId)
        return
    ftlog.debug('process user msgqueue !', userId)
    while 1:
        msgstr1 = msgqueue[0]
        try:
            self._processUserMessage(userId, msgstr1)
        except:
            ftlog.error('ERROR _processUserMessage', userId, msgstr1)
        # pop only after processing so concurrent arrivals keep queueing
        del msgqueue[0]
        if len(msgqueue) == 0:
            break
    del _MSG_QUEUES[userId]
    ftlog.debug('remove user msgqueue !', userId)
def lineReceived(self, data):
    """Dispatch one framed message arriving on the server<->agent link.

    The first frame from a peer is treated as its registration (records
    peer_id -> protocol in the shared map). Subsequent frames are routed
    by query_id: empty = plain send; a foreign query_id = a query this
    service must answer; our own query_id = a response for a pending
    query issued from this process.
    """
    if performance.PERFORMANCE_NET:
        data = performance.linkMsgTime('LR', data)
    ftlog.debug('S2AProtocol->lineReceived', FTTasklet.concurrent_task_count, time(), data)
    src, dst, query_id, userheader1, userheader2, msg = agentmsg.unpack(
        data)
    if ide_debug():
        ide_print_pack("S2AP Recv [%5s]" % src.split("-")[0], json.loads(msg))
    if src is None or dst is None:
        ftlog.info("ERROR, recive a error format message")
        return
    if self.peer_id == 0:
        # first frame on this connection registers the peer
        self.peer_id = src
        ftcon.serverid_protocol_map[self.peer_id] = self
        ftlog.info('receive register, agentid=', self.peer_id)
        return
    _countProtocolPack(1, self)
    # data delivered via send() — no reply expected
    if query_id == '':
        self._runTasklet(data=msg, src=src, dst=dst, userheader1=userheader1,
                         userheader2=userheader2, time_recv=time())
    else:
        querysrc, _ = query_id.split('.')
        server_id = ftcon.global_config["server_id"]
        # a query addressed to this service: answer it in a tasklet
        if querysrc != server_id:
            self._runTasklet(data=msg, src=src, dst=dst, query_id=query_id,
                             userheader1=userheader1, userheader2=userheader2,
                             time_recv=time())
        # a response coming back for a query this process issued
        else:
            if userheader1 == 'RQ':
                # in-process async query to another message interface of this process
                self._runTasklet(data=msg, src=src, dst=dst, query_id=query_id,
                                 userheader1=userheader1, userheader2=userheader2,
                                 time_recv=time())
            else:
                # d = pending Deferred, c = its timeout canceller, t = write timestamp
                d, c, t = None, None, 0
                # ftlog.debug('lineReceived', query_id, id(_LIVE_MESSAGES), id(self))
                if query_id in _LIVE_MESSAGES:
                    d, c, t = _LIVE_MESSAGES[query_id]
                    del _LIVE_MESSAGES[query_id]
                else:
                    if query_id in _FAILED_MESSAGES:
                        # the reply arrived after the query already timed out
                        del _FAILED_MESSAGES[query_id]
                        ftlog.warn('QUERY TOO SLOW !!', query_id, msg)
                        if len(_FAILED_MESSAGES) > 100:
                            _FAILED_MESSAGES.clear()
                    else:
                        ftlog.warn('NOT KNOW of query_id->', query_id, msg)
                if d and c:
                    try:
                        # cancel the timeout, then wake the waiting tasklet
                        c.cancel()
                        d.callback((msg, t, time()))
                    except:
                        ftlog.error(msg)