def update_enhance(self, col_name, id_dict, operate_str, mongo_str_dict):
    """
    update data with the given operator, col_name is the name of the collection
    operate_str is a mongo update operator (e.g. "$set", "$inc")
    id_dict is the match filter, mongo_str_dict holds the fields to apply
    """
    if self.db is None:
        return False
    col = collection.Collection(self.db, col_name)
    set_value_dict = {operate_str: mongo_str_dict}
    record = [id_dict, set_value_dict]
    LOG.info("record: %s" % (record))
    result = True
    try:
        ret = col.update(record[0], record[1], safe=True)
        LOG.info("ret: %s" % (ret))
        # pymongo reports whether an existing document was modified
        # under the 'updatedExisting' key
        if not ret['updatedExisting']:
            result = False
    except Exception:
        result = False
    return result
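# Usage sketch for update_enhance (hedged; the wrapper class name
# 'MongoDB', the host, and the field names below are illustrative,
# not confirmed by this module):
#
#   db = MongoDB('127.0.0.1', 27017, 'rpg')
#   ok = db.update_enhance('player',
#                          {'passport_id': 'abc'},  # match filter
#                          '$inc',                  # mongo update operator
#                          {'level': 1})            # fields it applies to
#   # ok is False when no existing document matched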
def __save_train_model(self):
    if self.__train_step % 2e4 == 0:
        st = time.time()
        self.rainbow.save('./Model/', name='model_{}.pth'.format(self.__train_step))
        et = time.time()
        cost_time = (et - st) * 1000
        LOG.info('saving rainbow costs {} ms at train step {}'.format(cost_time, self.__train_step))
def _receive_frame_info(self, fd):
    client_socket = self.__fd_to_socket[fd]
    header = client_socket.recv(4, socket.MSG_WAITALL)
    LOG.info("the length of header is {}, header: {}".format(len(header), header))
    if len(header) != 4:
        LOG.error('header length is not 4')
        return None
    data_length = struct.unpack('i', header)[0]
    data_bin = client_socket.recv(data_length, socket.MSG_WAITALL)
    if len(data_bin) != data_length:
        LOG.error('the length of data_bin is not equal to data length')
        return None
    # the first 16 bytes are the magic number, reward, terminal flag and
    # frame index; the frame data follows
    magic_number, reward, terminal, frame_index = struct.unpack(
        'ifii', data_bin[0:16])
    if magic_number != MAGIC_NUMBER:
        LOG.error('magic number error')
        return None
    np_array = np.frombuffer(data_bin[16:], np.uint8)
    image = np.reshape(np_array, (IMAGE_WIDTH, IMAGE_HEIGHT))
    done = bool(terminal == 1)
    LOG.debug(
        'receive frame information from fd {}: frame index = {}, reward = {}'
        .format(client_socket.fileno(), frame_index, reward))
    return image, reward, done, frame_index
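# Hedged sketch of the matching sender for _receive_frame_info, assuming
# the peer writes a 4-byte native-endian length header followed by the
# 'ifii' fields and the raw frame bytes; the function name is illustrative:
def _send_frame_info_sketch(sock, image, reward, done, frame_index):
    payload = struct.pack('ifii', MAGIC_NUMBER, reward,
                          1 if done else 0, frame_index) + image.tobytes()
    # 4-byte length header, mirroring struct.unpack('i', header) above
    sock.sendall(struct.pack('i', len(payload)) + payload)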
def _conf_init(self):
    conf_file = "/".join(
        [self.env.basic_conf_dir(), self.env.basic_conf_file()])
    self.conf = AnalyzeConf(conf_file)
    LOG.set_log_level(self.conf.log_level())
def prepareENV():
    """
    1. get the parameters
    2. download and install the ipfs binaries
    3. update the config file with the installed paths
    """
    global CURRENT_PATH
    curPath = os.path.dirname(os.path.realpath(__file__))
    CURRENT_PATH = curPath
    ipfsConfig = ipfs_config.IPFSConfig(
        os.path.join(curPath, "../config/config.yaml"))
    if ipfsConfig.IPFS_BIN is not None:
        LOG.info(
            "ipfs bin already exists, no need to reinstall; check whether you want to update it")
        return ipfsConfig
    createDirs(ipfsConfig)
    downloadFile(ipfsConfig, ipfsConfig.IPFS_FILE_URL)
    downloadFile(ipfsConfig, ipfsConfig.IPFS_CLUSTER_SERVICE_FILE_URL)
    downloadFile(ipfsConfig, ipfsConfig.IPFS_CLUSTER_CTL_FILE_URL)
    IPFS_BIN = os.path.join(ipfsConfig.BIN_PATH, ipfsConfig.IPFS_BIN_NAME)
    copyFileToBin(
        os.path.join(ipfsConfig.TMP_PATH, ipfsConfig.IPFS_FOLDER,
                     ipfsConfig.IPFS_BIN_NAME), IPFS_BIN)
    IPFS_CLUSTER_SERVICE_BIN = os.path.join(
        ipfsConfig.BIN_PATH, ipfsConfig.IPFS_CLUSTER_SERVICE_BIN_NAME)
    copyFileToBin(
        os.path.join(ipfsConfig.TMP_PATH,
                     ipfsConfig.IPFS_CLUSTER_SERVICE_FOLDER,
                     ipfsConfig.IPFS_CLUSTER_SERVICE_BIN_NAME),
        IPFS_CLUSTER_SERVICE_BIN)
    IPFS_CLUSTER_CTL_BIN = os.path.join(ipfsConfig.BIN_PATH,
                                        ipfsConfig.IPFS_CLUSTER_CTL_BIN_NAME)
    copyFileToBin(
        os.path.join(ipfsConfig.TMP_PATH, ipfsConfig.IPFS_CLUSTER_CTL_FOLDER,
                     ipfsConfig.IPFS_CLUSTER_CTL_BIN_NAME),
        IPFS_CLUSTER_CTL_BIN)
    # update config file
    yamlPath = os.path.join(curPath, "../config/config.yaml")
    with open(yamlPath, 'r', encoding='utf-8') as f:
        cfg = f.read()
    d = yaml.safe_load(cfg)  # parse the YAML text into a dict
    d["ipfs"]["ipfs_bin"] = IPFS_BIN
    d["ipfs"]["ipfs_cluster_service_bin"] = IPFS_CLUSTER_SERVICE_BIN
    d["ipfs"]["ipfs_cluster_ctl_bin"] = IPFS_CLUSTER_CTL_BIN
    d["ipfs"]["tmp_path"] = ipfsConfig.TMP_PATH
    d["ipfs"]["bin_path"] = ipfsConfig.BIN_PATH
    with open(yamlPath, 'w', encoding='utf-8') as f:
        yaml.dump(d, f)
    # reload ipfsConfig with the updated paths
    ipfsConfig = ipfs_config.IPFSConfig(
        os.path.join(curPath, "../config/config.yaml"))
    return ipfsConfig
def run_strategy():
    stocks_brief = get_all_stock()
    if stocks_brief is None:
        LOG.log('run_strategy get_all_stock empty')
        return
    stock_detail_read = StockRead()
    strategy = get_strategy(StrategyType.TURTLE)
    strategy_data = pd.DataFrame(columns=stock_brief_list)
    data_list = [[0] * len(stock_brief_list) for _ in range(1)]
    date = None
    for brief in stocks_brief:
        stock_list = None
        if brief.market == "主板":  # main-board stocks only
            stock_list = stock_detail_read.get_stock(brief.ts_code, None)
        if stock_list is not None:
            data_frame = convert_to_dataframe(stock_list)
            ret = strategy.enter(data_frame)
            if date is None:
                date = data_frame.iloc[0]['DATE']
            if ret:
                LOG.log('stock can enter %s name %s' % (brief.ts_code, brief.name))
                brief_list = brief.convert_to_list()
                data_list[0] = brief_list
                data = pd.DataFrame(data_list, columns=stock_brief_list)
                strategy_data = strategy_data.append(data, ignore_index=True)
    file_path = get_save_file(date)
    if os.path.exists(file_path):
        os.remove(file_path)
    strategy_data.to_csv(file_path)
def get_save_file(date):
    basedir = os.path.dirname(os.path.dirname(__file__))
    if date is not None:
        current_time = date
    else:
        current_time = datetime.datetime.now().strftime("%Y%m%d")
    LOG.log('BaseStock basedir %s ' % basedir)
    return basedir + '/data/strategy_' + current_time + '.csv'
def _init_worker(self):
    for worker_index in range(MAX_WORKER_COUNT):
        worker = Worker(worker_index, self.__master)
        self.__workers.append(worker)
        self.__worker_index_to_fd.append(-1)
    LOG.info('init {} workers successfully'.format(MAX_WORKER_COUNT))
def __init__(self, db_name):
    if db_name is not None:
        self.DB_NAME = db_name
    else:
        basedir = os.path.dirname(os.path.dirname(__file__))
        LOG.log('BaseStock basedir %s ' % basedir)
        self.DB_NAME = basedir + '/data/stock.db'
    self.open_db()
def extract(tar_path, target_path):
    import tarfile
    try:
        with tarfile.open(tar_path, "r:gz") as tar:
            tar.extractall(target_path)
    except Exception:
        LOG.info("failed to extract file %s" % tar_path)
def check_passport_id(passport_id):
    # check whether the account (passport_id) may create another player
    LOG.info("passport_id: %s" % (passport_id))
    result = False
    if passport_id is not None:
        passport_id_count = rpg_access.query(
            COL_PLAYER, {'passport_id': passport_id}).count()
        if passport_id_count < playerconfig.MAX_PLAYER_CREATED:
            result = True
    return result
def verify(self):
    # [uint32 channel_id][string channel_name][string key]
    LOG.info("channel_id: %s, channel_name_len: %s, channel_name: %s, config.KEY: %s"
             % (self._channel_id, len(self._channel_name), self._channel_name, config.KEY))
    fmt = '!II%ssI%ss' % (len(self._channel_name), len(config.KEY))
    verify_string = struct.pack(fmt, self._channel_id,
                                len(self._channel_name), self._channel_name,
                                len(config.KEY), config.KEY)
    LOG.info('verify_string: %s' % (repr(verify_string)))
    self._sock.send(verify_string)
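# Hedged sketch of how a receiver could parse the verify packet built
# above; the server side is not part of this module, so this only
# mirrors the '!II%ssI%ss' layout:
def parse_verify_sketch(buf):
    channel_id, name_len = struct.unpack('!II', buf[:8])
    (name,) = struct.unpack('!%ss' % name_len, buf[8:8 + name_len])
    (key_len,) = struct.unpack('!I', buf[8 + name_len:12 + name_len])
    key = buf[12 + name_len:12 + name_len + key_len]
    return channel_id, name, key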
def _start_worker(self):
    for worker in self.__workers:
        # bind the bound method directly instead of a lambda, which would
        # capture the loop variable by reference
        thread = threading.Thread(target=worker.work)
        thread.start()
        self.__worker_threads.append(thread)
    LOG.info('start {} workers successfully'.format(MAX_WORKER_COUNT))
def __update_daily_stocks_thread(self, stocks_brief):
    """
    update daily data for each stock
    :return:
    """
    for index, stock_brief in enumerate(stocks_brief):
        LOG.log('__update_daily_stocks_thread index %s size %s' % (index, len(stocks_brief)))
        self.__update_data_task(stock_brief)
def _finish_server(self):
    """
    Release the tcp server and epoll
    """
    self.__epoll.unregister(self.__server_socket.fileno())
    self.__epoll.close()
    self.__server_socket.close()
    LOG.info('finish server successfully')
def initIPFS(ipfsBin, ipfsPath):
    """
    init ipfs under the given ipfsPath
    :param ipfsBin:
    :param ipfsPath:
    :return:
    """
    os.putenv("IPFS_PATH", ipfsPath)
    retcode = subprocess.call([ipfsBin, "init"])
    LOG.info(retcode)
def _send_action_info(self, fd, action_info):
    client_socket = self.__fd_to_socket[fd]
    action_index = action_info[0]
    frame_index = action_info[1]
    data_bin = struct.pack("iii", MAGIC_NUMBER, action_index, frame_index)
    client_socket.sendall(data_bin)
    LOG.debug('send action information to fd {}, action index = {}'.format(
        fd, action_index))
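# Hedged sketch of the client-side counterpart to _send_action_info,
# assuming the peer reads exactly 12 bytes and validates the magic
# number; the function name is illustrative:
def _receive_action_info_sketch(sock):
    data_bin = sock.recv(12, socket.MSG_WAITALL)
    magic_number, action_index, frame_index = struct.unpack('iii', data_bin)
    if magic_number != MAGIC_NUMBER:
        return None
    return action_index, frame_index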
def do_monitor(self, data_list):
    for data in data_list:
        if 'gpu' in data['tags']:
            device_name = data['measurement']
            amd_gpu_temperature = data['fields']['temperature']
            if amd_gpu_temperature > HEAT_LIMIT:
                LOG.error(
                    'Current temperature of AMD GPU-%s is %d > %d! Killing all miners...',
                    device_name, amd_gpu_temperature, HEAT_LIMIT)
                self.switch_off_miner_overheat(SLEEP_TIMEOUT_MINS)
def __init__(self, influxdb_client, exit_flag_event):
    """ Initialize PySensors (lm-sensors) """
    super(LmSensorsMetrics, self).__init__(influxdb_client=influxdb_client,
                                           watchdog=LmSensorsWatchdog(exit_flag_event),
                                           exit_flag_event=exit_flag_event)
    LOG.info('Initializing Lm-sensors...')
    sensors.init()
def query_one(self, col_name, record, fields=None):
    """
    query a single document
    record is a dictionary
    """
    if self.db is None:
        return
    col = collection.Collection(self.db, col_name)
    LOG.info("record: %s" % (record))
    result = col.find_one(record, fields=fields)
    LOG.info("result: %s" % (result))
    return result
def _close_client(self, fd):
    self.__epoll.unregister(fd)
    self.__fd_to_socket[fd].close()
    del self.__fd_to_socket[fd]
    index = self.__fd_to_worker_index[fd]
    self.__worker_index_to_fd[index] = -1
    del self.__fd_to_worker_index[fd]
    LOG.info('close client {} successfully'.format(fd))
def __init__(self, influxdb_client, exit_flag_event):
    """ Initialize NVML """
    super(NvidiaMetrics, self).__init__(influxdb_client=influxdb_client,
                                        watchdog=NvidiaWatchdog(exit_flag_event),
                                        exit_flag_event=exit_flag_event)
    LOG.info('Initializing NVML sensors....')
    nvmlInit()
def __init__(self, index, master):
    LOG.info('init worker-{}'.format(index))
    self.__index = index
    self.__master = master
    self.__env = Env(index)
    # self.__device = torch.device('cuda')
    self.__device = torch.device('cpu')
    self.__step = 0
def __init__(self, adds, channel_id, channel_name):
    CCallback.__init__(self)
    self._buffer = ''
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._channel_id = channel_id
    self._channel_name = channel_name
    self._rqstid = {}
    try:
        self._sock.connect(adds)
        self.verify()
    except Exception:
        LOG.info("connection failed")
        sys.exit(0)
def dataReceived(self, data):
    self._buffer = self._buffer + data
    # [uint32_t playerid][uint16_t msg_channel][uint16_t cmd][string amf3_data]
    LOG.info('len data: %s' % (len(data)))
    while len(self._buffer) >= 12:
        (playerid, msg_channel, cmd, amf3_data_len) = struct.unpack("!IHHI", self._buffer[0:12])
        LOG.info("playerid: %s, msg_channel: %s, cmd: %s, amf3_data_len: %s"
                 % (playerid, msg_channel, pattern.to_hex(cmd), amf3_data_len))
        amf3_data_fmt = '!%ss' % (amf3_data_len)
        if len(self._buffer[12:]) >= amf3_data_len:
            rqstid = uuid.uuid1()
            self._rqstid[rqstid] = {'rqstid': rqstid, 'msg_channel': msg_channel,
                                    'playerid': playerid, 'cmd': cmd}
            (amf3_data,) = struct.unpack(amf3_data_fmt, self._buffer[12:12 + amf3_data_len])
            LOG.info("amf3_data: %s" % repr(amf3_data))
            self._buffer = self._buffer[12 + amf3_data_len:]
            decoder = Decoder(amf3=True)
            decoded = decoder.decode(amf3_data)
            LOG.info("data: %s" % (decoded))
            try:
                callback = self._callback[cmd]
                callback(self, playerid, rqstid, decoded)
            except KeyError:
                LOG.error("CMD error, the CMD: %s can not be handled" % (pattern.to_hex(cmd)))
        else:
            # incomplete message, wait for more data
            break
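# Hedged test helper: build one inbound message in the layout that
# dataReceived expects; the helper name and values are illustrative.
def build_msg_sketch(playerid, msg_channel, cmd, amf3_data):
    return struct.pack("!IHHI", playerid, msg_channel, cmd,
                       len(amf3_data)) + amf3_data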
def isInuse(ipList, port):
    flag = True
    for ip in ipList:
        # a fresh socket per attempt: a socket cannot be reused after
        # a failed connect
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, int(port)))
            s.shutdown(2)
            LOG.info('%d is inuse' % port)
            flag = True
            break
        except Exception:
            LOG.info('%d is free' % port)
            flag = False
    return flag
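# Usage sketch (hedged; the host list and the caller below are
# illustrative, not defined in this module):
#
#   if not isInuse(['127.0.0.1'], 5001):
#       startDaemonOnPort(5001)  # hypothetical caller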
def mkdir(path):
    # strip leading and trailing whitespace
    path = path.strip()
    # strip trailing backslashes
    path = path.rstrip("\\")
    isExists = os.path.exists(path)
    if not isExists:
        os.makedirs(path)
        LOG.info('create path: %s succeed' % path)
        return True
    else:
        LOG.info('path: %s already exists' % path)
        return False
def _init_server(self):
    """
    Create the tcp server and epoll, and listen on the configured port
    """
    self.__server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    config_address = (self.server_ip, self.server_port)
    self.__server_socket.bind(config_address)
    self.__server_socket.listen(MAX_WORKER_COUNT)
    self.__server_socket.setblocking(False)
    self.__epoll.register(self.__server_socket.fileno(), select.EPOLLIN)
    LOG.info('init server successfully')
    return True
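# Hedged sketch of an event loop tying _init_server, _accept_client and
# _receive_frame_info together; the real loop lives elsewhere in this
# class, so this is illustrative only:
#
#   self._init_server()
#   while self.__running:
#       for fd, event in self.__epoll.poll(1):
#           if fd == self.__server_socket.fileno():
#               self._accept_client()
#           elif event & select.EPOLLIN:
#               frame_info = self._receive_frame_info(fd)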
def insert_table(self, data: StockBrief):
    """
    insert a row
    :param data:
    :return:
    """
    insert_sql = '''INSERT INTO {stock_name} '''.format(stock_name=self.__table_name) \
                 + self.table_field + self.__convert_data_sql(data)
    search_data = self.search_table(data.ts_code)
    if search_data is None or len(search_data) == 0:
        LOG.log('insert_table ts_code = %s' % data.ts_code)
        self.connect.execute(insert_sql)
        # self.connect.commit()
    else:
        LOG.log('insert_table ts_code = %s already exists' % data.ts_code)
def initIPFSService(ipfsServiceBin, ipfsServicePath):
    """
    init ipfs-cluster-service under the given path
    :param ipfsServiceBin:
    :param ipfsServicePath:
    :return:
    """
    os.putenv(
        "CLUSTER_SECRET",
        "8512641fc38bd61cd784b09def7c72075c4ad4995d1f8d91db2bef4433fdb2d5")
    os.putenv("IPFS_CLUSTER_PATH", ipfsServicePath)
    LOG.info(
        "ipfs-cluster-service init cmd: %s init, with IPFS_CLUSTER_PATH: %s"
        % (ipfsServiceBin, ipfsServicePath))
    retcode = subprocess.call([ipfsServiceBin, "init"])
    LOG.info(retcode)
def do_monitor(self, data_list):
    for data in data_list:
        device_name = data['measurement']
        temperature = int(data['fields']['temperature'])
        power_usage = int(data['fields']['power_usage'])
        if temperature > HEAT_LIMIT:
            LOG.error(
                'Current temperature of Nvidia GPU-%s is %d > %d! Killing all miners...',
                device_name, temperature, HEAT_LIMIT)
            self.switch_off_miner_overheat(HEATAGE_SLEEP_TIMEOUT_MINS)
        if power_usage < POWER_LIMIT:
            LOG.error(
                'Current power usage from Nvidia GPU-%s is %d < %d! Killing all miners...',
                device_name, power_usage, POWER_LIMIT)
            self.switch_off_miner_underpowered(WATTAGE_SLEEP_TIMEOUT_MINS)
def query(self, col_name, record, skip_num=None, limit_num=None, fields=None, sort=None):
    """
    query documents
    record is a dictionary
    """
    if self.db is None:
        return
    col = collection.Collection(self.db, col_name)
    LOG.info("record: %s" % (record))
    if skip_num is not None and limit_num is not None:
        result = col.find(record, skip=skip_num, limit=limit_num, fields=fields, sort=sort)
    elif skip_num is not None:
        result = col.find(record, skip=skip_num, fields=fields, sort=sort)
    elif limit_num is not None:
        result = col.find(record, limit=limit_num, fields=fields, sort=sort)
    else:
        result = col.find(record, fields=fields, sort=sort)
    return result
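# Usage sketch for query (hedged; the collection and field names are
# illustrative): page through documents sorted by level, descending.
#
#   cursor = db.query('player', {'camp': 1},
#                     skip_num=0, limit_num=20,
#                     fields={'name': 1, 'level': 1},
#                     sort=[('level', -1)])
#   for doc in cursor:
#       LOG.info("doc: %s" % (doc))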
def _accept_client(self):
    client_socket, _ = self.__server_socket.accept()
    client_socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
    fd = client_socket.fileno()
    self.__epoll.register(fd, select.EPOLLIN | select.EPOLLOUT)
    self.__fd_to_socket[fd] = client_socket
    # assign the new client to the first idle worker
    for index in range(len(self.__workers)):
        if self.__worker_index_to_fd[index] == -1:
            self.__worker_index_to_fd[index] = fd
            self.__fd_to_worker_index[fd] = index
            LOG.info('assign fd {} to worker {}'.format(fd, index))
            break
    LOG.info('accept client {} successfully'.format(fd))
def insert_table(self, table, data: StockData):
    """
    insert a row
    :param table:
    :param data: StockData
    :return:
    """
    insert_sql = '''INSERT INTO "{stock_name}" '''.format(stock_name=table) \
                 + self.table_field + self.__convert_data_sql(data)
    search_data = self.search_table(table, data.date)
    if search_data is None or len(search_data) == 0:
        LOG.log('insert_table ts_code %s date %s' % (table, data.date))
        self.connect.execute(insert_sql)
        # self.connect.commit()
    else:
        LOG.log('insert_table ts_code %s date %s already exists' % (table, data.date))
def startIPFSServiceDaemon(nodeNum, ipfsServiceBin, ipfsServicePath):
    os.putenv("IPFS_CLUSTER_PATH", ipfsServicePath)
    os.putenv(
        "CLUSTER_SECRET",
        "8512641fc38bd61cd784b09def7c72075c4ad4995d1f8d91db2bef4433fdb2d5")
    if nodeNum == 0:
        cmd = "nohup %s daemon" % ipfsServiceBin
        LOG.info("command: %s " % cmd)
        child = subprocess.Popen(
            cmd,
            stdout=open(os.path.join(ipfsServicePath, "ipfs_service.log"), 'w'),
            stderr=open(os.path.join(ipfsServicePath, "error.log"), 'a'),
            shell=True)
        pid = child.pid
        pgid = os.getpgid(pid)
        LOG.info("child pid: %d, parent pid: %d" % (child.pid, pgid))
    else:
        # bootstrap from node0, e.g.
        # /ip4/127.0.0.1/tcp/9096/ipfs/QmStfZRBxoNFb8KnKVzxumA1CzEHgMVU84cXRtT9KjpUCA
        # the cluster id comes from service.json: cluster.id
        # the port comes from service.json: cluster.listen_multiaddress
        ipfsConfig = ipfs_config.IPFSConfig(
            os.path.join(CURRENT_PATH, "../config/config.yaml"))
        ipfsServiceNodeConfigFile = os.path.join(ipfsConfig.ROOT_PATH,
                                                 "node" + str(0), "config",
                                                 "service.json")
        with open(ipfsServiceNodeConfigFile, 'r') as load_f:
            load_dict = json.load(load_f)
        cluster_id = load_dict["cluster"]["id"]
        listen_port = load_dict["cluster"]["listen_multiaddress"].split("/")[-1]
        bootStrapNode = "/ip4/127.0.0.1/tcp/%s/ipfs/%s" % (listen_port, cluster_id)
        cmd = "nohup %s daemon --bootstrap %s" % (ipfsServiceBin, bootStrapNode)
        LOG.info("command: %s " % cmd)
        child = subprocess.Popen(
            cmd,
            stdout=open(os.path.join(ipfsServicePath, "ipfs_service.log"), 'w'),
            stderr=open(os.path.join(ipfsServicePath, "error.log"), 'a'),
            shell=True)
        pid = child.pid
        pgid = os.getpgid(pid)
        LOG.info("child pid: %d, parent pid: %d" % (child.pid, pgid))
    time.sleep(15)
def update(self, col_name, record):
    """
    update data, col_name is the name of the collection
    record is a list containing two dictionaries: the match filter
    and the update document
    """
    if self.db is None:
        return False
    col = collection.Collection(self.db, col_name)
    LOG.info("record: %s" % (record))
    result = True
    try:
        ret = col.update(record[0], record[1], safe=True)
        LOG.info("ret: %s" % (ret))
        # pymongo reports whether an existing document was modified
        # under the 'updatedExisting' key
        result = bool(ret['updatedExisting'])
    except Exception:
        result = False
    return result
def __init__(self):
    LOG.info('init master')
    self.__loop_count = 0
    self.__train_step = 0
    self.__args = self._set_args()
    LOG.info("the args is {}".format(self.__args))
    self.rainbow = Agent(self.__args, ACTION_SPACE)
    self.rainbow.train()
    self.__count_list = list()
    self.__queue_list = list()
    self.__memory_list = list()
    for _ in range(MAX_WORKER_COUNT):
        self.__count_list.append(0)
        self.__queue_list.append(queue.Queue())
        self.__memory_list.append(ReplayMemory(self.__args, self.__args.memory_capacity))
    self.__priority_weight_increase = (1 - self.__args.priority_weight) / (
        self.__args.T_max - self.__args.learn_start)
def __init__(self):
    signal.signal(signal.SIGINT, self._kill_callback)
    signal.signal(signal.SIGTERM, self._kill_callback)
    parser = argparse.ArgumentParser()
    parser.add_argument('--props', help="""
        Specify the path to where the app.yml configuration-file exists.
        Use the metrics/apps.yml.sample to create an app.yml
        """)
    args = parser.parse_args()
    with open(args.props, 'r') as f:
        self.props = yaml.safe_load(f)
    logging_init(self.props['logs']['location'], self.props['logs']['file_name'])
    pid_file = os.path.join(self.props['pid_file']['location'],
                            self.props['pid_file']['file_name'])
    LOG.info('Creating pid file at %s with PID=[%s]...', pid_file, os.getpid())
    with open(pid_file, 'w') as f:
        f.write(str(os.getpid()))
def start(self):
    LOG.info("conn2center start...")
    while True:
        data, address = self.get_sock().recvfrom(8192)
        LOG.debug("data received: %s" % (repr(data)))
        if data:
            gevent.spawn(self.dataReceived, data)
            LOG.info("job's done")
        else:
            self.get_sock().close()
            break
def __init__(self, address=None, port=None, db=None):
    """
    init function; address is a string, port is an int, db is a string
    """
    self.con = connection.Connection(address, port)
    LOG.info("self.con: %s" % (self.con))
    if db is None:
        self.db = None
    else:
        self.db = database.Database(self.con, db)
        LOG.info("self.db: %s" % (self.db))
        LOG.info("collection: %s" % (self.db.collection_names()))
def insert(self, col_name, record):
    """
    insert data, col_name is the name of the collection
    record is a document or a list of documents
    """
    if self.db is None:
        return False
    col = collection.Collection(self.db, col_name)
    LOG.info("col: %s" % (col))
    LOG.info("record: %s" % (record))
    try:
        ret = col.insert(record, safe=True)
        LOG.info("ret is: %s" % (ret))
    except Exception:
        return False
    return True
def remove(self, col_name, record):
    """
    remove data, col_name is the name of the collection
    record is a dictionary
    """
    if self.db is None:
        return False
    col = collection.Collection(self.db, col_name)
    LOG.info("record: %s" % (record))
    LOG.info("col: %s" % (col))
    result = True
    try:
        ret = col.remove(record, safe=True)
        LOG.info("ret is: %s" % (ret))
        # 'n' is the number of removed documents reported by the server
        if ret["n"] == 0:
            result = False
    except Exception:
        result = False
    return result
def create_player(conn, playerid, rqstid, pkt):
    LOG.info("conn: %s, playerid: %s, rqstid: %s, pkt: %s" % (conn, playerid, rqstid, pkt))
    retVal = player.create_player(pkt["name"], pkt["camp"], pkt["occupation"], pkt["passport_id"])
    LOG.info("send data back ----------------->")
    LOG.info("retVal: %s" % (retVal))
    conn.send_rsp(CMD["PLAYER_CREATE_PLAYER_REP"], playerid, rqstid, retVal)
def check_player(conn, playerid, rqstid, pkt):
    LOG.info("conn: %s, playerid: %s, rqstid: %s, pkt: %s" % (conn, playerid, rqstid, pkt))
    retVal = player.check_player(pkt["zone"], pkt["passport_id"], pkt["sitekey"], pkt["sign"])
    LOG.info("send data back ----------------->")
    LOG.info("retVal: %s" % (retVal))
    conn.send_rsp(CMD["PLAYER_CHECK_PLAYER_REP"], playerid, rqstid, retVal)
import gevent
import sys
import ctypes
from gevent import socket
from gevent import event
from gevent.queue import Queue
from amfast.decoder import Decoder
from amfast.encoder import Encoder

import db.player.player as player
from log.log import LOG, TYPE
from config.cmd import CMD
from network.conn2center import CConn2Center
from config import config

LOG.setLevel(TYPE.DEBUG)
def get_recommand_player_info(conn, playerid, rqstid, pkt):
    LOG.info("conn: %s, playerid: %s, rqstid: %s, pkt: %s" % (conn, playerid, rqstid, pkt))
    retVal = player.get_recommand_player_info()
    LOG.info("send data back ----------------->")
    LOG.info("retVal: %s" % (retVal))
    conn.send_rsp(CMD["PLAYER_GET_RECOMMAND_PLAYER_INFO_REP"], playerid, rqstid, retVal)
def forward_msg(self, cmd, amf3_data, playerid, rqstid, flag, playerid_list=None):
    # [uint8_t flag][uint32_t targetLen][target_players][uint8_t ch='\0']
    # [uint16_t msg_channel][uint16_t cmd][string amf3_data]
    if playerid_list is None:
        # avoid a mutable default argument
        playerid_list = []
    msg_channel = self._rqstid[rqstid]['msg_channel']
    LOG.info("cmd: %s, amf3_data: %s, playerid: %s, rqstid: %s, flag: %s, playerid_list: %s"
             % (pattern.to_hex(cmd), repr(amf3_data), playerid, rqstid, flag, playerid_list))
    target_players = ''
    targetLen = 1
    separator = '\0'
    if flag == 0:
        playerid_list = list(set(playerid_list).union([playerid]))
        for i in range(len(playerid_list)):
            target_players += struct.pack('!I', playerid_list[i])
        LOG.info('len(playerid_list): %s' % (len(playerid_list)))
    elif flag == 1:
        pass
    elif flag == 2:
        pass
    else:
        return
    LOG.debug('target_players: %s' % (repr(target_players)))
    targetLen += len(target_players)
    LOG.debug('targetLen: %s' % (targetLen))
    replyMsg = struct.pack("!BI", flag, targetLen)
    replyMsg += target_players
    # NOTE: the documented layout lists msg_channel, but it is looked up
    # above and not packed here; only the separator and cmd are sent
    replyMsg += struct.pack("!sI", separator, cmd)
    replyMsg += struct.pack(self.get_amf3_fmt(amf3_data), len(amf3_data), amf3_data)
    LOG.info('len(amf3_data): %s' % (len(amf3_data)))
    LOG.info('amf3_data: %s' % (repr(struct.pack(self.get_amf3_fmt(amf3_data), len(amf3_data), amf3_data))))
    LOG.info('replyMsg: %s' % (repr(replyMsg)))
    data_count = self._sock.send(replyMsg)
    LOG.info('send %s data' % (data_count))