def exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Worker loop: keep popping (urgency, (function, args, kwargs)) items
    off ``func_queue`` and executing them until ``self._status`` becomes
    2 (service stopping).

    :param check_interval: queue.get timeout in seconds between stop checks
    :param func_queue: priority queue of (urgency, (func, args, kwargs))
    :param worker_name: name used in log messages only
    """
    while self._status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            # nothing to run yet; loop again so the stop flag is re-checked
            continue
        # pre-bind so the except block below can always log them
        function = None
        argvs = None
        kwargs = None
        try:
            _, (function, argvs, kwargs) = item
            # pylint: disable=W0142
            if func_queue is self._delay_queue:
                log.debug('to delay exec func:{0}'.format(function))
            function(*argvs, **kwargs)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn('{0} worker encountered exception:{1}, func:{2},'
                'args:{3} {4} , executor service({5})'.format(
                    worker_name, error, function, argvs, kwargs, self._name))
            log.warn('error type:{0}'.format(type(error)))
    log.debug('{0} worker thread exited as the service '
        'is stopping'.format(worker_name))
def __exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Worker loop: keep popping (urgency, (function, args, kwargs)) items
    off ``func_queue`` and executing them until ``self.__status``
    becomes 2 (service stopping).

    :param check_interval: queue.get timeout in seconds between stop checks
    :param func_queue: priority queue of (urgency, (func, args, kwargs))
    :param worker_name: name used in log messages only
    """
    while self.__status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            # log.debug('no item found in exec queue')
            continue
        # bugfix: pre-bind these names; otherwise the except block below
        # raises UnboundLocalError whenever unpacking ``item`` fails
        function = None
        argvs = None
        kwargs = None
        try:
            _, (function, argvs, kwargs) = item
            # pylint: disable=W0142
            if func_queue is self.__delay_queue:
                log.debug('to delay exec func:{0}'.format(function))
            function(*argvs, **kwargs)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn(
                '%s worker encountered exception:%s, func:%s, args:%s' %
                (worker_name, error, function, kwargs)
            )
            log.warn('error type:{0}'.format(type(error)))
            log.warn(traceback.format_exc())
    log.info(
        '%s worker thread exited as the service is stopping' % worker_name
    )
def _move2next_load_fname(self):
    """
    get next load fname: locate the currently loaded file in the ordered
    log folder and reopen ``self._load_stream`` on the next finished one

    :return: LOGFILE_GOOD / LOGFILE_EOF / LOGFILE_BAD_RECORD
    """
    folder = self._get_storage_dir()
    fname = os.path.basename(self._load_stream.name)
    files = self._get_ordered_logfiles(folder)
    length = len(files)
    ind = -1
    try:
        ind = files.index(fname)
    except ValueError:
        log.error('cannot find current log stream:{0}'.format(fname))
        return LOGFILE_BAD_RECORD
    newfile = None
    if ind < (length - 2):
        # at least one finished file after the current one
        newfile = '{0}/{1}'.format(folder, files[ind + 1])
    elif ind == (length - 2):
        # the last file may still be in progress ("writing" in its name)
        if files[length - 1].find('writing') < 0:
            newfile = '{0}/{1}'.format(folder, files[length - 1])
        else:
            log.debug('does not have more finished log edits to read')
            return LOGFILE_EOF
    elif ind == (length - 1):
        log.info('does not have more log edits to read, return')
        return LOGFILE_EOF
    try:
        self._load_stream.close()
        self._load_stream = open(newfile, 'rb')
        return LOGFILE_GOOD
    # NOTE(review): StandardError is Python2-only
    except StandardError as err:
        log.error('failed to move to next load stream:{0}'.format(newfile))
        log.error('err:{0}'.format(err))
        return LOGFILE_BAD_RECORD
def add_log(self, log_type, log_mode, log_binary):
    """
    add log into the local file

    :return: True on success; False when the stream cannot be opened,
        the write fails, or the post-write rotation check fails
    """
    # lazily open a new log stream on first write
    if not self.is_stream_open():
        fname = self.get_next_logfile(self._logid)
        if not self._stream_open(fname):
            return False
    # binary :=
    # 32bit len | 128bit logid | log_type 16bit | log_mode 16bit| binary
    bin_logid = self.asign_uint2byte_bybits(self._logid, 128)
    bin_type = self.asign_uint2byte_bybits(log_type, 16)
    bin_mode = self.asign_uint2byte_bybits(log_mode, 16)
    data = '{0}{1}{2}{3}'.format(bin_logid, bin_type, bin_mode, log_binary)
    data_len = len(data)
    str_data_len = self.asign_uint2byte_bybits(data_len, 32)
    log.debug('{0} add_log, log_type {1} log_mode {2}'.format(
        self.__class__, log_type, log_mode)
    )
    write_data = '{0}{1}'.format(str_data_len, data)
    log.info('to add data, logid:{0}'.format(self._logid))
    if self._write_data(write_data):
        log.debug('add_log, write success')
        # + 4 accounts for the 32bit length prefix
        self._current_filesize += (data_len + 4)
        if not self._check_need_new_logfile():
            return False
        return True
    else:
        log.warn('{0} failed to add_log, log_type {1} log_mode {2}'.format(
            self.__class__, log_type, log_mode)
        )
        return False
def check_and_load_existence(user_confdict, default_dict, key, required=False):
    """
    check if the conf item is required to be existent.

    Use default if it's not required and does not exist.
    Raise ConfItemError if it's required and does not exists

    :param key: an index-expression string such as "['a']['b']" that is
        appended to the dict name and evaluated
    """
    confitem = None
    try:
        # try user conf dict
        # NOTE(review): key is passed to eval(); never feed it with
        # untrusted input
        confitem = eval('user_confdict{0}'.format(key))
    except KeyError:
        log.debug('user conf does not have {0} in user_confdict'.format(key))
    if confitem is None:
        # fall back to the default conf dict
        try:
            # try user conf dict
            confitem = eval('default_dict{0}'.format(key))
            log.info('{0} will use default value:{1}'.format(
                key, confitem)
            )
        except KeyError:
            log.warn('default conf does not have {0}'.format(key))
    if confitem is None and required:
        raise ConfItemError('{0} should exist'.format(key))
    return confitem
def exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Cron worker loop: pop (urgency, crontask, (func, args, kwargs)) items
    and execute them until ``self._status`` becomes 2 (stopping); every
    executed crontask is handed back to ``self.schedule`` for its next run.
    """
    log.info('CronExecution exec worker started')
    while self._status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            continue
        # pre-bind so the except block below can always log them
        function = None
        argvs = None
        kwargs = None
        try:
            _, crontask, (function, argvs, kwargs) = item
            # pylint: disable=W0142
            if func_queue is self._delay_queue:
                log.debug('to delay exec func:{0}'.format(function))
            dtnow = datetime.datetime.now(crontask.pytz_timezone())
            # warn when the task fires more than a minute late
            if (dtnow - crontask.get_last_schedtime()).total_seconds() > 60:
                log.warn(
                    'lagging crontask found (name:{0} id: {1})'.format(
                        crontask.name(), crontask.taskid()))
            function(*argvs, **kwargs)
            # re-arm the task for its next scheduled run
            self.schedule(crontask)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn('{0} worker encountered exception:{1}, func:{2},'
                'args:{3} {4} , executor service({5})'.format(
                    worker_name, error, function, argvs, kwargs, self._name))
            log.warn('error type:{0}'.format(type(error)))
    log.debug('{0} worker thread exited as the service '
        'is stopping'.format(worker_name))
def try_move2next_sending_msg(self):
    """
    move to next msg that will be sent

    :return: the msg now in flight, or None when the send queue is empty
    """
    # only advance when nothing is in flight or the current msg has
    # been completely sent out
    if self._sending_msg is None or \
            self._sending_msg.is_msg_already_sent():
        try:
            item = self._send_queue.get_nowait()
            # queue items are (urgency, index, msg)
            msg = item[2]
        except queue.Empty:
            log.debug('The send queue is empty')
            msg = None
        except Exception as error:
            errmsg = (
                'Catch a error that I cannot handle, err_msg:%s' %
                str(error)
            )
            log.error(errmsg)
            raise CConnectionManager.QueueError(errmsg)
        self._sending_msg = msg
    else:
        log.debug(
            'No need to move to next msg since the current one'
            'is not sent out yet'
        )
    temp = self._sending_msg
    return temp
def _do_read(self, context):
    """
    drain all currently readable bytes from the context's non-blocking
    socket into its receiving msg; always returns the context
    """
    sock = context.get_sock()
    data = None
    context.move2recving_msg()
    while self._stopsign is not True:
        try:
            data = sock.recv(self.NET_RW_SIZE)
        except socket.error as error:
            err = error.args[0]
            if err == errno.EAGAIN:
                # no more data for now; wait for the next poll event
                log.debug('EAGAIN happend, peer info %s' %
                    context.get_context_info())
                return context
            elif err == errno.EWOULDBLOCK:
                log.info('EWOULDBLOCK happend, context info %s' %
                    context.get_context_info())
                return context
            else:
                # hard socket error: mark the context for destruction
                log.debug('Socket error happend, error:%s, peer info %s' %
                    (str(error), context.get_context_info()))
                context.to_destroy()
                return context
        except Exception as error:
            log.critical('Socket error happend, error:%s, peer info %s' %
                (str(error), context.get_context_info()))
            context.to_destroy()
            return context
        data_len = len(data)
        if data_len == 0:
            # socket closed by peer
            context.to_destroy()
            return context
        context.do_recv_data(data, data_len)
        del data
def add_log(self, log_type, log_mode, log_binary):
    """
    add log into the local file

    :return: True on success; False when the stream cannot be opened,
        the write fails, or the post-write rotation check fails
    """
    # lazily open a new log stream on first write
    if not self.is_stream_open():
        fname = self.get_next_logfile(self._logid)
        if not self._stream_open(fname):
            return False
    # binary :=
    # 32bit len | 128bit logid | log_type 16bit | log_mode 16bit| binary
    bin_logid = self.asign_uint2byte_bybits(self._logid, 128)
    bin_type = self.asign_uint2byte_bybits(log_type, 16)
    bin_mode = self.asign_uint2byte_bybits(log_mode, 16)
    data = '{0}{1}{2}{3}'.format(bin_logid, bin_type, bin_mode, log_binary)
    data_len = len(data)
    str_data_len = self.asign_uint2byte_bybits(data_len, 32)
    log.debug('{0} add_log, log_type {1} log_mode {2}'.format(
        self.__class__, log_type, log_mode))
    write_data = '{0}{1}'.format(str_data_len, data)
    log.info('to add data, logid:{0}'.format(self._logid))
    if self._write_data(write_data):
        log.debug('add_log, write success')
        # + 4 accounts for the 32bit length prefix
        self._current_filesize += (data_len + 4)
        if not self._check_need_new_logfile():
            return False
        return True
    else:
        log.warn('{0} failed to add_log, log_type {1} log_mode {2}'.format(
            self.__class__, log_type, log_mode))
        return False
def read(self, record_num=128):
    """
    load log into memory

    :notice:
        If skip_badlog is not True, will raise IOError if the stream
        encounters any error.

        Otherwise, the stream will skip the bad log file, move to next one
        and continue reading

    :return:
        a. return a list of "record_num" of LogRecord.

        b. If the count number of list is less than record_num,
        it means the stream encounter EOF, plz read again afterwards.

        c. If the returned is None, it means the stream got nothing, plz
            try again.
    """
    recordlist = []
    count = 0
    move2nextstream = False
    while count < record_num:
        ret, retval = self._try_read_one_log(self._load_stream)
        if ret == LOGFILE_EOF:
            # need read next log file
            move2nextstream = True
        elif ret == LOGFILE_GOOD:
            recordlist.append(retval)
            count += 1
            continue
        elif ret == LOGFILE_BAD_RECORD:
            if not self._skip_badlog:
                raise IOError('find bad records in {0}'.format(
                    self._load_stream.name))
            else:
                log.warn(
                    'Bad record! '
                    'But skip_badlog is on, will skip the file:{0}'.format(
                        self._load_stream.name))
                move2nextstream = True
        if move2nextstream:
            # reset the flag before attempting the move so a failed
            # move (with skip_badlog on) can set it again
            move2nextstream = False
            ret = self._move2next_load_fname()
            if LOGFILE_EOF == ret:
                log.debug('no more log edits to read, plz retry')
                break
            elif LOGFILE_GOOD == ret:
                log.debug('moved to next log edit file, to read new log')
                continue
            elif LOGFILE_BAD_RECORD == ret:
                log.error('IOError happended, read_logs failed')
                if self._skip_badlog:
                    log.error('skip bad log is on, try moving to next one')
                    move2nextstream = True
                    continue
                else:
                    raise IOError('encounter bad records, raise exception')
    return recordlist
def add_write_job(self, context):
    """
    add network write into queue: grab the context's write lock, run
    the write loop, then invoke the finish-write callback
    """
    if context is None:
        return
    try:
        peerinfo = context.get_peerinfo()
    # pylint: disable=W0703
    except Exception as error:
        log.info('failed to get peerinfo, return')
        return
    if not context.try_writelock():
        log.debug('Another thread is writing the context, return. '
            'Peerinfo:%s:%s' % (peerinfo[0], peerinfo[1]))
        return
    if context.is_detroying():
        # NOTE(review): this path returns while still holding the
        # writelock acquired above - verify the lock is released by
        # the destroy flow
        log.info('The context is being destroyed, i will do nothing. '
            'Peerinfo:%s:%s' % (peerinfo[0], peerinfo[1]))
        return
    try:
        # log.debug('write in add_write_job')
        self._do_write(context)
        self._finish_write_callback(True, context)
    # pylint: disable=W0703
    except Exception as error:
        log.debug(
            'seems error happend for context:%s Peerinfo:%s:%s\n, %s' %
            (str(error), peerinfo[0], peerinfo[1], traceback.format_exc()))
        self._finish_write_callback(False, context)
def __exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Worker loop: keep popping (urgency, (function, args, kwargs)) items
    off ``func_queue`` and executing them until ``self.__status``
    becomes 2 (service stopping).

    :param check_interval: queue.get timeout in seconds between stop checks
    :param func_queue: priority queue of (urgency, (func, args, kwargs))
    :param worker_name: name used in log messages only
    """
    while self.__status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            # log.debug('no item found in exec queue')
            continue
        # bugfix: pre-bind these names; otherwise the except block below
        # raises UnboundLocalError whenever unpacking ``item`` fails
        function = None
        argvs = None
        kwargs = None
        try:
            _, (function, argvs, kwargs) = item
            # pylint: disable=W0142
            if func_queue is self.__delay_queue:
                log.debug('to delay exec func:{0}'.format(function))
            function(*argvs, **kwargs)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn(
                '%s worker encountered exception:%s, func:%s, args:%s' %
                (worker_name, error, function, kwargs)
            )
            log.warn('error type:{0}'.format(type(error)))
            log.warn(traceback.format_exc())
    log.debug(
        '%s worker thread exited as the service is stopping' % worker_name
    )
def refresh(self, key, resource=None):
    """
    refresh the last_healthy timestamp (and optionally the resource
    info) of the device identified by ``key``

    :param key: device key (str)
    :param resource: new resource info; when None only the heartbeat
        timestamp is refreshed
    :return: False if key does not exist, True otherwise
    """
    # isinstance instead of exact type() comparison (accepts str subclasses)
    assert isinstance(key, str), 'needs to be a str'
    device = self._devices.get(key)
    if device is None:
        log.warn('Device not found, key:%s' % key)
        return False
    device.set_last_healthy()
    if resource is not None:
        # external Device API spells it "refresh_resouce" (sic)
        device.refresh_resouce(resource)
        # bugfix: both debug messages below declared a %s placeholder
        # but never supplied the key argument
        log.debug(
            'Heartbeat: Device %s refreshed with resource. ' % key
        )
    else:
        log.debug(
            'Heartbeat: Device %s only refreshed with heartbeat. '
            'Resource not refreshed' % key
        )
    return True
def _do_write(self, context):
    """write into interface sending buffer"""
    sock = context.get_sock()
    msg = context.try_move2next_sending_msg()
    if msg is None:
        log.debug('send queue is empty, quit the _do_write thread')
        return context
    # log.debug('To enter write loop until eagin')
    # pylint:disable=w0212
    while not self._stopsign:
        data = msg.get_write_bytes(self.NET_RW_SIZE)
        log.debug('msg get_write_bytes_len to be sent: %d' % len(data))
        try:
            succ_len = sock.send(data)
            # advance the msg's write cursor by the bytes actually sent
            msg.seek_write(succ_len)
        except cuperr.AsyncMsgError as error:
            log.debug('has seek out of msg len, continue')
        except socket.error as error:
            err = error.args[0]
            if err == errno.EAGAIN:
                # kernel buffer full; resume on the next writable event
                log.debug(
                    'EAGAIN happend, context info %s' %
                    context.get_context_info()
                )
                return context
            elif err == errno.EWOULDBLOCK:
                log.debug(
                    'EWOULDBLOCK happend, context info %s' %
                    context.get_context_info()
                )
                return context
            else:
                log.warn(
                    'Socket error happend. But its not eagin,error:%s,\
                    context info %s, errno:%s' %
                    (str(error), context.get_context_info(), err)
                )
                context.to_destroy()
                break
        except Exception as error:
            log.error(
                'Socket error happend, error:%s, context info %s, trace:%s' %
                (str(error), context.get_context_info(),
                    traceback.format_exc())
            )
            context.to_destroy()
            break
        finally:
            del data
        if msg.is_msg_already_sent():
            log.info(
                'sent out a msg uniqid:{0}'.format(
                    async_msg.netmsg_tostring(msg))
            )
            # if we have successfully send out a msg. Then move to next one
            msg = context.try_move2next_sending_msg()
            if msg is None:
                break
    return context
def set_uniq_id(self, uniq_id):
    """
    set the msg unique id; the id is serialized into a 128-bit byte
    field inside the msg data dict
    """
    # misc.check_type(uniq_id, int)
    log.debug('uniq_id: {0}'.format(uniq_id))
    encoded = self._asign_uint2byte_bybits(uniq_id, 128)
    self._data['uniq_id'] = encoded
    self._uniqid = uniq_id
def _handle_new_send(self, context):
    """
    handle new send message: schedule a write job for this context
    on the thread pool
    """
    if context is not None:
        self._thdpool.add_1job(self.add_write_job, context)
        return
    log.debug('conetext is none')
def put_msg(self, flag, msg):
    """
    Put msg into the sending queue.

    :param flag:
        flag determines the priority of the msg.

        Msg with higher priority will have bigger chance to be sent
        out soon.

    :param return:
        return 0 on success
        return 1 on TRY_AGAIN ---- queue is full. network is too busy.

    :TODO:
        If the msg queue is too big, consider close the network link
    """
    succ = None
    self._lock.acquire()
    if self._is_1st_send_msg:
        # the very first msg on a link carries the msg head
        msg.set_need_head(True)
        # pylint: disable=W0212
        msg._set_msg_len()
        self._is_1st_send_msg = False
    else:
        msg.set_need_head(False)
        # log.debug(
        #     'put msg into context, msg_type:ACK, msg_flag:%d,'
        #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
        #     (
        #         msg.get_flag(),
        #         str(msg.get_from_addr()),
        #         str(msg.get_to_addr()),
        #         msg.get_uniq_id()
        #     )
        # )
        # pylint: disable=W0212
        msg._set_msg_len()
    # urgent msgs get priority 0 so the priority queue pops them first
    urgency = 1
    is_urgent = flag & async_msg.MSG_FLAG2NUM['FLAG_URGENT']
    if is_urgent == async_msg.MSG_FLAG2NUM['FLAG_URGENT']:
        urgency = 0
    try:
        self._send_queue.put_nowait((urgency, self._msgind_in_sendque, msg))
        self._msgind_in_sendque += 1
        succ = 0
    except queue.Full:
        log.debug(
            'network is busy. send_msg_queue is full, peerinfo:{0}'.format(
                msg.get_to_addr()[0]
            )
        )
        succ = 1
    self._lock.release()
    return succ
def do_check_msg_ack_loop(self):
    """
    check msg ack loop: process the need-ack queue once, then
    re-schedule itself so the check keeps running periodically
    """
    log.debug('start check msg ack info.')
    self._check_needack_queue()
    self._executor.delay_exec(
        3,   # todo set the check_time to ?
        self.do_check_msg_ack_loop,
        urgency=executor.URGENCY_HIGH)
def seek_write(self, length_ahead):
    """
    move the write cursor forward by *length_ahead* bytes

    :raises cup.err.AsyncMsgError: when the cursor passes the msg end
    """
    log.debug('to seek msg length {0}, now index {1}'.format(
        length_ahead, self._writeindex))
    self._writeindex = self._writeindex + length_ahead
    if not self._writeindex <= self.get_msg_len():
        raise cup.err.AsyncMsgError(
            'You have seek_write out of the msg length')
def get_write_bytes(self, length):
    """
    fetch up to *length* bytes of the serialized msg starting at the
    current write index; returns None when length is not positive
    """
    if length <= 0:
        return
    log.debug('to get {0} write bytes from msg, '
        '_writeindex:{1}, msg total_len: {2}'.format(
            length, self._writeindex, len(self._dumpdata)))
    start = self._writeindex
    return self._dumpdata[start:start + length]
def set_need_head(self, b_need=False):
    """
    :note:
        By default, the msg does not need to have a head unless
        it's the first msg that posted/received.
    """
    self._need_head = b_need
    if b_need and self._is_postmsg:
        self._data['head'] = self.MSG_SIGN
    log.debug('to set msg need head:%s' % str(b_need))
def delay_exec(self, delay_time_insec, function, urgency, *args, **kwargs):
    """
    delay-execute *function* after *delay_time_insec* seconds via a
    one-shot timer thread; use urgency := executor.URGENCY_NORMAL
    by default
    """
    log.debug('got delay exec, func:{0}'.format(function))
    packed = (urgency, (function, args, kwargs))
    threading.Timer(delay_time_insec, self._do_delay_exe, [packed]).start()
def poll(self):
    """
    start to poll: run the thread pool / executor, arm the listen
    socket and the ack-check loop, then dispatch poller events until
    ``self._stopsign`` is set
    """
    self._thdpool.start()
    self._executor.run()
    log.info('thdpool and executor start')
    misc.check_not_none(self._bind_sock)
    self._bind_sock.listen(128)
    self._executor.queue_exec(self.listen_new_connect,
        urgency=executor.URGENCY_HIGH)
    self._executor.delay_exec(
        2,   # todo set the check_time to ?
        self.do_check_msg_ack_loop,
        urgency=executor.URGENCY_HIGH)
    while not self._stopsign:
        try:
            events = self._poller.poll(1)
            # events = self._epoll.poll(1)
        except IOError as err:
            # EINTR: interrupted by a signal - quit the poll loop
            if err.errno == errno.EINTR:
                return
            raise err
        log.debug('poller events num {0}'.format(len(events)))
        for fileno, event in events:
            # if it comes from the listen port, new conn
            #if fileno == self._bind_sock.fileno():
            #    newsock, addr = self._bind_sock.accept()
            #    self._handle_new_conn(newsock, addr)
            # if event & select.EPOLLIN:
            if event & ioloop.READ:
                log.info('ioloop.READ, fd {0}'.format(fileno))
                try:
                    self._handle_new_recv(self._fileno2context[fileno])
                except KeyError:
                    log.info('fd:{0} socket already closed'.format(fileno))
            # elif event & select.EPOLLOUT:
            elif event & ioloop.WRITE:
                log.info('ioloop.WRITE, fd {0}'.format(fileno))
                try:
                    self._handle_new_send(self._fileno2context[fileno])
                except KeyError:
                    log.info('fd:%s, socket already closed' % fileno)
            elif event & ioloop.ERROR:
                # FIXME: consider if we need to release net msg resources
                if event & select.EPOLLHUP:
                    log.info('--EPOLLHUP--')
                else:
                    log.info('--EPOLLERR--')
                try:
                    self.cleanup_error_context(
                        self._fileno2context[fileno])
                except KeyError:
                    log.info('socket already closed')
def get_write_bytes(self, length):
    """
    return the next *length* bytes of the serialized msg (from the
    current write index), or None if length is not positive
    """
    if not length > 0:
        return None
    log.debug('to get {0} write bytes from msg, '
        '_writeindex:{1}, msg total_len: {2}'.format(
            length, self._writeindex, len(self._dumpdata)))
    end = self._writeindex + length
    return self._dumpdata[self._writeindex:end]
def to_destroy(self):
    """mark this context as being destroyed (under the context lock)"""
    self._lock.acquire()
    self._destroying = True
    if self._sock is not None:
        msg = 'context with socket: %s' % str(self._sock)
    else:
        msg = 'context is with no sock'
    log.debug('to destroy context, ' + msg)
    self._lock.release()
def seek_write(self, length_ahead):
    """
    advance the write index by *length_ahead*; raise AsyncMsgError
    when the index passes the end of the msg
    """
    log.debug('to seek msg length {0}, now index {1}'.format(
        length_ahead, self._writeindex))
    self._writeindex += length_ahead
    if self._writeindex <= self.get_msg_len():
        return
    raise cup.err.AsyncMsgError(
        'You have seek_write out of the msg length')
def _cleanup_context(send_queue, peerinfo):
    """cleanup context: drain every pending msg from its send queue"""
    log.debug('to cleanup socket, peer:{0}'.format(peerinfo))
    log.debug('cleanup: send_queue of socket size:{0}'.format(
        send_queue.qsize()))
    while True:
        try:
            # queue items are (urgency, index, msg)
            msg = send_queue.get_nowait()[2]
        except queue.Empty:
            return
        del msg
def put_msg(self, flag, msg):
    """
    Put msg into the sending queue.

    :param flag:
        flag determines the priority of the msg.

        Msg with higher priority will have bigger chance to be sent
        out soon.

    :param return:
        return 0 on success
        return 1 on TRY_AGAIN ---- queue is full. network is too busy.

    :TODO:
        If the msg queue is too big, consider close the network link
    """
    succ = None
    self._lock.acquire()
    if self._is_1st_send_msg:
        # the very first msg on a link carries the msg head
        msg.set_need_head(True)
        # pylint: disable=W0212
        msg._set_msg_len()
        self._is_1st_send_msg = False
    else:
        msg.set_need_head(False)
        # log.debug(
        #     'put msg into context, msg_type:ACK, msg_flag:%d,'
        #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
        #     (
        #         msg.get_flag(),
        #         str(msg.get_from_addr()),
        #         str(msg.get_to_addr()),
        #         msg.get_uniq_id()
        #     )
        # )
        # pylint: disable=W0212
        msg._set_msg_len()
    try:
        # the raw flag acts as the priority of the queue item here
        self._send_queue.put_nowait((flag, self._msgind_in_sendque, msg))
        self._msgind_in_sendque += 1
        succ = 0
    except queue.Full:
        log.debug(
            'network is busy. send_msg_queue is full, peerinfo:{0}'.format(
                msg.get_to_addr()[0]
            )
        )
        succ = 1
    self._lock.release()
    return succ
def handle(self, netmsg):
    """
    handle netmsg: ACK_HEART_BEAT msgs are queued onto the executor,
    everything else is delegated to default_handle
    """
    log.debug('to handle msg in the child class')
    msg_type = netmsg.get_msg_type()
    src_peer, stub_future = netmsg.get_from_addr()
    log.debug('got msg from: %s stub_future:%s' % (src_peer, stub_future))
    if msg_type == self._type_man.getnumber_bytype('ACK_HEART_BEAT'):
        self._executor.queue_exec(self._on_recv_heartbeat_ack,
            executor.URGENCY_HIGH, netmsg)
    else:
        # bugfix: this branch referenced an undefined name ``msg``
        # (NameError at runtime); the parameter is named ``netmsg``
        self.default_handle(netmsg)
def to_destroy(self):
    """flag this context for destruction (under the context lock)"""
    self._lock.acquire()
    self._destroying = True
    if self._sock is not None:
        msg = 'context with socket: {0}, peer:{1}'.format(
            self._sock, self.get_peerinfo())
    else:
        msg = 'context is with no sock'
    log.debug('({0}) is to be destroyed'.format(msg))
    self._lock.release()
def handle(self, msg):
    """
    handle an incoming msg: HEART_BEAT msgs are queued onto the
    executor, anything else goes to default_handle
    """
    log.debug('to handle msg in the child class')
    msg_type = msg.get_msg_type()
    src_peer, stub_future = msg.get_from_addr()
    # log.debug('got msg from: %s stub_future:%s' % (src_peer, stub_future))
    # log.debug('type of msg_type:{0}, settings msg_type:{1}'.format(
    #     type(msg_type), type(self._type_man.getnumber_bytype('HEART_BEAT'))
    # ))
    heartbeat_type = self._type_man.getnumber_bytype('HEART_BEAT')
    if msg_type != heartbeat_type:
        self.default_handle(msg)
    else:
        self._executor.queue_exec(self._on_heartbeat, 1, msg)
def _get_msg_order_ind(self, index):
    """
    map a flat byte *index* into (field_order, offset_within_field)
    over the serialized msg field layout

    :param index: absolute byte position inside the serialized msg
    :return: tuple (order, offset) locating the byte; falls through
        (returning None) only if index is past all fields
    """
    ind = index
    if self._need_head is not True:
        # headless msgs start at field 1
        i = 1
    else:
        i = 0
    log.debug('msg index:{0}'.format(index))
    # bugfix: the removed debug line indexed self._ORDER with the BYTE
    # index (self._ORDER[index]), raising IndexError for any
    # index >= len(self._ORDER); the sibling copy of this method
    # already has it commented out
    while ind >= 0:
        ind -= self._ORDER_BYTES[i]
        if ind > 0:
            i += 1
            continue
        else:
            return (i, ind + self._ORDER_BYTES[i])
def handle(self, netmsg):
    """
    handle netmsg: ACK_HEART_BEAT msgs are scheduled on the executor,
    the rest is delegated to default_handle
    """
    log.debug('to handle msg in the child class')
    msg_type = netmsg.get_msg_type()
    src_peer, stub_future = netmsg.get_from_addr()
    log.debug('got msg from: %s stub_future:%s' % (src_peer, stub_future))
    if msg_type == self._type_man.getnumber_bytype('ACK_HEART_BEAT'):
        self._executor.queue_exec(
            self._on_recv_heartbeat_ack,
            executor.URGENCY_HIGH,
            netmsg
        )
    else:
        # bugfix: was ``self.default_handle(msg)`` - NameError at
        # runtime, the parameter is named ``netmsg``
        self.default_handle(netmsg)
def _set_msg_len(self):
    # compute the total msg length (body + fixed-size fields) and
    # build the serialized byte string into self._dumpdata
    if self._need_head:
        size_except_body = self._SIZE_EXCEPT_BODY
    else:
        size_except_body = self._SIZE_EXCEPT_HEAD_BODY
    body_len = len(self._data['body'])
    # order slot 7 is the variable-length body
    self._ORDER_BYTES[7] = body_len
    self._msglen = body_len + size_except_body
    log.debug('msglen is {0}'.format(self._msglen))
    self._data['len'] = self._asign_uint2byte_bybits(self._msglen, 64)
    tempstr = ''
    # NOTE(review): xrange is Python2-only
    for i in xrange(0, self._ORDER_COUNTS - 1):
        # skip the head field when the msg does not carry one
        if i == 0 and (not self._need_head):
            continue
        tempstr += self._data[self._ORDER[i]]
    self._dumpdata = '{0}{1}'.format(tempstr, self._data['body'])
def _get_msg_order_ind(self, index):
    # map a flat byte *index* into (field_order, offset_within_field)
    # over the serialized msg layout; falls through (None) only when
    # index is past all fields
    ind = index
    if self._need_head is not True:
        # headless msgs start at field 1
        i = 1
    else:
        i = 0
    log.debug('msg index:{0}'.format(index))
    # log.debug('msg index type:{0}'.format(
    #     self._ORDER[index])
    # )
    while ind >= 0:
        ind -= self._ORDER_BYTES[i]
        if ind > 0:
            i += 1
            continue
        else:
            return (i, ind + self._ORDER_BYTES[i])
def __exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Worker loop: keep popping (priority, (function, data)) items off
    ``func_queue`` and calling function(data) until ``self.__status``
    becomes 2 (service stopping).
    """
    while self.__status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            log.debug('no item found in exec queue')
            continue
        # bugfix: pre-bind these names; otherwise the except block
        # below raises UnboundLocalError whenever unpacking fails
        function = None
        data = None
        try:
            _, (function, data) = item
            function(data)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn(
                '%s worker encountered exception:%s, func:%s, data:%s' %
                (worker_name, error, function, data))
    log.info('%s worker thread exited as the service is stopping' %
        worker_name)
def read(self, context):
    """
    read with conn context: acquire the read lock, run the socket
    read loop and fire the finish-read callback with the result
    """
    if context.is_detroying():
        log.debug('The context is being destroyed. return')
        return
    if not context.try_readlock():
        # another thread already holds the readlock and will do the read
        return
    try:
        self._do_read(context)
        self._finish_read_callback(True, context)
    # broad catch: the callback decides how to clean up on failure
    except Exception as error:
        log.info('read error occur, error type:{0}, content:{1}'.format(
            type(error), error))
        log.info(traceback.format_exc())
        self._finish_read_callback(False, context)
def delay_exec(
    self, delay_time_insec, function, urgency, *args, **kwargs
):
    """
    delay_execute function after delay_time seconds

    You can use urgency := executor.URGENCY_NORMAL, by default

    :TODO:
        consider about stopping timers when invoking stop function
    """
    log.debug('got delay exec, func:{0}'.format(function))
    task_data = (urgency, (function, args, kwargs))
    # one-shot timer thread hands task_data to the delayed executor
    timer = threading.Timer(
        delay_time_insec, self._do_delay_exe,
        [task_data]
    )
    timer.start()
def _set_msg_len(self):
    # compute the total msg length (body + fixed-size fields) and
    # build the serialized byte string into self._dumpdata
    if self._need_head:
        size_except_body = self._SIZE_EXCEPT_BODY
    else:
        size_except_body = self._SIZE_EXCEPT_HEAD_BODY
    body_len = len(self._data['body'])
    # order slot 7 is the variable-length body
    self._ORDER_BYTES[7] = body_len
    self._msglen = body_len + size_except_body
    log.debug('msglen is {0}'.format(self._msglen))
    self._data['len'] = self._asign_uint2byte_bybits(
        self._msglen, 64
    )
    tempstr = ''
    # NOTE(review): xrange is Python2-only
    for i in xrange(0, self._ORDER_COUNTS - 1):
        # skip the head field when the msg does not carry one
        if i == 0 and (not self._need_head):
            continue
        tempstr += self._data[self._ORDER[i]]
    self._dumpdata = '{0}{1}'.format(tempstr, self._data['body'])
def _finish_read_callback(self, succ, result):
    # post-read hook: destroy the context when it is flagged for
    # destruction, otherwise re-arm edge-triggered EPOLLIN on its fd
    context = result
    log.debug(
        'context:%s, succ:%s' % (context.get_context_info(), succ)
    )
    if context.is_detroying():
        # destroy the context and socket
        context.release_readlock()
        try:
            self._handle_error_del_context(context)
        except KeyError:
            # already removed from the fd map
            pass
    else:
        self._epoll.modify(
            context.get_sock().fileno(), select.EPOLLIN | select.EPOLLET
        )
        context.release_readlock()
def set_linux_res_bydict(self, info_dict):
    """
    update the linux resource info from a dict shaped like::

        {
            'iface': 'eth0',
            'ipaddr': '10.10.10.1',
            'port': 8089,
            'cpu_idle': 50,
            'mem_inuse': 1024,  # MB
            'mem_total': 8192,
            'net_in': 8192,   # kb
            'net_out': 102400,  # kb
        }

    keys unknown to self._dict_info are ignored with a warning
    """
    for key, value in info_dict.items():
        if key in self._dict_info:
            self._dict_info[key] = value
            log.debug('linux info:%s updated, %s' % (key, value))
        else:
            log.warn('does not have this key:%s, ignore' % key)
def handle(self, msg):
    """
    msg dispatcher: HEART_BEAT msgs are scheduled on the executor
    with urgency 1, others are passed to default_handle
    """
    log.debug('to handle msg in the child class')
    msg_type = msg.get_msg_type()
    src_peer, stub_future = msg.get_from_addr()
    # log.debug('got msg from: %s stub_future:%s' % (src_peer, stub_future))
    # log.debug('type of msg_type:{0}, settings msg_type:{1}'.format(
    #     type(msg_type), type(self._type_man.getnumber_bytype('HEART_BEAT'))
    # ))
    if msg_type == self._type_man.getnumber_bytype('HEART_BEAT'):
        self._executor.queue_exec(self._on_heartbeat, 1, msg)
        return
    self.default_handle(msg)
def _do_read(self, context):
    """
    drain all currently readable bytes from the context's non-blocking
    socket into its receiving msg; always returns the context
    """
    sock = context.get_sock()
    data = None
    context.get_recving_msg()
    while self._stopsign is not True:
        try:
            data = sock.recv(self.NET_RW_SIZE)
        except socket.error as error:
            err = error.args[0]
            if err == errno.EAGAIN:
                # no more data for now; wait for the next poll event
                log.debug(
                    'EAGAIN happend, peer info %s' %
                    context.get_context_info()
                )
                return context
            elif err == errno.EWOULDBLOCK:
                log.info(
                    'EWOULDBLOCK happend, context info %s' %
                    context.get_context_info()
                )
                return context
            else:
                # hard socket error: mark the context for destruction
                log.warn(
                    'Socket error happend, error:%s, peer info %s' %
                    (str(error), context.get_context_info())
                )
                context.to_destroy()
                return context
        except Exception as error:
            log.critical(
                'Socket error happend, error:%s, peer info %s' %
                (str(error), context.get_context_info())
            )
            context.to_destroy()
            return context
        data_len = len(data)
        if data_len == 0:
            # socket closed by peer
            context.to_destroy()
            return context
        context.do_recv_data(data, data_len)
        del data
def __exec_worker(self, check_interval, func_queue, worker_name=''):
    """
    Worker loop: keep popping (priority, (function, data)) items off
    ``func_queue`` and calling function(data) until ``self.__status``
    becomes 2 (service stopping).
    """
    while self.__status != 2:
        try:
            item = func_queue.get(timeout=check_interval)
        except queue.Empty:
            log.debug('no item found in exec queue')
            continue
        # bugfix: pre-bind these names; otherwise the except block
        # below raises UnboundLocalError whenever unpacking fails
        function = None
        data = None
        try:
            _, (function, data) = item
            function(data)
        # pylint: disable=W0703
        # we can NOT predict the exception type
        except Exception as error:
            log.warn(
                '%s worker encountered exception:%s, func:%s, data:%s' %
                (worker_name, error, function, data)
            )
    log.info(
        '%s worker thread exited as the service is stopping' % worker_name
    )
def get_recving_msg(self):
    """
    get the net msg being received; creates a fresh CNetMsg when
    there is none pending or the previous one is complete
    """
    log.debug('to get recving_msg')
    # if no recving msg pending there, create one.
    if self._recving_msg is None:
        self._recving_msg = async_msg.CNetMsg(is_postmsg=False)
    # previous msg fully received: start a new one
    if self._recving_msg.is_recvmsg_complete():
        self._recving_msg = async_msg.CNetMsg(is_postmsg=False)
    if self._is_1st_recv_msg:
        # the very first msg on a link carries the msg head
        self._recving_msg.set_need_head(True)
        self._is_1st_recv_msg = False
    else:
        self._recving_msg.set_need_head(False)
    msg = self._recving_msg
    return msg
def _finish_write_callback(self, succ, result):
    """finish write callback"""
    context = result
    # You cannot do things below as getpeername will block if the conn
    # has problem!!!!!  - Guannan
    # try:
    #     context.get_sock().getpeername()
    # except socket.error as error:
    #   log.debug('Seems socket failed to getpeername:%s' % str(error))
    #   context.to_destroy()
    if context is not None and context.is_detroying():
        # destroy the context and socket
        context.release_writelock()
        try:
            self._handle_error_del_context(context)
        # NOTE(review): bare except silently swallows all errors here
        except:
            pass
    else:
        log.debug('to epoll modify')
        # NOTE(review): epoll_write_params is computed but never used
        epoll_write_params = self._epoll_write_params()
        context.release_writelock()
def add_write_job(self, context):
    """
    add network write into queue: grab the context's write lock, run
    the write loop, then invoke the finish-write callback
    """
    if context is None:
        return
    try:
        peerinfo = context.get_peerinfo()
    # pylint: disable=W0703
    except Exception as error:
        log.warn('failed to get peerinfo, return')
        return
    if not context.try_writelock():
        log.debug(
            'Another thread is writing the context, return. Peerinfo:%s:%s' %
            (peerinfo[0], peerinfo[1])
        )
        return
    if context.is_detroying():
        # NOTE(review): this path returns while still holding the
        # writelock acquired above - verify the lock is released by
        # the destroy flow
        log.debug(
            'The context is being destroyed, i will do nothing. '
            'Peerinfo:%s:%s' % (peerinfo[0], peerinfo[1])
        )
        return
    try:
        self._do_write(context)
        self._finish_write_callback(True, context)
    # pylint: disable=W0703
    except Exception as error:
        log.debug(
            'seems error happend for context:%s Peerinfo:%s:%s' %
            (str(error), peerinfo[0], peerinfo[1])
        )
        self._finish_write_callback(False, context)
def try_move2next_sending_msg(self):
    """
    move to next msg that will be sent

    :return: the msg now in flight, or None when the send queue is empty
    """
    # only advance when nothing is in flight or the current msg has
    # been completely sent out
    if self._sending_msg is None or \
            self._sending_msg.is_msg_already_sent():
        log.debug('to move2next_sending_msg')
        # if self._sending_msg is not None:
        #     # del self._sending_msg
        #     pass
        try:
            item = self._send_queue.get_nowait()
            # queue items are (urgency, index, msg)
            msg = item[2]
        except queue.Empty:
            log.debug('The send queue is empty')
            msg = None
        except Exception as error:
            errmsg = (
                'Catch a error that I cannot handle, err_msg:%s' %
                str(error)
            )
            log.error(errmsg)
            # self._lock.release()
            raise CConnectionManager.QueueError(errmsg)
        self._sending_msg = msg
    else:
        log.debug(
            'No need to move to next msg since the current one'
            'is not sent out yet'
        )
    temp = self._sending_msg
    return temp
def dump_stats(self, print_stdout=False):
    """
    dump the current threadpool stats (from get_stats) into the log,
    and optionally to stdout

    :param print_stdout: also print the stats to stdout when True
    :return: the stats object returned by get_stats
    """
    stat = self.get_stats()
    if print_stdout:
        # NOTE(review): Python2-style print statement
        print stat
    log.info('ThreadPool Stat %s: %s' % (self._name, stat))
    log.debug('queue: %s' % self._jobqueue.queue)
    log.debug('waiters: %s' % self._waiters)
    log.debug('workers: %s' % self._working)
    log.debug('total: %s' % self._threads)
    return stat
def get_recv_msg(self):
    """
    get recv msg from queue (blocking)

    :return: the received msg, or None on SIGTERM-induced TypeError
    :raises CConnectionManager.QueueError: on unexpected errors
    """
    log.debug('to fetch a msg from recv_queue for handle function')
    try:
        # msg = self._recv_queue.get_nowait()[1]
        msg = self._recv_queue.get()[1]
    # NOTE(review): a blocking get() without timeout never raises
    # queue.Empty - this branch looks unreachable
    except queue.Empty as error:
        log.debug('The recv queue is empty')
        msg = None
    except TypeError as error:
        log.error('type error, seems received SIGTERM, err:{0}'.format(
            error)
        )
        msg = None
    except Exception as error:
        msg = 'Catch a error that I cannot handle, err_msg:%s' % error
        log.error(msg)
        log.error(type(error))
        raise CConnectionManager.QueueError(msg)
    return msg
def _stream_open(self, fname):
    """
    open *fname* ('w+b') as the new write stream, creating parent
    directories when missing

    :return: True on success, False on IOError/OSError
    """
    try:
        parent = os.path.dirname(fname)
        if not os.path.exists(parent):
            os.makedirs(parent)
        self._writestream = open(fname, 'w+b')
    except IOError as err:
        log.error(
            'IOError, failed to open stream, err:{0}, file:{1}'.format(
                err, fname
            )
        )
        return False
    except OSError as err:
        log.error(
            'OSError, failed to open stream, err:{0}, file:{1}'.format(
                err, fname
            )
        )
        return False
    log.debug('open new stream succeed')
    return True
def read(self, context):
    """
    Run one read pass for *context* while holding its read lock.

    Contexts that are being torn down, or whose read lock is already
    held, are skipped.  The outcome is reported through
    ``_finish_read_callback(ok, context)``; any exception raised during
    the read is logged and reported as failure instead of propagating.

    :param context:
        the connection context to read from
    """
    if context.is_detroying():
        log.debug('The context is being destroyed. return')
        return
    if not context.try_readlock():
        return
    log.debug(
        'succeed to acquire readlock, to add the \
readjob into the threadpool'
    )
    # the success callback stays inside the try on purpose: a failure
    # there is also routed to the failure callback, as in the original
    try:
        self._do_read(context)
        self._finish_read_callback(True, context)
    except Exception as error:
        log.info('read error occur, error type:{0}, content:{1}'.format(
            type(error), error)
        )
        log.info(traceback.format_exc())
        self._finish_read_callback(False, context)
def push_data(self, data):
    """
    push data into the msg. Return pushed length.
    Return -1 if we should shutdown the socket channel.

    Feeds raw bytes received from the socket into the message, section
    by section following ``self._ORDER`` (sections include 'head',
    'len' and 'body' - full layout defined on the class, outside this
    view).  Per-section buffers live in ``self._data`` and a running
    read index in ``self._readindex``, so this method can be called
    repeatedly with partial network reads until the whole message is
    assembled (``self._msg_finish``).

    :param data:
        raw byte chunk to consume; may be shorter or longer than the
        space left in the current section
    :return:
        number of bytes consumed from ``data``; 0 when the msg is
        already complete or ``data`` is empty; -1 when the completed
        'head' section does not match ``MSG_SIGN``
    """
    if self._msg_finish:
        log.warn('The CNetMsg has already been pushed enough data')
        return 0
    if len(data) == 0:
        log.warn(
            'You just pushed into the msg with a zero-length data'
        )
        return 0
    sign = True
    data_ind = 0
    data_max = len(data)
    # locate which section (order) the read index currently points into
    order, offsite = self._get_msg_order_ind(self._readindex)
    log.debug('msg data index:{0}, offsite: {1}'.format(order, offsite))
    data_key = self._ORDER[order]
    while sign:
        msg_data_loop_end = False
        # One loop handle one data_key until there all the data is handled.
        # lazily create the buffer for this section on first touch
        try:
            self._data[data_key]
        except KeyError:
            self._data[data_key] = ''
        # bytes still missing from the current section
        loop_data_max = (
            self._ORDER_BYTES[order] - len(self._data[data_key])
        )
        if (data_max - data_ind) >= loop_data_max:
            # can fill up the msg
            self._data[data_key] += (
                data[data_ind: loop_data_max + data_ind]
            )
            data_ind += loop_data_max
            msg_data_loop_end = True
            self._readindex += loop_data_max
            if data_key != 'body':
                log.debug(
                    'data_key {0} full filled'.format(data_key)
                )
            if data_key == 'head':
                # a completed head must carry the protocol signature;
                # -1 tells the caller to shut the channel down
                if self._data[data_key] != self.MSG_SIGN:
                    return -1
                else:
                    pass
        else:
            # cannot fill up the msg in this round
            sign = False
            push_bytes = data_max - data_ind
            self._data[data_key] += data[data_ind: data_max]
            self._readindex += push_bytes
            data_ind += push_bytes
        if (data_key == 'len') and (msg_data_loop_end):
            # set up the length of the body
            # (index 7 appears to be the 'body' slot in _ORDER_BYTES;
            # its size is derived from the just-parsed total length -
            # TODO confirm against the class constants)
            total_len = self._convert_bytes2uint(self._data['len'])
            if self._need_head:
                self._ORDER_BYTES[7] = total_len - self._SIZE_EXCEPT_BODY
            else:
                self._ORDER_BYTES[7] = (
                    total_len - self._SIZE_EXCEPT_HEAD_BODY
                )
            log.debug('total len %d' % total_len)
        if msg_data_loop_end and (order == self._ORDER_COUNTS - 1):
            # last section finished: the whole message is assembled
            self._msg_finish = True
            sign = False
            log.debug('congratulations. This msg has been fullfilled')
        elif msg_data_loop_end and order < self._ORDER_COUNTS:
            # advance to the next section and keep consuming
            order += 1
            data_key = self._ORDER[order]
            log.debug('This round has finished')
    return data_ind
def _do_write(self, context):
    """
    Drain the context's pending messages onto its socket.

    Sends the current message in chunks of ``NET_RW_SIZE`` bytes until
    one of: the socket would block (EAGAIN/EWOULDBLOCK - the context is
    returned so the caller can re-arm the write event), the service
    stop flag ``self._stopsign`` is raised, a real socket error occurs
    (the context is marked for destruction), or the send queue runs
    dry.  When a message is fully sent, moves on to the next one.

    :param context:
        connection context providing the socket and outgoing messages
    :return:
        the same ``context`` object on every exit path
    """
    sock = context.get_sock()
    msg = context.try_move2next_sending_msg()
    if msg is None:
        log.debug('send queue is empty, quit the _do_write thread')
        return context
    log.debug('To enter write loop until eagin')
    # pylint:disable=w0212
    # log.debug('This msg _need_head:%s' % msg._need_head)
    while not self._stopsign:
        data = msg.get_write_bytes(self.NET_RW_SIZE)
        log.debug('get_write_bytes_len: %d' % len(data))
        try:
            succ_len = sock.send(data)
            # record how far the send got; send() may accept fewer
            # bytes than len(data)
            msg.seek_write(succ_len)
        except socket.error as error:
            err = error.args[0]
            if err == errno.EAGAIN:
                # kernel send buffer full: hand the context back so
                # the write can be retried later
                log.debug(
                    'EAGAIN happend, context info %s' %
                    context.get_context_info()
                )
                return context
            elif err == errno.EWOULDBLOCK:
                # same as EAGAIN on most platforms; kept separate
                log.debug(
                    'EWOULDBLOCK happend, context info %s' %
                    context.get_context_info()
                )
                return context
            else:
                # unrecoverable socket error: schedule teardown
                log.warn(
                    'Socket error happend. But its not eagin,error:%s,\
context info %s, errno:%s' %
                    (str(error), context.get_context_info(), err)
                )
                context.to_destroy()
                break
        except Exception as error:
            log.error(
                'Socket error happend, error:%s, context info %s' %
                (str(error), context.get_context_info())
            )
            context.to_destroy()
            break
        finally:
            # drop the chunk reference promptly each iteration
            del data
        # only reached on a successful send, so succ_len is bound here
        log.debug('%d bytes has been sent' % succ_len)
        if msg.is_msg_already_sent():
            # if we have successfully send out a msg. Then move to next one
            msg = context.try_move2next_sending_msg()
            if msg is None:
                break
    return context