Example 1
    def read(self, record_num=128):
        """
        load log into memory

        :notice:
            If skip_badlog is not True, an IOError will be raised when the
            stream encounters any error.

            Otherwise, the stream will skip the bad log file, move to the
            next one and continue reading.

        :return:
            a. a list of up to "record_num" LogRecord objects.

            b. If the list holds fewer than record_num records, the stream
            hit EOF; read again later.

            c. If None is returned, the stream got nothing; try again.
        """
        recordlist = []
        count = 0
        move2nextstream = False
        while count < record_num:
            ret, retval = self._try_read_one_log(self._load_stream)
            if ret == LOGFILE_EOF:
                # need read next log file
                move2nextstream = True
            elif ret == LOGFILE_GOOD:
                recordlist.append(retval)
                count += 1
                continue
            elif ret == LOGFILE_BAD_RECORD:
                if not self._skip_badlog:
                    raise IOError('find bad records in {0}'.format(
                        self._load_stream.name))
                else:
                    log.warn(
                        'Bad record! '
                        'But skip_badlog is on, will skip the file:{0}'.format(
                            self._load_stream.name))
                    move2nextstream = True
            if move2nextstream:
                move2nextstream = False
                ret = self._move2next_load_fname()
                if LOGFILE_EOF == ret:
                    log.debug('no more log edits to read, please retry')
                    break
                elif LOGFILE_GOOD == ret:
                    log.debug('moved to next log edit file, to read new log')
                    continue
                elif LOGFILE_BAD_RECORD == ret:
                    log.error('IOError happened, read_logs failed')
                    if self._skip_badlog:
                        log.error('skip bad log is on, try moving to next one')
                        move2nextstream = True
                        continue
                    else:
                        raise IOError('encounter bad records, raise exception')
        return recordlist
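A minimal usage sketch for the read() API above; the reader object, its
construction and the handle() callback are assumptions for illustration, not
part of this listing:

# 'reader' stands for an object exposing the open4read()/read() methods
# shown in these examples; 'handle' is a placeholder for user code
if reader.open4read():
    while True:
        records = reader.read(record_num=128)
        if not records:
            # None or an empty list: nothing more to consume right now
            break
        for record in records:
            handle(record)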
Example 2
 def purge_data(self, before_logid):
     """
     log files whose records all have log ids less than before_logid will be purged.
     """
     folder = self._get_storage_dir()
     logfiles = self._get_ordered_logfiles(folder)
     last_logid = None
     last_fname = None
     purge_list = []
     for fname in logfiles:
         if fname.find('writing') >= 0:
             continue
         current = int(fname.split('.')[-1])
         if last_logid is not None and (current - 1) < before_logid:
             purge_list.append(last_fname)
         last_fname = fname
         last_logid = current
     log.info('log id < before_logid will be purged:{0}'.format(
         purge_list)
     )
     ind = 0
     for fname in purge_list:
         full = '{0}/{1}'.format(folder, fname)
         log.info('to purge log file:{0}'.format(full))
         try:
             os.remove(full)
             ind += 1
         except StandardError as err:
             log.error(
                 'failed to purge log file:{0}, {1}'.format(full, err)
             )
         if ind % 1000 == 0:
             # throttle: pause briefly after every 1000 removals
             time.sleep(0.1)
Example 3
 def purge_data(self, before_logid):
     """
     log files whose records all have log ids less than before_logid will be purged.
     """
     folder = self._get_storage_dir()
     logfiles = self._get_ordered_logfiles(folder)
     last_logid = None
     last_fname = None
     purge_list = []
     for fname in logfiles:
         if fname.find('writing') >= 0:
             continue
         current = int(fname.split('.')[-1])
         if last_logid is not None and (current - 1) < before_logid:
             purge_list.append(last_fname)
         last_fname = fname
         last_logid = current
     log.info('log id < before_logid will be purged:{0}'.format(
         purge_list))
     ind = 0
     for fname in purge_list:
         full = '{0}/{1}'.format(folder, fname)
         log.info('to purge log file:{0}'.format(full))
         try:
             os.remove(full)
             ind += 1
         except StandardError as err:
             log.error('failed to purge log file:{0}, {1}'.format(
                 full, err))
         if ind % 1000 == 0:
             # throttle: pause briefly after every 1000 removals
             time.sleep(0.1)
Example 4
    def _try_read_one_log(self, stream):
        """
        read one log record from the stream.

        :return:
            a tuple of (status, LogRecord or None), where status is one of
            LOGFILE_GOOD, LOGFILE_EOF or LOGFILE_BAD_RECORD
        """
        if stream is None:
            return (LOGFILE_EOF, None)
        str_datalen = datalen = str_data = None
        try:
            str_datalen = stream.read(self._record_lenbytes)
            if len(str_datalen) == 0:
                return (LOGFILE_EOF, None)
            if len(str_datalen) < self._record_lenbytes:
                log.warn('got a bad log from stream:{0}'.format(stream.name))
                return (LOGFILE_BAD_RECORD, None)
            datalen = self.convert_bytes2uint(str_datalen)
            str_data = stream.read(datalen)
            if len(str_data) < datalen:
                log.warn('got less than data len from stream:{0}'.format(
                    stream.name))
                return (LOGFILE_BAD_RECORD, None)
            log_id = self.convert_bytes2uint(str_data[0:16])
            log_type = self.convert_bytes2uint(str_data[16:16 + 2])
            log_mode = self.convert_bytes2uint(str_data[18:18 + 2])
            log_binary = str_data[20:]
            return (LOGFILE_GOOD,
                    LogRecord(datalen, log_id, log_type, log_mode, log_binary))
        except Exception as err:
            log.error('failed to parse log record:{0}'.format(err))
            log.error(traceback.format_exc())
            return (LOGFILE_BAD_RECORD, None)
Example 5
 def _do_read(self, context):
     log.debug('to read sock {0}'.format(context.get_context_info()))
     sock = context.get_sock()
     data = None
     context.move2recving_msg()
     while self._stopsign is not True:
         try:
             data = sock.recv(self.NET_RW_SIZE)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 log.debug('EAGAIN happened peerinfo {0}'.format(
                     context.get_context_info()))
                 return context
             elif err == errno.EWOULDBLOCK:
                 log.info('EWOULDBLOCK happened peerinfo {0}'.format(
                     context.get_context_info()))
                 return context
             else:
                 log.info('Socket error:{0},  peer info {1}'.format(
                     error, context.get_context_info()))
                 context.to_destroy()
                 return context
         except Exception as error:
             log.error('Socket error happened, error:%s, peer info %s' %
                       (str(error), context.get_context_info()))
             context.to_destroy()
             return context
         data_len = len(data)
         if data_len == 0:
             # socket closed by peer
             context.to_destroy()
             return context
         context.do_recv_data(data, data_len)
         del data
Example 6
 def try_move2next_sending_msg(self):
     """
     move to next msg that will be sent
     """
     if self._sending_msg is None or \
             self._sending_msg.is_msg_already_sent():
         log.debug('to move2next_sending_msg')
         # if self._sending_msg is not None:
         #     # del self._sending_msg
         #     pass
         try:
             item = self._send_queue.get_nowait()
             msg = item[2]
         except queue.Empty:
             log.debug('The send queue is empty')
             msg = None
         except Exception as error:
             errmsg = (
                 'Caught an error that I cannot handle, err_msg:%s' %
                 str(error)
             )
             log.error(errmsg)
             # self._lock.release()
             raise CConnectionManager.QueueError(errmsg)
         self._sending_msg = msg
     else:
         log.debug(
             'No need to move to next msg since the current one '
             'is not sent out yet'
         )
     temp = self._sending_msg
     return temp
Example 7
 def run(self):
     """
     run the msgcenter
     """
     if not self.setup():
         return False
     thd_conn_man = threading.Thread(target=self._run_conn_manager, args=())
     thd_conn_man.start()
     thd_stat = threading.Thread(target=self.dump_stat, args=())
     thd_stat.start()
     ind = 0
     msg_ackflag = async_msg.MSG_FLAG2NUM['FLAG_ACK']
     while not self._stop:
         msg = self._conn_mgr.get_recv_msg()
         if ind >= 10000:
             recv_queue = self._conn_mgr.get_recv_queue()
             log.info('msgcenter netmsg queue size:{0}'.format(
                 recv_queue.qsize()))
             ind = 0
         if msg is not None:
             try:
                 log.info(
                     'msg received, type:%d, flag:%d, from:%s, uniqid:%d' %
                     (msg.get_msg_type(), msg.get_flag(),
                      str(msg.get_from_addr()), msg.get_uniq_id()))
                 ind += 1
                 if msg_ackflag & msg.get_flag() == msg_ackflag:
                     self._conn_mgr.push_msg2needack_queue(msg)
                 self.handle(msg)
             # pylint: disable=W0703
             except Exception as err:
                 log.error('got a msg that cannot be handled. '
                           'Seems a network err:{0}'.format(err))
         msg = None
     return True
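The ack handling in the loop above is a plain bitmask test: a msg needs to go
into the needack queue only when every bit of the ACK flag is set in its flag
field. A quick illustration with assumed flag values (not the library's real
constants):

FLAG_ACK = 0x02                       # assumed value, for illustration only
flag = 0x02 | 0x04                    # a msg flag carrying ACK plus another bit
assert FLAG_ACK & flag == FLAG_ACK    # True: the ACK bit is set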
Example 8
File: obj.py Project: wenhuiwu/CUP
 def get(self, path, localpath):
     """
     get a file into localpath
     """
     ret = {
         'returncode': 0,
         'msg': 'success'
     }
     log.info('to get ftp file {0} to  {1}'.format(path, localpath))
     self._check_timeout()
     cwd = self._ftp_con.pwd()
     path = self._get_relative_path(path, cwd)
     if localpath.endswith('/'):
         localpath += path.split('/')[-1]
     log.info('to get ftp {0} to local {1}'.format(path, localpath))
     try:
         with open(localpath, 'wb') as fhandle:  # binary mode for RETR data
             ftp_cmd = 'RETR {0}'.format(path)
             resp = self._ftp_con.retrbinary(ftp_cmd, fhandle.write)
     except Exception as error:
         ret['returncode'] = -1
         ret['msg'] = 'failed to get {0} to {1}, err:{2}'.format(
             path, localpath, error
         )
         log.error(ret['msg'])
     return ret
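A short usage sketch for the get() helper above; the ftpclient instance and
the remote path are assumptions for illustration:

# 'ftpclient' is assumed to be an instance of the class that owns get()
result = ftpclient.get('/remote/dir/data.tar.gz', '/tmp/')
if result['returncode'] != 0:
    log.error(result['msg'])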
Example 9
    def _do_write(self, context):
        """write into interface sending buffer"""
        sock = context.get_sock()
        msg = context.try_move2next_sending_msg()
        if msg is None:
            log.debug('send queue is empty, quit the _do_write thread')
            return context
        # log.debug('To enter write loop until eagin')
        # pylint:disable=w0212
        while not self._stopsign:
            data = msg.get_write_bytes(self.NET_RW_SIZE)
            log.debug('msg get_write_bytes_len to be sent: %d' % len(data))
            try:
                succ_len = sock.send(data)
                msg.seek_write(succ_len)
            except cuperr.AsyncMsgError as error:
                log.debug('has seek out of msg len, continue')
            except socket.error as error:
                err = error.args[0]
                if err == errno.EAGAIN:
                    log.debug(
                        'EAGAIN happened, context info %s' %
                        context.get_context_info()
                    )
                    return context
                elif err == errno.EWOULDBLOCK:
                    log.debug(
                        'EWOULDBLOCK happened, context info %s' %
                        context.get_context_info()
                    )
                    return context
                else:
                    log.warn(
                        'Socket error happened. But it is not eagain, '
                        'error:%s, context info %s, errno:%s' %
                        (str(error), context.get_context_info(), err)
                    )
                    context.to_destroy()
                    break
            except Exception as error:
                log.error(
                    'Socket error happened, error:%s, context info %s, trace:%s' %
                    (str(error), context.get_context_info(), traceback.format_exc())
                )
                context.to_destroy()
                break
            finally:
                del data
            if msg.is_msg_already_sent():
                log.info(
                    'sent out a msg uniqid:{0}'.format(
                        async_msg.netmsg_tostring(msg))
                )
                # if we have successfully send out a msg. Then move to next one
                msg = context.try_move2next_sending_msg()
                if msg is None:
                    break
        return context
Example 10
 def open4read(self):
     """open logs for read"""
     if self._load_stream is None:
         if not self._do_open4read():
             log.error('failed to open4read, return')
             return False
         else:
             return True
Example 11
 def loop(self):
     """run loop"""
     self._status.set_status(self._status.RUNNING)
     self._executor.run()
     self._send_heartbeat_loop()
     if not msgcenter.IMessageCenter.run(self):
         log.error('message center error happened')
         self.stop()
Example 12
 def loop(self):
     """run loop"""
     self._status.set_status(self._status.RUNNING)
     self._executor.run()
     self._send_heartbeat_loop()
     if not msgcenter.IMessageCenter.run(self):
         log.error('message center error happened')
         self.stop()
Example 13
 def open4read(self):
     """open logs for read"""
     if self._load_stream is None:
         if not self._do_open4read():
             log.error('failed to open4read, return')
             return False
         else:
             return True
Example 14
 def setup(self):
     """
     setup the message center
     """
     try:
         self._bind_port()
         return True
     except socket.error as error:
         log.error('bind error:{0}'.format(error))
         return False
Example 15
 def set_current_logid(self, logid):
     """reset current log id"""
     if logid < 0:
         raise ValueError('cannot setup logid less than 0')
     self._logid = logid
     fname = self._get_next_logfile(self._logid)
     if not self._stream_wbopen(fname):
         log.error('failed to open stream, return False')
         return False
     log.info('reset current log id to {0}'.format(logid))
     return True
Example 16
 def set_current_logid(self, logid):
     """reset current log id"""
     if logid < 0:
         raise ValueError('cannot setup logid less than 0')
     self._logid = logid
     fname = self.get_next_logfile(self._logid)
     if not self._stream_open(fname):
         log.error('failed to open stream, return False')
         return False
     log.info('reset current log id to {0}'.format(logid))
     return True
Example 17
 def _do_write(self, context):
     """write into interface sending buffer"""
     sock = context.get_sock()
     msg = context.try_move2next_sending_msg()
     if msg is None:
         log.debug('send queue is empty, quit the _do_write thread')
         return context
     # log.debug('To enter write loop until eagin')
     # pylint:disable=w0212
     # log.debug('This msg _need_head:%s' % msg._need_head)
     while not self._stopsign:
         data = msg.get_write_bytes(self.NET_RW_SIZE)
         # log.debug('get_write_bytes_len: %d' % len(data))
         try:
             succ_len = sock.send(data)
             # log.debug('succeed to send length:%d' % succ_len)
             msg.seek_write(succ_len)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 log.debug('EAGAIN happened, context info %s' %
                           context.get_context_info())
                 return context
             elif err == errno.EWOULDBLOCK:
                 log.debug('EWOULDBLOCK happened, context info %s' %
                           context.get_context_info())
                 return context
             else:
                 log.warn(
                     'Socket error happened. But it is not eagain, '
                     'error:%s, context info %s, errno:%s' %
                     (str(error), context.get_context_info(), err))
                 context.to_destroy()
                 break
         except Exception as error:
             log.error('Socket error happened, error:%s, context info %s' %
                       (str(error), context.get_context_info()))
             context.to_destroy()
             break
         finally:
             del data
         log.debug('%d bytes have been sent' % succ_len)
         if msg.is_msg_already_sent():
             log.info(
                 'end sending out a msg: msg_type:%d, msg_len:%d,'
                 'msg_flag:%d, msg_dest:%s, uniqid:%d, dict:%s' %
                 (msg.get_msg_type(), msg.get_msg_len(), msg.get_flag(),
                  str(msg.get_to_addr()), msg.get_uniq_id(), msg._data))
             # if we have successfully send out a msg. Then move to next one
             msg = context.try_move2next_sending_msg()
             if msg is None:
                 break
     return context
Example 18
 def _check_need_new_logfile(self):
     """if need new log file"""
     if os.path.exists(self._loglist_switched) and self._logfile_switching:
         try:
             os.rename(self._logfile_listnew, self._logfile_list)
             os.remove(self._loglist_switch)
             os.remove(self._loglist_switched)
         # pylint: disable=W0703
         except Exception as err:
             log.error('failed to rename loglist, old:{0} new:{1}'.format(
                 self._loglist_switched, self._loglist_switch)
             )
             return False
     if os.path.exists(self._loglist_switch) and \
             (not self._logfile_switching):
         self._loglist_stream.write('NEED_SWITCH_LOCALFILE\n')
         self._loglist_stream.flush()
         os.fsync(self._loglist_stream)
         self._loglist_stream.close()
         self._logfile_switching = True
         if not os.path.exists(self._logfile_listnew):
             try:
                 os.mknod(self._logfile_listnew)
             # pylint: disable=W0703
             except Exception as err:
                 log.error('switch loglist file failed:{0}'.format(err))
                 return False
         self._loglist_stream = open(self._logfile_listnew, 'a')
     if self._current_filesize >= self._max_log_file_size:
         # log.info('serilizer file needs moving to a new one')
         last_logid = self._writestream.name.split('.')[-1]
         newname = os.path.normpath('{0}/done.{1}'.format(
             os.path.dirname(self._writestream.name), last_logid
         ))
         log.info(
             'finish one log file, logid, range:{0}-{1}'.format(
                 last_logid, self._logid - 1
             )
         )
         os.rename(self._writestream.name, newname)
         self._stream_close()
         self._loglist_stream.write('{0}\n'.format(newname))
         self._loglist_stream.flush()
         os.fsync(self._loglist_stream.fileno())
         self._current_filesize = 0
         # log.info('next logid:{0}'.format(self._logid))
         fname = self.get_next_logfile(self._logid)
         if not self._stream_open(fname):
             return False
     return True
Example 19
 def _write_data(self, binary):
     """ write data into the local file"""
     try:
         self._writestream.write(binary)
         self._writestream.flush()
         os.fsync(self._writestream.fileno())
         self._logid += 1
     # pylint: disable=W0703
     # need to catch such a general exception
     except Exception as error:
         log.error(
             'failed to write data into LocalFileSerilizer, err:{0}'.format(
                 error))
         return False
     return True
Example 20
 def _write_data(self, binary):
     """ write data into the local file"""
     try:
         self._writestream.write(binary)
         self._writestream.flush()
         os.fsync(self._writestream.fileno())
         self._logid += 1
     # pylint: disable=W0703
     # need to catch such a general exception
     except Exception as error:
         log.error(
             'failed to write data into LocalFileSerilizer, err:{0}'.format(
                 error)
         )
         return False
     return True
Example 21
 def _check_need_new_logfile(self):
     """if need new log file"""
     if os.path.exists(self._loglist_switch) and \
             (not self._loglist_switching):
         # start to switch
         log.info('{0} loglist start to switch'.format(self._name))
         self._loglist_switching = True
         self._loglist_stream.write('NEED_SWITCH_LOCALFILE\n')
         self._loglist_stream.flush()
         os.fsync(self._loglist_stream)
         self._loglist_stream.close()
         if not os.path.exists(self._logfile_listnew):
             try:
                 exfile.mk_newnode(self._logfile_listnew)
             # pylint: disable=W0703
             except Exception as err:
                 log.error('switch loglist file failed:{0}'.format(err))
                 return False
         log.info('{0} loglist file {1} switched'.format(
             self._name, self._logfile_list)
         )
         os.rename(self._logfile_list, self._logfile_listold)
         os.rename(self._logfile_listnew, self._logfile_list)
         self._loglist_stream = open(self._logfile_list, 'a')
         self._loglist_switching = False
     if self._current_filesize >= self._max_log_file_size:
         # log.info('serilizer file needs moving to a new one')
         last_logid = self._writestream.name.split('.')[-1]
         newname = os.path.normpath('{0}/done.{1}'.format(
             os.path.dirname(self._writestream.name), last_logid
         ))
         log.info(
             'finish one log file, logid, range:{0}-{1}'.format(
                 last_logid, self._logid - 1
             )
         )
         os.rename(self._writestream.name, newname)
         self._stream_close()
         self._loglist_stream.write('{0}\n'.format(newname))
         self._loglist_stream.flush()
         os.fsync(self._loglist_stream.fileno())
         self._current_filesize = 0
         fname = self._get_next_logfile(self._logid)
         if not self._stream_wbopen(fname):
             return False
     return True
Example 22
 def do_recv_data(self, data, data_len):
     """
     push data into the recving_msg queue
     network read should be in 1 thread only.
     """
     if self._recving_msg is None:
         raise cup.err.NotInitialized('self._recving_msg')
     ret = self._recving_msg.push_data(data)
     #  log.debug('pushed data length: %d' % ret)
     if ret < 0:
         log.warn(
             'received a wrong socket msg, to close the peer:{0}'.format(
                 self._sock.get_peerinfo()
             )
         )
         self.to_destroy()
         return
     if data_len >= ret:
         if self._recving_msg.is_recvmsg_complete():
             # log.debug(
             #     'get a msg: msg_type:%d, msg_len:%d, msg_flag:%d,'
             #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
             #     (
             #         self._recving_msg.get_msg_type(),
             #         self._recving_msg.get_msg_len(),
             #         self._recving_msg.get_flag(),
             #         str(self._recving_msg.get_from_addr()),
             #         str(self._recving_msg.get_to_addr()),
             #         self._recving_msg.get_uniq_id()
             #     )
             # )
             self._conn.get_recv_queue().put(
                 (self._recving_msg.get_flag(), self._recving_msg)
             )
             if self._conn.get_recv_queue().qsize() >= 500:
                 time.sleep(0.1)
             self._recving_msg = self.get_recving_msg()
         #  the pushed data may span two msgs
         if data_len > ret:
             return self.do_recv_data(data[ret:], (data_len - ret))
     else:
         log.error(
             'Socket error. We cannot get more than pushed data length'
         )
         assert False
     return
Example 23
 def do_recv_data(self, data, data_len):
     """
     push data into the recving_msg queue
     network read should be in 1 thread only.
     """
     if self._recving_msg is None:
         raise cup.err.NotInitialized('self._recving_msg')
     try:
         ret = self._recving_msg.push_data(data)
     except IndexError as error:
         log.warn('index error/msg len error happened:{0}'.format(error))
         log.warn(traceback.format_exc())
         log.warn('received a msg that cannot be handled, close the socket')
         self.to_destroy()
         return
     if ret < 0:
         log.warn(
             'received a wrong socket msg, to close the peer:{0}'.format(
                 self.get_peerinfo()))
         self.to_destroy()
         self._conn.cleanup_error_context(self)
         return
     if data_len >= ret:
         if self._recving_msg.is_recvmsg_complete():
             self._is_1st_recv_msg = False
             self._conn.get_recv_queue().put(
                 (self._recving_msg.get_flag(), self._recving_msg))
             if self.get_listened_peer() is None:
                 listened_peer = self._recving_msg.get_from_addr()[0]
                 self.set_listened_peer(listened_peer)
                 log.info(
                     'set listened peer {0} for this context({1})'.format(
                         listened_peer, self._peerinfo))
             self._recving_msg = None
             if self._conn.get_recv_queue().qsize() >= 500:
                 time.sleep(0.1)
             self.move2recving_msg()
         #  the pushed data may span two msgs
         if data_len > ret:
             return self.do_recv_data(data[ret:], (data_len - ret))
     else:
         log.error(
             'Socket error. We cannot get more than pushed data length')
         assert False
     return
Example 24
    def set(self, kvdict, expire_sec=None):
        """
        set cache with kvdict
        ::

            {
                'key1': 'value1',
                'key2': 'value2',
                ....
            }

        :param kvdict:
            kvdict is a dict that contains your cache.
        :param expire_sec:
            if expire_sec is None, the cache will never expire.

        :return:
            True if set cache successfully. False otherwise.
        """
        if all([
                self._maxsize != 0,
                len(kvdict) > self._maxsize
        ]):
            log.error(
                'KVCache {0} cannot insert more '
                'elements than the maxsize'.format(self._name)
            )
            return False
        expire_value = None
        if expire_sec is not None and expire_sec != self.INFINITE_TIME:
            expire_value = expire_sec + time.time()
        else:
            expire_value = self.INFINITE_TIME
        with self._lock_release(b_rw_lock=True):
            for key in kvdict:
                if key in self._kv_data:
                    cup.log.debug(
                        'KVCache: Key:{0} updated.'.format(key)
                    )
                    self._kv_data[key] = (expire_value, kvdict[key])
                    continue
                if not self._heapq_newset(key, kvdict[key], expire_value):
                    return False
        return True
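A usage sketch for the set() call above; the kvcache instance name is an
assumption for illustration:

# 'kvcache' is assumed to be an instance of the cache class shown above
kvcache.set({'key1': 'value1', 'key2': 'value2'}, expire_sec=60)
kvcache.set({'session': 'abc'})  # expire_sec=None, the entry never expires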
Example 25
 def _stream_wbopen(self, fname):
     """open new stream"""
     ret = False
     try:
         parent = os.path.dirname(fname)
         if not os.path.exists(parent):
             os.makedirs(parent)
         self._writestream = open(fname, 'w+b')
         log.debug('open new stream succeed')
         ret = True
     except IOError as err:
         log.error(
             'IOError, failed to open stream, err:{0}, file:{1}'.format(
                 err, fname))
     except OSError as err:
         log.error(
             'OSError, failed to open stream, err:{0}, file:{1}'.format(
                 err, fname))
     return ret
Example 26
 def _heapq_newset(self, key, value, expire_value):
     """
     heap set
     """
     if any([
             self._maxsize == 0,
             len(self._kv_data) < self._maxsize
     ]):
         # no limit, just insert it into the queue
         self._sorted_keys.put((expire_value, key))
         self._kv_data[key] = (expire_value, value)
         return True
     else:
         # need replace the smallest one
         while True:
             try:
                 pop_value = self._sorted_keys.get_nowait()
             except queue.Full:
                 return False
             real_value = self._kv_data.get(pop_value[1], None)
             # key expired, key deleted from self._kv_data
             if real_value is None:
                 self._kv_data[key] = (expire_value, value)
                 self._sorted_keys.put((expire_value, key))
                 return True
             if real_value[0] > pop_value[0]:
                 # resort, adjust real
                 self._sorted_keys.put((expire_value, key))
             else:
                 if expire_value < pop_value[0]:
                     log.error(
                         'KVCache {0}: the algorithm has a fault, '
                         'the newly inserted cache {1} expire time '
                         '< the oldest cache {2} in it'.format(
                             self._name, (key, expire_value), pop_value
                         )
                     )
                     return False
                 del self._kv_data[pop_value[1]]
                 self._kv_data[key] = (expire_value, value)
                 self._sorted_keys.put((expire_value, key))
                 break
     return True
Example 27
    def open4write(self, truncate_last_failure=True):
        """
        open4write

        :raise Exception:
            if encounter any IOError, will raise IOError(errmsg)
        """
        try:
            if not os.path.exists(self._logfile_list):
                os.mknod(self._logfile_list)
            self._loglist_stream = open(self._logfile_list, 'a')
        except Exception as err:
            log.error('cannot create loglist, raise IOError')
            raise IOError('cannot create loglist, {0}'.format(err))
        log.info(
            '{0} try to recover from last '
            'write if there is any need, truncate_last_failure:{1}'.format(
                self._name, truncate_last_failure))
        self._recover_from_lastwriting(truncate_last_failure)
Example 28
    def add_log(self, log_type, log_mode, log_binary):
        """add log into the local file

        :return:
            a tuple (result_True_or_False, logid_or_None)
        """
        self._mlock.acquire()
        ret = (True, None)
        if not self.is_stream_wbopen():
            fname = self._get_next_logfile(self._logid)
            if not self._stream_wbopen(fname):
                ret = (False, None)
        if ret[0]:
            # binary :=
            # 32bit len | 128bit logid | log_type 16bit | log_mode 16bit| bin
            bin_logid = self.asign_uint2byte_bybits(self._logid, 128)
            bin_type = self.asign_uint2byte_bybits(log_type, 16)
            bin_mode = self.asign_uint2byte_bybits(log_mode, 16)
            data = '{0}{1}{2}{3}'.format(
                bin_logid, bin_type, bin_mode, log_binary
            )
            data_len = len(data)
            str_data_len = self.asign_uint2byte_bybits(
                data_len, self._record_lenbytes * 8)
            write_data = '{0}{1}'.format(str_data_len, data)
            log.info('{0} add_log, type {1} mode {2}, logid {3}, '
                'datalen:{4}'.format(
                self._name, log_type, log_mode, self._logid, data_len)
            )
            if self._write_data(write_data):
                self._current_filesize += (data_len + len(str_data_len))
                if not self._check_need_new_logfile():
                    ret = (False, None)
                else:
                    ret = (True, self._logid)
            else:
                log.error('failed to add_log (type:{0} mode:{1})'.format(
                    log_type, log_mode)
                )
                ret = (False, None)
        self._mlock.release()
        return ret
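The record layout documented in the comment above (length prefix, 128-bit
logid, 16-bit type, 16-bit mode, then the payload) can be sketched with a
small standalone parser. This is only an illustration assuming big-endian
byte order and a 4-byte length field; the library itself relies on its own
convert_bytes2uint / asign_uint2byte_bybits helpers:

import binascii
import struct

def parse_record(buf):
    """illustrative parser: 4-byte len | 16-byte logid | 2-byte type |
    2-byte mode | payload (big-endian byte order is an assumption)"""
    (datalen, ) = struct.unpack('>I', buf[0:4])
    body = buf[4:4 + datalen]
    logid = int(binascii.hexlify(body[0:16]), 16)  # 128-bit unsigned int
    log_type, log_mode = struct.unpack('>HH', body[16:20])
    payload = body[20:]
    return (datalen, logid, log_type, log_mode, payload)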
Example 29
    def open4write(self, truncate_last_failure=True):
        """
        open4write

        :raise Exception:
            if encounter any IOError, will raise IOError(errmsg)
        """
        try:
            if not os.path.exists(self._logfile_list):
                os.mknod(self._logfile_list)
            self._loglist_stream = open(self._logfile_list, 'a')
        except Exception as err:
            log.error('cannot create loglist, raise IOError')
            raise IOError('cannot create loglist, {0}'.format(err))
        log.info(
            'try to recover from last '
            'write if there is any need, truncate_last_failure:{0}'.format(
                truncate_last_failure)
        )
        self._recover_from_lastwriting(truncate_last_failure)
Example 30
 def _move2next_load_fname(self):
     """ get next load fname"""
     folder = self._get_storage_dir()
     fname = os.path.basename(self._load_stream.name)
     files = self._get_ordered_logfiles(folder)
     length = len(files)
     ind = -1
     try:
         ind = files.index(fname)
     except ValueError:
         log.error('cannot find current log stream:{0}'.format(fname))
         return LOGFILE_BAD_RECORD
     newfile = None
     if ind < (length - 2):
         newfile = '{0}/{1}'.format(folder, files[ind + 1])
     elif ind == (length - 2):
         if files[length - 1].find('writing') < 0:
             newfile = '{0}/{1}'.format(folder, files[length - 1])
         else:
             log.debug('does not have more finished log edits to read')
             return LOGFILE_EOF
     elif ind == (length - 1):
         log.info('does not have more log edits to read, return')
         return LOGFILE_EOF
     try:
         self._load_stream.close()
         self._load_stream = open(newfile, 'rb')
         return LOGFILE_GOOD
     except StandardError as err:
         log.error('failed to move to next load stream:{0}'.format(newfile))
         log.error('err:{0}'.format(err))
         return LOGFILE_BAD_RECORD
Example 31
 def _move2next_load_fname(self):
     """ get next load fname"""
     folder = self._get_storage_dir()
     fname = os.path.basename(self._load_stream.name)
     files = self._get_ordered_logfiles(folder)
     length = len(files)
     ind = -1
     try:
         ind = files.index(fname)
     except ValueError:
         log.error('cannot find current log stream:{0}'.format(fname))
         return LOGFILE_BAD_RECORD
     newfile = None
     if ind < (length - 2):
         newfile = '{0}/{1}'.format(folder, files[ind + 1])
     elif ind == (length - 2):
         if files[length - 1].find('writing') < 0:
             newfile = '{0}/{1}'.format(folder, files[length - 1])
         else:
             log.debug('does not have more finished log edits to read')
             return LOGFILE_EOF
     elif ind == (length - 1):
         log.info('does not have more log edits to read, return')
         return LOGFILE_EOF
     try:
         self._load_stream.close()
         self._load_stream = open(newfile, 'rb')
         return LOGFILE_GOOD
     except StandardError as err:
         log.error('failed to move to next load stream:{0}'.format(newfile))
         log.error('err:{0}'.format(err))
         return LOGFILE_BAD_RECORD
Example 32
def get_hostip(hostname=None):
    """
    get ipaddr of a host

    :param hostname:
        None by default, in which case a udp socket is used to detect the
        local ipv4 addr. If not None, the hostname is resolved to an ipaddr.
    """
    ipaddr = None
    times = 0
    if hostname is not None:
        times = 0
        while times < 10:
            try:
                ipaddr = str(socket.gethostbyname(hostname))
                break
            except socket.gaierror:
                times += 1
                time.sleep(0.1)
        else:
            log.error('failed to get hostip for {0}'.format(hostname))
        return ipaddr
    else:
        while times < 10:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                sock.connect(('8.8.8.8', 80))
                ipaddr = sock.getsockname()[0]
                break
            except socket.error:
                hostname = get_local_hostname()
                ipaddr = str(socket.gethostbyname(hostname))
                break
            finally:
                sock.close()
                times += 1
        else:
            log.error('failed to get hostip')
        return ipaddr
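Typical calls, as a quick illustration (the hostname below is a placeholder):

local_ip = get_hostip()                    # detect the local ipv4 addr
peer_ip = get_hostip('some-host.example')  # resolve a given hostname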
Example 33
 def _stream_open(self, fname):
     """open new stream"""
     ret = False
     try:
         parent = os.path.dirname(fname)
         if not os.path.exists(parent):
             os.makedirs(parent)
         self._writestream = open(fname, 'w+b')
         log.debug('open new stream succeed')
         ret = True
     except IOError as err:
         log.error(
             'IOError, failed to open stream, err:{0}, file:{1}'.format(
                 err, fname
             )
         )
     except OSError as err:
         log.error(
             'OSError, failed to open stream, err:{0}, file:{1}'.format(
                 err, fname
             )
         )
     return ret
Example 34
def get_hostip(hostname=None):
    """
    get the ip address of a host.

    :param hostname:
        the hostname of the machine; None by default, in which case the
        function will automatically get the ip of the current machine
    """
    times = 0
    ipaddr = None
    got = False
    while times < 10:
        try:
            if hostname is None:
                hostname = get_local_hostname()
            ipaddr = str(socket.gethostbyname(hostname))
            got = True
            break
        except socket.gaierror:
            times += 1
            time.sleep(0.1)
    if not got:
        log.error('failed to get socket hostname for {0}'.format(hostname))
    return ipaddr
Example 35
def get_hostip(hostname=None):
    """
    get ipaddr of a host

    :param hostname:
        None, by default, will get localhost ipaddr
    """
    times = 0
    ipaddr = None
    got = False
    while times < 10:
        try:
            if hostname is None:
                hostname = get_local_hostname()
            ipaddr = str(socket.gethostbyname(hostname))
            got = True
            break
        except socket.gaierror:
            times += 1
            time.sleep(0.1)
    if not got:
        log.error('failed to get socket hostname for {0}'.format(hostname))
    return ipaddr
Example 36
def get_hostip(hostname=None):
    """
    get the ip address of a host.

    :param hostname:
        the hostname of the machine; None by default, in which case the
        function will automatically get the ip of the current machine
    """
    times = 0
    ipaddr = None
    got = False
    while times < 10:
        try:
            if hostname is None:
                hostname = get_local_hostname()
            ipaddr = str(socket.gethostbyname(hostname))
            got = True
            break
        except socket.gaierror:
            times += 1
            time.sleep(0.1)
    if not got:
        log.error('failed to get socket hostname for {0}'.format(hostname))
    return ipaddr
Example 37
    def _do_open4read(self, start_logid=-1):
        """
        get open load stream

        :TODO:
            read starting from logid
        """
        load_dir = self._get_storage_dir(logid=self._logid)
        self._buffer_files = self._get_ordered_logfiles(load_dir)
        to_open = None
        if len(self._buffer_files) <= 0:
            log.warn('does not have any log record yet')
        else:
            if -1 == start_logid:
                to_open = self._buffer_files[0]
            else:
                pass
                # if self._load_stream is None:
                #     log.error('load stream should not be None')
                #     return False
                # name = os.path.basename(self._load_stream.name)
                # ind = -1
                # try:
                #     ind = self._buffer_files.index(name)
                # except ValueError:
                #     log.error('does not find the log: {0}'.format(name))
                #     return False
                # to_open = self._buffer_files[ind]
            try:
                fname = '{0}/{1}'.format(load_dir, to_open)
                self._load_stream = open(fname, 'rb')
                return True
            # pylint:disable=W0703
            # need such an exception
            except Exception as err:
                log.error('failed to open log stream :{0}'.format(err))
                return False
Example 38
    def _try_read_one_log(self, stream):
        """
        read one log record from the stream.

        :return:
            a tuple of (status, LogRecord or None), where status is one of
            LOGFILE_GOOD, LOGFILE_EOF or LOGFILE_BAD_RECORD
        """
        if stream is None:
            return (LOGFILE_EOF, None)
        str_datalen = datalen = str_data = None
        try:
            str_datalen = stream.read(4)
            if len(str_datalen) == 0:
                return (LOGFILE_EOF, None)
            if len(str_datalen) < 4:
                log.warn('got a bad log from stream:{0}'.format(stream.name))
                return (LOGFILE_BAD_RECORD, None)
            datalen = self.convert_bytes2uint(str_datalen)
            str_data = stream.read(datalen)
            if len(str_data) < datalen:
                log.warn(
                    'got less than data len from stream:{0}'.format(
                        stream.name)
                )
                return (LOGFILE_BAD_RECORD, None)
            log_id = self.convert_bytes2uint(str_data[0:16])
            log_type = self.convert_bytes2uint(str_data[16:16 + 2])
            log_mode = self.convert_bytes2uint(str_data[18:18 + 2])
            log_binary = str_data[20:]
            return (
                LOGFILE_GOOD, LogRecord(
                    datalen, log_id, log_type, log_mode, log_binary
                )
            )
        except Exception as err:
            log.error('failed to parse log record:{0}'.format(err))
            return (LOGFILE_BAD_RECORD, None)
Example 39
    def _do_open4read(self, start_logid=-1):
        """
        get open load stream

        :TODO:
            read starting from logid
        """
        load_dir = self._get_storage_dir(logid=self._logid)
        self._buffer_files = self._get_ordered_logfiles(load_dir)
        to_open = None
        if len(self._buffer_files) <= 0:
            log.warn('does not have any log record yet')
        else:
            if -1 == start_logid:
                to_open = self._buffer_files[0]
            else:
                pass
                # if self._load_stream is None:
                #     log.error('load stream should not be None')
                #     return False
                # name = os.path.basename(self._load_stream.name)
                # ind = -1
                # try:
                #     ind = self._buffer_files.index(name)
                # except ValueError:
                #     log.error('does not find the log: {0}'.format(name))
                #     return False
                # to_open = self._buffer_files[ind]
            try:
                fname = '{0}/{1}'.format(load_dir, to_open)
                self._load_stream = open(fname, 'rb')
                return True
            # pylint:disable=W0703
            # need such an exception
            except Exception as err:
                log.error('failed to open log stream :{0}'.format(err))
                return False
Example 40
def test_gen_wf():
    """
    When init_comlog is called with gen_wf=True, messages with level
    WARNING or above are written into the ${logfile}.wf log file.
    This case verifies that the behavior matches expectations.
    """
    log.init_comlog(
        "Yang Honggang", logging.DEBUG, "cup.log",
        log.ROTATION, gen_wf=True
    )

    log.info("info")
    log.critical("critical")
    log.error("error")
    log.warn("warning")
    log.debug("debug")

    # check that cup.log and cup.log.wf have been generated
    try:
        flog = open('cup.log')
        flog_wf = open('cup.log.wf')
    except IOError:
        assert(False), "can not find cup.log or cup.log.wf file"
    # check that cup.log contains 'debug' and 'info'
    flog_str = flog.read()
    assert('debug' in flog_str and 'info' in flog_str), "cup.log's content error"
    # check that cup.log.wf contains 'critical', 'error' and 'warning'
    flog_wf_str = flog_wf.read()
    assert('critical' in flog_wf_str and 'error' in flog_wf_str and \
            'warning' in flog_wf_str), "cup.log.wf's content error"

    assert('debug' not in flog_wf_str and 'info' not in flog_wf_str), \
            "cup.log.wf's content error"
    # cup.log should not contain 'critical', 'error' or 'warning'
    assert('critical' not in flog_str and 'error' not in flog_str and \
            'warning' not in flog_str), "cup.log's content error"
Example 41
 def get_recv_msg(self):
     """
     get recv msg from queue
     """
     log.debug('to fetch a msg from recv_queue for handle function')
     try:
         # should use block-mode, otherwise the while loop in the upper
         # code scope will crazily occupy a full cpu-core capacity.
         msg = self._recv_queue.get(block=True, timeout=0.5)[1]
     except queue.Empty as error:
         msg = None
     except TypeError as error:
         log.error(
             'type error, seems received SIGTERM, err:{0}'.format(error))
         msg = None
     except Exception as error:
         msg = 'Caught an error that I cannot handle, err_msg:%s' % error
         log.error(msg)
         log.error(type(error))
         raise CConnectionManager.QueueError(msg)
     return msg
Example 42
 def get_recv_msg(self):
     """
     get recv msg from queue
     """
     # log.debug('to fetch a msg from recv_queue for handle function')
     try:
         msg = self._recv_queue.get_nowait()[1]
         # msg = self._recv_queue.get()[1]
     except queue.Empty as error:
         # log.debug('The recv queue is empty')
         msg = None
     except TypeError as error:
         log.error(
             'type error, seems received SIGTERM, err:{0}'.format(error))
         msg = None
     except Exception as error:
         msg = 'Caught an error that I cannot handle, err_msg:%s' % error
         log.error(msg)
         log.error(type(error))
         raise CConnectionManager.QueueError(msg)
     return msg
Example 43
File: conn.py Project: zfq308/CUP
 def get_recv_msg(self):
     """
     get recv msg from queue
     """
     log.debug('to fetch a msg from recv_queue for handle function')
     try:
         # msg = self._recv_queue.get_nowait()[1]
         msg = self._recv_queue.get()[1]
     except queue.Empty as error:
         log.debug('The recv queue is empty')
         msg = None
     except TypeError as error:
         log.error('type error, seems received SIGTERM, err:{0}'.format(
             error)
         )
         msg = None
     except Exception as error:
         msg = 'Caught an error that I cannot handle, err_msg:%s' % error
         log.error(msg)
         log.error(type(error))
         raise CConnectionManager.QueueError(msg)
     return msg
Example 44
    def _recover_from_lastwriting(self, truncate_last_failure=True):
        """
        recovery from last log writing

        :raise Exception:
            IOError, if any error happened
        """
        folder = self._get_storage_dir()
        files = self._get_ordered_logfiles(folder)
        need_finish_file = False
        if len(files) < 1:
            log.info('no need recovery. Does not contain any files')
            return
        file_start_logid = -1
        seek_file = None
        if files[-1].find('writing') < 0:
            # does not need recovery
            log.info('does not have unfinished logfile, return')
            file_start_logid = int(files[-1].split('.')[-1]) + 1
            seek_file = files[-1]
            log.info('next logid will be {0}'.format(self._logid))
        else:
            # need recovery; check that there is at most 1 writing log file
            count = 0
            for fname in files:
                if fname.find('writing') >= 0:
                    count += 1
            if count > 1:
                errmsg = 'has more than 1 writing log files, count:{0}'
                log.error(errmsg.format(count))
                raise IOError(errmsg)
            log.info('has an unfinished log file, recovery starts')

            seek_file = files[-1]
            file_start_logid = int(seek_file.split('.')[-1])
        fullname = os.path.normpath('{0}/{1}'.format(folder, seek_file))
        done_id = file_start_logid
        read_length = 0
        try:
            reader = open(fullname, 'rb')
            while True:
                ret, _ = self._try_read_one_log(reader)
                if ret == LOGFILE_BAD_RECORD:
                    if truncate_last_failure:
                        log.info(
                            'to truncate last writing file:{0}'
                            ', size:{1}'.format(
                                files[-1], read_length)
                        )
                        reader.truncate(read_length)
                        break
                    else:
                        raise IOError(
                            'truncate_last_failure is not enabled &&'
                            ' Bad record found'
                        )
                elif ret == LOGFILE_EOF:
                    log.info('log file is intact, no need truncating')
                    break
                elif ret == LOGFILE_GOOD:
                    done_id += 1
                    read_length = reader.tell()
            # compute the final name after scanning, so it is defined even
            # when the loop breaks on the first record
            newname = os.path.normpath('{0}/{1}'.format(
                folder, 'done.{0}'.format(file_start_logid))
            )
            if fullname != newname:
                log.info('move last writing file {0} to {1}'.format(
                    fullname, newname)
                )
                os.rename(fullname, newname)
                self._loglist_stream.write('{0}\n'.format(newname))
                self._loglist_stream.flush()
                os.fsync(self._loglist_stream.fileno())
            self._logid = done_id
            log.info('next logid will be {0}'.format(self._logid))
        except Exception as err:
            log.error('failed to recover from last log:{0}'.format(err))
            raise IOError(err)
        return
Example 45
File: conn.py Project: zfq308/CUP
 def _do_write(self, context):
     sock = context.get_sock()
     msg = context.try_move2next_sending_msg()
     if msg is None:
         log.debug('send queue is empty, quit the _do_write thread')
         return context
     log.debug('To enter write loop until eagain')
     # pylint:disable=w0212
     # log.debug('This msg _need_head:%s' % msg._need_head)
     while not self._stopsign:
         data = msg.get_write_bytes(self.NET_RW_SIZE)
         log.debug('get_write_bytes_len: %d' % len(data))
         try:
             succ_len = sock.send(data)
             # log.debug('succeed to send length:%d' % succ_len)
             msg.seek_write(succ_len)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 log.debug(
                     'EAGAIN happened, context info %s' %
                     context.get_context_info()
                 )
                 return context
             elif err == errno.EWOULDBLOCK:
                 log.debug(
                     'EWOULDBLOCK happened, context info %s' %
                     context.get_context_info()
                 )
                 return context
             else:
                 log.warn(
                     'Socket error happened. But it is not eagain, '
                     'error:%s, context info %s, errno:%s' %
                     (str(error), context.get_context_info(), err)
                 )
                 context.to_destroy()
                 break
         except Exception as error:
             log.error(
                 'Socket error happened, error:%s, context info %s' %
                 (str(error), context.get_context_info())
             )
             context.to_destroy()
             break
         finally:
             del data
         log.debug('%d bytes have been sent' % succ_len)
         if msg.is_msg_already_sent():
             # log.debug(
             #     'send out a msg: msg_type:%d, msg_len:%d, msg_flag:%d, '
             #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
             #     (
             #         msg.get_msg_type(),
             #         msg.get_msg_len(),
             #         msg.get_flag(),
             #         str(msg.get_from_addr()),
             #         str(msg.get_to_addr()),
             #         msg.get_uniq_id()
             #     )
             # )
             # if we have successfully send out a msg. Then move to next one
             msg = context.try_move2next_sending_msg()
             if msg is None:
                 break
     return context
Example 46
File: conn.py Project: zfq308/CUP
 def push_msg2sendqueue(self, msg):
     """
     push msg into the send queue
     """
     ret = 0
     if msg is None:
         log.warn('put a None into msg send queue. return')
         ret = -1
         return ret
     flag = msg.get_flag()
     #  log.debug('to put flag and msg into the queue. flag:%d' % flag)
     #  self._send_queue.put( (flag, msg) )
     peer = msg.get_to_addr()[0]
     new_created = False
     context = None
     sock = None
     try:
         context = self._peer2context[peer]
     except KeyError:
         log.info('To create a new context for the sock:{0}'.format(
             peer)
         )
         self._mlock.acquire()
         if peer not in self._peer2context:
             sock = self.connect(peer)
             if sock is not None:
                 context = CConnContext()
                 context.set_conn_man(self)
                 context.set_sock(sock)
                 context.set_peerinfo(peer)
                 fileno = sock.fileno()
                 self._peer2context[peer] = context
                 self._fileno2context[fileno] = context
                 self._context2fileno_peer[context] = (fileno, peer)
                 log.info('created context for the new sock')
                 ret = 0
                 try:
                     self._epoll.register(
                         sock.fileno(), self._epoll_write_params()
                     )
                 except Exception as error:  # pylint: disable=W0703
                     log.warn(
                         'failed to register the socket fileno, err_msg:%s, '
                         'peerinfo:%s:%s. To epoll modify it' %
                         (str(error), peer[0], peer[1])
                     )
                     self._epoll.modify(
                         sock.fileno(), self._epoll_write_params()
                     )
             else:
                 log.error(
                     'failed to post msg. Connect failed. peer info:{0}.'
                     ' msg_type:{1}'.format(
                         str(peer), msg.get_msg_type()
                     )
                 )
                 ret = -1
         else:
             context = self._peer2context[peer]
         self._mlock.release()
     else:
         context = self._peer2context[peer]
     if ret != 0:
         return ret
     if not context.is_detroying():
         if context.put_msg(flag, msg) == 0:
             ret = 0
         else:
             ret = -1
         self._handle_new_send(context)
         return ret
Example 47
    def read(self, record_num=128):
        """
        load log into memory

        :notice:
            If skip_badlog is not True, an IOError will be raised when the
            stream encounters any error.

            Otherwise, the stream will skip the bad log file, move to the
            next one and continue reading.

        :return:
            a. a list of up to "record_num" LogRecord objects.

            b. If the list holds fewer than record_num records, the stream
            hit EOF; read again later.

            c. If None is returned, the stream got nothing; try again.
        """
        recordlist = []
        count = 0
        move2nextstream = False
        while count < record_num:
            ret, retval = self._try_read_one_log(self._load_stream)
            if ret == LOGFILE_EOF:
                # need read next log file
                move2nextstream = True
            elif ret == LOGFILE_GOOD:
                recordlist.append(retval)
                count += 1
                continue
            elif ret == LOGFILE_BAD_RECORD:
                if not self._skip_badlog:
                    raise IOError(
                        'find bad records in {0}'.format(
                            self._load_stream.name)
                    )
                else:
                    log.warn(
                        'Bad record! '
                        'But skip_badlog is on, will skip the file:{0}'.format(
                            self._load_stream.name)
                    )
                    move2nextstream = True
            if move2nextstream:
                move2nextstream = False
                ret = self._move2next_load_fname()
                if LOGFILE_EOF == ret:
                    log.debug('does not have more log edits to read, please retry')
                    break
                elif LOGFILE_GOOD == ret:
                    log.debug('moved to next log edit file, to read new log')
                    continue
                elif LOGFILE_BAD_RECORD == ret:
                    log.error('IOError happened, read_logs failed')
                    if self._skip_badlog:
                        log.error('skip bad log is on, try moving to next one')
                        move2nextstream = True
                        continue
                    else:
                        raise IOError('encounter bad records, raise exception')
        return recordlist