Ejemplo n.º 1
0
 def add_log(self, log_type, log_mode, log_binary):
     """Append one log record to the currently open local log file.

     Record layout (see inline comment):
         32bit len | 128bit logid | 16bit log_type | 16bit log_mode | binary

     :param log_type:
         numeric log type, packed into 16 bits
     :param log_mode:
         numeric log mode, packed into 16 bits
     :param log_binary:
         serialized payload, appended verbatim after the header
     :return:
         True on success. False when the stream cannot be opened, the
         write fails, or _check_need_new_logfile() returns a falsy value
         (NOTE(review): returning False after a *successful* write looks
         suspicious -- confirm callers expect that)
     """
     if not self.is_stream_open():
         # lazily open the next log file for the current logid
         fname = self.get_next_logfile(self._logid)
         if not self._stream_open(fname):
             return False
     # binary :=
     # 32bit len | 128bit logid | log_type 16bit | log_mode 16bit| binary
     bin_logid = self.asign_uint2byte_bybits(self._logid, 128)
     bin_type = self.asign_uint2byte_bybits(log_type, 16)
     bin_mode = self.asign_uint2byte_bybits(log_mode, 16)
     data = '{0}{1}{2}{3}'.format(bin_logid, bin_type, bin_mode, log_binary)
     data_len = len(data)
     # the 32bit length header counts everything after itself
     str_data_len = self.asign_uint2byte_bybits(data_len, 32)
     log.debug('{0} add_log, log_type {1} log_mode {2}'.format(
         self.__class__, log_type, log_mode)
     )
     write_data = '{0}{1}'.format(str_data_len, data)
     log.info('to add data, logid:{0}'.format(self._logid))
     if self._write_data(write_data):
         log.debug('add_log, write success')
         # +4 accounts for the 32bit length header itself
         self._current_filesize += (data_len + 4)
         if not self._check_need_new_logfile():
             return False
         return True
     else:
         log.warn('{0} failed to add_log, log_type {1} log_mode {2}'.format(
             self.__class__, log_type, log_mode)
         )
         return False
Ejemplo n.º 2
0
 def refresh(self, key, resource=None):
     """Refresh the last-healthy time of a registered device.

     :param key:
         string key identifying the device
     :param resource:
         optional resource object; when given it is forwarded to the
         device's ``refresh_resouce`` method
         (NOTE(review): looks like a typo of "refresh_resource", but it
         is the device API name we must call -- confirm)
     :return:
         False if the key does not exist, True otherwise
     """
     assert type(key) == str, 'needs to be a str'
     device = self._devices.get(key)
     if device is None:
         log.warn('Device not found, key:%s' % key)
         return False
     device.set_last_healthy()
     if resource is not None:
         device.refresh_resouce(resource)
         # fix: the original messages contained a bare '%s' placeholder
         # that was never interpolated
         log.debug(
             'Heartbeat: Device %s refreshed with resource. ' % key
         )
     else:
         log.debug(
             'Heartbeat: Device %s only refreshed with heartbeat. '
             'Resource not refreshed' % key
         )
     return True
Ejemplo n.º 3
0
Archivo: conn.py Proyecto: zfq308/CUP
 def add_write_job(self, context):
     """
     add network write into queue

     The write only proceeds when this thread wins the context's write
     lock and the context is not being destroyed.

     :param context:
         connection context; None is silently ignored
     """
     if context is None:
         return
     try:
         peerinfo = context.get_peerinfo()
     # pylint: disable=W0703
     except Exception as error:
         log.warn('failed to get peerinfo, return')
         return
     if not context.try_writelock():
         log.debug(
             'Another thread is writing the context, return. Peerinfo:%s:%s' %
             (peerinfo[0], peerinfo[1])
         )
         return
     # NOTE(review): from here on we hold the write lock; the destroying
     # branch below returns without a visible unlock -- confirm the lock
     # is released elsewhere (e.g. by the destroy path)
     if context.is_detroying():
         log.debug(
             'The context is being destroyed, i will do nothing. '
             'Peerinfo:%s:%s' %
             (peerinfo[0], peerinfo[1])
         )
         return
     try:
         self._do_write(context)
         self._finish_write_callback(True, context)
     # pylint: disable=W0703
     except Exception as error:
         log.debug(
             'seems error happend for context:%s Peerinfo:%s:%s' %
             (str(error), peerinfo[0], peerinfo[1])
         )
         self._finish_write_callback(False, context)
Ejemplo n.º 4
0
def check_and_load_existence(user_confdict, default_dict, key, required=False):
    """
    check if the conf item is required to be existent.
    Use default if it's not required and does not exist.
    Raise ConfItemError if it's required and does not exists

    :param user_confdict:
        user-supplied conf dict, consulted first
    :param default_dict:
        fallback conf dict consulted when the user dict misses the key
    :param key:
        subscript expression string (e.g. "['section']['item']") that is
        appended verbatim to the dict name and eval'ed
    :param required:
        when True, a missing item raises ConfItemError instead of
        returning None
    :return:
        the conf item value, or None when absent and not required

    .. note::
        SECURITY: *key* is interpolated into eval(); a malicious key
        string can execute arbitrary code. Only pass trusted,
        program-generated key strings here.
    """
    confitem = None
    try:
        # try user conf dict
        confitem = eval('user_confdict{0}'.format(key))
    except KeyError:
        log.debug('user conf does not have {0} in user_confdict'.format(key))

    if confitem is None:
        try:
            # fall back to the default conf dict
            confitem = eval('default_dict{0}'.format(key))
            log.info('{0} will use default value:{1}'.format(
                key, confitem)
            )
        except KeyError:
            log.warn('default conf does not have {0}'.format(key))
    if confitem is None and required:
        raise ConfItemError('{0} should exist'.format(key))
    return confitem
Ejemplo n.º 5
0
 def deserilize(self, binary):
     """
     Restore the internal info dict from a pickled byte string.

     Returns True on success; False (with a warning logged) when
     unpickling fails for any reason.
     """
     # pylint: disable=W0703
     # any unpickling failure is treated uniformly as "bad input"
     try:
         loaded = pickle.loads(binary)
     except Exception as error:
         log.warn('deserilize linux device error, msg:%s' % error)
         return False
     self._dict_info = loaded
     return True
Ejemplo n.º 6
0
 def deserilize(self, binary):
     """
     Restore the internal info dict from a pickled byte string.

     Returns True on success; False (with a warning logged) when
     unpickling fails for any reason.
     """
     # pylint: disable=W0703
     # any unpickling failure is treated uniformly as "bad input"
     try:
         loaded = pickle.loads(binary)
     except Exception as error:
         log.warn('deserilize linux device error, msg:%s' % error)
         return False
     self._dict_info = loaded
     return True
Ejemplo n.º 7
0
 def _do_write(self, context):
     """write into interface sending buffer

     Pump queued msgs of *context* into the socket send buffer until the
     socket would block (EAGAIN/EWOULDBLOCK), the send queue drains, or a
     fatal socket error marks the context for destruction.

     :param context:
         connection context holding the socket and its send queue
     :return:
         the context (caller re-schedules or destroys based on its state)
     """
     sock = context.get_sock()
     msg = context.try_move2next_sending_msg()
     if msg is None:
         log.debug('send queue is empty, quit the _do_write thread')
         return context
     # log.debug('To enter write loop until eagin')
     # pylint:disable=w0212
     # log.debug('This msg _need_head:%s' % msg._need_head)
     while not self._stopsign:
         # send at most NET_RW_SIZE bytes per iteration
         data = msg.get_write_bytes(self.NET_RW_SIZE)
         # log.debug('get_write_bytes_len: %d' % len(data))
         try:
             succ_len = sock.send(data)
             # log.debug('succeed to send length:%d' % succ_len)
             msg.seek_write(succ_len)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 # kernel send buffer full -- retry on next epoll wakeup
                 log.debug('EAGAIN happend, context info %s' %
                           context.get_context_info())
                 return context
             elif err == errno.EWOULDBLOCK:
                 log.debug('EWOULDBLOCK happend, context info %s' %
                           context.get_context_info())
                 return context
             else:
                 # any other socket error is fatal for this connection
                 log.warn(
                     'Socket error happend. But its not eagin,error:%s,\
                     context info %s, errno:%s' %
                     (str(error), context.get_context_info(), err))
                 context.to_destroy()
                 break
         except Exception as error:
             log.error('Socket error happend, error:%s,  context info %s' %
                       (str(error), context.get_context_info()))
             context.to_destroy()
             break
         finally:
             del data
         log.debug('%d bytes has been sent' % succ_len)
         if msg.is_msg_already_sent():
             # pylint: disable=W0212
             # msg._data is accessed for logging only
             log.info(
                 'end sending out a msg: msg_type:%d, msg_len:%d,'
                 'msg_flag:%d, msg_dest:%s, uniqid:%d, dict:%s' %
                 (msg.get_msg_type(), msg.get_msg_len(), msg.get_flag(),
                  str(msg.get_to_addr()), msg.get_uniq_id(), msg._data))
             # if we have successfully send out a msg. Then move to next one
             msg = context.try_move2next_sending_msg()
             if msg is None:
                 break
     return context
Ejemplo n.º 8
0
Archivo: conn.py Proyecto: zhangxt/CUP
    def _handle_error_del_context(self, context):
        """
        Tear down an errored connection: close its socket, deregister it
        from epoll and every bookkeeping map, then hand its send queue to
        the thread pool for draining.

        :param context:
            connection context to destroy; None is silently ignored
        """
        def _cleanup_context(send_queue, peerinfo):
            """drain every queued outgoing msg of the dead peer"""
            log.info('to cleanup socket, peer:{0}'.format(peerinfo))
            log.info('cleanup: send_queue of socket size:{0}'.format(
                send_queue.qsize()))
            while True:
                try:
                    item = send_queue.get_nowait()
                    msg = item[2]
                    del msg
                except queue.Empty:
                    break
            # pylint: disable=W0212
            # need cleanup
            log.info('end clean up peerinfo:{0}'.format(peerinfo))

        if context is None:
            return
        # log.info('to del context as socket is not normal')
        # fix: pre-assign peerinfo -- if get_peerinfo() raises inside the
        # try block below, the add_1job call at the end used to hit a
        # NameError on an unbound local
        peerinfo = None
        self._mlock.acquire()
        try:
            peerinfo = context.get_peerinfo()
            log.info('handle socket reset by peer, to close the socket:%s:%s' %
                     (peerinfo[0], peerinfo[1]))
            fileno_peer = self._context2fileno_peer[context]
            log.info('socket info: %s' % str(fileno_peer[1]))

            try:
                sock = context.get_sock()
                # sock.shutdown(socket.SHUT_RDWR)
                sock.close()
                context.set_sock(None)
            except socket.error as error:
                log.info('failed to close the socket, err_msg:%s' % str(error))
            except Exception as error:
                log.warn('failed to close socket:{0}'.format(error))

            try:
                self._epoll.unregister(fileno_peer[0])
            except Exception as error:  # pylint: disable=W0703
                log.warn('epoll unregister error:%s, peerinfo:%s' %
                         (str(error), str(fileno_peer[1])))
            log.info('socket closed')
            del self._fileno2context[fileno_peer[0]]
            del self._peer2context[fileno_peer[1]]
            del self._context2fileno_peer[context]
        except Exception as error:  # pylint: disable=W0703
            # fix: log instead of silently swallowing the failure
            log.warn('failed to del context, error:{0}'.format(error))
        finally:
            self._mlock.release()
        # pylint: disable=W0212
        self._thdpool.add_1job(_cleanup_context, context._send_queue, peerinfo)
Ejemplo n.º 9
0
Archivo: mail.py Proyecto: zfq308/CUP
 def _handle_attachments(cls, outer, attachments):
     """
     Attach one or more files to the outgoing MIME message *outer*.

     :param outer:
         MIME container (e.g. a multipart message) receiving the parts
     :param attachments:
         a single file path, or a list/tuple of file paths; anything
         else is treated as "no attachments". Paths that are not files
         are logged and skipped; attach failures are logged and skipped.
     """
     # fix: isinstance instead of exact type() comparison so str/list
     # subclasses work; also accept tuples of paths (backward compatible)
     if isinstance(attachments, str):
         attrs = [attachments]
     elif isinstance(attachments, (list, tuple)):
         attrs = list(attachments)
     else:
         attrs = []
     for attached in attrs:
         if not os.path.isfile(attached):
             log.warn('attached is not a file:%s' % attached)
             continue
         # Guess the content type based on the file's extension.  Encoding
         # will be ignored, although we should check for simple things like
         # gzip'd or compressed files.
         ctype, encoding = mimetypes.guess_type(attached)
         if ctype is None or encoding is not None:
             # No guess could be made, or the file is encoded (compressed)
             # use a generic bag-of-bits type.
             ctype = 'application/octet-stream'
         maintype, subtype = ctype.split('/', 1)
         try:
             if maintype == 'text':
                 with open(attached) as fhandle:
                     # Note: we should handle calculating the charset
                     msg = text.MIMEText(fhandle.read(), _subtype=subtype)
             elif maintype == 'image':
                 with open(attached, 'rb') as fhandle:
                     imgid = os.path.basename(attached)
                     msg = image.MIMEImage(fhandle.read(), _subtype=subtype)
                     msg.add_header('Content-ID', imgid)
             elif maintype == 'audio':
                 with open(attached, 'rb') as fhandle:
                     msg = audio.MIMEAudio(fhandle.read(), _subtype=subtype)
             else:
                 with open(attached, 'rb') as fhandle:
                     msg = base.MIMEBase(maintype, subtype)
                     msg.set_payload(fhandle.read())
                 # Encode the payload using Base64
                 encoders.encode_base64(msg)
                 # Set the filename parameter
                 msg.add_header(
                     'Content-Disposition', 'attachment',
                     filename=os.path.basename(attached)
                 )
             outer.attach(msg)
         # pylint: disable=W0703
         except Exception as exception:
             log.warn(
                 'failed to attach %s, errmsg:%s. Will skip it' % (
                     attached, str(exception)
                 )
             )
Ejemplo n.º 10
0
    def next_schedtime(self, starting_fromdate=None):
        """
        return next schedule time with timezone enabled.

        :param starting_fromdate:
            timezone-aware datetime to search from; defaults to "now"
            localized to this task's timezone
        :return:
            the next localized schedule datetime, or None when no valid
            slot is found within the iteration budget
        """
        if starting_fromdate is None:
            tmp = datetime.datetime.now()
            datenow = self._pytz.localize(tmp)
        else:
            datenow = starting_fromdate
        # search starts from the NEXT minute
        tmp_dict = {
            'year': datenow.year,
            'month': datenow.month,
            'monthday': datenow.day,
            'weekday': datenow.isoweekday(),
            'hour': datenow.hour,
            'minute': datenow.minute + 1
        }
        # work on a copy so the helpers may mutate the param lists
        timer_params = copy.deepcopy(self._timer_params)
        # iteration budget: one year's worth of minutes
        maxtimes = 365 * 24 * 60
        while True:
            if tmp_dict['month'] in timer_params['month']:
                if self.check_monthday_weekday(
                    tmp_dict, timer_params
                ):
                    if tmp_dict['hour'] in timer_params['hour']:
                        if tmp_dict['minute'] in timer_params['minute']:
                            break
                        else:
                            self.next_minute(tmp_dict, self._timer_params)
                        # NOTE(review): maxtimes is only decremented on this
                        # minute branch; if month/monthday/hour never match,
                        # the loop has no visible bound -- confirm intended
                        maxtimes -= 1
                        if maxtimes < 0:
                            log.warn(
                                'No valid datetime in a year'
                                'for crontask {0}'.format(self)
                            )
                            return None
                    else:
                        self.next_hour(tmp_dict, self._timer_params)
                else:
                    self.next_monthday_weekday(tmp_dict, self._timer_params)
            else:
                # NOTE(review): this branch passes the mutable copy while the
                # other branches pass self._timer_params -- confirm asymmetry
                self.next_month(tmp_dict, timer_params)

        local_dt = self._pytz.localize(datetime.datetime(
            year=tmp_dict['year'],
            month=tmp_dict['month'],
            day=tmp_dict['monthday'],
            hour=tmp_dict['hour'],
            minute=tmp_dict['minute']
        ))
        self.set_last_schedtime(local_dt)
        return local_dt
Ejemplo n.º 11
0
    def _do_write(self, context):
        """write into interface sending buffer

        Pump queued msgs of *context* into the socket send buffer until
        the socket would block (EAGAIN/EWOULDBLOCK), the send queue
        drains, or a fatal error marks the context for destruction.

        :param context:
            connection context holding the socket and its send queue
        :return:
            the context (caller re-schedules or destroys based on state)
        """
        sock = context.get_sock()
        msg = context.try_move2next_sending_msg()
        if msg is None:
            log.debug('send queue is empty, quit the _do_write thread')
            return context
        # log.debug('To enter write loop until eagin')
        # pylint:disable=w0212
        while not self._stopsign:
            # send at most NET_RW_SIZE bytes per iteration
            data = msg.get_write_bytes(self.NET_RW_SIZE)
            log.debug('msg get_write_bytes_len to be sent: %d' % len(data))
            try:

                succ_len = sock.send(data)
                msg.seek_write(succ_len)
            except cuperr.AsyncMsgError as error:
                # seeking past msg end is benign -- the msg is finished
                log.debug('has seek out of msg len, continue')
            except socket.error as error:
                err = error.args[0]
                if err == errno.EAGAIN:
                    # kernel send buffer full -- retry on next epoll wakeup
                    log.debug('EAGAIN happend, context info %s' %
                              context.get_context_info())
                    return context
                elif err == errno.EWOULDBLOCK:
                    log.debug('EWOULDBLOCK happend, context info %s' %
                              context.get_context_info())
                    return context
                else:
                    # any other socket error is fatal for this connection
                    log.warn(
                        'Socket error happend. But its not eagin,error:%s,\
                        context info %s, errno:%s' %
                        (str(error), context.get_context_info(), err))
                    context.to_destroy()
                    break
            except Exception as error:
                log.error(
                    'Socket error happend, error:%s,  context info %s, trace:%s'
                    % (str(error), context.get_context_info(),
                       traceback.format_exc()))
                context.to_destroy()
                break
            finally:
                del data
            if msg.is_msg_already_sent():
                log.info('sent out a msg uniqid:{0}'.format(
                    async_msg.netmsg_tostring(msg)))
                # if we have successfully send out a msg. Then move to next one
                msg = context.try_move2next_sending_msg()
                if msg is None:
                    break
        return context
Ejemplo n.º 12
0
Archivo: mail.py Proyecto: yaoml/CUP
 def _handle_attachments(cls, outer, attachments):
     """
     Attach one or more files to the outgoing MIME message *outer*.

     :param outer:
         MIME container (e.g. a multipart message) receiving the parts
     :param attachments:
         a single file path, or a list/tuple of file paths; anything
         else is treated as "no attachments". Paths that are not files
         are logged and skipped; attach failures are logged and skipped.
     """
     # fix: isinstance instead of exact type() comparison so str/list
     # subclasses work; also accept tuples of paths (backward compatible)
     if isinstance(attachments, str):
         attrs = [attachments]
     elif isinstance(attachments, (list, tuple)):
         attrs = list(attachments)
     else:
         attrs = []
     for attached in attrs:
         if not os.path.isfile(attached):
             log.warn('attached is not a file:%s' % attached)
             continue
         # Guess the content type based on the file's extension.  Encoding
         # will be ignored, although we should check for simple things like
         # gzip'd or compressed files.
         ctype, encoding = mimetypes.guess_type(attached)
         if ctype is None or encoding is not None:
             # No guess could be made, or the file is encoded (compressed)
             # use a generic bag-of-bits type.
             ctype = 'application/octet-stream'
         maintype, subtype = ctype.split('/', 1)
         try:
             if maintype == 'text':
                 with open(attached) as fhandle:
                     # Note: we should handle calculating the charset
                     msg = text.MIMEText(fhandle.read(), _subtype=subtype)
             elif maintype == 'image':
                 with open(attached, 'rb') as fhandle:
                     imgid = os.path.basename(attached)
                     msg = image.MIMEImage(fhandle.read(), _subtype=subtype)
                     msg.add_header('Content-ID', imgid)
             elif maintype == 'audio':
                 with open(attached, 'rb') as fhandle:
                     msg = audio.MIMEAudio(fhandle.read(), _subtype=subtype)
             else:
                 with open(attached, 'rb') as fhandle:
                     msg = base.MIMEBase(maintype, subtype)
                     msg.set_payload(fhandle.read())
                 # Encode the payload using Base64
                 encoders.encode_base64(msg)
                 # Set the filename parameter
                 msg.add_header(
                     'Content-Disposition', 'attachment',
                     filename=os.path.basename(attached)
                 )
             outer.attach(msg)
         # pylint: disable=W0703
         except Exception as exception:
             log.warn(
                 'failed to attach %s, errmsg:%s. Will skip it' % (
                     attached, str(exception)
                 )
             )
Ejemplo n.º 13
0
    def cleanup_error_context(self, context):
        """clean up error context

        Close the context's socket, remove it from epoll and all
        bookkeeping maps, queue its send queue for draining, and recurse
        into any context registered for the same listened peer.

        :param context:
            connection context to clean up; None is silently ignored
        """
        def _cleanup_context(send_queue, peerinfo):
            """cleanup context"""
            log.debug('to cleanup socket, peer:{0}'.format(peerinfo))
            log.debug('cleanup: send_queue of socket size:{0}'.format(
                send_queue.qsize()))
            while True:
                try:
                    item = send_queue.get_nowait()
                    msg = item[2]
                    del msg
                except queue.Empty:
                    break

        if context is None:
            return
        # fix: pre-assign peerinfo -- if get_peerinfo() raises inside the
        # try block, the add_1job call below used to hit a NameError on an
        # unbound local
        peerinfo = None
        try:
            peerinfo = context.get_peerinfo()
            log.info('handle socket reset by peer, to close the socket:%s:%s' %
                     (peerinfo[0], peerinfo[1]))
            # NOTE(review): if anything raises between acquire and release,
            # the rwlock stays held -- confirm against the rwlock API
            self._rwlock.acquire_readlock()
            fileno_peer = self._context2fileno_peer[context]
            self._rwlock.release_readlock()
            try:
                sock = context.get_sock()
                sock.close()
                context.set_sock(None)
            except socket.error as error:
                log.info('failed to close the socket, err_msg:%s' % str(error))
            except Exception as error:
                log.warn('failed to close socket:{0}'.format(error))
            try:
                self._epoll.unregister(fileno_peer[0])
            except Exception as error:  # pylint: disable=W0703
                log.warn('epoll unregister error:%s, peerinfo:%s' %
                         (str(error), str(fileno_peer[1])))
            self._rwlock.acquire_writelock()
            del self._fileno2context[fileno_peer[0]]
            del self._peer2context[fileno_peer[1]]
            del self._context2fileno_peer[context]
            self._rwlock.release_writelock()
            log.info('socket {0} closed successfully'.format(peerinfo))
        except Exception as error:  # pylint: disable=W0703
            # fix: log instead of silently swallowing the failure
            log.warn('cleanup_error_context error:{0}'.format(error))
        # pylint: disable=W0212
        self._thdpool.add_1job(_cleanup_context, context._send_queue, peerinfo)
        listened_peer = context.get_listened_peer()
        if listened_peer is not None and (listened_peer in self._peer2context):
            log.info('clean up socket: this socket has listened peer {0}, will'
                     ' clean up it as well.'.format(listened_peer))
            self.cleanup_error_context(self._peer2context[listened_peer])
Ejemplo n.º 14
0
 def default_handle(self, msg):  # pylint: disable=W0613,R0201
     """
     Fallback handler for msgs nobody registered a handler for.

     ACK-flagged msgs are silently ignored; anything else is logged as
     unhandled and dropped.
     """
     ackflag = async_msg.MSG_FLAG2NUM['FLAG_ACK']
     if ackflag & msg.get_flag() != ackflag:
         log.warn('got a msg that you cannot hanlde, default will skip it. '
                  'msg received, type:%d, flag:%d, from:%s, uniqid:%d' %
                  (msg.get_msg_type(), msg.get_flag(),
                   str(msg.get_from_addr()), msg.get_uniq_id()))
         del msg
Ejemplo n.º 15
0
 def get_lost(self):
     """
     get lost devices

     A device is lost when its last-healthy timestamp is older than the
     configured threshold. Lost devices are removed from the healthy set
     and, when a lost-device registry exists, recorded there too.

     :return:
         list of lost device objects (possibly empty)
     """
     now = time.time()
     lost_devices = []
     # fix: iterate over a snapshot of the keys -- deleting from the dict
     # while iterating its live key view raises RuntimeError on Python 3
     for dkey in list(self._devices.keys()):
         device = self._devices[dkey]
         if now - device.get_last_healthy() > self._judge_lost:
             if self._lost_devices is not None:
                 self._lost_devices[dkey] = device
             del self._devices[dkey]
             lost_devices.append(device)
             log.warn('heartbeat lost, device:%s' % dkey)
     return lost_devices
Ejemplo n.º 16
0
 def refresh(self, key):
     """
     Mark the device registered under *key* as healthy right now.

     :param key:
         string key of the device to refresh
     :return:
         False when no device is registered under the key, True otherwise
     """
     assert type(key) == str, 'needs to be a str'
     device = self._devices.get(key)
     if device is not None:
         device.set_last_healthy()
         return True
     log.warn('Device not found, key:%s' % key)
     return False
Ejemplo n.º 17
0
 def get_lost(self):
     """
     get lost devices

     A device is lost when its last-healthy timestamp is older than the
     configured threshold. Lost devices are removed from the healthy set
     and, when a lost-device registry exists, recorded there too.

     :return:
         list of lost device objects (possibly empty)
     """
     now = time.time()
     lost_devices = []
     # fix: iterate over a snapshot of the keys -- deleting from the dict
     # while iterating its live key view raises RuntimeError on Python 3
     for dkey in list(self._devices.keys()):
         device = self._devices[dkey]
         if now - device.get_last_healthy() > self._judge_lost:
             if self._lost_devices is not None:
                 self._lost_devices[dkey] = device
             del self._devices[dkey]
             lost_devices.append(device)
             log.warn('heartbeat lost, device:%s' % dkey)
     return lost_devices
Ejemplo n.º 18
0
 def do_recv_data(self, data, data_len):
     """
     push data into the recving_msg queue
     network read should be in 1 thread only.

     :param data:
         bytes just read off the socket
     :param data_len:
         length of *data*; any bytes beyond one msg boundary are fed
         back into this method recursively
     :raises cup.err.NotInitialized:
         if no receiving msg object has been set up yet
     """
     if self._recving_msg is None:
         raise cup.err.NotInitialized('self._recving_msg')
     # push_data returns how many bytes of *data* belong to the current
     # msg, or a negative value when the msg is malformed
     ret = self._recving_msg.push_data(data)
     #  log.debug('pushed data length: %d' % ret)
     if ret < 0:
         log.warn(
             'receive an wrong socket msg, to close the peer:{0}'.format(
                 self._sock.get_peerinfo()
             )
         )
         self.to_destroy()
         return
     if data_len >= ret:
         if self._recving_msg.is_recvmsg_complete():
             # log.debug(
             #     'get a msg: msg_type:%d, msg_len:%d, msg_flag:%d,'
             #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
             #     (
             #         self._recving_msg.get_msg_type(),
             #         self._recving_msg.get_msg_len(),
             #         self._recving_msg.get_flag(),
             #         str(self._recving_msg.get_from_addr()),
             #         str(self._recving_msg.get_to_addr()),
             #         self._recving_msg.get_uniq_id()
             #     )
             # )
             self._conn.get_recv_queue().put(
                 (self._recving_msg.get_flag(), self._recving_msg)
             )
             # crude backpressure: stall the reader while consumers lag
             if self._conn.get_recv_queue().qsize() >= 500:
                 time.sleep(0.1)
             self._recving_msg = self.get_recving_msg()
         #  the pushed data should span on two msg datas
         if data_len > ret:
             return self.do_recv_data(data[ret:], (data_len - ret))
     else:
         log.error(
             'Socket error. We cannot get more than pushed data length'
         )
         # NOTE(review): assert is stripped under python -O; consider
         # raising an exception here instead
         assert False
     return
Ejemplo n.º 19
0
 def __exec_worker(self, check_interval, func_queue, worker_name=''):
     """
     Worker loop: pop ``(priority, (function, data))`` items off
     *func_queue* and execute them until the service status becomes 2.

     :param check_interval:
         queue.get timeout in seconds between status re-checks
     :param func_queue:
         queue of ``(priority, (function, data))`` items
     :param worker_name:
         label used in log lines
     """
     while self.__status != 2:
         try:
             item = func_queue.get(timeout=check_interval)
         except queue.Empty:
             log.debug('no item found in exec queue')
             continue
         # fix: pre-assign so the except clause below cannot raise a
         # NameError when unpacking the item itself fails
         function = data = None
         try:
             _, (function, data) = item
             function(data)
         # pylint: disable=W0703
         # we can NOT predict the exception type
         except Exception as error:
             log.warn(
                 '%s worker encountered exception:%s, func:%s, data:%s' %
                 (worker_name, error, function, data))
     log.info('%s worker thread exited as the service is stopping' %
              worker_name)
Ejemplo n.º 20
0
 def set_linux_res_bydict(self, info_dict):
     """
     Update known linux resource fields from *info_dict*.

     Unknown keys are logged and skipped. Expected shape::

         {
             'iface': 'eth0',
             'ipaddr': '10.10.10.1',
             'port':   8089,
             'cpu_idle': 50,
             'mem_inuse': 1024,        # MB
             'mem_total': 8192,
             'net_in':    8192,        # kb
             'net_out':   102400,      # kb
         }
     """
     for key, value in info_dict.items():
         if key in self._dict_info:
             self._dict_info[key] = value
             log.debug('linux info:%s updated, %s' % (key, value))
         else:
             log.warn('does not have this key:%s, ignore' % key)
Ejemplo n.º 21
0
 def close_socket(self, msg, recv_socket):
     """
     close socket by msg

     :param msg:
         netmsg whose peer address identifies the connection to close
     :param recv_socket:
         truthy -> use the msg's from-address, falsy -> its to-address
     """
     peer = None
     try:
         addr = msg.get_from_addr() if recv_socket else msg.get_to_addr()
         peer = addr[0]
         context = self._peer2context.get(peer)
         if context is None:
             log.warn('conn manager close socket failed:{0}'.format(peer))
         else:
             self.cleanup_error_context(context)
     except Exception as err:
         log.warn('failed to close socket:{1}, recv_socket:{0}'.format(
             recv_socket, err))
     return
Ejemplo n.º 22
0
 def set_linux_res_bydict(self, info_dict):
     """
     Update known linux resource fields from *info_dict*.

     Unknown keys are logged and skipped. Expected shape::

         {
             'iface': 'eth0',
             'ipaddr': '10.10.10.1',
             'port':   8089,
             'cpu_idle': 50,
             'mem_inuse': 1024,        # MB
             'mem_total': 8192,
             'net_in':    8192,        # kb
             'net_out':   102400,      # kb
         }
     """
     for key, value in info_dict.items():
         if key in self._dict_info:
             self._dict_info[key] = value
             log.debug('linux info:%s updated, %s' % (key, value))
         else:
             log.warn('does not have this key:%s, ignore' % key)
Ejemplo n.º 23
0
    def schedule(self, crontask):
        """schedule the crontask

        :param crontask:
            task whose ``next_schedtime()`` decides when it runs; its
            function/args are queued for delayed execution and the task
            is registered in the task dict. Tasks without a valid next
            schedule time are logged and dropped.
        """
        next_schedtime = crontask.next_schedtime()
        if next_schedtime is None:
            log.warn(
                'CronExecution:crontask {0} will be deleted '
                'from the crontask as '
                'no valid schedule time is found'.format(crontask)
            )
            # fix: must stop here -- the original fell through and crashed
            # on (None - tmpnow) below
            return
        function, args, kwargs = crontask.get_funcargs()
        tmpnow = crontask.pytz_timezone().localize(datetime.datetime.now())
        wait_seconds = (next_schedtime - tmpnow).total_seconds()
        log.info(
            'CronExecution: next schedule time for this crontask is {0} '
            'timezone {1}, wait for {2} seconds, timenwo is {3}'.format(
                next_schedtime, next_schedtime.tzinfo,
                wait_seconds,
                next_schedtime.tzinfo.localize(datetime.datetime.now())
            )
        )
        # pylint: disable=W0142
        self.delay_exec(
            wait_seconds, crontask, function, URGENCY_NORMAL, *args, **kwargs
        )
        self._task_dict[crontask.taskid()] = crontask
Ejemplo n.º 24
0
 def _do_read(self, context):
     """
     Drain the socket of *context* until it would block, feeding every
     received chunk into context.do_recv_data.

     :param context:
         connection context holding the socket
     :return:
         the context; it is marked for destruction on fatal socket
         errors or when the peer closed the connection (0-byte recv)
     """
     sock = context.get_sock()
     data = None
     # ensure a receiving msg object exists before the first chunk
     context.get_recving_msg()
     while self._stopsign is not True:
         try:
             data = sock.recv(self.NET_RW_SIZE)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 # no more data for now -- wait for the next epoll wakeup
                 log.debug(
                     'EAGAIN happend, peer info %s' %
                     context.get_context_info()
                 )
                 return context
             elif err == errno.EWOULDBLOCK:
                 log.info(
                     'EWOULDBLOCK happend, context info %s' %
                     context.get_context_info()
                 )
                 return context
             else:
                 # any other socket error is fatal for this connection
                 log.warn(
                     'Socket error happend, error:%s,  peer info %s' %
                     (str(error), context.get_context_info())
                 )
                 context.to_destroy()
                 return context
         except Exception as error:
             log.critical(
                 'Socket error happend, error:%s,  peer info %s' %
                 (str(error), context.get_context_info())
             )
             context.to_destroy()
             return context
         data_len = len(data)
         if data_len == 0:
             # socket closed by peer
             context.to_destroy()
             return context
         context.do_recv_data(data, data_len)
         del data
Ejemplo n.º 25
0
 def test_abc(self):
     """test network speed of cup.net.async by flooding HEART_BEAT msgs"""
     if self._status.get_status() != self._status.RUNNING:
         log.warn('control service is not running, stop heartbeat')
         return
     # 128 KB dummy body standing in for serialized host info
     payload = 'a' * 128 * 1024
     while self._status.get_status() == self._status.RUNNING:
         netmsg = msg.CNetMsg(is_postmsg=True)
         netmsg.set_from_addr(self._agent_ipport, (1, 1))
         netmsg.set_to_addr(self._master_ipport, (1, 1))
         netmsg.set_flag(self._last_heartbeat)
         netmsg.set_msg_type(self._type_man.getnumber_bytype('HEART_BEAT'))
         netmsg.set_uniq_id(1)
         netmsg.set_body(payload)
         self.post_msg(netmsg)
Ejemplo n.º 26
0
 def test_abc(self):
     """test network speed of cup.net.async by flooding HEART_BEAT msgs"""
     if self._status.get_status() != self._status.RUNNING:
         log.warn('control service is not running, stop heartbeat')
         return
     # 128 KB dummy body standing in for serialized host info
     payload = 'a' * 128 * 1024
     while self._status.get_status() == self._status.RUNNING:
         netmsg = msg.CNetMsg(is_postmsg=True)
         netmsg.set_from_addr(self._agent_ipport, (1, 1))
         netmsg.set_to_addr(self._master_ipport, (1, 1))
         netmsg.set_flag(self._last_heartbeat)
         netmsg.set_msg_type(self._type_man.getnumber_bytype('HEART_BEAT'))
         netmsg.set_uniq_id(1)
         netmsg.set_body(payload)
         self.post_msg(netmsg)
Ejemplo n.º 27
0
    def read(self, context):
        """
        read with conn context
        """
        # a context already scheduled for teardown must not be read from
        if context.is_detroying():
            log.debug('The context is being destroyed. return')
            return
        # allow only a single reader per context at a time
        if not context.try_readlock():
            return

        try:
            self._do_read(context)
            self._finish_read_callback(True, context)
        except Exception as err:
            # any failure tears the connection down and notifies the callback
            context.to_destroy()
            log.info('read error occur, error type:{0}, content:{1}'.format(
                type(err), err))
            self.cleanup_error_context(context)
            log.warn(traceback.format_exc())
            self._finish_read_callback(False, context)
Ejemplo n.º 28
0
 def __exec_worker(self, check_interval, func_queue, worker_name=''):
     """drain func_queue until the service status switches to stopped (2)"""
     while self.__status != 2:
         try:
             queued = func_queue.get(timeout=check_interval)
         except queue.Empty:
             log.debug('no item found in exec queue')
             continue
         try:
             _, (func, payload) = queued
             func(payload)
         # pylint: disable=W0703
         # we can NOT predict the exception type
         except Exception as error:
             log.warn(
                 '%s worker encountered exception:%s, func:%s, data:%s' %
                 (worker_name, error, func, payload)
             )
     log.info(
         '%s worker thread exited as the service is stopping' % worker_name
     )
Ejemplo n.º 29
0
 def default_handle(self, msg):  # pylint: disable=W0613,R0201
     """
     default handle for msgcenter
     """
     ackflag = async_msg.MSG_FLAG2NUM['FLAG_ACK']
     # ACK replies need no handling; anything else is unexpected here
     if msg.get_flag() & ackflag != ackflag:
         log.warn(
             'got a msg that you cannot hanlde, default will skip it. '
             'msg received, type:%d, flag:%d, from:%s, uniqid:%d' %
             (
                 msg.get_msg_type(),
                 msg.get_flag(),
                 str(msg.get_from_addr()),
                 msg.get_uniq_id()
             )
         )
         del msg
Ejemplo n.º 30
0
    def cleanup_oldlost(self, dump_file=None):
        """
        cleanup old lost devices.

        :param dump_file:
            if dump_file is not None, we will store devices info into dump_file
            Otherwise, we will cleanup the lost devices only.
        """
        self._lock.acquire()
        # bugfix: release the lock via try/finally so an unexpected error
        # (e.g. from write_conf) can no longer leave it held forever
        try:
            log.info('start - empty_lost devices, dump_file:%s' % dump_file)
            if self._lost_devices is None:
                log.info('end - does not keep_lost devices, return')
                return
            if dump_file is None:
                self._lost_devices = {}
                log.info('end - does not have dump_file, return')
                return
            info_dict = {}
            info_dict['devices'] = {}
            if len(self._lost_devices) != 0:
                info_dict['devices']['lost'] = []
                info_dict['devices']['lost_num'] = len(self._lost_devices)
            else:
                info_dict['devices']['lost_num'] = 0
            # bugfix: iterate a snapshot of the keys -- deleting entries
            # while iterating the live .keys() view raises
            # "dictionary changed size during iteration" on Python 3
            for dkey in list(self._lost_devices.keys()):
                try:
                    tmp_dict = {}
                    tmp_dict['key'] = dkey
                    tmp_dict['last_healthy'] = self._devices[
                        dkey].get_last_healthy()
                    del self._lost_devices[dkey]
                    log.info('end - empty_lost devices')
                    info_dict['devices']['lost'].append(tmp_dict)
                except KeyError as error:
                    log.warn('failed to dump lost_file, error:%s' % str(error))
            conf_writer = conf.Dict2Configure(info_dict)
            conf_writer.write_conf(dump_file)
            return
        finally:
            self._lock.release()
Ejemplo n.º 31
0
 def refresh(self, key, resource=None):
     """
     Refresh the last_healthy timestamp (and optionally the resource)
     of the device registered under *key*.

     :param key:
         refresh the device by key
     :param resource:
         if not None, also refresh the device's resource info
     :return:
         if key does not exist, return False
         else, fresh the last_healthy time of the device and return True
     """
     assert isinstance(key, str), 'needs to be a str'
     device = self._devices.get(key)
     if device is None:
         log.warn('Device not found, key:%s' % key)
         return False
     device.set_last_healthy()
     if resource is not None:
         device.refresh_resouce(resource)
         # bugfix: the original format strings below had a %s placeholder
         # but no argument bound, so the literal '%s' ended up in the log
         log.debug('Heartbeat: Device %s refreshed with resource. ' % key)
     else:
         log.debug('Heartbeat: Device %s only refreshed with heartbeat. '
                   'Resource not refreshed' % key)
     return True
Ejemplo n.º 32
0
 def _send_heartbeat_loop(self):
     """collect host info, queue one heartbeat msg, then reschedule itself"""
     if self._status.get_status() != self._status.RUNNING:
         log.warn('control service will stop. stop sending heartbeat')
         return
     hostinfo = cuphb.LinuxHost(
         str(self._agent_ipport), True,
         self._confdict['control']['interface']
     )
     log.info('to create msg and send msg')
     heartbeat = msg.CNetMsg(is_postmsg=True)
     heartbeat.set_from_addr(self._agent_ipport, (1, 1))
     heartbeat.set_to_addr(self._master_ipport, (1, 1))
     heartbeat.set_flag(1)
     heartbeat.set_msg_type(self._type_man.getnumber_bytype('HEART_BEAT'))
     heartbeat.set_uniq_id(1)
     heartbeat.set_body(hostinfo.serilize())
     self.post_msg(heartbeat)
     log.info('finish queue sending heartbeat to {0}'.format(
         self._master_ipport))
     # re-arm 3s ahead of the configured interval so beats never lapse
     self._executor.delay_exec(
         int(self._confdict['control']['heartbeat_interval']) - 3,
         self._send_heartbeat_loop,
         urgency=executor.URGENCY_HIGH)
Ejemplo n.º 33
0
 def exec_worker(self, check_interval, func_queue, worker_name=''):
     """pull (priority, crontask, callable) items off func_queue until stopped"""
     log.info('CronExecution exec worker started')
     while self._status != 2:
         try:
             queued = func_queue.get(timeout=check_interval)
         except queue.Empty:
             continue
         func = None
         argvs = None
         kwargs = None
         try:
             _, crontask, (func, argvs, kwargs) = queued
             # pylint: disable=W0142
             if func_queue is self._delay_queue:
                 log.debug('to delay exec func:{0}'.format(func))
             now = datetime.datetime.now(crontask.pytz_timezone())
             lag_seconds = (now - crontask.get_last_schedtime()).total_seconds()
             # warn when a task runs more than a minute behind schedule
             if lag_seconds > 60:
                 log.warn(
                     'lagging crontask found (name:{0} id: {1})'.format(
                         crontask.name(), crontask.taskid()))
             func(*argvs, **kwargs)
             self.schedule(crontask)
         # pylint: disable=W0703
         # we can NOT predict the exception type
         except Exception as error:
             log.warn('{0} worker encountered exception:{1}, func:{2},'
                      'args:{3} {4} , executor service({5})'.format(
                          worker_name, error, func, argvs, kwargs,
                          self._name))
             log.warn('error type:{0}'.format(type(error)))
     log.debug('{0} worker thread exited as the service '
               'is stopping'.format(worker_name))
Ejemplo n.º 34
0
 def __exec_worker(self, check_interval, func_queue, worker_name=''):
     """run queued (function, args, kwargs) items until status becomes 2"""
     while self.__status != 2:
         try:
             queued = func_queue.get(timeout=check_interval)
         except queue.Empty:
             # log.debug('no item found in exec queue')
             continue
         try:
             _, (function, argvs, kwargs) = queued
             # pylint: disable=W0142
             if func_queue is self.__delay_queue:
                 log.debug('to delay exec func:{0}'.format(function))
             function(*argvs, **kwargs)
         # pylint: disable=W0703
         # we can NOT predict the exception type
         except Exception as error:
             log.warn(
                 '%s worker encountered exception:%s, func:%s, args:%s' %
                 (worker_name, error, function, kwargs)
             )
             log.warn('error type:{0}'.format(type(error)))
             log.warn(traceback.format_exc())
     log.debug(
         '%s worker thread exited as the service is stopping' % worker_name
     )
Ejemplo n.º 35
0
Archivo: conn.py Proyecto: zfq308/CUP
 def connect(self, peer):
     """
     Connect to a peer and return a non-blocking socket.

     :param peer:
         ip:port
     :return:
         the connected socket on success, None on any failure
     """
     log.info('to connect to peer:{0}'.format(peer))
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     self._set_sock_params(sock)
     try:
         ret = sock.connect_ex(peer)
         if ret != 0:
             log.warn('connect failed, peer:{0}'.format(peer))
             # bugfix: the socket used to leak on this path
             sock.close()
             return None
         # a socket connected to its own address means we accidentally
         # connected to ourselves (simultaneous-connect corner case)
         if sock.getpeername() == sock.getsockname():
             log.warn('connect failed, seems connected to self')
             sock.close()
             return None
         self._set_sock_nonblocking(sock)
         log.info('connect peer success')
         return sock
     except socket.error as error:
         log.warn(
             'failed to connect to %s:%s. Error:%s' %
             (peer[0], peer[1], str(error))
         )
         sock.close()
         return None
     # note: the original had an unreachable try/else branch here
     # (every non-exception path already returns inside the try)
Ejemplo n.º 36
0
 def connect(self, peer):
     """
     Connect to a peer and return a non-blocking socket.

     :param peer:
         ip:port
     :return:
         the connected socket on success, None on any failure
     """
     log.info('to connect to peer:{0}'.format(peer))
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     self._set_sock_params(sock)
     try:
         ret = sock.connect_ex(peer)
         if ret != 0:
             log.warn('connect failed, peer:{0}'.format(peer))
             # bugfix: the socket used to leak on this path
             sock.close()
             return None
         # same local and peer address => connected to self; reject it
         if sock.getpeername() == sock.getsockname():
             log.warn('connect failed, seems connected to self')
             sock.close()
             return None
         self._set_sock_nonblocking(sock)
         return sock
     except socket.error as error:
         log.warn('failed to connect to %s:%s. Error:%s' %
                  (peer[0], peer[1], str(error)))
         sock.close()
         return None
     # note: the original had an unreachable try/else branch here
     # (every non-exception path already returns inside the try)
Ejemplo n.º 37
0
 def __exec_worker(self, check_interval, func_queue, worker_name=''):
     """execute queued jobs in a loop; exit when the service is stopping"""
     while self.__status != 2:
         try:
             item = func_queue.get(timeout=check_interval)
         except queue.Empty:
             # log.debug('no item found in exec queue')
             continue
         try:
             _, payload = item
             function, argvs, kwargs = payload
             # pylint: disable=W0142
             if func_queue is self.__delay_queue:
                 log.debug('to delay exec func:{0}'.format(function))
             function(*argvs, **kwargs)
         # pylint: disable=W0703
         # we can NOT predict the exception type
         except Exception as error:
             log.warn(
                 '%s worker encountered exception:%s, func:%s, args:%s' %
                 (worker_name, error, function, kwargs)
             )
             log.warn('error type:{0}'.format(type(error)))
             log.warn(traceback.format_exc())
     log.info(
         '%s worker thread exited as the service is stopping' % worker_name
     )
Ejemplo n.º 38
0
    def _worker(self):
        """
        Thread-pool worker loop.

        Pulls jobs off self._jobqueue until the stop sentinel is seen.
        Each job is (context, function, args, kwargs, result_callback);
        the function runs inside its tracked context, and the callback
        (if any) receives (success, result). The worker registers itself
        in self._waiters while blocked on the queue and in self._working
        while running a job.
        """
        current_thd = self._CURRENT_THREAD()
        with self._worker_state(self._waiters, current_thd):
            job = self._jobqueue.get()

        while job is not self._WORKER_STOP_SIGN:
            with self._worker_state(self._working, current_thd):
                # pylint: disable=W0621
                context, function, args, kwargs, result_callback = job
                # drop the job tuple early so its payload can be collected
                del job
                try:
                    # pylint: disable=W0142
                    result = _CONTEXT_TRACKER.call_with_context(
                        context, function, *args, **kwargs
                    )
                    success = True
                except Exception as error:
                    success = False
                    log.warn(
                        'Func failed, func:%s, error_msg: %s'  %
                        (str(function), str(error))
                    )
                    if result_callback is None:
                        # no callback to report the error to -- log the
                        # failing context instead
                        log.warn('This func does not have callback.')
                        _CONTEXT_TRACKER.call_with_context(
                            context, self._log_err_context, context
                        )
                        result = None
                    else:
                        # let the callback decide what to do with the error
                        result = error

                del function, args, kwargs
            # when out of  "with scope",
            # the self._working will remove the thread from
            # its self._working list
            if result_callback is not None:
                try:
                    _CONTEXT_TRACKER.call_with_context(
                        context, result_callback, success, result
                    )
                except Exception as e:
                    # a failing callback must not kill the worker thread
                    log.warn(
                        'result_callback func failed, callback func:%s,'
                        'err_msg:%s' % (str(result_callback), str(e))
                    )
                    _CONTEXT_TRACKER.call_with_context(
                        context, self._log_err_context, context
                    )
            del context, result_callback, result

            with self._worker_state(self._waiters, current_thd):
                job = self._jobqueue.get()
            # after with statements, self._waiters will remove current_thd

        # remove this thread from the list
        self._threads.remove(current_thd)
Ejemplo n.º 39
0
    def _try_read_one_log(self, stream):
        """
        Read a single log record from the given stream.

        :return:
            a (status, record) tuple -- (LOGFILE_EOF, None) at end of
            file, (LOGFILE_BAD_RECORD, None) on a truncated or corrupt
            record, (LOGFILE_GOOD, LogRecord) on success
        """
        if stream is None:
            return (LOGFILE_EOF, None)
        try:
            raw_len = stream.read(self._record_lenbytes)
            if len(raw_len) == 0:
                return (LOGFILE_EOF, None)
            if len(raw_len) < self._record_lenbytes:
                log.warn('got a bad log from stream:{0}'.format(stream.name))
                return (LOGFILE_BAD_RECORD, None)
            datalen = self.convert_bytes2uint(raw_len)
            payload = stream.read(datalen)
            if len(payload) < datalen:
                log.warn(
                    'got less than data len from stream:{0}'.format(
                        stream.name)
                )
                return (LOGFILE_BAD_RECORD, None)
            # record layout: 16B logid | 2B type | 2B mode | binary body
            record = LogRecord(
                datalen,
                self.convert_bytes2uint(payload[0: 16]),
                self.convert_bytes2uint(payload[16: 16 + 2]),
                self.convert_bytes2uint(payload[18: 18 + 2]),
                payload[20:]
            )
            return (LOGFILE_GOOD, record)
        except Exception as err:
            log.error('failed to parse log record:{0}'.format(err))
            log.error(traceback.format_exc())
            return (LOGFILE_BAD_RECORD, None)
Ejemplo n.º 40
0
 def _send_heartbeat_loop(self):
     """build a heartbeat msg from current host info, post it, reschedule"""
     if self._status.get_status() != self._status.RUNNING:
         log.warn('control service will stop. stop sending heartbeat')
         return
     hostinfo = cuphb.LinuxHost(
         str(self._agent_ipport), True,
         self._confdict['control']['interface']
     )
     log.info('to create msg and send msg')
     netmsg = msg.CNetMsg(is_postmsg=True)
     netmsg.set_from_addr(self._agent_ipport, (1, 1))
     netmsg.set_to_addr(self._master_ipport, (1, 1))
     netmsg.set_flag(1)
     netmsg.set_msg_type(self._type_man.getnumber_bytype('HEART_BEAT'))
     netmsg.set_uniq_id(1)
     netmsg.set_body(hostinfo.serilize())
     self.post_msg(netmsg)
     log.info('finish queue sending heartbeat to {0}'.format(self._master_ipport))
     interval = int(self._confdict['control']['heartbeat_interval'])
     # schedule the next beat 3s before the interval elapses
     self._executor.delay_exec(
         interval - 3,
         self._send_heartbeat_loop,
         urgency=executor.URGENCY_HIGH
     )
Ejemplo n.º 41
0
 def _finish_write_callback(self, succ, result):
     """
     Finish-write callback: release the context's write lock and, if the
     context is being torn down, clean it up.
     """
     context = result
     # You cannot do things below as getpeername will block if the conn
     # has problem!!!!!   - Guannan
     # try:
     #     context.get_sock().getpeername()
     # except socket.error as error:
     #   log.debug('Seems socket failed to getpeername:%s' % str(error))
     #   context.to_destroy()
     if context is None:
         # bugfix: the original fell through to the else branch and
         # crashed on context.release_writelock() when context was None
         return
     if context.is_detroying():
         # destroy the context and socket
         context.release_writelock()
         try:
             self.cleanup_error_context(context)
         # pylint: disable=W0703
         except Exception as error:
             log.warn('context destroying encounters error,'
                      'skip it:{0}'.format(error))
     else:
         # bugfix: dropped the unused `epoll_write_params` local; its
         # value was never used here (leftover of the commented-out
         # 'to epoll modify' code above)
         context.release_writelock()
Ejemplo n.º 42
0
    def _try_read_one_log(self, stream):
        """
        Read one log record from the stream.

        :return:
            (LOGFILE_EOF, None) at end of file,
            (LOGFILE_BAD_RECORD, None) on a truncated/corrupt record,
            (LOGFILE_GOOD, LogRecord) on success
        """
        if stream is None:
            # bugfix: was a bare LOGFILE_EOF while other paths return tuples
            return (LOGFILE_EOF, None)
        str_datalen = datalen = str_data = None
        try:
            str_datalen = stream.read(4)
            if len(str_datalen) == 0:
                return (LOGFILE_EOF, None)
            if len(str_datalen) < 4:
                log.warn('got a bad log from stream:{0}'.format(stream.name))
                # bugfix: was a bare LOGFILE_BAD_RECORD
                return (LOGFILE_BAD_RECORD, None)
            datalen = self.convert_bytes2uint(str_datalen)
            str_data = stream.read(datalen)
            if len(str_data) < datalen:
                log.warn(
                    'got less than data len from stream:{0}'.format(
                        stream.name)
                )
                return (LOGFILE_BAD_RECORD, None)
            # record layout: 16B logid | 2B type | 2B mode | raw binary body
            log_id = self.convert_bytes2uint(str_data[0:16])
            # bugfix: slices were [16:2] and [18:2], which are always empty
            log_type = self.convert_bytes2uint(str_data[16:18])
            log_mode = self.convert_bytes2uint(str_data[18:20])
            # bugfix: the body is raw binary and must not be converted to uint
            log_binary = str_data[20:]
            return (
                LOGFILE_GOOD, LogRecord(
                    datalen, log_id, log_type, log_mode, log_binary
                )
            )
        except Exception as err:
            log.error('failed to parse log record:{0}'.format(err))
            # bugfix: was a bare LOGFILE_BAD_RECORD
            return (LOGFILE_BAD_RECORD, None)
Ejemplo n.º 43
0
    def _do_open4read(self, start_logid=-1):
        """
        Open the load stream for reading.

        :param start_logid:
            -1 means start from the oldest buffered log file.
            Reading from an arbitrary logid is still a TODO (the original
            commented-out implementation was removed for clarity).
        :return:
            True when a stream was opened, False otherwise
        """
        load_dir = self._get_storage_dir(logid=self._logid)
        self._buffer_files = self._get_ordered_logfiles(load_dir)
        to_open = None
        if len(self._buffer_files) <= 0:
            log.warn('does not have any log record yet')
            # bugfix: used to fall through and implicitly return None
            return False
        if -1 == start_logid:
            to_open = self._buffer_files[0]
        if to_open is None:
            # bugfix: previously built the bogus path 'dir/None' and relied
            # on the resulting open() failure to report an error
            return False
        try:
            fname = '{0}/{1}'.format(load_dir, to_open)
            self._load_stream = open(fname, 'rb')
            return True
        # pylint:disable=W0703
        # need such an exception
        except Exception as err:
            log.error('failed to open log stream :{0}'.format(err))
            return False
Ejemplo n.º 44
0
    def cleanup_oldlost(self, dump_file=None):
        """
        cleanup old lost devices.

        :param dump_file:
            if dump_file is not None, we will store devices info into dump_file
            Otherwise, we will cleanup the lost devices only.
        """
        log.info('start - empty_lost devices, dump_file:%s' % dump_file)
        if self._lost_devices is None:
            log.info('end - does not keep_lost devices, return')
            return
        if dump_file is None:
            self._lost_devices = {}
            log.info('end - does not have dump_file, return')
            return
        info_dict = {}
        info_dict['devices'] = {}
        if len(self._lost_devices) != 0:
            info_dict['devices']['lost'] = []
            info_dict['devices']['lost_num'] = len(self._lost_devices)
        else:
            info_dict['devices']['lost_num'] = 0
        # bugfix: iterate a snapshot of the keys -- deleting entries while
        # iterating the live .keys() view raises
        # "dictionary changed size during iteration" on Python 3
        for dkey in list(self._lost_devices.keys()):
            try:
                tmp_dict = {}
                tmp_dict['key'] = dkey
                tmp_dict['last_healthy'] = self._devices[dkey].get_last_healthy(
                )
                del self._lost_devices[dkey]
                log.info('end - empty_lost devices')
                info_dict['devices']['lost'].append(tmp_dict)
            except KeyError as error:
                log.warn('failed to dump lost_file, error:%s' % str(error))
        conf_writer = conf.Dict2Configure(info_dict)
        conf_writer.write_conf(dump_file)
        return
Ejemplo n.º 45
0
    def _do_open4read(self, start_logid=-1):
        """
        Open the load stream for reading.

        :param start_logid:
            -1 means start from the oldest buffered log file.
            Reading from an arbitrary logid is still a TODO (the original
            commented-out implementation was removed for clarity).
        :return:
            True when a stream was opened, False otherwise
        """
        load_dir = self._get_storage_dir(logid=self._logid)
        self._buffer_files = self._get_ordered_logfiles(load_dir)
        to_open = None
        if len(self._buffer_files) <= 0:
            log.warn('does not have any log record yet')
            # bugfix: used to fall through and implicitly return None
            return False
        if -1 == start_logid:
            to_open = self._buffer_files[0]
        if to_open is None:
            # bugfix: previously built the bogus path 'dir/None' and relied
            # on the resulting open() failure to report an error
            return False
        try:
            fname = '{0}/{1}'.format(load_dir, to_open)
            self._load_stream = open(fname, 'rb')
            return True
        # pylint:disable=W0703
        # need such an exception
        except Exception as err:
            log.error('failed to open log stream :{0}'.format(err))
            return False
Ejemplo n.º 46
0
def test_gen_wf():
    """
    When init_comlog is called with gen_wf=True, messages at WARNING
    level or above must also be written into the ${logfile}.wf file.
    This case verifies that behavior.
    """
    log.init_comlog(
        "Yang Honggang", logging.DEBUG, "cup.log",
        log.ROTATION, gen_wf=True
    )

    log.info("info")
    log.critical("critical")
    log.error("error")
    log.warn("warning")
    log.debug("debug")

    # both cup.log and cup.log.wf must have been created;
    # bugfix: use context managers so the handles are always closed
    try:
        with open('cup.log') as flog:
            flog_str = flog.read()
        with open('cup.log.wf') as flog_wf:
            flog_wf_str = flog_wf.read()
    except IOError:
        assert(False), "can not find cup.log or cup.log.wf file"
    # cup.log must contain the "debug" and "info" records
    assert('debug' in flog_str and 'info' in flog_str), "cup.log's content error"
    # cup.log.wf must contain "critical", "error" and "warning"
    assert('critical' in flog_wf_str and 'error' in flog_wf_str and \
            'warning' in flog_wf_str), "cup.log.wf's content error"

    # cup.log.wf must NOT contain "debug" or "info"
    assert('debug' not in flog_wf_str and 'info' not in flog_wf_str), \
            "cup.log.wf's content error"
    # cup.log must NOT contain "critical", "error" or "warning"
    assert('critical' not in flog_str and 'error' not in flog_str and \
            'warning' not in flog_str), "cup.log's content error"
Ejemplo n.º 47
0
 def do_recv_data(self, data, data_len):
     """
     push data into the recving_msg queue
     network read should be in 1 thread only.

     Feeds `data` into the message currently being assembled; when the
     message completes it is handed to the connection's recv queue, and
     any leftover bytes (belonging to the next message) are recursively
     pushed into a fresh recving_msg.
     """
     if self._recving_msg is None:
         raise cup.err.NotInitialized('self._recving_msg')
     try:
         ret = self._recving_msg.push_data(data)
     except IndexError as error:
         # push_data signals malformed length/framing with IndexError;
         # the stream is unrecoverable, so close the socket
         log.warn('index error/msg len error happened:{0}'.format(error))
         log.warn(traceback.format_exc())
         log.warn('receive a msg that cannot handle, close the socket')
         self.to_destroy()
         return
     if ret < 0:
         # negative return means the msg header failed validation
         log.warn(
             'receive an wrong socket msg, to close the peer:{0}'.format(
                 self.get_peerinfo()))
         self.to_destroy()
         self._conn.cleanup_error_context(self)
         return
     if data_len >= ret:
         if self._recving_msg.is_recvmsg_complete():
             self._is_1st_recv_msg = False
             # hand the finished message over to the consumer queue
             self._conn.get_recv_queue().put(
                 (self._recving_msg.get_flag(), self._recving_msg))
             if self.get_listened_peer() is None:
                 # remember the peer's listening address from the 1st msg
                 listened_peer = self._recving_msg.get_from_addr()[0]
                 self.set_listened_peer(listened_peer)
                 log.info(
                     'set listened peer {0} for this context({1})'.format(
                         listened_peer, self._peerinfo))
             self._recving_msg = None
             # crude backpressure: stall briefly when the queue piles up
             if self._conn.get_recv_queue().qsize() >= 500:
                 time.sleep(0.1)
             self.move2recving_msg()
         #  the pushed data should span on two msg datas
         if data_len > ret:
             return self.do_recv_data(data[ret:], (data_len - ret))
     else:
         log.error(
             'Socket error. We cannot get more than pushed data length')
         assert False
     return
Ejemplo n.º 48
0
Archivo: msg.py Proyecto: sylarhl/CUP
    def push_data(self, data):
        """
        push data into the msg. Return pushed length.

        Return -1 if we should shutdown the socket channel.

        The message is assembled field by field following self._ORDER
        (head, ..., len, ..., body); self._readindex tracks how many
        bytes have been consumed so far across calls.
        """
        if self._msg_finish:
            log.warn('The CNetMsg has already been pushed enough data')
            return 0
        if len(data) == 0:
            log.warn(
                'You just pushed into the msg with a zero-length data'
            )
            return 0
        sign = True
        data_ind = 0
        data_max = len(data)
        # resume from wherever the previous push_data call stopped
        order, offsite = self._get_msg_order_ind(self._readindex)
        log.debug('msg data index:{0}, offsite: {1}'.format(order, offsite))
        data_key = self._ORDER[order]
        while sign:
            msg_data_loop_end = False
            # One loop handle one data_key until there all the data is handled.
            try:
                self._data[data_key]
            except KeyError:
                self._data[data_key] = ''
            # bytes still missing for the current field
            loop_data_max = (
                self._ORDER_BYTES[order] - len(self._data[data_key])
            )
            if (data_max - data_ind) >= loop_data_max:
                # can fill up the msg
                self._data[data_key] += (
                    data[data_ind: loop_data_max + data_ind]
                )
                data_ind += loop_data_max
                msg_data_loop_end = True
                self._readindex += loop_data_max
                if data_key != 'body':
                    log.debug(
                        'data_key {0} full filled'.format(data_key)
                    )
                    if data_key == 'head':
                        # a wrong magic head means a broken/foreign stream:
                        # tell the caller to shut the channel down
                        if self._data[data_key] != self.MSG_SIGN:
                            return -1
                else:
                    pass
            else:
                # cannot fill up the msg in this round
                sign = False
                push_bytes = data_max - data_ind
                self._data[data_key] += data[data_ind: data_max]
                self._readindex += push_bytes
                data_ind += push_bytes

            if (data_key == 'len') and (msg_data_loop_end):
                # set up the length of the body
                total_len = self._convert_bytes2uint(self._data['len'])
                if self._need_head:
                    self._ORDER_BYTES[7] = total_len - self._SIZE_EXCEPT_BODY
                else:
                    self._ORDER_BYTES[7] = (
                        total_len - self._SIZE_EXCEPT_HEAD_BODY
                    )
                log.debug('total len %d' % total_len)
            if msg_data_loop_end and (order == self._ORDER_COUNTS - 1):
                # last field (body) finished -- the whole msg is complete
                self._msg_finish = True
                sign = False
                log.debug('congratulations. This msg has been fullfilled')
            elif msg_data_loop_end and order < self._ORDER_COUNTS:
                # advance to the next field in the wire order
                order += 1
                data_key = self._ORDER[order]
                log.debug('This round has finished')
        return data_ind
Ejemplo n.º 49
0
    def _worker(self):
        """
        Thread-pool worker loop.

        Pulls jobs off self._jobqueue until the stop sentinel is seen.
        Each job is (context, function, args, kwargs, result_callback);
        the function runs inside its tracked context, and the callback
        (if any) receives (success, result). The worker registers itself
        in self._waiters while blocked on the queue and in self._working
        while running a job.
        """
        current_thd = self._CURRENT_THREAD()
        with self._worker_state(self._waiters, current_thd):
            job = self._jobqueue.get()

        while job is not self._WORKER_STOP_SIGN:
            with self._worker_state(self._working, current_thd):
                # pylint: disable=W0621
                context, function, args, kwargs, result_callback = job
                # drop the job tuple early so its payload can be collected
                del job

                try:
                    # pylint: disable=W0142
                    result = _CONTEXT_TRACKER.call_with_context(
                        context, function, *args, **kwargs
                    )
                    success = True
                except Exception as error:
                    success = False
                    log.warn(
                        'Func failed, func:%s, error_msg: %s'  %
                        (str(function), str(error))
                    )
                    if result_callback is None:
                        # no callback to report the error to -- log the
                        # failing context instead
                        log.warn('This func does not have callback.')
                        _CONTEXT_TRACKER.call_with_context(
                            context, self._log_err_context, context
                        )
                        result = None
                    else:
                        # let the callback decide what to do with the error
                        result = error

                del function, args, kwargs
            # when out of  "with scope",
            # the self._working will remove the thread from
            # its self._working list

            if result_callback is not None:
                try:
                    _CONTEXT_TRACKER.call_with_context(
                        context, result_callback, success, result
                    )
                except Exception as e:
                    # traceback.print_exc(file=sys.stderr)
                    # a failing callback must not kill the worker thread
                    log.warn(
                        'result_callback func failed, callback func:%s,'
                        'err_msg:%s' % (str(result_callback), str(e))
                    )
                    _CONTEXT_TRACKER.call_with_context(
                        context, self._log_err_context, context
                    )

            del context, result_callback, result

            with self._worker_state(self._waiters, current_thd):
                job = self._jobqueue.get()
            # after with statements, self._waiters will remove current_thd

        # remove this thread from the list
        self._threads.remove(current_thd)
Ejemplo n.º 50
0
Archivo: mail.py Proyecto: intohole/CUP
    def sendmail(self, recipients, subject='', body='', attachments=None):
        """
        Send an email through the configured SMTP server.

        :param recipients:
            a single recipient address (string) or a list of addresses
        :param subject:
            mail subject
        :param body:
            mail body; rendered as html or plain text depending on the
            sender's ``_is_html`` flag
        :param attachments:
            a single attachment path (string) or a list of paths.
            Paths should be absolute; non-existing files are skipped
            with a warning, unattachable files are skipped as well.
        :return:
            a tuple ``(True, None)`` on success,
            ``(False, error_msg)`` on failure
        """
        errmsg = None
        self._check_type(recipients, [str, list])
        self._check_type(subject, [str])
        # choose the body subtype according to the configured mode
        if self._is_html:
            msg_body = text.MIMEText(body, 'html', _charset='utf-8')
        else:
            msg_body = text.MIMEText(body, 'plain', _charset='utf-8')
        outer = multipart.MIMEMultipart()
        outer['Subject'] = subject
        if isinstance(recipients, list):
            outer['To'] = self._COMMA_SPLITTER.join(recipients)
        else:
            outer['To'] = recipients
        outer['From'] = self._sender
        outer.preamble = 'Peace and Joy!\n'
        outer.attach(msg_body)
        # normalize attachments into a list of paths
        if isinstance(attachments, str):
            attrs = [attachments]
        elif isinstance(attachments, list):
            attrs = attachments
        else:
            attrs = []
        for attached in attrs:
            if not os.path.isfile(attached):
                log.warn('attached is not a file:%s' % attached)
                continue
            # Guess the content type based on the file's extension.  Encoding
            # will be ignored, although we should check for simple things like
            # gzip'd or compressed files.
            ctype, encoding = mimetypes.guess_type(attached)
            if ctype is None or encoding is not None:
                # No guess could be made, or the file is encoded (compressed)
                # use a generic bag-of-bits type.
                ctype = 'application/octet-stream'
            maintype, subtype = ctype.split('/', 1)
            try:
                if maintype == 'text':
                    with open(attached) as fhandle:
                        # Note: we should handle calculating the charset
                        msg = text.MIMEText(fhandle.read(), _subtype=subtype)
                elif maintype == 'image':
                    with open(attached, 'rb') as fhandle:
                        msg = image.MIMEImage(fhandle.read(), _subtype=subtype)
                elif maintype == 'audio':
                    with open(attached, 'rb') as fhandle:
                        msg = audio.MIMEAudio(fhandle.read(), _subtype=subtype)
                else:
                    with open(attached, 'rb') as fhandle:
                        msg = base.MIMEBase(maintype, subtype)
                        msg.set_payload(fhandle.read())
                # Encode the payload using Base64
                encoders.encode_base64(msg)
                # Set the filename parameter
                msg.add_header(
                    'Content-Disposition', 'attachment',
                    filename=os.path.basename(attached)
                )
                outer.attach(msg)
            # pylint: disable=W0703
            except Exception as exception:
                log.warn(
                    'failed to attach %s, errmsg:%s. Will skip it' % (
                        attached, str(exception)
                    )
                )
        composed = outer.as_string()
        smtp = None
        try:
            smtp = smtplib.SMTP(self._server, self._port)
            smtp.sendmail(self._sender, recipients, composed)
            return (True, None)
        except smtplib.SMTPException as smtperr:
            errmsg = str(smtperr)
            return (False, errmsg)
        finally:
            # always close the connection; the original leaked the SMTP
            # socket when sendmail raised SMTPException
            if smtp is not None:
                try:
                    smtp.quit()
                except Exception:  # pylint: disable=W0703
                    pass
Ejemplo n.º 51
0
Archivo: conn.py Proyecto: zfq308/CUP
 def _do_write(self, context):
     """
     Drain the send queue of *context* onto its socket.

     Keeps sending chunks of up to NET_RW_SIZE bytes until the socket
     would block (EAGAIN / EWOULDBLOCK), the connection breaks, the
     send queue runs empty, or self._stopsign is set.

     :param context:
         connection context holding the socket and queued messages
     :return:
         the same context; on fatal socket errors it is marked
         with to_destroy() before returning
     """
     sock = context.get_sock()
     msg = context.try_move2next_sending_msg()
     if msg is None:
         log.debug('send queue is empty, quit the _do_write thread')
         return context
     log.debug('To enter write loop until eagin')
     # pylint:disable=w0212
     # log.debug('This msg _need_head:%s' % msg._need_head)
     while not self._stopsign:
         data = msg.get_write_bytes(self.NET_RW_SIZE)
         log.debug('get_write_bytes_len: %d' % len(data))
         try:
             # sock.send may send fewer bytes than requested; seek_write
             # advances the message cursor by the amount actually sent
             succ_len = sock.send(data)
             # log.debug('succeed to send length:%d' % succ_len)
             msg.seek_write(succ_len)
         except socket.error as error:
             err = error.args[0]
             if err == errno.EAGAIN:
                 # send buffer full; return and wait to be called again
                 log.debug(
                     'EAGAIN happend, context info %s' %
                     context.get_context_info()
                 )
                 return context
             elif err == errno.EWOULDBLOCK:
                 # same as EAGAIN on most platforms, handled separately
                 log.debug(
                     'EWOULDBLOCK happend, context info %s' %
                     context.get_context_info()
                 )
                 return context
             else:
                 # any other socket error is fatal for this connection
                 log.warn(
                     'Socket error happend. But its not eagin,error:%s,\
                     context info %s, errno:%s' %
                     (str(error), context.get_context_info(), err)
                 )
                 context.to_destroy()
                 break
         except Exception as error:
             log.error(
                 'Socket error happend, error:%s,  context info %s' %
                 (str(error), context.get_context_info())
             )
             context.to_destroy()
             break
         finally:
             # release the chunk reference promptly on every path
             del data
         log.debug('%d bytes has been sent' % succ_len)
         if msg.is_msg_already_sent():
             # log.debug(
             #     'send out a msg: msg_type:%d, msg_len:%d, msg_flag:%d, '
             #     'msg_src:%s, msg_dest:%s, uniqid:%d' %
             #     (
             #         msg.get_msg_type(),
             #         msg.get_msg_len(),
             #         msg.get_flag(),
             #         str(msg.get_from_addr()),
             #         str(msg.get_to_addr()),
             #         msg.get_uniq_id()
             #     )
             # )
             # if we have successfully send out a msg. Then move to next one
             msg = context.try_move2next_sending_msg()
             if msg is None:
                 break
     return context
Ejemplo n.º 52
0
Archivo: conn.py Proyecto: zfq308/CUP
    def _handle_error_del_context(self, context):
        """
        Tear down a broken connection: close its socket, unregister it
        from epoll, drop the bookkeeping maps, then asynchronously drain
        its send queue in the thread pool.

        :param context:
            connection context to destroy; None is silently ignored
        """
        def _cleanup_context(send_queue, peerinfo):
            """cleanup context: drop every queued msg of the dead peer"""
            log.info('to cleanup socket, peer:{0}'.format(peerinfo))
            log.info(
                'cleanup: send_queue of socket size:{0}'.format(
                    send_queue.qsize()
                )
            )
            while True:
                try:
                    item = send_queue.get_nowait()
                    msg = item[2]
                    del msg
                except queue.Empty:
                    break
            # pylint: disable=W0212
            # need cleanup
            log.info('end clean up peerinfo:{0}'.format(peerinfo))
        if context is None:
            return
        log.info('to del context as socket is not normal')
        # peerinfo is needed after the critical section below; initialize
        # it so an early failure inside the try block cannot leave it
        # unbound (the original raised NameError at add_1job in that case)
        peerinfo = None
        self._mlock.acquire()
        try:
            peerinfo = context.get_peerinfo()
            log.info(
                'handle socket reset by peer, to close the socket:%s:%s' %
                (peerinfo[0], peerinfo[1])
            )
            fileno_peer = self._context2fileno_peer[context]
            log.info('socket info: %s' % str(fileno_peer[1]))

            try:
                sock = context.get_sock()
                # sock.shutdown(socket.SHUT_RDWR)
                sock.close()
                context.set_sock(None)
            except socket.error as error:
                log.info(
                    'failed to close the socket, err_msg:%s' % str(error)
                )
            except Exception as error:
                log.warn('failed to close socket:{0}'.format(error))

            try:
                self._epoll.unregister(fileno_peer[0])
            except Exception as error:  # pylint: disable=W0703
                log.warn(
                    'epoll unregister error:%s, peerinfo:%s' %
                    (str(error), str(fileno_peer[1]))
                )
            log.info('socket closed')
            del self._fileno2context[fileno_peer[0]]
            del self._peer2context[fileno_peer[1]]
            del self._context2fileno_peer[context]
        except Exception as error:  # pylint: disable=W0703
            # teardown is best-effort, but do not swallow errors silently
            log.warn('failed to del context, err_msg:{0}'.format(error))
        finally:
            self._mlock.release()
        if peerinfo is not None:
            # pylint: disable=W0212
            self._thdpool.add_1job(
                _cleanup_context, context._send_queue, peerinfo
            )
Ejemplo n.º 53
0
    def read(self, record_num=128):
        """
        load log into memory

        :param record_num:
            maximum number of records to return in one call (default 128)

        :notice:
            If skip_badlog is not True, will raise IOError if the stream
            encounters any error.

            Otherwise, the stream will skip the bad log file, move to next one
            and continue reading

        :return:
            a. return a list of "record_num" of LogRecords.

            b. If the count number of list is less than record_num,
            it means the stream encounter EOF, plz read again afterwards.

            c. If the returned is None, it means the stream got nothing, plz
                try again.
        """
        recordlist = []
        count = 0
        # set when the current stream is exhausted or broken and reading
        # should continue with the next log file
        move2nextstream = False
        while count < record_num:
            ret, retval = self._try_read_one_log(self._load_stream)
            if ret == LOGFILE_EOF:
                # need read next log file
                move2nextstream = True
            elif ret == LOGFILE_GOOD:
                recordlist.append(retval)
                count += 1
                continue
            elif ret == LOGFILE_BAD_RECORD:
                # corrupt record: either fail hard or skip the whole file,
                # depending on the skip_badlog policy
                if not self._skip_badlog:
                    raise IOError(
                        'find bad records in {0}'.format(
                            self._load_stream.name)
                    )
                else:
                    log.warn(
                        'Bad record! '
                        'But skip_badlog is on, will skip the file:{0}'.format(
                            self._load_stream.name)
                    )
                    move2nextstream = True
            if move2nextstream:
                move2nextstream = False
                ret = self._move2next_load_fname()
                if LOGFILE_EOF == ret:
                    log.debug('does not have more log edits to read, plz retry')
                    break
                elif LOGFILE_GOOD == ret:
                    log.debug('moved to next log edit file, to read new log')
                    continue
                elif LOGFILE_BAD_RECORD == ret:
                    log.error('IOError happended, read_logs failed')
                    if self._skip_badlog:
                        log.error('skip bad log is on, try moving to next one')
                        # NOTE(review): after this continue the loop reads
                        # from the current stream once more before the flag
                        # triggers the next move -- confirm this is intended
                        move2nextstream = True
                        continue
                    else:
                        raise IOError('encounter bad records, raise exception')
        return recordlist
Ejemplo n.º 54
0
Archivo: conn.py Proyecto: zfq308/CUP
 def push_msg2sendqueue(self, msg):
     """
     push msg into the send queue of the peer that msg addresses,
     creating the connection context (and the socket) on demand

     :param msg:
         the netmsg to send; msg.get_to_addr()[0] selects the peer
     :return:
         0 on success, -1 on failure
     """
     ret = 0
     if msg is None:
         log.warn('put a None into msg send queue. return')
         ret = -1
         return ret
     flag = msg.get_flag()
     peer = msg.get_to_addr()[0]
     context = None
     sock = None
     try:
         context = self._peer2context[peer]
     except KeyError:
         log.info('To create a new context for the sock:{0}'.format(
             peer)
         )
         self._mlock.acquire()
         # hold the lock with try/finally: the original leaked the lock
         # whenever connect()/epoll register raised inside the critical
         # section, deadlocking every later caller
         try:
             if peer not in self._peer2context:
                 sock = self.connect(peer)
                 if sock is not None:
                     context = CConnContext()
                     context.set_conn_man(self)
                     context.set_sock(sock)
                     context.set_peerinfo(peer)
                     fileno = sock.fileno()
                     self._peer2context[peer] = context
                     self._fileno2context[fileno] = context
                     self._context2fileno_peer[context] = (fileno, peer)
                     log.info('created context for the new sock')
                     ret = 0
                     try:
                         self._epoll.register(
                             sock.fileno(), self._epoll_write_params()
                         )
                     except Exception as error:  # pylint: disable=W0703
                         log.warn(
                             'failed to register the socket fileno, err_msg:%s,'
                             'perinfo:%s:%s. To epoll modify it' %
                             (str(error), peer[0], peer[1])
                         )
                         self._epoll.modify(
                             sock.fileno(), self._epoll_write_params()
                         )
                 else:
                     log.error(
                         'failed to post msg. Connect failed. peer info:{0}.'
                         ' msg_type:{1}'.format(
                             str(peer), msg.get_msg_type()
                         )
                     )
                     ret = -1
             else:
                 # another thread created the context while we waited
                 context = self._peer2context[peer]
         finally:
             self._mlock.release()
     else:
         context = self._peer2context[peer]
     if ret != 0:
         return ret
     if not context.is_detroying():
         if context.put_msg(flag, msg) == 0:
             ret = 0
         else:
             ret = -1
         self._handle_new_send(context)
         return ret
     # NOTE(review): context is being destroyed. The original implicitly
     # returned None here; kept identical for backward compatibility
     return None
Ejemplo n.º 55
0
 def _log_err_context(self, context):
     """
     emit warning logs describing a context whose call has failed
     """
     for warning in (
         'Seems a call with context failed. See the context info',
         str(context),
     ):
         log.warn(warning)
Ejemplo n.º 56
0
 def _log_err_context(self, context):
     """log a warning plus the stringified context for a failed call"""
     header = 'Seems a call with context failed. See the context info'
     log.warn(header)
     detail = str(context)
     log.warn(detail)