def _do_test_del(self, pipe, **kwargs):
    """Write to the write end of *pipe* through a FileObject, delete it,
    and verify the close semantics implied by ``kwargs['close']``.

    If close=True (the default), deleting the FileObject must have closed
    the underlying fd, so a second os.close() must fail; with close=False
    the fd must still be open.  Either way the written byte must be
    readable from the read end.
    """
    r, w = pipe
    s = FileObject(w, 'wb', **kwargs)
    s.write(b'x')
    try:
        s.flush()
    except IOError:
        # Sometimes seen on Windows/AppVeyor
        print("Failed flushing fileobject", repr(s), file=sys.stderr)
        import traceback
        traceback.print_exc()

    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ResourceWarning)
        # Deliberately getting ResourceWarning with FileObject(Thread) under Py3
        del s
        gc.collect()  # PyPy

    if kwargs.get("close", True):
        with self.assertRaises((OSError, IOError)):
            # expected, because FileObject already closed it
            os.close(w)
    else:
        os.close(w)

    with FileObject(r, 'rb') as fobj:
        self.assertEqual(fobj.read(), b'x')
def _do_test_del(self, pipe, **kwargs):
    """Delete a write-side FileObject over *pipe* and check fd ownership.

    With close=True (the default) the FileObject must close the fd on
    deletion, so our own os.close() must raise; with close=False the fd
    stays ours to close.  The byte written before deletion must then be
    readable from the read end.
    """
    r, w = pipe
    s = FileObject(w, 'wb', **kwargs)
    # Capture the concrete type now: after `del s` it is still needed
    # for the failure message below.
    ts = type(s)
    s.write(b'x')
    try:
        s.flush()
    except IOError:
        # Sometimes seen on Windows/AppVeyor
        print("Failed flushing fileobject", repr(s), file=sys.stderr)
        import traceback
        traceback.print_exc()
    del s  # Deliberately getting ResourceWarning with FileObject(Thread) under Py3
    gc.collect()  # PyPy
    if kwargs.get("close", True):
        try:
            os.close(w)
        except (OSError, IOError):
            pass  # expected, because FileObject already closed it
        else:
            raise AssertionError('os.close(%r) must not succeed on %r' % (w, ts))
    else:
        os.close(w)
    fobj = FileObject(r, 'rb')
    self.assertEqual(fobj.read(), b'x')
    fobj.close()
def _do_test_del(self, pipe, **kwargs):
    """Delete a write-side FileObject over *pipe* and check fd ownership.

    With close=True (the default) deleting the FileObject must close the
    underlying fd, so closing it again must fail; with close=False the fd
    remains open for us to close.  The written byte must be readable from
    the read end afterwards.
    """
    read_fd, write_fd = pipe
    writer = FileObject(write_fd, 'wb', **kwargs)
    writer_type = type(writer)  # kept for the failure message after `del`
    writer.write(b'x')
    try:
        writer.flush()
    except IOError:
        # Occasionally observed on Windows/AppVeyor; report and continue.
        print("Failed flushing fileobject", repr(writer), file=sys.stderr)
        import traceback
        traceback.print_exc()
    # Deliberately getting ResourceWarning with FileObject(Thread) under Py3.
    del writer
    gc.collect()  # PyPy needs an explicit collection to run finalizers
    if not kwargs.get("close", True):
        os.close(write_fd)
    else:
        already_closed = False
        try:
            os.close(write_fd)
        except (OSError, IOError):
            already_closed = True  # expected: FileObject closed it on deletion
        if not already_closed:
            raise AssertionError('os.close(%r) must not succeed on %r' % (write_fd, writer_type))
    reader = FileObject(read_fd, 'rb')
    self.assertEqual(reader.read(), b'x')
    reader.close()
class LogArchiver(BaseLogProcessor):
    """Queue-fed log writer.

    Entries pushed via :meth:`push` are appended to a local ``LOG`` file by
    a background greenlet; when an entry's date rolls past the current day,
    the finished day's file is rotated and gzip-compressed in a shared
    thread pool.
    """

    # Shared by every LogArchiver instance so compression jobs reuse one pool.
    pool = ThreadPool(3)

    def __init__(self, cname, local_dir):
        self.cname = cname
        self.local_dir = local_dir
        self.f = None
        self._cur_date = None
        join = os.path.join
        self._aof_file = join(self.local_dir, "LOG")
        self._tmp_file = join(self.local_dir, "TMP")
        # Destination pattern for the per-day gzip archives.
        self._gz_tmpl = join(self.local_dir, self.cname + "_{date}.gz")
        self._open_log()
        self.queue = Queue()
        # Background greenlet that drains the queue for the process lifetime.
        gevent.spawn(self._write_log)

    def _open_log(self):
        # Cooperative wrapper around the append-mode log handle.
        self.f = FileObject(open(self._aof_file, "a+"), "a+")

    def compress(self, date):
        """Rotate the live log file and gzip it on the shared pool."""
        self.f.close()
        os.rename(self._aof_file, self._tmp_file)
        LogArchiver.pool.spawn(self._compress, date)
        self._open_log()

    def _compress(self, date):
        # Stream the rotated file into a gzip archive named for *date*.
        source = open(self._tmp_file, "rb")
        archive = gzip.open(self._gz_tmpl.format(date=date), "wb")
        archive.writelines(source)
        archive.close()
        source.close()

    @classmethod
    def join(cls):
        """Block until all queued compression jobs have finished."""
        cls.pool.join()

    def _write_log(self):
        # Greenlet body: append each queued entry, compressing on day change.
        while 1:
            entry = self.queue.get()
            date = entry["t"].date()
            if self._cur_date is None:
                self._cur_date = entry["t"].date()
            if date > self._cur_date:
                self.compress(self._cur_date.strftime("%Y-%m-%d"))
                self._cur_date = date
            msg = entry["msg"]
            self.f.write(msg)
            if not msg.endswith("\n"):
                self.f.write("\n")
            self.f.flush()

    def push(self, entry):
        """Enqueue *entry* (a dict with 't' and 'msg') for writing."""
        self.queue.put(entry)
def test_del_noclose(self):
    """With close=False, deleting the FileObject must NOT close the fd:
    our own os.close() must succeed and the written byte stay readable."""
    r, w = os.pipe()
    s = FileObject(w, 'wb', close=False)
    # Binary mode requires bytes; writing str raises TypeError under Py3.
    s.write(b'x')
    s.flush()
    del s
    # Must succeed: close=False means the FileObject never owned the fd.
    os.close(w)
    fobj = FileObject(r, 'rb')
    self.assertEqual(fobj.read(), b'x')
    # Close explicitly rather than leaking the read-side FileObject.
    fobj.close()
def test_del_noclose(self):
    """Deleting a close=False FileObject must leave the pipe fd open."""
    read_fd, write_fd = os.pipe()
    writer = FileObject(write_fd, 'wb', close=False)
    writer.write(b'x')
    writer.flush()
    if PYPY:
        # PyPy has no refcounting: close explicitly instead of relying on del.
        writer.close()
    else:
        del writer
    # Must succeed — close=False means the fd is still ours.
    os.close(write_fd)
    self.assertEqual(FileObject(read_fd, 'rb').read(), b'x')
def _test_del(self, **kwargs):
    """Deleting the write-side FileObject must close the underlying fd,
    so a subsequent os.close() must fail; the byte written beforehand
    must still be readable from the read end."""
    r, w = os.pipe()
    s = FileObject(w, 'wb')
    # Binary mode requires bytes; writing str raises TypeError under Py3.
    s.write(b'x')
    s.flush()
    del s
    try:
        os.close(w)
    except OSError:
        pass  # expected, because SocketAdapter already closed it
    else:
        raise AssertionError('os.close(%r) must not succeed' % w)
    fobj = FileObject(r, 'rb')
    self.assertEqual(fobj.read(), b'x')
    # Close explicitly rather than leaking the read-side FileObject.
    fobj.close()
def _test_del(self, **kwargs):
    """Deleting the write-side FileObject must close the underlying fd,
    so a subsequent os.close() must fail; the byte written beforehand
    must still be readable from the read end."""
    r, w = os.pipe()
    s = FileObject(w, 'wb')
    s.write(b'x')
    s.flush()
    if PYPY:
        # PyPy has no refcounting; close explicitly instead of relying on del.
        s.close()
    else:
        del s  # Deliberately getting ResourceWarning under Py3
    try:
        os.close(w)
    except OSError:
        pass  # expected, because SocketAdapter already closed it
    else:
        raise AssertionError('os.close(%r) must not succeed' % w)
    fobj = FileObject(r, 'rb')
    self.assertEqual(fobj.read(), b'x')
    fobj.close()
def _test_del(self, **kwargs):
    """Deleting the write-side FileObject must close its fd: a second
    os.close() must fail, and the written byte must remain readable."""
    read_end, write_end = os.pipe()
    writer = FileObject(write_end, 'wb')
    writer.write(b'x')
    writer.flush()
    if PYPY:
        # No refcounting on PyPy; close explicitly.
        writer.close()
    else:
        # Deliberately getting ResourceWarning under Py3.
        del writer
    close_succeeded = True
    try:
        os.close(write_end)
    except OSError:
        # Expected: the FileObject (SocketAdapter) already closed the fd.
        close_succeeded = False
    if close_succeeded:
        raise AssertionError('os.close(%r) must not succeed' % write_end)
    reader = FileObject(read_end, 'rb')
    self.assertEqual(reader.read(), b'x')
    reader.close()
class LogArchiver(BaseLogProcessor):
    """Queue-fed log writer.

    Entries pushed via :meth:`push` are appended to a local ``LOG`` file by
    a background greenlet; when an entry's date rolls past the current day,
    the finished day's file is rotated and gzip-compressed in a shared
    thread pool.
    """

    # Shared by every LogArchiver instance so compression jobs reuse one pool.
    pool = ThreadPool(3)

    def __init__(self, cname, local_dir):
        self.cname = cname
        self.local_dir = local_dir
        self.f = None
        self._cur_date = None
        self._aof_file = os.path.join(
            self.local_dir, "LOG")
        self._tmp_file = os.path.join(
            self.local_dir, "TMP")
        # Destination pattern for the per-day gzip archives.
        self._gz_tmpl = os.path.join(
            self.local_dir, self.cname + "_{date}.gz")
        self._open_log()
        self.queue = Queue()
        # Background greenlet that drains the queue for the process lifetime.
        gevent.spawn(self._write_log)

    def _open_log(self):
        # BUG FIX: the FileObject mode must match the underlying handle.
        # The previous 'wb' conflicted with the text append handle ('a+')
        # and with the str payloads written by _write_log.
        self.f = FileObject(open(self._aof_file, 'a+'), 'a+')

    def compress(self, date):
        """Rotate the live log file and gzip it on the shared pool."""
        self.f.close()
        os.rename(self._aof_file, self._tmp_file)
        LogArchiver.pool.spawn(self._compress, date)
        self._open_log()

    def _compress(self, date):
        # Stream the rotated file into a gzip archive named for *date*.
        f_in = open(self._tmp_file, 'rb')
        filename = self._gz_tmpl.format(date=date)
        gz = gzip.open(filename, 'wb')
        gz.writelines(f_in)
        gz.close()
        f_in.close()

    @classmethod
    def join(cls):
        """Block until all queued compression jobs have finished."""
        cls.pool.join()

    def _write_log(self):
        # Greenlet body: append each queued entry, compressing on day change.
        while 1:
            entry = self.queue.get()
            date = entry['t'].date()
            if self._cur_date is None:
                self._cur_date = entry['t'].date()
            if date > self._cur_date:
                self.compress(self._cur_date.strftime("%Y-%m-%d"))
                self._cur_date = date
            self.f.write(entry['msg'])
            if not entry['msg'].endswith('\n'):
                self.f.write('\n')
            self.f.flush()

    def push(self, entry):
        """Enqueue *entry* (a dict with 't' and 'msg') for writing."""
        self.queue.put(entry)
class ConsoleAdapter(object):
    """
    This adapter will run Jeev in console mode, listening to stdin for
    messages, and writing outgoing messages to stdout.
    """

    def __init__(self, jeev, opts):
        self._jeev = jeev
        self._opts = opts
        self._stdin = None
        self._stdout = None
        self._reader = None
        self._channel = opts.get('console_channel', 'console')
        self._user = opts.get('console_user', 'user')

    def _read_stdin(self):
        # Greenlet body: prompt, read lines, and dispatch them as messages.
        # BUG FIX: backslashes are now escaped ('\\c', '\\u') — a bare '\u'
        # escape is a SyntaxError on Python 3 and '\c' is an invalid escape;
        # the printed/compared text ("\c", "\u") is unchanged.
        self._stdout.write(">>> Jeev Console Adapter\n")
        self._stdout.write(">>> Switch channel using \\c channel_name\n")
        self._stdout.write(">>> Switch user using \\u user_name\n")
        self._stdout.flush()

        while True:
            self._stdout.write('[%s@%s] > ' % (self._user, self._channel))
            self._stdout.flush()
            line = self._stdin.readline()
            if not line:
                # EOF on stdin: stop reading.
                break

            if line.startswith('\\c'):
                self._channel = line[2:].strip().lstrip('#')
                self._stdout.write("Switched channel to #%s\n" % self._channel)
                self._stdout.flush()
            elif line.startswith('\\u'):
                self._user = line[2:].strip()
                self._stdout.write("Switched user %s\n" % self._user)
                self._stdout.flush()
            else:
                message = Message({}, self._channel, self._user, line.strip())
                self._jeev._handle_message(message)

    def start(self):
        """Wrap stdin/stdout in cooperative FileObjects and start reading."""
        self._reader = Greenlet(self._read_stdin)
        self._stdin = FileObject(sys.stdin)
        self._stdout = FileObject(sys.stdout)
        self._reader.start()

    def stop(self):
        """Kill the reader greenlet."""
        self._reader.kill()
        self._reader = None

    def join(self):
        """Block until the reader greenlet exits."""
        self._reader.join()

    def send_message(self, channel, message):
        """Echo an outgoing message to the console and re-draw the prompt."""
        self._stdout.write('\r< [#%s] %s\n' % (channel, message))
        self._stdout.write('[%s@%s] > ' % (self._user, self._channel))
        self._stdout.flush()

    def send_messages(self, channel, *messages):
        """Send each message in *messages* to *channel* in order."""
        for message in messages:
            self.send_message(channel, message)