def main():
    if len(sys.argv) > 1:
        url = sys.argv[1]
    else:
        url = 'http://localhost:8888/msgbus/chatdemo'
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    if not path.startswith('/msgbus/'):
        raise ValueError, 'invalid msgbus URL: %s' % url
    channel = path[8:]
    username, password, hostname, port = parse_netloc(scheme, netloc)

    # XXX - stdin b0rkage!
    os.putenv('EVENT_NOKQUEUE', '1')
    os.putenv('EVENT_NOPOLL', '1')

    event.init()
    event.read(0, recv_stdin, channel)
    evmsg.open(hostname, port, scheme == 'https')
    if username and password:
        evmsg.set_auth(username, password)
    sub = evmsg.subscribe(channel, '*', '*', recv_chat)
    event.signal(2, event.abort)
    print 'pub/sub to', url
    event.dispatch()

def test_read2(self):
    def __read2_cb(fd, msg):
        assert os.read(fd, 1024) == msg, 'read2 event failed'
        print 'test_read2'
    msg = 'hello world'
    pipe = os.pipe()
    event.read(pipe[0], __read2_cb, pipe[0], msg)
    os.write(pipe[1], msg)
    event.dispatch()

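# Companion sketch, not from the original suite: the same pipe round-trip
# driven from the write side, using only the pyevent calls already shown
# above (event.write, event.dispatch). The test name and callback name are
# hypothetical; the write event fires once, dispatch() returns, then the
# result is read back and checked.
def test_write2(self):
    def __write2_cb(fd, msg):
        os.write(fd, msg)
    msg = 'hello world'
    pipe = os.pipe()
    event.write(pipe[1], __write2_cb, pipe[1], msg)
    event.dispatch()
    assert os.read(pipe[0], 1024) == msg, 'write2 event failed'
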
def merge_with(line):
    events = read()
    to_merge = read_line(line)
    for event in to_merge:
        if event not in events:
            events.add(event)
    events.write()

def get_progress(since):
    """Get a dictionary containing the amount of time each subject has been
    worked on since the time given in the argument"""
    if isinstance(since, str):
        since = datetime.datetime.fromisoformat(since)
    events = read()
    return events.group_by_subject(since)

def add_read(self):
    """Make this instance react to new data on the connection."""
    re = self._ev_read
    if re:
        if re.pending():
            return
        self._ev_read = None
    self._ev_read = event.read(self.fileno(), self.handle_read)
    return

def add_read(self):
    """Make this instance react to new data on the connection."""
    re = getattr(self, '_ev_read', None)
    if re:
        if re.pending():
            return
        self._ev_read = None
    self._ev_read = event.read(self._fileno, self._read_cb)
    return

def bind(self, *args):
    """
    Bind the socket.
    """
    if self._debug:
        self._logger.debug("binding to %s", str(args))
    self._sock.bind(*args)
    self._peername = "%s:%d" % self.getsockname()
    self._accept_event = event.read(self, self._protected_cb, self._accept_cb)

def wrapped_func(self, *args):
    try:
        func(self, *args)
    except ssl.SSLError, err:
        self._clear_hdlers()
        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
            self._read_hdler = event.read(self._ssl_sock, wrapped_func, self, *args)
        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
            self._write_hdler = event.write(self._ssl_sock, wrapped_func, self, *args)
        else:
            traceback.print_exc()
            self.close()
            self._counterpart.close()

def add_descriptor(self, fileno, read=None, write=None, exc=None):
    if read:
        evt = event.read(fileno, read, fileno)
        self.readers[fileno] = evt, read
    if write:
        evt = event.write(fileno, write, fileno)
        self.writers[fileno] = evt, write
    if exc:
        self.excs[fileno] = exc
    return fileno

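# Hedged counterpart sketch, not part of the original class: tear down what
# add_descriptor above registered, using the same readers/writers/excs dicts
# and the event.delete() call seen in other snippets here. The method name
# is hypothetical.
def remove_descriptor(self, fileno):
    if fileno in self.readers:
        evt, _cb = self.readers.pop(fileno)
        evt.delete()
    if fileno in self.writers:
        evt, _cb = self.writers.pop(fileno)
        evt.delete()
    self.excs.pop(fileno, None)
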
def get_weekly():
    events = read()
    weekly_times = analysis.group_by_week(events)

    def get_lines():
        for week_start, times in weekly_times.items():
            times = {
                topic: seconds_to_readable_str(amount)
                for topic, amount in times.items()
            }
            yield f"{week_start}: {times}"

    return "\n".join(get_lines())

class _TcpConnection(asyncore.dispatcher):
    "Base class for a TCP connection."
    write_bufsize = 16
    read_bufsize = 1024 * 4

    def __init__(self, sock, is_server, host, port, use_ssl, certfile, keyfile,
                 connect_error_handler=None):
        self.use_ssl = use_ssl
        self.socket = sock
        if is_server:
            if self.use_ssl:
                try:
                    self.socket = ssl.wrap_socket(
                        sock, server_side=True, certfile=certfile,
                        keyfile=keyfile, do_handshake_on_connect=False)
                    self.socket.do_handshake()
                except ssl.SSLError, err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([self.socket], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [self.socket], [])
                    else:
                        raise
        self.host = host
        self.port = port
        self.connect_error_handler = connect_error_handler
        self.read_cb = None
        self.close_cb = None
        self._close_cb_called = False
        self.pause_cb = None
        self.tcp_connected = True  # always handed a connected socket (we assume)
        self._paused = False  # TODO: should be paused by default
        self._closing = False
        self._write_buffer = []
        if event:
            self._revent = event.read(self.socket, self.handle_read)
            self._wevent = event.write(self.socket, self.handle_write)
        else:  # asyncore
            asyncore.dispatcher.__init__(self, self.socket)

def track(item, task="N/A", start=None):
    """Track the item that is passed in"""
    end()
    if start is None:
        now = datetime.datetime.now()
    else:
        if isinstance(start, str):
            now = datetime.datetime.fromisoformat(start)
        else:
            now = start
    events = read()
    events.add(Event(item, task, now, now, "started"))
    events.write()

def add(self, evtype, fileno, real_cb, real_tb, mac):
    # this is stupid: pyevent won't call a callback unless it's a function,
    # so we have to force it to be one here
    if isinstance(real_cb, types.BuiltinMethodType):
        def cb(_d):
            real_cb(_d)
    else:
        cb = real_cb
    if evtype is READ:
        evt = event.read(fileno, cb, fileno)
    elif evtype is WRITE:
        evt = event.write(fileno, cb, fileno)
    return super(Hub, self).add(evtype, fileno, evt, real_tb, mac)

def __init__(self, sock, host, port, connect_error_handler=None):
    self.socket = sock
    self.host = host
    self.port = port
    self.connect_error_handler = connect_error_handler
    self.read_cb = None
    self.close_cb = None
    self._close_cb_called = False
    self.pause_cb = None
    self.tcp_connected = True
    self._paused = False
    self._closing = False
    self.write_buffer = []
    self._revent = event.read(sock, self.handle_read)
    self._wevent = event.write(sock, self.handle_write)

def add(self, evtype, fileno, real_cb):
    # this is stupid: pyevent won't call a callback unless it's a function,
    # so we have to force it to be one here
    if isinstance(real_cb, types.BuiltinMethodType):
        def cb(_d):
            real_cb(_d)
    else:
        cb = real_cb
    if evtype is READ:
        evt = event.read(fileno, cb, fileno)
    elif evtype is WRITE:
        evt = event.write(fileno, cb, fileno)
    listener = FdListener(evtype, fileno, evt)
    self.listeners[evtype].setdefault(fileno, []).append(listener)
    return listener

def __init__(self, sock, host, port):
    self.socket = sock
    self.host = host
    self.port = port
    self.read_cb = None
    self.close_cb = None
    self._close_cb_called = False
    self.pause_cb = None
    self.tcp_connected = True  # we assume a connected socket
    self._paused = False  # TODO: should be paused by default
    self._closing = False
    self._write_buffer = []
    if event:
        self._revent = event.read(sock, self.handle_read)
        self._wevent = event.write(sock, self.handle_write)
    else:  # asyncore
        asyncore.dispatcher.__init__(self, sock)

def __init__(self, sock, addr):
    self.service = None
    self.application = None
    self.sock = sock
    self.remote_addr = addr
    self._rev = event.read(self.sock, eventbase.event_read_handler, self)
    self._wev = event.write(self.sock, eventbase.event_write_handler, self)
    self._renable = False
    self._wenable = False
    # Mix-Ins
    self._sighand = {}
    self._setup_mixins()
    self.closed = False
    self.emit('prot.new_connection')

def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
             protocol=socket.IPPROTO_IP, read_cb=None, accept_cb=None,
             close_cb=None, error_cb=None, output_empty_cb=None, sock=None,
             debug=False, logger=None, max_read_buffer=0, **kwargs):
    """
    Initialize the socket.

    If no read_cb is defined, the socket will only be used for reading.  If
    this socket will be used for accepting new connections, set read_cb here
    and it will be passed to new sockets.  You can also set accept_cb and be
    notified with an EventSocket object on accept().  The error_cb will be
    called if there are any errors on the socket.  The args to it will be
    this socket, an error message, and an optional exception.  The close_cb
    will be called when this socket closes, with this socket as its argument.
    If needed, you can wrap an existing socket by setting the sock argument
    to a socket object.
    """
    self._debug = debug
    self._logger = logger
    if self._debug and not self._logger:
        print 'WARNING: to debug EventSocket, must provide a logger'
        self._debug = False

    # The various events we may or may not schedule
    self._read_event = None
    self._write_event = None
    self._accept_event = None
    self._connect_event = None
    self._pending_read_cb_event = None

    # Cache the peername so we can include it in logs even if the socket
    # is closed.  Note that connect() and bind() have to be the ones to do
    # that work.
    self._peername = 'unknown'

    if sock:
        self._sock = sock
        try:
            self._peername = "%s:%d" % self._sock.getpeername()
            # Like connect(), only initialize these if the socket is already connected.
            self._read_event = event.read(self._sock, self._protected_cb, self._read_cb)
            self._write_event = event.write(self._sock, self._protected_cb, self._write_cb)
        except socket.error, e:
            # unconnected
            pass

def _connect_cb(self, timeout_at, *args, **kwargs):
    '''
    Local support for synchronous and asynchronous connect.  Required because
    `event.timeout` doesn't support kwargs.  They are spec'd though so that
    we can branch how exceptions are handled.
    '''
    err = self._sock.connect_ex(*args)
    if not err:
        self._peername = "%s:%d" % self._sock.getpeername()
        self._read_event = event.read(self._sock, self._protected_cb, self._read_cb)
        self._write_event = event.write(self._sock, self._protected_cb, self._write_cb)

        if self._connect_event:
            self._connect_event.delete()
            self._connect_event = None

    elif err in (errno.EINPROGRESS, errno.EALREADY):
        # Only track timeout if we're about to re-schedule.  Should only
        # receive these on a non-blocking socket.
        if isinstance(timeout_at, float) and time.time() > timeout_at:
            self._error_msg = 'timeout connecting to %s' % str(args)
            self.close()
            return

        if self._connect_event:
            self._connect_event.delete()

        # Checking every 100ms seems to be a reasonable frequency.  If
        # requested this too can be made configurable.
        self._connect_event = event.timeout(0.1, self._connect_cb, timeout_at, *args)

    else:
        if self._connect_event:
            self._connect_event.delete()
        self._error_msg = os.strerror(err)
        serr = socket.error(err, self._error_msg)
        if kwargs.get('immediate_raise'):
            raise serr
        else:
            self._handle_error(serr)

def __init__(self, operation, opargs, callback):
    self._operation = operation
    self._opargs = opargs
    self._callback = callback
    self._result_buffer = ""

    somePid = os.fork()
    if somePid != 0:
        # in the master process, open a listening unix domain socket, and
        # wait for the result from the slave process.
        sock = io.server_unix_socket("/tmp/eventstorm_p_%d" % somePid)
        listen_ev = event.read(sock, self._on_worker_connect, sock)
        listen_ev.add()
    else:
        # in the worker process, run the operation, compute the result and
        # send it back to the master over the domain socket.
        result = operation(*opargs)
        path_to_socket = "/tmp/eventstorm_p_%d" % os.getpid()
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        while True:
            try:
                sock.connect(path_to_socket)
            except socket.error, e:
                #print e.args[0]
                errcode = e.args[0]
                if errcode == errno.ENOENT:
                    #print "going to retry..."
                    sys.stdout.flush()
                    # retry after sleeping
                    time.sleep(1)
                    continue
                sock.close()
                sock = None
            break
        if sock:
            sock.sendall(str(result))
            sock.close()
        os._exit(0)

def cancel():
    """Cancel the current event, if it exists.  Does nothing if there is no
    current event.

    If the event is marked, make the last marked time the end time.
    Else, delete the event.
    """
    events = read()
    if not events:
        return
    latest = events[-1]
    if latest.flag == "ended" or latest.flag == "cancelled":
        return
    if latest.flag == "marked":
        # If marked, make last marked time the end time
        latest.flag = "ended"
    else:
        latest.end = datetime.datetime.now()  # to determine inactivity
        latest.flag = "cancelled"
    events.write()

def end(when=None):
    """End the event"""
    if when is None:
        when = datetime.datetime.now()
    elif isinstance(when, str):
        when = datetime.datetime.fromisoformat(when)
    events = read()
    if not events:
        return False
    latest = events[-1]
    if latest.flag == "ended" or latest.flag == "cancelled":
        return False
    latest.end = when
    latest.flag = "ended"
    events.write()
    return True

def mark(when=None):
    """Mark the current task, if it exists.

    A marked event, if cancelled by the user, will instead be considered to
    have ended the last time the user marked the task."""
    events = read()
    if not events:
        return
    if when is None:
        when = datetime.datetime.now()
    latest = events[-1]
    if latest.flag == "ended" or latest.flag == "cancelled":
        return
    latest.end = when
    latest.flag = "marked"
    events.write()

def recvfrom(self, bytes, *args):
    event.read(self.sock, self.handle_recvfrom, bytes)
    return self.read_channel.receive()

def start(self, start_read=True):
    """ Begin scheduling events to handle data on the socket """
    if start_read:
        self._read_hdler = event.read(self._ssl_sock, self.handshake)
    else:
        self._write_hdler = event.write(self._ssl_sock, self.handshake)

def _reschedule(self):
    self._clear_hdlers()
    if self._write_buf.len() > 0:
        self._write_hdler = event.write(self._ssl_sock, self._write_data)
    if self._read_buf.len() < recvbuf_max:
        self._read_hdler = event.read(self._ssl_sock, self._read_data)

# asynchronous version
async def timelapse(e, loop):
    current_time = int(time())
    min_time = e[1]
    # sleep for n seconds
    sl = get_sleep_time(current_time, min_time)
    mesg = event_manager.get_mesg(e[2])
    print("\nsleep: " + str(sl))
    print("next message: {}".format(mesg))
    await asyncio.sleep(sl)
    data = {
        'update_id': 0,
        'message': mesg,
    }
    # requests.post(config.outer_url, json=data)
    print(data)
    if e[3] > 0:
        e[1] = int(time()) + e[3]
        loop.create_task(timelapse(e, loop))


loop = asyncio.get_event_loop()
for e in event.read(config.table_name):
    loop.create_task(timelapse(list(e), loop))
loop.run_forever()

def get_mesg(event_id):
    messages = event.read(config.messages, 'message', event_id)
    mesg = messages[random.randint(0, len(messages) - 1)][0]
    return mesg