def __init__(self):
    """Create the send/receive queues; appid and sock are populated later."""
    self.queue_recv = Queue()
    self.queue_send = Queue()
    self.sock = None
    self.appid = None  # interface
class EventletInbox(object):
    """Message inbox backed by an eventlet queue with non-blocking reads."""

    def __init__(self, logger=None):
        ''' Create the inbox queue; use the module logger unless one is given. '''
        self.__inbox = EventletQueue()
        if logger is None:
            self._logger = getLogger('%s.EventletInbox' % __name__)
        else:
            self._logger = logger

    def get(self):
        ''' get data from inbox '''
        try:
            result = self.__inbox.get_nowait()
        except EventletEmpty:
            # Non-blocking read: surface an empty inbox as a domain exception.
            raise EmptyInboxException
        return result

    def put(self, message):
        ''' put message to inbox '''
        self.__inbox.put(message)

    def __len__(self):
        ''' return length of inbox '''
        return self.__inbox.qsize()
def _make_app_iter(self, node, source):
    """
    Returns an iterator over the contents of the source (via its read
    func).  There is also quite a bit of cleanup to ensure garbage
    collection works and the underlying socket of the source is closed.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging purposes.
    """
    try:
        # Spawn reader to read from the source and place in the queue.
        # We then drop any reference to the source or node, for garbage
        # collection purposes.
        queue = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, queue,
                self.app.logger.thread_locals)
        source = node = None
        while True:
            chunk = queue.get(timeout=self.app.node_timeout)
            if isinstance(chunk, bool):  # terminator
                success = chunk
                if not success:
                    raise Exception(_('Failed to read all data'
                                      ' from the source'))
                break
            yield chunk
    except Empty:
        # Reader produced nothing within node_timeout.
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
def __init__(self, *args, **kwargs):
    """Resolve the host, open a TCP socket and start reader/writer greenthreads."""
    Connection.__init__(self, *args, **kwargs)
    self.connected_event = Event()
    self._write_queue = Queue()
    self._callbacks = {}
    self._push_watchers = defaultdict(set)
    sockerr = None
    # Try every resolved address (IPv4/IPv6) until one connects.
    addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
    for (af, socktype, proto, canonname, sockaddr) in addresses:
        try:
            self._socket = socket.socket(af, socktype, proto)
            self._socket.settimeout(1.0)
            self._socket.connect(sockaddr)
            sockerr = None
            break
        except socket.error as err:
            sockerr = err
    if sockerr:
        # All candidate addresses failed; report the last error seen.
        raise socket.error(
            sockerr.errno,
            "Tried connecting to %s. Last error: %s" %
            ([a[4] for a in addresses], sockerr.strerror))
    if self.sockopts:
        for args in self.sockopts:
            self._socket.setsockopt(*args)
    self._read_watcher = eventlet.spawn(lambda: self.handle_read())
    self._write_watcher = eventlet.spawn(lambda: self.handle_write())
    self._send_options_message()
def setUp(self):
    """Build a Chewie wired to a fake timer scheduler and fresh queues."""
    logger = logging.getLogger()
    logger.level = logging.DEBUG
    self.log_file = tempfile.NamedTemporaryFile()
    logger.addHandler(logging.FileHandler(self.log_file.name))
    logger.addHandler(logging.StreamHandler(sys.stdout))
    self.chewie = Chewie('lo', logger,
                         auth_handler, failure_handler, logoff_handler,
                         '127.0.0.1', 1812, 'SECRET',
                         '44:44:44:44:44:44')
    self.fake_scheduler = FakeTimerScheduler()
    self.chewie.timer_scheduler = self.fake_scheduler
    global FROM_SUPPLICANT  # pylint: disable=global-statement
    global TO_SUPPLICANT  # pylint: disable=global-statement
    global FROM_RADIUS  # pylint: disable=global-statement
    global TO_RADIUS  # pylint: disable=global-statement
    global FROM_SUPPLICANT_ACTIVITY  # pylint: disable=global-statement
    # Recreate the queues per test so state never leaks between cases.
    FROM_SUPPLICANT = Queue()
    FROM_SUPPLICANT_ACTIVITY = Queue()
    TO_SUPPLICANT = Queue()
    FROM_RADIUS = Queue()
    TO_RADIUS = Queue()
def __init__(self, local_as, peer_as, router_id, local_address, neighbor,
             hold_time=DEFAULT_HOLD_TIME, open_handler=None):
    """Set up per-peer BGP session state, timers and message queues."""
    self.local_as = local_as
    # 2-byte view of our AS number: values above 65535 cannot be encoded,
    # so the transition value 23456 (AS_TRANS) is used instead.
    if local_as > 65535:
        self.local_as2 = 23456
    else:
        self.local_as2 = self.local_as
    self.peer_as = peer_as
    self.router_id = IPAddress.from_string(router_id)
    self.local_address = IPAddress.from_string(local_address)
    self.neighbor = IPAddress.from_string(neighbor)
    self.hold_time = hold_time
    self.open_handler = open_handler
    # Keepalives go out at a third of the hold time.
    self.keepalive_time = hold_time // 3
    self.output_messages = Queue()
    self.route_updates = Queue()
    self.routes_to_advertise = []
    # presumably: 4-byte AS number capability flag — confirm where negotiated
    self.fourbyteas = False
    self.timers = {
        "hold": Timer(self.hold_time),
        "keepalive": Timer(self.keepalive_time),
    }
    self.state = "active"
def _make_app_iter(self, node, source):
    """
    Returns an iterator over the contents of the source (via its read
    func).  There is also quite a bit of cleanup to ensure garbage
    collection works and the underlying socket of the source is closed.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging purposes.
    """
    try:
        # Spawn reader to read from the source and place in the queue.
        # We then drop any reference to the source or node, for garbage
        # collection purposes.
        queue = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, queue,
                self.app.logger.thread_locals)
        source = node = None
        while True:
            chunk = queue.get(timeout=self.app.node_timeout)
            if isinstance(chunk, bool):  # terminator
                success = chunk
                if not success:
                    raise Exception(_('Failed to read all data'
                                      ' from the source'))
                break
            yield chunk
    except Empty:
        # No chunk arrived within node_timeout.
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
def __init__(self, *args, **kwargs):
    """Create the green pool, the bounded send queue and bookkeeping state."""
    super(AsyncClient, self).__init__(*args, **kwargs)
    self.pool = eventlet.greenpool.GreenPool(DEFAULT_POOL_SIZE)
    # Reader/writer threads are assigned elsewhere; None until then.
    self.reader_thread = None
    self.writer_thread = None
    # Bounded so producers block instead of growing memory without limit.
    self.queue = Queue(DEFAULT_MAX_QUEUE_SIZE)
    self.max_pending = MAX_PENDING
    self.closing = False
def __init__(self, interface_name, logger=None, auth_handler=None,
             failure_handler=None, logoff_handler=None,
             radius_server_ip=None, radius_server_port=None,
             radius_server_secret=None, chewie_id=None):
    """Configure the 802.1X authenticator: logging, RADIUS settings,
    per-port state tables and output queues.

    :param interface_name: network interface to listen on.
    :param logger: optional parent logger; its name prefixes ours.
    """
    self.interface_name = interface_name
    self.log_name = Chewie.__name__
    if logger:
        self.log_name = logger.name + "." + Chewie.__name__
    self.logger = get_logger(self.log_name)
    self.auth_handler = auth_handler
    self.failure_handler = failure_handler
    self.logoff_handler = logoff_handler
    self.radius_server_ip = radius_server_ip
    self.radius_secret = radius_server_secret
    self.radius_server_port = self.RADIUS_UDP_PORT
    if radius_server_port:
        self.radius_server_port = radius_server_port
    self.radius_listen_ip = "0.0.0.0"
    self.radius_listen_port = 0
    self.chewie_id = "44-44-44-44-44-44:"  # used by the RADIUS Attribute
    # 'Called-Station' in Access-Request
    if chewie_id:
        self.chewie_id = chewie_id
    self.state_machines = {}  # port_id_str: { mac : state_machine}
    self.port_to_eapol_id = {}  # port_id: last ID used in preemptive identity request.
    # TODO for port_to_eapol_id - may want to set ID to null (-1...) if sent from the
    # state machine.
    self.port_status = {}  # port_id: status (true=up, false=down)
    self.port_to_identity_job = {}  # port_id: timerJob
    self.eap_output_messages = Queue()
    self.radius_output_messages = Queue()
    self.radius_lifecycle = RadiusLifecycle(self.radius_secret, self.chewie_id,
                                            self.logger)
    self.timer_scheduler = timer_scheduler.TimerScheduler(self.logger)
    self.eap_socket = None
    self.mab_socket = None
    self.pool = None
    self.radius_socket = None
    self.interface_index = None
    # FIX: the original assigned self.eventlets twice (None, then []);
    # keep only the final, meaningful value.
    self.eventlets = []
def _recvData_init(self):
    """Reset the minicap stream-parsing state for a new connection."""
    # Bounded buffer of received data chunks.
    self.__dataq = Queue(1000)
    self.readBannerBytes = 0
    self.bannerLength = 0
    self.readFrameBytes = 0
    self.frameBodyLength = 0
    self.frameBodyLengthStr = b''
    self.frameBody = b''
    # BUG FIX: the original dict literal listed 'realHeight' twice, so the
    # 'virtualHeight' banner field was never present.
    self.banner = {'version': 0, 'length': 0, 'pid': 0,
                   'realWidth': 0, 'realHeight': 0,
                   'virtualWidth': 0, 'virtualHeight': 0,
                   'orientation': 0, 'quirks': 0}
def __init__(self, *args, **kwargs):
    """Create the write queue, connect, and spawn reader/writer loops."""
    Connection.__init__(self, *args, **kwargs)
    self._write_queue = Queue()
    self._connect_socket()
    self._read_watcher = eventlet.spawn(lambda: self.handle_read())
    self._write_watcher = eventlet.spawn(lambda: self.handle_write())
    self._send_options_message()
def __init__(self, addr, serial):
    """Hold connection and service state for the device named by *serial*."""
    self.serial = serial
    self.addr = addr
    # self.desiredState=StateQueue()
    self.serviceQueue = Queue(500)
    self.send_status = 0  # 0: initialized; 1: receiving; -1: closed; -2: closing
    self.get_status = 0   # 0: initialized; 1: receiving; -1: closed; -2: closing
    # Service gets a pair of adjacent ports starting at addr[1].
    self.service = Service(serial, [self.addr[1], self.addr[1] + 1])
    self.msgQ = {}
    self.phone = None
class EchoTerminal(BaseTerminal):
    """Terminal whose output loops straight back to its input."""

    def __init__(self):
        super().__init__()
        # Bounded buffer of echoed payloads.
        self._queue = Queue(1024)

    def send(self, data):
        # Buffer data; it will be returned by a later recv().
        self._queue.put(data)

    def recv(self, count=None):
        # Returns one buffered item (blocks when empty); `count` is unused.
        return self._queue.get()
class LocalMailbox(Mailbox):
    """In-process mailbox delegating to a simple FIFO queue."""

    def __init__(self):
        self._queue = Queue()

    def put(self, message):
        # Never blocks: the queue is unbounded.
        self._queue.put(message)

    def get(self):
        # Blocks until a message is available.
        return self._queue.get()
class MethodReader(object):
    """
    Helper class to receive frames from the broker, combine
    them if necessary with content-headers and content-bodies
    into complete methods.

    Normally a method is represented as a tuple containing
    (channel, method_sig, args, content).

    In the case of a framing error, an AMQPConnectionException
    is placed in the queue.

    In the case of unexpected frames, a tuple made up of
    (channel, AMQPChannelException) is placed in the queue.

    """
    def __init__(self, source):
        self.source = source
        self.queue = Queue()
        self.running = False
        self.partial_messages = {}
        # For each channel, which frame type is expected next
        # (1 is the default, i.e. a method frame).
        self.expected_types = defaultdict(lambda: 1)

    def _next_method(self):
        """
        Read the next method from the source, once one complete method has
        been assembled it is placed in the internal queue.

        """
        while self.queue.empty():
            try:
                frame_type, channel, payload = self.source.read_frame()
            except Exception as e:
                # FIX: modern `except ... as e` syntax replaces the
                # Python-2-only `except Exception, e` form.
                #
                # Connection was closed?  Framing Error?  Queue the
                # exception so the consumer sees it.
                self.queue.put(e)
                break

            if self.expected_types[channel] != frame_type:
                # Out-of-sequence frame: report it on the queue.
                self.queue.put((
                    channel,
                    Exception('Received frame type %s while expecting type: %s' %
                              (frame_type, self.expected_types[channel]))))
            elif frame_type == 1:
                self._process_method_frame(channel, payload)
            elif frame_type == 2:
                self._process_content_header(channel, payload)
            elif frame_type == 3:
                self._process_content_body(channel, payload)
def setUp(self):
    """Prepare logging, a fake timer scheduler and fresh output queues."""
    self.logger = logging.getLogger()
    self.logger.level = logging.DEBUG
    self.log_file = tempfile.NamedTemporaryFile()
    self.logger.addHandler(logging.FileHandler(self.log_file.name))
    self.logger.addHandler(logging.StreamHandler(sys.stdout))
    self.fake_scheduler = FakeTimerScheduler()
    self.timer_scheduler = self.fake_scheduler
    self.managed_port = None
    # Fresh queues per test so messages never leak between cases.
    self.eap_output_messages = Queue()  # pylint: disable=global-statement
    self.radius_output_messages = Queue()  # pylint: disable=global-statement
def __init__(self, pipe):
    """Start a green thread that drains *pipe* line-by-line into a queue."""
    self._pipe = pipe
    self._queue = Queue()
    # Local alias so the closure below does not hold a reference to self.
    _queue = self._queue

    def _enqueue_output():
        # Read until EOF (b'' sentinel), then close the pipe.
        for line in iter(pipe.readline, b''):
            _queue.put(line)
        pipe.close()

    self._green_thread = eventlet.spawn(_enqueue_output)
def __init__(self, addr, serial, deviceInfos):
    """Set up minitouch service state for one device."""
    self.serial = serial
    self.addr = addr
    self.pid = -1
    self.desiredState = StateQueue()
    self.socket = None
    # States: 0? STATE_STOPPED=1 STATE_STARTING=2 STATE_STARTED=3 STATE_STOPPING=4
    self.runningState = 1
    self.minitouchService = MinitouchService(serial, self.addr[1], deviceInfos)
    self.touchQueue = Queue(500)
    self.send_status = 0  # 0: stopped; 1: started; -2: stopping
    self.get_status = 0   # 0: stopped; 1: started; -2: stopping
    self._init()
    self.actionStatus = None
def __init__(self, id_):
    """Initialize per-peer channels, queues and transfer statistics."""
    self.id = id_
    self._peers = {}
    self.peers_info = {}
    self.available_peers = []
    self.main_channel = PriorityQueue()
    # Size-1 queue: a sender blocks until the previous item is consumed.
    self.data_channel = Queue(1)
    self.sending_queue = Queue()
    self.receiving_queue = Queue()
    self.buffer = {}
    # for stats
    self.sent_bytes = 0
    self.received_bytes = 0
    self.delays = {}
def __init__(self, src_mac, auth_success=None):
    """Initialize the authentication state machine for one supplicant MAC."""
    self.src_mac = src_mac
    # Optional callback invoked on successful authentication.
    self.auth_success = auth_success
    self.txn_id = None
    self.challenge = None
    self.expected_response = None
    # TODO - some way to query for this based on identity
    self.password = "******"
    self.state = "idle"
    self.output_messages = Queue()
    # Indirection points — presumably so tests can substitute
    # deterministic txn ids / challenge bytes; confirm with test suite.
    self.txn_id_method = generate_txn_id
    self.challenge_method = generate_random_bytes
class StateMachine:
    """EAP MD5-challenge authentication state machine for one supplicant.

    States: "idle" -> "identity request sent" -> "challenge sent" ->
    "authenticated" (or back to "idle" on a failed challenge).
    """

    def __init__(self, src_mac, auth_success=None):
        self.src_mac = src_mac
        # Optional callback invoked with the supplicant MAC on success.
        self.auth_success = auth_success
        self.txn_id = None
        self.challenge = None
        self.expected_response = None
        # TODO - some way to query for this based on identity
        self.password = "******"
        self.state = "idle"
        self.output_messages = Queue()
        # Overridable factories (e.g. for deterministic values in tests).
        self.txn_id_method = generate_txn_id
        self.challenge_method = generate_random_bytes

    def event(self, event):
        # Only message-received events drive the machine.
        if isinstance(event, EventMessageReceived):
            self.handle_message_received(event.message)

    def handle_message_received(self, message):
        # Dispatch on the current state.
        if self.state == "idle":
            self.handle_idle_message(message)
        elif self.state == "identity request sent":
            self.handle_identity_sent_message(message)
        elif self.state == "challenge sent":
            self.handle_challenge_sent_message(message)

    def handle_idle_message(self, message):
        # An EAPOL-Start kicks off authentication with an identity request.
        if isinstance(message, EapolStartMessage):
            self.txn_id = self.txn_id_method()
            identity_request = IdentityMessage(self.src_mac, self.txn_id,
                                               Eap.REQUEST, "")
            self.output_messages.put(identity_request)
            self.state = "identity request sent"

    def handle_identity_sent_message(self, message):
        # Identity response received: issue a 16-byte MD5 challenge.
        if isinstance(message, IdentityMessage):
            self.challenge = self.challenge_method(16)
            self.calculate_expected_response()
            challenge_request = Md5ChallengeMessage(self.src_mac,
                                                    self.txn_id,
                                                    Eap.REQUEST,
                                                    self.challenge, b"")
            self.output_messages.put(challenge_request)
            self.state = "challenge sent"

    def handle_challenge_sent_message(self, message):
        # Compare the supplicant's answer against the precomputed digest.
        if isinstance(message, Md5ChallengeMessage):
            if message.challenge == self.expected_response:
                if self.auth_success:
                    self.auth_success(message.src_mac)
                message = SuccessMessage(self.src_mac, self.txn_id)
                self.output_messages.put(message)
                self.state = "authenticated"
            else:
                message = FailureMessage(self.src_mac, self.txn_id)
                self.output_messages.put(message)
                self.state = "idle"

    def calculate_expected_response(self):
        # Digest over txn_id byte + password + challenge.
        txn_id_string = struct.pack("B", self.txn_id)
        self.expected_response = md5(txn_id_string +
                                     self.password.encode() +
                                     self.challenge).digest()
def __init__(self, logger: Logger, interval: float = 10):
    """Track submit/connection counters, sampled every *interval* seconds."""
    self._logger = logger
    self._q = Queue()
    self._ok_submits = 0
    self._bad_submits = 0
    self._connections = 0
    self._interval = interval
    # Snapshots of the counters at the previous reporting tick.
    self._was_ok = self._ok_submits
    self._was_bad = self._bad_submits
    self._was_conn = self._connections
    self._labels = ['attacker_id', 'victim_id', 'task_id', 'submit_ok']
def __init__(self, addr, serial):
    """Track sockets, queues and service state for one device."""
    self.serial = serial
    self.addr = addr
    self.socket = None
    self.socket22 = None
    self.desiredState = StateQueue()
    self.serviceQueue = Queue(500)
    # States: 0? STATE_STOPPED=1 STATE_STARTING=2 STATE_STARTED=3 STATE_STOPPING=4
    self.runningState = 1
    self.send_status = 0  # 0: initialized; 1: receiving; -1: closed; -2: closing
    self.get_status = 0   # 0: initialized; 1: receiving; -1: closed; -2: closing
    # Service gets a pair of adjacent ports starting at addr[1].
    self.service = Service(serial, [self.addr[1], self.addr[1] + 1])
    self.msgQ = {}
    self.phone = None
    self._init()
    self.namespace = '/screen%s' % self.serial
class PipeReader:
    """Non-blocking line reader around a subprocess pipe.

    A background green thread drains the pipe into a queue; get()
    returns one buffered line, or None when nothing is pending.
    """

    def __init__(self, pipe):
        self._pipe = pipe
        self._queue = Queue()
        # Local alias so the closure does not capture self.
        _queue = self._queue

        def _enqueue_output():
            # Read lines until EOF (b'' sentinel), then close the pipe.
            for line in iter(pipe.readline, b''):
                _queue.put(line)
            pipe.close()

        self._green_thread = eventlet.spawn(_enqueue_output)

    def get(self):
        """Return the next buffered line, or None without blocking."""
        try:
            return self._queue.get_nowait()
        except Empty:
            pass
        return None

    def shutdown(self):
        """Kill the background reader, ignoring errors during teardown."""
        if self._green_thread is not None:
            try:
                self._green_thread.kill()
            except Exception:
                # FIX: narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed here.
                pass
def __init__(self, source):
    """Bind the frame source and reset per-channel assembly state."""
    self.source = source
    self.queue = Queue()
    self.running = False
    self.partial_messages = {}
    # For each channel, which type is expected next (1 is the default).
    self.expected_types = defaultdict(lambda:1)
def __init__(self, settings):
    """Create a pool of ready NNTP connections, sized by settings['connections']."""
    self.temp_dir = settings['temp_dir']
    self.download_path = settings['download_path']
    # The queue is the pool: get() borrows a connection, put() returns it.
    self.connection_pool = Queue(settings['connections'])
    for _ in xrange(settings['connections']):
        self.connection_pool.put(NNTP(settings['host'],
                                      settings['port'],
                                      settings['username'],
                                      settings['password']))
def __init__(self, *args, **kwargs):
    """Resolve the host, connect a socket and start reader/writer greenthreads."""
    Connection.__init__(self, *args, **kwargs)
    self._write_queue = Queue()
    sockerr = None
    # Try every resolved address (IPv4/IPv6) until one connects.
    addresses = socket.getaddrinfo(
        self.host,
        self.port,
        socket.AF_UNSPEC,
        socket.SOCK_STREAM
    )
    for (af, socktype, proto, canonname, sockaddr) in addresses:
        try:
            self._socket = socket.socket(af, socktype, proto)
            self._socket.settimeout(1.0)
            self._socket.connect(sockaddr)
            sockerr = None
            break
        except socket.error as err:
            sockerr = err
    if sockerr:
        # All candidate addresses failed; report the last error seen.
        raise socket.error(
            sockerr.errno,
            "Tried connecting to %s. Last error: %s" % (
                [a[4] for a in addresses], sockerr.strerror)
        )
    if self.sockopts:
        for args in self.sockopts:
            self._socket.setsockopt(*args)
    self._read_watcher = eventlet.spawn(lambda: self.handle_read())
    self._write_watcher = eventlet.spawn(lambda: self.handle_write())
    self._send_options_message()
class LocalMailbox(Mailbox):
    """Mailbox held in local process memory; cannot be marshalled."""

    def __init__(self):
        self._queue = Queue()

    def put(self, message):
        # Never blocks: the queue is unbounded.
        self._queue.put(message)

    def get(self):
        # Blocks until a message is available.
        return self._queue.get()

    def encode(self):
        # A purely local mailbox has no wire representation.
        raise NotImplementedError

    @staticmethod
    def decode(params):
        raise NotImplementedError
class Actor(object):
    """Minimal actor: a callback run on a pooled greenlet plus an inbox."""

    def __init__(self, callback):
        self._inbox = Queue()
        self._callback = callback
        self._greenlet = None

    def run(self, *args, **kwargs):
        # Register this actor under its greenlet id so code inside the
        # callback can find the "current actor".
        greenlet_id = id(eventlet.getcurrent())
        _actor_map[greenlet_id] = self
        try:
            self._callback(*args, **kwargs)
        finally:
            # Always unregister, even when the callback raised.
            del _actor_map[greenlet_id]

    def spawn(self, *args, **kwargs):
        self._greenlet = _actor_pool.spawn(self.run, *args, **kwargs)

    def link(self, func, *args, **kwargs):
        # All greenlet operations are no-ops until spawn() has run.
        if self._greenlet is None:
            return
        return self._greenlet.link(func, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        if self._greenlet is None:
            return
        return self._greenlet.unlink(func, *args, **kwargs)

    def cancel(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.cancel(*throw_args)

    def kill(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.kill(*throw_args)

    def wait(self):
        if self._greenlet is None:
            return
        return self._greenlet.wait()

    def send(self, message):
        # Deliver a message to this actor's inbox.
        self._inbox.put(message)

    def receive(self):
        # Block until a message is available.
        return self._inbox.get()
class Actor(ActorBase):
    """Actor implementation: a callback run on a pooled greenlet plus an inbox."""

    def __init__(self, callback):
        self._inbox = Queue()
        self._callback = callback
        self._greenlet = None

    def run(self, *args, **kwargs):
        # Register this actor under its greenlet id so code inside the
        # callback can find the "current actor".
        greenlet_id = id(eventlet.getcurrent())
        _actor_map[greenlet_id] = self
        try:
            self._callback(*args, **kwargs)
        finally:
            # Always unregister, even when the callback raised.
            del _actor_map[greenlet_id]

    def spawn(self, *args, **kwargs):
        self._greenlet = _actor_pool.spawn(self.run, *args, **kwargs)

    def link(self, func, *args, **kwargs):
        # All greenlet operations are no-ops until spawn() has run.
        if self._greenlet is None:
            return
        return self._greenlet.link(func, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        if self._greenlet is None:
            return
        return self._greenlet.unlink(func, *args, **kwargs)

    def cancel(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.cancel(*throw_args)

    def kill(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.kill(*throw_args)

    def wait(self):
        if self._greenlet is None:
            return
        return self._greenlet.wait()

    def send(self, message):
        # Deliver a message to this actor's inbox.
        self._inbox.put(message)

    def receive(self):
        # Block until a message is available.
        return self._inbox.get()
def __init__(self, *args, **kwargs):
    """Create the green pool, the bounded send queue and bookkeeping state."""
    super(Client, self).__init__(*args, **kwargs)
    self.pool = eventlet.greenpool.GreenPool(DEFAULT_POOL_SIZE)
    # Reader/writer threads are assigned elsewhere; None until then.
    self.reader_thread = None
    self.writer_thread = None
    # Bounded so producers block instead of growing memory without limit.
    self.queue = Queue(DEFAULT_MAX_QUEUE_SIZE)
    self.max_pending = MAX_PENDING
    self.closing = False
def __init__(self, interface_name, logger=None, auth_handler=None,
             failure_handler=None, logoff_handler=None,
             radius_server_ip=None, radius_server_port=None,
             radius_server_secret=None, chewie_id=None):
    """Configure the 802.1X authenticator: logging, RADIUS settings,
    state tables and output queues.

    :param interface_name: network interface to listen on.
    :param logger: optional parent logger; its name prefixes ours.
    """
    self.interface_name = interface_name
    # BUG FIX: `logger` defaults to None, but `logger.name` was read
    # unconditionally, raising AttributeError when no logger was given.
    if logger:
        self.logger = get_logger(logger.name + "." + Chewie.__name__)
    else:
        self.logger = get_logger(Chewie.__name__)
    self.auth_handler = auth_handler
    self.failure_handler = failure_handler
    self.logoff_handler = logoff_handler
    self.radius_server_ip = radius_server_ip
    self.radius_secret = radius_server_secret
    self.radius_server_port = self.RADIUS_UDP_PORT
    if radius_server_port:
        self.radius_server_port = radius_server_port
    self.radius_listen_ip = "0.0.0.0"
    self.radius_listen_port = 0
    self.chewie_id = "44-44-44-44-44-44:"  # used by the RADIUS Attribute
    # 'Called-Station' in Access-Request
    if chewie_id:
        self.chewie_id = chewie_id
    self.state_machines = {}  # mac: state_machine
    self.eap_output_messages = Queue()
    self.radius_output_messages = Queue()
    self.radius_lifecycle = RadiusLifecycle(self.radius_secret, self.chewie_id,
                                            self.logger)
    self.timer_scheduler = timer_scheduler.TimerScheduler(self.logger)
    self.eap_socket = None
    self.pool = None
    self.radius_socket = None
    self.interface_index = None
    # FIX: the original assigned self.eventlets twice (None, then []);
    # keep only the final, meaningful value.
    self.eventlets = []
def __init__(self, logger=None):
    ''' Create the inbox queue; use the module logger unless one is given. '''
    self.__inbox = EventletQueue()
    if logger is None:
        self._logger = getLogger('%s.EventletInbox' % __name__)
    else:
        self._logger = logger
def __init__(self, socket, address):
    """Record the accepted socket and reset per-peer BGP session state."""
    super(Connection, self).__init__()
    self.socket = socket
    self.address = address
    self.is_active = True
    # The limit is arbitrary. We need to limit queue size to
    # prevent it from eating memory up
    self.send_q = Queue(128)

    # data structures for BGP
    self.peer_ip = None
    self.peer_as = None
    self.peer_id = None
    self.peer_capabilities = []
    # presumably an IPv4/IPv6 discriminator (0 = unset) — confirm with callers
    self._4or6 = 0
    self.hold_time = 240
def wait(self):
    """The difference from Queue.wait: if there is an only item in the
    Queue and it is an exception, raise it, but keep it in the Queue, so
    that future calls to wait() will raise it again.
    """
    if self.has_error() and len(self.items) == 1:
        # the last item, which is an exception: raise it without
        # emptying the Queue, so the error is sticky
        getcurrent().throw(*self.items[0][1])
    else:
        return Queue.wait(self)
class Dispatcher(object):
    """Serializes function calls through a FIFO queue.

    Producers call schedule(); a worker repeatedly calls dispatch() to pop
    and execute one scheduled call at a time.
    """

    def __init__(self):
        self.queue = Queue()

    def dispatch(self):
        """Pop one (fun, args) pair and invoke it, logging any failure."""
        try:
            fun, args = self.queue.get()
            logging.info("dispatching %s(%s)" % (fun.__name__,
                                                 ", ".join(repr(a) for a in args)))
            fun(*args)
        except Exception:
            # FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt can still stop the worker loop; real errors
            # are logged so one bad job cannot kill the dispatcher.
            logging.error(traceback.format_exc())

    def work(self):
        """Run dispatch() forever."""
        while True:
            self.dispatch()

    def schedule(self, fun, *args):
        """Enqueue fun(*args) for execution by the worker."""
        self.queue.put((fun, args))
def __init__(self):
    """Set up publisher/subscriber endpoints and the NB database driver."""
    self._queue = Queue()
    self.publisher = self._get_publisher()
    self.multiproc_subscriber = self._get_multiproc_subscriber()
    # Northbound DB driver class comes from configuration.
    nb_driver_class = importutils.import_class(cfg.CONF.df.nb_db_class)
    self.db = nb_driver_class()
    self.uuid = pub_sub_api.generate_publisher_uuid()
    # Throttle publications per configured count/timeout window.
    self._rate_limit = df_utils.RateLimiter(
        cfg.CONF.df.publisher_rate_limit_count,
        cfg.CONF.df.publisher_rate_limit_timeout,
    )
class DownloadPool(object):
    """Downloads article segments using a fixed pool of NNTP connections."""

    def __init__(self, settings):
        self.temp_dir = settings['temp_dir']
        self.download_path = settings['download_path']
        # The queue is the pool: get() borrows a connection, put() returns it.
        self.connection_pool = Queue(settings['connections'])
        for _ in xrange(settings['connections']):
            self.connection_pool.put(NNTP(settings['host'],
                                          settings['port'],
                                          settings['username'],
                                          settings['password']))

    def download(self, segment):
        """Fetch one segment body to temp_dir; returns the downloaded path."""
        #print 'getting', segment['segment']
        # Get an availble connection; if there are none, block until available.
        connection = self.connection_pool.get()
        segment_path = connection.get_body(segment['segment'], self.temp_dir)
        # Connection is done, put it back in the ready queue.
        self.connection_pool.put(connection)
        #print 'got', segment_path
        # Global byte counter for progress reporting.
        Tracker.downloaded += segment['segment_bytes']
        #print Tracker.downloaded
        return segment_path
def __init__(self, *args, **kwargs):
    """Connect a blocking socket and start reader/writer greenthreads."""
    Connection.__init__(self, *args, **kwargs)
    self.connected_event = Event()
    # Incoming bytes buffer; consumed as whole messages are parsed.
    self._iobuf = StringIO()
    self._write_queue = Queue()
    self._callbacks = {}
    self._push_watchers = defaultdict(set)

    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.settimeout(1.0)
    self._socket.connect((self.host, self.port))

    if self.sockopts:
        for args in self.sockopts:
            self._socket.setsockopt(*args)

    self._read_watcher = eventlet.spawn(lambda: self.handle_read())
    self._write_watcher = eventlet.spawn(lambda: self.handle_write())

    self._send_options_message()
def __init__(self, conf):
    """Parse config, wire syslog logging, and compile the match patterns."""
    TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
    self.conf = conf
    self.logger = logging.getLogger('statsdlogd')
    self.logger.setLevel(logging.INFO)
    self.syslog = SysLogHandler(address='/dev/log')
    self.formatter = logging.Formatter('%(name)s: %(message)s')
    self.syslog.setFormatter(self.formatter)
    self.logger.addHandler(self.syslog)
    self.debug = conf.get('debug', 'false').lower() in TRUE_VALUES
    self.statsd_host = conf.get('statsd_host', '127.0.0.1')
    self.statsd_port = int(conf.get('statsd_port', '8125'))
    self.listen_addr = conf.get('listen_addr', '127.0.0.1')
    self.listen_port = int(conf.get('listen_port', 8126))
    self.report_internal_stats = conf.get('report_internal_stats',
                                          'true').lower() in TRUE_VALUES
    self.int_stats_interval = int(conf.get('internal_stats_interval', 5))
    self.buff = int(conf.get('buffer_size', 8192))
    self.max_q_size = int(conf.get('max_line_backlog', 512))
    self.statsd_sample_rate = float(conf.get('statsd_sample_rate', '.5'))
    self.counter = 0
    self.skip_counter = 0
    self.hits = 0
    # Bounded backlog of raw log lines awaiting pattern matching.
    self.q = Queue(maxsize=self.max_q_size)
    # key: regex
    self.patterns_file = conf.get('patterns_file',
                                  '/etc/statsdlog/patterns.json')
    self.json_patterns = conf.get('json_pattern_file',
                                  'true').lower() in TRUE_VALUES
    try:
        self.patterns = self.load_patterns()
    except Exception as err:
        # Bad pattern config is fatal at startup.
        self.logger.exception(err)
        print "Encountered exception at startup: %s" % err
        sys.exit(1)
    self.statsd_addr = (self.statsd_host, self.statsd_port)
    self.comp_patterns = {}
    for item in self.patterns:
        self.comp_patterns[item] = re.compile(self.patterns[item])
def add(self, key, addr):
    """Register screen-cap/touch/service clients for a device key.

    Returns 1 on success, -1 when the key is already registered,
    0 when construction of any client fails.
    """
    if self.clients.get(key):
        return -1
    else:
        try:
            c = RealtimeScreenCap(addr, key)
            q = Queue(500)
            q2 = Queue(500)
            t = RealtimeScreenTouch(('localhost', 1111), key, q)
            s = stfServices(('localhost', 1100), key, q2)
            self.clients[key] = {
                'touch': t,
                'cap': c,
                'touchQ': q,
                'services': s,
                'servicesQ': q2
            }
            print(self.keys(), 'keys')
            return 1
        except Exception as e:
            print(str(e))
            return 0
def __init__(self, *args, **kwargs):
    """Initialize connection state, connect, and start I/O greenthreads."""
    Connection.__init__(self, *args, **kwargs)
    self.connected_event = Event()
    self._write_queue = Queue()
    self._callbacks = {}
    self._push_watchers = defaultdict(set)

    self._connect_socket()

    self._read_watcher = eventlet.spawn(lambda: self.handle_read())
    self._write_watcher = eventlet.spawn(lambda: self.handle_write())

    self._send_options_message()
def init_portlist(self):
    """Pre-fill the free-port pools for minicap, minitouch and stf services."""
    # Each pool holds up to 50 available ports.
    minicap, minitouch, services = Queue(50), Queue(50), Queue(50)
    # minicap ports 1300-1349; minitouch mirrors them 100 lower (1200-1249).
    for offset in range(50):
        minicap.put(1300 + offset)
        minitouch.put(1200 + offset)
    # stf service ports: even numbers 1100-1188.
    for port in range(1100, 1190, 2):
        services.put(port)
    self.minicap_q = minicap
    self.minitouch_q = minitouch
    self.stfservices_q = services
def __init__(self, max_connections, input_is_plain):
    """Start the crawler: handler pool, robots cache and the IO worker."""
    self.max_connections = max_connections
    self.input_is_plain = input_is_plain

    # Size-1 queue keeps producer and consumer in lock-step.
    self.queue = Queue(1)
    self.closed = False
    self._handler_pool = GreenPool(self.max_connections)
    self._robots_cache = PoolMap(self.get_robots_checker,
                                 pool_max_size=1,
                                 timeout=600)

    # Start IO worker and die if he does.
    self.io_worker = io.Worker(lambda: self.closed)
    t = spawn(self.io_worker.run_loop)
    t.link(reraise_errors, greenthread.getcurrent())

    log.debug(u"Crawler started. Max connections: %d.", self.max_connections)
def __init__(self, addr, channel, manager, key):
    """Initialize minicap banner/frame parsing state for one device stream."""
    self.key = key
    self.manager = manager
    # NOTE(review): the `channel` argument is ignored and reset to '' —
    # confirm against callers whether this is intentional.
    self.channel = ''
    self.addr = addr
    # Bounded buffer of received data chunks.
    self.__dataq = Queue(1000)
    self.recv_status = True
    self.push_status = True
    self.readBannerBytes = 0
    self.bannerLength = 0
    self.readFrameBytes = 0
    self.frameBodyLength = 0
    self.frameBodyLengthStr = b''
    self.frameBody = b''
    # BUG FIX: the original dict literal listed 'realHeight' twice, so the
    # 'virtualHeight' banner field was never present.
    self.banner = {'version': 0, 'length': 0, 'pid': 0,
                   'realWidth': 0, 'realHeight': 0,
                   'virtualWidth': 0, 'virtualHeight': 0,
                   'orientation': 0, 'quirks': 0}
def __init__(self, conf):
    """Parse config, wire syslog logging, and compile the match patterns."""
    TRUE_VALUES = set(("true", "1", "yes", "on", "t", "y"))
    self.conf = conf
    self.logger = logging.getLogger("statsdlogd")
    self.logger.setLevel(logging.INFO)
    self.syslog = SysLogHandler(address="/dev/log")
    self.formatter = logging.Formatter("%(name)s: %(message)s")
    self.syslog.setFormatter(self.formatter)
    self.logger.addHandler(self.syslog)
    if conf.get("debug", False) in TRUE_VALUES:
        self.debug = True
    else:
        self.debug = False
    self.statsd_host = conf.get("statsd_host", "127.0.0.1")
    self.statsd_port = int(conf.get("statsd_port", "8125"))
    self.listen_addr = conf.get("listen_addr", "127.0.0.1")
    self.listen_port = int(conf.get("listen_port", 8126))
    if conf.get("report_internal_stats", False) in TRUE_VALUES:
        self.report_internal_stats = True
    else:
        self.report_internal_stats = False
    self.int_stats_interval = int(conf.get("internal_stats_interval", 5))
    self.buff = int(conf.get("buffer_size", 8192))
    self.max_q_size = int(conf.get("max_line_backlog", 512))
    self.statsd_sample_rate = float(conf.get("statsd_sample_rate", ".5"))
    self.counter = 0
    self.skip_counter = 0
    self.hits = 0
    # Bounded backlog of raw log lines awaiting pattern matching.
    self.q = Queue(maxsize=self.max_q_size)
    # key: regex
    self.patterns_file = conf.get("patterns_file", "patterns.json")
    try:
        with open(self.patterns_file) as pfile:
            self.patterns = json.loads(pfile.read())
    except Exception as err:
        # Bad pattern config is fatal at startup.
        self.logger.critical(err)
        print err
        sys.exit(1)
    self.statsd_addr = (self.statsd_host, self.statsd_port)
    self.comp_patterns = {}
    for item in self.patterns:
        self.comp_patterns[item] = re.compile(self.patterns[item])
def __init__(self, socket, address):
    """Record the accepted socket and reset per-peer BGP session state."""
    super(Connection, self).__init__()
    self.socket = socket
    self.address = address
    self.is_active = True
    # The limit is arbitrary. We need to limit queue size to
    # prevent it from eating memory up
    self.send_q = Queue(128)

    # data structures for BGP
    self.peer_ip = None
    self.peer_as = None
    self.peer_id = None
    self.peer_capabilities = []
    # Timestamp of the last keepalive seen from the peer.
    self.peer_last_keepalive_timestamp = None
    # presumably an IPv4/IPv6 discriminator (0 = unset) — confirm with callers
    self._4or6 = 0
    self.hold_time = 240
class StatsdLog(object):
    """Listens for syslog lines over UDP, matches them against configured
    regex patterns, and fires statsd counter events for every match."""

    def __init__(self, conf):
        """Parse config, wire syslog logging, and compile the match patterns."""
        TRUE_VALUES = set(("true", "1", "yes", "on", "t", "y"))
        self.conf = conf
        self.logger = logging.getLogger("statsdlogd")
        self.logger.setLevel(logging.INFO)
        self.syslog = SysLogHandler(address="/dev/log")
        self.formatter = logging.Formatter("%(name)s: %(message)s")
        self.syslog.setFormatter(self.formatter)
        self.logger.addHandler(self.syslog)
        if conf.get("debug", False) in TRUE_VALUES:
            self.debug = True
        else:
            self.debug = False
        self.statsd_host = conf.get("statsd_host", "127.0.0.1")
        self.statsd_port = int(conf.get("statsd_port", "8125"))
        self.listen_addr = conf.get("listen_addr", "127.0.0.1")
        self.listen_port = int(conf.get("listen_port", 8126))
        if conf.get("report_internal_stats", False) in TRUE_VALUES:
            self.report_internal_stats = True
        else:
            self.report_internal_stats = False
        self.int_stats_interval = int(conf.get("internal_stats_interval", 5))
        self.buff = int(conf.get("buffer_size", 8192))
        self.max_q_size = int(conf.get("max_line_backlog", 512))
        self.statsd_sample_rate = float(conf.get("statsd_sample_rate", ".5"))
        self.counter = 0
        self.skip_counter = 0
        self.hits = 0
        # Bounded backlog of raw log lines awaiting pattern matching.
        self.q = Queue(maxsize=self.max_q_size)
        # key: regex
        self.patterns_file = conf.get("patterns_file", "patterns.json")
        try:
            with open(self.patterns_file) as pfile:
                self.patterns = json.loads(pfile.read())
        except Exception as err:
            self.logger.critical(err)
            # FIX: print(err) works identically on Python 2 and 3 for a
            # single argument; the original used the py2-only statement form.
            print(err)
            sys.exit(1)
        self.statsd_addr = (self.statsd_host, self.statsd_port)
        self.comp_patterns = {}
        for item in self.patterns:
            self.comp_patterns[item] = re.compile(self.patterns[item])

    def check_line(self, line):
        """
        Check if a line matches our search patterns.

        :param line: The string to check
        :returns: None or regex entry that matched
        """
        for entry in self.comp_patterns:
            if self.comp_patterns[entry].match(line):
                return entry
        return None

    def internal_stats(self):
        """
        Periodically send our own stats to statsd.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(self.int_stats_interval)
            self.send_event("statsdlog.lines:%s|c" %
                            (self.counter - lastcount))
            lastcount = self.counter
            self.send_event("statsdlog.hits:%s|c" % (self.hits - lasthit))
            lasthit = self.hits

    def stats_print(self):
        """
        Periodically dump some stats to the logs.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(2)
            # NOTE(review): deltas are divided by 60 although the sampling
            # window is 2 seconds — confirm the intended rate units.
            lps = (self.counter - lastcount) / 60
            hps = (self.hits - lasthit) / 60
            lastcount = self.counter
            lasthit = self.hits
            self.logger.info("per second: %d lines - hits %d" % (lps, hps))
            self.logger.info("totals: %d hits - %d lines" % (self.hits,
                                                             self.counter))
            # BUG FIX: `is not 0` tests object identity (an implementation
            # detail for small ints); use numeric inequality instead.
            if self.skip_counter != 0:
                self.logger.info("Had to skip %d log lines so far" %
                                 self.skip_counter)

    def send_event(self, payload):
        """
        Fire event to statsd

        :param payload: The payload of the udp packet to send.
        """
        try:
            udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            udp_socket.sendto(payload, self.statsd_addr)
        except Exception:
            # udp sendto failed (socket already in use?), but thats ok
            self.logger.error("Error trying to send statsd event")

    def statsd_counter_increment(self, stats, delta=1):
        """
        Increment multiple statsd stats counters

        :param stats: list of stats items to package and send
        :param delta: delta of stats items
        """
        if self.statsd_sample_rate < 1:
            # Sampled: send only a fraction of events, tagging the rate so
            # statsd can scale the counts back up.
            if random() <= self.statsd_sample_rate:
                for item in stats:
                    payload = "%s:%s|c|@%s" % (item, delta,
                                               self.statsd_sample_rate)
                    self.send_event(payload)
        else:
            for item in stats:
                payload = "%s:%s|c" % (item, delta)
                self.send_event(payload)

    def worker(self):
        """
        Check for and process log lines in queue
        """
        while True:
            msg = self.q.get()
            matched = self.check_line(msg)
            if matched:
                self.statsd_counter_increment([matched])
                if self.hits >= maxint:
                    self.logger.info("hit maxint, reset hits counter")
                    self.hits = 0
                self.hits += 1
            else:
                pass

    def listener(self):
        """
        syslog udp listener
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        bind_addr = (self.listen_addr, self.listen_port)
        sock.bind(bind_addr)
        self.logger.info("listening on %s:%d" % bind_addr)
        while 1:
            data, addr = sock.recvfrom(self.buff)
            if not data:
                break
            else:
                if self.q.qsize() < self.max_q_size:
                    self.q.put(data)
                    if self.counter >= maxint:
                        self.logger.info("hit maxint, reset seen counter")
                        self.counter = 0
                    self.counter += 1
                else:
                    if self.debug:
                        # BUG FIX: logging.Logger has no `notice` method, so
                        # this branch raised AttributeError; use warning().
                        self.logger.warning("max log lines in queue, skipping")
                    if self.skip_counter >= maxint:
                        self.logger.info("hit maxint, reset skip counter")
                        self.skip_counter = 0
                    self.skip_counter += 1

    def start(self):
        """
        Start the listener, worker, and mgmt server.
        """
        eventlet.spawn_n(self.worker)
        if self.debug:
            eventlet.spawn_n(self.stats_print)
        if self.report_internal_stats:
            eventlet.spawn_n(self.internal_stats)
        while True:
            try:
                self.listener()
            except Exception as err:
                self.logger.error(err)
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.

    A dedicated reader greenthread pulls bytes off the socket and parses
    length-prefixed messages out of an internal buffer; a dedicated writer
    greenthread drains ``_write_queue`` onto the socket.
    """

    # Bytes still required to complete the current in-flight message
    # (0 when no partial message is buffered).
    _total_reqd_bytes = 0
    _read_watcher = None
    _write_watcher = None
    _socket = None

    @classmethod
    def factory(cls, *args, **kwargs):
        """Build a connection and wait up to ``timeout`` seconds (default
        5.0, popped from kwargs) for startup to complete.

        :raises OperationTimedOut: if the connection event never fires.
        :raises Exception: re-raises any error recorded during connect.
        """
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn

    def __init__(self, *args, **kwargs):
        """Open the TCP socket, apply any ``sockopts``, spawn the reader and
        writer greenthreads, and kick off the protocol handshake."""
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._iobuf = StringIO()
        self._write_queue = Queue()

        self._callbacks = {}
        self._push_watchers = defaultdict(set)

        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(1.0)
        self._socket.connect((self.host, self.port))

        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)

        # Bound methods are already callables; no lambda wrapper needed.
        self._read_watcher = eventlet.spawn(self.handle_read)
        self._write_watcher = eventlet.spawn(self.handle_write)
        self._send_options_message()

    def close(self):
        """Idempotently shut the connection down: kill the watcher
        greenthreads (unless we are one of them), close the socket, and
        fail any outstanding callbacks."""
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True

        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        # Never kill the greenthread we are currently running in.
        cur_gthread = eventlet.getcurrent()

        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))

        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_write(self):
        """Writer loop: drain the write queue onto the socket forever,
        defuncting the connection on any socket error."""
        while True:
            try:
                next_msg = self._write_queue.get()
                # BUGFIX: was socket.send(), which may transmit only part of
                # the message; sendall() guarantees the whole buffer goes out
                # (and matches the "sendall" wording of the log line below).
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug(
                    "Exception during socket sendall for %s: %s", self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        """Reader loop: wait in select() until the socket is readable, pull
        bytes into ``_iobuf``, and carve complete messages out of it.

        Wire format (established by the parsing below): an 8-byte header
        whose bytes 4-8 hold the big-endian body length, followed by the
        body itself.
        """
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug(
                        "Exception during read select() for %s: %s",
                        self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                # Timeouts are expected with the 1-second socket timeout;
                # anything else is fatal for this connection.
                if not is_timeout(err):
                    log.debug(
                        "Exception during socket recv for %s: %s",
                        self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                while True:
                    pos = self._iobuf.tell()
                    if pos < 8 or (self._total_reqd_bytes > 0
                                   and pos < self._total_reqd_bytes):
                        # we don't have a complete header yet or we
                        # already saw a header, but we don't have a
                        # complete message yet
                        break
                    else:
                        # have enough for header, read body len from header
                        self._iobuf.seek(4)
                        body_len = int32_unpack(self._iobuf.read(4))

                        # seek to end to get length of current buffer
                        self._iobuf.seek(0, os.SEEK_END)
                        pos = self._iobuf.tell()

                        if pos >= body_len + 8:
                            # read message header and body
                            self._iobuf.seek(0)
                            msg = self._iobuf.read(8 + body_len)

                            # leave leftover in current buffer
                            leftover = self._iobuf.read()
                            self._iobuf = StringIO()
                            self._iobuf.write(leftover)

                            self._total_reqd_bytes = 0
                            self.process_msg(msg, body_len)
                        else:
                            # Remember how much we still need so the header
                            # isn't re-parsed on every recv().
                            self._total_reqd_bytes = body_len + 8
                            break
            else:
                # recv() yielded no data: the peer closed the connection.
                log.debug("connection closed by server")
                self.close()
                return

    def push(self, data):
        """Queue ``data`` for the writer greenthread, split into
        ``out_buffer_size``-sized chunks."""
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        """Subscribe ``callback`` to pushed events of ``event_type``."""
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        """Subscribe several event-type -> callback pairs in one request."""
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
def __init__(self): self._queue = Queue()
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.
    """
    # Bytes still required to complete the current in-flight message.
    _total_reqd_bytes = 0
    _read_watcher = None
    _write_watcher = None

    # Green (cooperative, non-blocking) socket/ssl modules handed to the
    # base class so _connect_socket() builds eventlet-friendly sockets.
    _socket_impl = eventlet.green.socket
    _ssl_impl = eventlet.green.ssl

    @classmethod
    def initialize_reactor(cls):
        # Monkey-patch the stdlib so blocking calls yield to the eventlet hub.
        eventlet.monkey_patch()

    def __init__(self, *args, **kwargs):
        # Connect the socket (via the base-class helper), then spawn the
        # reader/writer greenthreads and start the protocol handshake.
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._write_queue = Queue()

        self._callbacks = {}
        self._push_watchers = defaultdict(set)

        self._connect_socket()

        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        # Idempotent shutdown: only the first caller past the lock proceeds.
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True

        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        # Never kill the greenthread we are currently running in.
        cur_gthread = eventlet.getcurrent()

        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))

        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Writer loop: drain the queue onto the socket until an error
        # defuncts the connection.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        # Reader loop: block in select() until readable, buffer incoming
        # bytes, and let process_io_buffer() carve out complete messages.
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                # Timeouts are expected; any other socket error is fatal.
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # recv() returned no data: the peer closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Queue outgoing data for the writer, in out_buffer_size chunks.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        # Subscribe a callback for pushed events of one type.
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        # Subscribe several event-type -> callback pairs in one request.
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
def __init__(self, callback): self._inbox = Queue() self._callback = callback self._greenlet = None
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.

    This implementation assumes all eventlet monkey patching is active. It is
    not tested with partial patching.
    """
    _read_watcher = None
    _write_watcher = None

    # Green (cooperative) socket/ssl modules used by the base class's
    # _connect_socket() helper.
    _socket_impl = eventlet.green.socket
    _ssl_impl = eventlet.green.ssl

    # Class-level timer state shared by every connection of this class.
    _timers = None
    _timeout_watcher = None
    _new_timer = None

    @classmethod
    def initialize_reactor(cls):
        eventlet.monkey_patch()
        # Lazily create the shared timer manager and its service greenthread
        # the first time the reactor is initialized.
        if not cls._timers:
            cls._timers = TimerManager()
            cls._timeout_watcher = eventlet.spawn(cls.service_timeouts)
            cls._new_timer = Event()

    @classmethod
    def create_timer(cls, timeout, callback):
        timer = Timer(timeout, callback)
        cls._timers.add_timer(timer)
        # Wake service_timeouts() so it can honor a possibly-earlier deadline.
        cls._new_timer.set()
        return timer

    @classmethod
    def service_timeouts(cls):
        """
        cls._timeout_watcher runs in this loop forever.
        It is usually waiting for the next timeout on the cls._new_timer Event.
        When new timers are added, that event is set so that the watcher can
        wake up and possibly set an earlier timeout.
        """
        timer_manager = cls._timers
        while True:
            next_end = timer_manager.service_timeouts()
            # Sleep until the next deadline; fall back to a long sleep (10000s)
            # when no timers are pending.  A new timer wakes us early.
            sleep_time = max(next_end - time.time(), 0) if next_end else 10000
            cls._new_timer.wait(sleep_time)
            cls._new_timer.clear()

    def __init__(self, *args, **kwargs):
        # Connect the socket, then spawn the reader/writer greenthreads and
        # start the protocol handshake.
        Connection.__init__(self, *args, **kwargs)

        self._write_queue = Queue()
        self._connect_socket()

        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        # Idempotent shutdown: only the first caller past the lock proceeds.
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True

        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        # Never kill the greenthread we are currently running in.
        cur_gthread = eventlet.getcurrent()

        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))

        if not self.is_defunct:
            # NOTE: this variant fails outstanding *requests* (not callbacks
            # as in older revisions of this class).
            self.error_all_requests(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Writer loop: drain the queue onto the socket until an error
        # defuncts the connection.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        # Reader loop: block in select() until readable, buffer incoming
        # bytes, and let process_io_buffer() carve out complete messages.
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                # Timeouts are expected; any other socket error is fatal.
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # recv() returned no data: the peer closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Queue outgoing data for the writer, in out_buffer_size chunks.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])
class Client(baseasync.BaseAsync):
    """Eventlet-based asynchronous client.

    Outgoing operations are queued and drained by a writer greenthread;
    a reader greenthread receives responses.  Dispatched callbacks run on
    a shared GreenPool.
    """

    def __init__(self, *args, **kwargs):
        super(Client, self).__init__(*args, **kwargs)
        # Pool for user callback dispatch.
        self.pool = eventlet.greenpool.GreenPool(DEFAULT_POOL_SIZE)
        self.reader_thread = None
        self.writer_thread = None
        # Bounded queue of pending outbound operations.
        self.queue = Queue(DEFAULT_MAX_QUEUE_SIZE)
        self.max_pending = MAX_PENDING
        self.closing = False

    def build_socket(self, family=socket.AF_INET):
        # Plain (monkey-patched) socket; family defaults to IPv4.
        return socket.socket(family)

    def wrap_secure_socket(self, s, ssl_version):
        # Green SSL wrapper so TLS I/O cooperates with eventlet.
        return GreenSSLSocket(s, ssl_version=ssl_version)

    def connect(self):
        super(Client, self).connect()
        self.closing = False
        # Spawn the I/O greenthreads once the transport is connected.
        self.reader_thread = eventlet.greenthread.spawn(self._reader_run)
        self.writer_thread = eventlet.greenthread.spawn(self._writer_run)

    def dispatch(self, fn, *args, **kwargs):
        # Run a callback on the pool rather than inline.
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Dispatching: Pending {0}".format(len(self._pending)))
        self.pool.spawn_n(fn, *args, **kwargs)

    def shutdown(self):
        # Graceful close: stop accepting work; finish once nothing is
        # pending or queued (otherwise _reader_run completes the close).
        self.closing = True
        if len(self._pending) + self.queue.qsize() == 0:
            self._end_close()

    def close(self):
        self.shutdown()
        self.wait()

    def _end_close(self):
        # Tear down the I/O greenthreads and the underlying transport.
        self.writer_thread.kill()
        self.reader_thread.kill()

        super(Client, self).close()

        self.writer_thread = None
        self.reader_thread = None

    def sendAsync(self, header, value, onSuccess, onError, no_ack=False):
        """Queue one operation; callbacks fire on completion or error."""
        if self.closing:
            raise common.ConnectionClosed("Client is closing, can't queue more operations.")

        if self.faulted:
            self._raise(common.ConnectionFaulted("Can't send message when connection is on a faulted state."), onError)
            return  # skip the rest

        # fail fast on NotConnected
        if not self.isConnected:
            self._raise(common.NotConnected("Not connected."), onError)
            return  # skip the rest

        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Queue: {0}".format(self.queue.qsize()))

        self.queue.put((header, value, onSuccess, onError, no_ack))
        # Yield so the writer greenthread gets a chance to run.
        eventlet.sleep(0)

    def wait(self):
        # Block until every queued operation has been processed.
        self.queue.join()

    def send(self, header, value):
        """Synchronous send: wraps sendAsync and blocks for the result."""
        done = eventlet.event.Event()

        class Dummy:
            pass
        # Mutable holder shared with the inner callbacks below.
        d = Dummy()
        d.error = None
        d.result = None

        def innerSuccess(m, r, value):
            d.result = (m, r, value)
            done.send()

        def innerError(e):
            d.error = e
            done.send()

        self.sendAsync(header, value, innerSuccess, innerError)

        done.wait()
        # TODO(Nacho): should be add a default timeout?
        if d.error:
            raise d.error
        return d.result

    def _writer_run(self):
        # Writer loop: apply backpressure while too many operations are
        # pending, then forward queued operations to the base class.
        while self.isConnected and not self.faulted:
            try:
                while len(self._pending) > self.max_pending:
                    eventlet.sleep(0)

                (header, value, onSuccess, onError, no_ack) = self.queue.get()
                super(Client, self).sendAsync(header, value, onSuccess,
                                              onError, no_ack)
            except common.ConnectionFaulted:
                pass
            except common.ConnectionClosed:
                pass
            except Exception as ex:
                self._fault_client(ex)

            # Yield execution, don't starve the reader
            eventlet.sleep(0)

    def _reader_run(self):
        # Reader loop: receive one response, mark its queue slot done, and
        # complete a graceful close when the last operation finishes.
        while self.isConnected and not self.faulted:
            try:
                self._async_recv()
                self.queue.task_done()
                if self.closing and len(self._pending) + self.queue.qsize() == 0:
                    self._end_close()
            except common.ConnectionFaulted:
                pass
            except Exception as ex:
                self._fault_client(ex)