def _make_app_iter(self, node, source):
    """
    Yield the body of *source*, read by a background greenthread.

    A reader greenthread (_make_app_iter_reader) pulls chunks from the
    source and pushes them through a one-slot queue; this generator
    drains that queue. The local references to *source* and *node* are
    dropped right after spawning so the response object (and its
    underlying socket) can be garbage collected as soon as the reader is
    done with them.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging
                 purposes.
    """
    try:
        pipe = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, pipe,
                self.app.logger.thread_locals)
        # Drop our references for garbage collection purposes.
        source = node = None
        while True:
            item = pipe.get(timeout=self.app.node_timeout)
            if not isinstance(item, bool):
                yield item
                continue
            # A boolean is the reader's terminator; False means the
            # reader could not deliver the whole body.
            if item:
                break
            raise Exception(_('Failed to read all data from the source'))
    except Empty:
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
def _make_app_iter(self, node, source):
    """
    Iterate over the contents of *source* via a reader greenthread.

    The reader (_make_app_iter_reader) feeds a single-slot queue; chunks
    are yielded until the reader pushes a boolean terminator (True for a
    complete read, False for a truncated one). References to *source*
    and *node* are released immediately so the source's socket can be
    cleaned up by garbage collection.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging
                 purposes.
    """
    try:
        chunk_queue = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, chunk_queue,
                self.app.logger.thread_locals)
        source = node = None  # allow garbage collection
        chunk = chunk_queue.get(timeout=self.app.node_timeout)
        while not isinstance(chunk, bool):
            yield chunk
            chunk = chunk_queue.get(timeout=self.app.node_timeout)
        if chunk is False:
            raise Exception(_('Failed to read all data from the source'))
    except Empty:
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
class LocalMailbox(Mailbox):
    """Mailbox whose messages never leave the local process.

    Backed by an unbounded queue; get() blocks until a message arrives.
    """

    def __init__(self):
        self._queue = Queue()

    def put(self, message):
        """Deposit *message* for a later get()."""
        self._queue.put(message)

    def get(self):
        """Pop and return the oldest message, blocking while empty."""
        return self._queue.get()
class EchoTerminal(BaseTerminal):
    """Terminal that loops every sent payload straight back to the reader."""

    def __init__(self):
        super().__init__()
        # Bounded buffer: send() blocks once 1024 payloads are unread.
        self._queue = Queue(1024)

    def send(self, data):
        """Queue *data* so a subsequent recv() returns it."""
        self._queue.put(data)

    def recv(self, count=None):
        """Return the oldest sent payload, blocking while empty.

        NOTE(review): *count* is accepted for interface compatibility but
        ignored — confirm callers do not rely on sized reads.
        """
        return self._queue.get()
class LocalMailbox(Mailbox):
    """In-process mailbox backed by an unbounded queue.

    Serialization is deliberately unsupported: a local mailbox is bound
    to this process and cannot be transferred.
    """

    def __init__(self):
        self._queue = Queue()

    def put(self, message):
        """Enqueue *message* for a later get()."""
        self._queue.put(message)

    def get(self):
        """Block until a message is available and return it."""
        return self._queue.get()

    def encode(self):
        # Local mailboxes are process-bound — there is nothing meaningful
        # to serialize.
        raise NotImplementedError

    @staticmethod
    def decode(params):
        raise NotImplementedError
class Actor(ActorBase):
    """A green-thread actor: a callback plus an inbox queue.

    While the callback runs, the actor registers itself in the
    module-level _actor_map keyed by the current greenlet's id, so code
    running inside the callback can look up its own Actor instance.
    """

    def __init__(self, callback):
        self._inbox = Queue()
        self._callback = callback
        self._greenlet = None

    def run(self, *args, **kwargs):
        """Execute the callback, keeping _actor_map current for its duration."""
        key = id(eventlet.getcurrent())
        _actor_map[key] = self
        try:
            self._callback(*args, **kwargs)
        finally:
            del _actor_map[key]

    def spawn(self, *args, **kwargs):
        """Start run() on the shared actor pool."""
        self._greenlet = _actor_pool.spawn(self.run, *args, **kwargs)

    def link(self, func, *args, **kwargs):
        # All greenlet proxies below are no-ops until spawn() has run.
        if self._greenlet is not None:
            return self._greenlet.link(func, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        if self._greenlet is not None:
            return self._greenlet.unlink(func, *args, **kwargs)

    def cancel(self, *throw_args):
        if self._greenlet is not None:
            return self._greenlet.cancel(*throw_args)

    def kill(self, *throw_args):
        if self._greenlet is not None:
            return self._greenlet.kill(*throw_args)

    def wait(self):
        if self._greenlet is not None:
            return self._greenlet.wait()

    def send(self, message):
        """Deliver *message* to this actor's inbox."""
        self._inbox.put(message)

    def receive(self):
        """Block until a message arrives and return it."""
        return self._inbox.get()
class Actor(object):
    """Actor wrapping a callback with an inbox and an optional greenlet.

    run() publishes the instance in the module-level _actor_map for the
    lifetime of the callback, keyed by the executing greenlet's id.
    """

    def __init__(self, callback):
        self._inbox = Queue()
        self._callback = callback
        self._greenlet = None

    def run(self, *args, **kwargs):
        """Invoke the callback; register/unregister self in _actor_map."""
        gid = id(eventlet.getcurrent())
        _actor_map[gid] = self
        try:
            self._callback(*args, **kwargs)
        finally:
            del _actor_map[gid]

    def spawn(self, *args, **kwargs):
        """Schedule run() on the shared _actor_pool."""
        self._greenlet = _actor_pool.spawn(self.run, *args, **kwargs)

    def link(self, func, *args, **kwargs):
        if self._greenlet is None:
            return
        return self._greenlet.link(func, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        if self._greenlet is None:
            return
        return self._greenlet.unlink(func, *args, **kwargs)

    def cancel(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.cancel(*throw_args)

    def kill(self, *throw_args):
        if self._greenlet is None:
            return
        return self._greenlet.kill(*throw_args)

    def wait(self):
        if self._greenlet is None:
            return
        return self._greenlet.wait()

    def send(self, message):
        """Put *message* on the actor's inbox queue."""
        self._inbox.put(message)

    def receive(self):
        """Block for the next inbox message and return it."""
        return self._inbox.get()
class Dispatcher(object):
    """Serial work queue: callables are scheduled with their arguments
    and executed one at a time by dispatch()/work()."""

    def __init__(self):
        self.queue = Queue()

    def dispatch(self):
        """Pop one (fun, args) pair from the queue and invoke it.

        Errors raised by the job are logged and swallowed so a single
        bad job cannot kill the work() loop.
        """
        try:
            fun, args = self.queue.get()
            logging.info("dispatching %s(%s)" % (fun.__name__,
                         ", ".join(repr(a) for a in args)))
            fun(*args)
        except Exception:
            # Was a bare `except:`, which also trapped KeyboardInterrupt
            # and SystemExit and made the worker unstoppable; narrowed so
            # those can propagate.
            logging.error(traceback.format_exc())

    def work(self):
        """Run dispatch() forever."""
        while True:
            self.dispatch()

    def schedule(self, fun, *args):
        """Enqueue fun(*args) for later execution by dispatch()."""
        self.queue.put((fun, args))
class DownloadPool(object):
    """Fixed-size pool of NNTP connections used to fetch article segments.

    The pool is a queue of ready connections: download() borrows one,
    fetches a segment body into temp_dir, and returns the connection.
    """

    def __init__(self, settings):
        self.temp_dir = settings['temp_dir']
        self.download_path = settings['download_path']
        self.connection_pool = Queue(settings['connections'])
        for _ in xrange(settings['connections']):
            self.connection_pool.put(NNTP(settings['host'],
                                          settings['port'],
                                          settings['username'],
                                          settings['password']))

    def download(self, segment):
        """Fetch one segment body and return the path it was written to.

        :param segment: mapping with 'segment' (article id) and
            'segment_bytes' (size, added to Tracker.downloaded).
        """
        # Get an available connection; if there are none, block until
        # one is returned.
        connection = self.connection_pool.get()
        try:
            segment_path = connection.get_body(segment['segment'],
                                               self.temp_dir)
        finally:
            # Bugfix: always return the connection to the pool, even when
            # get_body() raises — otherwise each failed download would
            # permanently shrink the pool until downloads deadlock.
            self.connection_pool.put(connection)
        Tracker.downloaded += segment['segment_bytes']
        return segment_path
class EventletConnection(Connection):
    """ An implementation of :class:`.Connection` that utilizes ``eventlet``. """

    # Total bytes (8-byte header + body) needed before the in-flight
    # message can be decoded; 0 means no partial message is pending.
    _total_reqd_bytes = 0
    _read_watcher = None   # greenthread running handle_read()
    _write_watcher = None  # greenthread running handle_write()
    _socket = None

    @classmethod
    def factory(cls, *args, **kwargs):
        # Build a connection and wait up to `timeout` seconds for the
        # connect handshake to finish before handing it to the caller.
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._iobuf = StringIO()       # accumulates raw bytes off the wire
        self._write_queue = Queue()    # outbound chunks for handle_write()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(1.0)
        self._socket.connect((self.host, self.port))
        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)
        # Reader and writer each get their own greenthread.
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        # Guard with the lock so only the first caller performs teardown.
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # Never kill() the greenthread we are currently running on.
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))
        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_write(self):
        # Writer greenthread: drain the write queue forever, marking the
        # connection defunct on the first socket error.
        while True:
            try:
                next_msg = self._write_queue.get()
                # NOTE(review): socket.send() may write fewer bytes than
                # len(next_msg) (the log message says "sendall") — confirm
                # whether sendall() was intended here.
                self._socket.send(next_msg)
            except socket.error as err:
                log.debug(
                    "Exception during socket sendall for %s: %s", self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        # Reader greenthread: block in select() until the socket is
        # readable, then pull bytes into _iobuf and carve out complete
        # protocol messages (8-byte header + body).
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug(
                        "Exception during read select() for %s: %s",
                        self, exc)
                    self.defunct(exc)
                return
            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug(
                        "Exception during socket recv for %s: %s",
                        self, err)
                    self.defunct(err)
                    return  # leave the read loop
            if self._iobuf.tell():
                while True:
                    pos = self._iobuf.tell()
                    if pos < 8 or (self._total_reqd_bytes > 0
                                   and pos < self._total_reqd_bytes):
                        # we don't have a complete header yet or we
                        # already saw a header, but we don't have a
                        # complete message yet
                        break
                    else:
                        # have enough for header, read body len from header
                        self._iobuf.seek(4)
                        body_len = int32_unpack(self._iobuf.read(4))
                        # seek to end to get length of current buffer
                        self._iobuf.seek(0, os.SEEK_END)
                        pos = self._iobuf.tell()
                        if pos >= body_len + 8:
                            # read message header and body
                            self._iobuf.seek(0)
                            msg = self._iobuf.read(8 + body_len)
                            # leave leftover in current buffer
                            leftover = self._iobuf.read()
                            self._iobuf = StringIO()
                            self._iobuf.write(leftover)
                            self._total_reqd_bytes = 0
                            self.process_msg(msg, body_len)
                        else:
                            # wait for the rest of this message's body
                            self._total_reqd_bytes = body_len + 8
                            break
            else:
                # recv() returned no data: the server closed the socket.
                log.debug("connection closed by server")
                self.close()
                return

    def push(self, data):
        # Queue data for the writer, split into out_buffer_size chunks.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        # Subscribe a callback for a single server push-event type.
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        # Subscribe several event types in one RegisterMessage round trip.
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
class Minitouch():
    """Driver for the Android ``minitouch`` input service.

    Owns the service process (via MinitouchService), a TCP socket to the
    forwarded port, a reader greenthread (getInfo) that parses the
    service banner, and a writer greenthread (send) that drains
    touchQueue onto the socket. start()/stop() push desired states that
    _ensureState() works through.
    """

    def __init__(self, addr, serial, deviceInfos):
        self.serial = serial
        self.addr = addr
        self.pid = -1
        self.desiredState = StateQueue()
        self.socket = None
        # 1=STOPPED, 2=STARTING, 3=STARTED, 4=STOPPING
        self.runningState = 1
        self.minitouchService = MinitouchService(serial, self.addr[1],
                                                deviceInfos)
        self.touchQueue = Queue(500)
        # 0: stopped; 1: running; -2: stop requested
        self.send_status = 0
        self.get_status = 0
        self._init()
        self.actionStatus = None

    def _init(self):
        # Reset everything learned from the service banner.
        self.version = -1
        self.max_contacts = -1
        self.max_x = -1
        self.max_y = -1
        self.max_pressure = -1
        self.pid = -1

    def createSocket(self):
        """Open the TCP socket to the forwarded minitouch port.

        Returns 1 on success; on failure errorhandler() raises TouchError.
        """
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(0.2)
            self.socket.connect(self.addr)
            return 1
        except Exception:
            self.errorhandler('connectError', 'createSocket')
            return 0

    def log(self, log):
        print('[%s-minitouch] %s' % (self.serial, log))

    def errorhandler(self, error='', location=''):
        # Always raises; callers use this to abort with context.
        # Bugfix: a print() used to follow this raise and could never
        # execute — the dead statement has been removed.
        raise TouchError(error, location)

    def _startService(self):
        """Launch the minitouch binary on the device in a greenthread."""
        try:
            self.log('Launching minitouch service')
            eventlet.spawn_n(self.minitouchService.minitouch_run)
        except Exception as e:
            self.errorhandler(str(e), '_startService')

    def __connectService(self):
        """Forward the device port, connect, and start the reader/writer
        greenthreads."""
        self.log('Connecting to minitouch service')
        try:
            self.minitouchService.forward_minitouch()
            self.log('addr:%s' % self.addr[1])
            self.createSocket()
            if self.get_status == 0 and self.send_status == 0:
                self.send_status = 1
                self.get_status = 1
                eventlet.spawn_n(self.getInfo)
                eventlet.spawn_n(self.send)
            else:
                self.errorhandler(
                    'get:%s/send:%s Error' %
                    (self.get_status, self.send_status), '__connectService')
                self.log('Connecting to minitouch service FAILED')
        except Exception as e:
            self.errorhandler(str(e), '__connectService')
            self.log('Connecting to minitouch service FAILED')

    def install(self):
        """Push the minitouch resources onto the device."""
        self.log('installing minitouch resource')
        self.minitouchService.installAll()

    def init(self):
        self.install()

    def _disconnectService(self):
        """Signal both greenthread loops to exit and close the socket."""
        self.log('Disconnecting from minitouch service')
        if not (self.get_status == 1 and self.send_status == 1):
            self.log('revb status2 error:%s_%s' % (self.get_status,
                                                   self.send_status))
        self.send_status = -2
        self.get_status = -2
        if self.socket:
            self.socket.close()

    def _stopService(self):
        """Kill the minitouch process on the device, if we know its pid."""
        if self.pid > 0:
            self.log('Stopping minitouch service')
            self.minitouchService.killPid(self.pid)
        else:
            self.log('Stopping minitouch service SKIP')

    def _waitForPid(self):
        """Poll up to ~1s for getInfo() to learn the service pid."""
        for i in range(10):
            if self.pid > 0:
                self.log('get minitouch pid%s' % self.pid)
                return self.pid
            time.sleep(0.1)
        if self.pid <= 0:
            self.errorhandler('can not get pid', '_waitForPid')

    def _stop(self):
        """Tear everything down and reset to the STOPPED state."""
        self._disconnectService()
        self._stopService()
        self.runningState = 1
        self._init()
        self.pid = -1
        self.socket = None
        self.get_status = 0
        self.send_status = 0
        self.log('minitouch service stopped')

    def _ensureState(self):
        """Drive runningState toward the next desired state, if any."""
        if self.desiredState.isEmpty():
            return
        if self.runningState == 2 or self.runningState == 4:
            # A transition is already in flight; it will re-enter here.
            pass
        elif self.runningState == 1:
            if self.desiredState.get() == 3:
                try:
                    self.runningState = 2
                    self._startService()
                    time.sleep(2)
                    self.__connectService()
                    self._waitForPid()
                    self.runningState = 3
                    self.log('minitouch service started')
                except Exception as e:
                    self.log('EXCEPTION-->STOP:%s' % str(e))
                    if self.runningState != 1:
                        self._stop()
                    else:
                        self.log('exception SKIP, already Closed')
                finally:
                    self._ensureState()
            else:
                self.log('stop SKIP')
        elif self.runningState == 3:
            if self.desiredState.get() == 1:
                # Bugfix: this was `self.runningState == 4`, a no-op
                # comparison; the STOPPING state was never entered.
                self.runningState = 4
                try:
                    self._stop()
                finally:
                    self._ensureState()
            else:
                self.log('start SKIP')

    def start(self):
        """Request the service to reach the STARTED state."""
        self.log('Requesting frame producer to START')
        self.desiredState.push(3)
        self._ensureState()

    def stop(self):
        """Request the service to reach the STOPPED state."""
        self.log('Requesting frame producer to STOP')
        self.desiredState.push(1)
        self._ensureState()

    def getInfo(self):
        """Reader loop: parse the minitouch banner (version, limits, pid)."""
        buffersize = 1024
        self.log('<start get>')
        errorFlag = 0
        while self.get_status > 0:
            try:
                data = self.socket.recv(buffersize)
                if data:
                    infos = data.decode('utf-8').split('\n')
                    self.log(infos)
                    self.version = infos[0].split(' ')[1]
                    self.max_contacts = float(infos[1].split(' ')[1])
                    self.max_x = int(infos[1].split(' ')[2])
                    self.max_y = int(infos[1].split(' ')[3])
                    self.max_pressure = int(infos[1].split(' ')[4])
                    self.pid = int(infos[2].split(' ')[1])
                    self.log('getinfos:%s_%s_%s_%s' %
                             (self.version, self.pid,
                              self.max_x, self.max_y))
                else:
                    # Bugfix: was `assert False, ...`, which disappears
                    # under `python -O` and would spin forever on a
                    # closed socket; raise explicitly instead.
                    raise Exception('touchService recv empty data')
            except socket.timeout:
                pass
            except Exception as e:
                errorFlag = 1
                self.log('getInfoError_%s' % str(e))
                break
        self.log('<close get>')
        if errorFlag:
            self._stop()

    def send(self):
        """Writer loop: drain touchQueue onto the socket."""
        self.log('[start send]')
        errorFlag = 0
        while self.send_status > 0:
            try:
                data = self.touchQueue.get(False)
                self.socket.send(data)
            except eventlet.queue.Empty:
                time.sleep(0.01)
            except Exception as e:
                errorFlag = 1
                # Bugfix: log() takes a single message argument; this
                # call used to pass a second 'send' argument and raised
                # TypeError on the error path.
                self.log('sendError_%s' % str(e))
                break
        self.log('[close send]')
        if errorFlag:
            self._stop()

    def gestureStart(self, seq):
        self.actionStatus = True

    def gestureStop(self, seq):
        self.actionStatus = False

    def touchDown(self, point):
        """Queue a 'd' (down) command; coordinates/pressure are fractions
        of the device maxima learned from the banner."""
        print(point, 'point')
        socketData = 'd %s %s %s %s\n' % (
            point['contact'],
            round(point['x'] * self.max_x),
            round(point['y'] * self.max_y),
            round((point['pressure'] or 0.5) * self.max_pressure))
        if self.actionStatus:
            self.touchQueue.put(socketData.encode('ascii'))

    def touchMove(self, point):
        """Queue an 'm' (move) command."""
        socketData = 'm %s %s %s %s\n' % (
            point['contact'],
            round(point['x'] * self.max_x),
            round(point['y'] * self.max_y),
            round((point['pressure'] or 0.5) * self.max_pressure))
        if self.actionStatus:
            self.touchQueue.put(socketData.encode('ascii'))

    def touchUp(self, point):
        """Queue a 'u' (up) command for the given contact."""
        socketData = 'u %s\n' % point['contact']
        if self.actionStatus:
            self.touchQueue.put(socketData.encode('ascii'))

    def touchCommit(self, data):
        """Queue a 'c' (commit) so minitouch applies queued events."""
        if self.actionStatus:
            self.touchQueue.put('c\n'.encode('ascii'))

    def touchReset(self, data):
        """Queue an 'r' (reset) to clear all contacts."""
        if self.actionStatus:
            self.touchQueue.put('r\n'.encode('ascii'))
class StatsdLog(object):
    """Simple server to monitor a syslog udp stream for statsd events"""

    def __init__(self, conf):
        """Wire up syslog logging, read config and compile patterns.

        :param conf: config mapping; values are strings as read from the
            config file.
        """
        TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
        self.conf = conf
        self.logger = logging.getLogger('statsdlogd')
        self.logger.setLevel(logging.INFO)
        self.syslog = SysLogHandler(address='/dev/log')
        self.formatter = logging.Formatter('%(name)s: %(message)s')
        self.syslog.setFormatter(self.formatter)
        self.logger.addHandler(self.syslog)
        self.debug = conf.get('debug', 'false').lower() in TRUE_VALUES
        self.statsd_host = conf.get('statsd_host', '127.0.0.1')
        self.statsd_port = int(conf.get('statsd_port', '8125'))
        self.listen_addr = conf.get('listen_addr', '127.0.0.1')
        self.listen_port = int(conf.get('listen_port', 8126))
        self.report_internal_stats = conf.get('report_internal_stats',
                                              'true').lower() in TRUE_VALUES
        self.int_stats_interval = int(conf.get('internal_stats_interval', 5))
        self.buff = int(conf.get('buffer_size', 8192))
        self.max_q_size = int(conf.get('max_line_backlog', 512))
        self.statsd_sample_rate = float(conf.get('statsd_sample_rate', '.5'))
        self.counter = 0
        self.skip_counter = 0
        self.hits = 0
        self.q = Queue(maxsize=self.max_q_size)
        # key: regex
        self.patterns_file = conf.get('patterns_file',
                                      '/etc/statsdlog/patterns.json')
        self.json_patterns = conf.get('json_pattern_file',
                                      'true').lower() in TRUE_VALUES
        try:
            self.patterns = self.load_patterns()
        except Exception as err:
            self.logger.exception(err)
            # Parenthesized so this also compiles on Python 3.
            print("Encountered exception at startup: %s" % err)
            sys.exit(1)
        self.statsd_addr = (self.statsd_host, self.statsd_port)
        self.comp_patterns = {}
        for item in self.patterns:
            self.comp_patterns[item] = re.compile(self.patterns[item])

    def load_patterns(self):
        """Load the stat-name -> regex mapping (json or `name=regex` text)."""
        if self.json_patterns:
            self.logger.info("Using json based patterns file: %s" %
                             self.patterns_file)
            with open(self.patterns_file) as pfile:
                return json.loads(pfile.read())
        else:
            self.logger.info("Using plain text patterns file: %s" %
                             self.patterns_file)
            patterns = {}
            with open(self.patterns_file) as f:
                for line in f:
                    if not line:
                        # Bugfix: an empty line used to set pattern=None
                        # and then crash on len(None); just skip it.
                        continue
                    pattern = [x.strip() for x in line.split("=", 1)]
                    if len(pattern) != 2:
                        # skip this line
                        self.logger.error(
                            "Skipping pattern. Unable to parse: %s" % line)
                    else:
                        if pattern[0] and pattern[1]:
                            patterns[pattern[0]] = pattern[1]
                        else:
                            self.logger.error(
                                "Skipping pattern. Unable to parse: %s" % line)
            return patterns

    def check_line(self, line):
        """
        Check if a line matches our search patterns.

        :param line: The string to check
        :returns: List of regex entries that matched (or empty list if none)
        """
        matches = []
        for entry in self.comp_patterns:
            if self.comp_patterns[entry].match(line):
                matches.append(entry)
        return matches

    def internal_stats(self):
        """
        Periodically send our own stats to statsd.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(self.int_stats_interval)
            self.send_event("statsdlog.lines:%s|c" %
                            (self.counter - lastcount))
            lastcount = self.counter
            self.send_event("statsdlog.hits:%s|c" % (self.hits - lasthit))
            lasthit = self.hits

    def stats_print(self):
        """
        Periodically dump some stats to the logs.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(2)
            # NOTE(review): the deltas are divided by 60 although the
            # loop sleeps 2 seconds, so "per second" is off by 30x —
            # confirm whether the sleep or the divisor is intended.
            lps = (self.counter - lastcount) / 60
            hps = (self.hits - lasthit) / 60
            lastcount = self.counter
            lasthit = self.hits
            self.logger.info('per second: %d lines - hits %d' % (lps, hps))
            self.logger.info('totals: %d hits - %d lines' %
                             (self.hits, self.counter))
            # Bugfix: was `is not 0`, an identity (not equality) test.
            if self.skip_counter != 0:
                self.logger.info('Had to skip %d log lines so far' %
                                 self.skip_counter)

    def send_event(self, payload):
        """
        Fire event to statsd

        :param payload: The payload of the udp packet to send.
        """
        try:
            udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            udp_socket.sendto(payload, self.statsd_addr)
        except Exception:
            # udp sendto failed (socket already in use?), but thats ok
            self.logger.error("Error trying to send statsd event")

    def statsd_counter_increment(self, stats, delta=1):
        """
        Increment multiple statsd stats counters

        :param stats: list of stats items to package and send
        :param delta: delta of stats items
        """
        if self.statsd_sample_rate < 1:
            if random() <= self.statsd_sample_rate:
                for item in stats:
                    payload = "%s:%s|c|@%s" % (item, delta,
                                               self.statsd_sample_rate)
                    self.send_event(payload)
        else:
            for item in stats:
                payload = "%s:%s|c" % (item, delta)
                self.send_event(payload)

    def worker(self):
        """
        Check for and process log lines in queue
        """
        while True:
            msg = self.q.get()
            matches = self.check_line(msg)
            for match in matches:
                self.statsd_counter_increment([match])
                if self.hits >= maxint:
                    self.logger.info("hit maxint, reset hits counter")
                    self.hits = 0
                self.hits += 1

    def listener(self):
        """
        syslog udp listener
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        bind_addr = (self.listen_addr, self.listen_port)
        sock.bind(bind_addr)
        self.logger.info("listening on %s:%d" % bind_addr)
        while 1:
            data, addr = sock.recvfrom(self.buff)
            if not data:
                break
            else:
                if self.q.qsize() < self.max_q_size:
                    self.q.put(data)
                    if self.counter >= maxint:
                        self.logger.info("hit maxint, reset seen counter")
                        self.counter = 0
                    self.counter += 1
                else:
                    if self.debug:
                        # Bugfix: logging.Logger has no notice() method;
                        # the old call raised AttributeError whenever the
                        # queue overflowed with debug enabled.
                        self.logger.warning("max log lines in queue, "
                                            "skipping")
                    if self.skip_counter >= maxint:
                        self.logger.info("hit maxint, reset skip counter")
                        self.skip_counter = 0
                    self.skip_counter += 1

    def start(self):
        """
        Start the listener, worker, and mgmt server.
        """
        eventlet.spawn_n(self.worker)
        if self.debug:
            eventlet.spawn_n(self.stats_print)
        if self.report_internal_stats:
            eventlet.spawn_n(self.internal_stats)
        while True:
            try:
                self.listener()
            except Exception as err:
                self.logger.error(err)
class EventletConnection(Connection):
    """ An implementation of :class:`.Connection` that utilizes ``eventlet``. """

    # Total bytes needed before the in-flight message can be decoded;
    # 0 means no partial message is pending.
    _total_reqd_bytes = 0
    _read_watcher = None   # greenthread running handle_read()
    _write_watcher = None  # greenthread running handle_write()
    _socket = None

    @classmethod
    def initialize_reactor(cls):
        # Eventlet needs the stdlib monkey-patched before any sockets or
        # greenthreads are created.
        eventlet.monkey_patch()

    @classmethod
    def factory(cls, *args, **kwargs):
        # Build a connection and wait up to `timeout` seconds for the
        # connect handshake to complete before handing it to the caller.
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._write_queue = Queue()  # outbound chunks for handle_write()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)
        # Try every address getaddrinfo() offers (IPv4/IPv6) and keep the
        # first socket that connects; remember the last failure so it can
        # be reported if none succeed.
        sockerr = None
        addresses = socket.getaddrinfo(self.host, self.port,
                                       socket.AF_UNSPEC, socket.SOCK_STREAM)
        for (af, socktype, proto, canonname, sockaddr) in addresses:
            try:
                self._socket = socket.socket(af, socktype, proto)
                self._socket.settimeout(1.0)
                self._socket.connect(sockaddr)
                sockerr = None
                break
            except socket.error as err:
                sockerr = err
        if sockerr:
            raise socket.error(
                sockerr.errno,
                "Tried connecting to %s. Last error: %s" %
                ([a[4] for a in addresses], sockerr.strerror))
        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)
        # Reader and writer each get their own greenthread.
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        # Guard with the lock so only the first caller performs teardown.
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # Never kill() the greenthread we are currently running on.
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host, ))
        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Writer greenthread: drain the write queue forever, marking the
        # connection defunct on the first socket error.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        # Reader greenthread: block in select() until the socket is
        # readable, then pull bytes into the protocol buffer and let
        # process_io_buffer() carve out complete messages.
        run_select = partial(select.select, (self._socket, ), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                # NOTE(review): self._iobuf is not created in this class's
                # __init__ — presumably the Connection base class provides
                # it; confirm.
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # recv() returned no data: the server closed the socket.
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Queue data for the writer, split into out_buffer_size chunks.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        # Subscribe a callback for a single server push-event type.
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(RegisterMessage(event_list=[event_type]),
                               timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        # Subscribe several event types in one RegisterMessage round trip.
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
class AsyncClient(baseasync.BaseAsync):
    """Asynchronous client built on eventlet greenthreads.

    Outbound operations are queued by sendAsync(); a writer greenthread
    drains the queue while a reader greenthread receives responses and
    dispatches callbacks on a green pool.
    """

    def __init__(self, *args, **kwargs):
        super(AsyncClient, self).__init__(*args, **kwargs)
        self.pool = eventlet.greenpool.GreenPool(DEFAULT_POOL_SIZE)
        self.reader_thread = None
        self.writer_thread = None
        self.queue = Queue(DEFAULT_MAX_QUEUE_SIZE)  # outbound operations
        self.max_pending = MAX_PENDING  # backpressure limit for the writer
        self.closing = False

    def build_socket(self, family=socket.AF_INET):
        return socket.socket(family)

    def connect(self):
        # Establish the transport, then start the reader/writer loops.
        super(AsyncClient, self).connect()
        self.closing = False
        self.reader_thread = eventlet.greenthread.spawn(self._reader_run)
        self.writer_thread = eventlet.greenthread.spawn(self._writer_run)

    def dispatch(self, fn, *args, **kwargs):
        # Run a callback on the green pool, off the reader loop.
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Dispatching: Pending {0}".format(len(self._pending)))
        self.pool.spawn_n(fn, *args, **kwargs)

    def shutdown(self):
        # Stop accepting work; close immediately if nothing is in flight,
        # otherwise _reader_run finishes the close once drained.
        self.closing = True
        if len(self._pending) + self.queue.qsize() == 0:
            self._end_close()

    def close(self):
        self.shutdown()
        self.wait()

    def _end_close(self):
        # Tear down both greenthreads and the underlying connection.
        self.writer_thread.kill()
        self.reader_thread.kill()
        super(AsyncClient, self).close()
        self.writer_thread = None
        self.reader_thread = None

    def sendAsync(self, header, value, onSuccess, onError):
        """Queue one operation; callbacks fire on completion or error."""
        if self.closing:
            raise common.ConnectionClosed(
                "Client is closing, can't queue more operations.")
        if self.faulted:
            self._raise(
                common.ConnectionFaulted(
                    "Can't send message when connection is on a faulted state."
                ), onError)
            return  # skip the rest
        # fail fast on NotConnected
        if not self.isConnected:
            self._raise(common.NotConnected("Not connected."), onError)
            return  # skip the rest
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Queue: {0}".format(self.queue.qsize()))
        self.queue.put((header, value, onSuccess, onError))
        # Yield so the writer greenthread can pick the item up promptly.
        eventlet.sleep(0)

    def wait(self):
        # Block until every queued operation has been task_done()'d.
        self.queue.join()

    def send(self, header, value):
        """Synchronous wrapper over sendAsync(): block for one result."""
        done = eventlet.event.Event()

        class Dummy:
            pass
        # Mutable holder so the inner callbacks can smuggle the outcome out.
        d = Dummy()
        d.error = None
        d.result = None

        def innerSuccess(m, r, value):
            d.result = (m, r, value)
            done.send()

        def innerError(e):
            d.error = e
            done.send()
        self.sendAsync(header, value, innerSuccess, innerError)
        done.wait()
        # TODO(Nacho): should be add a default timeout?
        if d.error:
            raise d.error
        return d.result

    def _writer_run(self):
        # Writer loop: forward queued operations to the base transport,
        # throttling while too many operations are pending.
        while self.isConnected and not self.faulted:
            try:
                while len(self._pending) > self.max_pending:
                    eventlet.sleep(0)
                (header, value, onSuccess, onError) = self.queue.get()
                super(AsyncClient, self).sendAsync(header, value, onSuccess,
                                                   onError)
            except common.ConnectionFaulted:
                pass
            except common.ConnectionClosed:
                pass
            except Exception as ex:
                self._fault_client(ex)
            # Yield execution, don't starve the reader
            eventlet.sleep(0)

    def _reader_run(self):
        # Reader loop: receive one response per iteration and finish the
        # deferred close once everything has drained.
        while self.isConnected and not self.faulted:
            try:
                self._async_recv()
                self.queue.task_done()
                if self.closing and len(
                        self._pending) + self.queue.qsize() == 0:
                    self._end_close()
            except common.ConnectionFaulted:
                pass
            except Exception as ex:
                self._fault_client(ex)
class supportManager():
    """Registry and lifecycle manager for per-device helper clients
    (minicap screen capture, minitouch input, stf services).

    Ports are pre-allocated into queues so each registered device draws a
    unique forwarded port.
    """

    def __init__(self):
        self.minicap_clients = {}
        self.minitouch_clients = {}
        self.stfservice_clients = {}
        self.init_portlist()

    def init_portlist(self):
        """Pre-fill the port pools each client type draws from."""
        self.minicap_q = Queue(50)
        self.minitouch_q = Queue(50)
        self.stfservices_q = Queue(50)
        for port in range(1300, 1350):
            self.minicap_q.put(port)
            self.minitouch_q.put(port - 100)
        for port in range(1100, 1190, 2):
            self.stfservices_q.put(port)

    def _getPort(self, type):
        """Draw the next free port for the given client type."""
        if type == 'minicap':
            return self.minicap_q.get()
        elif type == 'minitouch':
            return self.minitouch_q.get()
        else:
            return self.stfservices_q.get()

    def register_minicap(self, serial, minicap_port, deviceInfos):
        self.minicap_clients[serial] = Minicap(('localhost', minicap_port),
                                               serial, deviceInfos)

    def register_minitouch(self, serial, minitouch_port, deviceInfos):
        self.minitouch_clients[serial] = Minitouch(
            ('localhost', minitouch_port), serial, deviceInfos)

    def register_stfservice(self, serial, stfservice_port):
        self.stfservice_clients[serial] = stfServices(
            ('localhost', stfservice_port), serial)

    def checkDeviceAvaliable(self, serial):
        """Return True when the touch and service clients are registered
        and the touch client reports STARTED (runningState == 3)."""
        touch_client = self.minitouch_clients.get(serial)
        service_client = self.stfservice_clients.get(serial)
        if not touch_client or not service_client:
            return False
        # NOTE(review): both halves of this check test touch_client; one
        # side was probably meant to check service_client. Left as-is to
        # avoid a behavior change — confirm intent.
        if touch_client.runningState != 3 and touch_client.runningState != 3:
            return False
        return True

    def init2(self, serial):
        """Bring up stf service, minitouch and minicap for one device,
        recording progress/errors in the db and notifying via socketio."""
        deviceInfos = get_deviceInfo(serial)
        if not deviceInfos:
            db.setDeviceError(serial)
            socketio.emit('change', 'hehe', namespace='/default')
            return
        stfservice_port = self._getPort('stfservice')
        self.register_stfservice(serial, stfservice_port)
        self.stfservice_clients[serial].init()
        self.stfservice_clients[serial].start()
        self.stfservice_clients[serial].getProperties('haha')
        if not self.stfservice_clients[serial].app:
            db.setDeviceError(serial)
            socketio.emit('change', 'hehe', namespace='/default')
            return
        minitouch_port = self._getPort('minitouch')
        self.register_minitouch(serial, minitouch_port, deviceInfos)
        self.minitouch_clients[serial].init()
        self.minitouch_clients[serial].start()
        if self.minitouch_clients[serial].pid <= 0:
            db.setDeviceError(serial)
            socketio.emit('change', 'hehe', namespace='/default')
            return
        minicap_port = self._getPort('minicap')
        self.register_minicap(serial, minicap_port, deviceInfos)
        displayinfos = self.minicap_clients[serial].init()
        db.setDeviceReady(serial)
        db.setDeviceInfo(serial, {
            'minicap_port': minicap_port,
            'minitouch_port': minitouch_port
        })
        socketio.emit('change', 'hehe', namespace='/default')
        deviceInfos['display'] = displayinfos
        if self.stfservice_clients[serial].phone:
            tempd = {}
            for i in self.stfservice_clients[serial].phone:
                tempd[i.name] = i.value
            deviceInfos['phone'] = tempd
        db.setDeviceInfo(serial, deviceInfos)
        socketio.emit('change', 'hehe', namespace='/default')

    def init(self, serial):
        """Run init2() in the background so callers are not blocked."""
        eventlet.spawn_n(self.init2, serial)

    def startCap(self, key, current_user):
        """Start screen capture; returns 1 on success, 0 otherwise."""
        try:
            if not self.checkDeviceAvaliable(key):
                return 0
            minicap_client = self.minicap_clients.get(key)
            res = minicap_client.start()
            return 1
        except Exception as e:
            print('Cap_start Error', str(e))
            return 0

    def updateConfig(self, key, width, height):
        """Push a new capture resolution; returns 1/0."""
        try:
            if not self.checkDeviceAvaliable(key):
                return 0
            minicap_client = self.minicap_clients.get(key)
            minicap_client.updateConfig(width, height)
            return 1
        except Exception as e:
            print('Cap_updataConfig Error', str(e))
            return 0

    def updateRotation(self, key, rotation):
        """Push a new capture rotation; returns 1/0."""
        try:
            if not self.checkDeviceAvaliable(key):
                return 0
            minicap_client = self.minicap_clients.get(key)
            minicap_client.updateRotation(rotation)
            return 1
        except Exception:
            print('Cap_updataConfig Error')
            return 0

    def stopCap(self, key):
        """Stop screen capture; returns 1/0."""
        try:
            r = self.minicap_clients.get(key)
            res = r.stop()
            return 1
        except Exception:
            print('Cap_stop Error')
            return 0

    def startTouch(self, key):
        r = self.minitouch_clients.get(key)
        if r:
            # NOTE(review): Minitouch does not define .get(); this looks
            # like it predates a refactor from dict-of-clients to client
            # objects and will raise AttributeError if reached. Left
            # unchanged — confirm against callers.
            t = r.get('touch')
            if t:
                res = t.start()
                return 1
            else:
                return 0
        else:
            return 0

    def startServices(self, key):
        r = self.stfservice_clients.get(key)
        if r:
            # NOTE(review): same .get('stf') concern as startTouch().
            t = r.get('stf')
            if t:
                res = t.start()
                return 1
            else:
                return 0
        else:
            return 0

    def closeAll(self, key):
        """Close and deregister every client for *key*; returns 1."""
        touch_client = self.minitouch_clients.get(key)
        stf_client = self.stfservice_clients.get(key)
        cap_client = self.minicap_clients.get(key)
        if cap_client:
            print('cap', cap_client.close())
        if touch_client:
            t = touch_client.get('touch')
            if t:
                print('touch', t.close())
        if stf_client:
            s = stf_client.get('stf')
            if s:
                print('services', s.close())
        # Bugfix: pop() with a default — the unconditional pop(key) used
        # to raise KeyError when a client type was never registered.
        self.minitouch_clients.pop(key, None)
        self.stfservice_clients.pop(key, None)
        self.minicap_clients.pop(key, None)
        del touch_client
        del stf_client
        del cap_client
        return 1

    def sendTouch(self, key, action, data):
        """Invoke the named touch action on the device's minitouch client.

        Returns 1 when the client exists, 0 otherwise.
        """
        r = self.minitouch_clients.get(key)
        if r:
            # Security fix: dispatch via getattr instead of
            # eval('r.%s(data)' % action) — identical behavior for valid
            # method names, but no arbitrary-code execution if `action`
            # ever arrives from an untrusted client.
            getattr(r, action)(data)
            return 1
        else:
            return 0

    def sendService(self, key, type_t, data):
        """Invoke the named stf-service action; returns 1/0."""
        r = self.stfservice_clients.get(key)
        if r:
            # Security fix: getattr dispatch instead of eval() (see
            # sendTouch).
            getattr(r, type_t)(data)
            return 1
        else:
            return 0

    def sendTouchs(self, keys, action, data):
        """Fan sendTouch() out over several devices."""
        for k in keys:
            self.sendTouch(k, action, data)

    def sendServices(self, keys, type_t, data):
        """Fan sendService() out over several devices."""
        for k in keys:
            self.sendService(k, type_t, data)
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.

    This implementation assumes all eventlet monkey patching is active. It is
    not tested with partial patching.
    """

    # green threads servicing the socket; set in __init__
    _read_watcher = None
    _write_watcher = None

    # green (cooperative) socket/ssl implementations used by the base class
    _socket_impl = eventlet.green.socket
    _ssl_impl = eventlet.green.ssl

    # class-level timer machinery shared by all connections
    _timers = None
    _timeout_watcher = None
    _new_timer = None

    @classmethod
    def initialize_reactor(cls):
        """Monkey-patch the stdlib and start the shared timeout watcher
        (idempotent: only spawns once per class)."""
        eventlet.monkey_patch()
        if not cls._timers:
            cls._timers = TimerManager()
            cls._timeout_watcher = eventlet.spawn(cls.service_timeouts)
            cls._new_timer = Event()

    @classmethod
    def create_timer(cls, timeout, callback):
        """Register a new Timer and wake the watcher so it can re-evaluate
        the next wakeup deadline."""
        timer = Timer(timeout, callback)
        cls._timers.add_timer(timer)
        cls._new_timer.set()
        return timer

    @classmethod
    def service_timeouts(cls):
        """
        cls._timeout_watcher runs in this loop forever.
        It is usually waiting for the next timeout on the cls._new_timer Event.
        When new timers are added, that event is set so that the watcher can
        wake up and possibly set an earlier timeout.
        """
        timer_manager = cls._timers
        while True:
            next_end = timer_manager.service_timeouts()
            # no pending timer -> sleep "forever" (10000s) until a new one
            # is registered and _new_timer is set
            sleep_time = max(next_end - time.time(), 0) if next_end else 10000
            cls._new_timer.wait(sleep_time)
            cls._new_timer.clear()

    def __init__(self, *args, **kwargs):
        """Connect the socket and spawn the reader/writer green threads."""
        Connection.__init__(self, *args, **kwargs)
        self._write_queue = Queue()
        self._connect_socket()
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        """Idempotently close: kill watchers (unless we ARE one), close the
        socket, and fail any in-flight requests."""
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # don't kill the green thread we're currently running on
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))
        if not self.is_defunct:
            self.error_all_requests(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        """Peer closed the connection; tear down our side."""
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        """Writer loop: drain the write queue onto the socket until a socket
        error marks the connection defunct."""
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s", self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        """Reader loop: block in select(), recv into the io buffer, and hand
        complete data to process_io_buffer(); empty recv means EOF."""
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # recv returned no bytes: remote end closed the connection
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        """Queue `data` for sending, split into out_buffer_size chunks."""
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])
class Connection(object):
    """One BGP peer session over an accepted socket.

    A recv loop parses BGP4 messages and dispatches them; a send loop drains
    send_q onto the socket.  Routes learned from the peer are merged into the
    class-wide Server.route_table.
    """

    def __init__(self, socket, address):
        super(Connection, self).__init__()
        self.socket = socket
        self.address = address
        self.is_active = True
        # The limit is arbitrary. We need to limit queue size to
        # prevent it from eating memory up
        self.send_q = Queue(128)
        # data structures for BGP
        self.peer_ip = None
        self.peer_as = None
        self.peer_id = None
        self.peer_capabilities = []
        self.peer_last_keepalive_timestamp = None
        self._4or6 = 0          # address family of the session: 4, 6, or 0 (unknown)
        self.hold_time = 240    # seconds; negotiated down in _handle_open

    def close(self):
        LOG.info('Connection %s closing...', self.address)
        self.socket.close()

    @_deactivate
    def _recv_loop(self):
        """Read framed BGP messages until the socket closes or errors."""
        header_size = BGP4_HEADER_SIZE
        while self.is_active:
            buf = bytearray()
            receive = self._exact_receive(header_size)
            if receive != '':
                buf.extend(receive)
            else:
                break
            (marker, packet_len, msg_type) = struct.unpack(BGP4_PACK_STR,
                                                           buffer(buf))
            required_len = packet_len - header_size
            if required_len:
                # notification message has only a header
                receive = self._exact_receive(required_len)
                if receive != '':
                    buf.extend(receive)
                else:
                    break
            msg = BGP4.bgp4.parser(buffer(buf[0:packet_len]))
            self._handle(msg)
            eventlet.sleep(0)  # yield to other green threads

    def _exact_receive(self, required_len):
        """
        receive exact size of data from socket
        returns empty string if socket closed/error
        """
        buf = bytearray()
        while len(buf) < required_len:
            more_data = self.socket.recv(required_len - len(buf))
            if len(more_data) != 0:
                buf.extend(more_data)
            else:
                self.is_active = False
                return ''
        return buf

    def _handle(self, msg):
        """Dispatch one parsed BGP4 message by its type."""
        msg_type = msg.type_
        if msg_type == BGP4.BGP4_OPEN:
            self._handle_open(msg.data)
            LOG.debug('Receive OPEN msg')
        elif msg_type == BGP4.BGP4_UPDATE:
            LOG.debug('Receive UPDATE msg')
            self._handle_update(msg.data)
        elif msg_type == BGP4.BGP4_NOTIFICATION:
            LOG.debug('Receive NOTIFICATION msg')
            self._handle_notification(msg.data)
        elif msg_type == BGP4.BGP4_KEEPALIVE:
            self._handle_keepalive(msg)
            LOG.debug('Receive KEEPALIVE msg')
        else:
            LOG.debug('Receive unknown msg_type %s', msg_type)

    def __check_capabilities(self, peer_capabilities):
        """
        ideally,
        1) checks if some important capabilities are supported by peer
           return True if OK
        2) assigns self.capabilities, which is the actual capabilities
           used in this connection
        """
        # requires every locally-configured capability type to also appear
        # in the peer's capability list
        self_capability_types = []
        peer_capability_types = []
        for c in Server.capabilities:
            self_capability_types.append(type(c))
        for c in peer_capabilities:
            peer_capability_types.append(type(c))
        for self_capability in self_capability_types:
            if self_capability not in peer_capability_types:
                return False
        return True

    def _handle_open(self, msg):
        """Process the peer's OPEN: negotiate hold time, record identity and
        capabilities, reply with our OPEN, then start keepalives and send the
        route table (or NOTIFICATION on capability mismatch)."""
        self.peer_as = msg.my_as
        peer_holdtime = msg.hold_time
        self.hold_time = min(peer_holdtime, self.hold_time)
        self.peer_id = msg.bgp_identifier
        self.peer_capabilities = msg.data
        for capability in self.peer_capabilities:
            if isinstance(capability, BGP4.multi_protocol_extension):
                if capability.addr_family == 1:
                    self._4or6 = 4
                elif capability.addr_family == 2:
                    self._4or6 = 6
                else:
                    self._4or6 = 0
            if isinstance(capability, BGP4.support_4_octets_as_num):
                self.peer_as = capability.as_num
        # NOTE(review): this format string was broken across lines in the
        # extracted source; reconstructed as a single literal.
        LOG.info('BGP peer info. 4/6: %s, AS %s, hold time %s, ID %s,'
                 ' capability %s',
                 self._4or6, self.peer_as, self.hold_time, self.peer_id,
                 self.peer_capabilities)
        self.send_open_msg()
        if self.__check_capabilities(self.peer_capabilities):
            self.peer_last_keepalive_timestamp = time.time()
            hub.spawn(self.keepalive)
            self.send_current_route_table()
        else:
            self.send_notification_msg(err_code=2, err_subcode=0,
                                       data="Capability check failed.")

    def keepalive(self):
        """Periodic keepalive sender; also enforces the hold timer."""
        while True:
            self.send_keepalive_msg()
            current_time = time.time()
            if current_time - self.peer_last_keepalive_timestamp > \
                    self.hold_time:
                self.send_notification_msg(err_code=4, err_subcode=0,
                                           data="Hold timer expired.")
                self.is_active = False
            hub.sleep(self.hold_time / 3)

    def __check_AFI(self, afi):
        """Map a BGP AFI constant to 4/6, or None if unrecognized."""
        if afi == BGP4.AFI_IPV4:
            return 4
        elif afi == BGP4.AFI_IPV6:
            return 6
        else:
            return None

    def _handle_update(self, msg):
        """Translate an UPDATE into advertised/withdrawn BGPEntry lists and
        apply them to the shared route table.  Bails out entirely (loop
        prevention) when our own AS appears in the AS_PATH."""
        LOG.debug('Handling UPDATE msg')
        advert_entries = []
        withdraw_entries = []
        if msg.wd_routes:
            for i in msg.wd_routes:
                entry = route_entry.BGPEntry(i.network, i.length, 4)
                entry.announcer = netaddr.IPAddress(self.address[0])
                withdraw_entries.append(entry)
        if msg.nlri:
            for i in msg.nlri:
                entry = route_entry.BGPEntry(i.network, i.length, 4)
                entry.announcer = netaddr.IPAddress(self.address[0])
                advert_entries.append(entry)
        attributes = route_entry.Attributes()
        for i in msg.path_attr:
            if i.code == BGP4.bgp4_update._ORIGIN:
                attributes.origin = i.value
            elif i.code == BGP4.bgp4_update._AS_PATH:
                if Server.local_as in i.as_values:
                    # our AS already in path: a loop; drop the whole update
                    return
                attributes.as_path_type = i.as_type
                attributes.as_path = i.as_values
            elif i.code == BGP4.bgp4_update._NEXT_HOP:
                attributes.next_hop = i._next_hop
            elif i.code == BGP4.bgp4_update._MULTI_EXIT_DISC:
                attributes.multi_exit_disc = i.value
            elif i.code == BGP4.bgp4_update._MP_REACH_NLRI:
                _4or6 = self.__check_AFI(i.addr_family)
                attributes.next_hop = i.next_hop
                if i.nlri:
                    for j in i.nlri:
                        entry = route_entry.BGPEntry(j.network, j.length,
                                                     _4or6)
                        entry.announcer = netaddr.IPAddress(self.address[0])
                        advert_entries.append(entry)
            elif i.code == BGP4.bgp4_update._MP_UNREACH_NLRI:
                _4or6 = self.__check_AFI(i.addr_family)
                if i.wd_routes:
                    for j in i.wd_routes:
                        entry = route_entry.BGPEntry(j.network, j.length,
                                                     _4or6)
                        entry.announcer = netaddr.IPAddress(self.address[0])
                        withdraw_entries.append(entry)
        self.__add_route(advert_entries, attributes)
        self.__remove_route(withdraw_entries)

    def __add_route(self, advert_entries, attributes):
        # XXX acquire route table lock?
        # XXX remove duplicate
        for entry in advert_entries:
            entry.attributes = attributes
            Server.route_table.append(entry)

    def __remove_route(self, withdraw_entries):
        # XXX acquire route table lock?
        for i in withdraw_entries:
            for j in Server.route_table:
                if i == j:
                    Server.route_table.remove(j)

    def _handle_notification(self, msg):
        LOG.error('BGP error code %s, error sub code %s',
                  msg.err_code, msg.err_subcode)

    def _handle_keepalive(self, msg):
        # refresh the hold timer; checked in keepalive()
        self.peer_last_keepalive_timestamp = time.time()

    @_deactivate
    def _send_loop(self):
        """Drain send_q onto the socket; clears send_q on exit so later
        send() calls become no-ops."""
        try:
            while self.is_active:
                buf = self.send_q.get()
                self.socket.sendall(buf)
        finally:
            self.send_q = None

    def send(self, buf):
        if self.send_q:
            self.send_q.put(buf)

    def serve(self):
        """Run recv loop on this green thread, send loop on another; kill and
        join the sender when receiving ends."""
        send_thr = hub.spawn(self._send_loop)
        try:
            self._recv_loop()
        finally:
            hub.kill(send_thr)
            hub.joinall([send_thr])

    #
    # Utility methods for convenience
    #
    def send_open_msg(self):
        """Serialize and queue our OPEN message."""
        open_reply = BGP4.bgp4_open(version=4, my_as=Server.local_as,
                                    hold_time=self.hold_time,
                                    bgp_identifier=Server.local_ipv4,
                                    data=Server.capabilities)
        bgp4_reply = BGP4.bgp4(type_=BGP4.BGP4_OPEN, data=open_reply)
        self.serialize_and_send(bgp4_reply)

    def send_keepalive_msg(self):
        keepalive = BGP4.bgp4(type_=BGP4.BGP4_KEEPALIVE, data=None)
        self.serialize_and_send(keepalive)

    def send_notification_msg(self, err_code, err_subcode, data):
        """
        input: err_code, err_subcode, and data
        output: send msg
        """
        notification_msg = BGP4.bgp4_notification(err_code, err_subcode,
                                                  data)
        bgp_msg = BGP4.bgp4(type_=BGP4.BGP4_NOTIFICATION,
                            data=notification_msg)
        self.serialize_and_send(bgp_msg)

    def serialize_and_send(self, protocol_data):
        """Pack one BGP4 protocol object and queue its bytes for sending."""
        p = packet.Packet()
        p.add_protocol(protocol_data)
        p.serialize()
        self.send(p.data)

    def send_current_route_table(self):
        """
        used after OPEN to send current route_table to peer
        """
        LOG.info('Sending local route table...')
        for i in Server.route_table:
            self.send_update_msg(i)

    def send_update_msg(self, entry):
        """
        convenient method to send update message
        input is a BGPEntry object
        """
        path_attr = []
        # 0 is a valid origin number, compare with None
        if entry.attributes.origin is not None:
            origin_msg = BGP4.origin(value=entry.attributes.origin)
            path_attr.append(origin_msg)
        if entry.attributes.multi_exit_disc:
            multi_exit_disc_msg = BGP4.multi_exit_disc(value= \
                entry.attributes.multi_exit_disc)
            path_attr.append(multi_exit_disc_msg)
        if entry.attributes.as_path:
            # information stored in as_path is the original got from
            # peer's update messages, so when sending to others, we should
            # insert server's AS number
            as_path_msg = BGP4.as_path(as_type=entry.attributes.as_path_type,
                                       as_len=len(entry.attributes.as_path)+1,
                                       as_values=[Server.local_as] + \
                                           entry.attributes.as_path)
            path_attr.append(as_path_msg)
        # nlri
        if entry._4or6 == 4:
            nlri = [BGP4.NLRI(entry.prefix_len, entry.ip, entry._4or6)]
            if entry.attributes.next_hop:
                next_hop_msg = BGP4.next_hop(_next_hop= \
                    entry.attributes.next_hop)
                path_attr.append(next_hop_msg)
        elif entry._4or6 == 6:
            # IPv6 routes travel in MP_REACH_NLRI, not the top-level nlri
            nlri = []
            nlri_in_mp_reach = [BGP4.NLRI(entry.prefix_len, entry.ip,
                                          entry._4or6)]
            mp_reach_nlri_msg = BGP4.mp_reach_nlri(next_hop_len= \
                16 * len(entry.attributes.next_hop),
                next_hop=entry.attributes.next_hop,
                nlri=nlri_in_mp_reach)
            path_attr.append(mp_reach_nlri_msg)
        update_msg = BGP4.bgp4_update(path_attr=path_attr, nlri=nlri)
        bgp4_msg = BGP4.bgp4(type_=BGP4.BGP4_UPDATE, data=update_msg)
        self.serialize_and_send(bgp4_msg)
class RelayServer(object):
    """Eventlet chat/relay server (Python 2).

    Accepts TCP clients, wraps each in a LineBuffer, and re-broadcasts every
    message to all other clients.  Message ids are remembered (bounded list)
    so a message relayed between interconnected servers is not re-sent.
    """

    def __init__(self, hostname, port):
        self._address = (hostname, port)
        self._clients = {}      # address -> socket
        self._messages = []     # recently-seen message ids (dedup ring)
        self._pool = eventlet.GreenPool(256)
        self._queue = Queue()   # (message, origin address) pending broadcast

    def start(self):
        """Accept loop; one green thread per client plus the broadcaster."""
        server = eventlet.listen(self._address)
        eventlet.spawn(self._broadcast)
        while True:
            sock, address = server.accept()
            print "Accepted connection from {}".format(address)
            self._clients[address] = sock
            self._pool.spawn_n(self._handle_client, sock, address)

    def _handle_client(self, client, address):
        # NOTE(review): `buffer` shadows the py2 builtin of the same name
        buffer = LineBuffer(self._receive, address)
        while True:
            buffer.receive(client.recv(4096))

    def _receive(self, message, address):
        self._handle_message(message, address)

    def _handle_message(self, message, address):
        """Parse one line: JSON pass-through or raw text wrapped with a fresh
        uuid; 'connect host:port' links another server; 'source' sends this
        file back to the requester."""
        try:
            structure = json.loads(message)
        except ValueError:
            structure = {"id": str(uuid.uuid4()), "message": message.rstrip()}
        payload = structure['message']
        if payload.startswith("connect "):
            # NOTE(review): rebinds `address` from the sender's address to
            # the parsed target before calling _connect
            address = payload[8:].split(':')
            self._connect((address[0], int(address[1])))
        elif payload == "source":
            self._clients[address].sendall(open(__file__).read())
        else:
            structure['address'] = address
            self._send(json.dumps(structure), address)

    def _send(self, message, address):
        self._queue.put((message, address))

    def _broadcast(self):
        """Broadcaster loop: dedup by message id, then fan out to every
        client except the origin."""
        while True:
            message, address = self._queue.get()
            try:
                payload = json.loads(message)
            except ValueError:
                print "Unable to send message: '{!r}'".format(message)
                continue
            if payload['id'] not in self._messages:
                # keep a bounded history of seen ids, newest first
                self._messages = [payload['id']] + self._messages[:256]
                for client_address, client in self._clients.items():
                    if client_address != address:
                        client.sendall(message + "\n")

    def _connect(self, address):
        """Dial a peer server and treat it like any other client."""
        sock = eventlet.connect(address)
        self._clients[address] = sock
        self._pool.spawn_n(self._handle_client, sock, address)
        print "Connected to {!r}".format(address)
class Connection(object):
    """One BGP peer session (earlier Python-2 variant of the class above:
    print-based logging, capability check stubbed, notification stubbed)."""

    def __init__(self, socket, address):
        super(Connection, self).__init__()
        self.socket = socket
        self.address = address
        self.is_active = True
        # The limit is arbitrary. We need to limit queue size to
        # prevent it from eating memory up
        self.send_q = Queue(128)
        # data structures for BGP
        self.peer_ip = None
        self.peer_as = None
        self.peer_id = None
        self.peer_capabilities = []
        self._4or6 = 0          # session address family: 4, 6, or 0
        self.hold_time = 240

    def close(self):
        print "close the connect from", self.address
        self.socket.close()

    @_deactivate
    def _recv_loop(self):
        """Read framed BGP messages until the socket closes or errors."""
        header_size = BGP4_HEADER_SIZE
        while self.is_active:
            buf = bytearray()
            receive = self._exact_receive(header_size)
            if receive != '':
                buf.extend(receive)
            else:
                break
            (marker, packet_len, msg_type) = struct.unpack(BGP4_PACK_STR,
                                                           buffer(buf))
            required_len = packet_len - header_size
            if required_len:
                # notification message has only a header
                receive = self._exact_receive(required_len)
                if receive != '':
                    buf.extend(receive)
                else:
                    break
            msg = BGP4.bgp4.parser(buffer(buf[0:packet_len]))
            self._handle(msg)
            eventlet.sleep(0)  # yield to other green threads

    def _exact_receive(self, required_len):
        '''
        receive exact size of data from socket
        returns empty string if socket closed/error
        '''
        buf = bytearray()
        while len(buf) < required_len:
            more_data = self.socket.recv(required_len - len(buf))
            if len(more_data) != 0:
                buf.extend(more_data)
            else:
                self.is_active = False
                return ''
        return buf

    def _handle(self, msg):
        """Dispatch one parsed BGP4 message by its type."""
        msg_type = msg.type_
        if msg_type == BGP4.BGP4_OPEN:
            self._handle_open(msg.data)
            print 'receive OPEN msg'
        elif msg_type == BGP4.BGP4_UPDATE:
            print 'receive UPDATE msg'
            self._handle_update(msg.data)
        elif msg_type == BGP4.BGP4_NOTIFICATION:
            print 'receive NOTIFICATION msg'
            self._handle_notification(msg.data)
        elif msg_type == BGP4.BGP4_KEEPALIVE:
            self._handle_keepalive(msg)
            print 'receive KEEPALIVE msg'
        else:
            print 'receive unknown msg_type', msg_type

    def __check_capabilities(self, peer_capabilities):
        """
        1) checks if some important capabilities are supported by peer return
           True if OK
        2) assigns self.capabilities, which is the actual capabilities used
           in this connection
        """
        # XXX  -- stub: always accepts the peer
        return True

    def _handle_open(self, msg):
        """Process the peer's OPEN: negotiate hold time, record identity and
        capabilities, reply, then start keepalives and send the route table."""
        self.peer_as = msg.my_as
        peer_holdtime = msg.hold_time
        self.hold_time = min(peer_holdtime, self.hold_time)
        self.peer_id = msg.bgp_identifier
        self.peer_capabilities = msg.data
        for capability in self.peer_capabilities:
            if isinstance(capability, BGP4.multi_protocol_extension):
                if capability.addr_family == 1:
                    self._4or6 = 4
                elif capability.addr_family == 2:
                    self._4or6 = 6
                else:
                    self._4or6 = 0
            if isinstance(capability, BGP4.support_4_octets_as_num):
                self.peer_as = capability.as_num
        print '4/6:', self._4or6
        print 'peer_as:', self.peer_as
        print 'hold_time:', self.hold_time
        print 'peer_id:', convert.ipv4_to_str(self.peer_id)
        print 'capability:', self.peer_capabilities
        self.send_open_msg()
        if self.__check_capabilities(self.peer_capabilities):
            hub.spawn(self.keepalive)
            self.send_current_route_table()
        else:
            self.send_notification_msg()

    def keepalive(self):
        """Periodic keepalive sender (no hold-timer enforcement here)."""
        while True:
            self.send_keepalive_msg()
            hub.sleep(self.hold_time / 3)

    def __check_AFI(self, afi):
        """Map a BGP AFI constant to 4/6, or None if unrecognized."""
        if afi == BGP4.AFI_IPV4:
            return 4
        elif afi == BGP4.AFI_IPV6:
            return 6
        else:
            return None

    def _handle_update(self, msg):
        """Translate an UPDATE into advertised/withdrawn BGPEntry lists and
        apply them; drops the update when our AS is in the AS_PATH."""
        print '----UPDATE----'
        advert_entries = []
        withdraw_entries = []
        if msg.wd_routes:
            for i in msg.wd_routes:
                entry = route_entry.BGPEntry(i.prefix, i.length, 4)
                withdraw_entries.append(entry)
        if msg.nlri:
            for i in msg.nlri:
                entry = route_entry.BGPEntry(i.prefix, i.length, 4)
                advert_entries.append(entry)
        attributes = route_entry.Attributes()
        for i in msg.path_attr:
            if i.code == BGP4.bgp4_update._ORIGIN:
                attributes.origin = i.value
            elif i.code == BGP4.bgp4_update._AS_PATH:
                if Server.local_as in i.as_values:
                    # loop prevention: our AS already in the path
                    return
                attributes.as_path_type = i.as_type
                attributes.as_path = i.as_values
            elif i.code == BGP4.bgp4_update._NEXT_HOP:
                attributes.next_hop = i._next_hop
            elif i.code == BGP4.bgp4_update._MULTI_EXIT_DISC:
                attributes.multi_exit_disc = i.value
            elif i.code == BGP4.bgp4_update._MP_REACH_NLRI:
                _4or6 = self.__check_AFI(i.addr_family)
                attributes.next_hop = i.next_hop
                if i.nlri:
                    for j in i.nlri:
                        entry = route_entry.BGPEntry(j.prefix, j.length,
                                                     _4or6)
                        advert_entries.append(entry)
            elif i.code == BGP4.bgp4_update._MP_UNREACH_NLRI:
                _4or6 = self.__check_AFI(i.addr_family)
                if i.wd_routes:
                    for j in i.wd_routes:
                        entry = route_entry.BGPEntry(j.prefix, j.length,
                                                     _4or6)
                        withdraw_entries.append(entry)
        self.__add_route(advert_entries, attributes)
        self.__remove_route(withdraw_entries)

    def __add_route(self, advert_entries, attributes):
        # XXX acquire route table lock?
        for entry in advert_entries:
            entry.attributes = attributes
            Server.route_table.append(entry)

    def __remove_route(self, withdraw_entries):
        # XXX acquire route table lock?
        for i in withdraw_entries:
            for j in Server.route_table:
                if i == j:
                    Server.route_table.remove(j)

    def _handle_notification(self, msg):
        print 'error code', msg.err_code, 'sub error code', msg.err_subcode

    def _handle_keepalive(self, msg):
        pass

    @_deactivate
    def _send_loop(self):
        """Drain send_q onto the socket; clears send_q on exit."""
        try:
            while self.is_active:
                buf = self.send_q.get()
                self.socket.sendall(buf)
        finally:
            self.send_q = None

    def send(self, buf):
        if self.send_q:
            self.send_q.put(buf)

    def serve(self):
        """Run recv loop here, send loop on another green thread."""
        send_thr = hub.spawn(self._send_loop)
        try:
            self._recv_loop()
        finally:
            hub.kill(send_thr)
            hub.joinall([send_thr])

    #
    # Utility methods for convenience
    #
    def send_open_msg(self):
        """Serialize and queue our OPEN message."""
        open_reply = BGP4.bgp4_open(version=4, my_as=Server.local_as,
                                    hold_time=self.hold_time,
                                    bgp_identifier=Server.local_ipv4,
                                    data=Server.capabilities)
        bgp4_reply = BGP4.bgp4(type_=BGP4.BGP4_OPEN, data=open_reply)
        p = packet.Packet()
        p.add_protocol(bgp4_reply)
        p.serialize()
        self.send(p.data)

    def send_keepalive_msg(self):
        keepalive = BGP4.bgp4(type_=BGP4.BGP4_KEEPALIVE, data=None)
        p = packet.Packet()
        p.add_protocol(keepalive)
        p.serialize()
        self.send(p.data)

    def send_notification_msg(self):
        """
        input: err_code, err_subcode, and data
        output: send msg
        """
        # stub: not implemented in this variant
        pass

    def send_current_route_table(self):
        """
        used after OPEN to send current route_table to peer
        """
        print '** Sending route_table'
        for i in Server.route_table:
            path_attr = []
            # 0 is a valid origin number, campare with None
            if i.attributes.origin != None:
                origin_msg = BGP4.origin(value=i.attributes.origin)
                path_attr.append(origin_msg)
            if i.attributes.multi_exit_disc:
                multi_exit_disc_msg = BGP4.multi_exit_disc(value = \
                    i.attributes.multi_exit_disc)
                path_attr.append(multi_exit_disc_msg)
            if i.attributes.as_path:
                # information stored in as_path is the original got from
                # peer's update messages, so when sending to others, we should
                # insert server's AS number
                as_path_msg = BGP4.as_path(as_type = i.attributes.as_path_type,
                                           as_len = len(i.attributes.as_path) + 1,
                                           as_values = [Server.local_as] + \
                                               i.attributes.as_path)
                path_attr.append(as_path_msg)
            # nlri
            if i._4or6 == 4:
                nlri = [BGP4.NLRI(i.prefix_len, i.ip, i._4or6)]
                if i.attributes.next_hop:
                    next_hop_msg = BGP4.next_hop(_next_hop = \
                        i.attributes.next_hop)
                    path_attr.append(next_hop_msg)
            elif i._4or6 == 6:
                # IPv6 routes travel in MP_REACH_NLRI, not the top-level nlri
                nlri = []
                nlri_in_mp_reach = [BGP4.NLRI(i.prefix_len, i.ip, i._4or6)]
                mp_reach_nlri_msg = BGP4.mp_reach_nlri(next_hop_len = \
                    16 * len(i.attributes.next_hop),
                    next_hop = i.attributes.next_hop,
                    nlri = nlri_in_mp_reach)
                path_attr.append(mp_reach_nlri_msg)
            update_msg = BGP4.bgp4_update(path_attr=path_attr, nlri=nlri)
            bgp4_msg = BGP4.bgp4(type_=BGP4.BGP4_UPDATE, data=update_msg)
            p = packet.Packet()
            p.add_protocol(bgp4_msg)
            p.serialize()
            self.send(p.data)

    def send_update_msg(self):
        """
        convenient method to send update message
        """
        # stub: not implemented in this variant
        pass
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.

    A reader and a writer green thread service the socket; incoming frames
    are accumulated in ``_iobuf`` and dispatched once a complete message
    (8-byte header + body) is available.
    """

    _total_reqd_bytes = 0   # total bytes (header+body) the pending message needs
    _read_watcher = None    # green thread running handle_read
    _write_watcher = None   # green thread running handle_write
    _socket = None

    @classmethod
    def factory(cls, *args, **kwargs):
        """Create a connection and wait up to `timeout` seconds for it to
        become ready; raises the connection's error or OperationTimedOut."""
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn

    def __init__(self, *args, **kwargs):
        """Connect the socket and spawn the reader/writer green threads."""
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._iobuf = StringIO()
        self._write_queue = Queue()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)

        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(1.0)
        self._socket.connect((self.host, self.port))
        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)

        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        """Idempotently close: kill watchers (unless we ARE one), close the
        socket, and fail any in-flight callbacks."""
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True

        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # never kill the green thread we are currently running on
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host, ))

        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_write(self):
        """Writer loop: drain the write queue onto the socket.

        FIX: use sendall() instead of send() — send() may transmit only a
        prefix of the buffer and silently drop the rest, corrupting the
        frame stream (the log message and the sibling implementation both
        indicate sendall was intended).
        """
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket sendall for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        """Reader loop: block in select(), recv into _iobuf, and carve out
        complete messages (8-byte header carrying a 4-byte body length at
        offset 4) for process_msg(); empty recv means the peer closed."""
        run_select = partial(select.select, (self._socket, ), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                while True:
                    pos = self._iobuf.tell()
                    if pos < 8 or (self._total_reqd_bytes > 0
                                   and pos < self._total_reqd_bytes):
                        # we don't have a complete header yet or we
                        # already saw a header, but we don't have a
                        # complete message yet
                        break
                    else:
                        # have enough for header, read body len from header
                        self._iobuf.seek(4)
                        body_len = int32_unpack(self._iobuf.read(4))
                        # seek to end to get length of current buffer
                        self._iobuf.seek(0, os.SEEK_END)
                        pos = self._iobuf.tell()
                        if pos >= body_len + 8:
                            # read message header and body
                            self._iobuf.seek(0)
                            msg = self._iobuf.read(8 + body_len)
                            # leave leftover in current buffer
                            leftover = self._iobuf.read()
                            self._iobuf = StringIO()
                            self._iobuf.write(leftover)
                            self._total_reqd_bytes = 0
                            self.process_msg(msg, body_len)
                        else:
                            self._total_reqd_bytes = body_len + 8
                            break
            else:
                log.debug("connection closed by server")
                self.close()
                return

    def push(self, data):
        """Queue `data` for sending, split into out_buffer_size chunks."""
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        """Subscribe `callback` to server pushes of `event_type` and tell
        the server to send them."""
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(RegisterMessage(event_list=[event_type]),
                               timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        """Bulk version of register_watcher for several event types."""
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
class Minicap():
    """Client for a device-side minicap screen-streaming service.

    Owns the forwarded socket, a receive green thread feeding a queue, and a
    processing green thread that parses the minicap banner + framed JPEG
    stream and pushes frames to browsers over socketio.  A small state
    machine (runningState / desiredState) serializes start/stop requests.
    """

    def __init__(self, addr, serial, deviceInfos):
        # self.room=room
        self.addr = addr
        self.serial = serial
        self.pid = -1                       # device-side minicap process pid (from banner)
        self.desiredState = StateQueue()
        self.socket = None
        # 1 STATE_STOPPED, 2 STATE_STARTING, 3 STATE_STARTED, 4 STATE_STOPPING
        self.runningState = 1
        self.recv_status = 0   # 0: stopped; 1: running; -2: stopping
        self.push_status = 0   # 0: stopped; 1: running; -2: stopping
        self._recvData_init()
        self.socket = None
        self.minicapService = MinicapService(serial, addr[1], deviceInfos)
        self.namespace = '/screen%s' % self.serial

    def _recvData_init(self):
        """Reset all stream-parsing state (queue, banner, frame buffers)."""
        self.__dataq = Queue(1000)
        self.readBannerBytes = 0
        self.bannerLength = 0
        self.readFrameBytes = 0
        self.frameBodyLength = 0
        self.frameBodyLengthStr = b''
        self.frameBody = b''
        # FIX: the original dict literal had 'realHeight' twice and no
        # 'virtualHeight'; the later duplicate won, so the initialized banner
        # silently lacked the 'virtualHeight' key until ReadMsg created it.
        self.banner = {
            'version': 0,
            'length': 0,
            'pid': 0,
            'realWidth': 0,
            'realHeight': 0,
            'virtualWidth': 0,
            'virtualHeight': 0,
            'orientation': 0,
            'quirks': 0
        }

    def errorhandler(self, error=''):
        """Log an error and notify browsers via the screenStatus event."""
        print('[%s-minicap] ERROR: %s' % (self.serial, error))
        # print ('[minicap Error]: %s'%(error))
        try:
            socketio.emit('system', {
                'name': 'screenStatus',
                'data': error
            }, namespace=self.namespace)
        except:
            self.log('socketio send error')

    def createSocket(self):
        """(Re)connect the TCP socket to the forwarded minicap port."""
        try:
            if self.socket:
                self.socket.close()
                self.socket = None
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(0.15)
            self.socket.connect(self.addr)
        except Exception as e:
            raise CapError('connect error:' + str(e), 'createSocket')

    def recvdata(self):
        """Receive loop (green thread): socket -> internal queue.

        Runs while recv_status > 0; a recv error or EOF stops streaming and
        triggers a full _stop().
        """
        errorFlag = 0
        while self.recv_status > 0:
            try:
                data = self.socket.recv(4096)
                if data:
                    self.__dataq.put(data)
                else:
                    raise CapError('recv empty data', 'recvdata')
            except socket.timeout:
                # expected: 0.15s socket timeout lets us re-check recv_status
                pass
            except Exception as e:
                s = sys.exc_info()
                # self.log('recvError_%s_%s'%(str(e),s[2].tb_lineno))
                self.errorhandler('recvError_%s_%s' % (str(e),
                                                       s[2].tb_lineno))
                errorFlag = 1
                break
        self.recv_status = 0
        self.log('[recvClose]')
        if errorFlag:
            self.runningState = 4
            self._stop()

    def processdata(self):
        """Parse loop (green thread): internal queue -> ReadMsg()."""
        errorFlag = 0
        while self.push_status > 0:
            try:
                data = self.__dataq.get(timeout=0.1)
                self.ReadMsg(data)
            except eventlet.queue.Empty:
                time.sleep(0.05)
            except Exception as e:
                s = sys.exc_info()
                self.errorhandler('processdataError_%s_%s' %
                                  (str(e), str(s[2].tb_lineno)))
                # socketio.emit('system',str(e),namespace=self.namespace)
                errorFlag = 1
                break
        self.push_status = 0
        self.log('[processClose]')
        if errorFlag:
            self.runningState = 4
            self._stop()

    def ReadMsg(self, streamInfo):
        """Consume raw stream bytes: the first chunk carries the 24-byte
        minicap global banner ('<BBIIIIIBB'); everything after is framed
        image data handled by getOneImageInfo()."""
        if self.bannerLength == 0:
            self.banner['version'], self.banner['length'], self.banner[
                'pid'], self.banner['realWidth'], self.banner[
                    'realHeight'], self.banner['virtualWidth'], self.banner[
                        'virtualHeight'], self.banner[
                            'orientation'], self.banner[
                                'quirks'] = struct.unpack(
                                    '<BBIIIIIBB', streamInfo[:24])
            self.pid = self.banner['pid']
            self.bannerLength = self.banner['length']
            self.getOneImageInfo(streamInfo[24:])
        else:
            self.getOneImageInfo(streamInfo)

    def getOneImageInfo(self, stream):
        """Reassemble frames: each frame is a 4-byte little-endian length
        followed by that many body bytes; bodies may span chunks, and one
        chunk may contain several frames (handled via recursion)."""
        for i, v in enumerate(stream):
            if self.readFrameBytes < 4:
                # still collecting the 4-byte frame length prefix
                self.frameBodyLengthStr += stream[i:i + 1]
                if self.readFrameBytes == 3:
                    self.frameBodyLength, = struct.unpack(
                        '<I', self.frameBodyLengthStr)
                self.readFrameBytes += 1
            else:
                if len(stream) - i >= self.frameBodyLength:
                    # the rest of this frame is entirely inside this chunk
                    self.frameBody += bytes(stream[i:i +
                                                   self.frameBodyLength])
                    self.datahandler(self.frameBody)
                    temp = self.frameBodyLength
                    self.frameBody = b''
                    self.readFrameBytes, self.frameBodyLength = 0, 0
                    self.frameBodyLengthStr = b''
                    if i + temp < len(stream):
                        # print ('<<<',len(stream)-i-temp)
                        self.getOneImageInfo(stream[i + temp:])
                        break
                    else:
                        break
                else:
                    # frame continues into the next chunk
                    self.frameBody += bytes(stream[i:len(stream)])
                    self.readFrameBytes += len(stream) - i
                    self.frameBodyLength -= len(stream) - i
                    break

    def datahandler(self, data):
        """Push one complete frame to the browser(s)."""
        try:
            # socketio.emit('imgdata%s'%self.serial,data,namespace='/screen')
            # socketio.emit('imgdata',data,room=self.room,namespace='/screen')
            socketio.emit('imgdata', data, namespace=self.namespace)
        except Exception as e:
            print('socketio error:%s' % str(e))

    def log(self, log):
        print('[%s-minicap] %s' % (self.serial, log))

    def _startService(self):
        """Launch minicap on the device with the current frame config."""
        try:
            self.log('Launching MiniCap service')
            cmd = "-P %s" % self.frameConfig.toString()
            self.minicapService.minicap_run(cmd)
            time.sleep(0.8)
        except Exception as e:
            raise CapError('startService error:' + str(e), '_startService')

    def _waitForPid(self):
        """Poll up to ~1s for the banner-reported pid; raise if never seen."""
        self.log('waiting pid...')
        for i in range(10):
            if self.pid > 0:
                self.log('get minicap pid%s' % self.pid)
                return self.pid
            time.sleep(0.1)
        if self.pid <= 0:
            raise CapError('cannot get pid', '_waitForPid')

    def __connectService(self):
        """Forward the port, connect the socket, and spawn the recv/process
        green threads (only from the fully-stopped state)."""
        try:
            self.log('Connecting to minicap service...')
            self.minicapService.forward_minicap()
            self.log('addr:%s' % self.addr[1])
            self.createSocket()
            if self.recv_status == 0 and self.push_status == 0:
                self.recv_status = 1
                self.push_status = 1
                t1 = eventlet.spawn_n(self.recvdata)
                t2 = eventlet.spawn_n(self.processdata)
            else:
                raise CapError(
                    'recv:%s/push:%s Error' %
                    (self.recv_status, self.push_status), '__connectService')
        except Exception as e:
            raise CapError(str(e), '__connectService')

    def _disconnectService(self):
        """Signal both loops to stop (statuses go to -2)."""
        self.log('Disconnecting from minicap service')
        if self.recv_status == 1:
            self.recv_status = -2
        if self.push_status == 1:
            self.push_status = -2
        if self.socket is not None:
            # self.socket.close()
            self.log('close socket')

    def _stopService(self):
        """Kill the device-side minicap process if we know its pid."""
        if self.pid > 0:
            st = time.time()
            self.log('Stopping minicap service')
            self.minicapService.killPid(self.pid)
            print('kill time', time.time() - st)
            # assert False,'ffff'
            # time.sleep(10)
        else:
            self.log('Stopping minicap service SKIP')

    def checkclosed(self):
        """Wait up to ~0.3s for both loops to report stopped.
        Returns 0 when both stopped, 1 on timeout."""
        for i in range(30):
            if self.recv_status == 0 and self.push_status == 0:
                return 0
            time.sleep(0.01)
        return 1

    def _stop(self):
        """Full teardown: stop loops, kill device process, reset parser
        state, and notify browsers that the screen disconnected."""
        self._disconnectService()
        if self.checkclosed():
            self.log('WARNING_recv:%s;push:%s' %
                     (self.recv_status, self.push_status))
        self._stopService()
        self._recvData_init()
        self.pid = -1
        self.socket = None
        self.recv_status = 0
        self.push_status = 0
        self.runningState = 1
        socketio.emit('system', {
            'name': 'screenStatus',
            'data': 'disconnected'
        }, namespace=self.namespace)
        self.log('Stopped minicap')

    def _ensureState(self):
        """Drive runningState toward the next desiredState entry; re-invokes
        itself after each transition to drain the queue."""
        if self.desiredState.isEmpty():
            return
        if self.runningState == 2 or self.runningState == 4:
            # a transition is already in flight; it will re-run _ensureState
            self.log('WAIT')
            return
        elif self.runningState == 1:
            if self.desiredState.get() == 3:
                try:
                    self.runningState = 2
                    self._startService()
                    self.__connectService()
                    self._waitForPid()
                    self.runningState = 3
                    self.log('Started minicap')
                    socketio.emit('system', {
                        'name': 'screenStatus',
                        'data': 'connected'
                    }, namespace=self.namespace)
                except Exception as e:
                    self.errorhandler('EXCEPTION-->STOP:%s' % str(e))
                    self.log('catch error,will close')
                    if self.runningState != 1:
                        self.runningState = 4
                        self._stop()
                    else:
                        self.log('ERROR SKIP, already Closed')
                finally:
                    self._ensureState()
            else:
                self.log('stop ignore')
        elif self.runningState == 3:
            if self.desiredState.get() == 1:
                try:
                    self.runningState = 4
                    self._stop()
                finally:
                    self._ensureState()
            else:
                self.log('start ignore' + str(self.runningState))
                # socketio.emit('system','start fail',self.namespace)

    def init(self):
        """Install minicap resources and build the frame config from the
        device's display info; returns the display dict or None."""
        self.install()
        display = getDisplayInfo(self.serial)
        if display:
            real = {'width': display['width'], 'height': display['height']}
            virtual = {
                'width': display['width'],
                'height': display['height'],
                'rotation': display['rotation']
            }
            self.frameConfig = FrameConfig(real, virtual)
            self.log('get display infos:%s' % display)
            return display
        else:
            self.frameConfig = None
            self.log('init failed cause display is None')
            return None

    def install(self):
        self.log('installing minicap resource')
        self.minicapService.installAll()

    def start(self):
        """Request the started state (desiredState 3)."""
        st = time.time()
        self.log('Requesting frame producer to start')
        self.desiredState.push(3)
        self._ensureState()
        print('start time', time.time() - st)

    def stop(self):
        """Request the stopped state (desiredState 1)."""
        st = time.time()
        self.log('Requesting frame producer to stop')
        self.desiredState.push(1)
        self._ensureState()
        print('stop time', time.time() - st)

    def restart(self):
        st = time.time()
        self.log('restart')
        self.stop()
        self.start()
        # if self.runningState==2 or self.runningState==3:
        #     self.desiredState.push(1)
        #     self.desiredState.push(3)
        #     self._ensureState()
        # else:
        #     self.log('restart error:%s'%(self.runningState))
        print('total time', time.time() - st)

    def updateConfig(self, width, height):
        """Change the projection size; restarts the stream if it changed."""
        if self.frameConfig.virtualWidth == width and \
                self.frameConfig.virtualHeight == height:
            self.log('Keeping %dx%d as current frame producer projection' %
                     (width, height))
            return
        else:
            self.log('Setting frame producer projection to %dx%d' %
                     (width, height))
            self.frameConfig.virtualWidth = width
            self.frameConfig.virtualHeight = height
            self.restart()  # restart

    def updateRotation(self, rotation):
        """Change the rotation; restarts the stream if it changed."""
        if self.frameConfig.rotation == rotation:
            self.log('Keeping %d as current frame producer rotation' %
                     rotation)
            return
        else:
            self.log('Setting frame producer rotation to %d' % rotation)
            self.frameConfig.rotation = rotation
            self.restart()  # restart
class SockWrapper(object): """ base class for SFK and Client(Rengine) sockets wrappers """ NATIVE_PACKET = ScpPacket # placeholder, change it in successors def __init__(self): self.queue_send = Queue() self.queue_recv = Queue() self.appid = None # interface self.sock = None # interface for packet dispatchers - dockers def put_packet(self, packet): self.queue_send.put(packet) def get_packet(self): return self.queue_recv.get() # sender and recver started as greenthreads def sender(self, callback=lambda: None): """get packet from sending queue, send it via sock. By convention, packet type checking performed before putting in queue """ try: while True: packet = self.queue_send.get() data = packet.assemble() self.sock.sendall(data) # TODO if DEBUG try: if packet.get_msg_type() == 'pong': LOGGER.debug('pong sent %s' % self) except AttributeError: pass except Exception: LOGGER.error(str(self) + " sender error") eventlet.spawn_n(callback) def recver(self, callback=lambda: None): """ recieve packets from sock, check packet's type, put packet to recv queue """ f = self.sock.makefile() try: while True: try: packet_class = determine_packet_type(f) except Disconnection as e: raise Disconnection if packet_class == self.NATIVE_PACKET: packet = packet_class() packet.read_fields(f) self.queue_recv.put(packet) else: LOGGER.error("{0} recver: unexpected magic".format( str(self))) raise UnexpectedProtocol except Disconnection as e: LOGGER.info("Disconnection: {0}".format(str(self))) except Exception as e: LOGGER.error("recver error: {0} {1}".format(str(self), str(e))) LOGGER.info(str(self) + " recver terminate") eventlet.spawn_n(callback) def close_socket(self): try: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() LOGGER.info("{0} sockwrapper close socket".format(str(self))) except Exception as e: LOGGER.error("Fails socket close: %s" % str(e))
class stfServices():
    """Manages the on-device STF service/agent pair for one device.

    Keeps two TCP connections (service on addr, agent on addr+1), a
    greenthread that drains a send queue and one that receives events,
    and a start/stop state machine mirroring the minicap manager.
    """

    def __init__(self, addr, serial):
        self.serial = serial
        self.addr = addr
        self.socket = None    # service connection (events + request/response)
        self.socket22 = None  # agent connection (input/key commands)
        self.desiredState = StateQueue()
        self.serviceQueue = Queue(500)  # outbound [channel, bytes] pairs
        # 1=STATE_STOPPED, 2=STATE_STARTING, 3=STATE_STARTED, 4=STATE_STOPPING
        self.runningState = 1
        # greenthread status: 0=initial, 1=running, -1=closed, -2=closing
        self.send_status = 0
        self.get_status = 0
        self.service = Service(serial, [self.addr[1], self.addr[1] + 1])
        self.msgQ = {}  # request id (str) -> response Envelope
        self.phone = None
        self._init()
        self.namespace = '/screen%s' % self.serial

    def _init(self):
        # app list is reset on every stop; populated by BrowserPackageEvent.
        self.app = []

    def log(self, log):
        print('[%s-stfService]:%s' % (self.serial, log))

    def errorhandler(self, error='', location=''):
        """Log an error and best-effort notify the client over socketio."""
        print('[%s-stfService] ERROR: %s' % (self.serial, error))
        try:
            socketio.emit('system', {
                'name': 'serviceStatus',
                'data': error
            }, namespace=self.namespace)
        # NOTE(review): bare except silently swallows everything, including
        # KeyboardInterrupt; deliberate best-effort but worth narrowing.
        except:
            self.log('socketio send error')

    def createSocket(self):
        """Open the service and agent sockets; raise ServiceError on failure."""
        try:
            # self.log('addr:%s_%s'%(self.addr[0],self.addr[1]))
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket22 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Short timeout so getInfo's recv loop wakes up regularly.
            self.socket.settimeout(0.5)
            self.socket.connect(self.addr)
            self.socket22.connect((self.addr[0], self.addr[1] + 1))
        except Exception as e:
            raise ServiceError(str(e), 'createSocket')

    def _startService(self):
        try:
            self.log('Launching STF service')
            self.service.service_run()
            # t1=eventlet.spawn_n(self.service.service_run)
        except Exception as e:
            raise ServiceError(str(e), '_startService')

    def __connectService(self):
        """Forward the device ports, connect, and spawn the pump threads."""
        try:
            self.log('Connecting to STF service')
            self.service.forward_service(str(self.addr[1]))
            self.service.forward_agent(str(self.addr[1] + 1))
            time.sleep(0.5)  # give adb forward a moment before connecting
            self.log('addr:%s&%s' % (self.addr[1], self.addr[1] + 1))
            self.createSocket()
            if self.get_status == 0 and self.send_status == 0:
                self.send_status = 1
                t2 = eventlet.spawn_n(self.send)
                self.get_status = 1
                t1 = eventlet.spawn_n(self.getInfo)
            else:
                # Pumps from a previous session are still winding down.
                raise ServiceError(
                    'get:%s/send:%s Error' % (self.get_status,
                                              self.send_status),
                    '__connectService')
        except Exception as e:
            raise ServiceError(str(e), '__connectService')

    def install(self):
        self.log('installing stfservice resource')
        self.service.installAll()

    def init(self):
        self.install()

    def _disconnectService(self):
        """Signal pump threads to stop and close both sockets."""
        self.log('Disconnecting from STF service')
        if self.get_status == 1 and self.send_status == 1:
            self.send_status = -2
            self.get_status = -2
        else:
            self.log('WARNING revb status2 error:%s_%s' %
                     (self.get_status, self.send_status))
        if self.socket:
            self.socket.close()
        if self.socket22:
            self.socket22.close()

    def _stopService(self):
        # Only kill the on-device process if we ever got the app event,
        # i.e. the service actually came up.
        if self.app:
            self.log('Stopping STF service')
            self.service.killProc('stf.agent')
        else:
            self.log('Stopping STF service SKIP')

    def _waitForAppEvent(self):
        """Poll up to ~1s for the BrowserPackageEvent that fills self.app."""
        for i in range(10):
            if self.app:
                self.log('get STF app:%s' % self.app)
                return self.app
            time.sleep(0.1)
        if self.app == []:
            raise ServiceError('can not get app', '_waitForAppEvent')

    def _stop(self):
        """Tear everything down and reset all per-session state."""
        self._disconnectService()
        self._stopService()
        self.runningState = 1
        self._init()
        self.phone = None
        self.socket = None
        self.socket22 = None
        self.msgQ = {}
        self.get_status = 0
        self.send_status = 0
        self.app = []
        self.log('STF service stopped')
        socketio.emit('system', {
            'name': 'serviceStatus',
            'data': 'disconnected'
        }, namespace=self.namespace)

    def _ensureState(self):
        # Drive runningState toward the most recent desired state.
        # Recurses (via finally) until the desired-state queue is drained.
        if self.desiredState.isEmpty():
            return
        if self.runningState == 2 or self.runningState == 4:
            # Transition already in flight; it will re-enter _ensureState.
            pass
        elif self.runningState == 1:
            if self.desiredState.get() == 3:
                try:
                    self.runningState = 2
                    self._startService()
                    self.__connectService()
                    self._waitForAppEvent()
                    self.runningState = 3
                    self.log('STF service started')
                    socketio.emit('system', {
                        'name': 'serviceStatus',
                        'data': 'connected'
                    }, namespace=self.namespace)
                except Exception as e:
                    self.errorhandler(str(e), '_ensureState')
                    # Roll back a partial start unless already stopped.
                    if self.runningState != 1:
                        self.runningState = 4
                        self._stop()
                    else:
                        self.log('ERROR SKIP, already Closed')
                finally:
                    self._ensureState()
            else:
                self.log('stop SKIP')
        elif self.runningState == 3:
            if self.desiredState.get() == 1:
                self.runningState = 4
                try:
                    self._stop()
                finally:
                    self._ensureState()
            else:
                self.log('start SKIP')

    def start(self):
        """Request the service to start (desired state 3)."""
        self.log('Requesting frame producer to START')
        self.desiredState.push(3)
        self._ensureState()

    def stop(self):
        """Request the service to stop (desired state 1)."""
        self.log('Requesting frame producer to STOP')
        self.desiredState.push(1)
        self._ensureState()

    def restart(self):
        """Queue a stop+start, only meaningful while starting/started."""
        self.log('restart')
        if self.runningState == 2 or self.runningState == 3:
            self.desiredState.push(1)
            self.desiredState.push(3)
            self._ensureState()
        else:
            self.log('restart error:%s' % (self.runningState))

    def eventHandler(self, data):
        """Decode one delimited Envelope from the service socket.

        Envelopes with an id are responses to runServiceCommand and are
        parked in msgQ for getResponse; id-less envelopes are unsolicited
        device events.
        """
        data = delimitedStream(data)
        envelop = Envelope()
        envelop.ParseFromString(data)
        if envelop.id:
            self.msgQ[str(envelop.id)] = envelop
        else:
            etype = envelop.type
            if etype == EVENT_BATTERY:
                temp = BatteryEvent()
                temp.ParseFromString(envelop.message)
                print('BatteryEvent', temp.status, temp.health, temp.level,
                      type(temp))
            elif etype == EVENT_AIRPLANE_MODE:
                temp = AirplaneModeEvent()
                temp.ParseFromString(envelop.message)
                print('AirplaneModeEvent', temp.enabled)
            elif etype == EVENT_BROWSER_PACKAGE:
                temp = BrowserPackageEvent()
                temp.ParseFromString(envelop.message)
                print('BrowserPackageEvent', temp.selected)
                # Arrival of this event is what releases _waitForAppEvent.
                for app in temp.apps:
                    self.app.append(app.name)
                    print('app', app.name, app.component)
            elif etype == EVENT_CONNECTIVITY:
                temp = ConnectivityEvent()
                temp.ParseFromString(envelop.message)
                print('ConnectivityEvent', temp.connected)
            elif etype == EVENT_PHONE_STATE:
                temp = PhoneStateEvent()
                temp.ParseFromString(envelop.message)
                print('PhoneStateEvent', temp.state)
            elif etype == EVENT_ROTATION:
                temp = RotationEvent()
                temp.ParseFromString(envelop.message)
                print('RotationEvent', temp.rotation)
                self.notify('RotationEvent', {'rotation': temp.rotation})
            else:
                print('heheh', envelop.type)

    def notify(self, eventname, data):
        """Forward a device event to the browser over socketio."""
        socketio.emit('event', {
            'eventname': eventname,
            'data': data
        }, namespace=self.namespace)

    def getInfo(self):
        """Receive loop (greenthread): reads the service socket until
        get_status leaves the running state or an error occurs."""
        buffersize = 1024
        self.log('<start get>')
        errorFlag = 0
        while self.get_status > 0:
            try:
                data = self.socket.recv(buffersize)
                if data:
                    self.eventHandler(data)
                else:
                    # assert False,'stfService recv empty data'
                    # self.log('stfservice recv empty data')
                    time.sleep(0.5)
            except socket.timeout:
                # Expected every 0.5s (settimeout in createSocket); just
                # re-check get_status.
                pass
            except Exception as e:
                errorFlag = 1
                self.errorhandler('getInfoError_%s' % str(e))
                break
        self.get_status = 0
        self.log('<close get>')
        if errorFlag:
            self._stop()

    def send(self):
        """Send loop (greenthread): drains serviceQueue, routing 'agent'
        payloads to socket22 and everything else to the service socket."""
        self.log('[start send]')
        errorFlag = 0
        while self.send_status > 0:
            try:
                data = self.serviceQueue.get(False)
                if data[0] == 'agent':
                    self.socket22.send(data[1])
                else:
                    self.socket.send(data[1])
                # self.sendhandler(data)
            except eventlet.queue.Empty:
                time.sleep(0.01)
            except Exception as e:
                errorFlag = 1
                # print (str(e))
                self.errorhandler('sendError_%s' % str(e))
                break
        self.send_status = 0
        self.log('[close send]')
        if errorFlag:
            self._stop()

    def sendF(self, data):
        # Direct (unqueued) write to the service socket.
        self.socket.send(data)

    # def start(self):
    #     self.log('start')
    #     if self.send_status!=1 and self.get_status!=-2 and self.send_status!=1 and self.get_status!=-2:
    #         if self.createSocket():
    #             self.get_status=1
    #             self.send_status=1
    #             t1=eventlet.spawn_n(self.send)
    #             t2=eventlet.spawn_n(self.getInfo)
    #             self.log('stfservice start success')
    #             return 1
    #         else:
    #             self.log('stfservice start failed:connect error')
    #             return 0
    #     else:
    #         self.log('stfservice already started')
    #         return 0
    # def close(self):
    #     if self.send_status==1 and self.get_status==1 :
    #         self.send_status=-2
    #         self.get_status=-2
    #         socketio.emit('event2','stop',namespace='/stfservice')
    #         print ('stfservice close success')
    #         return 1
    #     else:
    #         print ('stfservice close fail:already close')
    #         return 0

    def getkey(self, keyname):
        """Map a key name to its KEYCODE_* value, or None if unknown."""
        key = keyMap.get('KEYCODE_' + keyname.upper())
        if key:
            return key
        else:
            print('unKnown key:%s' % keyname)
            return None

    def runAgentCommand(self, type1, message):
        """Wrap *message* in an Envelope and queue it on the agent channel."""
        envelop = Envelope()
        envelop.type = type1
        envelop.message = message
        self.serviceQueue.put(
            ['agent', delimitingStream(envelop.SerializeToString())])

    def runServiceCommand(self, mid, typeT, message):
        """Queue a request on the service channel and spawn a response
        waiter keyed by *mid*."""
        envelop = Envelope()
        envelop.type = typeT
        envelop.message = message
        envelop.id = mid
        self.serviceQueue.put(
            ['service', delimitingStream(envelop.SerializeToString())])
        eventlet.spawn_n(self.getResponse, mid)

    def getProperties(self, data):
        d = GetPropertiesRequest()
        d.properties.extend(['imei', 'phoneNumber', 'iccid', 'network'])
        mid = random.randint(10001, 99999)
        self.runServiceCommand(mid, GET_PROPERTIES, d.SerializeToString())

    def GetBrowsersRequest(self, data):
        d = GetBrowsersRequest()
        mid = random.randint(10001, 99999)
        # NOTE(review): sends GET_PROPERTIES with a GetBrowsersRequest
        # payload; getResponse dispatches GET_BROWSERS, so this looks like
        # it should be GET_BROWSERS — confirm against the STF wire protocol.
        self.runServiceCommand(mid, GET_PROPERTIES, d.SerializeToString())

    def setlockStatue(self, data):
        d = SetKeyguardStateRequest()
        if data['enabled'] == True or data['enabled'] == 'true':
            d.enabled = True
        else:
            d.enabled = False
        mid = random.randint(10001, 99999)
        # NOTE(review): sends GET_PROPERTIES with a SetKeyguardStateRequest
        # payload; likely should be SET_KEYGUARD_STATE — confirm.
        self.runServiceCommand(mid, GET_PROPERTIES, d.SerializeToString())

    def getResponse(self, mid):
        """Poll msgQ up to ~1s for the response envelope with id *mid*."""
        for i in range(10):
            envelop = self.msgQ.get(str(mid))
            if envelop:
                self.msgQ.pop(str(mid))
                if envelop.type == GET_PROPERTIES:
                    temp = GetPropertiesResponse()
                    temp.ParseFromString(envelop.message)
                    self.phone = temp.properties
                    print(temp)
                elif envelop.type == GET_BROWSERS:
                    temp = GetBrowsersResponse()
                    temp.ParseFromString(envelop.message)
                    print(temp, temp.selected)
                elif envelop.type == SET_KEYGUARD_STATE:
                    temp = SetKeyguardStateResponse()
                    temp.ParseFromString(envelop.message)
                    print(temp)
                else:
                    print('else')
                    return
                return
            else:
                time.sleep(0.1)
        print('nothing')

    def type(self, data):
        """Type text on the device via the agent."""
        d = DoTypeRequest()
        d.text = data['text']
        self.runAgentCommand(DO_TYPE, d.SerializeToString())

    def keyDown(self, data):
        d = KeyEventRequest()
        d.event = DOWN
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def keyUp(self, data):
        d = KeyEventRequest()
        d.event = UP
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def keyPress(self, data):
        d = KeyEventRequest()
        d.event = PRESS
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def wake(self, data):
        """Wake the device screen."""
        d = DoWakeRequest()
        self.runAgentCommand(DO_WAKE, d.SerializeToString())
        print('dowake')

    def rotate(self, data):
        """Set (and optionally lock) the device rotation."""
        d = SetRotationRequest()
        d.rotation = data['rotation']
        d.lock = data['lock']
        self.runAgentCommand(SET_ROTATION, d.SerializeToString())
class RealtimeScreenCap():
    """Streams minicap frames from a device socket to browsers via socketio.

    One greenthread (recvdata) reads raw bytes from the minicap socket into
    an internal queue; another (processdata) reassembles the minicap wire
    format (24-byte global banner, then [4-byte LE length][JPEG body]
    frames) and emits each complete frame.
    """

    def __init__(self, addr, key):
        self.key = key    # device/room identifier used in socketio events
        self.addr = addr  # (host, port) of the forwarded minicap socket
        # thread status: 0=initial, 1=running, -1=closed, -2=closing
        self.recv_status = 0
        self.push_status = 0
        self.recvData_init()

    def recvData_init(self):
        """Reset all stream-parsing state for a fresh connection."""
        self.__dataq = Queue(1000)
        self.readBannerBytes = 0
        self.bannerLength = 0
        self.readFrameBytes = 0       # how many of the 4 length bytes we have
        self.frameBodyLength = 0      # remaining bytes of the current frame
        self.frameBodyLengthStr = b''
        self.frameBody = b''
        # FIX: the second 'realHeight' entry was a duplicate dict key that
        # silently dropped 'virtualHeight' from the literal; myReadMsg
        # unpacks 9 banner fields including virtualHeight.
        self.banner = {
            'version': 0,
            'length': 0,
            'pid': 0,
            'realWidth': 0,
            'realHeight': 0,
            'virtualWidth': 0,
            'virtualHeight': 0,
            'orientation': 0,
            'quirks': 0
        }

    def connect(self):
        """Connect to the minicap socket. Returns 1 on success, 0 on error."""
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(2)
            self.socket.connect(self.addr)
            return 1
        except Exception as e:
            self.errorhandler('connectError')
            return 0

    def recvdata(self):
        """Receive loop (greenthread): socket -> internal queue."""
        while self.recv_status > 0:
            try:
                data = self.socket.recv(4096)
                if data:
                    self.__dataq.put(data)
                else:
                    # Empty read means the peer closed; force the error path.
                    assert False, 'recv empty data'
            except socket.timeout:
                pass  # periodic wake-up to re-check recv_status
            except Exception as e:
                s = sys.exc_info()
                self.errorhandler('recvError_%s_%s' % (str(e),
                                                       s[2].tb_lineno))
                break
        self.recv_status = -1
        self.socket.close()
        print('[recvClose]')

    def myReadMsg(self, streamInfo):
        """Parse the 24-byte minicap banner once, then hand the remainder
        (and all later chunks) to the frame reassembler."""
        if self.bannerLength == 0:
            self.banner['version'], self.banner['length'], self.banner[
                'pid'], self.banner['realWidth'], self.banner[
                    'realHeight'], self.banner['virtualWidth'], self.banner[
                        'virtualHeight'], self.banner[
                            'orientation'], self.banner[
                                'quirks'] = struct.unpack(
                                    '<BBIIIIIBB', streamInfo[:24])
            self.bannerLength = self.banner['length']
            print('banner:', self.banner)
            self.getOneImageInfo(streamInfo[24:])
        else:
            self.getOneImageInfo(streamInfo)

    def getOneImageInfo(self, stream):
        """Reassemble [4-byte LE length][body] frames from a raw chunk.

        Frames may span chunks (state carried in frameBody*/readFrameBytes)
        and a chunk may contain several frames (handled by recursing on the
        tail). Each complete body is passed to datahandler.
        """
        for i, v in enumerate(stream):
            if self.readFrameBytes < 4:
                # Still accumulating the 4-byte length prefix.
                self.frameBodyLengthStr += stream[i:i + 1]
                if self.readFrameBytes == 3:
                    self.frameBodyLength, = struct.unpack(
                        '<I', self.frameBodyLengthStr)
                self.readFrameBytes += 1
            else:
                if len(stream) - i >= self.frameBodyLength:
                    # The rest of the frame is in this chunk: emit it.
                    self.frameBody += bytes(stream[i:i +
                                                   self.frameBodyLength])
                    self.datahandler(self.frameBody)
                    temp = self.frameBodyLength
                    self.frameBody = b''
                    self.readFrameBytes, self.frameBodyLength = 0, 0
                    self.frameBodyLengthStr = b''
                    if i + temp < len(stream):
                        # More data after this frame: recurse on the tail.
                        self.getOneImageInfo(stream[i + temp:])
                        break
                    else:
                        break
                else:
                    # Frame continues into the next chunk: stash the partial.
                    self.frameBody += bytes(stream[i:len(stream)])
                    self.readFrameBytes += len(stream) - i
                    self.frameBodyLength -= len(stream) - i
                    break

    def datahandler(self, data):
        """Push one complete frame to the browser; never raises."""
        try:
            socketio.emit('imgdata%s' % self.key, data, namespace='/screen')
        except Exception as e:
            print('socketio error:%s' % str(e))

    def errorhandler(self, error=''):
        """Stop the capture and report the error to the browser."""
        self.close()
        data = {'status': 1, 'msg': error}
        socketio.emit('errormsg%s' % self.key, data, namespace='/screen')
        print(data, 'error')

    def processdata(self):
        """Process loop (greenthread): internal queue -> frame parser."""
        while self.push_status > 0:
            try:
                data = self.__dataq.get(timeout=2)
                self.myReadMsg(data)
            except eventlet.queue.Empty:
                pass  # periodic wake-up to re-check push_status
            except Exception as e:
                s = sys.exc_info()
                self.errorhandler('processdataError_%s_%s' %
                                  (str(e), str(s[2].tb_lineno)))
                break
        self.push_status = -1
        print('[processClose]')

    def close(self):
        """Signal both loops to wind down. Returns 1 if a stop was issued,
        0 if the capture was not running."""
        if self.recv_status == 1 and self.push_status == 1:
            self.recv_status = -2
            self.push_status = -2
            socketio.emit('event', 'stop', namespace='/screen')
            print('cap close success')
            return 1
        else:
            print('cap close fail:already close')
            return 0

    def start(self):
        """Connect and spawn both loops. Returns 1 on success, 0 otherwise."""
        if self.recv_status != 1 and self.recv_status != -2 and self.push_status != 1 and self.push_status != -2:
            if self.connect():
                self.recv_status = 1
                self.push_status = 1
                t1 = eventlet.spawn_n(self.recvdata)
                t2 = eventlet.spawn_n(self.processdata)
                print('cap start sucess')
                return 1
            else:
                print('cap start fail:connect error')
                return 0
        else:
            print('cap start fail:already starting')
            return 0
class SockWrapper(object): """ base class for SFK and Client(Rengine) sockets wrappers """ NATIVE_PACKET = ScpPacket # placeholder, change it in successors def __init__(self): self.queue_send = Queue() self.queue_recv = Queue() self.appid = None # interface self.sock = None # interface for packet dispatchers - dockers def put_packet(self, packet): self.queue_send.put(packet) def get_packet(self): return self.queue_recv.get() # sender and recver started as greenthreads def sender(self, callback=lambda: None): """get packet from sending queue, send it via sock. By convention, packet type checking performed before putting in queue """ try: while True: packet = self.queue_send.get() data = packet.assemble() self.sock.sendall(data) # TODO if DEBUG try: if packet.get_msg_type() == 'pong': LOGGER.debug('pong sent %s' % self) except AttributeError: pass except Exception: LOGGER.error(str(self) + " sender error") eventlet.spawn_n(callback) def recver(self, callback=lambda: None): """ recieve packets from sock, check packet's type, put packet to recv queue """ f = self.sock.makefile() try: while True: try: packet_class = determine_packet_type(f) except Disconnection as e: raise Disconnection if packet_class == self.NATIVE_PACKET: packet = packet_class() packet.read_fields(f) self.queue_recv.put(packet) else: LOGGER.error( "{0} recver: unexpected magic".format(str(self))) raise UnexpectedProtocol except Disconnection as e: LOGGER.info("Disconnection: {0}".format(str(self))) except Exception as e: LOGGER.error("recver error: {0} {1}".format(str(self), str(e))) LOGGER.info(str(self) + " recver terminate") eventlet.spawn_n(callback) def close_socket(self): try: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() LOGGER.info("{0} sockwrapper close socket".format(str(self))) except Exception as e: LOGGER.error("Fails socket close: %s" % str(e))
class supportManager():
    """Registry wiring per-device helper clients (minicap screen capture,
    minitouch input, STF service) and allocating the local ports they use."""

    def __init__(self):
        self.minicap_clients = {}
        self.minitouch_clients = {}
        self.stfservice_clients = {}
        self.init_portlist()

    def init_portlist(self):
        """Pre-fill the port pools each client type draws from."""
        self.minicap_q = Queue(50)
        self.minitouch_q = Queue(50)
        self.stfservices_q = Queue(50)
        for port in range(1300, 1350):
            self.minicap_q.put(port)
            self.minitouch_q.put(port - 100)
        # STF service uses two consecutive ports (service + agent): step 2.
        for port in range(1100, 1190, 2):
            self.stfservices_q.put(port)

    def _getPort(self, type):
        """Take the next free port for the given client type."""
        if type == 'minicap':
            return self.minicap_q.get()
        elif type == 'minitouch':
            return self.minitouch_q.get()
        else:
            return self.stfservices_q.get()

    def register_minicap(self, serial, minicap_port, deviceInfos):
        room = 'thefuck'
        self.minicap_clients[serial] = Minicap(room,
                                               ('localhost', minicap_port),
                                               serial, deviceInfos)

    def register_minitouch(self, serial, minitouch_port, deviceInfos):
        self.minitouch_clients[serial] = Minitouch(
            ('localhost', minitouch_port), serial, deviceInfos)

    def register_stfservice(self, serial, stfservice_port):
        self.stfservice_clients[serial] = stfServices(
            ('localhost', stfservice_port), serial)

    def register_all(self, serial):
        # FIXME: the register_* methods require port/deviceInfos arguments;
        # these calls would raise TypeError if invoked. Kept for interface
        # compatibility — use init()/init2() instead.
        self.register_minicap(serial)
        self.register_minitouch(serial)
        self.register_stfservice(serial)

    def init2(self, serial):
        """Full bring-up for one device: allocate ports, register and start
        all three clients, then publish the device as ready."""
        deviceInfos = get_deviceInfo(serial)
        minitouch_port = self._getPort('minitouch')
        self.register_minitouch(serial, minitouch_port, deviceInfos)
        self.minitouch_clients[serial].init()
        self.minitouch_clients[serial].start()
        minicap_port = self._getPort('minicap')
        self.register_minicap(serial, minicap_port, deviceInfos)
        self.minicap_clients[serial].init()
        stfservice_port = self._getPort('stfservice')
        self.register_stfservice(serial, stfservice_port)
        self.stfservice_clients[serial].init()
        self.stfservice_clients[serial].start()
        db.setDeviceReady(serial)
        db.setDeviceInfo(serial, {
            'minicap_port': minicap_port,
            'minitouch_port': minitouch_port
        })
        socketio.emit('change', 'hehe', namespace='/default')
        db.setDeviceInfo(serial, get_deviceInfo(serial))
        socketio.emit('change', 'hehe', namespace='/default')

    def init(self, serial):
        """Asynchronous wrapper around init2."""
        eventlet.spawn_n(self.init2, serial)

    def init_all(self, serial):
        self.init_minicap(serial)
        self.init_minitouch(serial)

    def init_minicap(self, serial):
        """Init the minicap client for *serial*; 1 on success, 0 if absent."""
        client = self.minicap_clients.get(serial)
        if client:
            client.init()
            return 1
        else:
            return 0

    def init_minitouch(self, serial):
        """Init the minitouch client for *serial*; 1 on success, 0 if absent."""
        client = self.minitouch_clients.get(serial)
        if client:
            client['touch'].init()
            return 1
        else:
            return 0

    def startCap(self, key):
        """Start screen capture for *key*; returns its result or 0."""
        # Simplified: the original bound t = r and re-tested the same value,
        # making its second "not found" branch unreachable.
        t = self.minicap_clients.get(key)
        if t:
            res = t.start()
            print(res, 'cap start')
            return res
        else:
            print('%s not found' % key)
            return 0

    def updateConfig(self, key, width, height):
        """Update capture projection for *key*; returns its result or 0."""
        t = self.minicap_clients.get(key)
        if t:
            res = t.updateConfig(width, height)
            print(res, 'cap update')
            return res
        else:
            print('%s not found' % key)
            return 0

    def updateRotation(self, key, rotation):
        """Update capture rotation for *key*; returns its result or 0."""
        t = self.minicap_clients.get(key)
        if t:
            res = t.updateRotation(rotation)
            print(res, 'cap update')
            return res
        else:
            print('%s not found' % key)
            return 0

    def stopCap(self, key):
        """Stop screen capture for *key*; returns its result or 0."""
        t = self.minicap_clients.get(key)
        if t:
            res = t.stop()
            print(res, 'cap stop')
            return res
        else:
            print('%s not found' % key)
            return 0

    def startTouch(self, key):
        """Start the minitouch client for *key*; returns its result or 0."""
        r = self.minitouch_clients.get(key)
        if r:
            t = r.get('touch')
            if t:
                res = t.start()
                print(res, 'touch start')
                return res
            else:
                print('%s[touch] client not found' % key)
                return 0
        else:
            # FIX: was print('%s not found') — format args were missing.
            print('%s not found' % key)
            return 0

    def startServices(self, key):
        """Start the STF service client for *key*; returns its result or 0."""
        r = self.stfservice_clients.get(key)
        if r:
            t = r.get('stf')
            if t:
                res = t.start()
                print(res, 'stfServices start')
                return res
            else:
                print('%s[stfServices] client not found' % key)
                return 0
        else:
            # FIX: was print('%s not found') — format args were missing.
            print('%s not found' % key)
            return 0

    def closeAll(self, key):
        """Close and deregister all clients for *key*. Always returns 1."""
        touch_client = self.minitouch_clients.get(key)
        stf_client = self.stfservice_clients.get(key)
        cap_client = self.minicap_clients.get(key)
        if cap_client:
            print('cap', cap_client.close())
        if touch_client:
            t = touch_client.get('touch')
            if t:
                print('touch', t.close())
        if stf_client:
            s = stf_client.get('stf')
            if s:
                print('services', s.close())
        self.minitouch_clients.pop(key)
        self.stfservice_clients.pop(key)
        self.minicap_clients.pop(key)
        del touch_client
        del stf_client
        del cap_client
        return 1

    def sendTouch(self, key, action, data):
        """Dispatch a touch *action* to the minitouch client for *key*.
        Returns 1 on dispatch, 0 if no client is registered."""
        r = self.minitouch_clients.get(key)
        if r:
            # FIX: getattr instead of eval — same dynamic dispatch without
            # evaluating a caller-influenced string as code.
            getattr(r, action)(data)
            return 1
        else:
            return 0

    def sendService(self, key, type_t, data):
        """Dispatch a service command *type_t* to the STF client for *key*.
        Returns 1 on dispatch, 0 if no client is registered."""
        r = self.stfservice_clients.get(key)
        if r:
            # FIX: getattr instead of eval (see sendTouch).
            getattr(r, type_t)(data)
            return 1
        else:
            return 0
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.

    This implementation assumes all eventlet monkey patching is active. It
    is not tested with partial patching.
    """
    # Greenthreads pumping the socket in each direction.
    _read_watcher = None
    _write_watcher = None

    # Green (cooperative) socket/ssl modules used by the base Connection.
    _socket_impl = eventlet.green.socket
    _ssl_impl = eventlet.green.ssl

    # Class-level timer machinery shared by all connections.
    _timers = None
    _timeout_watcher = None
    _new_timer = None

    @classmethod
    def initialize_reactor(cls):
        eventlet.monkey_patch()
        if not cls._timers:
            cls._timers = TimerManager()
            cls._timeout_watcher = eventlet.spawn(cls.service_timeouts)
            cls._new_timer = Event()

    @classmethod
    def create_timer(cls, timeout, callback):
        # Register the timer and wake service_timeouts so it can account
        # for a possibly-earlier deadline.
        timer = Timer(timeout, callback)
        cls._timers.add_timer(timer)
        cls._new_timer.set()
        return timer

    @classmethod
    def service_timeouts(cls):
        """
        cls._timeout_watcher runs in this loop forever.
        It is usually waiting for the next timeout on the cls._new_timer
        Event. When new timers are added, that event is set so that the
        watcher can wake up and possibly set an earlier timeout.
        """
        timer_manager = cls._timers
        while True:
            next_end = timer_manager.service_timeouts()
            # No pending timer: park for a long time until _new_timer fires.
            sleep_time = max(next_end - time.time(), 0) if next_end else 10000
            cls._new_timer.wait(sleep_time)
            cls._new_timer.clear()

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        # Legacy path: ssl_options given without an ssl_context.
        self.uses_legacy_ssl_options = self.ssl_options and not self.ssl_context
        self._write_queue = Queue()
        self._connect_socket()
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def _wrap_socket_from_context(self):
        # Wrap the raw socket with pyOpenSSL using the provided ssl_context.
        _check_pyopenssl()
        self._socket = SSL.Connection(self.ssl_context, self._socket)
        self._socket.set_connect_state()
        if self.ssl_options and 'server_hostname' in self.ssl_options:
            # This is necessary for SNI
            self._socket.set_tlsext_host_name(
                self.ssl_options['server_hostname'].encode('ascii'))

    def _initiate_connection(self, sockaddr):
        if self.uses_legacy_ssl_options:
            super(EventletConnection, self)._initiate_connection(sockaddr)
        else:
            self._socket.connect(sockaddr)
            if self.ssl_context or self.ssl_options:
                self._socket.do_handshake()

    def _match_hostname(self):
        # Verify the peer certificate CN against the endpoint address
        # (pyOpenSSL path only; the legacy path defers to the base class).
        if self.uses_legacy_ssl_options:
            super(EventletConnection, self)._match_hostname()
        else:
            cert_name = self._socket.get_peer_certificate().get_subject(
            ).commonName
            if cert_name != self.endpoint.address:
                # NOTE(review): message likely meant "doesn't match endpoint";
                # left byte-identical since it is a runtime string.
                raise Exception("Hostname verification failed! "
                                "Certificate name '{}' "
                                "doesn't endpoint '{}'".format(
                                    cert_name, self.endpoint.address))

    def close(self):
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True

        log.debug("Closing connection (%s) to %s" %
                  (id(self), self.endpoint))

        # Never kill the greenthread we are currently running on.
        cur_gthread = eventlet.getcurrent()

        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.endpoint, ))

        if not self.is_defunct:
            self.error_all_requests(
                ConnectionShutdown("Connection to %s was closed" %
                                   self.endpoint))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Write loop: drain the write queue onto the socket until error
        # or greenthread kill.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s", self,
                          err)
                self.defunct(err)
                return  # Leave the write loop
            except GreenletExit:  # graceful greenthread exit
                return

    def handle_read(self):
        # Read loop: pull bytes into the io buffer and hand complete data
        # to the protocol parser; empty read means the server closed.
        while True:
            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                log.debug("Exception during socket recv for %s: %s", self,
                          err)
                self.defunct(err)
                return  # leave the read loop
            except GreenletExit:  # graceful greenthread exit
                return

            if buf and self._iobuf.tell():
                self.process_io_buffer()
            else:
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Split outgoing data into out_buffer_size chunks for the write loop.
        # NOTE(review): `xrange` — presumably imported from a six-style
        # compatibility shim at file top; confirm for Python 3.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])
class Chewie:
    """Facilitates EAP supplicant and RADIUS server communication"""

    RADIUS_UDP_PORT = 1812
    PAE_GROUP_ADDRESS = MacAddress.from_string("01:80:C2:00:00:03")

    # seconds to wait after port-up before probing for supplicants
    DEFAULT_PORT_UP_IDENTITY_REQUEST_WAIT_PERIOD = 20
    # seconds between periodic preemptive identity requests
    DEFAULT_PREEMPTIVE_IDENTITY_REQUEST_INTERVAL = 60

    # pylint: disable=too-many-arguments
    def __init__(self, interface_name, logger=None, auth_handler=None,
                 failure_handler=None, logoff_handler=None,
                 radius_server_ip=None, radius_server_port=None,
                 radius_server_secret=None, chewie_id=None):
        self.interface_name = interface_name
        self.log_name = Chewie.__name__
        if logger:
            self.log_name = logger.name + "." + Chewie.__name__
        self.logger = get_logger(self.log_name)
        # callbacks into faucet (or other controller) — may be None
        self.auth_handler = auth_handler
        self.failure_handler = failure_handler
        self.logoff_handler = logoff_handler
        self.radius_server_ip = radius_server_ip
        self.radius_secret = radius_server_secret
        self.radius_server_port = self.RADIUS_UDP_PORT
        if radius_server_port:
            self.radius_server_port = radius_server_port
        self.radius_listen_ip = "0.0.0.0"
        self.radius_listen_port = 0

        self.chewie_id = "44-44-44-44-44-44:"  # used by the RADIUS Attribute
        # 'Called-Station' in Access-Request
        if chewie_id:
            self.chewie_id = chewie_id

        self.state_machines = {}  # port_id_str: { mac : state_machine}
        self.port_to_eapol_id = {}  # port_id: last ID used in preemptive identity request.
        # TODO for port_to_eapol_id - may want to set ID to null (-1...) if sent from the
        #  state machine.
        self.port_status = {}  # port_id: status (true=up, false=down)
        self.port_to_identity_job = {}  # port_id: timerJob

        # queues consumed by the send_* greenthreads below
        self.eap_output_messages = Queue()
        self.radius_output_messages = Queue()

        self.radius_lifecycle = RadiusLifecycle(self.radius_secret,
                                                self.chewie_id, self.logger)
        self.timer_scheduler = timer_scheduler.TimerScheduler(self.logger)

        self.eap_socket = None
        self.mab_socket = None
        self.pool = None
        # NOTE(review): eventlets is assigned None here and [] a few lines
        # down — the first assignment looks redundant; confirm before removing.
        self.eventlets = None
        self.radius_socket = None
        self.interface_index = None

        self.eventlets = []

    def run(self):
        """setup chewie and start socket eventlet threads"""
        self.logger.info("Starting")
        self.setup_eap_socket()
        self.setup_mab_socket()
        self.setup_radius_socket()
        self.start_threads_and_wait()

    def running(self):  # pylint: disable=no-self-use
        """Used to nicely exit the event loops"""
        return True

    def shutdown(self):
        """kill eventlets and quit"""
        for eventlet in self.eventlets:
            eventlet.kill()

    def start_threads_and_wait(self):
        """Start the thread and wait until they complete (hopefully never)"""
        self.pool = GreenPool()

        self.eventlets.append(self.pool.spawn(self.send_eap_messages))
        self.eventlets.append(self.pool.spawn(self.receive_eap_messages))
        self.eventlets.append(self.pool.spawn(self.receive_mab_messages))

        self.eventlets.append(self.pool.spawn(self.send_radius_messages))
        self.eventlets.append(self.pool.spawn(self.receive_radius_messages))

        self.eventlets.append(self.pool.spawn(self.timer_scheduler.run))

        self.pool.waitall()

    def auth_success(self, src_mac, port_id, period,
                     *args, **kwargs):  # pylint: disable=unused-variable
        """authentication shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the successful supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the success is on
            period (int): time (seconds) until the session times out.
        """
        if self.auth_handler:
            self.auth_handler(src_mac, port_id, *args, **kwargs)

        # schedule a reauthentication before the session expires
        self.port_to_identity_job[port_id] = self.timer_scheduler.call_later(
            period, self.reauth_port, src_mac, port_id)

    def auth_failure(self, src_mac, port_id):
        """failure shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the failed supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the failure is on"""
        if self.failure_handler:
            self.failure_handler(src_mac, port_id)

    def auth_logoff(self, src_mac, port_id):
        """logoff shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the logoff supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the logoff is on"""
        if self.logoff_handler:
            self.logoff_handler(src_mac, port_id)

    def port_down(self, port_id):
        """
        should be called by faucet when port has gone down.
        Args:
            port_id (str): id of port.
        """
        # all chewie needs to do is change its internal state.
        # faucet will remove the acls by itself.
        self.set_port_status(port_id, False)

        job = self.port_to_identity_job.get(port_id, None)

        # NOTE(review): state_machines is keyed by str(port_id) elsewhere
        # (see set_port_status / get_state_machine); this raw port_id lookup
        # assumes port_id is already a str — confirm against callers.
        if port_id in self.state_machines:
            del self.state_machines[port_id]

        if job:
            job.cancel()
        self.port_to_eapol_id.pop(port_id, None)

    def port_up(self, port_id):
        """
        should be called by faucet when port has come up
        Args:
            port_id (str): id of port.
        """
        self.logger.info("port %s up", port_id)
        self.set_port_status(port_id, True)

        # probe the port for supplicants after a grace period
        self.port_to_identity_job[port_id] = self.timer_scheduler.call_later(
            self.DEFAULT_PORT_UP_IDENTITY_REQUEST_WAIT_PERIOD,
            self.send_preemptive_identity_request_if_no_active_on_port,
            port_id)

    def send_preemptive_identity_request_if_no_active_on_port(self, port_id):
        """
        If there is no active (in progress, or in state success(2)) supplicant
        send out the preemptive identity request message.
        Args:
            port_id (str):
        """
        self.logger.debug(
            "thinking about executing timer preemptive on port %s", port_id)
        # schedule next request.
        self.port_to_identity_job[port_id] = self.timer_scheduler.call_later(
            self.DEFAULT_PREEMPTIVE_IDENTITY_REQUEST_INTERVAL,
            self.send_preemptive_identity_request_if_no_active_on_port,
            port_id)
        if not self.port_status.get(port_id, False):
            self.logger.debug('cant send output on port %s is down', port_id)
            return

        # NOTE(review): raw port_id lookup; see note in port_down about
        # str(port_id) keys.
        state_machines = self.state_machines.get(port_id, {})
        # pylint: disable=invalid-name
        for sm in state_machines.values():
            if sm.is_in_progress() or sm.is_success():
                self.logger.debug('port is active not sending on port %s',
                                  port_id)
                break
        else:
            # no supplicant active on the port — for/else: loop ran to the end
            self.logger.debug("executing timer premptive on port %s", port_id)
            self.send_preemptive_identity_request(port_id)

    def send_preemptive_identity_request(self, port_id, state_machine=None):
        """
        Message (EAP Identity Request) that notifies supplicant that port is using 802.1X
        Args:
            port_id (str):
        """
        _id = get_random_id()
        # ID of preemptive reauth attempt must be different
        # to ID of initial authentication.
        if state_machine is not None and hasattr(state_machine, 'current_id'):
            while _id == state_machine.current_id:
                _id = get_random_id()
        data = IdentityMessage(self.PAE_GROUP_ADDRESS, _id, Eap.REQUEST, "")
        self.port_to_eapol_id[port_id] = _id
        self.eap_output_messages.put_nowait(
            EapQueueMessage(data, self.PAE_GROUP_ADDRESS,
                            MacAddress.from_string(port_id)))
        self.logger.info("sending premptive on port %s with ID %s",
                         port_id, _id)

    def reauth_port(self, src_mac, port_id):
        """
        Send an Identity Request to src_mac, on port_id. prompting
        the supplicant to re authenticate.
        Args:
            src_mac (MacAddress):
            port_id (str):
        """
        # NOTE(review): raw port_id lookup; see note in port_down about
        # str(port_id) keys.
        state_machine = self.state_machines.get(port_id, {}).get(str(src_mac),
                                                                 None)
        if state_machine and state_machine.is_success():
            self.logger.info('reauthenticating src_mac: %s on port: %s',
                             src_mac, port_id)
            self.send_preemptive_identity_request(port_id, state_machine)
        elif state_machine is None:
            self.logger.debug(
                'not reauthing. state machine on port: %s, mac: %s is none',
                port_id, src_mac)
        else:
            self.logger.debug(
                "not reauthing, authentication is not in success(2) (state: %s)'",
                state_machine.state)

    def set_port_status(self, port_id, status):
        """
        Send status of a port at port_id
        Args:
            port_id ():
            status ():
        """
        port_id_str = str(port_id)
        self.port_status[port_id] = status

        if port_id_str not in self.state_machines:
            self.state_machines[port_id_str] = {}

        # tell every state machine on the port about the status change
        for _, state_machine in self.state_machines[port_id_str].items():
            event = EventPortStatusChange(status)
            state_machine.event(event)

    def setup_eap_socket(self):
        """Setup EAP socket"""
        log_prefix = "%s.EapSocket" % self.logger.name
        self.eap_socket = EapSocket(self.interface_name, log_prefix)
        self.eap_socket.setup()

    def setup_mab_socket(self):
        """Setup Mab socket"""
        log_prefix = "%s.MabSocket" % self.logger.name
        self.mab_socket = MabSocket(self.interface_name, log_prefix)
        self.mab_socket.setup()

    def setup_radius_socket(self):
        """Setup Radius socket"""
        log_prefix = "%s.RadiusSocket" % self.logger.name
        self.radius_socket = RadiusSocket(self.radius_listen_ip,
                                          self.radius_listen_port,
                                          self.radius_server_ip,
                                          self.radius_server_port,
                                          log_prefix)
        self.radius_socket.setup()
        self.logger.info("Radius Listening on %s:%d",
                         self.radius_listen_ip,
                         self.radius_listen_port)

    def send_eap_messages(self):
        """Send EAP messages to Supplicant forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            eap_queue_message = self.eap_output_messages.get()
            self.logger.info("Sending message %s from %s to %s",
                             eap_queue_message.message,
                             str(eap_queue_message.port_mac),
                             str(eap_queue_message.src_mac))
            self.eap_socket.send(
                MessagePacker.ethernet_pack(eap_queue_message.message,
                                            eap_queue_message.port_mac,
                                            eap_queue_message.src_mac))

    def send_eth_to_state_machine(self, packed_message):
        """Send an ethernet frame to MAB State Machine"""
        ethernet_packet = EthernetPacket.parse(packed_message)
        port_id = ethernet_packet.dst_mac
        src_mac = ethernet_packet.src_mac

        self.logger.info("Sending MAC to MAB State Machine: %s", src_mac)
        # message_id == -2 selects the MAB state machine in get_state_machine
        message_id = -2
        state_machine = self.get_state_machine(src_mac, port_id, message_id)
        event = EventMessageReceived(ethernet_packet, port_id)
        state_machine.event(event)
        # NOTE: Should probably throttle packets in once one is received

    def receive_eap_messages(self):
        """receive eap messages from supplicant forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for eap.")
            packed_message = self.eap_socket.receive()
            self.logger.info("Received packed_message: %s",
                             str(packed_message))
            try:
                eap, dst_mac = MessageParser.ethernet_parse(packed_message)
            except MessageParseError as exception:
                self.logger.warning(
                    "MessageParser.ethernet_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message,
                    exception)
                continue

            self.logger.info("Received eap message: %s", str(eap))
            self.send_eap_to_state_machine(eap, dst_mac)

    def receive_mab_messages(self):
        """Receive DHCP request for MAB."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for MAB activity.")
            packed_message = self.mab_socket.receive()
            self.logger.info("Received DHCP packet for MAB. packed_message: %s",
                             str(packed_message))
            self.send_eth_to_state_machine(packed_message)

    def send_eap_to_state_machine(self, eap, dst_mac):
        """sends an eap message to the state machine"""
        self.logger.info("eap EAP(): %s", eap)
        message_id = getattr(eap, 'message_id', -1)
        state_machine = self.get_state_machine(eap.src_mac, dst_mac,
                                               message_id)

        # Check for response to preemptive_eap
        preemptive_eap_message_id = self.port_to_eapol_id.get(str(dst_mac), -2)
        if message_id != -1 and message_id == preemptive_eap_message_id:
            self.logger.debug(
                'eap packet is response to chewie initiated authentication')
            event = EventPreemptiveEAPResponseMessageReceived(
                eap, dst_mac, preemptive_eap_message_id)
        else:
            event = EventMessageReceived(eap, dst_mac)

        state_machine.event(event)

    def send_radius_messages(self):
        """send RADIUS messages to RADIUS Server forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            radius_output_bits = self.radius_output_messages.get()
            packed_message = self.radius_lifecycle.process_outbound(
                radius_output_bits)
            self.radius_socket.send(packed_message)
            self.logger.info("sent radius message.")

    def receive_radius_messages(self):
        """receive RADIUS messages from RADIUS server forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for radius.")
            packed_message = self.radius_socket.receive()
            try:
                radius = MessageParser.radius_parse(packed_message,
                                                    self.radius_secret,
                                                    self.radius_lifecycle)
            except MessageParseError as exception:
                self.logger.warning(
                    "MessageParser.radius_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message,
                    exception)
                continue
            self.logger.info("Received RADIUS message: %s", str(radius))
            self.send_radius_to_state_machine(radius)

    def send_radius_to_state_machine(self, radius):
        """sends a radius message to the state machine"""
        event = self.radius_lifecycle.build_event_radius_message_received(
            radius)
        state_machine = self.get_state_machine_from_radius_packet_id(
            radius.packet_id)
        state_machine.event(event)

    def get_state_machine_from_radius_packet_id(self, packet_id):
        """Gets a FullEAPStateMachine from the RADIUS message packet_id
        Args:
            packet_id (int): id of the received RADIUS message
        Returns:
            FullEAPStateMachine
        """
        return self.get_state_machine(
            **self.radius_lifecycle.packet_id_to_mac[packet_id])

    # TODO change message_id functionality
    def get_state_machine(self, src_mac, port_id, message_id=-1):
        """Gets or creates if it does not already exist an FullEAPStateMachine
        for the src_mac.
        Args:
            message_id (int): eap message id, -1 means none found.
            src_mac (MacAddress): who's to get.
            port_id (MacAddress): ID of the port where the src_mac is.

        Returns:
            FullEAPStateMachine
        """
        port_id_str = str(port_id)
        src_mac_str = str(src_mac)
        port_state_machines = self.state_machines.get(port_id_str, None)
        if port_state_machines is None:
            self.state_machines[port_id_str] = {}

        self.logger.info("Port based state machines are as follows: %s",
                         self.state_machines[port_id_str])
        state_machine = self.state_machines[port_id_str].get(src_mac_str, None)

        if not state_machine and message_id == -2:
            # Do MAB
            self.logger.info("Creating MAB State Machine")
            log_prefix = "%s.SM - port: %s, client: %s" % (
                self.logger.name, port_id_str, src_mac)
            state_machine = MacAuthenticationBypassStateMachine(
                self.radius_output_messages, src_mac,
                self.timer_scheduler, self.auth_success,
                self.auth_failure, log_prefix)
            self.state_machines[port_id_str][src_mac_str] = state_machine
            return state_machine

        if not state_machine:
            self.logger.info("Creating EAP FULL State Machine")
            log_prefix = "%s.SM - port: %s, client: %s" % (
                self.logger.name, port_id_str, src_mac)
            state_machine = FullEAPStateMachine(self.eap_output_messages,
                                                self.radius_output_messages,
                                                src_mac, self.timer_scheduler,
                                                self.auth_success,
                                                self.auth_failure,
                                                self.auth_logoff, log_prefix)
            self.state_machines[port_id_str][src_mac_str] = state_machine
            self.logger.debug(
                "created new state machine for '%s' on port '%s'",
                src_mac_str, port_id_str)

        return state_machine
class Chewie:
    """Facilitates EAP supplicant and RADIUS server communication"""

    _RADIUS_UDP_PORT = 1812
    PAE_GROUP_ADDRESS = MacAddress.from_string("01:80:C2:00:00:03")

    # pylint: disable=too-many-arguments
    def __init__(self, interface_name, logger=None, auth_handler=None,
                 failure_handler=None, logoff_handler=None,
                 radius_server_ip=None, radius_server_port=None,
                 radius_server_secret=None, chewie_id=None):
        self.interface_name = interface_name
        self.log_name = Chewie.__name__
        if logger:
            self.log_name = logger.name + "." + Chewie.__name__
        self.logger = get_logger(self.log_name)
        # callbacks into faucet (or other controller) — may be None
        self.auth_handler = auth_handler
        self.failure_handler = failure_handler
        self.logoff_handler = logoff_handler
        self.radius_server_ip = radius_server_ip
        self.radius_secret = radius_server_secret
        self.radius_server_port = self._RADIUS_UDP_PORT
        if radius_server_port:
            self.radius_server_port = radius_server_port
        self.radius_listen_ip = "0.0.0.0"
        self.radius_listen_port = 0

        self.chewie_id = "44-44-44-44-44-44:"  # used by the RADIUS Attribute
        # 'Called-Station' in Access-Request
        if chewie_id:
            self.chewie_id = chewie_id

        self.port_to_eapol_id = {}  # port_id: last ID used in preemptive identity request.
        # TODO for port_to_eapol_id - may want to set ID to null (-1...) if sent from the
        #  state machine.
        self._managed_ports = {}  # port_id_str: ManagedPort

        # queues consumed by the _send_* greenthreads below
        self.eap_output_messages = Queue()
        self.radius_output_messages = Queue()

        self.radius_lifecycle = RadiusLifecycle(self.radius_secret,
                                                self.chewie_id, self.logger)
        self.timer_scheduler = timer_scheduler.TimerScheduler(self.logger)

        self._eap_socket = None
        self._mab_socket = None
        self._radius_socket = None
        self.pool = None
        self.eventlets = []

    def run(self):
        """setup chewie and start socket eventlet threads"""
        self.logger.info("Starting")
        self._setup_eap_socket()
        self._setup_mab_socket()
        self._setup_radius_socket()
        self._start_threads_and_wait()

    def running(self):  # pylint: disable=no-self-use
        """Used to nicely exit the event loops"""
        return True

    def shutdown(self):
        """kill eventlets and quit"""
        for eventlet in self.eventlets:
            eventlet.kill()

    def _start_threads_and_wait(self):
        """Start the thread and wait until they complete (hopefully never)"""
        self.pool = GreenPool()

        self.eventlets.append(self.pool.spawn(self._send_eap_messages))
        self.eventlets.append(self.pool.spawn(self._receive_eap_messages))
        self.eventlets.append(self.pool.spawn(self._receive_mab_messages))

        self.eventlets.append(self.pool.spawn(self._send_radius_messages))
        self.eventlets.append(self.pool.spawn(self._receive_radius_messages))

        self.eventlets.append(self.pool.spawn(self.timer_scheduler.run))

        self.pool.waitall()

    def _auth_success(self, src_mac, port_id, period,
                      *args, **kwargs):  # pylint: disable=unused-variable
        """authentication shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the successful supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the success is on
            period (int): time (seconds) until the session times out.
        """
        if self.auth_handler:
            self.auth_handler(src_mac, port_id, *args, **kwargs)

        managed_port = self._get_managed_port(port_id)
        managed_port.start_port_session(period, src_mac)

    def _auth_failure(self, src_mac, port_id):
        """failure shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the failed supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the failure is on"""
        if self.failure_handler:
            self.failure_handler(src_mac, port_id)

        # TODO Need to stop sessions on Failure

    def _auth_logoff(self, src_mac, port_id):
        """logoff shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the logoff supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the logoff is on"""
        if self.logoff_handler:
            self.logoff_handler(src_mac, port_id)

        # TODO Need to stop sessions on Logoff

    def _get_managed_port(self, port_id):
        """Get (or lazily create) the ManagedPort for str(port_id)."""
        port_id = str(port_id)
        if port_id in self._managed_ports:
            return self._managed_ports[port_id]

        managed_port = ManagedPort(port_id, self.logger.name,
                                   self.timer_scheduler,
                                   self.eap_output_messages,
                                   self.radius_output_messages)
        self._managed_ports[port_id] = managed_port
        return managed_port

    def port_down(self, port_id):
        """
        should be called by faucet when port has gone down.
        Args:
            port_id (str): id of port.
        """
        # all chewie needs to do is change its internal state.
        # faucet will remove the acls by itself.
        self.logger.info("port %s down", port_id)
        managed_port = self._get_managed_port(port_id)
        managed_port.status = False
        managed_port.stop_identity_requests()

    def port_up(self, port_id):
        """
        should be called by faucet when port has come up
        Args:
            port_id (str): id of port.
        """
        self.logger.info("port %s up", port_id)
        managed_port = self._get_managed_port(port_id)
        managed_port.status = True
        managed_port.start_identity_requests()

    def _setup_eap_socket(self):
        """Setup EAP socket"""
        log_prefix = "%s.EapSocket" % self.logger.name
        self._eap_socket = EapSocket(self.interface_name, log_prefix)
        self._eap_socket.setup()

    def _setup_mab_socket(self):
        """Setup Mab socket"""
        log_prefix = "%s.MabSocket" % self.logger.name
        self._mab_socket = MabSocket(self.interface_name, log_prefix)
        self._mab_socket.setup()

    def _setup_radius_socket(self):
        """Setup Radius socket"""
        log_prefix = "%s.RadiusSocket" % self.logger.name
        self._radius_socket = RadiusSocket(self.radius_listen_ip,
                                           self.radius_listen_port,
                                           self.radius_server_ip,
                                           self.radius_server_port,
                                           log_prefix)
        self._radius_socket.setup()
        self.logger.info("Radius Listening on %s:%d",
                         self.radius_listen_ip,
                         self.radius_listen_port)

    def _send_eap_messages(self):
        """Send EAP messages to Supplicant forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            eap_queue_message = self.eap_output_messages.get()
            self.logger.info("Sending message %s from %s to %s",
                             eap_queue_message.message,
                             str(eap_queue_message.port_mac),
                             str(eap_queue_message.src_mac))
            self._eap_socket.send(
                MessagePacker.ethernet_pack(eap_queue_message.message,
                                            eap_queue_message.port_mac,
                                            eap_queue_message.src_mac))

    def _send_eth_to_state_machine(self, packed_message):
        """Send an ethernet frame to MAB State Machine"""
        ethernet_packet = EthernetPacket.parse(packed_message)
        port_id = ethernet_packet.dst_mac
        src_mac = ethernet_packet.src_mac

        self.logger.info("Sending MAC to MAB State Machine: %s", src_mac)
        # message_id == -2 selects the MAB state machine in get_state_machine
        message_id = -2
        state_machine = self.get_state_machine(src_mac, port_id, message_id)
        event = EventMessageReceived(ethernet_packet, port_id)
        state_machine.event(event)
        # NOTE: Should probably throttle packets in once one is received

    def _receive_eap_messages(self):
        """receive eap messages from supplicant forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for eap.")
            packed_message = self._eap_socket.receive()
            self.logger.info("Received packed_message: %s",
                             str(packed_message))
            try:
                eap, dst_mac = MessageParser.ethernet_parse(packed_message)
            except MessageParseError as exception:
                self.logger.warning(
                    "MessageParser.ethernet_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message,
                    exception)
                continue

            self.logger.info("Received eap message: %s", str(eap))
            self._send_eap_to_state_machine(eap, dst_mac)

    def _receive_mab_messages(self):
        """Receive DHCP request for MAB."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for MAB activity.")
            packed_message = self._mab_socket.receive()
            self.logger.info("Received DHCP packet for MAB. packed_message: %s",
                             str(packed_message))
            self._send_eth_to_state_machine(packed_message)

    def _send_eap_to_state_machine(self, eap, dst_mac):
        """sends an eap message to the state machine"""
        self.logger.info("eap EAP(): %s", eap)
        message_id = getattr(eap, 'message_id', -1)
        state_machine = self.get_state_machine(eap.src_mac, dst_mac,
                                               message_id)

        # Check for response to preemptive_eap
        preemptive_eap_message_id = self.port_to_eapol_id.get(str(dst_mac), -2)
        if message_id != -1 and message_id == preemptive_eap_message_id:
            self.logger.debug(
                'eap packet is response to chewie initiated authentication')
            event = EventPreemptiveEAPResponseMessageReceived(
                eap, dst_mac, preemptive_eap_message_id)
        else:
            event = EventMessageReceived(eap, dst_mac)

        state_machine.event(event)

    def _send_radius_messages(self):
        """send RADIUS messages to RADIUS Server forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            radius_output_bits = self.radius_output_messages.get()
            packed_message = self.radius_lifecycle.process_outbound(
                radius_output_bits)
            self._radius_socket.send(packed_message)
            self.logger.info("sent radius message.")

    def _receive_radius_messages(self):
        """receive RADIUS messages from RADIUS server forever."""
        while self.running():
            sleep(0)  # yield to other greenthreads
            self.logger.info("waiting for radius.")
            packed_message = self._radius_socket.receive()
            try:
                radius = MessageParser.radius_parse(packed_message,
                                                    self.radius_secret,
                                                    self.radius_lifecycle)
            except MessageParseError as exception:
                self.logger.warning(
                    "MessageParser.radius_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message,
                    exception)
                continue
            self.logger.info("Received RADIUS message: %s", str(radius))
            self._send_radius_to_state_machine(radius)

    def _send_radius_to_state_machine(self, radius):
        """sends a radius message to the state machine"""
        event = self.radius_lifecycle.build_event_radius_message_received(
            radius)
        state_machine = self._get_state_machine_from_radius_packet_id(
            radius.packet_id)
        state_machine.event(event)

    def _get_state_machine_from_radius_packet_id(self, packet_id):
        """Gets a FullEAPStateMachine from the RADIUS message packet_id
        Args:
            packet_id (int): id of the received RADIUS message
        Returns:
            FullEAPStateMachine
        """
        return self.get_state_machine(
            **self.radius_lifecycle.packet_id_to_mac[packet_id])

    # TODO change message_id functionality
    # TODO Make Private
    def get_state_machine(self, src_mac, port_id, message_id=-1):
        """Gets or creates if it does not already exist an FullEAPStateMachine
        for the src_mac.
        Args:
            message_id (int): eap message id, -1 means none found.
            src_mac (MacAddress): who's to get.
            port_id (MacAddress): ID of the port where the src_mac is.

        Returns:
            FullEAPStateMachine
        """
        port_id_str = str(port_id)
        src_mac_str = str(src_mac)
        port_state_machines = self._get_managed_port(port_id).state_machines

        self.logger.info("Port based state machines are as follows: %s",
                         port_state_machines)
        state_machine = port_state_machines.get(src_mac_str, None)

        if not state_machine and message_id == -2:
            # Do MAB
            self.logger.info("Creating MAB State Machine")
            log_prefix = "%s.SM - port: %s, client: %s" % (
                self.logger.name, port_id_str, src_mac)
            state_machine = MacAuthenticationBypassStateMachine(
                self.radius_output_messages, src_mac,
                self.timer_scheduler, self._auth_success,
                self._auth_failure, log_prefix)
            port_state_machines[src_mac_str] = state_machine
            return state_machine

        if not state_machine:
            self.logger.info("Creating EAP FULL State Machine")
            log_prefix = "%s.SM - port: %s, client: %s" % (
                self.logger.name, port_id_str, src_mac)
            state_machine = FullEAPStateMachine(self.eap_output_messages,
                                                self.radius_output_messages,
                                                src_mac, self.timer_scheduler,
                                                self._auth_success,
                                                self._auth_failure,
                                                self._auth_logoff, log_prefix)
            port_state_machines[src_mac_str] = state_machine
            self.logger.debug(
                "created new state machine for '%s' on port '%s'",
                src_mac_str, port_id_str)

        return state_machine

    @property
    def state_machines(self):
        """state_machines property returns a list of all state machines
        managed by Chewie"""
        state_machines = {}
        for port in self._managed_ports.values():
            if port.state_machines:
                state_machines[port.port_id] = port.state_machines
        return state_machines

    @property
    def clients(self):
        """clients property returns a list of all clients managed by Chewie"""
        clients = []
        for port in self._managed_ports.values():
            clients.extend(port.clients)
        return clients
class stfServices():
    """Bridges one Android device's STF service/agent sockets (protobuf
    Envelope protocol) to socketio clients.

    Two greenthreads run per device: ``send`` drains ``serviceQueue`` onto
    the service/agent sockets, ``getInfo`` reads events/responses back.
    """

    def __init__(self, addr, serial):
        self.serial = serial
        self.addr = addr  # (host, service_port); agent listens on port + 1
        # self.desiredState=StateQueue()
        self.serviceQueue = Queue(500)
        # Status codes: 0 = initial; 1 = running; -1 = closed; -2 = closing.
        self.send_status = 0
        self.get_status = 0
        self.service = Service(serial, [self.addr[1], self.addr[1] + 1])
        self.msgQ = {}  # envelope id (str) -> Envelope awaiting getResponse
        self.phone = None  # device properties once GET_PROPERTIES returns

    def log(self, log):
        print('[%s-stfService]:%s' % (self.serial, log))

    def createSocket(self):
        """Open the service socket (addr) and agent socket (addr port + 1).

        Returns 1 on success, 0 on failure (after reporting the error).
        """
        try:
            self.log('addr:%s_%s' % (self.addr[0], self.addr[1]))
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket22 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(0.5)
            self.socket.connect(self.addr)
            time.sleep(1)
            self.socket22.connect((self.addr[0], self.addr[1] + 1))
            return 1
        except Exception as e:
            self.errorhandler('connectError_' + str(e))
            return 0

    def install(self):
        self.log('installing stfservice resource')
        self.service.installAll()

    def init(self):
        self.install()

    def errorhandler(self, error=''):
        """Close the connection and report the error to socketio clients."""
        self.close()
        data = {'status': 1, 'msg': error}
        socketio.emit('errormsg%s' % self.serial, data, namespace='/touch')
        print(data, 'error')

    def responseHandler(self, data):
        if not data:
            return
        data = delimitedStream(data)
        envelop = Envelope()
        envelop.ParseFromString(data)
        print(envelop.type, envelop.id, 'responseHandler')

    def eventHandler(self, data):
        """Dispatch an incoming Envelope: responses (with id) go to msgQ for
        getResponse; unsolicited events are logged / forwarded."""
        data = delimitedStream(data)
        envelop = Envelope()
        envelop.ParseFromString(data)
        if envelop.id:
            # A reply to a runServiceCommand request — park it for getResponse.
            self.msgQ[str(envelop.id)] = envelop
        else:
            etype = envelop.type
            if etype == EVENT_BATTERY:
                temp = BatteryEvent()
                temp.ParseFromString(envelop.message)
                print('BatteryEvent', temp.status, temp.health, temp.level,
                      type(temp))
            elif etype == EVENT_AIRPLANE_MODE:
                temp = AirplaneModeEvent()
                temp.ParseFromString(envelop.message)
                print('AirplaneModeEvent', temp.enabled)
            elif etype == EVENT_BROWSER_PACKAGE:
                temp = BrowserPackageEvent()
                temp.ParseFromString(envelop.message)
                print('BrowserPackageEvent', temp.selected)
                for app in temp.apps:
                    print('app', app.name, app.component)
            elif etype == EVENT_CONNECTIVITY:
                temp = ConnectivityEvent()
                temp.ParseFromString(envelop.message)
                print('ConnectivityEvent', temp.connected)
            elif etype == EVENT_PHONE_STATE:
                temp = PhoneStateEvent()
                temp.ParseFromString(envelop.message)
                print('PhoneStateEvent', temp.state)
            elif etype == EVENT_ROTATION:
                temp = RotationEvent()
                temp.ParseFromString(envelop.message)
                print('RotationEvent', temp.rotation)
                self.notify('RotationEvent', {'rotation': temp.rotation})
            else:
                print('heheh', envelop.type)

    def notify(self, eventname, data):
        socketio.emit('event', {
            'eventname': eventname,
            'data': data
        }, namespace='/stfservice' + self.serial)

    def getInfo(self):
        """Receive loop greenthread: read from the service socket until told
        to stop (get_status <= 0) or an error occurs."""
        buffersize = 1024
        while self.get_status > 0:
            try:
                data = self.socket.recv(buffersize)
                if data:
                    self.eventHandler(data)
                else:
                    # self.log('stfservice recv empty data')
                    time.sleep(0.5)
            except socket.timeout:
                pass  # 0.5s recv timeout — just poll get_status again
            except Exception as e:
                self.errorhandler('getInfoError_%s' % str(e))
                break
        self.get_status = -1
        self.socket.close()
        self.log('[get close]')

    def send(self):
        """Send loop greenthread: drain serviceQueue onto the agent
        (socket22) or service (socket) socket until told to stop."""
        while self.send_status > 0:
            try:
                data = self.serviceQueue.get(False)
                if data[0] == 'agent':
                    self.socket22.send(data[1])
                else:
                    self.socket.send(data[1])
                # self.sendhandler(data)
            except eventlet.queue.Empty:
                time.sleep(0.01)
            except Exception as e:
                # print (str(e))
                self.errorhandler('sendError_%s' % str(e))
                break
        self.send_status = -1
        self.log('[send close]')

    def sendF(self, data):
        self.socket.send(data)

    def start(self):
        """Connect and spawn the send/receive greenthreads.

        Returns 1 on success, 0 if already started/closing or connect failed.
        """
        self.log('start')
        # BUG FIX: the original guard tested `send_status != 1` and
        # `get_status != -2` twice each, never checking get_status != 1 or
        # send_status != -2; guard against either loop already running (1)
        # or still shutting down (-2).
        if (self.send_status != 1 and self.send_status != -2
                and self.get_status != 1 and self.get_status != -2):
            if self.createSocket():
                self.get_status = 1
                self.send_status = 1
                eventlet.spawn_n(self.send)
                eventlet.spawn_n(self.getInfo)
                self.log('stfservice start success')
                return 1
            else:
                self.log('stfservice start failed:connect error')
                return 0
        else:
            self.log('stfservice already started')
            return 0

    def close(self):
        """Ask both loops to stop. Returns 1 if a shutdown was initiated."""
        if self.send_status == 1 and self.get_status == 1:
            # The loops notice -2, fall out, and set their status to -1.
            self.send_status = -2
            self.get_status = -2
            socketio.emit('event2', 'stop', namespace='/stfservice')
            print('stfservice close success')
            return 1
        else:
            print('stfservice close fail:already close')
            return 0

    def getkey(self, keyname):
        """Map a key name to its Android KEYCODE_*, or None if unknown."""
        key = keyMap.get('KEYCODE_' + keyname.upper())
        if key:
            return key
        else:
            print('unKnown key:%s' % keyname)
            return None

    def runAgentCommand(self, type1, message):
        """Queue a fire-and-forget command Envelope for the agent socket."""
        envelop = Envelope()
        envelop.type = type1
        envelop.message = message
        self.serviceQueue.put(
            ['agent', delimitingStream(envelop.SerializeToString())])

    def runServiceCommand(self, mid, typeT, message):
        """Queue a request Envelope (with id *mid*) for the service socket
        and spawn a greenthread to collect its response."""
        envelop = Envelope()
        envelop.type = typeT
        envelop.message = message
        envelop.id = mid
        self.serviceQueue.put(
            ['service', delimitingStream(envelop.SerializeToString())])
        eventlet.spawn_n(self.getResponse, mid)

    def getProperties(self, data):
        d = GetPropertiesRequest()
        d.properties.extend(['imei', 'phoneNumber', 'iccid', 'network'])
        mid = random.randint(10001, 99999)
        self.runServiceCommand(mid, GET_PROPERTIES, d.SerializeToString())

    def GetBrowsersRequest(self, data):
        d = GetBrowsersRequest()
        mid = random.randint(10001, 99999)
        # BUG FIX: was GET_PROPERTIES (copy-paste); getResponse expects a
        # GET_BROWSERS reply for this request.
        self.runServiceCommand(mid, GET_BROWSERS, d.SerializeToString())

    def setlockStatue(self, data):
        d = SetKeyguardStateRequest()
        if data['enabled'] == True or data['enabled'] == 'true':
            d.enabled = True
        else:
            d.enabled = False
        mid = random.randint(10001, 99999)
        # BUG FIX: was GET_PROPERTIES (copy-paste); getResponse expects a
        # SET_KEYGUARD_STATE reply for this request.
        self.runServiceCommand(mid, SET_KEYGUARD_STATE, d.SerializeToString())

    def getResponse(self, mid):
        """Poll msgQ (filled by eventHandler) for the reply to request *mid*,
        for up to ~1 second (10 x 0.1s)."""
        for i in range(10):
            envelop = self.msgQ.get(str(mid))
            if envelop:
                self.msgQ.pop(str(mid))
                if envelop.type == GET_PROPERTIES:
                    temp = GetPropertiesResponse()
                    temp.ParseFromString(envelop.message)
                    self.phone = temp.properties
                    print(temp)
                elif envelop.type == GET_BROWSERS:
                    temp = GetBrowsersResponse()
                    temp.ParseFromString(envelop.message)
                    print(temp, temp.selected)
                elif envelop.type == SET_KEYGUARD_STATE:
                    temp = SetKeyguardStateResponse()
                    temp.ParseFromString(envelop.message)
                    print(temp)
                else:
                    print('else')
                    return
                return
            else:
                time.sleep(0.1)
        print('nothing')

    # NOTE(review): `type` shadows the builtin, but the name is part of the
    # public dispatch interface — left unchanged.
    def type(self, data):
        d = DoTypeRequest()
        d.text = data['text']
        self.runAgentCommand(DO_TYPE, d.SerializeToString())

    def keyDown(self, data):
        d = KeyEventRequest()
        d.event = DOWN
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def keyUp(self, data):
        d = KeyEventRequest()
        d.event = UP
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def keyPress(self, data):
        d = KeyEventRequest()
        d.event = PRESS
        key = self.getkey(data['key'])
        if key:
            d.keyCode = key
            self.runAgentCommand(DO_KEYEVENT, d.SerializeToString())

    def wake(self, data):
        d = DoWakeRequest()
        self.runAgentCommand(DO_WAKE, d.SerializeToString())
        print('dowake')

    def rotate(self, data):
        d = SetRotationRequest()
        d.rotation = data['rotation']
        d.lock = data['lock']
        self.runAgentCommand(SET_ROTATION, d.SerializeToString())
class BaseNode(object):
    """A peer node in a simulated P2P block-transfer network.

    Runs a main control-message loop plus two worker greenthreads (sender
    and receiver) that move block parts over eventlet queues, collecting
    throughput statistics along the way.
    """

    SIZE = 256           # parts per block
    INPUT_SPEED = 512    # parts/second the node can receive
    OUTPUT_SPEED = 512   # parts/second the node can send
    BUFFER_SIZE = 10
    BITRATE = 256

    def __init__(self, id_):
        self.id = id_
        self._peers = {}
        self.peers_info = {}
        self.available_peers = []
        self.main_channel = PriorityQueue()
        # Capacity 1 so senders back off while a part is in flight.
        self.data_channel = Queue(1)
        self.sending_queue = Queue()
        self.receiving_queue = Queue()
        self.buffer = {}
        # for stats
        self.sent_bytes = 0
        self.received_bytes = 0
        self.delays = {}

    def __repr__(self):
        return "Node(id=%d, peers=%s)" % (self.id, self.peers_info.keys())

    @property
    def peers(self):
        return self._peers

    @peers.setter
    def peers(self, peers):
        # Setting peers resets availability and per-peer info.
        self._peers = peers
        self.available_peers = peers.keys()
        self.peers_info = dict((peer_id, []) for peer_id in peers.keys())

    def run(self):
        """Start the main loop in its own greenthread."""
        return eventlet.spawn(self._do_main_loop)

    def _do_main_loop(self):
        """Dispatch control messages until an ``exit`` message arrives.

        :returns: dict of transfer statistics (input/output load, delays).
        """
        sending_thread = eventlet.spawn(self._sending, self.sending_queue,
                                        self.main_channel)
        receiving_thread = eventlet.spawn(self._receiving,
                                          self.receiving_queue,
                                          self.main_channel)
        start_time = time.time()
        while True:
            message = self.main_channel.get()
            block_id = message.block_id
            peer_id = message.sender_id
            log.info("%s: %s" % (self, message))
            if message.type == MessageType.notify:
                self._do_receive_notify(peer_id, block_id)
            elif message.type == MessageType.request:
                self._do_receive_request(peer_id, block_id)
            elif message.type == MessageType.done_sending:
                self._after_sending(peer_id, block_id)
            elif message.type == MessageType.done_receiving:
                self._after_receiving(peer_id, block_id, message.payload[0])
            elif message.type == MessageType.exit:
                self._broadcast(block_id, MessageType.exit)
                sending_thread.kill()
                receiving_thread.kill()
                break
            self._try_to_request()
            eventlet.sleep(0)
            log.debug("%s: available peers - %s"
                      % (self, self.available_peers))
        transmission_time = time.time() - start_time
        stats = {
            'input_load': round(
                self.received_bytes / transmission_time / self.INPUT_SPEED,
                4),
            'output_load': round(
                self.sent_bytes / transmission_time / self.OUTPUT_SPEED,
                4),
            'delays': self.delays,
        }
        return stats

    def _sending(self, queue, back_queue):
        """Push block parts to peers, throttled to OUTPUT_SPEED."""
        while True:
            block_id, receiver_id, part = queue.get()
            receiver = self.peers[receiver_id]
            if receiver.data_channel.full():
                # Receiver busy: requeue and yield.
                queue.put((block_id, receiver.id, part))
                eventlet.sleep(0)
                continue
            receiver.data_channel.put((block_id, self.id, part))
            if part < self.SIZE - 1:
                queue.put((block_id, receiver_id, part + 1))
            else:
                self.sent_bytes += self.SIZE
                back_queue.put(Message(MessageType.done_sending,
                                       receiver_id, block_id))
            # BUG FIX: was ``1/self.OUTPUT_SPEED`` — integer division under
            # Python 2 yields 0 and disables the rate limit entirely.
            eventlet.sleep(1.0 / self.OUTPUT_SPEED)

    def _receiving(self, queue, back_queue):
        """Drain block parts from the data channel, throttled to INPUT_SPEED.

        NOTE(review): reads self.data_channel rather than the ``queue``
        argument — presumably intentional, but worth confirming.
        """
        while True:
            block_id, sender_id, part = self.data_channel.get()
            if part == self.SIZE - 1:
                self.received_bytes += self.SIZE
                back_queue.put(Message(MessageType.done_receiving,
                                       sender_id, block_id, block_id))
            # BUG FIX: float division, as in _sending().
            eventlet.sleep(1.0 / self.INPUT_SPEED)

    def _after_receiving(self, sender_id, block_id, block):
        """Store a fully-received block, free the sender, notify peers."""
        self.buffer[block_id] = block
        if sender_id != self.id:
            self.available_peers.append(sender_id)
        self.delays[block_id] = time.time()
        self._broadcast(block_id, MessageType.notify)

    def _do_receive_notify(self, sender_id, block_id):
        log.info("%s: Notify about (%d) from {%d}"
                 % (self, block_id, sender_id))

    def _do_receive_request(self, sender_id, block_id):
        assert (block_id in self.buffer.keys()), "WTF?!"
        if sender_id not in self.available_peers:
            # Peer busy: requeue the request for later and yield.
            self.main_channel.put(Message(MessageType.request, sender_id,
                                          block_id))
            eventlet.sleep(0)
            return
        self.available_peers.remove(sender_id)
        self.sending_queue.put((block_id, sender_id, 0))

    def _after_sending(self, receiver_id, block_id):
        self.available_peers.append(receiver_id)

    def _try_to_request(self):
        # Strategy hook: concrete subclasses decide what to request next.
        raise NotImplementedError

    def _broadcast(self, block_id, message_type):
        for peer in self.peers.values():
            peer.main_channel.put(Message(message_type, self.id, block_id))
class Client(baseasync.BaseAsync):
    """Eventlet-based asynchronous client.

    Runs a writer greenthread that drains an outgoing queue and a reader
    greenthread that receives responses; a graceful shutdown closes the
    connection only once everything queued and pending has drained.
    """

    def __init__(self, *args, **kwargs):
        super(Client, self).__init__(*args, **kwargs)
        self.pool = eventlet.greenpool.GreenPool(DEFAULT_POOL_SIZE)
        self.reader_thread = None
        self.writer_thread = None
        self.queue = Queue(DEFAULT_MAX_QUEUE_SIZE)
        self.max_pending = MAX_PENDING
        self.closing = False

    def build_socket(self, family=socket.AF_INET):
        # Plain (green) socket; TLS wrapping is done separately.
        return socket.socket(family)

    def wrap_secure_socket(self, s, ssl_version):
        return GreenSSLSocket(s, ssl_version=ssl_version)

    def connect(self):
        super(Client, self).connect()
        self.closing = False
        self.reader_thread = eventlet.greenthread.spawn(self._reader_run)
        self.writer_thread = eventlet.greenthread.spawn(self._writer_run)

    def dispatch(self, fn, *args, **kwargs):
        # Run a callback on the green pool so the reader loop is not blocked.
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Dispatching: Pending {0}".format(len(self._pending)))
        self.pool.spawn_n(fn, *args, **kwargs)

    def shutdown(self):
        # Graceful: stop accepting work; close immediately only when nothing
        # is queued or pending, otherwise _reader_run completes the close.
        self.closing = True
        if len(self._pending) + self.queue.qsize() == 0:
            self._end_close()

    def close(self):
        self.shutdown()
        self.wait()

    def _end_close(self):
        self.writer_thread.kill()
        self.reader_thread.kill()
        super(Client, self).close()
        self.writer_thread = None
        self.reader_thread = None

    def sendAsync(self, header, value, onSuccess, onError, no_ack=False):
        """Queue a message for the writer thread.

        :raises common.ConnectionClosed: if a shutdown is in progress.
        """
        if self.closing:
            raise common.ConnectionClosed(
                "Client is closing, can't queue more operations.")
        if self.faulted:
            self._raise(common.ConnectionFaulted(
                "Can't send message when connection is on a faulted state."),
                onError)
            return  # skip the rest
        # fail fast on NotConnected
        if not self.isConnected:
            self._raise(common.NotConnected("Not connected."), onError)
            return  # skip the rest
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.debug("Queue: {0}".format(self.queue.qsize()))
        self.queue.put((header, value, onSuccess, onError, no_ack))
        eventlet.sleep(0)

    def wait(self):
        self.queue.join()

    def send(self, header, value):
        # Synchronous wrapper over sendAsync: block until a callback fires.
        done = eventlet.event.Event()

        class Dummy:
            pass
        d = Dummy()
        d.error = None
        d.result = None

        def innerSuccess(m, r, value):
            d.result = (m, r, value)
            done.send()

        def innerError(e):
            d.error = e
            done.send()
        self.sendAsync(header, value, innerSuccess, innerError)
        done.wait()
        # TODO(Nacho): should be add a default timeout?
        if d.error:
            raise d.error
        return d.result

    def _writer_run(self):
        # Drain the queue onto the wire, throttling while too many messages
        # are awaiting acknowledgement.
        while self.isConnected and not self.faulted:
            try:
                while len(self._pending) > self.max_pending:
                    eventlet.sleep(0)
                (header, value, onSuccess, onError, no_ack) = self.queue.get()
                super(Client, self).sendAsync(
                    header, value, onSuccess, onError, no_ack)
            except common.ConnectionFaulted:
                pass
            except common.ConnectionClosed:
                pass
            except Exception as ex:
                self._fault_client(ex)
            # Yield execution, don't starve the reader
            eventlet.sleep(0)

    def _reader_run(self):
        # Receive responses; completes a pending graceful close once all
        # queued and pending operations have drained.
        while self.isConnected and not self.faulted:
            try:
                self._async_recv()
                self.queue.task_done()
                if self.closing and len(self._pending) + self.queue.qsize() == 0:
                    self._end_close()
            except common.ConnectionFaulted:
                pass
            except Exception as ex:
                self._fault_client(ex)
class StatsdLog(object):
    """Monitor a syslog UDP stream and fire statsd counter events for
    log lines matching configured regex patterns."""

    def __init__(self, conf):
        TRUE_VALUES = set(("true", "1", "yes", "on", "t", "y"))
        self.conf = conf
        self.logger = logging.getLogger("statsdlogd")
        self.logger.setLevel(logging.INFO)
        self.syslog = SysLogHandler(address="/dev/log")
        self.formatter = logging.Formatter("%(name)s: %(message)s")
        self.syslog.setFormatter(self.formatter)
        self.logger.addHandler(self.syslog)
        if conf.get("debug", False) in TRUE_VALUES:
            self.debug = True
        else:
            self.debug = False
        self.statsd_host = conf.get("statsd_host", "127.0.0.1")
        self.statsd_port = int(conf.get("statsd_port", "8125"))
        self.listen_addr = conf.get("listen_addr", "127.0.0.1")
        self.listen_port = int(conf.get("listen_port", 8126))
        if conf.get("report_internal_stats", False) in TRUE_VALUES:
            self.report_internal_stats = True
        else:
            self.report_internal_stats = False
        self.int_stats_interval = int(conf.get("internal_stats_interval", 5))
        self.buff = int(conf.get("buffer_size", 8192))
        self.max_q_size = int(conf.get("max_line_backlog", 512))
        self.statsd_sample_rate = float(conf.get("statsd_sample_rate", ".5"))
        self.counter = 0
        self.skip_counter = 0
        self.hits = 0
        self.q = Queue(maxsize=self.max_q_size)
        # key: regex
        self.patterns_file = conf.get("patterns_file", "patterns.json")
        try:
            with open(self.patterns_file) as pfile:
                self.patterns = json.loads(pfile.read())
        except Exception as err:
            self.logger.critical(err)
            # Parenthesized print is valid on both Python 2 and 3.
            print(err)
            sys.exit(1)
        self.statsd_addr = (self.statsd_host, self.statsd_port)
        self.comp_patterns = {}
        for item in self.patterns:
            self.comp_patterns[item] = re.compile(self.patterns[item])

    def check_line(self, line):
        """
        Check if a line matches our search patterns.

        :param line: The string to check
        :returns: None or regex entry that matched
        """
        for entry in self.comp_patterns:
            if self.comp_patterns[entry].match(line):
                return entry
        return None

    def internal_stats(self):
        """
        Periodically send our own stats to statsd.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(self.int_stats_interval)
            self.send_event("statsdlog.lines:%s|c"
                            % (self.counter - lastcount))
            lastcount = self.counter
            self.send_event("statsdlog.hits:%s|c" % (self.hits - lasthit))
            lasthit = self.hits

    def stats_print(self):
        """
        Periodically dump some stats to the logs.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(2)
            lps = (self.counter - lastcount) / 60
            hps = (self.hits - lasthit) / 60
            lastcount = self.counter
            lasthit = self.hits
            self.logger.info("per second: %d lines - hits %d" % (lps, hps))
            self.logger.info("totals: %d hits - %d lines"
                             % (self.hits, self.counter))
            # BUG FIX: was ``is not 0`` — identity test on an int literal;
            # use value inequality.
            if self.skip_counter != 0:
                self.logger.info("Had to skip %d log lines so far"
                                 % self.skip_counter)

    def send_event(self, payload):
        """
        Fire event to statsd

        :param payload: The payload of the udp packet to send.
        """
        try:
            udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            udp_socket.sendto(payload, self.statsd_addr)
        except Exception:
            # udp sendto failed (socket already in use?), but thats ok
            self.logger.error("Error trying to send statsd event")

    def statsd_counter_increment(self, stats, delta=1):
        """
        Increment multiple statsd stats counters

        :param stats: list of stats items to package and send
        :param delta: delta of stats items
        """
        if self.statsd_sample_rate < 1:
            if random() <= self.statsd_sample_rate:
                for item in stats:
                    payload = "%s:%s|c|@%s" % (item, delta,
                                               self.statsd_sample_rate)
                    self.send_event(payload)
        else:
            for item in stats:
                payload = "%s:%s|c" % (item, delta)
                self.send_event(payload)

    def worker(self):
        """Check for and process log lines in queue"""
        while True:
            msg = self.q.get()
            matched = self.check_line(msg)
            if matched:
                self.statsd_counter_increment([matched])
                if self.hits >= maxint:
                    self.logger.info("hit maxint, reset hits counter")
                    self.hits = 0
                self.hits += 1

    def listener(self):
        """syslog udp listener"""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        bind_addr = (self.listen_addr, self.listen_port)
        sock.bind(bind_addr)
        self.logger.info("listening on %s:%d" % bind_addr)
        while 1:
            data, addr = sock.recvfrom(self.buff)
            if not data:
                break
            else:
                if self.q.qsize() < self.max_q_size:
                    self.q.put(data)
                    if self.counter >= maxint:
                        self.logger.info("hit maxint, reset seen counter")
                        self.counter = 0
                    self.counter += 1
                else:
                    if self.debug:
                        # BUG FIX: logging.Logger has no .notice() method —
                        # it raised AttributeError; use warning() instead.
                        self.logger.warning(
                            "max log lines in queue, skipping")
                    if self.skip_counter >= maxint:
                        self.logger.info("hit maxint, reset skip counter")
                        self.skip_counter = 0
                    self.skip_counter += 1

    def start(self):
        """
        Start the listener, worker, and mgmt server.
        """
        eventlet.spawn_n(self.worker)
        if self.debug:
            eventlet.spawn_n(self.stats_print)
        if self.report_internal_stats:
            eventlet.spawn_n(self.internal_stats)
        while True:
            try:
                self.listener()
            except Exception as err:
                self.logger.error(err)
class StatsdLog(object):
    """Simple server to monitor a syslog udp stream for statsd events"""

    def __init__(self, conf):
        TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
        self.conf = conf
        self.logger = logging.getLogger('statsdlogd')
        self.logger.setLevel(logging.INFO)
        self.syslog = SysLogHandler(address='/dev/log')
        self.formatter = logging.Formatter('%(name)s: %(message)s')
        self.syslog.setFormatter(self.formatter)
        self.logger.addHandler(self.syslog)
        self.debug = conf.get('debug', 'false').lower() in TRUE_VALUES
        self.statsd_host = conf.get('statsd_host', '127.0.0.1')
        self.statsd_port = int(conf.get('statsd_port', '8125'))
        self.listen_addr = conf.get('listen_addr', '127.0.0.1')
        self.listen_port = int(conf.get('listen_port', 8126))
        self.report_internal_stats = conf.get('report_internal_stats',
                                              'true').lower() in TRUE_VALUES
        self.int_stats_interval = int(conf.get('internal_stats_interval', 5))
        self.buff = int(conf.get('buffer_size', 8192))
        self.max_q_size = int(conf.get('max_line_backlog', 512))
        self.statsd_sample_rate = float(conf.get('statsd_sample_rate', '.5'))
        self.counter = 0
        self.skip_counter = 0
        self.hits = 0
        self.q = Queue(maxsize=self.max_q_size)
        # key: regex
        self.patterns_file = conf.get('patterns_file',
                                      '/etc/statsdlog/patterns.json')
        self.json_patterns = conf.get('json_pattern_file',
                                      'true').lower() in TRUE_VALUES
        try:
            self.patterns = self.load_patterns()
        except Exception as err:
            self.logger.exception(err)
            # Parenthesized print is valid on both Python 2 and 3.
            print("Encountered exception at startup: %s" % err)
            sys.exit(1)
        self.statsd_addr = (self.statsd_host, self.statsd_port)
        self.comp_patterns = {}
        for item in self.patterns:
            self.comp_patterns[item] = re.compile(self.patterns[item])

    def load_patterns(self):
        """Load patterns from either a json file or a ``name = regex``
        plain-text file, returning a name -> regex dict."""
        if self.json_patterns:
            self.logger.info("Using json based patterns file: %s"
                             % self.patterns_file)
            with open(self.patterns_file) as pfile:
                return json.loads(pfile.read())
        else:
            self.logger.info("Using plain text patterns file: %s"
                             % self.patterns_file)
            patterns = {}
            with open(self.patterns_file) as f:
                for line in f:
                    if not line:
                        # BUG FIX: previously an empty line fell through with
                        # pattern = None and crashed on len(None); skip it.
                        continue
                    pattern = [x.strip() for x in line.split("=", 1)]
                    if len(pattern) != 2:
                        # skip this line
                        self.logger.error(
                            "Skipping pattern. Unable to parse: %s" % line)
                    else:
                        if pattern[0] and pattern[1]:
                            patterns[pattern[0]] = pattern[1]
                        else:
                            self.logger.error(
                                "Skipping pattern. Unable to parse: %s"
                                % line)
            return patterns

    def check_line(self, line):
        """
        Check if a line matches our search patterns.

        :param line: The string to check
        :returns: List of regex entries that matched (or empty list if none)
        """
        matches = []
        for entry in self.comp_patterns:
            if self.comp_patterns[entry].match(line):
                matches.append(entry)
        return matches

    def internal_stats(self):
        """
        Periodically send our own stats to statsd.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(self.int_stats_interval)
            self.send_event("statsdlog.lines:%s|c"
                            % (self.counter - lastcount))
            lastcount = self.counter
            self.send_event("statsdlog.hits:%s|c" % (self.hits - lasthit))
            lasthit = self.hits

    def stats_print(self):
        """
        Periodically dump some stats to the logs.
        """
        lastcount = 0
        lasthit = 0
        while True:
            eventlet.sleep(2)
            lps = (self.counter - lastcount) / 60
            hps = (self.hits - lasthit) / 60
            lastcount = self.counter
            lasthit = self.hits
            self.logger.info('per second: %d lines - hits %d' % (lps, hps))
            self.logger.info('totals: %d hits - %d lines'
                             % (self.hits, self.counter))
            # BUG FIX: was ``is not 0`` — identity test on an int literal;
            # use value inequality.
            if self.skip_counter != 0:
                self.logger.info('Had to skip %d log lines so far'
                                 % self.skip_counter)

    def send_event(self, payload):
        """
        Fire event to statsd

        :param payload: The payload of the udp packet to send.
        """
        try:
            udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            udp_socket.sendto(payload, self.statsd_addr)
        except Exception:
            # udp sendto failed (socket already in use?), but thats ok
            self.logger.error("Error trying to send statsd event")

    def statsd_counter_increment(self, stats, delta=1):
        """
        Increment multiple statsd stats counters

        :param stats: list of stats items to package and send
        :param delta: delta of stats items
        """
        if self.statsd_sample_rate < 1:
            if random() <= self.statsd_sample_rate:
                for item in stats:
                    payload = "%s:%s|c|@%s" % (item, delta,
                                               self.statsd_sample_rate)
                    self.send_event(payload)
        else:
            for item in stats:
                payload = "%s:%s|c" % (item, delta)
                self.send_event(payload)

    def worker(self):
        """Check for and process log lines in queue"""
        while True:
            msg = self.q.get()
            matches = self.check_line(msg)
            for match in matches:
                self.statsd_counter_increment([match])
                if self.hits >= maxint:
                    self.logger.info("hit maxint, reset hits counter")
                    self.hits = 0
                self.hits += 1

    def listener(self):
        """syslog udp listener"""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        bind_addr = (self.listen_addr, self.listen_port)
        sock.bind(bind_addr)
        self.logger.info("listening on %s:%d" % bind_addr)
        while 1:
            data, addr = sock.recvfrom(self.buff)
            if not data:
                break
            else:
                if self.q.qsize() < self.max_q_size:
                    self.q.put(data)
                    if self.counter >= maxint:
                        self.logger.info("hit maxint, reset seen counter")
                        self.counter = 0
                    self.counter += 1
                else:
                    if self.debug:
                        # BUG FIX: logging.Logger has no .notice() method —
                        # it raised AttributeError; use warning() instead.
                        self.logger.warning(
                            "max log lines in queue, skipping")
                    if self.skip_counter >= maxint:
                        self.logger.info("hit maxint, reset skip counter")
                        self.skip_counter = 0
                    self.skip_counter += 1

    def start(self):
        """
        Start the listener, worker, and mgmt server.
        """
        eventlet.spawn_n(self.worker)
        if self.debug:
            eventlet.spawn_n(self.stats_print)
        if self.report_internal_stats:
            eventlet.spawn_n(self.internal_stats)
        while True:
            try:
                self.listener()
            except Exception as err:
                self.logger.error(err)
class PublisherService(object):
    """Standalone service that drains a queue of DB update events and
    publishes them to subscribers via the configured pub/sub driver,
    keeping a liveness record of itself in the database."""

    def __init__(self):
        self._queue = Queue()
        self.publisher = self._get_publisher()
        self.multiproc_subscriber = self._get_multiproc_subscriber()
        nb_driver_class = importutils.import_class(cfg.CONF.df.nb_db_class)
        self.db = nb_driver_class()
        self.uuid = pub_sub_api.generate_publisher_uuid()
        # Throttles how often the liveness timestamp is written to the DB.
        self._rate_limit = df_utils.RateLimiter(
            cfg.CONF.df.publisher_rate_limit_count,
            cfg.CONF.df.publisher_rate_limit_timeout,
        )

    def _get_publisher(self):
        pub_sub_driver = df_utils.load_driver(
            cfg.CONF.df.pub_sub_driver,
            df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
        return pub_sub_driver.get_publisher()

    def _get_multiproc_subscriber(self):
        """
        Return the subscriber for inter-process communication.
        If multi-proc communication is not used (i.e. disabled from config),
        return None.
        """
        if not cfg.CONF.df.pub_sub_use_multiproc:
            return None
        pub_sub_driver = df_utils.load_driver(
            cfg.CONF.df.pub_sub_multiproc_driver,
            df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
        return pub_sub_driver.get_subscriber()

    def initialize(self):
        # Wire inter-process events into our queue before the publisher runs.
        if self.multiproc_subscriber:
            self.multiproc_subscriber.initialize(self._append_event_to_queue)
        self.publisher.initialize()

    def _append_event_to_queue(self, table, key, action, value, topic):
        # Callback invoked by the multiproc subscriber; enqueue the update
        # and yield so the main loop can pick it up.
        event = db_common.DbUpdate(table, key, action, value, topic=topic)
        self._queue.put(event)
        eventlet.sleep(0)

    def run(self):
        """Main loop: forward queued DB updates to subscribers forever."""
        if self.multiproc_subscriber:
            self.multiproc_subscriber.daemonize()
        self.db.initialize(
            db_ip=cfg.CONF.df.remote_db_ip,
            db_port=cfg.CONF.df.remote_db_port,
            config=cfg.CONF.df
        )
        self._register_as_publisher()
        self._start_db_table_monitors()
        while True:
            try:
                event = self._queue.get()
                self.publisher.send_event(event)
                # Publisher-table events don't count as activity.
                if event.table != pub_sub_api.PUBLISHER_TABLE:
                    self._update_timestamp_in_db()
                eventlet.sleep(0)
            except Exception as e:
                LOG.warning(_LW("Exception in main loop: {}, {}").format(
                    e, traceback.format_exc()
                ))
                # Ignore

    def _update_timestamp_in_db(self):
        # Record liveness; rate-limited to avoid hammering the DB.
        if self._rate_limit():
            return
        try:
            publisher_json = self.db.get_key(
                pub_sub_api.PUBLISHER_TABLE,
                self.uuid,
            )
            publisher = jsonutils.loads(publisher_json)
            publisher['last_activity_timestamp'] = time.time()
            publisher_json = jsonutils.dumps(publisher)
            self.db.set_key(
                pub_sub_api.PUBLISHER_TABLE,
                self.uuid,
                publisher_json
            )
        except exceptions.DBKeyNotFound:
            # Our record was cleaned up (e.g. deemed stale); re-create it.
            self._register_as_publisher()

    def _register_as_publisher(self):
        publisher = {
            'id': self.uuid,
            'uri': self._get_uri(),
            'last_activity_timestamp': time.time(),
        }
        publisher_json = jsonutils.dumps(publisher)
        self.db.create_key(
            pub_sub_api.PUBLISHER_TABLE,
            self.uuid,
            publisher_json
        )

    def _get_uri(self):
        # Wildcard/loopback bind addresses are advertised as the local IP so
        # remote subscribers can actually reach us.
        ip = cfg.CONF.df.publisher_bind_address
        if ip == '*' or ip == '127.0.0.1':
            ip = cfg.CONF.df.local_ip
        return "{}://{}:{}".format(
            cfg.CONF.df.publisher_transport,
            ip,
            cfg.CONF.df.publisher_port,
        )

    def _start_db_table_monitor(self, table_name):
        # The publisher table gets a special monitor that also expires
        # stale publishers.
        if table_name == 'publisher':
            table_monitor = pub_sub_api.StalePublisherMonitor(
                self.db,
                self.publisher,
                cfg.CONF.df.publisher_timeout,
                cfg.CONF.df.monitor_table_poll_time,
            )
        else:
            table_monitor = pub_sub_api.TableMonitor(
                table_name,
                self.db,
                self.publisher,
                cfg.CONF.df.monitor_table_poll_time,
            )
        table_monitor.daemonize()
        return table_monitor

    def _start_db_table_monitors(self):
        self.db_table_monitors = [self._start_db_table_monitor(table_name)
                                  for table_name
                                  in pub_sub_api.MONITOR_TABLES]
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.
    """

    _total_reqd_bytes = 0
    _read_watcher = None
    _write_watcher = None

    _socket_impl = eventlet.green.socket
    _ssl_impl = eventlet.green.ssl

    @classmethod
    def initialize_reactor(cls):
        eventlet.monkey_patch()

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._write_queue = Queue()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)
        self._connect_socket()
        # Dedicated reader/writer greenthreads for this connection.
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # Never kill the greenthread we are currently running on.
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))
        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Drain the write queue onto the socket until a send fails.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.sendall(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # A zero-byte read means the server closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Split outgoing data into out_buffer_size chunks for the writer.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
class Rawserver:
    """Raw (AF_PACKET) socket server: reads frames of a given ethertype from
    an interface in promiscuous mode and hands them to ``handler`` via a
    reader greenthread and a dispatcher greenthread."""

    SIOCGIFHWADDR = 0x8927
    SIOCGIFINDEX = 0x8933
    PACKET_MR_PROMISC = 1
    SOL_PACKET = 263
    PACKET_ADD_MEMBERSHIP = 1
    DUMMY_ADDRESS = b"\x00\x00\x00\x00\x00\x00"

    def __init__(self, interface_name, ethertype, handler):
        self.interface_name = interface_name
        self.ethertype = ethertype
        self.handler = handler
        self.greenlets = set()
        self.queue = Queue()

    def get_interface_index(self):
        """Look up and store the kernel index of our interface."""
        # http://man7.org/linux/man-pages/man7/netdevice.7.html
        ifreq = struct.pack('16sI', self.interface_name.encode("utf-8"), 0)
        response = ioctl(self.socket, self.SIOCGIFINDEX, ifreq)
        _ifname, self.interface_index = struct.unpack('16sI', response)

    def set_socket_promiscuous(self):
        """Enable promiscuous mode so frames not addressed to us arrive."""
        mreq = struct.pack("IHH8s", self.interface_index,
                           self.PACKET_MR_PROMISC, len(self.DUMMY_ADDRESS),
                           self.DUMMY_ADDRESS)
        self.socket.setsockopt(self.SOL_PACKET, self.PACKET_ADD_MEMBERSHIP,
                               mreq)

    def serve_forever(self):
        """Open the raw socket and run reader + dispatcher until stopped."""
        self.running = True
        self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW,
                                    socket.htons(self.ethertype))
        self.socket.bind((self.interface_name, 0))
        self.get_interface_index()
        self.set_socket_promiscuous()
        self.poller = select.poll()
        self.poller.register(
            self.socket,
            select.POLLIN | select.POLLPRI | select.POLLERR
            | select.POLLHUP | select.POLLNVAL)
        pool = GreenPool()
        self.greenlets.add(pool.spawn(self.server))
        self.greenlets.add(pool.spawn(self.dispatcher))
        pool.waitall()

    def server(self):
        """Poll the socket and push received frames onto the queue."""
        try:
            while self.running:
                sleep(1)
                events = self.poller.poll(10)
                if events:
                    while events:
                        if len(events) > 1:
                            raise Exception(
                                "Too many events returned from poller")
                        fd, event = events[0]
                        if (event == select.POLLERR
                                or event == select.POLLHUP
                                or event == select.POLLNVAL):
                            break
                        if event == select.POLLIN:
                            data = self.socket.recv(4096)
                            self.queue.put(data)
                        events = self.poller.poll(10)
        except OSError:
            # Socket was shut down by stop(); exit quietly.
            pass

    def dispatcher(self):
        """Pop frames off the queue and run the user handler."""
        try:
            while self.running:
                data = self.queue.get()
                self.handler(data)
        except OSError:
            pass

    def call_handler(self, socket, address):
        # Track the handler's greenthread so stop() can kill it.
        self.greenlets.add(greenthread.getcurrent())
        self.handler(socket, address)
        self.greenlets.remove(greenthread.getcurrent())

    def stop(self):
        """Stop the loops, kill worker greenlets, shut the socket down."""
        self.running = False
        # Iterate over a copy: killing a greenlet may mutate self.greenlets.
        for greenlet in list(self.greenlets):
            greenlet.kill()
        # BUG FIX: was ``self.server.shutdown(...)`` — ``server`` is a bound
        # method, not a socket, so this raised AttributeError; shut down the
        # raw socket (created in serve_forever) instead.
        self.socket.shutdown(socket.SHUT_RDWR)
class EventletConnection(Connection):
    """
    An implementation of :class:`.Connection` that utilizes ``eventlet``.
    """

    _total_reqd_bytes = 0
    _read_watcher = None
    _write_watcher = None
    _socket = None

    @classmethod
    def factory(cls, *args, **kwargs):
        """Create a connection and block (up to ``timeout`` seconds) until
        it is fully established, raising on error or timeout."""
        timeout = kwargs.pop('timeout', 5.0)
        conn = cls(*args, **kwargs)
        conn.connected_event.wait(timeout)
        if conn.last_error:
            raise conn.last_error
        elif not conn.connected_event.is_set():
            conn.close()
            raise OperationTimedOut("Timed out creating connection")
        else:
            return conn

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        self.connected_event = Event()
        self._write_queue = Queue()
        self._callbacks = {}
        self._push_watchers = defaultdict(set)

        sockerr = None
        # Try each resolved address (IPv4/IPv6) until one connects.
        addresses = socket.getaddrinfo(
            self.host,
            self.port,
            socket.AF_UNSPEC,
            socket.SOCK_STREAM
        )
        for (af, socktype, proto, canonname, sockaddr) in addresses:
            try:
                self._socket = socket.socket(af, socktype, proto)
                self._socket.settimeout(1.0)
                self._socket.connect(sockaddr)
                sockerr = None
                break
            except socket.error as err:
                sockerr = err
        if sockerr:
            raise socket.error(
                sockerr.errno, "Tried connecting to %s. Last error: %s" % (
                    [a[4] for a in addresses], sockerr.strerror)
            )

        if self.sockopts:
            for args in self.sockopts:
                self._socket.setsockopt(*args)

        # Dedicated reader/writer greenthreads for this connection.
        self._read_watcher = eventlet.spawn(lambda: self.handle_read())
        self._write_watcher = eventlet.spawn(lambda: self.handle_write())
        self._send_options_message()

    def close(self):
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s" % (id(self), self.host))
        cur_gthread = eventlet.getcurrent()
        # Never kill the greenthread we are currently running on.
        if self._read_watcher and self._read_watcher != cur_gthread:
            self._read_watcher.kill()
        if self._write_watcher and self._write_watcher != cur_gthread:
            self._write_watcher.kill()
        if self._socket:
            self._socket.close()
        log.debug("Closed socket to %s" % (self.host,))
        if not self.is_defunct:
            self.error_all_callbacks(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_close(self):
        log.debug("connection closed by server")
        self.close()

    def handle_write(self):
        # Drain the write queue onto the socket until a send fails.
        while True:
            try:
                next_msg = self._write_queue.get()
                self._socket.send(next_msg)
            except socket.error as err:
                log.debug("Exception during socket send for %s: %s",
                          self, err)
                self.defunct(err)
                return  # Leave the write loop

    def handle_read(self):
        run_select = partial(select.select, (self._socket,), (), ())
        while True:
            try:
                run_select()
            except Exception as exc:
                if not self.is_closed:
                    log.debug("Exception during read select() for %s: %s",
                              self, exc)
                    self.defunct(exc)
                return

            try:
                buf = self._socket.recv(self.in_buffer_size)
                self._iobuf.write(buf)
            except socket.error as err:
                if not is_timeout(err):
                    log.debug("Exception during socket recv for %s: %s",
                              self, err)
                    self.defunct(err)
                    return  # leave the read loop

            if self._iobuf.tell():
                self.process_io_buffer()
            else:
                # A zero-byte read means the server closed the connection.
                log.debug("Connection %s closed by server", self)
                self.close()
                return

    def push(self, data):
        # Split outgoing data into out_buffer_size chunks for the writer.
        chunk_size = self.out_buffer_size
        for i in xrange(0, len(data), chunk_size):
            self._write_queue.put(data[i:i + chunk_size])

    def register_watcher(self, event_type, callback, register_timeout=None):
        self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=[event_type]),
            timeout=register_timeout)

    def register_watchers(self, type_callback_dict, register_timeout=None):
        for event_type, callback in type_callback_dict.items():
            self._push_watchers[event_type].add(callback)
        self.wait_for_response(
            RegisterMessage(event_list=type_callback_dict.keys()),
            timeout=register_timeout)
class Chewie:
    """Facilitates EAP supplicant and RADIUS server communication"""

    # Default destination port for RADIUS Access-Request packets (RFC 2865).
    RADIUS_UDP_PORT = 1812

    def __init__(self, interface_name, logger=None, auth_handler=None,
                 failure_handler=None, logoff_handler=None,
                 radius_server_ip=None, radius_server_port=None,
                 radius_server_secret=None, chewie_id=None):
        """
        Args:
            interface_name (str): interface to send/receive EAP packets on.
            logger: parent logger; a child logger named after this class is
                derived from it. May be None (a bare class-name logger is
                used in that case).
            auth_handler (callable): called on successful authentication.
            failure_handler (callable): called on failed authentication.
            logoff_handler (callable): called on supplicant logoff.
            radius_server_ip (str): address of the RADIUS server.
            radius_server_port (int): RADIUS server port; defaults to
                RADIUS_UDP_PORT when falsy.
            radius_server_secret (str): shared secret with the RADIUS server.
            chewie_id (str): value of the RADIUS 'Called-Station' attribute
                in Access-Requests.
        """
        self.interface_name = interface_name
        # Fix: the original unconditionally read logger.name, so the
        # documented default (logger=None) crashed with AttributeError.
        parent_prefix = logger.name + "." if logger else ""
        self.logger = get_logger(parent_prefix + Chewie.__name__)
        self.auth_handler = auth_handler
        self.failure_handler = failure_handler
        self.logoff_handler = logoff_handler

        self.radius_server_ip = radius_server_ip
        self.radius_secret = radius_server_secret
        self.radius_server_port = self.RADIUS_UDP_PORT
        if radius_server_port:
            self.radius_server_port = radius_server_port
        self.radius_listen_ip = "0.0.0.0"
        self.radius_listen_port = 0

        self.chewie_id = "44-44-44-44-44-44:"  # used by the RADIUS Attribute
        # 'Called-Station' in Access-Request
        if chewie_id:
            self.chewie_id = chewie_id

        # {port_id (str): {src_mac (str): FullEAPStateMachine}}
        self.state_machines = {}
        self.eap_output_messages = Queue()
        self.radius_output_messages = Queue()

        self.radius_lifecycle = RadiusLifecycle(self.radius_secret,
                                                self.chewie_id, self.logger)
        self.timer_scheduler = timer_scheduler.TimerScheduler(self.logger)

        self.eap_socket = None
        self.pool = None
        self.radius_socket = None
        self.interface_index = None
        # Green threads spawned by start_threads_and_wait(), killed by
        # shutdown(). (The original redundantly assigned this twice:
        # first None, then [].)
        self.eventlets = []

    def run(self):
        """setup chewie and start socket eventlet threads"""
        self.logger.info("Starting")
        self.setup_eap_socket()
        self.setup_radius_socket()
        self.start_threads_and_wait()

    def running(self):
        """Used to nicely exit the event loops"""
        # Always True today; the loops poll this so a subclass/test can
        # override it to stop them.
        return True

    def shutdown(self):
        """kill eventlets and quit"""
        for greenthread in self.eventlets:
            greenthread.kill()

    def start_threads_and_wait(self):
        """Start the threads and wait until they complete (hopefully never)"""
        self.pool = GreenPool()

        self.eventlets.append(self.pool.spawn(self.send_eap_messages))
        self.eventlets.append(self.pool.spawn(self.receive_eap_messages))
        self.eventlets.append(self.pool.spawn(self.send_radius_messages))
        self.eventlets.append(self.pool.spawn(self.receive_radius_messages))
        self.eventlets.append(self.pool.spawn(self.timer_scheduler.run))

        self.pool.waitall()

    def auth_success(self, src_mac, port_id):
        """authentication shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the successful supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the success is on"""
        if self.auth_handler:
            self.auth_handler(src_mac, port_id)

    def auth_failure(self, src_mac, port_id):
        """failure shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the failed supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the failure is on"""
        if self.failure_handler:
            self.failure_handler(src_mac, port_id)

    def auth_logoff(self, src_mac, port_id):
        """logoff shim between faucet and chewie
        Args:
            src_mac (MacAddress): the mac of the logoff supplicant
            port_id (MacAddress): the 'mac' identifier of what switch port
             the logoff is on"""
        if self.logoff_handler:
            self.logoff_handler(src_mac, port_id)

    def port_down(self, port_id):
        """
        should be called by faucet when port has gone down.
        Args:
            port_id (str): id of port.
        """
        # all chewie needs to do is change its internal state.
        # faucet will remove the acls by itself.
        self.set_port_status(port_id, False)

    def port_up(self, port_id):
        """
        should be called by faucet when port has come up
        Args:
            port_id (str): id of port.
        """
        self.set_port_status(port_id, True)
        # TODO send preemptive identity request.

    def set_port_status(self, port_id, status):
        """Propagate a port status change to every state machine on the port.

        Args:
            port_id: id of the port whose status changed.
            status (bool): True if the port is up, False if down.
        """
        port_id_str = str(port_id)

        # Ensure the port has an entry even before any supplicant appears.
        if port_id_str not in self.state_machines:
            self.state_machines[port_id_str] = {}

        # Original iterated .items() but never used the mac key.
        for state_machine in self.state_machines[port_id_str].values():
            state_machine.event(EventPortStatusChange(status))

    def setup_eap_socket(self):
        """Open the raw socket used to exchange EAP with supplicants."""
        self.eap_socket = EapSocket(self.interface_name)
        self.eap_socket.setup()

    def setup_radius_socket(self):
        """Open the UDP socket used to exchange RADIUS with the server."""
        self.radius_socket = RadiusSocket(self.radius_listen_ip,
                                          self.radius_listen_port,
                                          self.radius_server_ip,
                                          self.radius_server_port)
        self.radius_socket.setup()
        # Lazy logging args: formatting deferred until the record is emitted.
        self.logger.info("Radius Listening on %s:%d",
                         self.radius_listen_ip,
                         self.radius_listen_port)

    def send_eap_messages(self):
        """send eap messages to supplicant forever."""
        while self.running():
            sleep(0)  # yield to other green threads before blocking on get()
            eap_queue_message = self.eap_output_messages.get()
            self.logger.info("Sending message %s from %s to %s",
                             eap_queue_message.message,
                             str(eap_queue_message.port_mac),
                             str(eap_queue_message.src_mac))
            self.eap_socket.send(
                MessagePacker.ethernet_pack(eap_queue_message.message,
                                            eap_queue_message.port_mac,
                                            eap_queue_message.src_mac))

    def receive_eap_messages(self):
        """receive eap messages from supplicant forever."""
        while self.running():
            sleep(0)
            self.logger.info("waiting for eap.")
            packed_message = self.eap_socket.receive()
            self.logger.info("Received packed_message: %s",
                             str(packed_message))
            try:
                eap, dst_mac = MessageParser.ethernet_parse(packed_message)
            except MessageParseError as exception:
                # Malformed frame: log and keep serving other supplicants.
                self.logger.info(
                    "MessageParser.ethernet_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message, exception)
                continue

            self.send_eap_to_state_machine(eap, dst_mac)

    def send_eap_to_state_machine(self, eap, dst_mac):
        """sends an eap message to the state machine"""
        self.logger.info("eap EAP(): %s", eap)
        state_machine = self.get_state_machine(eap.src_mac, dst_mac)
        event = EventMessageReceived(eap, dst_mac)
        state_machine.event(event)

    def send_radius_messages(self):
        """send RADIUS messages to RADIUS Server forever."""
        while self.running():
            sleep(0)
            radius_output_bits = self.radius_output_messages.get()
            packed_message = self.radius_lifecycle.process_outbound(
                radius_output_bits)
            self.radius_socket.send(packed_message)
            self.logger.info("sent radius message.")

    def receive_radius_messages(self):
        """receive RADIUS messages from RADIUS server forever."""
        while self.running():
            sleep(0)
            self.logger.info("waiting for radius.")
            packed_message = self.radius_socket.receive()
            try:
                radius = MessageParser.radius_parse(packed_message,
                                                    self.radius_secret,
                                                    self.radius_lifecycle)
            except MessageParseError as exception:
                # Malformed datagram: log and keep the receive loop alive.
                self.logger.info(
                    "MessageParser.radius_parse threw exception.\n"
                    " packed_message: '%s'.\n"
                    " exception: '%s'.",
                    packed_message, exception)
                continue
            # Log receipt *before* dispatch so the message is recorded even
            # if the state machine raises while processing it.
            self.logger.info("Received RADIUS message: %s", radius)
            self.send_radius_to_state_machine(radius)

    def send_radius_to_state_machine(self, radius):
        """sends a radius message to the state machine"""
        event = self.radius_lifecycle.build_event_radius_message_received(
            radius)
        state_machine = self.get_state_machine_from_radius_packet_id(
            radius.packet_id)
        state_machine.event(event)

    def get_state_machine_from_radius_packet_id(self, packet_id):
        """Gets a FullEAPStateMachine from the RADIUS message packet_id
        Args:
            packet_id (int): id of the received RADIUS message
        Returns:
            FullEAPStateMachine
        """
        return self.get_state_machine(
            **self.radius_lifecycle.packet_id_to_mac[packet_id])

    def get_state_machine(self, src_mac, port_id):
        """Gets or creates if it does not already exist an FullEAPStateMachine
         for the src_mac.
        Args:
            src_mac (MacAddress): who's to get.
            port_id (MacAddress): ID of the port where the src_mac is.

        Returns:
            FullEAPStateMachine
        """
        port_id_str = str(port_id)
        src_mac_str = str(src_mac)
        # setdefault replaces the original manual "check then create".
        port_state_machines = self.state_machines.setdefault(port_id_str, {})

        state_machine = port_state_machines.get(src_mac_str)
        if not state_machine:
            state_machine = FullEAPStateMachine(
                self.eap_output_messages, self.radius_output_messages, src_mac,
                self.timer_scheduler, self.auth_success,
                self.auth_failure, self.auth_logoff,
                self.logger.name)
            state_machine.eap_restart = True
            # TODO what if port is not actually enabled,
            # but then how did they auth?
            state_machine.port_enabled = True
            port_state_machines[src_mac_str] = state_machine

        return state_machine