Code Example #1
class FrameSaver( threading.Thread ):
	def __init__( self ):
		threading.Thread.__init__( self )
		self.daemon = True
		self.name = 'FrameSaver'
		self.reset()
	
	def reset( self ):
		self.queue = Queue()
	
	def run( self ):
		self.reset()
		while 1:
			message = self.queue.get()
			if   message[0] == 'Save':
				cmd, fileName, bib, t, frame = message
				#sys.stderr.write( 'save' )
				PhotoFinish.SavePhoto( fileName, bib, t, frame )
				self.queue.task_done()
			elif message[0] == 'Terminate':
				self.queue.task_done()
				self.reset()
				break
	
	def stop( self ):
		self.queue.put( ['Terminate'] )
		self.join()
	
	def save( self, fileName, bib, t, frame ):
		self.queue.put( ['Save', fileName, bib, t, frame] )
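
Usage note: a minimal sketch of driving FrameSaver, assuming the surrounding module provides PhotoFinish and the usual imports (threading, and Queue from the queue module); fileName, bib, t and frame are placeholder values for whatever PhotoFinish.SavePhoto expects.

saver = FrameSaver()
saver.start()                          # run() drains the queue on its own thread
saver.save(fileName, bib, t, frame)    # enqueues a ['Save', ...] message
saver.stop()                           # enqueues ['Terminate'], then join()s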
Code Example #2
File: TagGroup.py Project: esitarski/CrossMgr
class TagGroup( object ):
	'''
		Process groups of tag reads and return the best time estimated using quadratic regression.
		Stray reads are also detected if there is no quiet period for the tag.
		The first read time of each stray read is returned.
	'''
	def __init__( self ):
		self.q = Queue()
		self.tagInfo = {}
		
	def add( self, antenna, tag, t, db ):
		self.q.put((antenna, tag, t, db))

	def flush( self ):
		# Process all waiting reads.
		while 1:
			try:
				antenna, tag, t, db = self.q.get(False)
			except Empty:
				break
			try:
				self.tagInfo[tag].add( antenna, t, db )
			except KeyError:
				self.tagInfo[tag] = TagGroupEntry( antenna, t, db )
			self.q.task_done()
			
	def getReadsStrays( self, tNow=None, method=QuadraticRegressionMethod, antennaChoice=MostReadsChoice, removeOutliers=True ):
		'''
			Returns two lists:
				reads = [(tag1, t1, sampleSize1, antennaID1), (tag2, t2, sampleSize2, antennaID2), ...]
				strays = [(tagA, tFirstReadA), (tagB, tFirstReadB), ...]
				
			Each stray will be reported as a read the first time it is detected.
		'''
		self.flush()
		
		trNow = datetimeToTr( tNow or datetime.now() )
		reads, strays = [], []
		toDelete = []
		
		for tag, tge in six.iteritems(self.tagInfo):
			if trNow - tge.lastReadMax >= tQuiet:				# Tag has left read range.
				if not tge.isStray:
					t, sampleSize, antennaID = tge.getBestEstimate(method, antennaChoice, removeOutliers)
					reads.append( (tag, t, sampleSize, antennaID) )
				toDelete.append( tag )
			elif tge.lastReadMax - tge.firstReadMin >= tStray:	# This is a stray.
				t = trToDatetime( tge.firstReadMin )
				if not tge.isStray:
					tge.setStray()
					reads.append( (tag, t, 1, 0) )				# Report stray first read time.
				strays.append( (tag, t) )
				
		for tag in toDelete:
			del self.tagInfo[tag]
		
		reads.sort( key=operator.itemgetter(1,0))
		strays.sort( key=operator.itemgetter(1,0) )
		return reads, strays
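
Usage note: a minimal sketch, assuming the surrounding module defines TagGroupEntry, datetimeToTr/trToDatetime, tQuiet, tStray and the default method/antennaChoice values; the tag ID and signal strength below are made up.

from datetime import datetime

tg = TagGroup()
tg.add(antenna=1, tag='E20000172211', t=datetime.now(), db=-62.0)  # one raw read
# ... keep feeding reads as they arrive, then periodically:
reads, strays = tg.getReadsStrays()
for tag, t, sampleSize, antennaID in reads:
    print(tag, t, sampleSize, antennaID)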
Code Example #3
class CountingQueue(object):
    """A Queue wrapper adding some counting & progress reporting features."""
    def __init__(self, **kwargs):
        # Name is expected to be set after construction, as phases are wired together.
        self.name = "<unknown queue>"
        self._get_count = 0
        self._put_count = 0
        self._done_count = 0
        self._lock = Lock()
        self._delegate = Queue(**kwargs)
        self.qsize = self._delegate.qsize

    def _incr_get(self):
        with self._lock:
            self._get_count += 1

    def _incr_put(self):
        with self._lock:
            self._put_count += 1

    def _incr_done(self):
        with self._lock:
            self._done_count += 1

    @property
    def counts(self):
        """Returns a QueueCounts tuple for the current state of this queue.

        Can be used to estimate the progress of a phase reading from this
        queue.
        """
        with self._lock:
            return QueueCounts(self._put_count, self._get_count,
                               self._done_count)

    # The following methods are API-compatible with the standard Queue
    # methods, but simultaneously update our counts.

    def put(self, item, *args, **kwargs):
        self._delegate.put(item, *args, **kwargs)
        if item not in (Phase.ERROR, Phase.FINISHED):
            self._incr_put()

    def get(self, *args, **kwargs):
        out = self._delegate.get(*args, **kwargs)
        if out not in (Phase.ERROR, Phase.FINISHED):
            self._incr_get()
        return out

    def task_done(self):
        self._delegate.task_done()
        self._incr_done()
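
Usage note: a minimal sketch, assuming QueueCounts is a (put, get, done) namedtuple and Phase.ERROR/Phase.FINISHED are sentinel values defined elsewhere in the project.

q = CountingQueue(maxsize=100)
q.name = 'parse-phase'
q.put('work item')
item = q.get()
q.task_done()
print(q.counts)   # e.g. QueueCounts(1, 1, 1) -- usable for progress reporting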
Code Example #4
File: pubnub.py Project: pubnub/python
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    def wait_for_connect(self):
        if not self.connected_event.is_set():
            self.connected_event.wait()
        else:
            raise Exception("the instance is already connected")

    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            self.disconnected_event.wait()
        else:
            raise Exception("the instance is already disconnected")

    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.message_queue.get()
            self.message_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue

    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.presence_queue.get()
            self.presence_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue
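
Usage note: a sketch of the intended flow with the PubNub Python SDK, assuming a configured pubnub client instance; the channel name is arbitrary.

listener = SubscribeListener()
pubnub.add_listener(listener)
pubnub.subscribe().channels('my_channel').execute()
listener.wait_for_connect()                            # blocks until subscribed
envelope = listener.wait_for_message_on('my_channel')  # blocks until a message arrives
print(envelope.message)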
Code Example #5
File: call_python_client.py Project: mposa/drake
    def _handle_messages_threaded(self):
        # Handles messages in a threaded fashion.
        queue = Queue()

        def producer_loop():
            # Read messages from file, and queue them for execution.
            for msg in self._read_next_message():
                queue.put(msg)
                # Check if an error occurred.
                if self._done:
                    break
            # Wait until the queue empties out to signal completion from the
            # producer's side.
            if not self._done:
                queue.join()
                self._done = True

        producer = Thread(name="Producer", target=producer_loop)
        # @note Previously, when trying to do `queue.clear()` in the consumer,
        # and `queue.join()` in the producer, there would be intermittent
        # deadlocks. By demoting the producer to a daemon, I (eric.c) have not
        # yet encountered a deadlock.
        producer.daemon = True
        producer.start()

        # Consume.
        # TODO(eric.cousineau): Trying to quit via Ctrl+C is awkward (but kinda
        # works). Is there a way to have `plt.pause` handle Ctrl+C differently?
        try:
            pause = self.scope_globals['pause']
            while not self._done:
                # Process messages.
                while not queue.empty():
                    msg = queue.get()
                    queue.task_done()
                    self._execute_message(msg)
                # Spin busy for a bit, let matplotlib (or whatever) flush its
                # event queue.
                pause(0.01)
        except KeyboardInterrupt:
            # User pressed Ctrl+C.
            self._done = True
            print("Quitting")
        except Exception as e:
            # We encountered an error, and must stop.
            self._done = True
            self._had_error = True
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("  Stopping (--stop_on_error)\n")
Code Example #6
def runServer(host='localhost',
              llrp_host=None,
              transmitPower=None,
              receiverSensitivity=None):
    messageQ = Queue()

    transmitPower = transmitPower if transmitPower else None
    receiverSensitivity = receiverSensitivity if receiverSensitivity else None

    server = None
    retryDelaySeconds = 3

    # Define function to get llrp_host name.
    if llrp_host and llrp_host.lower() != 'autodetect':
        LLRPHostFunc = lambda: llrp_host
    else:
        LLRPHostFunc = doAutoDetect

    # Outer loop - connect/reconnect to the reader.
    while True:
        if server is not None:
            writeLog('Attempting reconnect in {} seconds...'.format(
                retryDelaySeconds))
            time.sleep(retryDelaySeconds)

        server = LLRPServer(LLRPHostFunc=LLRPHostFunc,
                            messageQ=messageQ,
                            transmitPower=transmitPower,
                            receiverSensitivity=receiverSensitivity)
        writeLog('runServer: LLRP Server on ({}:{})'.format(
            server.host, server.port))

        try:
            server.connect()
            writeLog('runServer: Successfully connected to ({}:5084)!'.format(
                server.llrp_host))
        except Exception as e:
            writeLog('runServer: {}'.format(e))
            writeLog('runServer: Connection to ({}:5084) failed.'.format(
                server.llrp_host))
            continue

        # Inner loop - process messages from the reader.
        while True:
            writeLog('runServer: Server: ' + messageQ.get())
            messageQ.task_done()
            if server.exception_termination:
                writeLog('runServer: Exceptional RFID Reader Termination.')
                break
Code Example #7
class FluentdEvent(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()

    def init_app(self, app):
        tag_prefix = app.config.get("FLUENTD_EVENT_TAG_PREFIX",
                                    "flask.fluentd_event")
        host = app.config.get("FLUENTD_EVENT_HOST", "localhost")
        port = int(app.config.get("FLUENTD_EVENT_PORT", 24224))
        self._sender = sender.FluentSender(tag_prefix, host=host, port=port)

        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, "teardown_appcontext"):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, tag, event):
        self.queue.put((tag, event))

    def send_events(self, exception):
        """
        Makes a best effort to send all the events pushed during a
        request, but may miss some.
        """
        pumping = True
        while pumping:
            try:
                tag, event = self.queue.get_nowait()
                self._sender.emit(tag, event)
                self.queue.task_done()
            except Empty:
                pumping = False
            except Exception as e:
                # This is bad but it's worse to foul the request because
                # of a logging issue
                logging.exception(e)
                self.queue.task_done()

        return exception
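
Usage note: a minimal Flask wiring sketch; the config keys match the ones read in init_app above, and the tag and payload are arbitrary.

from flask import Flask

app = Flask(__name__)
app.config['FLUENTD_EVENT_HOST'] = 'localhost'
fluentd_event = FluentdEvent(app)

@app.route('/')
def index():
    fluentd_event.event('request.index', {'path': '/'})  # queued, flushed after the request
    return 'ok'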
Code Example #8
File: settings_view.py Project: zk20/piksi_tools
class WorkQueue():
    def __init__(self, settings_view):
        self._settings_view = settings_view
        self._work_queue = Queue()
        self._worker = threading.Thread(target=self._work_thd)
        self._worker.daemon = True
        self._worker.start()

    def put(self, func, *argv):
        self._work_queue.put((func, argv))

    def _work_thd(self):
        while True:
            (func, argv) = self._work_queue.get(block=True)
            func(*argv)
            self._work_queue.task_done()
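
Usage note: a minimal sketch; any callable plus its positional arguments can be queued, and the daemon worker runs them in FIFO order (settings_view comes from the surrounding application).

def apply_setting(name, value):
    print('writing', name, value)

wq = WorkQueue(settings_view)
wq.put(apply_setting, 'rate', 10)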
Code Example #9
File: flask_fluentd.py Project: thread/flask-fluentd
class Fluentd(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()
        tag_label = app.config.get('EVENT_TAG_PREFIX', 'flask.fluentd')
        self._sender = sender.FluentSender(tag_label)

    def init_app(self, app):
        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, pair):
        tag, evt = pair
        self.queue.put((tag, evt))

    def send_events(self, exception):
        """
        Makes a best effort to send all the events pushed during a
        request, but may miss some.
        """
        pumping = True
        while pumping:
            try:
                tag, evt = self.queue.get_nowait()
                self._sender.emit(tag, evt)
                self.queue.task_done()
            except Empty:
                pumping = False
            except Exception as e:
                # This is bad but it's worse to foul the request because
                # of a logging issue
                logging.exception(e)
                self.queue.task_done()

        return exception
Code Example #10
File: mixins.py Project: wazo-platform/xivo-bus
class QueuePublisherMixin(PublisherMixin):
    queue_publisher_args = {
        'interval_start': 2,
        'interval_step': 2,
        'interval_max': 32,
    }

    def __init__(self, **kwargs):
        super(QueuePublisherMixin, self).__init__(**kwargs)
        self.__flushing = False
        self.__fifo = FifoQueue()
        try:
            self._register_thread('publisher_queue', self.__run, on_stop=self.__stop)
        except AttributeError:
            pass

    def __run(self, ready_flag, **kwargs):
        ready_flag.set()
        publisher_args = self.queue_publisher_args

        with Connection(self.url, transport_options=publisher_args) as connection:
            while not self.is_stopping or self.__flushing:
                try:
                    payload, headers, routing_key = self.__fifo.get()
                except (Empty, TypeError):
                    self.__flushing = False
                    continue
                try:
                    with self.Producer(connection, **publisher_args) as publish:
                        publish(payload, headers=headers, routing_key=routing_key)
                except OperationalError as exc:
                    self.log.error('Publishing queue error: %s', exc, exc_info=1)
                else:
                    self.__fifo.task_done()

    def __stop(self):
        self.__flushing = True
        self.__fifo.put(None)

    def publish_soon(self, event, headers=None, routing_key=None, payload=None):
        headers, payload, routing_key = self._marshal(
            event, headers, payload, routing_key
        )
        self.__fifo.put((payload, headers, routing_key))
Code Example #11
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        # http://stackoverflow.com/a/20598791
        self.daemon = False
        self.callback = callback
        self.name = "EVENT-QUEUE"
        self.stop = threading.Event()

    def put(self, event_type):
        self.queue.put(event_type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    event_type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(event_type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    event_type = None

            # exiting thread
            self.stop.clear()
        except Exception as e:
            logger.log(
                u"Exception generated in thread " + self.name + ": " + ex(e),
                logger.ERROR)
            logger.log(repr(traceback.format_exc()), logger.DEBUG)

    # System Events
    class SystemEvent(Event):
        RESTART = "RESTART"
        SHUTDOWN = "SHUTDOWN"
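
Usage note: a minimal sketch of driving this event thread; Event here is the project's own base class, and stop.set() ends the loop as in run() above.

def handle(event_type):
    print('handling', event_type)

events = Events(handle)
events.start()
events.put(Events.SystemEvent.RESTART)
# ... later, to shut the thread down:
events.stop.set()
events.join()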
Code Example #12
File: event_queue.py Project: pymedusa/SickRage
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        # http://stackoverflow.com/a/20598791
        self.daemon = False
        self.callback = callback
        self.name = 'EVENT-QUEUE'
        self.stop = threading.Event()

    def put(self, event_type):
        self.queue.put(event_type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    event_type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(event_type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    event_type = None

            # exiting thread
            self.stop.clear()
        except Exception as error:
            log.error(u'Exception generated in thread %s: %s',
                      self.name, ex(error))
            log.debug(repr(traceback.format_exc()))

    # System Events
    class SystemEvent(Event):
        RESTART = 'RESTART'
        SHUTDOWN = 'SHUTDOWN'
Code Example #13
File: fluentd.py Project: voidabhi/flask
class Fluentd(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()
        tag_label = app.config.get('EVENT_TAG_PREFIX', 'flask.fluentd')
        self._sender = sender.FluentSender(tag_label)

    def init_app(self, app):
        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, pair):
        tag, evt = pair
        self.queue.put((tag, evt))

    def send_events(self, exception):
        """
        Makes a best effort to send all the events pushed during a
        request, but may miss some.
        """
        pumping = True
        while pumping:
            try:
                tag, evt = self.queue.get_nowait()
                self._sender.emit(tag, evt)
                self.queue.task_done()
            except Empty:
                pumping = False

        return exception
Code Example #14
File: event_queue.py Project: KraXed112/SickRage
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        self.daemon = True
        self.callback = callback
        self.name = "EVENT-QUEUE"
        self.stop = threading.Event()

    def put(self, type):
        self.queue.put(type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    type = None

            # exiting thread
            self.stop.clear()
        except Exception as e:
            logger.log("Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR)
            logger.log(repr(traceback.format_exc()), logger.DEBUG)

    # System Events
    class SystemEvent(Event):
        RESTART = "RESTART"
        SHUTDOWN = "SHUTDOWN"
Code Example #15
class JobWorker(threading.Thread):
    def __init__(self, cb, sensor_id, result_queue):
        super(JobWorker, self).__init__()
        self.cb = cb
        self.sensor_id = sensor_id
        self.job_queue = Queue()
        self.lr_session = None
        self.result_queue = result_queue

    def run(self):
        try:
            self.lr_session = self.cb.live_response.request_session(
                self.sensor_id)
            self.result_queue.put(WorkerStatus(self.sensor_id, status="ready"))

            while True:
                work_item = self.job_queue.get(block=True)
                if not work_item:
                    self.job_queue.task_done()
                    return

                self.run_job(work_item)
                self.result_queue.put(CompletionNotification(self.sensor_id))
                self.job_queue.task_done()
        except Exception as e:
            self.result_queue.put(
                WorkerStatus(self.sensor_id, status="error", exception=e))
        finally:
            if self.lr_session:
                self.lr_session.close()
            self.result_queue.put(
                WorkerStatus(self.sensor_id, status="exiting"))

    def run_job(self, work_item):
        try:
            work_item.future.set_result(work_item.fn(self.lr_session))
        except Exception as e:
            work_item.future.set_exception(e)
Code Example #16
class CrawlerExecutionQueue(object):
    """Queue and execute operations in a separate thread when celery is
    running in eager mode.
    """
    def __init__(self):
        self.queue = Queue()

    def queue_operation(self, context, data):
        if not hasattr(self, 'worker'):
            self.worker = Thread(target=self.execute_crawler)
            self.worker.setDaemon(True)
            self.worker.start()
        self.queue.put((context, data))

    def execute_crawler(self):
        while True:
            context, data = self.queue.get()
            context.execute(data)
            self.queue.task_done()

    @property
    def is_empty(self):
        return not self.queue.unfinished_tasks
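
Usage note: a minimal sketch; context is expected to expose an execute(data) method, as in the worker loop above, and context/data come from the caller.

import time

crawler_queue = CrawlerExecutionQueue()
crawler_queue.queue_operation(context, data)
while not crawler_queue.is_empty:   # wait for the worker thread to drain the queue
    time.sleep(0.1)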
Code Example #17
def listen():
    queue = Queue()

    def _consume(queue):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(('0.0.0.0', 6899))
        print('listening for sdb notifications on :6899...')
        while True:
            r, w, x = select.select([sock], [], [])
            for i in r:
                data = i.recv(1024)
                queue.put(data)

    worker = threading.Thread(target=_consume, args=(queue, ))
    worker.setDaemon(True)
    worker.start()

    orig_tty = termios.tcgetattr(sys.stdin)
    try:
        tty.setcbreak(sys.stdin.fileno())
        while True:
            try:
                port = queue.get(timeout=1)
                queue.task_done()
                if port == 'q':
                    break
                port = int(port)
                print('opening telnet session at port :%d...' % port)
                telnet(port).connect()
                print('listening for sdb notifications on :6899...')
            except Empty:
                pass
    except KeyboardInterrupt:
        print('got Ctrl-C')
        queue.put('q')
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_tty)
Code Example #18
File: TagGroup.py Project: scottwedge/CrossMgr
class TagGroup(object):
    '''
    Process groups of tag reads and return the best time estimated using quadratic regression.
    Stray reads are also detected if there is no quiet period for the tag.
    The first read time of each stray read is returned.
    '''
    def __init__(self):
        self.q = Queue()
        self.tagInfo = {}

    def add(self, antenna, tag, t, db):
        self.q.put((antenna, tag, t, db))

    def flush(self):
        # Process all waiting reads.
        while 1:
            try:
                antenna, tag, t, db = self.q.get(False)
            except Empty:
                break
            try:
                self.tagInfo[tag].add(antenna, t, db)
            except KeyError:
                self.tagInfo[tag] = TagGroupEntry(antenna, t, db)
            self.q.task_done()

    def getReadsStrays(self,
                       tNow=None,
                       method=QuadraticRegressionMethod,
                       antennaChoice=MostReadsChoice,
                       removeOutliers=True):
        '''
        Returns two lists:
            reads = [(tag1, t1, sampleSize1, antennaID1), (tag2, t2, sampleSize2, antennaID2), ...]
            strays = [(tagA, tFirstReadA), (tagB, tFirstReadB), ...]

        Each stray will be reported as a read the first time it is detected.
        '''
        self.flush()

        trNow = datetimeToTr(tNow or datetime.now())
        reads, strays = [], []
        toDelete = []

        for tag, tge in self.tagInfo.items():
            if trNow - tge.lastReadMax >= tQuiet:  # Tag has left read range.
                if not tge.isStray:
                    t, sampleSize, antennaID = tge.getBestEstimate(
                        method, antennaChoice, removeOutliers)
                    reads.append((tag, t, sampleSize, antennaID))
                toDelete.append(tag)
            elif tge.lastReadMax - tge.firstReadMin >= tStray:  # This is a stray.
                t = trToDatetime(tge.firstReadMin)
                if not tge.isStray:
                    tge.setStray()
                    reads.append(
                        (tag, t, 1, 0))  # Report stray first read time.
                strays.append((tag, t))

        for tag in toDelete:
            del self.tagInfo[tag]

        reads.sort(key=operator.itemgetter(1, 0))
        strays.sort(key=operator.itemgetter(1, 0))
        return reads, strays
Code Example #19
class TaskIO(object):
    """Object used to stream I/O between a
    running Mesos task and the local terminal.

    :param task: task ID
    :type task: str
    :param cmd: a command to launch inside the task's container
    :type cmd: str
    :param args: Additional arguments for the command
    :type args: str
    :param interactive: whether to attach STDIN of the current
                        terminal to the new command being launched
    :type interactive: bool
    :param tty: whether to allocate a tty for this command and attach
                the local terminal to it
    :type tty: bool
    """

    # The interval to send heartbeat messages to
    # keep persistent connections alive.
    HEARTBEAT_INTERVAL = 30
    HEARTBEAT_INTERVAL_NANOSECONDS = HEARTBEAT_INTERVAL * 1000000000

    def __init__(self,
                 task_id,
                 cmd=None,
                 args=None,
                 interactive=False,
                 tty=False):
        # Store relevant parameters of the call for later.
        self.cmd = cmd
        self.interactive = interactive
        self.tty = tty
        self.args = args

        # Create a client and grab a reference to the DC/OS master.
        client = DCOSClient()
        master = get_master(client)

        # Get the task and make sure its container was launched by the UCR.
        # Since task's containers are launched by the UCR by default, we want
        # to allow most tasks to pass through unchecked. The only exception is
        # when a task has an explicit container specified and it is not of type
        # "MESOS". Having a type of "MESOS" implies that it was launched by the
        # UCR -- all other types imply it was not.
        task_obj = master.task(task_id)
        if "container" in task_obj.dict():
            if "type" in task_obj.dict()["container"]:
                if task_obj.dict()["container"]["type"] != "MESOS":
                    raise DCOSException(
                        "This command is only supported for tasks"
                        " launched by the Universal Container Runtime (UCR).")

        # Get the URL to the agent running the task.
        if client._mesos_master_url:
            self.agent_url = client.slave_url(
                slave_id="",
                private_url=task_obj.slave().http_url(),
                path="api/v1")
        else:
            self.agent_url = client.slave_url(slave_id=task_obj.slave()['id'],
                                              private_url="",
                                              path="api/v1")

        # Grab a reference to the container ID for the task.
        self.parent_id = master.get_container_id(task_obj)

        # Generate a new UUID for the nested container
        # used to run commands passed to `task exec`.
        self.container_id = str(uuid.uuid4())

        # Set up a recordio encoder and decoder
        # for any incoming and outgoing messages.
        self.encoder = recordio.Encoder(
            lambda s: bytes(json.dumps(s, ensure_ascii=False), "UTF-8"))
        self.decoder = recordio.Decoder(
            lambda s: json.loads(s.decode("UTF-8")))

        # Set up queues to send messages between threads used for
        # reading/writing to STDIN/STDOUT/STDERR and threads
        # sending/receiving data over the network.
        self.input_queue = Queue()
        self.output_queue = Queue()

        # Set up an event to block attaching
        # input until attaching output is complete.
        self.attach_input_event = threading.Event()
        self.attach_input_event.clear()

        # Set up an event to block printing the output
        # until an attach input event has successfully
        # been established.
        self.print_output_event = threading.Event()
        self.print_output_event.clear()

        # Set up an event to block the main thread
        # from exiting until signaled to do so.
        self.exit_event = threading.Event()
        self.exit_event.clear()

        # Use a class variable to store exceptions thrown on
        # other threads and raise them on the main thread before
        # exiting.
        self.exception = None

    def run(self):
        """Run the helper threads in this class which enable streaming
        of STDIN/STDOUT/STDERR between the CLI and the Mesos Agent API.

        If a tty is requested, we take over the current terminal and
        put it into raw mode. We make sure to reset the terminal back
        to its original settings before exiting.
        """

        # Without a TTY.
        if not self.tty:
            try:
                self._start_threads()
                self.exit_event.wait()
            except Exception as e:
                self.exception = e

            if self.exception:
                raise self.exception
            return

        # With a TTY.
        if util.is_windows_platform():
            raise DCOSException(
                "Running with the '--tty' flag is not supported on windows.")

        if not sys.stdin.isatty():
            raise DCOSException(
                "Must be running in a tty to pass the '--tty flag'.")

        fd = sys.stdin.fileno()
        oldtermios = termios.tcgetattr(fd)

        try:
            if self.interactive:
                tty.setraw(fd, when=termios.TCSANOW)
                self._window_resize(signal.SIGWINCH, None)
                signal.signal(signal.SIGWINCH, self._window_resize)

            self._start_threads()
            self.exit_event.wait()
        except Exception as e:
            self.exception = e

        termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, oldtermios)

        if self.exception:
            raise self.exception

    def _thread_wrapper(self, func):
        """A wrapper around all threads used in this class

        If a thread throws an exception, it will unblock the main
        thread and save the exception in a class variable. The main
        thread will then rethrow the exception before exiting.

        :param func: The start function for the thread
        :type func: function
        """
        try:
            func()
        except Exception as e:
            self.exception = e
            self.exit_event.set()

    def _start_threads(self):
        """Start all threads associated with this class
        """
        if self.interactive:
            # Collects input from STDIN and puts
            # it in the input_queue as data messages.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._input_thread, ))
            thread.daemon = True
            thread.start()

            # Prepares heartbeat control messages and
            # puts them in the input queue at a specific
            # heartbeat interval.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._heartbeat_thread, ))
            thread.daemon = True
            thread.start()

            # Opens a persistent connection with the mesos agent and
            # feeds it both control and data messages from the input
            # queue via ATTACH_CONTAINER_INPUT messages.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._attach_container_input, ))
            thread.daemon = True
            thread.start()

        # Opens a persistent connection with a mesos agent, reads
        # data messages from it and feeds them to an output_queue.
        thread = threading.Thread(
            target=self._thread_wrapper,
            args=(self._launch_nested_container_session, ))
        thread.daemon = True
        thread.start()

        # Collects data messages from the output queue and writes
        # their content to STDOUT and STDERR.
        thread = threading.Thread(target=self._thread_wrapper,
                                  args=(self._output_thread, ))
        thread.daemon = True
        thread.start()

    def _launch_nested_container_session(self):
        """Sends a request to the Mesos Agent to launch a new
        nested container and attach to its output stream.
        The output stream is then sent back in the response.
        """

        message = {
            'type': "LAUNCH_NESTED_CONTAINER_SESSION",
            'launch_nested_container_session': {
                'container_id': {
                    'parent': self.parent_id,
                    'value': self.container_id
                },
                'command': {
                    'value': self.cmd,
                    'arguments': [self.cmd] + self.args,
                    'shell': False
                }
            }
        }

        if self.tty:
            message['launch_nested_container_session']['container'] = {
                'type': 'MESOS',
                'tty_info': {}
            }

        req_extra_args = {
            'stream': True,
            'headers': {
                'Content-Type': 'application/json',
                'Accept': 'application/recordio',
                'Message-Accept': 'application/json'
            }
        }

        response = http.post(self.agent_url,
                             data=json.dumps(message),
                             timeout=None,
                             **req_extra_args)

        self._process_output_stream(response)

    def _process_output_stream(self, response):
        """Gets data streamed over the given response and places the
        returned messages into our output_queue. Only expects to
        receive data messages.

        :param response: Response from an http post
        :type response: requests.models.Response
        """

        # Now that we are ready to process the output stream (meaning
        # our output connection has been established), allow the input
        # stream to be attached by setting an event.
        self.attach_input_event.set()

        # If we are running in interactive mode, wait to make sure that
        # our input connection succeeds before pushing any output to the
        # output queue.
        if self.interactive:
            self.print_output_event.wait()

        try:
            for chunk in response.iter_content(chunk_size=None):
                records = self.decoder.decode(chunk)

                for r in records:
                    if r.get('type') and r['type'] == 'DATA':
                        self.output_queue.put(r['data'])
        except Exception as e:
            raise DCOSException(
                "Error parsing output stream: {error}".format(error=e))

        self.output_queue.join()
        self.exit_event.set()

    def _attach_container_input(self):
        """Streams all input data (e.g. STDIN) from the client to the agent
        """
        def _initial_input_streamer():
            """Generator function yielding the initial ATTACH_CONTAINER_INPUT
            message for streaming. We have a separate generator for this so
            that we can attempt the connection once before committing to a
            persistent connection where we stream the rest of the input.

            :returns: A RecordIO encoded message
            """

            message = {
                'type': 'ATTACH_CONTAINER_INPUT',
                'attach_container_input': {
                    'type': 'CONTAINER_ID',
                    'container_id': {
                        'parent': self.parent_id,
                        'value': self.container_id
                    }
                }
            }

            yield self.encoder.encode(message)

        def _input_streamer():
            """Generator function yielding ATTACH_CONTAINER_INPUT
            messages for streaming. It yields the _initial_input_streamer()
            message, followed by messages from the input_queue on each
            subsequent call.

            :returns: A RecordIO encoded message
            """

            yield next(_initial_input_streamer())

            while True:
                record = self.input_queue.get()
                if not record:
                    break
                yield record

        req_extra_args = {
            'headers': {
                'Content-Type': 'application/recordio',
                'Message-Content-Type': 'application/json',
                'Accept': 'application/json',
                'Connection': 'close',
                'Transfer-Encoding': 'chunked'
            }
        }

        # Ensure we don't try to attach our input to a container that isn't
        # fully up and running by waiting until the
        # `_process_output_stream` function signals us that it's ready.
        self.attach_input_event.wait()

        # Send an initial "Test" message to ensure that we are able to
        # establish a connection with the agent. If we aren't we will throw
        # an exception and break out of this thread. However, in cases where
        # we receive a 500 response from the agent, we actually want to
        # continue without throwing an exception. A 500 error indicates that
        # we can't connect to the container because it has already finished
        # running. In that case we continue running to allow the output queue
        # to be flushed.
        try:
            http.post(self.agent_url,
                      data=_initial_input_streamer(),
                      **req_extra_args)
        except DCOSHTTPException as e:
            if not e.response.status_code == 500:
                raise e

        # If we succeeded with that connection, unblock process_output_stream()
        # from sending output data to the output thread.
        self.print_output_event.set()

        # Begin streaming the input.
        http.post(self.agent_url,
                  data=_input_streamer(),
                  timeout=None,
                  **req_extra_args)

    def _input_thread(self):
        """Reads from STDIN and places a message
        with that data onto the input_queue.
        """

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'DATA',
                    'data': {
                        'type': 'STDIN',
                        'data': ''
                    }
                }
            }
        }

        for chunk in iter(partial(os.read, sys.stdin.fileno(), 1024), b''):
            message['attach_container_input']['process_io']['data'][
                'data'] = base64.b64encode(chunk).decode('utf-8')

            self.input_queue.put(self.encoder.encode(message))

        # Push an empty string to indicate EOF to the server and push
        # 'None' to signal that we are done processing input.
        message['attach_container_input']['process_io']['data']['data'] = ''
        self.input_queue.put(self.encoder.encode(message))
        self.input_queue.put(None)

    def _output_thread(self):
        """Reads from the output_queue and writes the data
        to the appropriate STDOUT or STDERR.
        """

        while True:
            # Get a message from the output queue and decode it.
            # Then write the data to the appropriate stdout or stderr.
            output = self.output_queue.get()
            if not output.get('data'):
                raise DCOSException("Error no 'data' field in output message")

            data = output['data']
            data = base64.b64decode(data.encode('utf-8'))

            if output.get('type') and output['type'] == 'STDOUT':
                sys.stdout.buffer.write(data)
                sys.stdout.flush()
            elif output.get('type') and output['type'] == 'STDERR':
                sys.stderr.buffer.write(data)
                sys.stderr.flush()
            else:
                raise DCOSException("Unsupported data type in output stream")

            self.output_queue.task_done()

    def _heartbeat_thread(self):
        """Generates a heartbeat message to send over the
        ATTACH_CONTAINER_INPUT stream every `interval` seconds and
        inserts it in the input queue.
        """

        interval = self.HEARTBEAT_INTERVAL
        nanoseconds = self.HEARTBEAT_INTERVAL_NANOSECONDS

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'CONTROL',
                    'control': {
                        'type': 'HEARTBEAT',
                        'heartbeat': {
                            'interval': {
                                'nanoseconds': nanoseconds
                            }
                        }
                    }
                }
            }
        }

        while True:
            self.input_queue.put(self.encoder.encode(message))
            time.sleep(interval)

    def _window_resize(self, signum, frame):
        """Signal handler for SIGWINCH.

        Generates a message with the current dimensions of the
        terminal and puts it in the input_queue.

        :param signum: the signal number being handled
        :type signum: int
        :param frame: current stack frame
        :type frame: frame
        """

        # Determine the size of our terminal, and create the message to be sent
        rows, columns = os.popen('stty size', 'r').read().split()

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'CONTROL',
                    'control': {
                        'type': 'TTY_INFO',
                        'tty_info': {
                            'window_size': {
                                'rows': int(rows),
                                'columns': int(columns)
                            }
                        }
                    }
                }
            }
        }

        self.input_queue.put(self.encoder.encode(message))
Code Example #20
class CallbackWorkerPool(object):
    """
    A Worker Pool implementation that creates a number of predefined threads
    used for invoking Session callbacks.
    """
    def __init__(self, write_queue=None, size=1):
        """
        Creates a Callback Worker Pool for use in invoking Session Callbacks
        when data is received by a push client.

        :param write_queue: Queue used for queueing up socket write events
            for when a payload message is received and processed.
        :param size: The number of worker threads to invoke callbacks.
        """
        # Used to queue up PublishMessageReceived events to be sent back to
        # the iDigi server.
        self._write_queue = write_queue
        # Used to queue up sessions and data to callback with.
        self._queue = Queue(size)
        # Number of workers to create.
        self.size = size
        self.log = logging.getLogger(
            '{}.callback_worker_pool'.format(__name__))

        for _ in range(size):
            worker = Thread(target=self._consume_queue)
            worker.daemon = True
            worker.start()

    def _consume_queue(self):
        """
        Continually blocks until data is on the internal queue, then calls
        the session's registered callback and sends a PublishMessageReceived
        if callback returned True.
        """
        while True:
            session, block_id, raw_data = self._queue.get()
            data = json.loads(raw_data.decode('utf-8'))  # decode as JSON
            try:
                result = session.callback(data)
                if result is None:
                    self.log.warn(
                        "Callback %r returned None, expected boolean.  Messages "
                        "are not marked as received unless True is returned",
                        session.callback)
                elif result:
                    # Send a Successful PublishMessageReceived with the
                    # block id sent in request
                    if self._write_queue is not None:
                        response_message = struct.pack(
                            '!HHH', PUBLISH_MESSAGE_RECEIVED, block_id, 200)
                        self._write_queue.put(
                            (session.socket, response_message))
            except Exception as exception:
                self.log.exception(exception)

            self._queue.task_done()

    def queue_callback(self, session, block_id, data):
        """
        Queues up a callback event to occur for a session with the given
        payload data.  Will block if the queue is full.

        :param session: the session with a defined callback function to call.
        :param block_id: the block_id of the message received.
        :param data: the data payload of the message received.
        """
        self._queue.put((session, block_id, data))
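
Usage note: a minimal sketch, assuming session objects carry the callback and socket the pool expects; the block id and JSON payload are arbitrary.

write_queue = Queue()
pool = CallbackWorkerPool(write_queue=write_queue, size=4)
pool.queue_callback(session, block_id=7, data=b'{"temp": 21}')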
Code Example #21
File: lsf.py Project: aef-/toil
class LSFBatchSystem(BatchSystemSupport):
    """
    The interface for running jobs on lsf, runs all the jobs you give it as they come in,
    but in parallel.
    """
    @classmethod
    def supportsWorkerCleanup(cls):
        return False

    @classmethod
    def supportsHotDeployment(cls):
        return False

    def shutdown(self):
        pass

    def __init__(self, config, maxCores, maxMemory, maxDisk):
        super(LSFBatchSystem, self).__init__(config, maxCores, maxMemory,
                                             maxDisk)
        self.lsfResultsFile = self._getResultsFileName(config.jobStore)
        #Reset the job queue and results (initially, we do this again once we've killed the jobs)
        # We lose any previous state in this file, and ensure the file's existence.
        self.lsfResultsFileHandle = open(self.lsfResultsFile, 'w')
        self.lsfResultsFileHandle.close()
        self.currentjobs = set()
        self.obtainSystemConstants()
        self.jobIDs = dict()
        self.lsfJobIDs = dict()
        self.nextJobID = 0

        self.newJobsQueue = Queue()
        self.updatedJobsQueue = Queue()
        self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self)
        self.worker.setDaemon(True)
        self.worker.start()

    def __del__(self):
        # Close the file handle associated with the results file.
        self.lsfResultsFileHandle.close()

    def issueBatchJob(self, jobNode):
        jobID = self.nextJobID
        self.nextJobID += 1
        self.currentjobs.add(jobID)
        bsubline = prepareBsub(jobNode.cores, jobNode.memory,
                               jobNode.jobName) + [jobNode.command]
        self.newJobsQueue.put((jobID, bsubline))
        time.sleep(5)
        logger.debug("Issued the job command: %s with job id: %s " %
                     (jobNode.command, str(jobID)))
        return jobID

    def getLsfID(self, jobID):
        if jobID not in self.lsfJobIDs:
            raise RuntimeError("Unknown jobID, could not be converted")

        (job, task) = self.lsfJobIDs[jobID]
        if task is None:
            return str(job)
        else:
            return str(job) + "." + str(task)

    def killBatchJobs(self, jobIDs):
        """Kills the given job IDs.
        """
        for jobID in jobIDs:
            logger.debug("DEL: " + str(self.getLsfID(jobID)))
            self.currentjobs.remove(jobID)
            process = subprocess.Popen(["bkill", self.getLsfID(jobID)])
            del self.jobIDs[self.lsfJobIDs[jobID]]
            del self.lsfJobIDs[jobID]

        toKill = set(jobIDs)
        while len(toKill) > 0:
            for jobID in list(toKill):
                if getjobexitcode(self.lsfJobIDs[jobID]) is not None:
                    toKill.remove(jobID)

            if len(toKill) > 0:
                logger.warn(
                    "Tried to kill some jobs, but something happened and they are still going, "
                    "so I'll try again")
                time.sleep(5)

    def getIssuedBatchJobIDs(self):
        """A list of jobs (as jobIDs) currently issued (may be running, or maybe 
        just waiting).
        """
        return self.currentjobs

    def getRunningBatchJobIDs(self):
        """Gets a map of jobs (as jobIDs) currently running (not just waiting) 
        and a how long they have been running for (in seconds).
        """
        times = {}
        currentjobs = set()
        for x in self.getIssuedBatchJobIDs():
            if x in self.lsfJobIDs:
                currentjobs.add(self.lsfJobIDs[x])
            else:
                #not yet started
                pass
        process = subprocess.Popen(["bjobs"], stdout=subprocess.PIPE)

        for curline in process.stdout:
            items = curline.strip().split()
            if (len(items) > 9 and
                (items[0]) in currentjobs) and items[2] == 'RUN':
                jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
                jobstart = jobstart + ' ' + items[9]
                jobstart = time.mktime(
                    time.strptime(jobstart, "%b/%d/%Y %H:%M"))
                times[self.jobIDs[(items[0])]] = time.time() - jobstart
        return times

    def getUpdatedBatchJob(self, maxWait):
        try:
            sgeJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
            self.updatedJobsQueue.task_done()
            jobID, retcode = (self.jobIDs[sgeJobID], retcode)
            self.currentjobs -= {self.jobIDs[sgeJobID]}
        except Empty:
            pass
        else:
            return jobID, retcode, None

    def getWaitDuration(self):
        """We give parasol a second to catch its breath (in seconds)
        """
        #return 0.0
        return 15

    @classmethod
    def getRescueBatchJobFrequency(cls):
        """Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
        making it expensive. We allow this every 10 minutes..
        """
        return 1800

    def obtainSystemConstants(self):
        p = subprocess.Popen(["lshosts"],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)

        line = p.stdout.readline()
        items = line.strip().split()
        num_columns = len(items)
        cpu_index = None
        mem_index = None
        for i in range(num_columns):
            if items[i] == 'ncpus':
                cpu_index = i
            elif items[i] == 'maxmem':
                mem_index = i

        if cpu_index is None or mem_index is None:
            raise RuntimeError(
                "lshosts command does not return ncpus or maxmem columns")

        p.stdout.readline()

        self.maxCPU = 0
        self.maxMEM = MemoryString("0")
        for line in p.stdout:
            items = line.strip().split()
            if len(items) < num_columns:
                raise RuntimeError(
                    "lshosts output has a varying number of columns")
            if items[cpu_index] != '-' and int(items[cpu_index]) > self.maxCPU:
                self.maxCPU = int(items[cpu_index])
            if items[mem_index] != '-' and MemoryString(
                    items[mem_index]) > self.maxMEM:
                self.maxMEM = MemoryString(items[mem_index])

        if self.maxCPU == 0 or self.maxMEM == MemoryString("0"):
            raise RuntimeError("lshosts returns null ncpus or maxmem info")
        logger.debug("Got the maxCPU: %s and the maxMEM: %s",
                     self.maxCPU, self.maxMEM)
コード例 #24
0
ファイル: thawab-lite.py プロジェクト: ojuba-org/thawab-lite
class MyApp(object):
    instances = 0

    def __init__(self, filename=None):
        MyApp.instances += 1
        self.file_dlg = None
        self.info = None
        self.page_id = 0
        self.has_hadith_numbers = False
        self.has_ayat = False
        self.keep_running = True
        self.queue = Queue()
        thread = Thread(target=self.worker_loop)
        thread.daemon = True
        thread.start()

        builder = Gtk.Builder()
        builder.add_from_file(GLADE_FN)
        builder.connect_signals(self)
        self.window = builder.get_object("main_win")
        self.header = builder.get_object("header")
        self.body = builder.get_object("body")
        self.toc_store = builder.get_object("toc_store")
        self.toc_tree = builder.get_object("toc_tree")
        self.search_entry = builder.get_object("search_entry")
        self.popover1 = builder.get_object("popover1")
        self.search_btn = builder.get_object("search_btn")
        self.page_btn = builder.get_object("page_btn")
        self.hadith_btn = builder.get_object("hadith_btn")
        self.aya_btn = builder.get_object("aya_btn")
        self.popover1_box = builder.get_object("popover1_box")
        self.search_menu_btns = [
            self.search_btn, self.page_btn, self.hadith_btn, self.aya_btn
        ]
        self.window.show()
        if filename is not None:
            self.open(filename)

    def on_search_entry_key_release_event(self, w, event):
        __, keyval = event.get_keyval()
        if keyval == Gdk.KEY_Down or keyval == Gdk.KEY_KP_Down:
            self.update_search()
            ch = self.popover1_box.get_children()
            if ch: ch[0].grab_focus()
        elif keyval == Gdk.KEY_Escape:
            self.popover1.popdown()

    def on_search_entry_focus_out_event(self, w, event):
        if not self.popover1.get_focus_child(): self.popover1.popdown()

    def update_search(self):
        txt = touni(self.search_entry.get_text()).strip()
        if not txt:
            self.popover1.popdown()
            return
        entered_i = try_int(txt)
        m = sura_aya_re.match(txt) if self.has_ayat else None
        if m:
            for btn in self.search_menu_btns:
                btn.hide()
            self.aya_btn.show()
            self.aya_btn.set_label(u"سورة {} آية {}".format(
                m.group(1), m.group(2)))
        elif entered_i is None:
            for btn in self.search_menu_btns:
                btn.hide()
            self.search_btn.show()
            self.search_btn.set_label(u"البحث عن [{}]".format(txt))
            self.popover1_box.reorder_child(self.search_btn, 0)
        else:
            self.search_btn.show()
            self.page_btn.show()
            self.hadith_btn.show()
            if self.has_ayat: self.aya_btn.show()
            else: self.aya_btn.hide()
            self.page_btn.set_label(u"صفحة [{}]".format(txt))
            self.hadith_btn.set_label(u"حديث رقم [{}]".format(txt))
            self.aya_btn.set_label(u"سورة رقم [{}]".format(txt))
            if self.has_hadith_numbers:
                self.popover1_box.reorder_child(self.hadith_btn, 0)
                self.popover1_box.reorder_child(self.page_btn, 1)
            else:
                self.popover1_box.reorder_child(self.page_btn, 0)
                self.popover1_box.reorder_child(self.hadith_btn, 1)
            self.search_btn.set_label(u"البحث عن [{}]".format(txt))
        self.popover1.set_relative_to(self.search_entry)
        self.popover1.popup()

    def on_search_entry_changed(self, w):
        self.update_search()

    def on_info_btn_clicked(self, w):
        self.goto_page(0)

    def on_previous_btn_clicked(self, w):
        self.goto_page(max(0, self.page_id - 1))

    def on_next_btn_clicked(self, w):
        self.goto_page(self.page_id + 1)

    def on_search_entry_activate(self, w):
        self.popover1.popdown()
        text = w.get_text()
        page_id = try_int(text)
        if page_id is not None: self.goto_page(page_id)

    def worker_loop(self):
        while self.keep_running:
            try:
                a = self.queue.get(timeout=10)
            except Empty:
                continue
            cb_name, kwargs = a
            cb = getattr(self, cb_name)
            if not cb:
                self.queue.task_done()
                continue
            try:
                cb(**kwargs)
            except Exception as e:
                logger.error("ERROR: %r", e)
            self.queue.task_done()
        logger.info("worker thread exited")

    def open(self, filename):
        self.filename = filename
        cols = get_table_col(filename, 'Main')
        self.db = db = pyodbc.connect(
            tob('DRIVER=libmdbodbc.so;DBQ={}'.format(filename)),
            readonly=True,
            ansi=True,
            unicode_results=False,
        )
        cursor = db.cursor()
        cursor.execute(u'SELECT {} FROM Main'.format(','.join(cols)))
        self.info = row_to_dict(cursor.fetchone(), cols)
        self.header.set_title(self.info['Bk'])
        self.goto_page(0)
        #cols = cursor.columns('Main') # does not work
        self.id = int(self.info['BkId'])
        cursor = db.cursor()
        tbl_body = 'b{}'.format(self.info['BkId'])
        cols = get_table_col(self.filename, tbl_body)
        if 'Hno' in cols:
            # sample 100 pages to check whether the book has hadith numbers (Hno)
            cursor.execute(u'SELECT Hno FROM {}'.format(tbl_body))
            for i in range(100):
                r = cursor.fetchone()
                if r is None: break
                if try_int(r[0]) is not None:
                    print("has Hno")
                    self.has_hadith_numbers = True
                    break
        if 'sora' in cols and 'aya' in cols:
            # sample 100 pages to check whether the book has Quran references (ayat)
            cursor.execute(u'SELECT sora, aya FROM {}'.format(tbl_body))
            for i in range(100):
                r = cursor.fetchone()
                if r is None: break
                if try_int(r[0]) is not None and try_int(r[1]) is not None:
                    print("has ayat")
                    self.has_ayat = True
                    break
        tbl_toc = 't{}'.format(self.info['BkId'])
        cols = get_table_col(filename, tbl_toc)
        cursor.execute(u'SELECT {} FROM {}'.format(','.join(cols), tbl_toc))
        rows = [row_to_dict(row, cols) for row in cursor.fetchall()]
        rows.sort(key=lambda r: (r['id'], r['sub']))

        def cb(r):
            t1 = time.time()
            parents_stack = [None]
            levels_stack = []
            loop_it = iter(r)
            while True:
                chunk = list(islice(loop_it, 1000))
                if not chunk:
                    break
                # NB: iterate the chunk, not the whole list, otherwise every
                # chunk would re-append the entire table of contents.
                for row in chunk:
                    level = row['lvl']
                    while levels_stack and levels_stack[-1] >= level:
                        levels_stack.pop()
                        parents_stack.pop()
                    it = self.toc_store.append(parents_stack[-1], (
                        row['tit'],
                        row['lvl'],
                        row['sub'],
                        row['id'],
                    ))
                    parents_stack.append(it)
                    levels_stack.append(level)
                Gtk.main_iteration_do(False)
            logger.info('building toc took %r', time.time() - t1)

        # it's a store, not UI, so we might be able to edit it directly
        # cb(rows)
        # if not then it's added like this
        GLib.idle_add(cb, rows)

    def goto_page(self, page_id, move_toc=False):
        if self.info is None: return
        self.page_id = page_id
        if page_id == 0:
            text = self.info['Betaka']
        else:
            tbl_body = 'b{}'.format(self.info['BkId'])
            cols = get_table_col(self.filename, tbl_body)
            cursor = self.db.cursor()
            cursor.execute(u'SELECT {} FROM {} WHERE id={}'.format(
                ','.join(cols), tbl_body, page_id))
            self.page = row_to_dict(cursor.fetchone(), cols)
            text = self.page['nass']
        GLib.idle_add(lambda: self.body.get_buffer().set_text(text))

    def on_window_destroy(self, w):
        self.keep_running = False
        MyApp.instances -= 1
        logger.info("running instances = %r", MyApp.instances)
        if MyApp.instances == 0:
            Gtk.main_quit()

    def on_toc_tree_selection_changed(self, w):
        s, i = w.get_selected()
        # can be accessed in many ways row=tuple(s[i]) or id=s[i][3] or id=s.get_value(i, 3)
        self.queue.put((
            'goto_page',
            {
                'page_id': s[i][3]
            },
        ))

    def on_open_btn_clicked(self, w):
        filename = get_filename(self.window)
        if filename:
            if self.info is None:
                self.queue.put((
                    'open',
                    {
                        'filename': filename
                    },
                ))
            else:
                spawn_clone(filename)
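
The worker_loop above is a small name-based dispatcher: UI handlers enqueue
``(method_name, kwargs)`` tuples, and a daemon thread resolves each name with
``getattr`` so slow database work stays off the GTK main loop. The same pattern in a
self-contained sketch (Python 3 names; the class and method are invented):

from queue import Empty, Queue
from threading import Thread

class Dispatcher(object):
    def __init__(self):
        self.queue = Queue()
        self.keep_running = True
        t = Thread(target=self._loop)
        t.daemon = True
        t.start()

    def _loop(self):
        while self.keep_running:
            try:
                name, kwargs = self.queue.get(timeout=1)
            except Empty:
                continue  # wake up periodically to re-check keep_running
            try:
                cb = getattr(self, name, None)
                if cb is not None:
                    cb(**kwargs)
            finally:
                self.queue.task_done()

    def goto_page(self, page_id):
        print("loading page", page_id)

d = Dispatcher()
d.queue.put(('goto_page', {'page_id': 3}))
d.queue.join()  # block until the worker has processed the request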
コード例 #25
0
ファイル: util.py プロジェクト: Cue/scales
class GraphiteReporter(threading.Thread):
  """A graphite reporter thread."""

  def __init__(self, host, port, maxQueueSize=10000):
    """Connect to a Graphite server on host:port."""
    threading.Thread.__init__(self)

    self.host, self.port = host, port
    self.sock = None
    self.queue = Queue()
    self.maxQueueSize = maxQueueSize
    self.daemon = True


  def run(self):
    """Run the thread."""
    while True:
      try:
        try:
          # shutdown() enqueues a None sentinel; unpacking it raises
          # TypeError, which is the signal to exit the loop.
          name, value, valueType, stamp = self.queue.get()
        except TypeError:
          break
        self.log(name, value, valueType, stamp)
      finally:
        self.queue.task_done()


  def connect(self):
    """Connects to the Graphite server if not already connected."""
    if self.sock is not None:
      return
    backoff = 0.01
    while True:
      try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        sock.connect((self.host, self.port))
        self.sock = sock
        return
      except socket.error:
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)


  def disconnect(self):
    """Disconnect from the Graphite server if connected."""
    if self.sock is not None:
      try:
        self.sock.close()
      except socket.error:
        pass
      finally:
        self.sock = None


  def _sendMsg(self, msg):
    """Send a line to graphite. Retry with exponential backoff."""
    if not self.sock:
      self.connect()
    if not isinstance(msg, binary_type):
      msg = msg.encode("UTF-8")

    backoff = 0.001
    while True:
      try:
        self.sock.sendall(msg)
        break
      except socket.error:
        log.warning('Graphite connection error', exc_info = True)
        self.disconnect()
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)
        self.connect()


  def _sanitizeName(self, name):
    """Sanitize a metric name."""
    return name.replace(' ', '-')


  def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value. The value type may be 'value',
    'count', or None."""
    if isinstance(value, float):
      form = "%s%s %2.2f %d\n"
    else:
      form = "%s%s %s %d\n"

    if valueType is not None and len(valueType) > 0 and valueType[0] != '.':
      valueType = '.' + valueType

    if not stamp:
      stamp = time.time()

    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))


  def enqueue(self, name, value, valueType=None, stamp=None):
    """Enqueue a call to log."""
    # If queue is too large, refuse to log.
    if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
      return
    # Stick arguments into the queue
    self.queue.put((name, value, valueType, stamp))


  def flush(self):
    """Block until all stats have been sent to Graphite."""
    self.queue.join()


  def shutdown(self):
    """Shut down the background thread."""
    self.queue.put(None)
    self.flush()
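
A hedged usage sketch for the reporter above: start the thread once, enqueue from
anywhere, and shut down on exit. The host and port are invented, and the final flush
blocks until a Graphite endpoint actually accepts the data:

reporter = GraphiteReporter('graphite.example.com', 2003)
reporter.start()                                  # begin draining the queue
reporter.enqueue('app.requests', 42, valueType='count')
reporter.enqueue('app.latency', 0.137)
reporter.shutdown()                               # enqueues the None sentinel, then flushes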
コード例 #26
0
class TCPClientManager(object):
    """A Client for the 'Push' feature in Device Cloud"""

    def __init__(self, conn, secure=True, ca_certs=None, workers=1):
        """
        Arbitrator for multiple TCP Client Sessions

        :param conn: The :class:`devicecloud.DeviceCloudConnection` to use
        :param secure: Whether or not to create a secure SSL wrapped session.
        :param ca_certs: Path to a file containing Certificates.
            If not provided, the devicecloud.crt file provided with the module will
            be used.  In most cases, the devicecloud.crt file should be acceptable.
        :param workers: Number of workers threads to process callback calls.
        """
        self._conn = conn
        self._secure = secure
        self._ca_certs = ca_certs

        # A dict mapping Sockets to their PushSessions
        self.sessions = {}
        # The IO thread monitors sockets and consumes incoming data.
        self._io_thread = None
        # Writer thread is used to send data on sockets.
        self._writer_thread = None
        # Write queue is used to queue up data to write to sockets.
        self._write_queue = Queue()
        # A pool that monitors callback events and invokes them.
        self._callback_pool = CallbackWorkerPool(self._write_queue, size=workers)

        self.closed = False
        self.log = logging.getLogger(__name__)

    @property
    def hostname(self):
        return self._conn.hostname

    @property
    def username(self):
        return self._conn.username

    @property
    def password(self):
        return self._conn.password

    def _restart_session(self, session):
        """Restarts and re-establishes session

        :param session: The session to restart
        """
        # remove old session key, if socket is None, that means the
        # session was closed by user and there is no need to restart.
        if session.socket is not None:
            self.log.info("Attempting restart session for Monitor Id %s."
                          % session.monitor_id)
            del self.sessions[session.socket.fileno()]
            session.stop()
            session.start()
            self.sessions[session.socket.fileno()] = session

    def _writer(self):
        """
        Indefinitely checks the writer queue for data to write
        to socket.
        """
        while not self.closed:
            try:
                sock, data = self._write_queue.get(timeout=0.1)
                self._write_queue.task_done()
                sock.send(data)
            except Empty:
                pass  # nothing to write after timeout
            except socket.error as err:
                if err.errno == errno.EBADF:
                    self._clean_dead_sessions()

    def _clean_dead_sessions(self):
        """
        Traverses sessions to determine if any sockets
        were removed (indicates a stopped session).
        In these cases, remove the session.
        """
        for sck in list(self.sessions.keys()):
            session = self.sessions[sck]
            if session.socket is None:
                del self.sessions[sck]

    def _select(self):
        """
        While the client is not marked as closed, performs a socket select
        on all PushSession sockets.  If any data is received, parses and
        forwards it on to the callback function.  If the callback is
        successful, a PublishMessageReceived message is sent.
        """
        try:
            while not self.closed:
                try:
                    inputready = select.select(self.sessions.keys(), [], [], 0.1)[0]
                    for sock in inputready:
                        session = self.sessions[sock]
                        sck = session.socket

                        if sck is None:
                            # Socket has since been deleted, continue
                            continue

                        # If no defined message length, nothing has been
                        # consumed yet, parse the header.
                        if session.message_length == 0:
                            # Read header information before receiving rest of
                            # message.
                            response_type = _read_msg_header(session)
                            if response_type == NO_DATA:
                                # No data could be read, assume socket closed.
                                if session.socket is not None:
                                    self.log.error("Socket closed for Monitor %s." % session.monitor_id)
                                    self._restart_session(session)
                                continue
                            elif response_type == INCOMPLETE:
                                # More Data to be read.  Continue.
                                continue
                            elif response_type != PUBLISH_MESSAGE:
                                self.log.warn("Response Type (%x) does not match PublishMessage (%x)"
                                              % (response_type, PUBLISH_MESSAGE))
                                continue

                        try:
                            if not _read_msg(session):
                                # Data not completely read, continue.
                                continue
                        except PushException as err:
                            # If the socket is None the session was closed
                            # deliberately; otherwise it died unexpectedly,
                            # so restart it.
                            session.data = six.b("")
                            session.message_length = 0

                            if session.socket is None:
                                del self.sessions[sck]
                            else:
                                self.log.exception(err)
                                self._restart_session(session)
                            continue

                        # We received full payload,
                        # clear session data and parse it.
                        data = session.data
                        session.data = six.b("")
                        session.message_length = 0
                        block_id = struct.unpack('!H', data[0:2])[0]
                        compression = struct.unpack('!B', data[4:5])[0]
                        payload = data[10:]

                        if compression == 0x01:
                            # Data is compressed, uncompress it.
                            payload = zlib.decompress(payload)

                        # Enqueue payload into a callback queue to be
                        # invoked
                        self._callback_pool.queue_callback(session, block_id, payload)
                except select.error as err:
                    # Evaluate sessions if we get a bad file descriptor, if
                    # socket is gone, delete the session.
                    if err.args[0] == errno.EBADF:
                        self._clean_dead_sessions()
                except Exception as err:
                    self.log.exception(err)
        finally:
            for session in self.sessions.values():
                if session is not None:
                    session.stop()

    def _init_threads(self):
        """Initializes the IO and Writer threads"""
        if self._io_thread is None:
            self._io_thread = Thread(target=self._select)
            self._io_thread.start()

        if self._writer_thread is None:
            self._writer_thread = Thread(target=self._writer)
            self._writer_thread.start()

    def create_session(self, callback, monitor_id):
        """
        Creates and returns a PushSession instance based on the input monitor
        and callback.  When data is received, callback will be invoked.
        If monitor_id is not specified, an Exception is thrown.

        :param callback: Callback function to call when PublishMessage
            messages are received. Expects 1 argument which will contain the
            payload of the pushed message.  Additionally, expects
            function to return True if callback was able to process
            the message, False or None otherwise.
        :param monitor_id: The id of the Monitor, will be queried
            to understand parameters of the monitor.
        """
        self.log.info("Creating Session for Monitor %s." % monitor_id)
        session = SecurePushSession(callback, monitor_id, self, self._ca_certs) \
            if self._secure else PushSession(callback, monitor_id, self)

        session.start()
        self.sessions[session.socket.fileno()] = session

        self._init_threads()
        return session

    def stop(self):
        """Stops all session activity.

        Blocks until io and writer thread dies
        """
        if self._io_thread is not None:
            self.log.info("Waiting for I/O thread to stop...")
            self.closed = True
            self._io_thread.join()

        if self._writer_thread is not None:
            self.log.info("Waiting for Writer Thread to stop...")
            self.closed = True
            self._writer_thread.join()

        self.log.info("All worker threads stopped.")
コード例 #27
0
class CallbackWorkerPool(object):
    """
    A Worker Pool implementation that creates a number of predefined threads
    used for invoking Session callbacks.
    """

    def __init__(self, write_queue=None, size=1):
        """
        Creates a Callback Worker Pool for use in invoking Session Callbacks
        when data is received by a push client.

        :param write_queue: Queue used for queueing up socket write events
            for when a payload message is received and processed.
        :param size: The number of worker threads to invoke callbacks.
        """
        # Used to queue up PublishMessageReceived events to be sent back to
        # the iDigi server.
        self._write_queue = write_queue
        # Used to queue up sessions and data to callback with.
        self._queue = Queue(size)
        # Number of workers to create.
        self.size = size
        self.log = logging.getLogger('{}.callback_worker_pool'.format(__name__))

        for _ in range(size):
            worker = Thread(target=self._consume_queue)
            worker.daemon = True
            worker.start()

    def _consume_queue(self):
        """
        Continually blocks until data is on the internal queue, then calls
        the session's registered callback and sends a PublishMessageReceived
        if callback returned True.
        """
        while True:
            session, block_id, raw_data = self._queue.get()
            data = json.loads(raw_data.decode('utf-8'))  # decode as JSON
            try:
                result = session.callback(data)
                if result is None:
                    self.log.warn("Callback %r returned None, expected boolean.  Messages "
                                  "are not marked as received unless True is returned", session.callback)
                elif result:
                    # Send a Successful PublishMessageReceived with the
                    # block id sent in request
                    if self._write_queue is not None:
                        response_message = struct.pack('!HHH',
                                                       PUBLISH_MESSAGE_RECEIVED,
                                                       block_id, 200)
                        self._write_queue.put((session.socket, response_message))
            except Exception as exception:
                self.log.exception(exception)

            self._queue.task_done()

    def queue_callback(self, session, block_id, data):
        """
        Queues up a callback event to occur for a session with the given
        payload data.  Will block if the queue is full.

        :param session: the session with a defined callback function to call.
        :param block_id: the block_id of the message received.
        :param data: the data payload of the message received.
        """
        self._queue.put((session, block_id, data))
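
Assuming the class above and its module-level constants (``PUBLISH_MESSAGE_RECEIVED``
and friends) are importable, wiring it up looks roughly like this; the session object
is a stand-in exposing only the two attributes the pool touches:

from queue import Queue

class FakeSession(object):
    socket = None  # the pool only forwards this into the write queue

    @staticmethod
    def callback(data):
        print("got payload:", data)
        return True  # True marks the message as received

write_queue = Queue()
pool = CallbackWorkerPool(write_queue, size=2)
pool.queue_callback(FakeSession, 0, b'{"event": "demo"}')
pool._queue.join()  # test-only convenience: wait for the callback to finish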
コード例 #28
0
ファイル: lsf.py プロジェクト: chapmanb/toil
class LSFBatchSystem(BatchSystemSupport):
    """
    The interface for running jobs on lsf, runs all the jobs you give it as they come in,
    but in parallel.
    """
    @classmethod
    def supportsWorkerCleanup(cls):
        return False

    @classmethod
    def supportsHotDeployment(cls):
        return False

    def shutdown(self):
        pass

    def __init__(self, config, maxCores, maxMemory, maxDisk):
        super(LSFBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
        self.lsfResultsFile = self._getResultsFileName(config.jobStore)
        #Reset the job queue and results (initially, we do this again once we've killed the jobs)
        self.lsfResultsFileHandle = open(self.lsfResultsFile, 'w')
        self.lsfResultsFileHandle.close()  # We lose any previous state in this file, and ensure the file's existence
        self.currentjobs = set()
        self.obtainSystemConstants()
        self.jobIDs = dict()
        self.lsfJobIDs = dict()
        self.nextJobID = 0

        self.newJobsQueue = Queue()
        self.updatedJobsQueue = Queue()
        self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self)
        self.worker.setDaemon(True)
        self.worker.start()

    def __del__(self):
        # Closes the file handle associated with the results file.
        self.lsfResultsFileHandle.close()  # Close the results file, because we're done.

    def issueBatchJob(self, jobNode):
        jobID = self.nextJobID
        self.nextJobID += 1
        self.currentjobs.add(jobID)
        bsubline = prepareBsub(jobNode.cores, jobNode.memory) + [jobNode.command]
        self.newJobsQueue.put((jobID, bsubline))
        logger.debug("Issued the job command: %s with job id: %s " % (jobNode.command, str(jobID)))
        return jobID

    def getLsfID(self, jobID):
        if jobID not in self.lsfJobIDs:
            raise RuntimeError("Unknown jobID, could not be converted")

        (job, task) = self.lsfJobIDs[jobID]
        if task is None:
            return str(job)
        else:
            return str(job) + "." + str(task)

    def killBatchJobs(self, jobIDs):
        """Kills the given job IDs.
        """
        # Remember the LSF ids before they are removed from the mappings,
        # so that the polling loop below can still look them up.
        lsfIDs = {jobID: self.lsfJobIDs[jobID] for jobID in jobIDs}
        for jobID in jobIDs:
            logger.debug("DEL: " + str(self.getLsfID(jobID)))
            self.currentjobs.remove(jobID)
            subprocess.Popen(["bkill", self.getLsfID(jobID)])
            del self.jobIDs[self.lsfJobIDs[jobID]]
            del self.lsfJobIDs[jobID]

        toKill = set(jobIDs)
        while len(toKill) > 0:
            for jobID in list(toKill):
                if getjobexitcode(lsfIDs[jobID]) is not None:
                    toKill.remove(jobID)

            if len(toKill) > 0:
                logger.warning("Tried to kill some jobs, but they are still "
                               "running; retrying in 5 seconds")
                time.sleep(5)

    def getIssuedBatchJobIDs(self):
        """A list of jobs (as jobIDs) currently issued (may be running, or maybe 
        just waiting).
        """
        return self.currentjobs

    def getRunningBatchJobIDs(self):
        """Gets a map of jobs (as jobIDs) currently running (not just waiting)
        and how long they have been running for (in seconds).
        """
        times = {}
        currentjobs = set()
        for x in self.getIssuedBatchJobIDs():
            if x in self.lsfJobIDs:
                currentjobs.add(self.lsfJobIDs[x])
            else:
                #not yet started
                pass
        process = subprocess.Popen(["bjobs"], stdout = subprocess.PIPE)

        for curline in process.stdout:
            items = curline.strip().split()
            if (len(items) > 9 and items[0] in currentjobs) and items[2] == 'RUN':
                jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
                jobstart = jobstart + ' ' + items[9]
                # A single parse is enough; re-parsing the resulting float
                # with a second strptime would raise a TypeError.
                jobstart = time.mktime(time.strptime(jobstart, "%b/%d/%Y %H:%M"))
                times[self.jobIDs[items[0]]] = time.time() - jobstart
        return times

    def getUpdatedBatchJob(self, maxWait):
        try:
            lsfJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
        except Empty:
            return None
        self.updatedJobsQueue.task_done()
        jobID = self.jobIDs[lsfJobID]
        self.currentjobs -= {jobID}
        return jobID, retcode, None

    def getWaitDuration(self):
        """Gives LSF a moment to catch its breath between polls (in seconds).
        """
        return 15

    @classmethod
    def getRescueBatchJobFrequency(cls):
        """Rescuing jobs involves expensive calls to the batch system, so we
        only allow it every 30 minutes.
        """
        return 1800

    def obtainSystemConstants(self):
        p = subprocess.Popen(["lshosts"],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)

        line = p.stdout.readline()
        items = line.strip().split()
        num_columns = len(items)
        cpu_index = None
        mem_index = None
        for i in range(num_columns):
            if items[i] == 'ncpus':
                cpu_index = i
            elif items[i] == 'maxmem':
                mem_index = i

        if cpu_index is None or mem_index is None:
            raise RuntimeError(
                "lshosts command does not return ncpus or maxmem columns")

        p.stdout.readline()

        self.maxCPU = 0
        self.maxMEM = MemoryString("0")
        for line in p.stdout:
            items = line.strip().split()
            if len(items) < num_columns:
                raise RuntimeError(
                    "lshosts output has a varying number of columns")
            if items[cpu_index] != '-' and int(items[cpu_index]) > self.maxCPU:
                self.maxCPU = int(items[cpu_index])
            if items[mem_index] != '-' and MemoryString(
                    items[mem_index]) > self.maxMEM:
                self.maxMEM = MemoryString(items[mem_index])

        if self.maxCPU == 0 or self.maxMEM == MemoryString("0"):
            raise RuntimeError("lshosts returns null ncpus or maxmem info")
        logger.debug("Got the maxCPU: %s and the maxMEM: %s",
                     self.maxCPU, self.maxMEM)
コード例 #29
0
ファイル: server.py プロジェクト: psav/riggerlib
class Rigger(object):
    """ A Rigger event framework instance.

    The Rigger object holds all configuration and instances of plugins. By default Rigger accepts
    a configuration file name to parse, though it is perfectly acceptable to pass the configuration
    into the ``self.config`` attribute.

    Args:
        config_file: A configuration file holding all of Rigger's base and plugin configuration.
    """
    def __init__(self, config_file):
        self.gdl = threading.Lock()
        self.pre_callbacks = defaultdict(dict)
        self.post_callbacks = defaultdict(dict)
        self.plugins = {}
        self.config_file = config_file
        self.squash_exceptions = False
        self.initialized = False
        self._task_list = {}
        self._queue_lock = threading.Lock()
        self._global_queue = Queue()
        self._background_queue = Queue()
        self._server_shutdown = False
        self._zmq_event_handler_shutdown = False
        self._global_queue_shutdown = False
        self._background_queue_shutdown = False

        globt = threading.Thread(target=self.process_queue, name="global_queue_processor")
        globt.start()
        bgt = threading.Thread(
            target=self.process_background_queue, name="background_queue_processor")
        bgt.start()

    def process_queue(self):
        """
        The ``process_queue`` thread manages taking events on and off of the global queue.
        Both TCP and in-object fire_hooks place events onto the global_queue and these are both
        handled by the same handler called ``process_hook``. If there is an exception during
        processing, the exception is printed and execution continues.
        """
        while not self._global_queue_shutdown:
            while not self._global_queue.empty():
                with self._queue_lock:
                    tid = self._global_queue.get()
                    obj = self._task_list[tid].json_dict
                    self._task_list[tid].status = Task.RUNNING
                try:
                    loc, glo = self.process_hook(obj['hook_name'], **obj['data'])
                    combined_dict = {}
                    combined_dict.update(glo)
                    combined_dict.update(loc)
                    self._task_list[tid].output = combined_dict
                except Exception as e:
                    self.log_message(e)
                with self._queue_lock:
                    self._global_queue.task_done()
                    self._task_list[tid].status = Task.FINISHED
                if not self._task_list[tid].json_dict.get('grab_result', None):
                    del self._task_list[tid]
            time.sleep(0.1)

    def process_background_queue(self):
        """
        The ``process_background_queue`` thread manages hooks which have been backgrounded. Such
        tasks are not required for the test to continue, so they can be forgotten about once
        queued. An example would be a hook that sends an email or tars up files: it has all the
        information it needs, and the main process doesn't need to wait for it to complete.
        """
        while not self._background_queue_shutdown:
            while not self._background_queue.empty():
                obj = self._background_queue.get()
                try:
                    local, globals_updates = self.process_callbacks(obj['cb'], obj['kwargs'])
                    with self.gdl:
                        self.global_data = recursive_update(self.global_data, globals_updates)
                except Exception as e:
                    self.log_message(e)
                self._background_queue.task_done()
            time.sleep(0.1)

    def zmq_event_handler(self, zmq_socket_address):
        """
        The ``zmq_event_handler`` thread receives (and responds to) updates from the
        zmq socket, which is normally embedded in the web server running alongside this
        riggerlib instance, in its own process.

        """
        ctx = zmq.Context()
        zmq_socket = ctx.socket(zmq.REP)
        zmq_socket.set(zmq.RCVTIMEO, 300)
        zmq_socket.bind(zmq_socket_address)

        def zmq_reply(message, **extra):
            payload = {'message': message}
            payload.update(extra)
            zmq_socket.send_json(payload)
        bad_request = partial(zmq_reply, 'BAD REQUEST')

        while not self._zmq_event_handler_shutdown:
            try:
                json_dict = zmq_socket.recv_json()
            except zmq.Again:
                continue

            try:
                event_name = json_dict['event_name']
            except KeyError:
                bad_request()
                continue

            if event_name == 'fire_hook':
                tid = self._fire_internal_hook(json_dict)
                if tid:
                    zmq_reply('OK', tid=tid)
                else:
                    bad_request()
            elif event_name == 'task_check':
                try:
                    tid = json_dict['tid']
                    extra = {
                        "tid": tid,
                        "status": self._task_list[tid].status,
                    }
                    if json_dict['grab_result']:
                        extra["output"] = self._task_list[tid].output
                    zmq_reply('OK', **extra)
                except KeyError:
                    zmq_reply('NOT FOUND')
            elif event_name == 'task_delete':
                tid = json_dict.get('tid')
                if tid is not None:
                    # Deleting an unknown task is not an error.
                    self._task_list.pop(tid, None)
                zmq_reply('OK', tid=tid)
            elif event_name == 'shutdown':
                zmq_reply('OK')
                # We gotta initiate server stop from here and stop this thread
                self._server_shutdown = True
                break
            elif event_name == 'ping':
                zmq_reply('PONG')
            else:
                bad_request()

        zmq_socket.close()

    def read_config(self, config_file):
        """
        Reads in the config file and parses the yaml data.

        Args:
            config_file: A configuration file holding all of Rigger's base and plugin configuration.

        Raises:
            IOError: If the file can not be read.
            Exception: If there is any error parsing the configuration file.
        """
        try:
            with open(config_file, "r") as stream:
                data = yaml.safe_load(stream)
        except IOError:
            print("!!! Configuration file could not be loaded...exiting")
            sys.exit(127)
        except Exception as e:
            print(e)
            print("!!! Error parsing Configuration file")
            sys.exit(127)
        self.config = data

    def parse_config(self):
        """
        Takes the configuration data from ``self.config`` and sets up the plugin instances.
        """
        self.read_config(self.config_file)
        self.setup_plugin_instances()
        self.start_server()

    def setup_plugin_instances(self):
        """
        Sets up instances into a dict called ``self.instances`` and instantiates each
        instance of the plugin. It also sets the ``self._threaded`` option to determine
        if plugins will be processed synchronously or asynchronously.
        """
        self.instances = {}
        self._threaded = self.config.get("threaded", False)
        plugins = self.config.get("plugins", {})
        for ident, config in plugins.items():
            self.setup_instance(ident, config)

    def setup_instance(self, ident, config):
        """
        Sets up a single instance into the ``self.instances`` dict. If the instance does
        not exist, a warning is printed out.

        Args:
            ident: A plugin instance identifier.
            config: Configuration dict from the yaml.
        """
        plugin_name = config.get('plugin', {})
        if plugin_name in self.plugins:
            obj = self.plugins[plugin_name]
            if obj:
                obj_instance = obj(ident, config, self)
                self.instances[ident] = RiggerPluginInstance(ident, obj_instance, config)
        else:
            msg = "Plugin [{}] was not found, "\
                  "disabling instance [{}]".format(plugin_name, ident)
            self.log_message(msg)

    def start_server(self):
        """
        Starts the ZMQ server if the ``server_enabled`` is True in the config.
        """
        self._server_hostname = self.config.get('server_address', '127.0.0.1')
        self._server_port = self.config.get('server_port', 21212)
        self._server_enable = self.config.get('server_enabled', False)
        if self._server_enable:
            zmq_socket_address = 'tcp://{}:{}'.format(self._server_hostname, self._server_port)
            # set up receiver thread for zmq event handling
            zeh = threading.Thread(
                target=self.zmq_event_handler, args=(zmq_socket_address,), name="zmq_event_handler")
            zeh.start()
            exect = threading.Thread(target=self.await_shutdown, name="executioner")
            exect.start()

    def await_shutdown(self):
        while not self._server_shutdown:
            time.sleep(0.3)
        self.stop_server()

    def stop_server(self):
        """
        Responsible for the following:
            - stopping the zmq event handler (unless already stopped through 'terminate')
            - stopping the global queue
            - stopping the background queue
        """
        self.log_message("Shutdown initiated : {}".format(self._server_hostname))
        # The order here is important
        self._zmq_event_handler_shutdown = True
        self._global_queue.join()
        self._global_queue_shutdown = True
        self._background_queue.join()
        self._background_queue_shutdown = True
        raise SystemExit

    def fire_hook(self, hook_name, **kwargs):
        """
        Parses the hook information into a dict for passing to process_hook. This is used
        to enable both the TCP and in-object fire_hook methods to use the same process_hook
        method call.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.

        """
        json_dict = {'hook_name': hook_name, 'data': kwargs}
        self._fire_internal_hook(json_dict)

    def _fire_internal_hook(self, json_dict):
        task = Task(json_dict)
        tid = task.tid.hexdigest()
        self._task_list[tid] = task
        if self._global_queue:
            with self._queue_lock:
                self._global_queue.put(tid)
            return tid
        else:
            return None

    def process_hook(self, hook_name, **kwargs):
        """
        Takes a hook_name and a selection of kwargs and fires off the appropriate callbacks.

        This function is the guts of Rigger and is responsible for running the callback and
        hook functions. It first loads some blank dicts to collect the updates for the local
        and global namespaces. After this, it loads the pre_callback functions along with
        the kwargs into the callback collector processor.

        The return values are then classified into local and global dicts and updates proceed.
        After this, the plugin hooks themselves are then run using the same methodology. Their
        return values are merged with the existing dicts and then the same process happens
        for the post_callbacks.

        Note: If the instance of the plugin has been marked as a background instance, any hooks
              which are called in that instance will be backgrounded. The hook will also not
              be able to return any data to the post-hook callback, although updates to globals
              will be processed as and when the backgrounded task is completed.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.
        """
        if not self.initialized:
            return
        kwargs_updates = {}
        globals_updates = {}
        kwargs.update({'config': self.config})

        # First fire off any pre-hook callbacks
        if self.pre_callbacks.get(hook_name):
            # print "Running pre hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.pre_callbacks[hook_name].values(), kwargs)

            # Now we can update the kwargs passed to the real hook with the updates
            with self.gdl:
                self.global_data = recursive_update(self.global_data, globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)

        # Now fire off each plugin hook
        event_hooks = []
        for instance_name, instance in self.instances.items():
            callbacks = instance.obj.callbacks
            enabled = instance.data.get('enabled', None)
            if callbacks.get(hook_name) and enabled:
                cb = callbacks[hook_name]
                if instance.data.get('background', False):
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                elif cb['bg']:
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                else:
                    event_hooks.append(cb)
        kwargs_updates, globals_updates = self.process_callbacks(event_hooks, kwargs)

        # One more update for the post_hook callback
        with self.gdl:
            self.global_data = recursive_update(self.global_data, globals_updates)
        kwargs = recursive_update(kwargs, kwargs_updates)

        # Finally any post-hook callbacks
        if self.post_callbacks.get(hook_name):
            # print "Running post hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.post_callbacks[hook_name].values(), kwargs)
            with self.gdl:
                self.global_data = recursive_update(self.global_data, globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)
        return kwargs, self.global_data

    def process_callbacks(self, callback_collection, kwargs):
        """
        Processes a collection of callbacks or hooks for a particular event, namely pre, hook or
        post.

        The functions are passed in as an array to ``callback_collection`` and process callbacks
        first iterates each function and ensures that each one has the correct arguments available
        to it. If not, an Exception is raised. Then, depending on whether Threading is enabled or
        not, the functions are either run sequentially, or loaded into a ThreadPool and executed
        asynchronously.

        The returned local and global updates are either collected and processed sequentially, as
        in the case of the non-threaded behaviour, or collected at the end of the
        callback_collection processing and handled there.

        Note:
            It is impossible to predict the order of the functions being run. If the order is
            important, it is advised to create a second event hook that will be fired before the
            other. Rigger has no concept of hook or callback order and is unlikely to ever have one.

        Args:
            callback_collection: A list of functions to call.
            kwargs: A set of kwargs to pass to the functions.

        Returns: A tuple of local and global namespace updates.
        """
        loc_collect = {}
        glo_collect = {}
        if self._threaded:
            results_list = []
            pool = ThreadPool(10)
        for cb in callback_collection:
            # Parameters whose default is a class (inspect.Parameter.empty
            # when no default was supplied) are treated as required.
            required_args = [sig for sig in cb['args'] if isinstance(cb['args'][sig].default, type)]
            missing = list(set(required_args).difference(set(self.global_data.keys()))
                           .difference(set(kwargs.keys())))
            if not missing:
                new_kwargs = self.build_kwargs(cb['args'], kwargs)
                if self._threaded:
                    results_list.append(pool.apply_async(cb['func'], [], new_kwargs))
                else:
                    obtain_result = self.handle_results(cb['func'], [], new_kwargs)
                    loc_collect, glo_collect = self.handle_collects(
                        obtain_result, loc_collect, glo_collect)
            else:
                raise Exception('Function {} is missing kwargs {}'
                                .format(cb['func'].__name__, missing))

        if self._threaded:
            pool.close()
            pool.join()
            for result in results_list:
                obtain_result = self.handle_results(result.get, [], {})
                loc_collect, glo_collect = self.handle_collects(
                    obtain_result, loc_collect, glo_collect)
        return loc_collect, glo_collect

    def handle_results(self, call, args, kwargs):
        """
        Handles results and depending on configuration, squashes exceptions and logs or
        returns the obtained result.

        Args:
            call: The function call.
            args: The positional arguments.
            kwargs: The keyword arguments.

        Returns: The obtained result of the callback or hook.
        """
        try:
            obtain_result = call(*args, **kwargs)
        except:
            if self.squash_exceptions:
                obtain_result = None
                self.handle_failure(sys.exc_info())
            else:
                raise

        return obtain_result

    def handle_collects(self, result, loc_collect, glo_collect):
        """
        Handles extracting the information from the hook/callback result.

        If the hook/callback returns None, then the dicts are returned unaltered, else
        they are updated with local, global namespace updates.

        Args:
            result: The result to process.
            loc_collect: The local namespace updates collection.
            glo_collect: The global namespace updates collection.
        Returns: A tuple containing the local and global updates.
        """
        if result:
            if result[0]:
                loc_collect = recursive_update(loc_collect, result[0])
            if result[1]:
                glo_collect = recursive_update(glo_collect, result[1])
        return loc_collect, glo_collect

    def build_kwargs(self, args, kwargs):
        """
        Builds a new kwargs from a list of allowed args.

        Functions only receive a single set of kwargs, and so the global and local namespaces
        have to be collapsed. In this way, the local overrides the global namespace, hence if
        a key exists in both local and global, the local value will be passed to the function
        under that key name and the global value will be forgotten.

        The args parameter ensures that only the expected arguments are supplied.

        Args:
            args: A list of allowed argument names
            kwargs: A dict of kwargs from the local namespace.
        Returns: A consolidated global/local namespace with local overrides.
        """
        returned_args = {}
        returned_args.update({
            name: self.global_data[name] for name in args
            if name in self.global_data})
        returned_args.update({
            name: kwargs[name] for name in args
            if name in kwargs})
        return returned_args

    def register_hook_callback(self, hook_name=None, ctype="pre", callback=None, name=None):
        """
        Registers pre and post callbacks.

        Takes a callback function and assigns it to the hook_name with an optional identifier.
        The optional identifier makes it possible to hot bind functions into hooks and to
        remove them at a later date with ``unregister_hook_callback``.

        Args:
            hook_name: The name of the event hook to respond to.
            ctype: The call back type, either ``pre`` or ``post``.
            callback: The callback function.
            name: An optional name for the callback instance binding.
        """
        if hook_name and callback:
            callback_instance = self.create_callback(callback)
            if not name:
                name = hashlib.sha1(
                    str(time.time()) + hook_name + str(callback_instance['args'])).hexdigest()
            if ctype == "pre":
                self.pre_callbacks[hook_name][name] = callback_instance
            elif ctype == "post":
                self.post_callbacks[hook_name][name] = callback_instance

    def unregister_hook_callback(self, hook_name, ctype, name):
        """
        Unregisters a pre or post callback.

        If the binding has a known name, this function allows the removal of a binding.

        Args:
            hook_name: The event hook name.
            ctype: The callback type, either ``pre`` or ``post``.
            name: An optional name for the callback instance binding.
        """
        if ctype == "pre":
            del self.pre_callbacks[hook_name][name]
        elif ctype == "post":
            del self.post_callbacks[hook_name][name]

    def register_plugin(self, cls, plugin_name=None):
        """ Registers a plugin class to a name.

        Multiple instances of the same plugin can be used in Rigger; ``self.plugins``
        stores un-initialized class definitions to be used by ``setup_plugin_instances``.

        Args:
            cls: The class.
            plugin_name: The name of the plugin.
        """
        if plugin_name in self.plugins:
            print("Plugin name already taken [{}]".format(plugin_name))
        elif plugin_name is None:
            print("Plugin name cannot be None")
        else:
            # print "Registering plugin {}".format(plugin_name)
            self.plugins[plugin_name] = cls

    def get_instance_obj(self, name):
        """
        Gets the instance object for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The object of the instance.
        """
        if name in self.instances:
            return self.instances[name].obj
        else:
            return None

    def get_instance_data(self, name):
        """
        Gets the instance data(config) for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The data(config) of the instance.
        """
        if name in self.instances:
            return self.instances[name].data
        else:
            return None

    def configure_plugin(self, name, *args, **kwargs):
        """
        Attempts to configure an instance, passing it the args and kwargs.

        Args:
            name: The ident name of the instance.
            args: The positional args.
            kwargs: The keyword arguments.
        """
        obj = self.get_instance_obj(name)
        if obj:
            obj.configure(*args, **kwargs)

    @staticmethod
    def create_callback(callback, bg=False):
        """
        Simple function to inspect a function and return it along with its param names wrapped
        up in a nice dict. This forms a callback object.

        Args:
            callback: The callback function.
            bg: Whether the callback should be backgrounded.
        Returns: A dict of function and param names.
        """
        params = signature(callback).parameters
        return {
            'func': callback,
            'args': params,
            'bg': bg
        }

    def handle_failure(self, exc):
        """
        Handles an exception. It is expected that this be overridden.
        """
        self.log_message(exc)

    def log_message(self, message):
        """
        "Logs" a message. It is expected that this be overidden.
        """
        print(message)
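
Based only on the methods shown above, typical Rigger wiring looks roughly like the
sketch below. This is a hedged outline rather than a tested recipe: ``MyPlugin`` and
the hook name are invented, ``conf.yaml`` is a placeholder path, and some setup
(e.g. setting ``initialized`` and ``global_data``) happens in code not shown here:

rig = Rigger('conf.yaml')                    # starts the queue processor threads
rig.register_plugin(MyPlugin, 'my_plugin')   # MyPlugin defined elsewhere

def before_start(config):
    # pre-callback: return (local updates, global updates)
    return {'started': True}, {}

rig.register_hook_callback('start_test', ctype='pre', callback=before_start)
rig.parse_config()                           # read yaml, build instances, maybe start ZMQ
rig.fire_hook('start_test', test_name='smoke')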
コード例 #30
0
class QueueWorker(BaseWorker):
    TIMEOUT_ATTEMPTS = 5
    QUEUE_SIZE = -1  # inf
    END_EVENT = object()
    NAME = 'polyaxon.QueueWorker'

    def __init__(self, timeout=None, queue_size=None):
        super(QueueWorker, self).__init__()
        self._queue = Queue(queue_size or self.QUEUE_SIZE)
        self._timeout = timeout if timeout is not None else settings.TIMEOUT

    def atexit(self):
        with self._lock:
            if not self.is_alive():
                return

            self._queue.put_nowait(self.END_EVENT)

            def timeout_join(timeout, queue):
                end = time() + timeout
                queue.all_tasks_done.acquire()
                try:
                    while queue.unfinished_tasks:
                        current_timeout = end - time()
                        if current_timeout <= 0:
                            # timed out
                            return False

                        queue.all_tasks_done.wait(timeout=current_timeout)

                    return True

                finally:
                    queue.all_tasks_done.release()

            # ensure wait
            timeout = min(settings.MIN_TIMEOUT,
                          self._timeout / self.TIMEOUT_ATTEMPTS)
            if timeout_join(timeout=timeout, queue=self._queue):
                timeout = 0
            else:
                # Queue still has messages; try again.
                size = self._queue.qsize()

                if not settings.IN_CLUSTER:
                    print(
                        'Polyaxon %s is attempting to send %i pending messages'
                        % (self.NAME, size))
                    print('Waiting up to {} seconds'.format(self._timeout))
                    if os.name == 'nt':
                        print('Press Ctrl-Break to quit')
                    else:
                        print('Press Ctrl-C to quit')

            sleep(settings.MIN_TIMEOUT)  # Allow tasks to get executed
            while timeout > 0 and not timeout_join(timeout=timeout,
                                                   queue=self._queue):
                timeout = min(timeout + self._timeout / self.TIMEOUT_ATTEMPTS,
                              self._timeout - timeout)

            size = self._queue.qsize()
            if size > 0:
                print(
                    'Polyaxon %s timed out and did not manage to send %i messages'
                    % (self.NAME, size))

            self._thread = None

    def stop(self, timeout=None):
        with self._lock:
            if self._thread:
                self._queue.put_nowait(self.END_EVENT)
                self._thread.join(timeout=timeout)
                self._thread = None
                self._thread_for_pid = None

    def queue(self, callback, *args, **kwargs):
        self.is_running()  # ensures the worker thread is running (starts it if needed)
        self._queue.put_nowait((callback, args, kwargs))

    def _target(self):
        while True:
            record = self._queue.get()
            try:
                if record is self.END_EVENT:
                    break
                callback, args, kwargs = record
                try:
                    callback(*args, **kwargs)
                except Exception:
                    logger.error('Failed processing job', exc_info=True)
            finally:
                self._queue.task_done()

            sleep(0)
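
The ``atexit`` handler above drains the queue against a deadline rather than calling ``Queue.join()``, which can block forever if the consumer dies. A minimal standalone sketch of that deadline-join idiom, independent of the polyaxon settings used above:

from queue import Queue
from time import time

def join_with_deadline(q, timeout):
    """Like q.join(), but gives up after `timeout` seconds.

    Returns True if all tasks finished, False on timeout.
    """
    deadline = time() + timeout
    with q.all_tasks_done:               # the Condition that Queue.join() waits on
        while q.unfinished_tasks:
            remaining = deadline - time()
            if remaining <= 0:
                return False             # timed out with work still pending
            q.all_tasks_done.wait(timeout=remaining)
    return True

q = Queue()
q.put('job')                             # never processed by any worker
print(join_with_deadline(q, 0.1))        # False: nothing called task_done()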
Code example #31
File: backend.py Project: tanmer/sentry
class QueuedRunner(object):
    """\
    Secondary backend runner that puts method calls on a bounded queue and drops them
    when the queue is full.

    A separate (non-main) thread works the queue.
    """

    def __init__(self):
        self.q = Queue(maxsize=100)
        self.worker_running = False

    def start_worker(self):
        def worker():
            while True:
                (func, args, kwargs) = self.q.get()
                try:
                    func(*args, **kwargs)
                    metrics.incr(
                        'tagstore.multi.runner.execute',
                        instance='success',
                        skip_internal=True,
                    )
                except Exception as e:
                    logger.exception(e)
                    metrics.incr(
                        'tagstore.multi.runner.execute',
                        instance='fail',
                        skip_internal=True,
                    )
                finally:
                    self.q.task_done()

        t = Thread(target=worker)
        t.daemon = True
        t.start()

        self.worker_running = True

    def run(self, f, *args, **kwargs):
        if random.random() <= options.get('tagstore.multi-sampling'):
            if not self.worker_running:
                self.start_worker()

            try:
                self.q.put((f, args, kwargs), block=False)
                metrics.incr(
                    'tagstore.multi.runner.schedule',
                    instance='put',
                    skip_internal=True,
                )
            except Full:
                metrics.incr(
                    'tagstore.multi.runner.schedule',
                    instance='full',
                    skip_internal=True,
                )
                return
        else:
            metrics.incr(
                'tagstore.multi.runner.schedule',
                instance='sampled',
                skip_internal=True,
            )
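
A hedged sketch of how the runner is driven; ``record_tag`` is an invented stand-in task, and the sentry ``options``/``metrics`` modules that ``run()`` touches are assumed to be importable as in the original file:

# Illustrative only; not from the original module.
def record_tag(key, value):
    print('recording {}={}'.format(key, value))

runner = QueuedRunner()
runner.run(record_tag, 'color', value='blue')  # sampled; silently dropped if the queue is full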
Code example #32
class ThreadedFifoBuffer(FifoBuffer):
    """
    FIFO-in-memory connection inside dedicated thread.

    This is external-IO usable for Moler since it has it's own runner
    (thread) that can work in background and pull data from FIFO-mem connection.
    Usable for integration tests.
    """

    def __init__(self, moler_connection, echo=True, name=None, logger_name=""):
        """Initialization of FIFO-mem-threaded connection."""
        super(ThreadedFifoBuffer, self).__init__(moler_connection=moler_connection,
                                                 echo=echo,
                                                 name=name,
                                                 logger_name=logger_name)
        self.pulling_thread = None
        self.injections = Queue()

    def open(self):
        """Start thread pulling data from FIFO buffer."""
        ret = super(ThreadedFifoBuffer, self).open()
        done = threading.Event()
        self.pulling_thread = TillDoneThread(target=self.pull_data,
                                             done_event=done,
                                             kwargs={'pulling_done': done})
        self.pulling_thread.start()
        self._log(msg="open {}".format(self), level=logging.INFO)
        self._notify_on_connect()
        return ret

    def close(self):
        """Stop pulling thread."""
        if self.pulling_thread:
            self.pulling_thread.join()
            self.pulling_thread = None
        super(ThreadedFifoBuffer, self).close()
        self._log(msg="closed {}".format(self), level=logging.INFO)
        self._notify_on_disconnect()

    def inject(self, input_bytes, delay=0.0):
        """
        Add bytes to end of buffer

        :param input_bytes: iterable of bytes to inject
        :param delay: delay before each inject
        :return: None
        """
        for data in input_bytes:
            self.injections.put((data, delay))
        if not delay:
            time.sleep(0.05)  # give subsequent read() a chance to get data

    def _inject_deferred(self):
        if self.deferred_injections:
            for data, delay in self.deferred_injections:
                self.injections.put((data, delay))
            self.deferred_injections = []
            time.sleep(0.05)  # give subsequent read() a chance to get data

    def pull_data(self, pulling_done):
        """Pull data from FIFO buffer."""
        while not pulling_done.is_set():
            self.read()  # internally forwards to embedded Moler connection
            try:
                data, delay = self.injections.get_nowait()
                if delay:
                    time.sleep(delay)
                self._inject(data)
                self.injections.task_done()
            except Empty:
                time.sleep(0.01)  # give FIFO chance to get data
Code example #33
File: logger.py Project: uber/clay
class TCPHandler(logging.Handler):
    '''
    Python logging handler for sending JSON formatted messages over
    TCP, optionally wrapping the connection with TLSv1
    '''
    def __init__(self, host, port, ssl_ca_file=None):
        '''
        Instantiate a TCPHandler with the intent of connecting to the
        given host (string) and port (int) with or without using SSL/TLSv1
        '''
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.ssl_ca_file = ssl_ca_file
        self.sock = None
        self.queue = Queue(LOG_QUEUE_SIZE)
        self.connect_wait = BACKOFF_INITIAL
        self.raiseExceptions = 0

        self.hostname = socket.gethostname()
        if self.hostname.find('.') != -1:
            self.hostname = self.hostname.split('.', 1)[0]

        self.sender = threading.Thread(target=self.run)
        self.sender.daemon = True
        self.sender.start()

    def connect(self):
        '''
        Create a connection with the server, sleeping for some
        period of time if connection errors have occurred recently.
        '''
        self.sock = socket.socket()
        if self.ssl_ca_file:
            self.sock = ssl.wrap_socket(self.sock,
                ssl_version=ssl.PROTOCOL_TLSv1,
                cert_reqs=ssl.CERT_REQUIRED,
                ca_certs=self.ssl_ca_file)

        INTERNAL_LOG.debug('Connecting (backoff: %.03f)' %
            self.connect_wait)
        time.sleep(self.connect_wait)
        self.sock.connect((self.host, self.port))

    def jsonify(self, record):
        '''
        Translate a LogRecord instance into a json_event
        '''
        timestamp = datetime.utcfromtimestamp(record.created)
        timestamp = timestamp.isoformat()

        fields = {
            'level': record.levelname,
            'filename': record.pathname,
            'lineno': record.lineno,
            'method': record.funcName,
        }
        if record.exc_info:
            fields['exception'] = str(record.exc_info)
            fields['traceback'] = format_exc(record.exc_info)

        log = {
            '@source_host': self.hostname,
            '@timestamp': timestamp,
            '@tags': [record.name],
            '@message': record.getMessage(),
            '@fields': fields,
        }
        return json.dumps(log)

    def emit(self, record):
        '''
        Send a LogRecord object formatted as json_event via a
        queue and worker thread.
        '''
        self.queue.put_nowait(record)

    def run(self):
        '''
        Main loop of the logger thread. All network I/O and exception handling
        originates here. Strings are consumed from self.queue and sent to
        self.sock, creating a new connection if necessary.

        If any exceptions are caught, the message is put() back on the queue
        and the exception is allowed to propagate up through
        logging.Handler.handleError(), potentially causing this thread to abort.
        '''
        INTERNAL_LOG.debug('Log I/O thread started')
        while True:
            record = self.queue.get()
            if record is None:
                break

            jsonrecord = self.jsonify(record)
            jsonrecord = '%s\n' % jsonrecord

            try:
                if self.sock is None:
                    self.connect()
                self.send(jsonrecord)
            except Exception:
                # This exception will be silently ignored and the message
                # requeued unless self.raiseExceptions=1
                self.queue.put(record)
                self.handleError(record)
            self.queue.task_done()
        INTERNAL_LOG.debug('Log I/O thread exited cleanly')

    def send(self, data):
        '''
        Keep calling SSLSocket.write until the entire message has been sent
        '''
        while len(data) > 0:
            if self.ssl_ca_file:
                sent = self.sock.write(data)
            else:
                sent = self.sock.send(data)
            data = data[sent:]
        self.connect_wait = BACKOFF_INITIAL

    def handleError(self, record):
        '''
        If an error occurs trying to send the log message, close the connection
        and delegate the exception handling to the superclass' handleError,
        which raises the exception (potentially killing the log thread) unless
        self.raiseExceptions is False.
        http://hg.python.org/cpython/file/e64d4518b23c/Lib/logging/__init__.py#l797
        '''
        INTERNAL_LOG.exception('Unable to send log')
        self.cleanup()
        self.connect_wait *= BACKOFF_MULTIPLE
        logging.Handler.handleError(self, record)

    def cleanup(self):
        '''
        If the socket to the server is still open, close it. Otherwise, do
        nothing.
        '''
        if self.sock:
            INTERNAL_LOG.info('Closing socket')
            self.sock.close()
            self.sock = None

    def close(self):
        '''
        Send a sentinel None object to the worker thread, telling it to exit
        and disconnect from the server.
        '''
        self.queue.put(None)
        self.cleanup()
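
A hedged wiring sketch for the handler above; the endpoint and CA path are placeholders, not values from the original module:

import logging

log = logging.getLogger('myapp')
log.setLevel(logging.INFO)
log.addHandler(TCPHandler('logs.example.com', 5170))  # plain TCP (placeholder endpoint)
# TLSv1 variant: TCPHandler('logs.example.com', 5171, ssl_ca_file='/etc/ca.pem')
log.info('hello over the wire')  # enqueued here, sent by the background I/O thread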
Code example #34
File: Impinj.py Project: pythonthings/CrossMgr
class Impinj(object):
    def __init__(self, dataQ, strayQ, messageQ, shutdownQ, impinjHost,
                 impinjPort, antennaStr, statusCB):
        self.impinjHost = impinjHost
        self.impinjPort = impinjPort
        self.statusCB = statusCB
        if not antennaStr:
            self.antennas = [0]
        else:
            self.antennas = [int(a) for a in antennaStr.split()]
        self.tagGroup = None
        self.tagGroupTimer = None
        self.dataQ = dataQ  # Queue to write tag reads.
        self.strayQ = strayQ  # Queue to write stray reads.
        self.messageQ = messageQ  # Queue to write operational messages.
        self.shutdownQ = shutdownQ  # Queue to listen for shutdown.
        self.logQ = Queue()
        self.rospecID = 123
        self.readerSocket = None
        self.timeCorrection = None  # Correction between the reader's time and the computer's time.
        self.connectedAntennas = []
        self.antennaReadCount = defaultdict(int)
        self.lastReadTime = {}
        self.start()

    def start(self):
        # Create a log file name.
        tNow = getTimeNow()
        dataDir = os.path.join(HOME_DIR, 'ImpinjData')
        if not os.path.isdir(dataDir):
            os.makedirs(dataDir)
        self.fname = os.path.join(
            dataDir, tNow.strftime('Impinj-%Y-%m-%d-%H-%M-%S.txt'))

        # Create a log queue and start a thread to write the log.
        self.logQ.put(('msg', 'Tag ID,Discover Time'))  # a (kind, text) tuple, as handleLogFile expects
        self.logFileThread = threading.Thread(target=self.handleLogFile)
        self.logFileThread.daemon = True
        self.logFileThread.start()

        self.keepGoing = True
        self.tagCount = 0

    #-------------------------------------------------------------------------

    def checkKeepGoing(self):
        if not self.keepGoing:
            return False

        try:
            # Check the shutdown queue for a message.  If there is one, shutdown.
            d = self.shutdownQ.get(False)
            self.keepGoing = False
            return False
        except Empty:
            return True

    def reconnectDelay(self):
        if self.checkKeepGoing():
            time.sleep(ReconnectDelaySeconds)

    #-------------------------------------------------------------------------

    def sendCommand(self, message):
        self.messageQ.put(
            ('Impinj',
             '-----------------------------------------------------'))
        self.messageQ.put(('Impinj', 'Sending Message:\n{}\n'.format(message)))
        try:
            message.send(self.readerSocket)
        except Exception as e:
            self.messageQ.put(('Impinj', 'Send command fails: {}'.format(e)))
            return False

        try:
            response = WaitForMessage(message.MessageID, self.readerSocket)
        except Exception as e:
            self.messageQ.put(('Impinj', 'Get response fails: {}'.format(e)))
            return False

        self.messageQ.put(
            ('Impinj', 'Received Response:\n{}\n'.format(response)))
        return True, response

    def sendCommands(self):
        self.connectedAntennas = []
        self.antennaReadCount = defaultdict(int)

        self.messageQ.put(
            ('Impinj', 'Connected to: ({}:{})'.format(self.impinjHost,
                                                      self.impinjPort)))

        self.messageQ.put(
            ('Impinj', 'Waiting for READER_EVENT_NOTIFICATION...'))
        response = UnpackMessageFromSocket(self.readerSocket)
        self.messageQ.put(
            ('Impinj', '\nReceived Response:\n{}\n'.format(response)))

        # Compute a correction between the reader's time and the computer's time.
        readerTime = response.getFirstParameterByClass(
            UTCTimestamp_Parameter).Microseconds
        readerTime = datetime.datetime.utcfromtimestamp(readerTime / 1000000.0)
        self.timeCorrection = getTimeNow() - readerTime

        self.messageQ.put(
            ('Impinj',
             '\nReader time is {} seconds different from computer time\n'.
             format(self.timeCorrection.total_seconds())))

        # Reset to factory defaults.
        success, response = self.sendCommand(
            SET_READER_CONFIG_Message(ResetToFactoryDefault=True))
        if not success:
            return False

        # Get the connected antennas.
        success, response = self.sendCommand(
            GET_READER_CONFIG_Message(
                RequestedData=GetReaderConfigRequestedData.AntennaProperties))
        if success:
            self.connectedAntennas = [
                p.AntennaID for p in response.Parameters
                if isinstance(p, AntennaProperties_Parameter)
                and p.AntennaConnected and p.AntennaID <= 4
            ]

        # Configure a periodic Keepalive message.
        # Change receiver sensitivity (if specified).  This value is reader dependent.
        receiverSensitivityParameter = []
        if ReceiverSensitivity is not None:
            receiverSensitivityParameter.append(
                RFReceiver_Parameter(ReceiverSensitivity=ReceiverSensitivity))

        # Change transmit power (if specified).  This value is reader dependent.
        transmitPowerParameter = []
        if TransmitPower is not None:
            transmitPowerParameter.append(
                RFTransmitter_Parameter(
                    HopTableID=1,
                    ChannelIndex=0,
                    TransmitPower=TransmitPower,
                ))

        # Change Inventory Control (if specified).
        inventoryCommandParameter = []
        if any(v is not None
               for v in [InventorySession, TagPopulation, TagTransitTime]):
            inventoryCommandParameter.append(
                C1G2InventoryCommand_Parameter(Parameters=[
                    C1G2SingulationControl_Parameter(
                        Session=InventorySession or 0,
                        TagPopulation=TagPopulation or TagPopulationDefault,
                        TagTransitTime=(TagTransitTime or 3) * 1000,
                    ),
                ], ))

        success, response = self.sendCommand(
            SET_READER_CONFIG_Message(Parameters=[
                AntennaConfiguration_Parameter(
                    AntennaID=0,
                    Parameters=receiverSensitivityParameter +
                    transmitPowerParameter + inventoryCommandParameter,
                ),
                KeepaliveSpec_Parameter(
                    KeepaliveTriggerType=KeepaliveTriggerType.Periodic,
                    PeriodicTriggerValue=int(KeepaliveSeconds * 1000),
                ),
            ], ), )
        if not success:
            return False

        # Disable all rospecs in the reader.
        success, response = self.sendCommand(
            DISABLE_ROSPEC_Message(ROSpecID=0))
        if not success:
            return False

        # Delete our old rospec.
        success, response = self.sendCommand(
            DELETE_ROSPEC_Message(ROSpecID=self.rospecID))
        if not success:
            return False

        # Get the C1G2UHFRFModeTable and extract available mode identifiers.
        modeIdentifiers = None
        maxNumberOfAntennaSupported = 4
        try:
            success, response = self.sendCommand(
                GET_READER_CAPABILITIES_Message(
                    RequestedData=GetReaderCapabilitiesRequestedData.All))
            if success:
                modeIdentifiers = [
                    e.ModeIdentifier
                    for e in response.getFirstParameterByClass(
                        C1G2UHFRFModeTable_Parameter).Parameters
                ]
                gdc = response.getFirstParameterByClass(
                    GeneralDeviceCapabilities_Parameter)
                maxNumberOfAntennaSupported = gdc.MaxNumberOfAntennaSupported
            else:
                self.messageQ.put(('Impinj', 'GET_READER_CAPABILITIES fails.'))
        except Exception as e:
            self.messageQ.put(
                ('Impinj', 'GET_READER_CAPABILITIES Exception: {}:\n{}'.format(
                    e, traceback.format_exc())))

        # Configure our new rospec.
        if ProcessingMethod == FirstReadMethod:
            cmd = GetBasicAddRospecMessage(ROSpecID=self.rospecID,
                                           antennas=self.antennas)
        else:
            cmd = GetAddRospecRSSIMessage(
                ROSpecID=self.rospecID,
                antennas=self.antennas,
                modeIdentifiers=modeIdentifiers,
                maxNumberOfAntennaSupported=maxNumberOfAntennaSupported)
        success, response = self.sendCommand(cmd)
        if not success:
            return False

        # Enable our new rospec.
        success, response = self.sendCommand(
            ENABLE_ROSPEC_Message(ROSpecID=self.rospecID))
        if not success:
            return False

        success = (success
                   and isinstance(response, ENABLE_ROSPEC_RESPONSE_Message)
                   and response.success())
        return success

    def reportTag(self,
                  tagID,
                  discoveryTime,
                  sampleSize=1,
                  antennaID=0,
                  quadReg=False):
        lrt = self.lastReadTime.get(tagID, tOld)
        if discoveryTime > lrt:
            self.lastReadTime[tagID] = discoveryTime

        if (discoveryTime - lrt).total_seconds() < RepeatSeconds:
            self.messageQ.put((
                'Impinj',
                'Received {}.  tag={} Skipped (<{} secs ago).  {}'.format(
                    self.tagCount, tagID, RepeatSeconds,
                    discoveryTime.strftime('%H:%M:%S.%f')),
                self.antennaReadCount,
            ))
            return False

        self.dataQ.put((tagID, discoveryTime))

        self.logQ.put(('log', '{},{}'.format(
            tagID,
            discoveryTime.strftime('%a %b %d %H:%M:%S.%f %Z %Y-%m-%d'),
        )))

        self.messageQ.put((
            'Impinj',
            '{} {}. {} - {}{}{}'.format(
                'QuadReg' if quadReg else 'FirstRead',
                self.tagCount,
                tagID,
                discoveryTime.strftime('%H:%M:%S.%f'),
                ' samples={}'.format(sampleSize) if sampleSize > 1 else '',
                ' antennaID={}'.format(antennaID) if antennaID else '',
            ),
            self.antennaReadCount,
        ))
        Bell()
        return True

    def handleTagGroup(self):
        if not self.tagGroup:
            return
        reads, strays = self.tagGroup.getReadsStrays(
            method=ProcessingMethod, antennaChoice=AntennaChoice)
        for tagID, discoveryTime, sampleSize, antennaID in reads:
            self.reportTag(tagID, discoveryTime, sampleSize, antennaID, True)

        self.strayQ.put(('strays', strays))
        self.tagGroupTimer = threading.Timer(1.0, self.handleTagGroup)
        self.tagGroupTimer.start()

    def handleLogFile(self):
        while True:
            msg = self.logQ.get()
            self.logQ.task_done()

            if msg[0] == 'shutdown':
                return
            try:
                pf = open(self.fname, 'a')
            except Exception:
                continue

            pf.write(msg[1] if msg[1].endswith('\n') else msg[1] + '\n')
            while True:
                try:
                    msg = self.logQ.get(False)
                except Empty:
                    break
                self.logQ.task_done()

                if msg[0] == 'shutdown':
                    return
                pf.write(msg[1] if msg[1].endswith('\n') else msg[1] + '\n')
            pf.close()
            time.sleep(0.1)

    def runServer(self):
        self.messageQ.put(('BackupFile', self.fname))

        self.messageQ.put(
            ('Impinj', '*****************************************'))
        self.messageQ.put(
            ('Impinj',
             'Reader Server Started: ({}:{})'.format(self.impinjHost,
                                                     self.impinjPort)))

        # Create an old default time for last tag read.
        tOld = getTimeNow() - datetime.timedelta(days=100)
        utcfromtimestamp = datetime.datetime.utcfromtimestamp

        while self.checkKeepGoing():
            #------------------------------------------------------------
            # Connect Mode.
            #
            # Create a socket to connect to the reader.
            self.readerSocket = socket.socket(socket.AF_INET,
                                              socket.SOCK_STREAM)
            self.readerSocket.settimeout(ConnectionTimeoutSeconds)

            self.messageQ.put(('Impinj', 'state', False))
            self.messageQ.put(('Impinj', ''))
            self.messageQ.put(
                ('Impinj', 'Trying to Connect to Reader: ({}:{})...'.format(
                    self.impinjHost, self.impinjPort)))
            self.messageQ.put(
                ('Impinj', 'ConnectionTimeout={:.2f} seconds'.format(
                    ConnectionTimeoutSeconds)))

            try:
                self.readerSocket.connect((self.impinjHost, self.impinjPort))
            except Exception as e:
                self.messageQ.put(
                    ('Impinj', 'Reader Connection Failed: {}'.format(e)))
                self.readerSocket.close()
                self.messageQ.put(
                    ('Impinj', 'Attempting Reconnect in {} seconds...'.format(
                        ReconnectDelaySeconds)))
                self.reconnectDelay()
                continue

            self.messageQ.put(('Impinj', 'state', True))

            try:
                success = self.sendCommands()
            except Exception as e:
                self.messageQ.put(
                    ('Impinj', 'Send Command Error={}'.format(e)))
                success = False

            if not success:
                self.messageQ.put(('Impinj', 'Reader Initialization Failed.'))
                self.messageQ.put(('Impinj', 'Disconnecting Reader.'))
                self.messageQ.put(('Impinj', 'state', False))
                self.readerSocket.close()
                self.messageQ.put(
                    ('Impinj', 'Attempting Reconnect in {} seconds...'.format(
                        ReconnectDelaySeconds)))
                self.reconnectDelay()
                self.statusCB()
                continue

            self.statusCB(
                connectedAntennas=self.connectedAntennas,
                timeCorrection=self.timeCorrection,
            )

            self.tagGroup = TagGroup()
            self.handleTagGroup()

            tUpdateLast = tKeepaliveLast = getTimeNow()
            self.tagCount = 0
            lastDiscoveryTime = None
            while self.checkKeepGoing():

                #------------------------------------------------------------
                # Read Mode.
                #
                try:
                    response = UnpackMessageFromSocket(self.readerSocket)
                except socket.timeout:
                    t = getTimeNow()

                    if (t - tKeepaliveLast
                        ).total_seconds() > KeepaliveSeconds * 2:
                        self.messageQ.put(
                            ('Impinj',
                             'Reader Connection Lost (missing Keepalive).'))
                        self.readerSocket.close()
                        self.messageQ.put(
                            ('Impinj', 'Attempting Reconnect...'))
                        break

                    if (t - tUpdateLast
                        ).total_seconds() >= ReaderUpdateMessageSeconds:
                        self.messageQ.put(
                            ('Impinj', 'Listening for Impinj reader data...'))
                        tUpdateLast = t
                    continue

                if isinstance(response, KEEPALIVE_Message):
                    # Respond to the KEEP_ALIVE message with KEEP_ALIVE_ACK.
                    try:
                        KEEPALIVE_ACK_Message().send(self.readerSocket)
                    except socket.timeout:
                        self.messageQ.put((
                            'Impinj',
                            'Reader Connection Lost (Keepalive_Ack timeout).'))
                        self.readerSocket.close()
                        self.messageQ.put(
                            ('Impinj', 'Attempting Reconnect...'))
                        break

                    tKeepaliveLast = getTimeNow()
                    continue

                if not isinstance(response, RO_ACCESS_REPORT_Message):
                    if not isinstance(response,
                                      READER_EVENT_NOTIFICATION_Message):
                        self.messageQ.put(('Impinj', 'Skipping: {}'.format(
                            response.__class__.__name__)))
                    continue

                for tag in response.getTagData():
                    self.tagCount += 1

                    antennaID = tag['AntennaID']

                    try:
                        self.antennaReadCount[antennaID] += 1
                    except Exception as e:
                        self.messageQ.put(
                            ('Impinj',
                             'Received {}.  Missing AntennaID.'.format(
                                 self.tagCount)))

                    try:
                        tagID = tag['EPC']
                    except Exception as e:
                        self.messageQ.put(
                            ('Impinj',
                             'Received {}.  Skipping: missing tagID.'.format(
                                 self.tagCount)))
                        continue

                    try:
                        tagID = HexFormatToStr(tagID)
                    except Exception as e:
                        self.messageQ.put((
                            'Impinj',
                            'Received {}.  Skipping: HexFormatToStr fails.  Error={}'
                            .format(self.tagCount, e)))
                        continue

                    try:
                        discoveryTime = tag[
                            'Timestamp']  # In microseconds since Jan 1, 1970
                    except Exception as e:
                        self.messageQ.put((
                            'Impinj',
                            'Received {}.  Skipping: Missing Timestamp'.format(
                                self.tagCount)))
                        continue

                    peakRSSI = tag.get('PeakRSSI', None)  # -127..127 in dB.

                    # Convert discoveryTime to Python format and correct for reader time difference.
                    discoveryTime = utcfromtimestamp(
                        discoveryTime / 1000000.0) + self.timeCorrection
                    # Debug: show the interval since the previous read.
                    if ImpinjDebug and lastDiscoveryTime is not None:
                        print('{}            \r'.format(
                            (discoveryTime -
                             lastDiscoveryTime).total_seconds()))
                    lastDiscoveryTime = discoveryTime

                    if peakRSSI is not None:
                        self.tagGroup.add(antennaID, tagID, discoveryTime,
                                          peakRSSI)
                    else:
                        self.reportTag(tagID,
                                       discoveryTime,
                                       antennaID=antennaID)

        # Cleanup.
        if self.readerSocket:
            try:
                response = self.sendCommand(CLOSE_CONNECTION_Message())
            except socket.timeout:
                pass
            self.readerSocket.close()
            self.readerSocket = None

        self.logQ.put(('shutdown', ))
        self.logFileThread.join()

        if self.tagGroupTimer:
            self.tagGroupTimer.cancel()

        return True

    def purgeDataQ(self):
        while True:
            try:
                d = self.dataQ.get(False)
            except Empty:
                break
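
As the ``__init__`` comments note, the reader is driven entirely through queues. A hedged wiring sketch; the host, the LLRP port, and the no-op status callback are placeholders:

import threading
from queue import Queue

dataQ, strayQ, messageQ, shutdownQ = Queue(), Queue(), Queue(), Queue()
impinj = Impinj(dataQ, strayQ, messageQ, shutdownQ,
                '192.168.10.102', 5084, '', lambda **kwargs: None)
threading.Thread(target=impinj.runServer, daemon=True).start()
# ...consume (tagID, discoveryTime) pairs from dataQ...
shutdownQ.put(True)  # any item on shutdownQ makes checkKeepGoing() return False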
Code example #35
class MyApp(object):
    instances = 0

    def __init__(self, filename=None):
        MyApp.instances += 1
        self.file_dlg = None
        self.info = None
        self.page_id = 0
        self.keep_running = True
        self.queue = Queue()
        thread = Thread(target=self.worker_loop)
        thread.daemon = True
        thread.start()

        builder = Gtk.Builder()
        builder.add_from_file(GLADE_FN)
        builder.connect_signals(self)
        self.window = builder.get_object("main_win")
        self.header = builder.get_object("header")
        self.body = builder.get_object("body")
        self.toc_store = builder.get_object("toc_store")
        self.toc_tree = builder.get_object("toc_tree")
        self.window.show()
        if filename is not None:
            self.open(filename)

    def on_info_btn_clicked(self, w):
        self.goto_page(0)

    def on_previous_btn_clicked(self, w):
        self.goto_page(max(0, self.page_id - 1))

    def on_next_btn_clicked(self, w):
        self.goto_page(self.page_id + 1)

    def on_search_entry_activate(self, w):
        text = w.get_text()
        page_id = try_int(text)
        if page_id is not None: self.goto_page(page_id)

    def worker_loop(self):
        while self.keep_running:
            try:
                a = self.queue.get(timeout=10)
            except Empty:
                continue
            cb_name, kwargs = a
            cb = getattr(self, cb_name, None)
            if not cb:
                self.queue.task_done()
                continue
            try:
                cb(**kwargs)
            except Exception as e:
                logger.error("ERROR: %r", e)
            self.queue.task_done()
        logger.info("worker thread exited")

    def open(self, filename):
        self.filename = filename
        cols = get_table_col(filename, 'Main')
        self.db = db = pyodbc.connect(
            tob('DRIVER=libmdbodbc.so;DBQ={}'.format(filename)),
            readonly=True,
            ansi=True,
            unicode_results=False,
        )
        cursor = db.cursor()
        cursor.execute(u'SELECT {} FROM Main'.format(','.join(cols)))
        self.info = row_to_dict(cursor.fetchone(), cols)
        self.header.set_title(self.info['Bk'])
        self.goto_page(0)
        #cols = cursor.columns('Main') # does not work
        self.id = int(self.info['BkId'])
        cursor = db.cursor()
        tbl_toc = 't{}'.format(self.info['BkId'])
        cols = get_table_col(filename, tbl_toc)
        cursor.execute(u'SELECT {} FROM {}'.format(','.join(cols), tbl_toc))
        rows = [row_to_dict(row, cols) for row in cursor.fetchall()]
        rows.sort(key=lambda r: (r['id'], r['sub']))

        def cb(r):
            for row in r:
                self.toc_store.append(None, (
                    row['tit'],
                    row['lvl'],
                    row['sub'],
                    row['id'],
                ))

        # it's a store, not UI, so we might be able to edit it directly
        # cb(rows)
        # if not then it's added like this
        GLib.idle_add(cb, rows)

    def goto_page(self, page_id, move_toc=False):
        if self.info is None: return
        self.page_id = page_id
        if page_id == 0:
            text = self.info['Betaka']
        else:
            tbl_body = 'b{}'.format(self.info['BkId'])
            cols = get_table_col(self.filename, tbl_body)
            cursor = self.db.cursor()
            cursor.execute(u'SELECT {} FROM {} WHERE id={}'.format(
                ','.join(cols), tbl_body, page_id))
            self.page = row_to_dict(cursor.fetchone(), cols)
            text = self.page['nass']
        GLib.idle_add(lambda: self.body.get_buffer().set_text(text))

    def on_window_destroy(self, w):
        self.keep_running = False
        MyApp.instances -= 1
        logger.info("running instances = %r", MyApp.instances)
        if MyApp.instances == 0:
            Gtk.main_quit()

    def on_toc_tree_selection_changed(self, w):
        s, i = w.get_selected()
        # can be accessed in many ways row=tuple(s[i]) or id=s[i][3] or id=s.get_value(i, 3)
        self.queue.put((
            'goto_page',
            {
                'page_id': s[i][3]
            },
        ))

    def on_open_btn_clicked(self, w):
        filename = get_filename(self.window)
        if filename:
            if self.info is None:
                self.queue.put((
                    'open',
                    {
                        'filename': filename
                    },
                ))
            else:
                MyApp(filename)
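
The pattern above, where the UI thread enqueues (callback-name, kwargs) pairs and a worker thread executes them and marshals results back with ``GLib.idle_add``, can be sketched without the Glade UI; the job name and payload here are illustrative:

from queue import Queue, Empty
from threading import Thread

jobs = Queue()

def worker_loop():
    while True:
        try:
            cb_name, kwargs = jobs.get(timeout=10)  # same shape as MyApp's queue items
        except Empty:
            continue
        print('off the UI thread: {}({!r})'.format(cb_name, kwargs))
        jobs.task_done()

Thread(target=worker_loop, daemon=True).start()
jobs.put(('goto_page', {'page_id': 3}))
jobs.join()  # returns once the worker has called task_done()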
Code example #36
File: zmqsocket.py Project: pennmem/RAMControl
class SocketServer(object):
    """ZMQ-based socket server for sending and receiving messages from the host
    PC.

    Because of the weird way in which PyEPL handles events, we can't run this as
    its own thread, but instead have to poll for events in the general PyEPL
    machinery. In the future, we should clean up PyEPL entirely so that it does
    not block other threads (amongst other reasons).

    :param zmq.Context ctx:

    """
    def __init__(self, ctx=None):
        self.ctx = ctx or zmq.Context()

        self._handlers = []

        self.sock = self.ctx.socket(zmq.PAIR)
        self._bound = False

        self.poller = zmq.Poller()
        self.poller.register(self.sock, zmq.POLLIN)

        # Outgoing message queue
        self._out_queue = Queue()

        # time of last sent heartbeat message
        self._last_heartbeat = 0.

        # Logging of sent and received messages.
        self.logger = create_logger("network")

    def join(self):
        """Block until all outgoing messages have been processed."""
        self.logger.warning("Joining doesn't work yet; doing nothing...")
        # self._out_queue.join()

    def bind(self, address="tcp://*:8889"):
        """Bind the socket to start listening for connections.

        :param str address: ZMQ address string

        """
        self.sock.bind(address)
        self._bound = True

    def register_handler(self, func):
        """Register a message handler.

        :param callable func: Handler function which takes the message as its
            only argument.

        """
        self.logger.debug("Adding handler: %s", func.__name__)
        self._handlers.append(func)

    def enqueue_message(self, msg):
        """Submit a new outgoing message to the queue."""
        self._out_queue.put_nowait(msg)

    def send(self, msg):
        """Immediately transmit a message to the host PC. It is advisable to not
        call this method directly in most cases, but rather enqueue a message to
        be sent via :meth:`enqueue_message`.

        :param RAMMessage msg: Message to send.

        """
        out = msg.jsonize()
        try:
            self.log_message(msg, incoming=False)
            self.sock.send(out, zmq.NOBLOCK)
        except Exception:
            self.logger.error("Sending failed!")

    def send_heartbeat(self):
        """Convenience method to send a heartbeat message to the host PC."""
        if time.time() - self._last_heartbeat >= 1.0:
            self.send(HeartbeatMessage())
            self._last_heartbeat = time.time()

    def log_message(self, message, incoming=True):
        """Log a message to the log file."""
        if not incoming:
            message = message.to_dict()

        message["in_or_out"] = "in" if incoming else "out"
        self.logger.info("%s", json.dumps(message))

    def handle_incoming(self):
        events = self.poller.poll(1)
        if self.sock in dict(events):
            try:
                msg = self.sock.recv_json()
                self.log_message(msg, incoming=True)
            except Exception:
                self.logger.error("Unable to decode JSON.", exc_info=True)
                return

            for handler in self._handlers:
                try:
                    handler(msg)
                except Exception:
                    self.logger.error("Error handling message", exc_info=True)
                    continue

    def handle_outgoing(self):
        try:
            while not self._out_queue.empty():
                msg = self._out_queue.get_nowait()
                self.send(msg)
                self._out_queue.task_done()  # so we can join the queue elsewhere
        except:
            self.logger.error("Error in outgoing message processing",
                              exc_info=True)

    def update(self):
        """Call periodically to check for incoming messages and/or send messages
        in the outgoing queue.

        """
        self.handle_incoming()
        self.handle_outgoing()
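
A hedged sketch of the polling pattern the docstring describes; in PyEPL this polling happens inside the event machinery, so the finite loop here is only illustrative:

server = SocketServer()
server.bind("tcp://*:8889")
server.register_handler(lambda msg: print("got", msg))

for _ in range(3):           # stand-in for the PyEPL polling loop
    server.send_heartbeat()  # internally rate-limited to one per second
    server.update()          # poll incoming messages, then flush the outgoing queue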
Code example #37
File: util.py Project: e42s/scales
class GraphiteReporter(threading.Thread):
    """A graphite reporter thread."""
    def __init__(self, host, port, maxQueueSize=10000):
        """Connect to a Graphite server on host:port."""
        threading.Thread.__init__(self)

        self.host, self.port = host, port
        self.sock = None
        self.queue = Queue()
        self.maxQueueSize = maxQueueSize
        self.daemon = True

    def run(self):
        """Run the thread."""
        while True:
            try:
                try:
                    name, value, valueType, stamp = self.queue.get()
                except TypeError:
                    break
                self.log(name, value, valueType, stamp)
            finally:
                self.queue.task_done()

    def connect(self):
        """Connects to the Graphite server if not already connected."""
        if self.sock is not None:
            return
        backoff = 0.01
        while True:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(5)
                sock.connect((self.host, self.port))
                self.sock = sock
                return
            except socket.error:
                time.sleep(random.uniform(0, 2.0 * backoff))
                backoff = min(backoff * 2.0, 5.0)

    def disconnect(self):
        """Disconnect from the Graphite server if connected."""
        if self.sock is not None:
            try:
                self.sock.close()
            except socket.error:
                pass
            finally:
                self.sock = None

    def _sendMsg(self, msg):
        """Send a line to graphite. Retry with exponential backoff."""
        if not self.sock:
            self.connect()
        backoff = 0.001
        while True:
            try:
                self.sock.sendall(msg)
                break
            except socket.error:
                logging.warning('Graphite connection error', exc_info=True)
                self.disconnect()
                time.sleep(random.uniform(0, 2.0 * backoff))
                backoff = min(backoff * 2.0, 5.0)
                self.connect()

    def _sanitizeName(self, name):
        """Sanitize a metric name."""
        return name.replace(' ', '-')

    def log(self, name, value, valueType=None, stamp=None):
        """Log a named numeric value. The value type may be 'value',
    'count', or None."""
        if isinstance(value, float):
            form = "%s%s %2.2f %d\n"
        else:
            form = "%s%s %s %d\n"

        if valueType is not None and len(
                valueType) > 0 and valueType[0] != '.':
            valueType = '.' + valueType

        if not stamp:
            stamp = time.time()

        self._sendMsg(
            form % (self._sanitizeName(name), valueType or '', value, stamp))

    def enqueue(self, name, value, valueType=None, stamp=None):
        """Enqueue a call to log."""
        # If queue is too large, refuse to log.
        if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
            return
        # Stick arguments into the queue
        self.queue.put((name, value, valueType, stamp))

    def flush(self):
        """Block until all stats have been sent to Graphite."""
        self.queue.join()

    def shutdown(self):
        """Shut down the background thread."""
        self.queue.put(None)
        self.flush()
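
Typical driving code, as a sketch; the endpoint is a placeholder, and note that ``flush()`` blocks until a live Graphite server has actually accepted the data:

reporter = GraphiteReporter('graphite.example.com', 2003)  # placeholder endpoint
reporter.start()                               # daemon thread; consumes the queue
reporter.enqueue('app.requests', 12, 'count')
reporter.flush()                               # block until the queue is fully drained
reporter.shutdown()                            # None sentinel stops the run() loop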
Code example #38
File: session.py Project: wburkhardt/netconf_client
class Session:
    """A session with a NETCONF server

    This class is a context manager, and should always be either used
    with a ``with`` statement or the :meth:`close` method should be
    called manually when the object is no longer required.

    :ivar server_capabilities: The list of capabilities parsed from
                               the server's ``<hello>``

    :ivar client_capabilities: The list of capabilities parsed from
                               the client's ``<hello>``

    """
    def __init__(self, sock):
        self.sock = sock
        self.mode = "1.0"

        self.send_msg(DEFAULT_HELLO)
        self.client_hello = DEFAULT_HELLO

        self.parser = parse_messages(sock, self.mode)

        # First message will be the server hello
        self.server_hello = next(self.parser)
        server_ele = etree.fromstring(self.server_hello)
        self.session_id = int(
            server_ele.xpath("/nc:hello/nc:session-id",
                             namespaces=NAMESPACES)[0].text)
        self.server_capabilities = capabilities_from_hello(server_ele)

        client_ele = etree.fromstring(self.client_hello)
        self.client_capabilities = capabilities_from_hello(client_ele)

        if (CAP_NETCONF_11 in self.client_capabilities
                and CAP_NETCONF_11 in self.server_capabilities):
            self.mode = "1.1"

        self.unknown_recvq = Queue()
        self.notifications = Queue()
        self.rpc_reply_futures = Queue()
        self.thread = Thread(target=self._recv_loop)
        self.thread.daemon = True
        self.thread.start()

    def __enter__(self):
        return self

    def __exit__(self, _, __, ___):
        self.close()

    def close(self):
        """Closes any associated sockets and frees any other associated resources"""
        try:
            self.sock.close()
        except Exception:
            pass

        try:
            while True:
                f = self.rpc_reply_futures.get(block=False)
                f.set_exception(SessionClosedException())
                self.rpc_reply_futures.task_done()
        except Empty:
            pass

    def send_msg(self, msg):
        """Sends a raw byte string to the server

        :param bytes msg: The byte string to send
        """
        logger.debug("Sending message on session %s", msg)
        if self.mode == "1.0":
            self.sock.sendall(msg + b"]]>]]>")
        elif self.mode == "1.1":
            self.sock.sendall(frame_message_11(msg))

    def send_rpc(self, rpc):
        """Sends a raw RPC to the server

        :param bytes rpc: The RPC to send

        :rtype: :class:`concurrent.futures.Future` with a result type
                of tuple(:class:`bytes`, :class:`lxml.Element`)

        """
        f = Future()
        self.rpc_reply_futures.put(f)
        self.send_msg(rpc)
        return f

    def _recv_loop(self):
        while True:
            try:
                msg = self.parser.send(self.mode)
            except Exception as e:
                logger.info("Stopping recv thread due to exception %s", e)
                return

            ele = etree.fromstring(msg)
            if ele.xpath("/nc:rpc-reply", namespaces=NAMESPACES):
                try:
                    f = self.rpc_reply_futures.get(block=False)

                    if ele.xpath("/nc:rpc-reply/nc:rpc-error",
                                 namespaces=NAMESPACES):
                        f.set_exception(RpcError(msg, ele))
                    else:
                        f.set_result((msg, ele))
                    self.rpc_reply_futures.task_done()
                    msg = None
                except Empty:
                    logger.warning(
                        "An <rpc-reply> was received "
                        "with no corresponding handler: %s",
                        msg,
                    )
            elif ele.xpath("/notif:notification", namespaces=NAMESPACES):
                self.notifications.put((msg, ele))
                msg = None

            if msg is not None:
                self.unknown_recvq.put((msg, ele))
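
A hedged usage sketch; the address is a placeholder, and a real NETCONF transport is normally an SSH channel rather than a plain TCP socket:

import socket

sock = socket.create_connection(('netconf.example.com', 830))  # placeholder host
with Session(sock) as session:
    future = session.send_rpc(b'<rpc ...>')   # placeholder RPC payload
    raw, parsed = future.result(timeout=30)   # bytes and lxml Element on success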
Code example #39
File: Impinj.py Project: esitarski/CrossMgr
class Impinj( object ):

	def __init__( self, dataQ, strayQ, messageQ, shutdownQ, impinjHost, impinjPort, antennaStr, statusCB ):
		self.impinjHost = impinjHost
		self.impinjPort = impinjPort
		self.statusCB = statusCB
		if not antennaStr:
			self.antennas = [0]
		else:
			self.antennas = [int(a) for a in antennaStr.split()]
		self.tagGroup = None
		self.tagGroupTimer = None
		self.dataQ = dataQ			# Queue to write tag reads.
		self.strayQ = strayQ		# Queue to write stray reads.
		self.messageQ = messageQ	# Queue to write operational messages.
		self.shutdownQ = shutdownQ	# Queue to listen for shutdown.
		self.logQ = Queue()
		self.rospecID = 123
		self.readerSocket = None
		self.timeCorrection = None	# Correction between the reader's time and the computer's time.
		self.connectedAntennas = []
		self.antennaReadCount = defaultdict(int)
		self.lastReadTime = {}
		self.start()
		
	def start( self ):
		# Create a log file name.
		tNow = getTimeNow()
		dataDir = os.path.join( HOME_DIR, 'ImpinjData' )
		if not os.path.isdir( dataDir ):
			os.makedirs( dataDir )
		self.fname = os.path.join( dataDir, tNow.strftime('Impinj-%Y-%m-%d-%H-%M-%S.txt') )
		
		# Create a log queue and start a thread to write the log.
		self.logQ.put( ('msg', 'Tag ID,Discover Time') )  # a (kind, text) tuple, as handleLogFile expects
		self.logFileThread = threading.Thread( target=self.handleLogFile )
		self.logFileThread.daemon = True
		self.logFileThread.start()
	
		self.keepGoing = True
		self.tagCount = 0
		
	#-------------------------------------------------------------------------
	
	def checkKeepGoing( self ):
		if not self.keepGoing:
			return False
			
		try:
			# Check the shutdown queue for a message.  If there is one, shutdown.
			d = self.shutdownQ.get( False )
			self.keepGoing = False
			return False
		except Empty:
			return True
			
	def reconnectDelay( self ):
		if self.checkKeepGoing():
			time.sleep( ReconnectDelaySeconds )
		
	#-------------------------------------------------------------------------
	
	def sendCommand( self, message ):
		self.messageQ.put( ('Impinj', '-----------------------------------------------------') )
		self.messageQ.put( ('Impinj', 'Sending Message:\n{}\n'.format(message)) )
		try:
			message.send( self.readerSocket )
		except Exception as e:
			self.messageQ.put( ('Impinj', 'Send command fails: {}'.format(e)) )
			return False
			
		try:
			response = WaitForMessage( message.MessageID, self.readerSocket )
		except Exception as e:
			self.messageQ.put( ('Impinj', 'Get response fails: {}'.format(e)) )
			return False
			
		self.messageQ.put( ('Impinj', 'Received Response:\n{}\n'.format(response)) )
		return True, response
		
	def sendCommands( self ):
		self.connectedAntennas = []
		self.antennaReadCount = defaultdict(int)
		
		self.messageQ.put( ('Impinj', 'Connected to: ({}:{})'.format(self.impinjHost, self.impinjPort) ) )
		
		self.messageQ.put( ('Impinj', 'Waiting for READER_EVENT_NOTIFICATION...') )
		response = UnpackMessageFromSocket( self.readerSocket )
		self.messageQ.put( ('Impinj', '\nReceived Response:\n{}\n'.format(response)) )
		
		# Compute a correction between the reader's time and the computer's time.
		readerTime = response.getFirstParameterByClass(UTCTimestamp_Parameter).Microseconds
		readerTime = datetime.datetime.utcfromtimestamp( readerTime / 1000000.0 )
		self.timeCorrection = getTimeNow() - readerTime
		
		self.messageQ.put( ('Impinj', '\nReader time is {} seconds different from computer time\n'.format(self.timeCorrection.total_seconds())) )
		
		# Reset to factory defaults.
		success, response = self.sendCommand( SET_READER_CONFIG_Message(ResetToFactoryDefault = True) )
		if not success:
			return False
			
		# Get the connected antennas.
		success, response = self.sendCommand( GET_READER_CONFIG_Message(RequestedData=GetReaderConfigRequestedData.AntennaProperties) )
		if success:
			self.connectedAntennas = [p.AntennaID for p in response.Parameters
				if isinstance(p, AntennaProperties_Parameter) and p.AntennaConnected and p.AntennaID <= 4]
		
		# Configure a periodic Keepalive message.
		# Change receiver sensitivity (if specified).  This value is reader dependent.
		receiverSensitivityParameter = []
		if ReceiverSensitivity is not None:
			receiverSensitivityParameter.append(
				RFReceiver_Parameter( 
					ReceiverSensitivity = ReceiverSensitivity
				)
			)
		
		# Change transmit power (if specified).  This value is reader dependent.
		transmitPowerParameter = []
		if TransmitPower is not None:
			transmitPowerParameter.append(
				RFTransmitter_Parameter( 
					HopTableID = 1,
					ChannelIndex = 0,
					TransmitPower = TransmitPower,
				)
			)
		
		# Change Inventory Control (if specified).
		inventoryCommandParameter = []
		if any(v is not None for v in [InventorySession, TagPopulation, TagTransitTime]):
			inventoryCommandParameter.append(
				C1G2InventoryCommand_Parameter( Parameters = [
						C1G2SingulationControl_Parameter(
							Session = InventorySession or 0,
							TagPopulation = TagPopulation or TagPopulationDefault,
							TagTransitTime = (TagTransitTime or 3)*1000,
						),
					],
				)
			)
		
		success, response = self.sendCommand(
			SET_READER_CONFIG_Message( Parameters = [
					AntennaConfiguration_Parameter(
						AntennaID = 0,
						Parameters = receiverSensitivityParameter + transmitPowerParameter + inventoryCommandParameter,
					),
					KeepaliveSpec_Parameter(
						KeepaliveTriggerType = KeepaliveTriggerType.Periodic,
						PeriodicTriggerValue = int(KeepaliveSeconds*1000),
					),
				],
			),
		)
		if not success:
			return False
		
		# Disable all rospecs in the reader.
		success, response = self.sendCommand( DISABLE_ROSPEC_Message(ROSpecID = 0) )
		if not success:
			return False
		
		# Delete our old rospec.
		success, response = self.sendCommand( DELETE_ROSPEC_Message(ROSpecID = self.rospecID) )
		if not success:
			return False
			
		# Get the C1G2UHFRFModeTable and extract available mode identifiers.
		modeIdentifiers = None
		maxNumberOfAntennaSupported = 4
		try:
			success, response = self.sendCommand(GET_READER_CAPABILITIES_Message(RequestedData = GetReaderCapabilitiesRequestedData.All))
			if success:
				modeIdentifiers = [e.ModeIdentifier for e in response.getFirstParameterByClass(C1G2UHFRFModeTable_Parameter).Parameters]
				gdc = response.getFirstParameterByClass(GeneralDeviceCapabilities_Parameter)
				maxNumberOfAntennaSupported = gdc.MaxNumberOfAntennaSupported
			else:
				self.messageQ.put( ('Impinj', 'GET_READER_CAPABILITIES fails.') )
		except Exception as e:
			self.messageQ.put( ('Impinj', 'GET_READER_CAPABILITIES Exception: {}:\n{}'.format(e, traceback.format_exc())) )
				
		# Configure our new rospec.
		if ProcessingMethod == FirstReadMethod:
			cmd = GetBasicAddRospecMessage(ROSpecID = self.rospecID, antennas = self.antennas)
		else:
			cmd = GetAddRospecRSSIMessage(
				ROSpecID = self.rospecID, antennas = self.antennas,
				modeIdentifiers=modeIdentifiers, maxNumberOfAntennaSupported=maxNumberOfAntennaSupported
			)
		success, response = self.sendCommand(cmd)
		if not success:
			return False
			
		# Enable our new rospec.
		success, response = self.sendCommand( ENABLE_ROSPEC_Message(ROSpecID = self.rospecID) )
		if not success:
			return False
		
		success = (success and isinstance(response, ENABLE_ROSPEC_RESPONSE_Message) and response.success())
		return success
	
	def reportTag( self, tagID, discoveryTime, sampleSize=1, antennaID=0, quadReg=False ):
		lrt = self.lastReadTime.get(tagID, tOld)
		if discoveryTime > lrt:
			self.lastReadTime[tagID] = discoveryTime
		
		if (discoveryTime - lrt).total_seconds() < RepeatSeconds:
			self.messageQ.put( (
				'Impinj',
				'Received {}.  tag={} Skipped (<{} secs ago).  {}'.format(self.tagCount, tagID, RepeatSeconds,
				discoveryTime.strftime('%H:%M:%S.%f')),
				self.antennaReadCount,
				)
			)
			return False
			
		self.dataQ.put( (tagID, discoveryTime) )
		
		self.logQ.put( (
				'log',
				'{},{}'.format(
					tagID,
					discoveryTime.strftime('%a %b %d %H:%M:%S.%f %Z %Y-%m-%d'),
				)
			)
		)
		
		self.messageQ.put( (
			'Impinj',
			'{} {}. {} - {}{}{}'.format(
					'QuadReg' if quadReg else 'FirstRead',
					self.tagCount,
					tagID,
					discoveryTime.strftime('%H:%M:%S.%f'),
					' samples={}'.format(sampleSize) if sampleSize > 1 else '',
					' antennaID={}'.format(antennaID) if antennaID else '',
			),
			self.antennaReadCount,
			)
		)
		Bell()
		return True
	
	def handleTagGroup( self ):
		if not self.tagGroup:
			return
		reads, strays = self.tagGroup.getReadsStrays( method=ProcessingMethod, antennaChoice=AntennaChoice )
		for tagID, discoveryTime, sampleSize, antennaID in reads:
			self.reportTag( tagID, discoveryTime, sampleSize, antennaID, True )
			
		self.strayQ.put( ('strays', strays) )
		self.tagGroupTimer = threading.Timer( 1.0, self.handleTagGroup )
		self.tagGroupTimer.start()
	
	def handleLogFile( self ):
		while 1:
			msg = self.logQ.get()
			self.logQ.task_done()
			
			if msg[0] == 'shutdown':
				return
			try:
				pf = io.open( self.fname, 'a' )
			except:
				continue
			
			pf.write( msg[1] if msg[1].endswith('\n') else msg[1] + '\n' )
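			# Drain any lines queued in the meantime into the same open file before closing it.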
			while 1:
				try:
					msg = self.logQ.get( False )
				except Empty:
					break
				self.logQ.task_done()
				
				if msg[0] == 'shutdown':
					return
				pf.write( msg[1] if msg[1].endswith('\n') else msg[1] + '\n' )
			pf.close()
			time.sleep( 0.1 )
	
	def runServer( self ):
		self.messageQ.put( ('BackupFile', self.fname) )
		
		self.messageQ.put( ('Impinj', '*****************************************' ) )
		self.messageQ.put( ('Impinj', 'Reader Server Started: ({}:{})'.format(self.impinjHost, self.impinjPort) ) )
			
		# Create an old default time for last tag read.
		self.tOld = getTimeNow() - datetime.timedelta( days = 100 )
		utcfromtimestamp = datetime.datetime.utcfromtimestamp
		
		while self.checkKeepGoing():
			#------------------------------------------------------------
			# Connect Mode.
			#
			# Create a socket to connect to the reader.
			self.readerSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
			self.readerSocket.settimeout( ConnectionTimeoutSeconds )
			
			self.messageQ.put( ('Impinj', 'state', False) )
			self.messageQ.put( ('Impinj', '') )
			self.messageQ.put( ('Impinj', 'Trying to Connect to Reader: ({}:{})...'.format(self.impinjHost, self.impinjPort) ) )
			self.messageQ.put( ('Impinj', 'ConnectionTimeout={:.2f} seconds'.format(ConnectionTimeoutSeconds) ) )
			
			try:
				self.readerSocket.connect( (self.impinjHost, self.impinjPort) )
			except Exception as e:
				self.messageQ.put( ('Impinj', 'Reader Connection Failed: {}'.format(e) ) )
				self.readerSocket.close()
				self.messageQ.put( ('Impinj', 'Attempting Reconnect in {} seconds...'.format(ReconnectDelaySeconds)) )
				self.reconnectDelay()
				continue

			self.messageQ.put( ('Impinj', 'state', True) )
			
			try:
				success = self.sendCommands()
			except Exception as e:
				self.messageQ.put( ('Impinj', 'Send Command Error={}'.format(e)) )
				success = False
				
			if not success:
				self.messageQ.put( ('Impinj', 'Reader Initialization Failed.') )
				self.messageQ.put( ('Impinj', 'Disconnecting Reader.' ) )
				self.messageQ.put( ('Impinj', 'state', False) )
				self.readerSocket.close()
				self.messageQ.put( ('Impinj', 'Attempting Reconnect in {} seconds...'.format(ReconnectDelaySeconds)) )
				self.reconnectDelay()
				self.statusCB()
				continue
				
			self.statusCB(
				connectedAntennas = self.connectedAntennas,
				timeCorrection = self.timeCorrection,
			)
			
			self.tagGroup = TagGroup()
			self.handleTagGroup()
				
			tUpdateLast = tKeepaliveLast = getTimeNow()
			self.tagCount = 0
			lastDiscoveryTime = None
			while self.checkKeepGoing():
			
				#------------------------------------------------------------
				# Read Mode.
				#
				try:
					response = UnpackMessageFromSocket( self.readerSocket )
				except socket.timeout:
					t = getTimeNow()
					
					if (t - tKeepaliveLast).total_seconds() > KeepaliveSeconds * 2:
						self.messageQ.put( ('Impinj', 'Reader Connection Lost (missing Keepalive).') )
						self.readerSocket.close()
						self.messageQ.put( ('Impinj', 'Attempting Reconnect...') )
						break
					
					if (t - tUpdateLast).total_seconds() >= ReaderUpdateMessageSeconds:
						self.messageQ.put( ('Impinj', 'Listening for Impinj reader data...') )
						tUpdateLast = t
					continue
				
				if isinstance(response, KEEPALIVE_Message):
					# Respond to the KEEP_ALIVE message with KEEP_ALIVE_ACK.
					try:
						KEEPALIVE_ACK_Message().send( self.readerSocket )
					except socket.timeout:
						self.messageQ.put( ('Impinj', 'Reader Connection Lost (Keepalive_Ack timeout).') )
						self.readerSocket.close()
						self.messageQ.put( ('Impinj', 'Attempting Reconnect...') )
						break
						
					tKeepaliveLast = getTimeNow()
					continue
				
				if not isinstance(response, RO_ACCESS_REPORT_Message):
					if not isinstance(response, READER_EVENT_NOTIFICATION_Message):
						self.messageQ.put( ('Impinj', 'Skipping: {}'.format(response.__class__.__name__)) )
					continue
				
				for tag in response.getTagData():
					self.tagCount += 1
					
					antennaID = tag['AntennaID']
					
					try:
						self.antennaReadCount[antennaID] += 1
					except Exception as e:
						self.messageQ.put( ('Impinj', 'Received {}.  Missing AntennaID.'.format(self.tagCount)) )
					
					try:
						tagID = tag['EPC']
					except Exception as e:
						self.messageQ.put( ('Impinj', 'Received {}.  Skipping: missing tagID.'.format(self.tagCount)) )
						continue
						
					try:
						tagID = HexFormatToStr( tagID )
					except Exception as e:
						self.messageQ.put( ('Impinj', 'Received {}.  Skipping: HexFormatToStr fails.  Error={}'.format(self.tagCount, e)) )
						continue
					
					try:
						discoveryTime = tag['Timestamp']		# In microseconds since Jan 1, 1970
					except Exception as e:
						self.messageQ.put( ('Impinj', 'Received {}.  Skipping: Missing Timestamp'.format(self.tagCount)) )
						continue
					
					peakRSSI = tag.get('PeakRSSI', None)		# -127..127 in db.
					
					# Convert discoveryTime to Python format and correct for reader time difference.
					discoveryTime = utcfromtimestamp( discoveryTime / 1000000.0 ) + self.timeCorrection
					if ImpinjDebug and lastDiscoveryTime is not None:
						six.print_( '{}            \r'.format( (discoveryTime - lastDiscoveryTime).total_seconds() ) )
					lastDiscoveryTime = discoveryTime
					
					if peakRSSI is not None:
						self.tagGroup.add( antennaID, tagID, discoveryTime, peakRSSI )
					else:
						self.reportTag( tagID, discoveryTime, antennaID=antennaID )
		
		# Cleanup.
		if self.readerSocket:
			try:
				success, response = self.sendCommand( CLOSE_CONNECTION_Message() )
			except socket.timeout:
				pass
			self.readerSocket.close()
			self.readerSocket = None
		
		self.logQ.put( ('shutdown',) )
		self.logFileThread.join()

		if self.tagGroupTimer:
			self.tagGroupTimer.cancel()
		
		return True
		
	def purgeDataQ( self ):
		while 1:
			try:
				d = self.dataQ.get( False )
			except Empty:
				break
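
A minimal sketch (not part of the original source) of the non-blocking drain idiom purgeDataQ uses above: Queue.get(False) raises Empty as soon as the queue is exhausted, which ends the loop without blocking.

from queue import Queue, Empty	# Python 3 import; an assumption for this sketch

def drain( q ):
	# Collect everything currently queued without ever blocking.
	items = []
	while 1:
		try:
			items.append( q.get(False) )
		except Empty:
			break
	return items

q = Queue()
for tagID in ('E2001234', 'E2005678'):
	q.put( (tagID, 'discoveryTime') )
print( drain(q) )	# [('E2001234', 'discoveryTime'), ('E2005678', 'discoveryTime')]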
Code example #40
0
class TCPClientManager(object):
    """A Client for the 'Push' feature in Device Cloud"""
    def __init__(self, conn, secure=True, ca_certs=None, workers=1):
        """
        Arbitrator for multiple TCP Client Sessions

        :param conn: The :class:`devicecloud.DeviceCloudConnection` to use
        :param secure: Whether or not to create a secure SSL wrapped session.
        :param ca_certs: Path to a file containing Certificates.
            If not provided, the devicecloud.crt file provided with the module will
            be used.  In most cases, the devicecloud.crt file should be acceptable.
        :param workers: Number of workers threads to process callback calls.
        """
        self._conn = conn
        self._secure = secure
        self._ca_certs = ca_certs

        # A dict mapping Sockets to their PushSessions
        self.sessions = {}
        # IO thread is used to monitor sockets and consume data.
        self._io_thread = None
        # Writer thread is used to send data on sockets.
        self._writer_thread = None
        # Write queue is used to queue up data to write to sockets.
        self._write_queue = Queue()
        # A pool that monitors callback events and invokes them.
        self._callback_pool = CallbackWorkerPool(self._write_queue,
                                                 size=workers)

        self.closed = False
        self.log = logging.getLogger(__name__)

    @property
    def hostname(self):
        return self._conn.hostname

    @property
    def username(self):
        return self._conn.username

    @property
    def password(self):
        return self._conn.password

    def _restart_session(self, session):
        """Restarts and re-establishes session

        :param session: The session to restart
        """
        # Remove the old session key.  If the socket is None, the session was
        # closed by the user and there is no need to restart it.
        if session.socket is not None:
            self.log.info("Attempting restart session for Monitor Id %s." %
                          session.monitor_id)
            del self.sessions[session.socket.fileno()]
            session.stop()
            session.start()
            self.sessions[session.socket.fileno()] = session

    def _writer(self):
        """
        Indefinitely checks the writer queue for data to write
        to socket.
        """
        while not self.closed:
            try:
                sock, data = self._write_queue.get(timeout=0.1)
                self._write_queue.task_done()
                sock.send(data)
            except Empty:
                pass  # nothing to write after timeout
            except socket.error as err:
                if err.errno == errno.EBADF:
                    self._clean_dead_sessions()

    def _clean_dead_sessions(self):
        """
        Traverses sessions to determine if any sockets
        were removed (indicates a stopped session).
        In these cases, remove the session.
        """
        for sck in list(self.sessions.keys()):
            session = self.sessions[sck]
            if session.socket is None:
                del self.sessions[sck]

    def _select(self):
        """
        While the client is not marked as closed, performs a socket select
        on all PushSession sockets.  If any data is received, parses and
        forwards it on to the callback function.  If the callback is
        successful, a PublishMessageReceived message is sent.
        """
        try:
            while not self.closed:
                try:
                    inputready = select.select(self.sessions.keys(), [], [],
                                               0.1)[0]
                    for sock in inputready:
                        session = self.sessions[sock]
                        sck = session.socket

                        if sck is None:
                            # Socket has since been deleted, continue
                            continue

                        # If no message length is defined, nothing has been
                        # consumed yet, so parse the header first.
                        if session.message_length == 0:
                            # Read header information before receiving rest of
                            # message.
                            response_type = _read_msg_header(session)
                            if response_type == NO_DATA:
                                # No data could be read, assume socket closed.
                                if session.socket is not None:
                                    self.log.error(
                                        "Socket closed for Monitor %s." %
                                        session.monitor_id)
                                    self._restart_session(session)
                                continue
                            elif response_type == INCOMPLETE:
                                # More Data to be read.  Continue.
                                continue
                            elif response_type != PUBLISH_MESSAGE:
                                self.log.warning(
                                    "Response Type (%x) does not match PublishMessage (%x)"
                                    % (response_type, PUBLISH_MESSAGE))
                                continue

                        try:
                            if not _read_msg(session):
                                # Data not completely read, continue.
                                continue
                        except PushException as err:
                            # If the socket is None it was closed; otherwise it
                            # was closed when it shouldn't have been, so
                            # restart it.
                            session.data = six.b("")
                            session.message_length = 0

                            if session.socket is None:
                                del self.sessions[sck]
                            else:
                                self.log.exception(err)
                                self._restart_session(session)
                            continue

                        # We received full payload,
                        # clear session data and parse it.
                        data = session.data
                        session.data = six.b("")
                        session.message_length = 0
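                        # Payload layout as parsed below: bytes 0-1 are the
                        # block id, byte 4 the compression flag, and the
                        # message body starts at byte 10.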
                        block_id = struct.unpack('!H', data[0:2])[0]
                        compression = struct.unpack('!B', data[4:5])[0]
                        payload = data[10:]

                        if compression == 0x01:
                            # Data is compressed, uncompress it.
                            payload = zlib.decompress(payload)

                        # Enqueue payload into a callback queue to be
                        # invoked
                        self._callback_pool.queue_callback(
                            session, block_id, payload)
                except select.error as err:
                    # Evaluate sessions if we get a bad file descriptor, if
                    # socket is gone, delete the session.
                    if err.args[0] == errno.EBADF:
                        self._clean_dead_sessions()
                except Exception as err:
                    self.log.exception(err)
        finally:
            for session in self.sessions.values():
                if session is not None:
                    session.stop()

    def _init_threads(self):
        """Initializes the IO and Writer threads"""
        if self._io_thread is None:
            self._io_thread = Thread(target=self._select)
            self._io_thread.start()

        if self._writer_thread is None:
            self._writer_thread = Thread(target=self._writer)
            self._writer_thread.start()

    def create_session(self, callback, monitor_id):
        """
        Creates and returns a PushSession instance based on the input
        monitor_id and callback.  When data is received, the callback will be
        invoked.  If monitor_id is not specified, an Exception is thrown.

        :param callback: Callback function to call when PublishMessage
            messages are received. Expects 1 argument which will contain the
            payload of the pushed message.  Additionally, expects
            function to return True if callback was able to process
            the message, False or None otherwise.
        :param monitor_id: The id of the Monitor, will be queried
            to understand parameters of the monitor.
        """
        self.log.info("Creating Session for Monitor %s." % monitor_id)
        session = SecurePushSession(callback, monitor_id, self, self._ca_certs) \
            if self._secure else PushSession(callback, monitor_id, self)

        session.start()
        self.sessions[session.socket.fileno()] = session

        self._init_threads()
        return session

    def stop(self):
        """Stops all session activity.

        Blocks until io and writer thread dies
        """
        if self._io_thread is not None:
            self.log.info("Waiting for I/O thread to stop...")
            self.closed = True
            self._io_thread.join()

        if self._writer_thread is not None:
            self.log.info("Waiting for Writer Thread to stop...")
            self.closed = True
            self._writer_thread.join()

        self.log.info("All worker threads stopped.")
Code example #41
0
class OvnDbNotifyHandler(object):

    STOP_EVENT = ("STOP", None, None, None)

    def __init__(self, driver):
        self.driver = driver
        self.__watched_events = set()
        self.__lock = threading.Lock()
        self.notifications = Queue()
        self.notify_thread = greenthread.spawn_n(self.notify_loop)
        atexit.register(self.shutdown)

    def matching_events(self, event, row, updates):
        with self.__lock:
            return tuple(t for t in self.__watched_events
                         if t.matches(event, row, updates))

    def watch_event(self, event):
        with self.__lock:
            self.__watched_events.add(event)

    def watch_events(self, events):
        with self.__lock:
            for event in events:
                self.__watched_events.add(event)

    def unwatch_event(self, event):
        with self.__lock:
            try:
                self.__watched_events.remove(event)
            except KeyError:
                # For ONETIME events, they should normally clear on their own
                pass

    def unwatch_events(self, events):
        with self.__lock:
            for event in events:
                try:
                    self.__watched_events.remove(event)
                except KeyError:
                    # For ONETIME events, they should normally clear on
                    # their own
                    pass

    def shutdown(self):
        self.notifications.put(OvnDbNotifyHandler.STOP_EVENT)

    def notify_loop(self):
        while True:
            try:
                match, event, row, updates = self.notifications.get()
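                # STOP_EVENT is the shutdown sentinel; any other item carries
                # the matched event object in 'match'.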
                if (not isinstance(match, row_event.RowEvent) and
                        (match, event, row, updates) == (
                            OvnDbNotifyHandler.STOP_EVENT)):
                    self.notifications.task_done()
                    break
                match.run(event, row, updates)
                if match.ONETIME:
                    self.unwatch_event(match)
                self.notifications.task_done()
            except Exception:
                # If any unexpected exception happens we don't want the
                # notify_loop to exit.
                LOG.exception(_LE('Unexpected exception in notify_loop'))

    def notify(self, event, row, updates=None):
        matching = self.matching_events(
            event, row, updates)
        for match in matching:
            self.notifications.put((match, event, row, updates))
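
A hedged usage sketch for the handler above.  The event class below only mimics the row_event.RowEvent interface the code relies on (matches(), run(), and an ONETIME flag); driver is an assumption.

class SwitchCreated(object):
    ONETIME = True  # the handler unwatches this event after its first run

    def matches(self, event, row, updates):
        return event == 'create'

    def run(self, event, row, updates):
        print('created row:', row)

handler = OvnDbNotifyHandler(driver)       # 'driver' is an assumption
handler.watch_event(SwitchCreated())
handler.notify('create', {'name': 'sw0'})  # queued; executed by notify_loop
handler.shutdown()                         # enqueues STOP_EVENT to end the loop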
Code example #42
0
class BasicSpider:
    def __init__(self, max_task, url_page_format, max_page=1000, start_page=1):
        self.next_page = start_page
        self.start_page = start_page
        self.max_task = max_task
        self.max_page = max_page
        self._queue = Queue()
        self.url_page_format = url_page_format
        self.page_number_data = [None for _ in range(max_page)]

    def _task_controller(self):
        task_done_counter = 0
        while True:
            page, _ = self._queue.get()
            self._queue.task_done()
            if page is not None:
                task_done_counter += 1

            print(page, task_done_counter)
            if task_done_counter % 50 == 0:
                self.save_addup_data('./data/%s-%s.txt' %
                                     (__name__, time.time()))
            if task_done_counter + self.start_page - 1 >= self.max_page:
                self.save_addup_data('./data/%s-%s.txt' %
                                     (__name__, time.time()))
                print(self.get_total())
                break
            else:
                if self.next_page <= self.max_page:
                    print('NEXT_PAGE: %d' % self.next_page)
                    self._run_next_page()

    def _spider_handler(self, page, **kwargs):
        while not self._addup_wrapper(page, **kwargs):
            pass
        self._queue.put_nowait((page, kwargs))

    def _run_next_page(self, kwargs=None):
        if self.next_page > self.max_page:
            return
        while self.page_number_data[self.next_page - 1] is not None:
            self.next_page += 1
            if self.next_page > self.max_page:
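                # All remaining pages are already done; enqueue a (None, ...)
                # sentinel so the controller thread still receives an item.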
                self._queue.put_nowait((None, kwargs or {}))
                return
        threading.Thread(target=self._spider_handler,
                         args=(self.next_page, ),
                         kwargs=kwargs).start()
        self.next_page += 1

    def run(self):

        for i in range(self.max_task):
            self._run_next_page()
            time.sleep(0.1)

        ctrl = threading.Thread(target=self._task_controller,
                                name='task_controller')
        ctrl.start()

        # Keep the main thread alive so the worker threads can be inspected.
        while True:
            time.sleep(1)

    def _addup_wrapper(self, page, **kwargs):
        try:
            self.get_page_addup(page, **kwargs)
        except Exception as e:
            print('===========error')
            print(page)
            print(e)
            # print_exc()
            print('===========error')
        else:
            return True
        return False

    def save_addup_data(self, name):
        tl = [str(i) for i in self.page_number_data]
        with open(name, 'w') as f:
            f.write('\n'.join(tl))

    def get_bs4(self, page):
        req = requests.get(self.url_page_format % page, headers=HEADERS)
        text = req.text
        bs4 = BeautifulSoup(text, 'lxml')
        return bs4

    def get_page_addup(self, page, **kwargs):
        return

    def get_total(self):
        if list(self.check_page_number()):
            raise ValueError('addup data is incomplete')

        counter = 0
        for i in self.page_number_data:
            counter += i
        return counter

    def check_page_number(self):
        for i, v in enumerate(self.page_number_data):
            if v is None:
                yield i

    def load_page_data(self, filepath):
        self.page_number_data = []
        with open(filepath, 'r') as f:
            data_str = f.read()
        for i in data_str.split('\n'):
            if i:
                if i.strip() == 'None' or i.strip() == '0':
                    self.page_number_data.append(None)
                else:
                    self.page_number_data.append(int(i.strip()))
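
A hypothetical subclass sketch: get_page_addup is the hook a concrete spider overrides, and it is expected to record its per-page result in self.page_number_data[page - 1] so finished pages are skipped on restart.  The URL and the '.item' selector are assumptions.

class ItemCountSpider(BasicSpider):
    def get_page_addup(self, page, **kwargs):
        bs4 = self.get_bs4(page)                      # fetch and parse one listing page
        items = bs4.select('.item')                   # '.item' is an assumed selector
        self.page_number_data[page - 1] = len(items)  # record this page's item count

spider = ItemCountSpider(max_task=4,
                         url_page_format='https://example.com/list?page=%d',
                         max_page=100)
spider.run()  # blocks; _task_controller saves running totals under ./data/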