Example #1
class Loader(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue(5)
        self.killed = False
        self.lock = threading.Lock()
        self.locks = {}

    def load(self, filename, callback):
        self.locks[filename] = threading.Condition()
        self.queue.put((filename, callback, self.locks[filename]))
        
    def full(self):
        return self.queue.full()
        
    def get_lock(self, filename):
        self.lock.acquire()
        try:
            return self.locks[filename]
        finally:
            self.lock.release()

    def run(self):
        while not self.killed:
            filename, callback, lock = self.queue.get()
            lock.acquire()
            def discovered(d, is_media):
                callback(d, is_media)
                lock.acquire()
                lock.notifyAll()
                lock.release()
            d = discoverer.Discoverer(filename)
            d.connect('discovered', discovered)
            d.discover()
            lock.wait()
            lock.release()
            self.lock.acquire()
            del self.locks[filename]
            self.lock.release()
            
            
    def wait(self, filename):
        '''Wait for metadata to finish loading'''
        if self.queue.full():
            return
        self.lock.acquire()
        if filename not in self.locks:
            self.lock.release()
            return
        condition = self.locks[filename]
        self.lock.release()
        condition.acquire()
        condition.wait()
        condition.release()
            
    def kill(self):
        self.killed = True
Example #2
def test_queue():
    q = Queue()
    print dir(q)
    q.put('a')
    q.put('b')
    print q.queue
    print q.get()
    print q.queue
    print q.empty()
    print q.full()
    lock = threading.Lock()
Example #3
class ThreadPool:
	def __init__(self, num_threads):
		self.tasks = Queue(num_threads)
		for _ in range(num_threads):
			Worker(self.tasks)

	def add_task(self, func, *args, **kargs):
		self.tasks.put((func, args, kargs))
		print self.tasks.full()

	def wait_completion(self):
		self.tasks.join()
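
Worker is not defined in this snippet. A minimal implementation consistent with how the pool uses it (a hypothetical sketch, not the original project's code) could be:

import threading

class Worker(threading.Thread):
    """Hypothetical worker: consumes (func, args, kargs) tuples from the
    shared task queue and runs them for as long as the process lives."""
    def __init__(self, tasks):
        threading.Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception as e:
                print(e)
            finally:
                self.tasks.task_done()  # lets tasks.join() return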
Example #4
class VideoStream(object):
    def __init__(self, url, queueSize=4, mode='buffer'):
        from Queue import Queue
        self.stopped = False
        self.frameBuffer = Queue(maxsize=queueSize)
        self.mode = mode
        self.stream = cv2.VideoCapture(url)
        if cv2_version == '3':
            self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 3)

    def start(self):
        """ start a thread to read frames from the file video stream. """
        from threading import Thread
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """ capture frame from stream and add it to queue in a loop until eos """
        while self.stream.isOpened():
            # if the thread stop indicator variable is set, stop capturing
            if self.stopped:
                return

            # otherwise, ensure the queue has room in it and add frame to it
            if not self.frameBuffer.full():
                (grabbed, frame) = self.stream.read()
                if not grabbed:
                    self.stop()
                    return
                self.frameBuffer.put(frame)

            # for stream mode, stash the last frame in the queue if queue is full
            if self.mode == 'stream' and self.frameBuffer.full():
                self.frameBuffer.get()
        self.stop()
        return

    def read(self):
        """ returns next frame in the queue. """
        return self.frameBuffer.get()

    def more(self):
        """ checks if there are still frames in the queue. """
        return self.frameBuffer.qsize() > 0

    def stop(self):
        """ Stops the videostream thread """
        self.stopped = True
Example #5
    def testQueue(self):
        queue = Queue(5)

        self.assertEqual(queue.empty(), True)
        with self.assertRaises(Exception):
            queue.dequeue()

        queue.enqueue(12)
        queue.enqueue(10)
        queue.enqueue(17)
        self.assertEqual(queue.arr, [12, 10, 17, None, None])

        queue.enqueue(7)
        queue.enqueue(21)
        self.assertEqual(queue.full(), True)

        queue.dequeue()
        queue.dequeue()
        self.assertEqual(queue.arr, [None, None, 17, 7, 21])

        queue.dequeue()
        self.assertEqual(queue.dequeue(), 7)

        queue.dequeue()
        self.assertEqual(queue.empty(), True)
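
The Queue under test here is not the standard-library class: it exposes enqueue(), dequeue(), and a fixed-size arr backing list, and it nulls out vacated slots. A circular-buffer sketch that satisfies these assertions (an illustration, not the tested project's code):

class Queue:
    def __init__(self, capacity):
        self.arr = [None] * capacity
        self.capacity = capacity
        self.front = 0   # index of the next item to dequeue
        self.count = 0   # number of stored items

    def empty(self):
        return self.count == 0

    def full(self):
        return self.count == self.capacity

    def enqueue(self, item):
        if self.full():
            raise Exception("queue is full")
        self.arr[(self.front + self.count) % self.capacity] = item
        self.count += 1

    def dequeue(self):
        if self.empty():
            raise Exception("queue is empty")
        item = self.arr[self.front]
        self.arr[self.front] = None  # the test asserts vacated slots are reset
        self.front = (self.front + 1) % self.capacity
        self.count -= 1
        return item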
Example #6
class AntiFlapping(object):
    """
    AntiFlapping class to process events in a timely manner.
    """
    def __init__(self, window):
        self.window = window
        self.tasks = Queue(maxsize=1)
        self._window_ended = True
        self._thread = Thread(name="AntiFlapping", target=self._run)
        self._thread.start()

    def newEvent(self, func, kwargs={}):
        """
        newEvent Triggered.
        """
        if not self.tasks.full() and self._window_ended:
            self.tasks.put({'func': func, 'args':kwargs})

    def _run(self):
        """
        internal runloop that will fire tasks in order.
        """
        while True:
            task = self.tasks.get()
            self._window_ended = False
            sleep(self.window)
            self._window_ended = True
            if task['args']:
                task['func'](**task['args'])
            else:
                task['func']()
Example #7
class AntiFlapping(object):
    """
    AntiFlapping class to process events in a timely manner.
    """
    def __init__(self, window):
        self.window = int(window)
        self.tasks = Queue(maxsize=1)
        self._window_ended = True
        self._thread = Thread(name="AntiFlapping", target=self._run)
        self._thread.start()

    def newEvent(self, func, kwargs={}):
        """
        newEvent Triggered.
        """
        if not self.tasks.full() and self._window_ended:
            self.tasks.put({'func': func, 'args': kwargs})

    def _run(self):
        """
        internal runloop that will fire tasks in order.
        """
        while True:
            task = self.tasks.get()
            self._window_ended = False
            sleep(self.window)
            self._window_ended = True
            if task['args']:
                task['func'](**task['args'])
            else:
                task['func']()
Example #8
class ConnectionKeeper(cherrypy.process.plugins.SimplePlugin):
    def __init__(self, bus, factory, description):
        self.bus = bus
        self.description = description
        self.factory = factory
        self.connection_queue = Queue(10)

        cherrypy.process.plugins.SimplePlugin.__init__(self, bus)

    def start(self):
        self.bus.log("Populating connection queue for %s" % self.description)

        while not self.connection_queue.full():
            self.connection_queue.put(self.factory())

    def stop(self):
        self.bus.log("Cleaning up connections for %s" % self.description)
        
        while not self.connection_queue.empty():
            self.connection_queue.get(False)
    
    def connect(self):
        return self.connection_queue.get()
    
    def disconnect(self, cxn):
        self.connection_queue.put(cxn)
Example #9
class CmdVelCH:
    def __init__(self):
        rospy.init_node('cmd_vel_command_handler')
        # Publisher to Differential Drive robot controller
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        ac_rate = rospy.get_param("~action_cycle_rate")
        self.rate = rospy.Rate(ac_rate)
        # Subscriber to Velocity Command coming from Environment
        rospy.Subscriber('env_cmd_vel',
                         Twist,
                         self.callback_env_cmd_vel,
                         queue_size=1)
        self.msg = Twist()
        # Queue with maximum size 1
        self.queue = Queue(maxsize=1)

    def callback_env_cmd_vel(self, data):
        try:
            # Add to the Queue the next command to execute
            self.queue.put(data)
        except:
            pass

    def cmd_vel_publisher(self):
        while not rospy.is_shutdown():
            # If a command from the environment is waiting to be executed,
            # publish the command, otherwise publish zero velocity message
            if self.queue.full():
                self.cmd_vel_pub.publish(self.queue.get())
            else:
                self.cmd_vel_pub.publish(Twist())
            self.rate.sleep()
Example #10
class FileVideoStream:
    def __init__(self, path, queueSize=128):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        # self.videoname,_ =  os.path.splitext(path)
        self.stream = cv2.VideoCapture(path)
        self.stopped = False
        self.image_name = None
        self.sequence = self.stream.get(cv2.CAP_PROP_POS_FRAMES)
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                return

            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                current_index = self.stream.get(cv2.CAP_PROP_POS_FRAMES)
                self.sequence = current_index
                self.temporal_location = self.stream.get(cv2.CAP_PROP_POS_MSEC)
                self.image_name = "img-%d.jpg" % current_index

                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file

                if not grabbed:
                    self.stop()
                    return
                # add the frame to the queue
                self.Q.put(frame)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def get_image_name(self):
        return self.image_name

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
Example #11
class HashQueue(object):
    def __init__(self, maxsize=100):
        self.maxsize = maxsize
        self.queue = Queue(maxsize)
        self.data = {}

    def put(self, key, value):
        if self.queue.full():
            item = self.queue.get()
            self.data.pop(item, None)

        self.queue.put(key)
        self.data[key] = value

    def get(self, key):
        return self.data.get(key, '')

    def status(self, interval=30):
        counter = 0

        for message, timestamp in self.data.values():
            if timestamp > dt.now() - timedelta(seconds=interval):
                counter += 1

        return counter
Example #12
class BaseSinker(threading.Thread):
    def __init__(self, filename, timer=10):
        threading.Thread.__init__(self)
        self.queue = Queue()
        self.filename = filename
        self.timer = timer
        self.establish_date = arrow.utcnow().to('Asia/Shanghai').strftime(
            "%Y-%m-%d")
        if not os.path.exists("Flows"):
            os.mkdir("Flows")
            logger.warning('Directory (Flows) does not exist, rebuilding.')
        if os.path.exists("Flows/{}.out".format(filename)):
            timestamp = time.localtime(
                os.stat("Flows/{}.out".format(filename)).st_ctime)
            create_date = "{}-{}-{}".format(timestamp[0], timestamp[1],
                                            timestamp[2])
            if time.strptime(self.establish_date, '%Y-%m-%d') != \
                    time.strptime(create_date, '%Y-%m-%d'):
                shutil.move("Flows/{0}.out".format(filename),
                            "Flows/{0}_{1}.out".format(filename, create_date))
                logger.info('New flow file({0}.out) created'.format(filename))

    def run(self):
        logger.info('Message sinker started.')
        while True:
            current_date = arrow.utcnow().to('Asia/Shanghai').strftime(
                "%Y-%m-%d")
            if current_date != self.establish_date:
                shutil.move(
                    "Flows/{0}.out".format(self.filename),
                    "Flows/{0}_{1}.out".format(self.filename,
                                               self.establish_date))
                self.establish_date = current_date
            self.sink()
            time.sleep(self.timer)

    def sink(self):
        pass

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def full(self):
        return self.queue.full()

    def put(self, item, block=True, timeout=None):
        self.queue.put(item, block, timeout)

    def put_nowait(self, item):
        return self.queue.put(item, False)

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    def get_nowait(self):
        return self.queue.get(False)
Example #13
def create_queue(raw_range):
    range_queue = Queue(50)
    for foo in raw_range:
        if range_queue.full():
            print("Warning. queue is full(max size is 50)")
            break
        range_queue.put(foo, 1)
    return range_queue
Example #14
    def start_servers(self):

        # Prepare to start the servers
        server_list = self._config['servers']

        server_count = len(server_list)
        self.notice("Starting " + str(server_count) + " servers\n")
        wait = Queue(maxsize=server_count)

        # Add servers
        for server_config in server_list:
            handler = server_config['handler']
            server_module = import_module("servers", handler)
            if server_module is None:
                self.error("Invalid handler " + handler)
                sys.exit(2)

            server = None

            if server_config['mode'] == "http":
                print("Starting http on port " + str(server_config['port']))
                server = server_module(server_config['domain'],
                                       int(server_config['port']),
                                       server_config['timeout'], wait,
                                       self.__loggers)
            elif server_config['mode'] == "https":
                if "cert_path" not in server_config:
                    self.error("cert_path not set for https " + handler)
                    sys.exit(3)
                print("Starting https on port " + str(server_config['port']))
                server = server_module(server_config['domain'],
                                       int(server_config['port']),
                                       server_config['timeout'], wait,
                                       self.__loggers,
                                       server_config['cert_path'])

            server.start()
            self._servers.append(server)

        # Wait until they indicate they have bound
        while not wait.full():
            pass

        wait = None

        # Drop privileges
        self.drop_privileges()

        # Wait until told to exit
        try:
            while True:
                time.sleep(2)
        except KeyboardInterrupt:
            print("\nShutting down...")
            # for server in self._servers:
            #     server.stop()
            #     server.join()
            sys.exit(0)
Example #15
class Pool(object):
    '''A database connection pool.'''
    def __init__(self,
                 maxActive=5,
                 maxWait=None,
                 init_size=0,
                 db_type="kafka",
                 **config):
        self.__freeConns = Queue(maxActive)
        self.maxWait = maxWait
        self.db_type = db_type
        self.config = config
        if init_size > maxActive:
            init_size = maxActive
        for i in range(init_size):
            self.free(self._create_conn())

    def __del__(self):
        print("__del__ Pool..")
        self.release()

    def release(self):
        '''Release resources: close all connections in the pool.'''
        print("release Pool..")
        while self.__freeConns and not self.__freeConns.empty():
            con = self.get()
            con.release()
        self.__freeConns = None

    def _create_conn(self):
        '''Create a connection.'''
        if self.db_type in dbcs:
            return dbcs[self.db_type](**self.config)

    def get(self, timeout=None):
        '''Get a connection.
        @param timeout: timeout in seconds
        '''
        if timeout is None:
            timeout = self.maxWait
        conn = None
        if self.__freeConns.empty():  # if the pool is empty, create a connection directly
            conn = self._create_conn()
        else:
            conn = self.__freeConns.get(timeout=timeout)
        conn.pool = self
        return conn

    def free(self, conn):
        '''Return a connection to the pool.
        @param conn: the connection object
        '''
        conn.pool = None
        if self.__freeConns.full():  # if the pool is already full, close the connection
            conn.release()
            return
        self.__freeConns.put_nowait(conn)
Example #16
class MessageChannel(object):
    """
        Abstract base-class of all message channel types.
        
        Provides functionality and attribute initialization 
        and management common to all channel types.  Considered 
        "abstract" because implementation of notify() raises 
        exception and must be overridden, and because there 
        is no API method defined for getting messages out of the 
        channel.
    """
    implements(IMessageChannel)
    def __init__(self, name, host=None):
        self.name = name
        self.host = host
        self.added = Counter()
        self.removed = Counter()
        self.messages = Queue()
        super(MessageChannel, self).__init__()
    def getname(self):
        return self.name
    def geturl(self):
        return "/".join([self.host.geturl(), self.getname()])
    def send(self, message):
        self.messages.put(message)
        self.added.increment()
        self.notify()
    def receive(self, blocking=True, timeout=None):
        try:
            message = self.messages.get(blocking, timeout)
        except Empty:
            message = None
        else:
            self.removed.increment()
        return message
    def full(self):
        return self.messages.full()
    def empty(self):
        return self.messages.empty()
    def attach(self, listener):
        raise TypeError("notify() must be overridden")
    def detach(self, listener):
        raise TypeError("notify() must be overridden")
    def subscribe(self, channel):
        raise TypeError("notify() must be overridden")
    def unsubscribe(self, channel):
        raise TypeError("notify() must be overridden")
    def notify(self):
        raise TypeError("notify() must be overridden")
    def __str__(self):
        descriptors = ["%s('%s')" % (type(self).__name__, self.getname())]
        descriptors.append('+%d' % self.added.get())
        descriptors.append('-%d' % self.removed.get())
        return " ".join(descriptors)
    def __repr__(self):
        return '<%s at %#x>' % (self, id(self))
Example #17
class ThreadStream:
    def __init__(self, path=0, queueSize=128):
        self.stream = cv2.VideoCapture(path)
        self.stopped = True
        self.Q = Queue(maxsize=queueSize)

    def start(self):
        self.stopped = False
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):

        while True:

            if (self.stopped):
                return
            if not self.Q.full():
                success, frame = self.stream.read()

                if (not success):
                    self.stop()
                    return

                scale_percent = 50

                #calculate the 50 percent of original dimensions
                width = int(frame.shape[1] * scale_percent / 100)
                height = int(frame.shape[0] * scale_percent / 100)

                # dsize
                dsize = (width, height)

                # resize image
                frame = cv2.resize(frame, dsize)
                self.Q.put(frame)
            else:
                print("queue full")

    def read(self):
        return self.Q.get(block=True, timeout=10)

    def more(self):
        if (self.Q.qsize() <= 0):
            print("Queue Empty")
            return not self.stopped
        else:
            return True

    def stop(self):
        self.stopped = True
        self.stream.release()
        self.Q.queue.clear()
Example #18
class VideoStream:
    def __init__(self,
                 video_source,
                 video_width,
                 video_height,
                 video_fps,
                 queue_size=1):
        self.video_fps = video_fps

        vc = cv2.VideoCapture(video_source)

        if hasattr(cv2, 'cv'):
            vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
            vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
            vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
        else:
            vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
            vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
            vc.set(cv2.CAP_PROP_FPS, video_fps)

        self.stream = vc
        self.stopped = False
        self.queue = Queue(maxsize=queue_size)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        while True:
            if self.stopped:
                break

            (flg, frame) = self.stream.read()
            if not flg:
                Exception("Video capture is wrong")
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if self.queue.full():
                time.sleep(1 / float(self.video_fps))
            else:
                if not self.queue.empty():
                    self.queue.get()
                    self.queue.put(frame)
                else:
                    self.queue.put(frame)

        self.stream.release()

    def read(self):
        return self.queue.get()

    def release(self):
        self.stopped = True
        self.thread.join()
Example #19
class _RingBuffer():
    def __init__(self, size):
        self._buffer = Queue(maxsize=size)

    def put(self, item):
        if self._buffer.full():
            #If the buffer is full, discard the oldest element to make room
            self._buffer.get()
        self._buffer.put(item)

    def get(self, block=True):
        return self._buffer.get(block=block)
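
Once the buffer fills, each put() silently evicts the oldest element. Note that the full()/get()/put() sequence is not atomic, so this only behaves as a strict ring buffer with a single producer. A hypothetical session:

buf = _RingBuffer(3)
for i in range(5):
    buf.put(i)       # 0 and 1 are evicted as the buffer overflows
print(buf.get())     # -> 2, the oldest surviving element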
Example #20
    def __start_scan_thread(self, target):
        """
        Creates a new scan thread, if max_threads allows to.
        Every scan thread is assigned a queue that contains the targets to
        scan. For every element of the queue, the scan callback function is
        called.

        input:
            target  a new target to scan. basically the result of cUtil.mkIP2

        """
        self.__threads = filter(lambda t: t[0].is_alive(), self.__threads)
        func = self.__start_scan_thread.__func__

        def inc_counter():
            """
            in order to not constantly add a target to the first queue,
            we need a counter, that points to the next queue to add the
            target. as consequence, every queue should have round about the
            same qsize

            """
            try:
                if func.counter < len(self.__threads)-1:
                    func.counter += 1
                else:
                    func.counter = 0
            except AttributeError:
                func.counter = 0

        if len(self.__threads) < self.max_threads:
            # create a queue to be filled with targets
            q = Queue(maxsize=1000)
            q.put(target)

            # create a new thread to process targets
            t = Thread(target=scan_thread,
                       args=(q, self.__cb_scan))
            t.start()
            self.__threads.append((t, q))

        else:
            inc_counter()

            assigned = False # successfully assigned target to thread
            while not assigned:
                t, q = self.__threads[func.counter]
                if not q.full():
                    q.put(target)
                    assigned = True
                    break
                else:
                    inc_counter()
Example #21
class FIFOCache:
    def __init__(self, size=100):
        self.size = size
        self.cache = Queue(size)

    def put(self, obj):
        if self.cache.full():
            self.cache.get_nowait()
        self.cache.put_nowait(obj)

    def get(self):
        if self.cache.empty():
            return None
        else:
            return self.cache.get_nowait()

    def isFull(self):
        return self.cache.full()

    def isEmpty(self):
        return self.cache.empty()
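
A hypothetical usage session showing the FIFO eviction:

cache = FIFOCache(size=2)
cache.put('a')
cache.put('b')
cache.put('c')           # full, so 'a' (the oldest entry) is evicted
print(cache.get())       # -> 'b'
print(cache.isFull())    # -> False; the get() freed a slot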
Example #22
    def __start_scan_thread(self, target):
        """
        Creates a new scan thread, if max_threads allows to.
        Every scan thread is assigned a queue that contains the targets to
        scan. For every element of the queue, the scan callback function is
        called.

        input:
            target  a new target to scan. basically the result of cUtil.mkIP2

        """
        self.__threads = filter(lambda t: t[0].is_alive(), self.__threads)
        func = self.__start_scan_thread.__func__

        def inc_counter():
            """
            in order to not constantly add a target to the first queue,
            we need a counter, that points to the next queue to add the
            target. as consequence, every queue should have round about the
            same qsize

            """
            try:
                if func.counter < len(self.__threads) - 1:
                    func.counter += 1
                else:
                    func.counter = 0
            except AttributeError:
                func.counter = 0

        if len(self.__threads) < self.max_threads:
            # create a queue to be filled with targets
            q = Queue(maxsize=1000)
            q.put(target)

            # create a new thread to process targets
            t = Thread(target=scan_thread, args=(q, self.__cb_scan))
            t.start()
            self.__threads.append((t, q))

        else:
            inc_counter()

            assigned = False  # successfully assigned target to thread
            while not assigned:
                t, q = self.__threads[func.counter]
                if not q.full():
                    q.put(target)
                    assigned = True
                    break
                else:
                    inc_counter()
Example #23
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return self._nextid.next()

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Example #24
class FileVideoStream:
    def __init__(self, path, queueSize=128):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(path)
        self.stopped = False

        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                return

            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()

                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return

                # add the frame to the queue
                self.Q.put(frame)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
Example #25
class ProcessHelper:
    def __init__(self, sftp_configs, load_configs, zvelo_configs, eventhub_configs, adls_configs, metadata,
                 max_process = 4, processed_file_log = None):
        self.sftp_configs = sftp_configs
        self.load_configs = load_configs

        if zvelo_configs is not None:
            zvelo_path = zvelo_configs["path"].decode("ascii").encode()
            self.zvelo_helper = ZveloHelper(zvelo_path)
        else:
            self.zvelo_helper = None

        self.eventhub_configs = eventhub_configs
        self.adls_configs = adls_configs
        self.metadata = metadata

        self.process_queue = Queue(max_process)
        self.file_queue = Queue()
        self.processed_file_log = processed_file_log

    def add_file(self, file_path):
        self.file_queue.put(file_path)

    def join_all(self):
        for process in iter(self.process_queue.get, None):
            if process is not None:
                process.join()

    def start_process(self):
        if self.processed_file_log is not None:
            log_writer = open(self.processed_file_log, "a+")
        else:
            log_writer = None

        while (not self.process_queue.empty()) \
                and (not self.process_queue.queue[0].is_alive()):
            process = self.process_queue.get()
            if log_writer is not None:
                log_writer.write(process.file_path + '\n')

        if log_writer is not None:
            log_writer.close()

        while (not self.process_queue.full()) \
                and (not self.file_queue.empty()):
            file_path = self.file_queue.get()
            process = SendDataProcess(self.sftp_configs, self.load_configs, self.zvelo_helper,
                                      self.eventhub_configs, self.adls_configs, self.metadata, file_path)
            process.start()
            self.process_queue.put(process)
Example #26
class VideoWriter(object):
    '''
    Threaded video encoding. Images are put in a queue and encoded in
    a background process.
    '''
    
    def __init__(self, fname, fourcc, w, h, fps, color=True):
        self.fname = fname
        self.fourcc = cv2.VideoWriter_fourcc(*fourcc)  # identical call on cv2 2.x and 3.x
        self.dimension = (w, h)
        self.fps = fps
        self.color = color
        self.vw = cv2.VideoWriter(fname, self.fourcc, self.fps, self.dimension, self.color)
        self.Q = Queue(maxsize=128)
        self.stopped = Event()
        self.t = Thread(target=self.writer, args=())
        #self.t.daemon = True
        self.frame_number = 0
    
    def running(self):
        return not self.stopped.is_set()
    
    def start(self):
        self.t.start()
    
    def stop(self):
        self.stopped.set()
        self.t.join()
    
    def write(self, frame):
        if self.t.is_alive():
            while self.Q.full(): pass
            self.Q.put(frame)
        else:
            raise IOError("The writer is not ready")
        
    def writer(self):
        while self.running(): 
            if not self.Q.empty():
                frame = self.Q.get()
                self.frame_number += 1 
                self.vw.write(frame)
        while not self.Q.empty():
            frame = self.Q.get()
            self.frame_number += 1 
            self.vw.write(frame)        
        self.vw.release()
Example #27
class VideoStream(object):
    """
		Desc:	A faster video streaming class which uses Python's threading module 
				for creating and maintaining threads.
	"""
    def __init__(self, path, queue_size=128):
        self.stream = cv2.VideoCapture(path)
        self.exit = False

        self.queue = Queue(maxsize=queue_size)

    def start(self):
        thread = Thread(name="Video Reader",
                        target=self.__read_frames,
                        args=())
        thread.daemon = True
        thread.start()
        time.sleep(1.0)  # so the thread gets enough time to build up the initial queue
        return

    def __read_frames(self):
        """
		Desc:	This function is dedicated to reading frames and pushing them into the Queue. 
				It will run in a separate daemon thread.
		"""
        while True:
            if self.exit:
                self.stream.release()
                return

            if not self.queue.full():
                success, frame = self.stream.read()

                if not success:
                    self.stream.release()
                    self.stop()
                    return

                self.queue.put(frame)

    def read(self):
        return self.queue.get()

    def is_empty(self):
        return self.queue.qsize() == 0

    def stop(self):
        self.exit = True
Example #28
def run(doc_file, vocab_file, K=10, alpha=0.01, beta=0.01, num_iter=20):
    vocab = dc.vocab_from_file(vocab_file)
    # Word-topic + doc-topic should be less than 30 MB
    V = len(vocab)
    # Try to get the number of documents processed at a time to stay in the L3 cache (play around with this and see what works)
    batch_size = 5 * 10**8 / V
    #print "Running batch size of %i" % batch_size
    #print "K = %i, V = %i" % (K, V)
    queue = Queue(maxsize=batch_size)
    global_nd = np.zeros(batch_size, dtype=np.intc)
    global_wt = np.zeros((V, K), dtype=np.intc)
    global_zt = np.zeros(K, dtype=np.intc)

    t = time()
    with open(doc_file) as f:
        for j, line in enumerate(f):
            if not queue.full():
                queue.put(line.strip())
            else:
                #print "Queue full, starting to process a batch"
                docs = []
                while not queue.empty():
                    doc = queue.get()
                    docs.append(doc)

                doc_word = dc.convert_to_np(docs, vocab)
                docs = []
                dt, wt, zt, WS, DS, ZS = dc.sample_ready(doc_word, K)
                del doc_word
                # Update the local wt and zt with the latest global versions
                wt += global_wt
                zt += global_zt
                randoms = np.array(
                    [np.random.random_sample() for i in range(K)])
                #print "Starting another batch"
                for i in range(num_iter):
                    fp.sample(WS, DS, ZS, wt, dt, zt, randoms, alpha, beta)
                    nd = np.sum(dt, axis=1).astype(np.intc)
                    ll = rg._loglikelihood(wt, dt, zt, nd, alpha, beta)
                    #print ll

                # Update the globals
                #global_nd = nd
                global_wt = wt
                global_zt = zt
                print "Processed %i so far" % j
    print "Final log likelihood: %d" % ll
    print time() - t
Example #29
    def start_servers(self):

        # Start watching for port misses
        watch = HoneyPokeWatcher(self._config['ssh_port'],
                                 self._config['ignore_watch'],
                                 self.check_server)
        watch.start()

        # Prepare to start the servers
        server_count = len(self._config['ports'])
        print("Starting " + str(server_count) + " servers\n")
        wait = Queue(maxsize=server_count)

        checked_certs = False
        # Add servers
        for port in self._config['ports']:
            use_ssl = False
            if 'ssl' in port and port['ssl'] is True:
                use_ssl = True
                if not checked_certs:
                    if not os.path.exists(
                            "honeypoke_key.pem") or not os.path.exists(
                                "honeypoke_cert.pem"):
                        print("SSL enabled, but PEM files not found!")
                        sys.exit(0)
                    checked_certs = True
            self.add_server(int(port['port']), port['protocol'], use_ssl,
                            self.__loggers, wait)

        # Wait until they indicate they have bound
        while not wait.full():
            pass

        wait = None

        # Drop privileges
        self.drop_privileges()

        # Wait until told to exit
        try:
            while True:
                time.sleep(2)
        except KeyboardInterrupt:
            print("\nShutting down...")
            # for server in self._servers:
            #     server.stop()
            #     server.join()
            sys.exit(0)
Example #30
class _BatchWriter(object):

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            raise ValueTooLarge('value exceeds max encoded size of {}'\
                                .format(self.maxitemsize))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return self._nextid.next()

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Example #31
class JointTrajectoryCH:
    def __init__(self):
        rospy.init_node('joint_trajectory_command_handler')
        self.real_robot =  rospy.get_param("~real_robot")
        ac_rate = rospy.get_param("~action_cycle_rate")
        self.rate = rospy.Rate(ac_rate)

        # Publisher to JointTrajectory robot controller
        if self.real_robot:
            # self.jt_pub = rospy.Publisher('/scaled_pos_traj_controller/command', JointTrajectory, queue_size=10)
            self.jt_pub = rospy.Publisher('/pos_traj_controller/command', JointTrajectory, queue_size=10)
        else:
            self.jt_pub = rospy.Publisher('/eff_joint_traj_controller/command', JointTrajectory, queue_size=10)

        # Subscriber to JointTrajectory Command coming from Environment
        rospy.Subscriber('env_arm_command', JointTrajectory, self.callback_env_joint_trajectory, queue_size=1)
        self.msg = JointTrajectory()
        # Queue with maximum size 1
        self.queue = Queue(maxsize=1)
        # Flag used to publish empty JointTrajectory message only once when interrupting execution
        self.stop_flag = False 

    def callback_env_joint_trajectory(self,data):
        try:
            # Add to the Queue the next command to execute
            self.queue.put(data)
        except:
            pass

    def joint_trajectory_publisher(self):

        while not rospy.is_shutdown():
            # If a command from the environment is waiting to be executed,
            # publish the command, otherwise preempt trajectory
            if self.queue.full():
                self.jt_pub.publish(self.queue.get())
                self.stop_flag = False 
            else:
                # If the empty JointTrajectory message has not been published,
                # publish it and set the stop_flag to True; else pass
                if not self.stop_flag:
                    self.jt_pub.publish(JointTrajectory())
                    self.stop_flag = True 
                else: 
                    pass 
            self.rate.sleep()
Example #32
def main():
    q = Queue(maxsize = 5)
    largest_array = []

    for i in str(num):
        if q.full():
            q.get()
            q.put(i)

        if len(largest_array) < 5:
            largest_array.append(i)
            q.put(i)

        if is_bigger([n for n in q.queue], largest_array):
            largest_array = [n for n in q.queue]

    print product(largest_array)
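
num, is_bigger, and product are defined elsewhere in that script; the loop appears to search for the five consecutive digits of num with the largest product. Plausible helper sketches (assumptions, not the original definitions):

def product(digits):
    # product of a sequence of digit characters
    result = 1
    for d in digits:
        result *= int(d)
    return result

def is_bigger(candidate, current):
    # does the candidate window beat the current best by product?
    return product(candidate) > product(current)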
Example #33
class MessageQueue(Thread):
    def __init__(self, filename, timer=10):
        Thread.__init__(self)
        self.queue = Queue()
        self.filename = filename
        self.timer = timer
        self.establish_date = arrow.utcnow().to('Asia/Shanghai').strftime(
            "%Y-%m-%d")

    def run(self):
        while True:
            current_date = arrow.utcnow().to('Asia/Shanghai').strftime(
                "%Y-%m-%d")
            if current_date != self.establish_date:
                shutil.move(
                    "Flows/{0}.out".format(self.filename),
                    "Flows/{0}.{1}.out".format(self.filename,
                                               self.establish_date))
                self.establish_date = current_date
            self.sink()
            time.sleep(self.timer)

    def sink(self):
        pass

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def full(self):
        return self.queue.full()

    def put(self, item, block=True, timeout=None):
        self.queue.put(item, block, timeout)

    def put_nowait(self, item):
        return self.queue.put(item, False)

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    def get_nowait(self):
        return self.queue.get(False)
Example #34
class ThreadPool(object):
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def full(self):
        """Return status of the Queue"""
        return self.tasks.full()
Example #35
class ThreadPool(object):
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def full(self):
        """Return status of the Queue"""
        return self.tasks.full()
Example #36
class ConsumerProducer(object):
    def __init__(self):
        self.q = Queue(maxsize=10)
        self.counter = 0

    def random_gem(self, dd):
        return self.q.put(dd)

    def playbook(self):
        with ThreadPoolExecutor(max_workers=5) as executor:
            for x in [i for i in range(100)]:
                executor.submit(self.random_gem, x)  # submit the callable, not its result
                if self.q.full():
                    executor.submit(self.empty_queue)

    def empty_queue(self):
        if not self.q.empty():
            self.q.get(block=False)
Example #37
class FileVideoStream:
    def __init__(self, path, queueSize=128):
        self.stream = cv2.VideoCapture(path)
        self.Q = Queue(maxsize=queueSize)
        self.dequeuer_sema = Semaphore(0)
        self.enqueuer_cv = Condition()
        self.stopped = False
    
    def start(self):
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    
    def update(self):
        success, frame = self.stream.read()

        num_consecutive_fails = 0
        while num_consecutive_fails < 3:
            if success:
                self.enqueuer_cv.acquire()
                if self.Q.full():
                    self.enqueuer_cv.wait()
                self.enqueuer_cv.release()
                num_consecutive_fails = 0
                self.Q.put(frame)
                self.dequeuer_sema.release()
            else:
                num_consecutive_fails += 1
            
            success, frame = self.stream.read()
        self.stopped = True
        self.dequeuer_sema.release()
        
    def read(self):
        self.enqueuer_cv.acquire()
        frame = self.Q.get()
        self.enqueuer_cv.notify()
        self.enqueuer_cv.release()
        return frame
    
    def more(self):
        self.dequeuer_sema.acquire()
        return not self.stopped or self.Q.qsize() > 0
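
The semaphore/condition pairing implies a specific consumer pattern: call more() to block until a frame (or end-of-stream) has been signalled, then read() to take the frame and wake the producer if it is blocked on a full queue. A hypothetical consumer loop ('video.mp4' is a placeholder path):

fvs = FileVideoStream('video.mp4').start()
while fvs.more():        # blocks on the semaphore until a frame or EOF
    frame = fvs.read()   # dequeues and notifies a waiting producer
    # ... process frame ...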
Example #38
def parse_sentences(lines):
	'''
	Parse a list of sentences; returns a list of parses, each in lispish notation.
	'''
	assert type(lines) in (list, tuple)
	if not lines:
		return []
	
	# Create jobs
	result_queue = Queue(len(lines))
	for idx,line in enumerate(lines):
		job_queue.put((idx,line,result_queue))
		log(5, 'Enqueued', (idx,line,result_queue))

	while not result_queue.full(): time.sleep(.05)
	
	# collect results
	result = dict(result_queue.get() for i in range(result_queue.qsize()))
	return [result.get(idx,'') for idx in range(len(lines))]
Example #39
class CommandQueue(object):
    def __init__(self, timeTolerance=None):
        self.__commandQueue = Queue()
        self.__timeTolerance = timeTolerance

        self.__finalCommand = None
        self.__finalCommandTimeStamp = None

    def add_command(self, timeStamp, command, ignorable=True):
        if self.__commandQueue.full():
            self.__commandQueue.get()
            self.__commandQueue.task_done()
        if command == self.__finalCommand:
            return
        self.__commandQueue.put((timeStamp, command, ignorable))
        self.__finalCommand = command
        self.__finalCommandTimeStamp = timeStamp

    def getFinalCommandInfo(self):
        return self.__finalCommand, self.__finalCommandTimeStamp

    def get_command(self, currentTime):
        command = None
        if currentTime is None:
            currentTime = time.time()
        while True:
            if self.__commandQueue.empty():
                self.__finalCommand = None
                self.__finalCommandTimeStamp = None
                break
            command = self.__commandQueue.get()
            if self.__timeTolerance is None or currentTime - command[
                    0] <= self.__timeTolerance or not command[2]:
                break
            command = None
        return command

    def task_done(self):
        self.__commandQueue.task_done()

    def setTimeTolerance(self, value):
        if value > 0:
            self.__timeTolerance = value
Example #40
    def _iter_keys(self, keys):
        if not keys:
            return

        q = Queue(self.batch_size)
        s = self.meta_session.clone()

        for k in keys:
            if not q.full():
                q.put((k, s.read_latest(k)))
            else:
                data = self._fetch_response_data(q.get())
                q.put((k, s.read_latest(k)))
                if data:
                    yield data

        while q.qsize():
            data = self._fetch_response_data(q.get())
            if data:
                yield data
Example #41
def parse_sentences(lines):
    '''
    Parse a list of sentences; returns a list of parses, each in lispish notation.
    '''
    assert type(lines) in (list, tuple)
    if not lines:
        return []

    # Create jobs
    result_queue = Queue(len(lines))
    for idx, line in enumerate(lines):
        job_queue.put((idx, line, result_queue))
        log(5, 'Enqueued', (idx, line, result_queue))

    while not result_queue.full():
        time.sleep(.05)

    # collect results
    result = dict(result_queue.get() for i in range(result_queue.qsize()))
    return [result.get(idx, '') for idx in range(len(lines))]
Example #42
class ThreadPool:
    def __init__(self, numThreads=10):
        self.jobs = Queue(numThreads)
        self.event = threading.Event()

        for _ in range(numThreads):
            Worker(self.event, self.jobs)

        self.logger = logging.getLogger("PoolManager.ThreadPool")

    def addJob(self, func, *args, **kargs):
        self.jobs.put((func, args, kargs))
        self.logger.info("Added a new job")
        self.logger.info("Threads left in pool: " + str(self.jobs.qsize()))
        self.logger.info("If the Queue full? : " + str(self.jobs.full()))

    def waitAll(self):
        self.jobs.join()

    def killAll(self):
        self.event.set()
Example #43
class ThreadPool:
	def __init__( self, numThreads = 10 ):
		self.jobs = Queue( numThreads )
		self.event = threading.Event()

		for _ in range( numThreads ):
			Worker( self.event, self.jobs )

		self.logger = logging.getLogger( "PoolManager.ThreadPool" )

	def addJob( self, func, *args, **kargs ):
		self.jobs.put( ( func, args, kargs ) )
		self.logger.info( "Added a new job" )
		self.logger.info( "Threads left in pool: " + str( self.jobs.qsize() ) )
		self.logger.info( "If the Queue full? : " + str( self.jobs.full() ) )
	
	def waitAll( self ):
		self.jobs.join()
	
	def killAll( self ):
		self.event.set()
Example #44
class ThreadPool():
    """创建一个线程池"""
    def __init__(self, thread_num, timeout):
        self.work_queue = Queue()
        self.result_queue = Queue()
        self.thread_pool = []
        self.thread_num = thread_num
        self.timeout = timeout

        self.start_thread_pool()

    def start_thread_pool(self):
        for i in range(self.thread_num):
            worker = Worker(self.work_queue, self.result_queue, self.timeout)
            self.thread_pool.append(worker)


    def add_task(self, func, *args, **kwargs):
        if not self.work_queue.full():
            self.work_queue.put((func, args, kwargs))

    def get_result(self):
        if not self.result_queue.empty():
            return self.result_queue.get()
        else:
            return None

    def task_done(self):
        return self.work_queue.task_done()

    def task_join(self):
        self.work_queue.join()

    def stop_thread_pool(self):
        for i in range(self.thread_num):
            self.thread_pool[i].stop()

        del self.thread_pool[:]
Example #45
class Terminal(object):
    """Terminal class."""
    def __init__(self, name="", size=0):
        self.name = name
        if size > 0:
            self.log = Queue(size)
        else:
            self.log = Queue(LOG_MAX)

    def write_line(self, line=""):
        """Writes line to the terminal log"""
        print line
        if self.log.full():
            self.log.get_nowait()
            self.log.put(line)
        else:
            self.log.put(line)
        return None

    def parse_str(self, in_str=""):
        """Converts raw string input for use in exec_cmd()"""
        self.write_line(in_str)
        return None
Example #46
class SFClient:
    def __init__(self, host, port, qsize=10):
        self._in_queue = Queue(qsize)
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        data = self._s.recv(2)
        if data != 'U ':
            print "Wrong handshake"
        self._s.send("U ")
        print "Connected"
        thread.start_new_thread(self.run, ())

    def run(self):
        while True:
            length = ord(self._s.recv(1))
            data = self._s.recv(length)
            data = [ord(c) for c in data][1:]
            #print "Recv %d bytes" % (length), ActiveMessage(data)
            if self._in_queue.full():
                print "Warning: Buffer overflow"
                self._in_queue.get()
            p = RawPacket()
            p.data = data
            self._in_queue.put(p, block=False)

    def read(self, timeout=0):
        return self._in_queue.get()

    def write(self, payload):
        print "SFClient: write:", payload
        if type(payload) != type([]):
            # Assume this will be derived from Packet
            payload = payload.payload()
        payload = [0] + payload
        self._s.send(chr(len(payload)))
        self._s.send(''.join([chr(c) for c in payload]))
        return True
Example #47
class AsynchExecutor(object):
    '''
    Executes tasks submitted to a FIFO queue using a daemon thread.
    Calls to submit() block until there is space in the queue.
    The daemon thread will block if the queue is empty.
    '''

    LOG = logging.getLogger(__name__)

    def __init__(self, queue_size=10):
        self.LOG.debug('Initializing AsynchExecutor')
        self.running = False
        self.task_queue = Queue(queue_size)  # FIFO queue

        def consume_task_queue(executor):
            while executor.is_running():
                task = executor.task_queue.get()
                self.LOG.debug('Running task %s' % task)
                if task == 'SHUTDOWN':
                    break
                try:
                    retval, retval2, retval3 = task.work_func(*task.params)
                    self.LOG.debug('AsynchTask finished with retval %s' % retval)
                    task.on_success(retval, retval2, retval3)
                except Exception as e:
                    task.on_error(e)
        self.work_thread = Thread(target=consume_task_queue, args=(self,))
        self.work_thread.daemon = True

    def start(self):
        ''' Starts the daemon thread '''
        self.running = True
        self.work_thread.start()
        self.LOG.debug('Started AsynchExecutor worker thread')

    def shutdown(self):
        ''' Stops the worker thread and joins with it '''
        self.LOG.debug('Stopping AsynchExecutor worker thread')
        self.running = False
        self.task_queue.put('SHUTDOWN')
        self.work_thread.join()
        self.LOG.debug('AsynchExecutor worker thread stopped')

    def is_running(self):
        return self.running

    def submit(self, work_func, **kwargs):
        '''
        Uses the functions passed as parameters to create an
        AsynchTask and places it in the task queue.
        This operation blocks if the task queue is full.
        '''
        def no_action(param):
            pass

        on_success = kwargs.get('on_success', no_action)
        on_error = kwargs.get('on_error', no_action)
        params = kwargs.get('params', ())

        task = AsynchTask(work_func, on_success, on_error, params)
        self.LOG.debug('Submitting new AsynchTask to FIFO queue...')
        self.task_queue.put(task)
        self.LOG.debug('AsynchTask accepted into FIFO queue')

    def is_full(self):
        '''
        If the task queue is full, calling submit() will block.
        Use this to check if submitting will block.
        '''
        return self.task_queue.full()
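
A usage sketch for the executor above, assuming the AsynchTask class it depends on is available; fetch() and its three return values are illustrative, chosen because the worker unpacks exactly three results from work_func:

def fetch(url):
    # illustrative work function returning the three values the worker expects
    return url, 200, 'OK'

def on_success(body, status, message):
    print('done: %s %s %s' % (body, status, message))

executor = AsynchExecutor(queue_size=10)
executor.start()
if not executor.is_full():  # submit() would block if the queue were full
    executor.submit(fetch, params=('http://example.com',), on_success=on_success)
executor.shutdown()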
Example #48
class DataQueue(object):

    """
    Convenience class for using the DAF's notifications feature. This is a
    collection that, once connected to EDEX by calling start(), fills with
    data as notifications come in.

    Example for getting obs data:

      from DataQueue import DataQueue, GEOMETRY
      request = DataAccessLayer.newDataRequest('obs')
      request.setParameters('temperature')
      request.setLocationNames('KOMA')
      q = DataQueue(GEOMETRY, request)
      q.start()
      for item in q:
          print(item.getNumber('temperature'))
    """

    def __init__(self, dtype, request, maxsize=_DEFAULT_MAXSIZE):
        """
        Create a new DataQueue.

        Args:
            dtype: Either GRID or GEOMETRY; must match the type of data
              requested.
            request: IDataRequest describing the data you want. It must at
              least have datatype set. All data produced will satisfy the
              constraints you specify.
            maxsize: Maximum number of data objects the queue can hold at
              one time. If the limit is reached, any data coming in after
              that will not appear until one or more items are removed using
              DataQueue.get().
        """
        assert maxsize > 0
        assert dtype in (GEOMETRY, GRID)
        self._maxsize = maxsize
        self._queue = Queue(maxsize=maxsize)
        self._thread = None
        if dtype is GEOMETRY:
            self._notifier = DNL.getGeometryDataUpdates(request)
        elif dtype is GRID:
            self._notifier = DNL.getGridDataUpdates(request)

    def start(self):
        """Start listening for notifications and requesting data."""
        if self._thread is not None:
            # Already started
            return
        kwargs = {'callback': self._data_received}
        self._thread = Thread(target=self._notifier.subscribe, kwargs=kwargs)
        self._thread.daemon = True
        self._thread.start()
        timer = 0
        while not self._notifier.subscribed:
            time.sleep(0.1)
            timer += 1
            if timer >= 100:  # ten seconds
                raise RuntimeError('timed out when attempting to subscribe')

    def _data_received(self, data):
        for d in data:
            if not isinstance(d, list):
                d = [d]
            for item in d:
                self._queue.put(item)

    def get(self, block=True, timeout=None):
        """
        Get and return the next available data object. By default, if there is
        no data yet available, this method will not return until data becomes
        available.

        Args:
            block: Specifies behavior when the queue is empty. If True, wait
              until an item is available before returning (the default). If
              False, return None immediately if the queue is empty.
            timeout: If block is True, wait this many seconds, and return None
              if data is not received in that time.
        Returns:
            IData
        """
        if self.closed:
            raise Closed
        try:
            return self._queue.get(block, timeout)
        except Empty:
            return None

    def get_all(self):
        """
        Get all data waiting for processing, in a single list. Always returns
        immediately. Returns an empty list if no data has arrived yet.

        Returns:
            List of IData
        """
        data = []
        for _ in range(self._maxsize):
            next_item = self.get(False)
            if next_item is None:
                break
            data.append(next_item)
        return data

    def close(self):
        """Close the queue. May not be re-opened after closing."""
        if not self.closed:
            self._notifier.close()
        if self._thread is not None:
            self._thread.join()

    def qsize(self):
        """Return number of items in the queue."""
        return self._queue.qsize()

    def empty(self):
        """Return True if the queue is empty."""
        return self._queue.empty()

    def full(self):
        """Return True if the queue is full."""
        return self._queue.full()

    @property
    def closed(self):
        """True if the queue has been closed."""
        return not self._notifier.subscribed

    @property
    def maxsize(self):
        """
        Maximum number of data objects the queue can hold at one time.
        If this limit is reached, any data coming in after that will not appear
        until one or more items are removed using get().
        """
        return self._maxsize

    def __iter__(self):
        if self._thread is not None:
            while not self.closed:
                yield self.get()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *unused):
        self.close()
Example #49
class FifoReadout(object):
    def __init__(self, dut):
        self.dut = dut
        self.callback = None
        self.errback = None
        self.readout_thread = None
        self.worker_thread = None
        self.watchdog_thread = None
        self.fill_buffer = False
        self.readout_interval = 0.05
        self._moving_average_time_period = 10.0
        self._data_deque = deque()
        self._data_buffer = deque()
        self._words_per_read = deque(maxlen=int(self._moving_average_time_period / self.readout_interval))
        self._result = Queue(maxsize=1)
        self._calculate = Event()
        self.stop_readout = Event()
        self.force_stop = Event()
        self.timestamp = None
        self.update_timestamp()
        self._is_running = False
        self.reset_rx()
        self.reset_sram_fifo()

    @property
    def is_running(self):
        return self._is_running

    @property
    def is_alive(self):
        if self.worker_thread:
            return self.worker_thread.is_alive()
        else:
            return False

    @property
    def data(self):
        if self.fill_buffer:
            return self._data_buffer
        else:
            logging.warning('Data requested but software data buffer not active')

    def data_words_per_second(self):
        if self._result.full():
            self._result.get()
        self._calculate.set()
        try:
            result = self._result.get(timeout=2 * self.readout_interval)
        except Empty:
            self._calculate.clear()
            return None
        return result / float(self._moving_average_time_period)

    def start(self, callback=None, errback=None, reset_rx=False, reset_sram_fifo=False, clear_buffer=False, fill_buffer=False, no_data_timeout=None):
        if self._is_running:
            raise RuntimeError('Readout already running: use stop() before start()')
        self._is_running = True
        logging.info('Starting FIFO readout...')
        self.callback = callback
        self.errback = errback
        self.fill_buffer = fill_buffer
        if reset_rx:
            self.reset_rx()
        if reset_sram_fifo:
            self.reset_sram_fifo()
        else:
            fifo_size = self.dut['SRAM']['FIFO_SIZE']
            if fifo_size != 0:
                logging.warning('SRAM FIFO not empty when starting FIFO readout: size = %i', fifo_size)
        self._words_per_read.clear()
        if clear_buffer:
            self._data_deque.clear()
            self._data_buffer.clear()
        self.stop_readout.clear()
        self.force_stop.clear()
        if self.errback:
            self.watchdog_thread = Thread(target=self.watchdog, name='WatchdogThread')
            self.watchdog_thread.daemon = True
            self.watchdog_thread.start()
        if self.callback:
            self.worker_thread = Thread(target=self.worker, name='WorkerThread')
            self.worker_thread.daemon = True
            self.worker_thread.start()
        self.readout_thread = Thread(target=self.readout, name='ReadoutThread', kwargs={'no_data_timeout': no_data_timeout})
        self.readout_thread.daemon = True
        self.readout_thread.start()

    def stop(self, timeout=10.0):
        if not self._is_running:
            raise RuntimeError('Readout not running: use start() before stop()')
        self._is_running = False
        self.stop_readout.set()
        try:
            self.readout_thread.join(timeout=timeout)
            if self.readout_thread.is_alive():
                if timeout:
                    raise StopTimeout('FIFO stop timeout after %0.1f second(s)' % timeout)
                else:
                    logging.warning('FIFO stop timeout')
        except StopTimeout as e:
            self.force_stop.set()
            if self.errback:
                self.errback(sys.exc_info())
            else:
                logging.error(e)
        if self.readout_thread.is_alive():
            self.readout_thread.join()
        if self.errback:
            self.watchdog_thread.join()
        if self.callback:
            self.worker_thread.join()
        self.callback = None
        self.errback = None
        logging.info('Stopped FIFO readout')

    def print_readout_status(self):
        sync_status = self.get_rx_sync_status()
        discard_count = self.get_rx_fifo_discard_count()
        error_count = self.get_rx_8b10b_error_count()
        logging.info('Data queue size: %d', len(self._data_deque))
        logging.info('SRAM FIFO size: %d', self.dut['SRAM']['FIFO_SIZE'])
        logging.info('Channel:                     %s', " | ".join([channel.name.rjust(3) for channel in self.dut.get_modules('fei4_rx')]))
        logging.info('RX sync:                     %s', " | ".join(["YES".rjust(3) if status is True else "NO".rjust(3) for status in sync_status]))
        logging.info('RX FIFO discard counter:     %s', " | ".join([repr(count).rjust(3) for count in discard_count]))
        logging.info('RX FIFO 8b10b error counter: %s', " | ".join([repr(count).rjust(3) for count in error_count]))
        if not any(self.get_rx_sync_status()) or any(discard_count) or any(error_count):
            logging.warning('RX errors detected')

    def readout(self, no_data_timeout=None):
        '''Readout thread continuously reading SRAM.

        Readout thread, which uses read_data() and appends data to self._data_deque (collection.deque).
        '''
        logging.debug('Starting %s', self.readout_thread.name)
        curr_time = get_float_time()
        time_wait = 0.0
        while not self.force_stop.wait(time_wait if time_wait >= 0.0 else 0.0):
            try:
                time_read = time()
                if no_data_timeout and curr_time + no_data_timeout < get_float_time():
                    raise NoDataTimeout('Received no data for %0.1f second(s)' % no_data_timeout)
                data = self.read_data()
            except Exception:
                no_data_timeout = None  # raise exception only once
                if self.errback:
                    self.errback(sys.exc_info())
                else:
                    raise
                if self.stop_readout.is_set():
                    break
            else:
                data_words = data.shape[0]
                if data_words > 0:
                    last_time, curr_time = self.update_timestamp()
                    status = 0
                    if self.callback:
                        self._data_deque.append((data, last_time, curr_time, status))
                    if self.fill_buffer:
                        self._data_buffer.append((data, last_time, curr_time, status))
                    self._words_per_read.append(data_words)
                elif self.stop_readout.is_set():
                    break
                else:
                    self._words_per_read.append(0)
            finally:
                time_wait = self.readout_interval - (time() - time_read)
            if self._calculate.is_set():
                self._calculate.clear()
                self._result.put(sum(self._words_per_read))
        if self.callback:
            self._data_deque.append(None)  # last item, will stop worker
        logging.debug('Stopped %s', self.readout_thread.name)

    def worker(self):
        '''Worker thread continuously calling callback function when data is available.
        '''
        logging.debug('Starting %s', self.worker_thread.name)
        while True:
            try:
                data = self._data_deque.popleft()
            except IndexError:
                self.stop_readout.wait(self.readout_interval)  # sleep a little bit, reducing CPU usage
            else:
                if data is None:  # if None then exit
                    break
                else:
                    try:
                        self.callback(data)
                    except Exception:
                        self.errback(sys.exc_info())

        logging.debug('Stopped %s', self.worker_thread.name)

    def watchdog(self):
        logging.debug('Starting %s', self.watchdog_thread.name)
        while True:
            try:
                if not any(self.get_rx_sync_status()):
                    raise RxSyncError('No RX sync')
                if any(self.get_rx_8b10b_error_count()):
                    raise EightbTenbError('RX 8b10b error(s) detected')
                if any(self.get_rx_fifo_discard_count()):
                    raise FifoError('RX FIFO discard error(s) detected')
            except Exception:
                self.errback(sys.exc_info())
            if self.stop_readout.wait(self.readout_interval * 10):
                break
        logging.debug('Stopped %s', self.watchdog_thread.name)

    def read_data(self):
        '''Read SRAM and return data array

        Can be used without threading.

        Returns
        -------
        data : list
            A list of SRAM data words.
        '''
        return self.dut['SRAM'].get_data()

    def update_timestamp(self):
        curr_time = get_float_time()
        last_time = self.timestamp
        self.timestamp = curr_time
        return last_time, curr_time

    def read_status(self):
        raise NotImplementedError()

    def reset_sram_fifo(self):
        fifo_size = self.dut['SRAM']['FIFO_SIZE']
        logging.info('Resetting SRAM FIFO: size = %i', fifo_size)
        self.update_timestamp()
        self.dut['SRAM']['RESET']
        sleep(0.2)  # sleep here for a while
        fifo_size = self.dut['SRAM']['FIFO_SIZE']
        if fifo_size != 0:
            logging.warning('SRAM FIFO not empty after reset: size = %i', fifo_size)

    def reset_rx(self, channels=None):
        logging.info('Resetting RX')
        if channels:
            filter(lambda channel: self.dut[channel].RX_RESET, channels)
        else:
            filter(lambda channel: channel.RX_RESET, self.dut.get_modules('fei4_rx'))
        sleep(0.1)  # sleep here for a while

    def get_rx_sync_status(self, channels=None):
        if channels:
            return map(lambda channel: True if self.dut[channel].READY else False, channels)
        else:
            return map(lambda channel: True if channel.READY else False, self.dut.get_modules('fei4_rx'))

    def get_rx_8b10b_error_count(self, channels=None):
        if channels:
            return map(lambda channel: self.dut[channel].DECODER_ERROR_COUNTER, channels)
        else:
            return map(lambda channel: channel.DECODER_ERROR_COUNTER, self.dut.get_modules('fei4_rx'))

    def get_rx_fifo_discard_count(self, channels=None):
        if channels:
            return map(lambda channel: self.dut[channel].LOST_DATA_COUNTER, channels)
        else:
            return map(lambda channel: channel.LOST_DATA_COUNTER, self.dut.get_modules('fei4_rx'))
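
data_words_per_second() above uses a Queue(maxsize=1) as a single-slot mailbox between threads: the requester drains any stale result, sets an event, and waits for the producer to publish a fresh value. A stripped-down sketch of that handshake (all names are illustrative, not from FifoReadout):

from Queue import Queue, Empty
from threading import Event, Thread
import time

result_box = Queue(maxsize=1)   # holds at most one, always-fresh result
calculate = Event()

def producer():
    for _ in range(50):
        if calculate.is_set():
            calculate.clear()
            result_box.put(42)  # publish the freshly computed value
        time.sleep(0.05)

Thread(target=producer).start()

if result_box.full():
    result_box.get()            # discard a stale result from an earlier request
calculate.set()                 # ask the producer for a fresh value
try:
    print(result_box.get(timeout=1.0))
except Empty:
    calculate.clear()           # the producer did not answer in time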
Example #50
class Agent:
    def __init__(self, personality=None):
        self.personality = None
        self.neurotics = 0.0
        self.MAX_NEUROTICS = 3.0
        self._in_q = Queue()
        self._out_q = Queue(1)
        self.FRAMES_PER_SECOND = 60.0
        self.SECS_PER_UPDATE = 1.0/self.FRAMES_PER_SECOND
        self.EVENT_DURATION = 1.0  # seconds
        self.BASE_VELOCITY = 1.0/250  # self.TIME_TO_TRAVEL*self.SECS_PER_UPDATE
        self.DISTANCE_TOLERANCE = 1.0/10000.0

        self.set_personality(personality)

    def start(self):
        data = threading.local()
        thread = threading.Thread(name='Agent Runloop', target=self._run, args=(data,))
        thread.daemon = True
        thread.start()

    def stop(self):
        self._in_q.put_nowait(Invocation(self._stop, (), {}))

    def put(self, values=None):
        if values:
            if isinstance(values, np.ndarray):
                v = values
            elif isinstance(values, OCC):
                v = values.pad.state
            else:
                raise ValueError('Invalid event values')
            self._in_q.put_nowait(Invocation(self._put, (v,), {}))

    def get(self):
        mood = self._out_q.get()
        if mood is not None and len(mood) == 3:
            return PAD(pleasure=mood[0], arousal=mood[1], dominance=mood[2])

    def set_personality(self, values):
        if values:
            if isinstance(values, np.ndarray):
                ocean = OCEAN(personality=values)
            elif isinstance(values, OCEAN):
                ocean = values
            else:
                raise ValueError('Invalid personality values')
        else:
            ocean = OCEAN()
        self._in_q.put_nowait(Invocation(self._set_personality, (ocean,), {}))

    def _run(self, data):
        data.running = True
        data.events = []
        data.state = np.zeros(3)
        previous = time()
        lag = 0.0
        print 'running...'
        while data.running:
            current = time()
            elapsed = current - previous
            previous = current
            lag += elapsed

            self._process_input(data)
            while lag >= self.SECS_PER_UPDATE:
                self._update(data)
                lag -= self.SECS_PER_UPDATE

            self._process_output(data)
        print 'stopped!'

    def _process_input(self, data):
        while not self._in_q.empty():
            job = self._in_q.get()
            job.fn(data, *job.args, **job.kwargs)

    def _process_output(self, data):
        if self._out_q.full():
            with self._out_q.mutex:
                self._out_q.queue.clear()
        self._out_q.put_nowait(data.state)

    def _update(self, data):
        if len(data.events) > 0:
            # calculate events weighted average
            vectors = []
            weights = []
            replacement = []
            for i in range(len(data.events)):
                if data.events[i].get_influence() > 0:
                    vectors.append(data.events[i].values)
                    weights.append(data.events[i].get_influence())
                    replacement.append(data.events[i])
            data.events = list(replacement)
            if len(vectors) > 0 and len(weights) > 0:
                avg_event = np.average(vectors, axis=0, weights=weights)
                # move mood towards average event
                data.state = self._move_to(data.state, avg_event)
        else:
            # move mood towards personality
            data.state = self._move_to(data.state, data.personality)

    def _move_to(self, _from, _to):
        if np.allclose(_from, _to, self.DISTANCE_TOLERANCE):
            return np.array(_to)
        direction = _to - _from
        direction /= np.linalg.norm(direction)
        _from += direction * self.BASE_VELOCITY * self.neurotics
        return _from

    def _stop(self, data):
        print 'stopping...'
        data.running = False

    def _put(self, data, value):
        print 'putting data...'
        data.events.append(Event(value, self.EVENT_DURATION))

    def _set_personality(self, data, value):
        print 'setting personality...'
        data.personality = np.array(value.pad.state)
        data.state = np.array(value.pad.state)
        self.personality = value
        self.neurotics = Agent.map_value(value.neuroticism, -1, 1, 1, self.MAX_NEUROTICS)

    @staticmethod
    def map_value(value=0.0, in_min=0.0, in_max=1.0, out_min=0.0, out_max=1.0):
        return (float(value) - float(in_min)) * (float(out_max) - float(out_min)) / (
            float(in_max) - float(in_min)) + float(out_min)
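
_process_output() above keeps only the freshest state in the bounded out-queue by clearing the underlying deque while holding the queue's mutex. A sketch of the same "latest value wins" idea using only the public Queue API (the helper name is illustrative, not from Agent):

from Queue import Queue, Empty, Full

def publish_latest(q, state):
    """Replace whatever is in q with the most recent state."""
    try:
        q.get(block=False)       # throw away the stale state, if any
    except Empty:
        pass
    try:
        q.put(state, block=False)
    except Full:
        pass                     # another writer won the slot; skip this frame

out_q = Queue(1)
publish_latest(out_q, (0.1, 0.2, 0.3))
publish_latest(out_q, (0.4, 0.5, 0.6))
print(out_q.get())  # (0.4, 0.5, 0.6) -- only the newest state survives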
Example #51
class Controller(Thread):
  """ Thread object that handles LLDB events and commands. """

  CTRL_VOICE = 238 # doesn't matter what this is
  TARG_NEW = 1
  TARG_DEL = 1 << 1
  PROC_NEW = 1 << 2
  PROC_DEL = 1 << 3
  BP_CHANGED = 1 << 4
  BAD_STATE = 1 << 5 # multiple targets

  def __init__(self, vimx):
    """ Creates the LLDB SBDebugger object and more! """
    import logging
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.INFO)

    self._sink = open('/dev/null', 'w')  # writable sink for debugger output
    self._dbg = lldb.SBDebugger.Create()
    self._dbg.SetOutputFileHandle(self._sink, False)
    self._ipreter = self._dbg.GetCommandInterpreter()

    self._rcx = lldb.SBListener("the_ear") # receiver
    self._trx = lldb.SBBroadcaster("the_mouth") # transmitter for user events
    self._trx.AddListener(self._rcx, self.CTRL_VOICE)

    self._target = None
    self._process = None
    self._num_bps = 0

    self.in_queue = Queue(maxsize=2)
    self.out_queue = Queue(maxsize=1)

    self.vimx = vimx
    self.busy_stack = 0 # when > 0, buffers are not updated
    self.buffers = VimBuffers(vimx)
    self.session = Session(self, vimx)

    super(Controller, self).__init__() # start the thread

  def is_busy(self):
    return self.busy_stack > 0

  def busy_more(self):
    self.busy_stack += 1

  def busy_less(self):
    self.busy_stack -= 1
    if self.busy_stack < 0:
      self.logger.critical("busy_stack < 0")
      self.busy_stack = 0

  def safe_call(self, method, args=[], sync=False, timeout=None):
    """ (thread-safe) Call `method` with `args`. If `sync` is True, wait for
        `method` to complete and return its value. If timeout is set and non-
        negative, and the `method` did not complete within `timeout` seconds,
        an EventLoopError is raised!
    """
    if self._dbg is None:
      self.logger.critical("Debugger not found!")
      raise EventLoopError("Dead debugger!")
    if self.out_queue.full(): # garbage
      self.out_queue.get() # clean

    try:
      self.in_queue.put((method, args, sync), block=False)
      interrupt = lldb.SBEvent(self.CTRL_VOICE, "the_sound")
      self._trx.BroadcastEvent(interrupt)
      if sync:
        return self.out_queue.get(block=True, timeout=timeout)
    except Empty:
      raise EventLoopError("Timed out!")
    except Full:
      self.logger.critical("Event loop thread is probably dead!")
      raise EventLoopError("Dead event loop!")

  def safe_execute(self, tokens):
    """ (thread-safe) Executes an lldb command defined by a list of tokens.
        If a token contains white-spaces, they are escaped using backslash.
    """
    cmd = ' '.join([ t.replace(' ', '\\ ') for t in tokens ])
    self.safe_call(self.exec_command, [cmd])

  def safe_exit(self):
    """ Exit from the event-loop, and wait for the thread to join.
        Should be called from outside this thread.
    """
    self.safe_call(None)
    self.join()

  def complete_command(self, arg, line, pos):
    """ Returns a list of viable completions for line, and cursor at pos. """
    pos = int(pos)
    result = lldb.SBStringList()

    if arg == line and line != '':
      # provide all possible completions when completing 't', 'b', 'di' etc.
      num = self._ipreter.HandleCompletion('', 0, 1, -1, result)
      cands = ['']
      for x in result:
        if x == line:
          cands.insert(1, x)
        elif x.startswith(line):
          cands.append(x)
    else:
      num = self._ipreter.HandleCompletion(line, pos, 1, -1, result)
      cands = [x for x in result]

    if len(cands) > 1:
      if cands[0] == '' and arg != '':
        if not cands[1].startswith(arg) or not cands[-1].startswith(arg):
          return []
      return cands[1:]
    else:
      return []

  def update_buffers(self, buf=None):
    """ Update lldb buffers and signs placed in source files.
        @param buf
            If None, all buffers and signs would be updated.
            Otherwise, update only the specified buffer.
    """
    if self.is_busy():
      return
    commander = self.get_command_result
    if buf is None:
      self.buffers.update(self._target, commander)
    else:
      self.buffers.update_buffer(buf, self._target, commander)

  def get_state_changes(self):
    """ Get a value denoting how target, process, and/or breakpoint have changed.
        If a new process found, add our listener to its broadcaster.
    """
    changes = 0
    if self._dbg.GetNumTargets() > 1:
      return self.BAD_STATE

    if self._target is None or not self._target.IsValid():
      target = self._dbg.GetSelectedTarget()
      if target.IsValid():
        changes = self.TARG_NEW
        self._target = target
      elif self._target is not None:
        changes = self.TARG_DEL
        self._target = None

    if self._target is None:
      if self._process is not None:
        changes |= self.PROC_DEL
        self._process = None
      if self._num_bps > 0:
        changes |= self.BP_CHANGED
        self._num_bps = 0
      return changes

    if self._process is None or not self._process.IsValid():
      process = self._target.process
      if process.IsValid():
        changes |= self.PROC_NEW
        self._process = process
        process.broadcaster.AddListener(self._rcx,
            lldb.SBProcess.eBroadcastBitStateChanged | \
            lldb.SBProcess.eBroadcastBitSTDOUT | \
            lldb.SBProcess.eBroadcastBitSTDERR)
        self._proc_cur_line_len = 0
        self._proc_lines_count = 0
        self._proc_sigstop_count = 0

      elif self._process is not None:
        changes |= self.PROC_DEL
        self._process = None

    num_bps = self._target.GetNumBreakpoints()
    if self._num_bps != num_bps:
      # TODO what if one was added and another deleted?
      changes |= self.BP_CHANGED
      self._num_bps = num_bps
    # TODO Watchpoints

    return changes

  def change_buffer_cmd(self, buf, cmd):
    """ Change a buffer command and update it. """
    self.buffers._content_map[buf] = cmd
    self.update_buffers(buf=buf)
    if self._target is not None:
      self.vimx.command('drop [lldb]%s' % buf)

  def do_btswitch(self):
    """ Switch backtrace command to show all threads. """
    cmd = self.buffers._content_map['backtrace']
    if cmd != 'bt all':
      cmd = 'bt all'
    else:
      cmd = 'bt'
    self.change_buffer_cmd('backtrace', cmd)

  def bp_set_line(self, spath, line):
    from os.path import abspath
    fpath = abspath(spath).encode('ascii', 'ignore')
    bp = self._target.BreakpointCreateByLocation(fpath, line)
    self.buffers.logs_append(u'\u2192(lldb-bp) %s:%d\n' % (spath, line))
    self.session.bp_map_auto(bp, (spath, line))
    self.update_buffers(buf='breakpoints')

  def do_breakswitch(self, bufnr, line):
    """ Switch breakpoint at the specified line in the buffer. """
    key = (bufnr, line)
    if key in self.buffers.bp_list:
      bps = self.buffers.bp_list[key]
      args = "delete %s" % " ".join([str(b.GetID()) for b in bps])
      self.exec_command("breakpoint " + args)
    else:
      self.bp_set_line(self.vimx.get_buffer_name(bufnr), line)

  def do_breakdelete(self, bp_id):
    """ Delete a breakpoint by id """
    if bp_id:
      self.exec_command("breakpoint delete {}".format(bp_id))

  def put_stdin(self, instr):
    """ Call PutSTDIN() of process with instr. """
    if self._process is not None:
      self._process.PutSTDIN(instr)
    else:
      self.vimx.log('No active process!')

  def get_command_result(self, command, add2hist=False):
    """ Runs command in the interpreter and returns (success, output)
        Not to be called directly for commands which changes debugger state;
        use exec_command instead.
    """
    result = lldb.SBCommandReturnObject()

    self._ipreter.HandleCommand(command.encode('ascii', 'ignore'), result, add2hist)
    return (result.Succeeded(), result.GetOutput() if result.Succeeded() else result.GetError())

  def exec_command(self, command):
    """ Runs command in the interpreter, calls update_buffers, and display the
        result in the logs buffer. Returns True if succeeded.
    """
    self.buffers.logs_append(u'\u2192(lldb) %s\n' % command)
    (success, output) = self.get_command_result(command, True)
    if not success:
      self.buffers.logs_append(output, u'\u2717')
    elif len(output) > 0:
      self.buffers.logs_append(output, u'\u2713')

    state_changes = self.get_state_changes()
    if state_changes & self.TARG_NEW != 0:
      self.session.new_target(self._target)
    elif state_changes & self.BP_CHANGED != 0 and self._target is not None:
      self.session.bp_changed(command, self._target.breakpoint_iter())

    self.update_buffers()
    return success

  def run(self):
    """ This thread's event loop. """
    import traceback
    to_count = 0
    while True:
      event = lldb.SBEvent()
      if self._rcx.WaitForEvent(30, event): # 30 second timeout

        def event_matches(broadcaster, skip=True):
          if event.BroadcasterMatchesRef(broadcaster):
            if skip:
              while self._rcx.GetNextEventForBroadcaster(broadcaster, event):
                pass
            return True
          return False

        if event_matches(self._trx, skip=False):
          try:
            method, args, sync = self.in_queue.get(block=False)
            if method is None:
              break
            self.logger.info('Calling %s with %s' % (method.func_name, repr(args)))
            ret = method(*args)
            if sync:
              self.out_queue.put(ret, block=False)
          except Exception:
            self.logger.critical(traceback.format_exc())

        elif event_matches(self._process.broadcaster):
          # Dump stdout and stderr to logs buffer
          while True:
            out = ''
            stdout = self._process.GetSTDOUT(256)
            if stdout is not None:
              out += stdout
            stderr = self._process.GetSTDERR(256)
            if stderr is not None:
              out += stderr
            if len(out) == 0:
              break
            n_lines = self.buffers.logs_append(out)
            if n_lines == 0:
              self._proc_cur_line_len += len(out)
            else:
              self._proc_cur_line_len = 0
              self._proc_lines_count += n_lines
            if self._proc_cur_line_len > 8192 or self._proc_lines_count > 2048:
              # detect and stop/kill insane process
              if self._process.state == lldb.eStateStopped:
                pass
              elif self._proc_sigstop_count > 7:
                self._process.Kill()
                self.buffers.logs_append(u'\u2717SIGSTOP limit exceeded! Sent SIGKILL!\n')
              else:
                self._process.SendAsyncInterrupt()
                self._proc_sigstop_count += 1
                self.buffers.logs_append(u'\u2717Output limits exceeded! Sent SIGSTOP!\n')
              break
          # end of dump while
          self.update_buffers()

      else: # Timed out
        to_count += 1
        if to_count > 172800: # in case WaitForEvent() does not wait!
          self.logger.critical('Broke the loop barrier!')
          break
    # end of event-loop while

    self._dbg.Terminate()
    self._dbg = None
    self._sink.close()
    self.logger.info('Terminated!')
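
safe_call() above is in effect a small cross-thread RPC: requests enter a bounded in_queue, the event loop executes them, and synchronous callers block on a one-slot out_queue for the return value. A self-contained sketch of the pattern without the LLDB machinery (names are illustrative):

from Queue import Queue, Empty, Full
from threading import Thread

in_queue = Queue(maxsize=2)
out_queue = Queue(maxsize=1)

def event_loop():
    while True:
        method, args, sync = in_queue.get()
        if method is None:       # sentinel: exit the loop
            break
        ret = method(*args)
        if sync:
            out_queue.put(ret, block=False)

def safe_call(method, args=(), sync=False, timeout=None):
    if out_queue.full():
        out_queue.get()          # discard a stale result left by a caller that gave up
    try:
        in_queue.put((method, args, sync), block=False)
        if sync:
            return out_queue.get(block=True, timeout=timeout)
    except Empty:
        raise RuntimeError('Timed out!')
    except Full:
        raise RuntimeError('Event loop thread is probably dead!')

worker = Thread(target=event_loop)
worker.start()
print(safe_call(lambda a, b: a + b, (2, 3), sync=True, timeout=2))  # 5
safe_call(None)                  # the None sentinel asks the loop to exit
worker.join()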
Example #52
class TaskManager(object):
    """
    Task manager class based on thread module which
    executes assigned tasks concurrently. It uses a
    pool of thread workers, a queue of tasks and a pid
    set to monitor job execution.

    .. doctest::

        Use case:
        mgr  = TaskManager()
        jobs = []
        jobs.append(mgr.spawn(func, args))
        mgr.joinall(jobs)

    """
    def __init__(self, nworkers=10, name='TaskManager', qtype='Queue', debug=0):
        self.name   = name
        self.debug  = debug
        self._pids  = set()
        self._uids  = UidSet()
        if  qtype == 'PriorityQueue':
            self._tasks = PriorityQueue()
        else:
            self._tasks = Queue()
        self._workers = [Worker(name, self._tasks, self._pids, self._uids) \
                        for _ in xrange(0, nworkers)]

    def status(self):
        "Return status of task manager queue"
        info = {'qsize':self._tasks.qsize(), 'full':self._tasks.full(),
                'unfinished':self._tasks.unfinished_tasks,
                'nworkers':len(self._workers)}
        return {self.name:info}

    def nworkers(self):
        """Return number of workers associated with this manager"""
        return len(self._workers)

    def assign_priority(self, uid):
        "Assign priority for given uid"
        if  not uid or self._tasks.empty():
            return 0
        frequency = self._uids.get(uid)
        return frequency/10

    def spawn(self, func, *args, **kwargs):
        """Spawn new process for given function"""
        pid = kwargs.get('pid', genkey(str(args) + str(kwargs)))
        evt = Event()
        if  not pid in self._pids:
            self._pids.add(pid)
            task  = (evt, pid, func, args, kwargs)
            if  isinstance(self._tasks, PriorityQueue):
                uid = kwargs.get('uid', None)
                self._uids.add(uid)
                priority = self.assign_priority(uid)
                self._tasks.put((priority, uid, task))
            else:
                self._tasks.put(task)
        else:
            # the event was not added to task list, invoke set()
            # to pass it in wait() call, see joinall
            evt.set()
        return evt, pid

    def remove(self, pid):
        """Remove pid and associative process from the queue"""
        self._pids.discard(pid)

    def is_alive(self, pid):
        """Check worker queue if given pid of the process is still running"""
        return pid in self._pids

    def clear(self, tasks):
        """
        Clear all tasks in a queue. It allows current jobs to run, but will
        block all new requests till the workers' event flag is set again
        """
        map(lambda (evt, pid): (evt.clear(), pid), tasks)

    def joinall(self, tasks):
        """Join all tasks in a queue and quite"""
        map(lambda (evt, pid): (evt.wait(), pid), tasks)

    def quit(self):
        """Put None task to all workers and let them quit"""
        map(lambda w: self._tasks.put(None), self._workers)
        map(lambda w: w.join(), self._workers)

    def force_exit(self):
        """Force all workers to exit"""
        map(lambda w: w.force_exit(), self._workers)
Example #53
from Queue import Queue

#The Queue.Queue class is a synchronized queue implementation.
#A queue may be unbounded or bounded;
#its length is set via the optional maxsize argument of the Queue constructor.
#A maxsize of less than 1 means the queue is unbounded.
myqueue=Queue(maxsize=10)

#Put a value into the queue.
#The put() method takes two arguments:
#the first, item, is required and is the value to insert;
#the second, block, is optional and defaults to 1.
#If the queue is currently full and block is 1, put() suspends the
#calling thread until a slot becomes free.
#If block is 0, put() raises the Full exception instead.
myqueue.put((10,9))

print "queue size=",myqueue.qsize()#returns the size of the queue
print "queue data=",myqueue.get()#retrieves an item from the queue
print "queue is empty?",myqueue.empty()#True if the queue is empty, else False
print "queue is full?",myqueue.full()#True if the queue is full, else False
#print "queue get_nowait=",myqueue.get_nowait()#equivalent to Queue.get(False),
#i.e. a non-blocking Queue.get(); timeout is the wait time

#print "queue put_nowait=",myqueue.put_nowait()#equivalent to Queue.put(item, False)

print "queue task_done=",myqueue.task_done()#after finishing a piece of work,
#task_done() signals the queue that the task is complete

#join() effectively means waiting until the queue is empty before doing anything else
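
The two commented-out calls above raise instead of blocking; a short sketch of the non-blocking variants with their Full and Empty exceptions handled explicitly:

from Queue import Queue, Empty, Full

q = Queue(maxsize=1)
try:
    q.put_nowait('first')        # succeeds: the queue has a free slot
    q.put_nowait('second')       # the queue is now full, raises Full immediately
except Full:
    print("queue is full, item rejected")

try:
    print(q.get_nowait())        # 'first'
    q.get_nowait()               # the queue is now empty, raises Empty immediately
except Empty:
    print("queue is empty")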
Example #54
	sock_rych = zmqctx.socket(zmq.PUB)
	sock_rych.setsockopt(zmq.HWM, 100)
	sock_rych.bind('tcp://*:13372')

	# input and output queues for worker threads that call enrycher. (zmq is only used in the main thread)
	in_queue = Queue(maxsize=MAX_ENRYCHER_REQUESTS)
	out_queue = Queue(maxsize=100*MAX_ENRYCHER_REQUESTS)
	
	# prepare worker threads
	for i in range(MAX_ENRYCHER_REQUESTS):
		worker = threading.Thread(target=enrycher_worker, args=(in_queue,out_queue))
		worker.start()

	try:
		while True:
			if in_queue.full():
				print 'sleep ... %d:%d ...' % (in_queue.qsize(), out_queue.qsize(),),
				time.sleep(1)
				print '!'
				
			if not in_queue.full() and zmq.select([sock_txt], [], [], 3)[0]:
				article = sock_txt.recv_pyobj()				
				if is_enrychable(article):
					print 'enqueued %s (lang=%r)' % (article['id'], article['lang'])
					print '%d:%d' % (in_queue.qsize(), out_queue.qsize(),),
					in_queue.put(article)
				else:
					print 'ignored %s (lang=%r)' % (article['id'], article['lang'])
					print '%d:%d' % (in_queue.qsize(), out_queue.qsize(),),
					out_queue.put(article)
Example #55
class DBPool(object):
    '''A database connection pool'''
    
    def initPool(self, maxActive=5, maxWait=None, init_size=0, db_type="mysql", **config):
        '''Initialize the database connection pool
        '''
        log.msg("__init__ Pool..")
        self.__freeConns = Queue(maxActive)
        self.maxWait = maxWait
        self.db_type = db_type
        self.config = config
        if init_size > maxActive:
            init_size = maxActive
        for i in range(init_size):
            self.free(self._create_conn())
        self.nowconn = None
            
    def __del__(self):
        log.msg("__del__ Pool..")
        self.release()
        
    def release(self):
        '''Release resources: close all connections in the pool'''
        log.msg("release Pool..")
        while self.__freeConns and not self.__freeConns.empty():
            con = self.get()
            con.release()
        self.__freeConns = None
            
    def _create_conn(self):
        '''Create a new connection'''
        if self.db_type in DBCS:
            return DBCS[self.db_type](**self.config)
            
    def get(self, timeout=None):
        '''Get a connection from the pool
        @param timeout: wait timeout in seconds
        '''
        if timeout is None:
            timeout = self.maxWait
        conn = None
        if self.__freeConns.empty():  # if the pool is empty, create a connection directly
            conn = self._create_conn()
        else:
            conn = self.__freeConns.get(timeout=timeout)
            conn.pool = self
        return conn
    
    def cursor(self,cursorclass = None):
        '''Generic cursor interface'''
        conn = self.get()
        self.nowconn = conn
        ucur = UCursor( conn, cursorclass)
        return ucur
    
    def commit(self):
        '''Commit the current transaction'''
        try:
            self.nowconn.commit()
        except Exception as e:
            log.err(e.message)
    def rollback(self):
        '''Roll back the current transaction
        '''
        try:
            self.nowconn.rollback()
        except Exception as e:
            log.err(e.message)
    
    def free(self, conn):
        '''Return a connection to the pool
        @param conn: the connection object
        '''
        conn.pool = None
        if self.__freeConns.full():  # if the pool is already full, close the connection
            conn.release()
        return self.__freeConns.put_nowait(conn)
    
    def execSql(self,sqlstr):
        '''Execute a database write (insert, update or delete)
        @param sqlstr: str, the SQL statement to execute
        '''
        try:
            conn = self.get(5)
            cursor = conn.cursor()
            count = cursor.execute(sqlstr)
            conn.commit()
            cursor.close()
            conn.close()
            if count>0:
                return True
            return False
        except Exception as err:
            log.err(err)
            conn.close()
            return None  # returning None makes the remote call raise an exception
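
free() above shows the core pooling move: put_nowait() returns an idle connection to the bounded queue, and full() decides when a surplus connection should be closed instead of pooled. A minimal generic sketch of that pattern (the Resource class is illustrative, not part of DBPool):

from Queue import Queue

class Resource(object):
    def release(self):
        print('closing surplus resource')

pool = Queue(maxsize=2)

def free(resource):
    if pool.full():         # the pool already holds its maximum of idle resources
        resource.release()  # close the surplus instead of queueing it
    else:
        pool.put_nowait(resource)

for _ in range(3):
    free(Resource())        # the third resource is closed, not pooled
print(pool.qsize())         # 2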
Example #56
class TaskManager(object):
    """
    Task manager class based on thread module which
    executes assigned tasks concurrently. It uses a
    pool of thread workers, a queue of tasks and a pid
    set to monitor job execution.

    .. doctest::

        Use case:
        mgr  = TaskManager()
        jobs = []
        jobs.append(mgr.spawn(func, args))
        mgr.joinall(jobs)

    """
    def __init__(self, nworkers=10, name='TaskManager'):
        self.name = name
        self.pids = set()
        self.uids = UidSet()
        self.tasks = Queue()
        self.workers = [Worker(name, self.tasks, self.pids, self.uids) \
                        for _ in range(0, nworkers)]

    def status(self):
        "Return status of task manager queue"
        info = {'qsize':self.tasks.qsize(), 'full':self.tasks.full(),
                'unfinished':self.tasks.unfinished_tasks,
                'nworkers':len(self.workers)}
        return {self.name: info}

    def nworkers(self):
        """Return number of workers associated with this manager"""
        return len(self.workers)

    def spawn(self, func, *args, **kwargs):
        """Spawn new process for given function"""
        pid = kwargs.get('pid', genkey(str(args) + str(kwargs)))
        evt = threading.Event()
        if  not pid in self.pids:
            self.pids.add(pid)
            task = (evt, pid, func, args, kwargs)
            self.tasks.put(task)
        else:
            # the event was not added to task list, invoke set()
            # to pass it in wait() call, see joinall
            evt.set()
        return evt, pid

    def remove(self, pid):
        """Remove pid and associative process from the queue"""
        self.pids.discard(pid)

    def is_alive(self, pid):
        """Check worker queue if given pid of the process is still running"""
        return pid in self.pids

    def clear(self, tasks):
        """
        Clear all tasks in a queue. It allows current jobs to run, but will
        block all new requests till the workers' event flag is set again
        """
        _ = [t[0].clear() for t in tasks] # each task is returned from spawn, i.e. a pair (evt, pid)

    def joinall(self, tasks):
        """Join all tasks in a queue and quit"""
        _ = [t[0].wait() for t in tasks] # each task is returned from spawn, i.e. a pair (evt, pid)

    def quit(self):
        """Put None task to all workers and let them quit"""
        _ = [self.tasks.put(None) for _ in self.workers]
        time.sleep(1) # let worker threads cool off and quit
Example #57
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)

        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            temp_logger = logging.getLogger('HubstorageClient')
            temp_logger.info('BatchWriter: item size is {}'.format(len(data)))
            temp_logger.info('BatchWriter: data type is {}'.format(type(data)))
            temp_logger.info('BatchWriter: item type {}'.format(type(item)))
            js = json.loads(data)
            temp_logger.info('BatchWriter: data keys {}'.format(js.keys()))

            if getattr(item, 'keys', None):
                temp_logger.info('BatchWriter: item keys {}'.format(item.keys()))

                for key, value in item.items():
                    d = {}
                    d[key] = value
                    res = jsonencode(d)
                    temp_logger.info('BatchWriter: key <{}> JSONENCODE size is {}, value type {}'.format(key, len(res), type(value)))
                    if getattr(value, '__len__', None):
                        temp_logger.info('BatchWriter: key <{}> value size is {}'.format(key, len(value)))

            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return self._nextid.next()

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Example #58
class DatabaseAL (threading.Thread):
    "database abstraction layer"
    
    def __init__(self, server, enabled = 0):
        threading.Thread.__init__(self)
        self.name = "DatabaseAL worker thread"
        self.setDaemon(True)
        self.log = logging.getLogger('DatabaseAL')
        #self.client = SensorClient()
        self.server = server

        self.enabled = enabled        
        if self.enabled == 0:
            self.log.warning("OpentTSDB is disabled. Data will not be saved in OpenTSDB.")
                
        self.dataq = Queue(maxsize = 1024*1024)
        self.shutdown_evt = Event()
        
    def append(self, timestamp, values, tags):
        "append data to the db"
        #self.log.debug("Got data: {} {} {}".format(timestamp, values, tags))
        # append a tuple
        #if type(tags) is SensorTag:
        #    print tags
            
        if self.dataq.full():
            self.log.error("DataQ is full! Data is lost!")
        else:
            self.dataq.put( (timestamp, values, tags) )
            
    def busy(self):
        return not self.dataq.empty()
    
    def put(self, worklist):
        # format is (unix timestamp, value, Tags)
        
        m = set()
        t = set()
        p = []
        for w in worklist:
            ts, val, tags = w
            if tags.metric not in m:
                m.add(tags.metric)
            for k,v  in tags.toTagData().items():
                kv_str = "{}={}".format(k,v)
                if kv_str not in t:
                    t.add(kv_str)
                    
            p += [{'metric': tags.metric,
                      'timestamp': ts,
                      'value': val,
                      'tags': tags.toTagData(),
                      }]
        #self.log.debug("putting data: "+json.dumps(p))
        
        self.log.info("putting metrics:"+str(m))
        self.log.info("with tags:"+str(t))
        r = requests.post('{}/put?details'.format(self.server),
                                                  data = json.dumps(p))
        if r.status_code != requests.codes.ok:
            self.log.error("put failed: "+ r.text)
        
            for ts, val, tags in worklist:
                self.append( ts, val, tags)
        #else:
            #self.log.info("put {} samples.".format(len(worklist)))
        
    def run(self):
        while not self.shutdown_evt.wait(1):
            if self.shutdown_evt.is_set():
                break
            if self.dataq.empty():
                continue
            
            worklist = []            
            while not self.dataq.empty() and len(worklist) < 2500:
                worklist.append(self.dataq.get())
            
            self.log.info("Putting a batch of {} samples".format(len(worklist)))            
            # now batch processes the worklist
            
            if len(worklist) > 0:
                try:                   
                    #i = self.client.multiplePut(worklist)
                
                    if self.enabled > 0:    
                        self.put(worklist)                    
                        self.log.info("Pushed {} samples to OpenTSDB.".format(len(worklist)))
                        
                except Exception as x:                    
                    self.log.error("Push failed, error ={}; worklist = {}".format(x, worklist))
                       
        self.log.info("Shutdown")
    
    def stop(self):
        self.shutdown_evt.set()
        self.join(5)
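
run() above drains the queue in bounded batches rather than one item at a time, which keeps each HTTP put reasonably sized. A compact sketch of that drain loop (the batch size and names are illustrative):

from Queue import Queue

dataq = Queue(maxsize=1024)
for i in range(10):
    dataq.put(i)

BATCH = 4
while not dataq.empty():
    worklist = []
    while not dataq.empty() and len(worklist) < BATCH:
        worklist.append(dataq.get())
    print('batch of %d: %s' % (len(worklist), worklist))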
Example #59
class NFQ:
    ##
    # @param in_size number of encoded features
    # @param out_size = number of possible actions
    def __init__(self, in_size, out_size, learning_rate=0.75, model_path=None, weights_path=None):
        assert in_size > 1
        assert type(in_size) is int
        assert out_size > 1
        assert type(out_size) is int

        self.in_size = in_size
        self.out_size = out_size
        self.gamma = learning_rate

        if model_path is None:
            self.model = Sequential()
            self.model.add(Dense(64, input_dim=in_size, init='lecun_uniform'))
            self.model.add(Activation('relu'))
            # self.model.add(BatchNormalization())
            # self.model.add(Dropout(0.2))
            self.model.add(Dense(40, init='lecun_uniform'))
            self.model.add(Activation('relu'))
            # self.model.add(Dropout(0.2))
            self.model.add(Dense(out_size, init='lecun_uniform'))
            self.model.add(Activation('linear'))
        else:
            assert weights_path is not None
            self.model = model_from_json(open(model_path).read())
            self.model.load_weights(weights_path)

        self.model.compile(
            loss='mse',     # maybe binary_crossentropy?
            optimizer='rmsprop'
        )

        self.transitions = Queue(25000)

    ##
    #   Calculate target Q-fun values and train a NN on it
    def train(self):
        queue_size = self.transitions.qsize()
        np_data = list(self.transitions.queue)
        np_data = np.array(np_data)
        r = np.random.randint(queue_size-1, size=3000)
        np_data = np_data[r, :]
        # trim the input data of neural net because it also contains
        # unwanted state' and reward information => need it only for target Q
        in_data = np.delete(np_data, np.s_[self.in_size::], 1)
        out_data = self.get_training_data(np_data)
        # stop_cb = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
        print 'Learning...'
        hist = self.model.fit(
            in_data,
            out_data,
            nb_epoch=500,
            batch_size=256,
            verbose=0,
            validation_split=0.4
        )
        print(
            'Loss: from ',
            hist.history['loss'][0],
            ' to ',
            hist.history['loss'][-1]
        )
        # callbacks=[stop_cb]

    ##
    # Predicts next action, which is the one maximizing the Q function in current state
    def predict_action(self, state):
        q = self.model.predict(state)
        return np.argmax(q)

    ##
    # Process training data into correct format (transitions)
    def get_training_data(self, data):
        out_data = list()
        for row in data:
            reward = row[-1]
            selected_action = row[self.in_size]
            next_state = row[self.in_size+1:-1]
            next_state = next_state.reshape(1, next_state.size)
            predicted_Q = self.model.predict(next_state)
            maxQ = np.max(predicted_Q)
            minQ = np.min(predicted_Q)

            out = np.zeros((self.out_size,))
            if reward >= 1:
                out[int(selected_action)] = 1
            elif reward < 0:
                out[int(selected_action)] = -1
            else:
                out[int(selected_action)] = reward + self.gamma*maxQ

            for i in xrange(self.out_size):
                if i != int(selected_action):
                    out[i] = minQ
            out_data.append(out)

        return np.array(out_data)

    ##
    # Add transition to training dataset
    # transition has form [st, a, st+1, r]
    def add_transition(self, transition):
        # transition must be a numpy array
        assert type(transition) is np.ndarray
        if self.transitions.full():
            self.transitions.get()

        self.transitions.put(transition)

    ##
    # Save NN to external file
    # @param model_path File containing NN architecture
    # @param weights_path File containing NN weights
    def save_net(self, model_path=None, weights_path=None):
        if model_path is None:
            model_path = 'model.json'

        if weights_path is None:
            weights_path = 'weights.h5'

        # remove old file if exists
        try:
            os.remove(weights_path)
        except OSError:
            pass

        try:
            self.model.save_weights(weights_path)
            open(model_path, 'w').write(self.model.to_json())
        except Exception as e:
            print('Saving a model failed')
            print(type(e))
            print(e)