Example #1
class DownloadByGevent(GetTitleUrls):

    def __init__(self, download_urls):
        super(DownloadByGevent, self).__init__()
        self.workQueue = Queue(1000)
        self.download_urls = download_urls

    def crawler(self, index):
        Process_id = 'Process-' + str(index)
        while not self.workQueue.empty():
            url = self.workQueue.get(timeout=2)
            try:
                r = self.get_soup(url, timeout=20)
                my_write(' '.join(str(x) for x in [Process_id, self.workQueue.qsize(), r.status_code, url]))
            except Exception as e:
                # r may not exist if get_soup() raised, so log the error itself
                my_write(' '.join(str(x) for x in [Process_id, self.workQueue.qsize(), 'Error:', e, url]))
    
    def boss(self):
        for url in self.download_urls:
            self.workQueue.put_nowait(url)

    def start(self):
        start_time = time.time()
        gevent.spawn(self.boss).join()
        jobs = []
        for i in range(10):
            jobs.append(gevent.spawn(self.crawler, i))

        gevent.joinall(jobs)
        print('end')
        print("爬虫时间为%s"%time.time() - start_time)
Example #2
    def test_add_to_retry_queue(self):
        retry_items_queue = Queue()
        worker = ResourceItemWorker(
            config_dict=self.worker_config,
            retry_resource_items_queue=retry_items_queue)
        retry_item = {
            'id': uuid.uuid4().hex,
            'dateModified': datetime.datetime.utcnow().isoformat(),
        }
        self.assertEqual(retry_items_queue.qsize(), 0)

        # Add to retry_resource_items_queue
        worker.add_to_retry_queue(retry_item)
        sleep(worker.config['retry_default_timeout'] * 2)
        self.assertEqual(retry_items_queue.qsize(), 1)
        retry_item_from_queue = retry_items_queue.get()
        self.assertEqual(retry_item_from_queue['retries_count'], 1)
        self.assertEqual(retry_item_from_queue['timeout'],
                         worker.config['retry_default_timeout'] * 2)

        # Add to retry_resource_items_queue with status_code '429'
        worker.add_to_retry_queue(retry_item, status_code=429)
        retry_item_from_queue = retry_items_queue.get()
        self.assertEqual(retry_item_from_queue['retries_count'], 1)
        self.assertEqual(retry_item_from_queue['timeout'],
                         worker.config['retry_default_timeout'] * 2)

        # Drop from retry_resource_items_queue
        retry_item['retries_count'] = 3
        worker.add_to_retry_queue(retry_item)
        self.assertEqual(retry_items_queue.qsize(), 0)

        del worker
Example #3
    def test_concurrent_rw(self):
        written_data = Queue()

        def _writer(_buffer):
            while True:
                data = b"".join([
                    ascii_letters[m].encode()
                    for m in [randrange(0, 8) for _ in range(8)]
                ])
                _buffer.write(data)
                written_data.put(data)
                sleep(0.2)

        writer = spawn(_writer, self.buffer)
        writer.start()
        sleep(0.5)
        data = self.buffer.read()
        _data = b""
        while written_data.qsize() != 0:
            _data += written_data.get()
        self.assertEqual(data, _data)
        sleep(0.5)
        data = self.buffer.read()
        _data = b""
        while written_data.qsize() != 0:
            _data += written_data.get()
        self.assertEqual(data, _data)
        writer.kill()
        writer.get()
Example #4
class FilterTests(TestCase):
    def create_stage(self, **conf):
        return self.cls(**conf)

    def create(self, conf={}, events=[]):
        self.input = Queue()
        self.output = Queue()
        with DummyContext():
            self.i = self.create_stage(**conf)
        self.input = self.i.setup(self.output)

        self.i.start()
        for ev in events:
            self.input.put(ev)
        return self.i

    def wait(self, timeout=1.0, events=1):
        with gevent.Timeout(timeout):
            # wait for input to be consumed and output to be produced
            while self.input.qsize():
                gevent.sleep(0.0)
            while self.output.qsize() < events:
                gevent.sleep(0.0)

        if events:
            return [self.output.get() for n in range(events)]

    def tearDown(self):
        self.i.stop()
Example #6
class FlowTests(TestCase):
    def create(self, conf={}, events=[]):
        self.input = Queue()
        self.output = Queue()

        context = DummyContext()
        with context:
            self.i = self.create_stage(**conf)
            self.input = self.i.setup(self.output)

        self.assertEqual(1, len(context.stages))

        self.i.start()
        for ev in events:
            self.input.put(ev)
        return self.i

    def wait(self, timeout=1.0, events=1):
        with gevent.Timeout(timeout):
            # wait for input to be consumed and output to be produced
            while self.input.qsize():
                gevent.sleep(0.0)
            while self.output.qsize() < events:
                gevent.sleep(0.0)

        self.i.stop()
        if events:
            return [self.output.get() for n in range(events)]
Example #8
class ConnectionQueue(object):
    """ Holds connections to resources. Each time it's called a connection is fetched from its underlying queue
    assuming any connection is still available.
    """
    def __init__(self, pool_size, queue_build_cap, conn_name, conn_type,
                 address, add_client_func):
        self.queue = Queue(pool_size)
        self.queue_build_cap = queue_build_cap
        self.conn_name = conn_name
        self.conn_type = conn_type
        self.address = address
        self.add_client_func = add_client_func

        self.logger = logging.getLogger(self.__class__.__name__)

    def __call__(self):
        return _Connection(self.queue, self.conn_name)

    def put_client(self, client):
        self.queue.put(client)
        self.logger.info('Added `%s` client to %s (%s)', self.conn_name,
                         self.address, self.conn_type)

    def build_queue(self):
        """ Spawns greenlets to populate the queue and waits up to self.queue_build_cap seconds until the queue is full.
        If it never is, raises an exception stating so.
        """
        for x in range(self.queue.maxsize):
            gevent.spawn(self.add_client_func)

        start = datetime.utcnow()
        build_until = start + timedelta(seconds=self.queue_build_cap)

        while not self.queue.full():
            gevent.sleep(0.5)

            now = datetime.utcnow()
            if now >= build_until:

                self.logger.error(
                    'Built %s/%s %s clients to `%s` within %s seconds, giving up',
                    self.queue.qsize(), self.queue.maxsize, self.conn_type,
                    self.address, self.queue_build_cap)
                return

            self.logger.info(
                '%d/%d %s clients connected to `%s` (%s) after %s (cap: %ss)',
                self.queue.qsize(), self.queue.maxsize, self.conn_type,
                self.address, self.conn_name, now - start,
                self.queue_build_cap)

        self.logger.info('Obtained %d %s clients to `%s` for `%s`',
                         self.queue.maxsize, self.conn_type, self.address,
                         self.conn_name)
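The build-then-poll idea in build_queue() can be distilled as below; slow_connect and the 2-second cap are illustrative stand-ins, not part of the example's API:

import gevent
from gevent.queue import Queue
from datetime import datetime, timedelta

pool = Queue(5)

def slow_connect():
    gevent.sleep(0.3)  # stands in for building a real client
    pool.put(object())

for _ in range(pool.maxsize):
    gevent.spawn(slow_connect)

deadline = datetime.utcnow() + timedelta(seconds=2)
while not pool.full():
    gevent.sleep(0.1)
    if datetime.utcnow() >= deadline:
        print('gave up with %d/%d clients' % (pool.qsize(), pool.maxsize))
        break
else:
    print('pool ready: %d clients' % pool.qsize())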
Example #9
def main(psize, filename=None):
    if filename:
        urls = Queue()
        results = Queue()
        pool = Pool(int(psize))
        reader = gevent.spawn(readfile, filename, urls)
        request = gevent.spawn(work_input_file, urls, results, reader)
        pool.add(reader)
        pool.add(request)
        pool.join()
        pool.free_count()
        print('results qsize:', results.qsize())
        print('urls qsize:', urls.qsize())
        return results
Example #10
class ASyncHandler(Group):

	def __init__(self, master, count=100, waitout=0.1):
		super(ASyncHandler, self).__init__(
			master, log=master.log, count=count, timeout=1)
		self.master = master
		self.queue = Queue()
		self.waitout = waitout
		self._size = 0
		self._last = 0

	def size(self):
		if time.time() - self._last < 1:
			return self._size
		self._last = time.time()
		self._size = self.queue.qsize()
		return self._size

	def do(self, func, *args, **kwargs):
		if self.size() < 500:
			self.queue.put((func, args, kwargs))
		else:
			func(*args, **kwargs)

	def quick(self, limit=50):
		count = 0
		while count < limit:
			try:
				func, args, kwargs = self.queue.get_nowait()
				func(*args, **kwargs)
				count += 1
			except Empty:
				break

	def handle(self):
		count = 0
		while True:
			try:
				# use get_nowait(): a blocking get() never raises Empty,
				# so this loop could never break
				func, args, kwargs = self.queue.get_nowait()
				func(*args, **kwargs)
				count += 1
			except Empty:
				break

		self.wait(self.waitout)

	def on_quit(self):
		if not self.queue.empty():
			self.log.info('%s - %d tasks still pending, running them now ...'
				% (self.name, self.queue.qsize()))
Example #11
    def test_put_failure(self):
        queue = Queue(1)  # bounded: holds at most one item
        queue.put(object())
        assert_that(queue.qsize(), is_(1))

        transaction.begin()

        put_nowait(queue, self)
        # still size 1
        assert_that(queue.qsize(), is_(1))
        with self.assertRaises(Full) as cm:
            transaction.commit()

        assert_that(cm.exception, is_(Full))
        assert_that(queue.get(block=False), is_(object))
Example #12
def main(psize, filename=None):
    psize = int(psize)
    if filename:
        urls = Queue()
        results = Queue()
        reader = gevent.spawn(readfile, filename, urls)
        jobs = [gevent.spawn(work_input_file, urls, results, reader) for i in range(psize)]
        print('spawned jobs:', jobs)
        gevent.joinall(jobs)
        print('results qsize:', results.qsize())
        print('urls qsize:', urls.qsize())
        return results
    else:
        pool = Pool(psize)
        urls = ['http://www.baidu.com'] * 100
        return pool.map(work, urls)
Example #13
class Scheduler(object):
    """ Scheduler """
    def __init__(self, spider):
        self.request_filter = RequestFilter()
        self.queue = Queue()
        self.settings = spider.settings
        self.timeout = self.settings.get('TIMEOUT', 5)
        self.download_delay = self.settings.get('DOWNLOAD_DELAY', 0)
        self.logger = spider.logger

    def enqueue_request(self, request):
        """put request
        """
        if self.request_filter.request_seen(request):
            self.logger.debug("ignore %s", request.url)
            return
        self.queue.put(request)

    def next_request(self):
        """next request
        """
        gevent.sleep(self.download_delay)
        return self.queue.get(timeout=self.timeout * 3)

    def __len__(self):
        return self.queue.qsize()
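Note that next_request() raises gevent.queue.Empty if nothing arrives within timeout * 3 seconds, so a caller might look like this sketch (the download callable is an assumed stand-in):

import gevent
from gevent.queue import Empty

def crawl_loop(scheduler, download):
    while True:
        try:
            request = scheduler.next_request()
        except Empty:
            break  # scheduler stayed idle long enough: stop crawling
        gevent.spawn(download, request)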
Example #14
def batch_request(url, reqs, config=None, workers_num=1):
    # avoid a mutable default argument: this function writes into config
    if config is None:
        config = {}
    if "interval_time" not in config:
        config["interval_time"] = None

    if "headers" not in config:
        config["headers"] = {}
    elif "content-type" in config["headers"]:
        if config["headers"]["content-type"] == "application/json":
            reqs = [json.dumps(req).strip() for req in reqs]

    if "method" not in config:
        config["method"] = "POST"

    if "expected" not in config:
        config["expected"] = "True"

    reqs_queue = Queue()

    for req in reqs:
        reqs_queue.put_nowait(req)

    count = reqs_queue.qsize()

    workers = [gevent.spawn(request_worker, url, reqs_queue, config, worker=worker) for worker in range(workers_num)]
    start = time()
    # send requests at the same time
    gevent.joinall(workers)
    # Response.elapsed for a single request, offered by requests
    print "total requests number: %d" % (count)
    print "total elapsed time: %s\n" % (time()-start)
Example #15
class SyncBaseDeamon(object):

    def __init__(self, interval=5):
        self.ip_addr = socket.gethostbyname(socket.gethostname())
        self.pid = os.getpid()
        self.channel = Queue(1)
        self.sync_interval = int(interval)
        self.exit_flag = Event()
        self.lock_path = ""
        self.logger = logging.getLogger()

    def exit(self):
        if not self.exit_flag.isSet():
            self.exit_flag.set()
            self.channel.put('exit')
        else:
            self.logger.warn("Waiting exit signal been resolved")

    def terminate(self, code):
        zk.close()
        sys.exit(code)

    def run(self):
        greenlets = []
        try:
            zk.create(
                self.lock_path,
                "%s:%s" % (self.ip_addr, self.pid),
                ephemeral=True,
                makepath=True
            )
        except NodeExistsError:
            ret, _ = zk.get(self.lock_path)
            ip, pid = ret.split(':')
            self.logger.error("Servers syncing process is running on %s, pid: %s" % (ip, pid))
            self.terminate(EXIT_CODES['leave_alone'])

        gevent.signal(signal.SIGTERM, self.exit)
        gevent.signal(signal.SIGINT, self.exit)
        gevent.signal(signal.SIGHUP, self.exit)

        greenlets.append(gevent.spawn(self.sync))
        gevent.spawn(self.ticker)

        gevent.joinall(greenlets)
        self.logger.warn("syncing deamon exited")
        self.terminate(EXIT_CODES['normal_exit'])

    def ticker(self):
        while True:
            try:
                if self.channel.qsize() == 0:
                    self.channel.put("sync", block=False)
            except Full:
                self.logger.error("something wrong occur in syncing thread")
                pass
            gevent.sleep(self.sync_interval)

    def sync(self, test=False):
        pass
Example #16
class BlackBerryPushService(object):
    def __init__(self, app_id, password, push_url):
        self.app_id = app_id
        self.password = password
        self.push_url = push_url
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.bbp')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("BlackBerry Push service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception as e:
                    print(e)
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
                finally:
                    if self._send_queue.qsize() < 1 and \
                            not self._send_queue_cleared.is_set():
                        self._send_queue_cleared.set()
Example #17
class ConnectionPool:
    def __init__(self, db_config, time_to_sleep=30, test_run=False):
        self.username = db_config.get('user')
        self.password = db_config.get('password')
        self.host = db_config.get('host')
        self.port = int(db_config.get('port'))
        self.max_pool_size = 20
        self.test_run = test_run
        self.pool = None
        self.time_to_sleep = time_to_sleep
        self._initialize_pool()

    def get_initialized_connection_pool(self):
        return self.pool

    def _initialize_pool(self):
        self.pool = Queue(maxsize=self.max_pool_size)
        current_pool_size = self.pool.qsize()
        if current_pool_size < self.max_pool_size:  # this is a redundant check, can be removed
            for _ in range(0, self.max_pool_size - current_pool_size):
                try:
                    conn = db.connect(host=self.host,
                                      user=self.username,
                                      passwd=self.password,
                                      port=self.port)
                    self.pool.put_nowait(conn)

                except db.OperationalError as e:
                    LOGGER.error(
                        "Cannot initialize connection pool - retrying in {} seconds"
                        .format(self.time_to_sleep))
                    LOGGER.exception(e)
                    break
        self._check_for_connection_loss()
Example #18
class C2DMService(object):
    def __init__(self, source, email, password):
        self.source = source
        self.email = email
        self.password = password
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.c2dm')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("C2DM service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception:
                    self.log.exception("Error while pushing")
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
                finally:
                    if self._send_queue.qsize() < 1 and \
                            not self._send_queue_cleared.is_set():
                        self._send_queue_cleared.set()
Example #19
class NotifyingQueue(Event):
    def __init__(self):
        super(NotifyingQueue, self).__init__()
        self._queue = Queue()

    def put(self, item):
        """ Add new item to the queue. """
        self._queue.put(item)
        self.set()

    def get(self, block=True, timeout=None):
        """ Removes and returns an item from the queue. """
        value = self._queue.get(block, timeout)
        if self._queue.empty():
            self.clear()
        return value

    def peek(self, block=True, timeout=None):
        return self._queue.peek(block, timeout)

    def __len__(self):
        return len(self._queue)

    def copy(self):
        """ Copies the current queue items. """
        # peek() always returns the head item (and `i` would be passed as the
        # block flag), so snapshot the underlying deque instead
        return list(self._queue.queue)
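Because NotifyingQueue is also a gevent Event, a consumer can block on wait() until something has been put; a small illustrative sketch:

import gevent

nq = NotifyingQueue()
gevent.spawn_later(0.1, nq.put, 'hello')
nq.wait()                    # Event.wait: returns once put() has called set()
print(nq.get(block=False))   # -> 'hello'; the queue is empty again, event cleared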
Example #20
    class Transport(object):
        def __init__(self, tid, name):
            self.__tid = tid
            self.__name = name
            self.q_maxsize = 100
            self.__task_queue = Queue(maxsize=self.q_maxsize)

        def add_task(self, task):
            if self.__task_queue.qsize() < self.q_maxsize:
                self.__task_queue.put_nowait(task)
                return True
            else:
                # queue is full
                logger.debug('The task queue is full')
                return False

        def get(self, timeout=None):
            task = None
            try:
                task = self.__task_queue.get(block=True, timeout=timeout)
            except Exception:
                if not self.__task_queue.empty():
                    logger.error('Transport thread is still processing data')
            finally:
                return task

        @property
        def name(self):
            return str(self.__name)
Example #25
class Scheduler(object):

    """ Scheduler """

    def __init__(self):
        self.request_filter = RequestFilter()
        self.queue = Queue()

    def enqueue_request(self, request):
        """put request
        """
        if not request.dont_filter \
                and self.request_filter.request_seen(request):
            logger.warn("ignore %s", request.url)
            return
        self.queue.put(request)

    def next_request(self):
        """next request
        """
        if self.queue.empty():
            return None
        return self.queue.get()

    def __len__(self):
        return self.queue.qsize()
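A minimal sketch that exercises this Scheduler; the Request and RequestFilter stubs are assumptions standing in for the project's real classes:

import logging
from gevent.queue import Queue

logger = logging.getLogger(__name__)

class Request(object):
    def __init__(self, url, dont_filter=False):
        self.url = url
        self.dont_filter = dont_filter

class RequestFilter(object):
    """Assumed dedupe filter: request_seen() is True once a URL repeats."""
    def __init__(self):
        self._seen = set()

    def request_seen(self, request):
        if request.url in self._seen:
            return True
        self._seen.add(request.url)
        return False

scheduler = Scheduler()
scheduler.enqueue_request(Request('http://example.com'))
scheduler.enqueue_request(Request('http://example.com'))  # dropped as a duplicate
assert len(scheduler) == 1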
Example #26
class DBConnectionPool(object):
    """
    The Connection Pool

    "Classic" pool of connections with connection lifecycle management
    """
    def __init__(self,
                 dsn,
                 db_module='psycopg2',
                 pool_size=10,
                 conn_lifetime=600,
                 do_log=False):
        """
        :param string dsn: DSN for the default `class:DBConnectionPool`
        :param string db_module: name of the DB-API module to use
        :param int pool_size: Poolsize of the first/default `class:DBConnectionPool`
        :param int conn_lifetime: Number of seconds after which a connection will be recycled when :meth:`.put` back
        :param bool do_log: Log to the console or not
        """
        if do_log:
            import logging
            logging.basicConfig(level=logging.INFO,
                                format="%(asctime)s %(message)s")
            self.logger = logging.getLogger()
        self.do_log = do_log
        self.dsn = dsn
        self.db_module = db_module
        self.pool_size = pool_size
        self.CONN_RECYCLE_AFTER = conn_lifetime if conn_lifetime is not None else 0
        self.pool = Queue(self.pool_size)
        __import__(db_module)
        self.connection_jobs = [gevent.spawn(self.create_connection)
                                for _ in range(self.pool_size)]
        try:
            gevent.joinall(self.connection_jobs, timeout=10)
            assert self.pool_size == self.pool.qsize()
            if self.do_log:
                self.logger.info("$ poolsize: %i" % self.pool.qsize())
            self.ready = True
        except AssertionError as e:
            raise DBPoolConnectionException(
                "Could not get %s connections for the pool as requested. %s" %
                (self.pool_size, e))
Example #28
def salt_top_run(request):
    center_server = request.POST.get('center_server')
    run_target = request.POST.get('run_target')
    state = request.POST.get('state')

    for server in center_server.split('|'):
        if not check_center_server_up(CENTER_SERVER[server][0],CENTER_SERVER[server][1]):
            return HttpResponse(json.dumps({'code':1,'msg':u'Cannot connect to %s' % server, 'cmd_results':''}),content_type="application/json")

    run_target_dict = {}
    target = []
    cmd_results = ''
    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    for i in zip(center_server.split('|'),run_target.split('|'),state.split('|')):
        if i[0] not in run_target_dict:
            run_target_dict[i[0]] = []
        run_target_dict[i[0]].append((i[1],i[2]))

    for i in run_target_dict.keys():
        master_dir = commands.getoutput('''ssh %s "grep -A2 '^file_roots' /etc/salt/master |grep 'base:' -A1|grep '-'|cut -d'-' -f2"''' % CENTER_SERVER[i][0])
        content = 'base:\n'
        for j in run_target_dict[i]:
            for n in j[0].split(','):
                if n not in target:
                    target.append(n)
                    cmd = u'state module < %s >' % j[1]
                    log.objects.create(source_ip=n,username=request.user.username,command=cmd,time=time_now)
                else:
                    return HttpResponse(json.dumps({'code':1,'msg':u'Target hosts must not be duplicated','cmd_results':cmd_results}),content_type="application/json")
                content += "  '%s':\n" % n
                for m in j[1].split(','):
                    content += '    - %s\n' % m
        content += 'EOF'
        os.system('''ssh %s "cat > %s/top.sls << EOF\n%s"''' % (CENTER_SERVER[i][0],master_dir,content))

    try:
        def gevent_run_all(CENTER_SERVER,client_send_data,p,q):
            for i in run_target_dict.keys():
                for j in run_target_dict[i]:
                    p.spawn(gevent_run,CENTER_SERVER,client_send_data,i,j[0],q)
        def gevent_run(CENTER_SERVER,client_send_data,i,j,q):
            cmd_result = client_send_data(json.dumps({'salt':1,'act':'state.highstate','hosts':j,'argv':''}),CENTER_SERVER[i][0],CENTER_SERVER[i][1])
            cmd_result = convert_str_to_html(cmd_result)
            q.put(cmd_result)
        p = Pool()
        q = Queue()
        p.spawn(gevent_run_all,CENTER_SERVER,client_send_data,p,q)
        p.join()
        for i in range(q.qsize()):
            cmd_result = q.get()
            if not cmd_results:
                cmd_results = cmd_result
            else:
                cmd_results = cmd_results + '<br><br><br><br>' + cmd_result
        return HttpResponse(json.dumps({'code':0,'msg':u'Module execution finished','cmd_results':cmd_results}),content_type="application/json")
    except Exception as e:
        return HttpResponse(json.dumps({'code':1,'msg':u'Module execution failed'}),content_type="application/json")
Example #29
class QueueStatReporter(StatReporter):
    def _start_queue(self):
        self.queue = Queue()

    def _flush_queue(self, exit_exc=None, caller=None):
        sleep(1)
        self.logger.info("Flushing a queue of size {}"
                         .format(self.queue.qsize()))
        self.queue.put(StopIteration)
        for item in self.queue:
            self._run_queue_item(item)
        self.logger.info("Queue flush complete, Exit.")

    @loop(setup='_start_queue', fin='_flush_queue')
    def _queue_proc(self):
        item = self.queue.get()
        if self._run_queue_item(item) == "retry":
            # Put it at the back of the queue for retry
            self.queue.put(item)
            sleep(1)

    def _run_queue_item(self, item):
        name, args, kwargs = item
        if __debug__:
            self.logger.debug("Queue running {} with args '{}' kwargs '{}'"
                              .format(name, args, kwargs))
        try:
            func = getattr(self, name, None)
            if func is None:
                raise NotImplementedError(
                    "Item {} has been enqueued that has no valid function!"
                    .format(name))
            func(*args, **kwargs)
        except self.queue_exceptions as e:
            self.logger.error("Unable to process queue item, retrying! "
                              "{} Name: {}; Args: {}; Kwargs: {};"
                              .format(e, name, args, kwargs))
            return "retry"
        except Exception:
            # Log any unexpected problem, but don't retry because we might
            # end up endlessly retrying with same failure
            self.logger.error("Unkown error, queue data discarded!"
                              "Name: {}; Args: {}; Kwargs: {};"
                              .format(name, args, kwargs), exc_info=True)

    def log_one_minute(self, *args, **kwargs):
        self.queue.put(("_queue_log_one_minute", args, kwargs))

    def add_block(self, *args, **kwargs):
        self.queue.put(("_queue_add_block", args, kwargs))

    def _queue_add_block(self, address, height, total_subsidy, fees, hex_bits,
                         hex_hash, currency, algo, merged=False, worker=None,
                         **kwargs):
        raise NotImplementedError

    def _queue_log_one_minute(self, address, worker, algo, stamp, typ, amount):
        raise NotImplementedError
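_flush_queue relies on a gevent-specific idiom: iterating a gevent Queue stops when the StopIteration sentinel is reached. A standalone illustration:

from gevent.queue import Queue

q = Queue()
q.put('a')
q.put('b')
q.put(StopIteration)  # sentinel: gevent's queue iterator stops here

print([item for item in q])  # -> ['a', 'b']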
Example #30
class Grep(object):
    """
    用于启动并管理grep的子进程
    """
    def __init__(self, log_path, content, timeout):
        self._log_path = log_path  # 文件路径
        self._time_out = timeout  # 子进程timeout
        self._content = content  # 需要grep的内容

        self._process = None  # 用于引用创建的子进程

        self._datas = Queue(maxsize=10)  # grep出来的数据将会放到这里来等待获取
        self._over = False  # 用于标记子进程是否已经执行完毕了
        self._is_time_out = False  # 用于标记子进程是否执行超时
        gevent.spawn(self._start)  # 在一个单独的协程中启动一个子进程

    def _start(self):
        """
        这个方法需要在一个协程中单独的执行,用于启动一个子进程,然后执行grep方法,并通过过标准输出来获取数据

        通过一个超时了控制子进程的运行时间,防止grep执行过久,占用太多系统资源,在超时之后要保证启动的grep进程被关闭
        """
        try:
            with gevent.timeout.Timeout(self._time_out):
                p = subprocess.Popen(["grep", self._content, self._log_path],
                                     stdout=subprocess.PIPE)
                self._process = p
                while 1:
                    line = p.stdout.readline()
                    if not line:  # usually means the child process has exited
                        break
                    self._datas.put(line)
        except:
            # An exception occurred: check its type, and set the timeout flag
            # if it was a timeout.
            ex = sys.exc_info()[1]
            if isinstance(ex, gevent.Timeout):
                logging.error(u"grep timed out %s:%s", self._log_path, self._content)
                self._is_time_out = True
        finally:
            if self._process and self._process.returncode is None:
                self._process.kill()
            self._over = True

    def get_data(self):
        """
        获取已经grep出来的数据,如果没有数据,那么返回None,同时需要返回启动的子进程是否已经结束,是否是超时
        """
        if self._datas.qsize() > 0:
            out = self._datas
            self._datas = Queue(maxsize=10)
            out_data = []
            while out.qsize() > 0:
                out_data.append(out.get())
            return out_data, self._over, self._is_time_out
        else:
            return None, self._over, self._is_time_out
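One way to poll the Grep helper above from another greenlet; the path and pattern are illustrative:

import gevent

g = Grep('/var/log/syslog', 'error', timeout=10)
while True:
    lines, over, timed_out = g.get_data()
    if lines:
        for line in lines:
            print(line.rstrip())
    if over:
        print('finished; timed out: %s' % timed_out)
        break
    gevent.sleep(0.5)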
Example #31
class EventPersister(StandaloneProcess):

    def on_init(self):
        # Time in between event persists
        self.persist_interval = 1.0

        # Holds received events FIFO
        self.event_queue = Queue()

        # Temporarily holds list of events to persist while datastore operation not yet completed
        self.events_to_persist = None

        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event() # when set, exits the timeout greenlet

        # The event subscriber
        self.event_sub = None

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS, callback=self._on_event)
        self.event_sub.start()

    def on_quit(self):
        # Stop event subscriber
        self.event_sub.stop()

        # tell the trigger greenlet we're done
        self._terminate_persist.set()

        # wait on the greenlet to finish cleanly
        self._persist_greenlet.join(timeout=10)

    def _on_event(self, event, *args, **kwargs):
        self.event_queue.put(event)

    def _trigger_func(self, persist_interval):
        log.debug('Starting event persister thread with persist_interval=%s', persist_interval)

        # Event.wait returns False on timeout (and True when set in on_quit), so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                self.events_to_persist = [self.event_queue.get() for _ in range(self.event_queue.qsize())]

                self._persist_events(self.events_to_persist)
                self.events_to_persist = None
            except Exception as ex:
                log.exception("Failed to persist received events")
                return False

    def _persist_events(self, event_list):
        if event_list:
            bootstrap.container_instance.event_repository.put_events(event_list)
Example #32
    def parse_results_queue(results_queue: Queue):
        results = defaultdict(lambda: defaultdict(list))
        while results_queue.qsize():
            result = results_queue.get()
            for param_name, value in result.items():
                url, reasons, type, response = value['url'], value['reasons'], value['type'], value['response']
                results[url][type].append({'param': param_name, 'reasons': reasons, 'response': response})

        return results
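parse_results_queue appears to be a static helper; assuming it can be called directly, a tiny driver shows the grouped result shape:

from gevent.queue import Queue

rq = Queue()
rq.put({'user': {'url': 'http://example.com/?q=1', 'reasons': ['reflected'],
                 'type': 'xss', 'response': '<html>...</html>'}})
grouped = parse_results_queue(rq)
print(grouped['http://example.com/?q=1']['xss'][0]['param'])  # -> 'user'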
Example #33
class NotificationService(object):
	def __init__(self, sandbox = True, **kwargs):
		if "certfile" not in kwargs:
			raise ValueError(u"Must specify a PEM bundle.")
		self._sslargs = kwargs
		self._push_connection = None
		self._feedback_connection = None
		self._sandbox = sandbox
		self._send_queue = Queue()
		self._error_queue = Queue()
		self._feedback_queue = Queue()
		self._send_greenlet = None
		self._error_greenlet = None
		self._feedback_greenlet = None

		self._send_queue_cleared = Event()

	def _check_send_connection(self):
		if self._push_connection is None:
			s = ssl.wrap_socket(socket(AF_INET, SOCK_STREAM, 0),
				ssl_version=ssl.PROTOCOL_SSLv3,
				**self._sslargs)
			addr = ["gateway.push.apple.com", 2195]
			if self._sandbox:
				addr[0] = "gateway.sandbox.push.apple.com"
			s.connect_ex(tuple(addr))
			self._push_connection = s
			self._error_greenlet = gevent.spawn(self._error_loop)

	def _check_feedback_connection(self):
		if self._feedback_connection is None:
			s = ssl.wrap_socket(socket(AF_INET, SOCK_STREAM, 0),
				ssl_version = ssl.PROTOCOL_SSLv3,
				**self._sslargs)
			addr = ["feedback.push.apple.com", 2196]
			if self._sandbox:
				addr[0] = "feedback.sandbox.push.apple.com"
			s.connect_ex(tuple(addr))
			self._feedback_connection = s

	def _send_loop(self):
		self._send_greenlet = gevent.getcurrent()
		try:
			while True:
				msg = self._send_queue.get()
				self._check_send_connection()
				try:
					self._push_connection.send(str(msg))
				except Exception:
					self._send_queue.put(msg)
					self._push_connection.close()
					self._push_connection = None
					gevent.sleep(5.0)
				finally:
					if self._send_queue.qsize() < 1 and \
							not self._send_queue_cleared.is_set():
						self._send_queue_cleared.set()
Example #36
class AsyncPool(object):

    __slots__ = ["sockets", "socket_factory", "pool_size", "log", "local", "pid" ]

    def __init__(self, socket_factory, pool_size):
        self.pid = os.getpid()
        self.pool_size = pool_size
        self.socket_factory = socket_factory
        self.sockets = Queue()
        self.log = logging.getLogger('ming.async.AsyncPool')
        self.local = local()

    def _get_sock(self):
        return getattr(self.local, 'sock', None)
    def _set_sock(self, value):
        self.local.sock = value
    sock = property(_get_sock, _set_sock)

    def socket(self):
        pid = os.getpid()

        if pid != self.pid:
            self.sock = None
            self.sockets = Queue()
            self.pid = pid

        if self.sock is not None:
            self.log.debug('Return existing socket to greenlet %s', gevent.getcurrent() )
            return self.sock
        gl = gevent.getcurrent()
        try:
            self.sock = self.sockets.get_nowait()
            self.log.debug('Checkout socket %s to greenlet %s',
                           self.sock, gl )
        except Empty:
            self.sock = self.socket_factory()
            self.log.debug('Create socket in greenlet %s', gl)
        self.sock.last_greenlet = gl
        return self.sock

    def return_socket(self):
        if self.sock is None:
            self.log.debug('No socket to return from greenlet %s', gevent.getcurrent() )
            return
        if self.sockets.qsize() < self.pool_size:
            gl = gevent.getcurrent()
            self.log.debug('Checkin socket %s from greenlet %s',
                           self.sock, gl)
            self.sockets.put(self.sock)
            self.sock = None
        else:
            self.log.debug('Close socket in greenlet %s', gevent.getcurrent() )
            self.sock.close()
            self.sock = None
        self.local.sock = None
Example #37
    def test_put_succeeds(self):
        queue = Queue()  # unbounded
        transaction.begin()

        put_nowait(queue, self)
        # still empty
        assert_that(queue.qsize(), is_(0))

        transaction.commit()

        assert_that(queue.get(block=False), is_(self))
Example #39
    def test_clear_api_client_queue(self):
        queue = Queue()
        client_dict = {'id': uuid.uuid4().hex, 'client': None}
        client_dict2 = {'id': uuid.uuid4().hex, 'client': None}
        clients_info = {
            client_dict['id']: {
                'destroy': False
            },
            client_dict2['id']: {
                'destroy': True
            }
        }
        queue.put(client_dict)
        queue.put(client_dict2)

        self.assertEqual(queue.qsize(), 2)
        clear_api_client_queue(queue, clients_info)
        self.assertEqual(queue.qsize(), 1)
        client_dict_from_queue = queue.get()
        self.assertEqual(client_dict, client_dict_from_queue)
Example #40
    def save_and_convert(self, items_queue, requests_queue):
        # cannot use ItemQueue here because ItemQueue is a singleton
        tmp_items_queue = Queue()
        # each round just parses 5 items
        while not items_queue.empty():
            if tmp_items_queue.qsize() < 5:
                tmp_items_queue.put(items_queue.get(block=True, timeout=3))
            else:
                break
        # save and convert asynchronously
        self._save_and_convert_async(items_queue=tmp_items_queue, requests_queue=requests_queue)
Example #41
    def test_put_transaction_abort(self):
        queue = Queue()
        transaction.begin()
        put_nowait(queue, 'aborted')
        transaction.abort()

        transaction.begin()
        put_nowait(queue, 'committed')
        transaction.commit()

        assert_that(queue.qsize(), is_(1))
        assert_that(queue.get(block=False), is_('committed'))
Example #44
class ReusableCursor(Greenlet):
    def __init__(self, pool, key, sql, values):
        super(ReusableCursor, self).__init__(self.work)
        self.pool = pool
        self.key = self._formatted_info = key
        self.sql = sql
        self.values = values
        self.offset = 0
        self.queue = Queue()
        self._count = AsyncResult()
        self.last_access = time.time()
        self.idle = False
        self.listeners = []
        self.window = config.xgetint('web', 'query-reusable-window', 30)

    @property
    def count(self):
        return self._count.get()

    def work(self):
        try:
            with self.pool.connection() as conn:
                cur = conn.cursor('_cur')
                cur.execute(self.sql, self.values)
                logging.debug(cur.query)
                cur_tmp = conn.cursor()
                cur_tmp.execute('MOVE ALL FROM _cur')
                self._count.set(int(cur_tmp.statusmessage.split()[-1]))
                cur_tmp.close()
                cur.scroll(0, 'absolute')
                while True:
                    if not self.queue.qsize():
                        self.idle = True
                        for l in self.listeners:
                            spawn(l.onIdle, self)
                    result, limit, offset = self.queue.get(timeout=self.window)
                    self.idle = False
                    if limit is None:
                        raise Killed(result)
                    if self.offset != offset:
                        cur.scroll(offset, 'absolute')
                    data = cur.fetchmany(limit)
                    self.offset = offset + limit
                    result.set(data)
                    self.last_access = time.time()
        except Empty:
            pass
        except Killed as k:
            k.result.set()
        finally:
Example #45
class Producer(object):
    def __init__(self, queue_size=2048):
        self.Q = Queue(maxsize=queue_size)

    def make(self):
        pass

    def run(self):
        items = self.make()
        if not items: return
        for item in items:
            self.Q.put(item)

    def sync(self, qsize=0, interval=5):
        while self.Q.qsize() > qsize:
            time.sleep(interval)
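Given the Producer above, a subclass plus a consumer greenlet might be driven as in this sketch; everything here is illustrative, and monkey-patching makes the time.sleep() inside sync() cooperative:

import gevent
from gevent import monkey; monkey.patch_all()

class NumberProducer(Producer):
    def make(self):
        return range(100)  # the base class pushes each item onto self.Q

p = NumberProducer(queue_size=16)
consumer = gevent.spawn(lambda: [p.Q.get() for _ in range(100)])
p.run()                        # blocks on the bounded queue, yielding to the consumer
p.sync(qsize=0, interval=0.1)  # poll qsize() until the queue drains
consumer.join()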
Example #46
class ConnectionPool(object):

    def __init__(self, connection_cls, maxsize=100, **kwargs):
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self._connection_cls = connection_cls
        self._maxsize = maxsize
        self._pool = Queue()
        self._size = 0
        self._conn_params = kwargs

    def get(self):
        if self._size >= self._maxsize or self._pool.qsize():
            return self._pool.get()
        else:
            self._size += 1
            try:
                return self._connection_cls(**self._conn_params)
            except:
                self._size -= 1
                raise

    def put(self, item):
        self._pool.put(item)

    def closeall(self):
        while not self._pool.empty():
            conn = self._pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass

    @contextlib.contextmanager
    def connection(self):
        conn = self.get()
        try:
            yield conn
        except:
            if conn.closed:
                conn = None
                self.closeall()
            raise
        finally:
            if conn is not None and not conn.closed:
                self.put(conn)
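A sketch of using this pool with a stand-in connection class (FakeConnection is not part of the example):

class FakeConnection(object):
    """Stand-in connection object: only tracks open/closed state."""
    def __init__(self, **kwargs):
        self.closed = False

    def close(self):
        self.closed = True

pool = ConnectionPool(FakeConnection, maxsize=2)
with pool.connection() as conn:  # first use creates a connection lazily
    assert not conn.closed
assert pool._pool.qsize() == 1   # checked back in on exiting the with-block
pool.closeall()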
Example #47
class InputTests(TestCase):
    def create(self, conf):
        self.output = Queue()
        with DummyContext():
            self.i = i = self.cls(**conf)
        i.setup(self.output)
        i.start()
        return i

    def waitForQueue(self, timeout=1.0, events=1):
        with gevent.Timeout(timeout):
            while self.output.qsize() < events:
                gevent.sleep(0.0)

        self.i.stop()
        if events:
            return [self.output.get() for n in range(events)]
Beispiel #51
0
    def test_put_transaction_savepoint(self):
        queue = Queue()
        transaction.begin()
        put_nowait(queue, 'presavepoint')
        # we can get a non-optimistic savepoint
        savepoint = transaction.savepoint(optimistic=False)
        assert_that(savepoint._savepoints, has_length(1))
        repr(savepoint._savepoints)  # cover
        put_nowait(queue, 'aftersavepoint')

        # If we roll back the savepoint now, what we just
        # did will be lost, but the original work
        # will still happen
        savepoint.rollback()
        transaction.commit()

        assert_that(queue.qsize(), is_(1))
        assert_that(queue.get(block=False), is_('presavepoint'))
Beispiel #52
0
class LimitedIMapUnordered(pool.IMapUnordered):
    def __init__(self, func, iterable, max_queue, spawn=None):
        pool.IMapUnordered.__init__(self, func, iterable, spawn)
        self.queue = Queue(max_queue)
        self.max_queue = max_queue

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                while self.queue.qsize() + self.count + 2 == self.max_queue:
                    gevent.sleep(0.1)
                self.count += 1
                self.spawn(func, item).rawlink(self._on_result)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)
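The back-pressure idea in LimitedIMapUnordered, expressed without touching IMapUnordered internals, is a bounded result queue whose blocking put() stalls workers until the consumer catches up; a sketch (all names are hypothetical):

import gevent
from gevent.pool import Pool
from gevent.queue import Queue

def imap_unordered_bounded(func, iterable, max_queue=10, concurrency=5):
    results = Queue(max_queue)           # bounded: put() blocks when full
    pool = Pool(concurrency)             # pool.spawn() blocks when saturated

    def feeder():
        for item in iterable:
            pool.spawn(lambda i: results.put(func(i)), item)
        pool.join()                      # wait for the last workers to finish
        results.put(StopIteration)       # gevent queues treat this as an end marker

    gevent.spawn(feeder)
    for value in results:                # iterating a gevent Queue stops at StopIteration
        yield value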
Beispiel #53
0
class ZMQSummarizedTestResult(ZMQTestResult):
    def __init__(self, args):
        super(ZMQSummarizedTestResult, self).__init__(args)
        self.interval = 1.
        self._data = Queue()
        gevent.spawn_later(self.interval, self._dump_data)

    def push(self, data_type, **data):
        self._data.put_nowait((data_type, data))

    def close(self):
        while not self._data.empty():
            self._dump_data(loop=False)
        self.context.destroy()

    def _dump_data(self, loop=True):
        if self._data.empty() and loop:
            gevent.spawn_later(self.interval, self._dump_data)
            return

        data = {
            'data_type': 'batch',
            'agent_id': self.agent_id,
            'hostname': get_hostname(),
            'run_id': self.run_id,
            'counts': defaultdict(list)
        }

        # grabbing what we have
        for _ in range(self._data.qsize()):
            data_type, message = self._data.get()
            data['counts'][data_type].append(message)

        while True:
            try:
                self._push.send(self.encoder.encode(data), zmq.NOBLOCK)
                break
            except zmq.ZMQError as e:
                if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                    continue
                else:
                    raise
        if loop:
            gevent.spawn_later(self.interval, self._dump_data)
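The drain loop in _dump_data snapshots qsize() first, so a burst of concurrent put_nowait() calls cannot keep one batch open forever; the same pattern in isolation:

from collections import defaultdict
from gevent.queue import Queue

data_queue = Queue()
for i in range(5):
    data_queue.put_nowait(('hit', {'n': i}))

counts = defaultdict(list)
for _ in range(data_queue.qsize()):   # a fixed number of gets, not "until empty"
    data_type, message = data_queue.get()
    counts[data_type].append(message)

print(dict(counts))                   # {'hit': [{'n': 0}, ..., {'n': 4}]}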
Beispiel #54
0
    def parse_and_convert(self, pages_queue, items_queue):
        """
        Parse pages, create Items, and send them to the items queue
        :param pages_queue: pages queue used to get Pages
        :param items_queue: items queue used to receive Items
        :return: None
        """
        tmp_pages_queue, save_data_urls_list = Queue(), []
        # each round parses at most 5 pages
        while not pages_queue.empty():
            if tmp_pages_queue.qsize() < 5:
                tmp_pages_queue.put(pages_queue.get(block=True, timeout=3))
            else:
                break
        # TODO: work out whether pages should be parsed with gevent
        # extract
        self._extract_async(pages_queue=tmp_pages_queue, save_data_urls_list=save_data_urls_list)
        # convert
        self._convert_to_item(save_data_urls_list=save_data_urls_list, items_queue=items_queue)
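The first loop above is a bounded hand-off between queues; a minimal sketch of that step on its own (take_batch is a hypothetical name):

from gevent.queue import Queue, Empty

def take_batch(src, limit=5, timeout=3):
    """Move at most `limit` items from `src` into a fresh queue."""
    batch = Queue()
    while not src.empty() and batch.qsize() < limit:
        try:
            batch.put(src.get(block=True, timeout=timeout))
        except Empty:
            break
    return batch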
Beispiel #55
0
class ZMQSummarizedTestResult(ZMQTestResult):
    def __init__(self, args):
        super(ZMQSummarizedTestResult, self).__init__(args)
        self.interval = 1.
        self._data = Queue()
        gevent.spawn_later(self.interval, self._dump_data)

    def push(self, data_type, **data):
        self._data.put_nowait((data_type, data))

    def close(self):
        while not self._data.empty():
            self._dump_data(loop=False)
        self.context.destroy()

    def _dump_data(self, loop=True):
        if self._data.empty() and loop:
            gevent.spawn_later(self.interval, self._dump_data)
            return

        data = {'data_type': 'batch',
                'agent_id': self.agent_id,
                'hostname': get_hostname(),
                'run_id': self.run_id,
                'counts': defaultdict(list)}

        # grabbing what we have
        for _ in range(self._data.qsize()):
            data_type, message = self._data.get()
            data['counts'][data_type].append(message)

        while True:
            try:
                self._push.send(self.encoder.encode(data), zmq.NOBLOCK)
                break
            except zmq.ZMQError as e:
                if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                    continue
                else:
                    raise
        if loop:
            gevent.spawn_later(self.interval, self._dump_data)
Beispiel #56
0
    def setup_bucket_sizes(self):
        """ Sets the bucket size for every request """
        args_queue = Queue()

        # Queue the different jobs for each request
        for info in self.info_list:
            for finder in self.finders:
                if not finder.is_info_searchable(info):
                    continue

                args_queue.put((finder.determine_bucket_size, info))

        # Start the workers
        workers = [
            SetBucketWorker(args_queue, self.logger)
            for _ in range(self.threads)
        ]

        greenlets = [gevent.spawn(worker.run) for worker in workers]

        # Wait for the work to finish
        while any(worker.is_running()
                  for worker in workers) or args_queue.qsize():
            gevent.sleep(0)

        # Shut down the workers
        for worker in workers:
            worker.finish()

        # Wait for them to stop
        gevent.joinall(greenlets)

        # Record the bucket sizes
        for info in self.info_list:
            for finder in self.finders:
                if not finder.is_info_searchable(info):
                    continue

                finder.set_bucket_size(info)
                self.logger.debug(
                    f'{finder.__class__.__name__}: {info.origin_url} - bucket size {finder.get_bucket_size(info)}'
                )
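SetBucketWorker's implementation is not shown above, so this is only a sketch of the worker shape the method assumes (names and behaviour are assumptions): it drains (func, info) pairs until finish() is called, and is_running() reports whether a job is in flight:

import gevent
from gevent.queue import Queue, Empty

class SetBucketWorker(object):
    def __init__(self, args_queue, logger):
        self.args_queue = args_queue
        self.logger = logger
        self._finished = False
        self._busy = False

    def is_running(self):
        return self._busy

    def finish(self):
        self._finished = True

    def run(self):
        while not self._finished:
            try:
                func, info = self.args_queue.get_nowait()
            except Empty:
                gevent.sleep(0)      # idle: yield and check again
                continue
            self._busy = True
            try:
                func(info)           # e.g. finder.determine_bucket_size(info)
            finally:
                self._busy = False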
Beispiel #57
0
    def download_and_convert(self, requests_queue, pages_queue):
        """
        Download pages using the requests or selenium downloader
        :param requests_queue: requests queue
        :param pages_queue: pages queue
        :return: None
        """
        tmp_requests_queue, tmp_selenium_queue, url_page_list = Queue(), Queue(), list()
        while not requests_queue.empty():
            request = requests_queue.get(block=True, timeout=3)
            if "SELENIUM" not in request.method:
                tmp_requests_queue.put(request)
            else:
                tmp_selenium_queue.put(request)
            if tmp_selenium_queue.qsize() == 5 or tmp_requests_queue.qsize() == 5:
                # TODO: 5 is the maximum temporary queue size
                break
        # do requests download
        self._async_requests_download(tmp_requests_queue=tmp_requests_queue, url_page_list=url_page_list)
        # do selenium download
        # TODO: this seems to run on Windows; try it on Linux
        # self._linear_selenium_download(tmp_selenium_queue=tmp_selenium_queue, url_page_list=url_page_list)
        self._convert_and_send_to_page_queue(url_page_list=url_page_list, pages_queue=pages_queue)
Beispiel #58
0
class MemoryScheduler(Scheduler):
    def __init__(self, scheduler_name):
        self._scheduler_key = 'Scheduler:{scheduler_name}'.format(
            scheduler_name=scheduler_name)
        self._queue = Queue()

    @classmethod
    def from_spider(cls, spider):
        scheduler_name = spider.settings.get('SPIDER_NAME')
        return cls(scheduler_name)

    def __len__(self):
        return self._queue.qsize()

    def push(self, r):
        self._queue.put_nowait(r)

    def pull(self,
             count=DEFAULT_SCHEDULER_PULL_COUNT,
             timeout=DEFAULT_SCHEDULER_PULL_TIMEOUT):
        try:
            return self._queue.get(timeout=timeout)
        except Empty:
            raise SchedulerEmpty
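A usage sketch for MemoryScheduler (assumes the surrounding module defines SchedulerEmpty, the DEFAULT_SCHEDULER_PULL_* constants, and imports Queue and Empty from gevent.queue):

scheduler = MemoryScheduler('example-spider')
scheduler.push({'url': 'http://example.com/1'})
scheduler.push({'url': 'http://example.com/2'})

print(len(scheduler))      # -> 2, backed by Queue.qsize()
print(scheduler.pull())    # -> {'url': 'http://example.com/1'}
print(scheduler.pull())    # -> {'url': 'http://example.com/2'}

try:
    scheduler.pull(timeout=0.1)
except SchedulerEmpty:
    print('scheduler drained')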
Beispiel #59
0
class ChainService(WiredService):

    """
    Manages the chain and requests to it.
    """
    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(privkey_hex=''))

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024

    def __init__(self, app):
        self.config = app.config
        self.db = app.services.db
        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        self.chain = Chain(self.db, new_head_cb=self._on_new_head)
        self.synchronizer = Synchronizer(self, force_sync=None)
        self.chain.coinbase = privtoaddr(self.config['eth']['privkey_hex'].decode('hex'))

        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.broadcast_filter = DuplicatesFilter()

    def _on_new_head(self, block):
        pass

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True
            gevent.spawn(self._add_blocks)

    def _add_blocks(self):
        log.debug('add_blocks', qsize=self.block_queue.qsize())
        try:
            while not self.block_queue.empty():
                t_block, proto = self.block_queue.get()
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block)
                    continue
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block)
                    # FIXME ban node
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(db=self.chain.db)
                    elapsed = time.time() - st
                    log.debug('deserialized', elapsed='%.2fs' % elapsed,
                              gas_used=block.gas_used, gpsec=int(block.gas_used / elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e)
                    # FIXME ban node
                    continue

                if self.chain.add_block(block):
                    log.debug('added', block=block)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False

    def broadcast_newblock(self, block, chain_difficulty, origin=None):
        assert isinstance(block, eth_protocol.TransientBlock)
        if self.broadcast_filter.known(block.header.hash):
            log.debug('already broadcasted block')
        else:
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'newblock', args=(block, chain_difficulty),
                  num_peers=None, exclude_protos=[origin])

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(), chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id, chain_difficulty, chain_head_hash,
                          genesis_hash):

        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != proto.network_id:
            log.warn("invalid network id", remote_id=proto.network_id, network_id=network_id)
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp-decoded, serialized transactions"
        log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
        log.debug('skipping, FIXME')
        return  # FIXME: transaction handling is disabled; the loop below is unreachable
        for tx in transactions:
            # fixme bloomfilter
            self.chain.add_transaction(tx)

    # blockhashes ###########

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        log.debug("handle_get_blockhashes", count=count, block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return

        last = child_block_hash
        while len(found) < max_hashes:
            last = rlp.decode_lazy(self.chain.db.get(last))[0][0]
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        if blockhashes:
            log.debug("on_receive_blockhashes", count=len(blockhashes), remote_id=proto,
                      first=encode_hex(blockhashes[0]), last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
                  highest_number=max(x.header.number for x in transient_blocks))
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)
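The add_block()/_add_blocks() pair above is a reusable shape: a bounded queue plus a boolean flag that guarantees at most one drainer greenlet at a time (safe here because gevent greenlets only switch at yield points). A stripped-down sketch:

import gevent
from gevent.queue import Queue

class Drainer(object):
    def __init__(self, maxsize=1024):
        self.queue = Queue(maxsize=maxsize)
        self.draining = False

    def add(self, item):
        self.queue.put(item)         # blocks the producer when full
        if not self.draining:        # no yield between check and set, so no race
            self.draining = True
            gevent.spawn(self._drain)

    def _drain(self):
        try:
            while not self.queue.empty():
                item = self.queue.get()
                # ... validate / process item ...
                gevent.sleep(0.001)  # yield so producers can keep enqueueing
        finally:
            self.draining = False    # let the next add() start a fresh drainer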
Beispiel #60
0
class Emotiv(object):
    """
    Receives, decrypts and stores packets received from Emotiv Headsets.
    """
    def __init__(self, display_output=False, serial_number="", is_research=False):
        """
        Sets up initial values.
        """
        self.running = True
        self.packets = Queue()
        self.packets_received = 0
        self.packets_processed = 0
        self.battery = 0
        self.display_output = display_output
        self.is_research = is_research
        self.sensors = {
            'F3': {'value': 0, 'quality': 0},
            'FC6': {'value': 0, 'quality': 0},
            'P7': {'value': 0, 'quality': 0},
            'T8': {'value': 0, 'quality': 0},
            'F7': {'value': 0, 'quality': 0},
            'F8': {'value': 0, 'quality': 0},
            'T7': {'value': 0, 'quality': 0},
            'P8': {'value': 0, 'quality': 0},
            'AF4': {'value': 0, 'quality': 0},
            'F4': {'value': 0, 'quality': 0},
            'AF3': {'value': 0, 'quality': 0},
            'O2': {'value': 0, 'quality': 0},
            'O1': {'value': 0, 'quality': 0},
            'FC5': {'value': 0, 'quality': 0},
            'X': {'value': 0, 'quality': 0},
            'Y': {'value': 0, 'quality': 0},
            'Unknown': {'value': 0, 'quality': 0}
        }
        self.serial_number = serial_number  # You will need to set this manually for OS X.
        self.old_model = False

    def setup(self):
        """
        Runs setup function depending on platform.
        """
        print system_platform + " detected."
        if system_platform == "Windows":
            self.setup_windows()
        elif system_platform == "Linux":
            self.setup_posix()
        elif system_platform == "Darwin":
            self.setup_darwin()

    def setup_windows(self):
        """
        Setup for headset on the Windows platform.
        """
        devices = []
        try:
            for device in hid.find_all_hid_devices():
                if device.vendor_id != 0x21A1 and device.vendor_id != 0xED02:
                    continue
                if device.product_name in ('Brain Waves', 'EPOC BCI',
                                           '00000000000', 'Emotiv RAW DATA'):
                    devices.append(device)
                    device.open()
                    self.serial_number = device.serial_number
                    device.set_raw_data_handler(self.handler)
            crypto = gevent.spawn(self.setup_crypto, self.serial_number)
            console_updater = gevent.spawn(self.update_console)
            while self.running:
                try:
                    gevent.sleep(0)
                except KeyboardInterrupt:
                    self.running = False
        finally:
            for device in devices:
                device.close()
            gevent.kill(crypto, KeyboardInterrupt)
            gevent.kill(console_updater, KeyboardInterrupt)

    def handler(self, data):
        """
        Receives packets from headset for Windows. Sends them to a Queue to be processed
        by the crypto greenlet.
        """
        assert data[0] == 0
        tasks.put_nowait(''.join(map(chr, data[1:])))
        self.packets_received += 1
        return True

    def setup_posix(self):
        """
        Setup for headset on the Linux platform.
        Receives packets from headset and sends them to a Queue to be processed
        by the crypto greenlet.
        """
        _os_decryption = False
        if os.path.exists('/dev/eeg/raw'):
            # The decryption is handled by the Linux epoc daemon. We don't need to handle it.
            _os_decryption = True
            hidraw = open("/dev/eeg/raw")
        else:
            serial, hidraw_filename = get_linux_setup()
            self.serial_number = serial
            if os.path.exists("/dev/" + hidraw_filename):
                hidraw = open("/dev/" + hidraw_filename)
            else:
                hidraw = open("/dev/hidraw4")
            crypto = gevent.spawn(self.setup_crypto, self.serial_number)
        console_updater = gevent.spawn(self.update_console)
        while self.running:
            try:
                data = hidraw.read(32)
                if data != "":
                    if _os_decryption:
                        self.packets.put_nowait(EmotivPacket(data))
                    else:
                        #Queue it!
                        self.packets_received += 1
                        tasks.put_nowait(data)
                    gevent.sleep(0)
                else:
                    # No new data from the device; yield
                    # We cannot sleep(0) here because that would go 100% CPU if both queues are empty
                    gevent.sleep(DEVICE_POLL_INTERVAL)
            except KeyboardInterrupt:
                self.running = False
        hidraw.close()
        if not _os_decryption:
            gevent.kill(crypto, KeyboardInterrupt)
        gevent.kill(console_updater, KeyboardInterrupt)

    def setup_darwin(self):
        """
        Setup for headset on the OS X platform.
        Receives packets from headset and sends them to a Queue to be processed
        by the crypto greenlet.
        """

        # Set this to True if the OS is performing the decryption of the packets
        _os_decryption = False
        # If these values are incorrect, change them to the hex equivalents
        # from the output of hid_enumerate.
        # Current values = VendorID: 8609 ProductID: 1
        #hidraw = hid.device(0x31a1, 0x2001)
        hidraw = hid.device(0x1234, 0xed02)

        hidraw.open(0x1234, 0xed02)
        self.serial_number = 'SN20120229000290'
        if not hidraw:
            hidraw = hid.device(0x21a1, 0x1234)
        if not hidraw:
            hidraw = hid.device(0xed02, 0x1234)
        if not hidraw:
            print "Device not found. Uncomment the code in setup_darwin and modify hid.device(vendor_id, product_id)"
            raise ValueError
        if self.serial_number == "":
            print "Serial number needs to be specified manually in __init__()."
            raise ValueError
        print "Serial number: " + self.serial_number
        crypto = gevent.spawn(self.setup_crypto, self.serial_number)
        console_updater = gevent.spawn(self.update_console)
        zero = 0
        while self.running:
            try:
                # Buffer size doesn't seem to matter: 32 bytes come back every time (33 on other platforms)
                data = hidraw.read(34,10)
                #data = [48]*32
                if len(data) == 32:
                    # Most of the time the leading 0 is truncated; that's OK, we'll add it back...
                    data = [zero] + data
                if data != "":
                    if _os_decryption:
                        self.packets.put_nowait(EmotivPacket(data))
                    else:
                        #Queue it!
                        print ('Queuing packet: ' + str(len(data)))
                        tasks.put_nowait(''.join(map(chr, data[1:])))
                        self.packets_received += 1

                    print ('Waiting...')
                    gevent.sleep(0.01)
                else:
                    # No new data from the device; yield
                    # We cannot sleep(0) here because that would go 100% CPU if both queues are empty.
                    gevent.sleep(DEVICE_POLL_INTERVAL)
            except KeyboardInterrupt:
                self.running = False
        hidraw.close()
        gevent.kill(crypto, KeyboardInterrupt)
        gevent.kill(console_updater, KeyboardInterrupt)

    def setup_crypto(self, sn):
        """
        Performs decryption of packets received. Stores decrypted packets in a Queue for use.
        """
        if is_old_model(sn):
            self.old_model = True
        print self.old_model
        k = ['\0'] * 16
        k[0] = sn[-1]
        k[1] = '\0'
        k[2] = sn[-2]
        if self.is_research:
            k[3] = 'H'
            k[4] = sn[-1]
            k[5] = '\0'
            k[6] = sn[-2]
            k[7] = 'T'
            k[8] = sn[-3]
            k[9] = '\x10'
            k[10] = sn[-4]
            k[11] = 'B'
        else:
            k[3] = 'T'
            k[4] = sn[-3]
            k[5] = '\x10'
            k[6] = sn[-4]
            k[7] = 'B'
            k[8] = sn[-1]
            k[9] = '\0'
            k[10] = sn[-2]
            k[11] = 'H'
        k[12] = sn[-3]
        k[13] = '\0'
        k[14] = sn[-4]
        k[15] = 'P'
        key = ''.join(k)
        cipher = AES.new(key, AES.MODE_ECB)  # ECB mode takes no IV; the original passed an unused one
        print ("Encryption...")
        for i in k:
            print "0x%.02x " % (ord(i))
        while self.running:
            print ("Check encryption queue...")
            while not tasks.empty():
                task = tasks.get()
                try:
                    print ("Adding packets to queue...:"+str(self.packets.qsize()))
                    data = cipher.decrypt(task[:16]) + cipher.decrypt(task[16:])
                    print ("Data received:"+str(len(data)))
                    self.packets.put_nowait(EmotivPacket(data, self.sensors, self.old_model))
                    self.packets_processed += 1
                    print ("Packets added..")
                except Exception as ex:
                    print ("Exception raised..: " + str(ex))
                gevent.sleep(0.01)
            gevent.sleep(0.01)

    def dequeue(self):
        """
        Returns an EmotivPacket popped off the Queue.
        """
        try:
            print ("Returning something...:"+str(self.packets.empty()))
            return self.packets.get()
        except Exception as e:
            print e
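The whole class reduces to a two-queue pipeline: a platform-specific reader fills a raw queue (tasks above), a crypto greenlet decrypts into self.packets, and dequeue() pops from the latter. A minimal sketch with a stub in place of the real AES step:

import gevent
from gevent.queue import Queue

raw_queue = Queue()        # filled by the platform-specific reader ("tasks" above)
packet_queue = Queue()     # what dequeue() consumes ("self.packets" above)

def decryptor(decrypt):
    while True:
        data = raw_queue.get()                   # wait for one 32-byte report
        packet_queue.put_nowait(decrypt(data))   # hand the decoded packet on
        gevent.sleep(0)                          # yield between packets

gevent.spawn(decryptor, lambda data: data)       # identity stub for the AES step

raw_queue.put(b'\x00' * 32)
print(len(packet_queue.get()))                   # -> 32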