Example #1
    def test_blocking_add_job(self):
        """Test add_job blocks until some jobs are processed by the threads"""
        pool = ThreadPool(nthreads=1, func=TestThreadPool.length_func, maxlen=1)

        initial_time = time()
        for i in range(10):
            pool.add_job(i, block=True)
        final_time = time()
        self.assertGreater(final_time - initial_time, 0.5)
        pool.close()
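
TestThreadPool.length_func is not included in this listing. A minimal sketch of the helper the test appears to assume, written as a @staticmethod on TestThreadPool (the 0.1 s sleep is a guess, sized so that pushing ten jobs through a maxlen=1 queue with a single worker takes well over the asserted 0.5 s; it also assumes from time import sleep):

    @staticmethod
    def length_func(job):
        # Hypothetical worker: simulate slow processing of a single job so
        # that add_job(..., block=True) has to wait for queue space
        sleep(0.1)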
Example #2
    def test_func_args(self):
        """Test args are passed to function"""
        q = queue.Queue()
        pool = ThreadPool(func=TestThreadPool.args_func, args=(q, 1, 2, 3))
        pool.add_job(1)

        processed = q.get()
        self.assertEqual(processed, (1, 2, 3))
        pool.close()
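
args_func is not shown either. Since add_job(1) results in (1, 2, 3) being read back from the queue, the pool presumably invokes func(job, *args); a hedged sketch under that assumption:

    @staticmethod
    def args_func(job, q, *args):
        # Assumed call shape: args_func(job, q, 1, 2, 3); echo the extra
        # positional arguments back so the test can inspect them
        q.put(args)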
Example #3
    def test_queue_length(self):
        """Test job queue max length"""
        pool = ThreadPool(nthreads=1, func=TestThreadPool.length_func, maxlen=5)
        
        # Add jobs until the queue is filled
        with self.assertRaises(queue.Full):
            for i in range(30):
                pool.add_job(i, block=False)

        self.assertLess(i, 30)
        pool.close()
Example #4
    def test_kwargs(self):
        """Test kwargs are passed to function"""
        q = queue.Queue()
        d = {"a": 1, "b": 2}
        pool = ThreadPool(func=TestThreadPool.kwargs_func, args=(q,), kwargs=d)
        pool.add_job(1)

        processed = q.get()
        self.assertEqual(len(d), len(processed))
        for arg, value in processed.items():
            self.assertEqual(value, d[arg])
            
        pool.close()
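
Likewise, a sketch of kwargs_func under the assumption that the pool invokes func(job, *args, **kwargs):

    @staticmethod
    def kwargs_func(job, q, **kwargs):
        # Echo the keyword arguments back so the test can compare them to d
        q.put(kwargs)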
Example #5
    def test_close_now(self):
        """Close pool discarding pending jobs"""
        q = queue.Queue()
        pool = ThreadPool(nthreads=1, func=TestThreadPool.close_func, args=(q,))
        
        for i in range(20):  # ~1 second of processing in total
            pool.add_job(i)

        sleep(0.1)
        pool.close(now=True)

        # Check that some jobs were discarded
        seen = set()
        while True:
            try:      
                tid = q.get(timeout=0.2)
                seen.add(tid)
            except queue.Empty:
                break

        self.assertLess(len(seen), 10)
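
close_func is also absent from the listing. Because test_close below expects 20 distinct items after 20 jobs, the helper presumably records each job it processes; the 0.05 s sleep is an assumption sized so that 20 jobs amount to roughly the one second of processing mentioned in the comment above:

    @staticmethod
    def close_func(job, q):
        # ~0.05 s per job: with a single worker, close(now=True) after a
        # 0.1 s head start leaves most of the 20 jobs unprocessed
        sleep(0.05)
        q.put(job)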
Example #6
    def test_close(self):
        """Close pool after finishing without discarding job queue"""
        q = queue.Queue()
        pool = ThreadPool(nthreads=1, func=TestThreadPool.close_func, args=(q,))
        
        for i in range(20):  # ~1 second of processing in total
            pool.add_job(i)

        sleep(0.1)
        pool.close(now=False)

        # Check that all jobs were finished before exit
        seen = set()
        while True:
            try:      
                tid = q.get(timeout=0.2)
                seen.add(tid)
            except queue.Empty:
                break

        self.assertEqual(len(seen), 20)
Example #7
    def test_number_of_threads(self):
        """Test the requested number of threads are created"""
        nthreads = 11

        # Check the correct number of threads is created
        q = queue.Queue()
        pool = ThreadPool(nthreads, TestThreadPool.id_func, args=[q,])

        for i in range(nthreads*2):
            pool.add_job(i)

        # Wait until all jobs are completed
        sleep(3*0.05*nthreads)
        seen = set()
        while True:
            try:
                tid = q.get(block=False)
                seen.add(tid)
            except queue.Empty:
                break

        # Check nthreads were used
        self.assertEqual(len(seen), nthreads)
        pool.close()
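
The ThreadPool class these tests exercise is not part of the listing. The following is a minimal sketch of a pool with the same interface (nthreads, func, args, kwargs, maxlen; add_job(job, block=...) raising queue.Full; close(now=...)). It is an illustration of one way such a pool could be structured, not the actual implementation, and the id_func at the end mirrors what test_number_of_threads seems to assume:

import queue
import threading
from time import sleep


class ThreadPool:
    """Sketch: fixed set of worker threads calling func(job, *args, **kwargs)."""

    _CLOSE = object()  # sentinel used to wake workers up on close

    def __init__(self, nthreads=1, func=None, args=(), kwargs=None, maxlen=0):
        self._func = func
        self._args = args
        self._kwargs = kwargs or {}
        self._jobs = queue.Queue(maxsize=maxlen)  # maxlen=0 means unbounded
        self._threads = [threading.Thread(target=self._worker, daemon=True)
                         for _ in range(nthreads)]
        for thread in self._threads:
            thread.start()

    def _worker(self):
        while True:
            job = self._jobs.get()
            if job is ThreadPool._CLOSE:
                break
            try:
                self._func(job, *self._args, **self._kwargs)
            finally:
                self._jobs.task_done()

    def add_job(self, job, block=True):
        # With block=False this raises queue.Full once maxlen is reached,
        # which is what test_queue_length relies on
        self._jobs.put(job, block=block)

    def close(self, now=False):
        if now:
            # Discard whatever is still queued before stopping the workers
            while True:
                try:
                    self._jobs.get(block=False)
                except queue.Empty:
                    break
        else:
            # Wait until every queued job has been processed
            self._jobs.join()
        for _ in self._threads:
            self._jobs.put(ThreadPool._CLOSE)
        for thread in self._threads:
            thread.join()


def id_func(job, q, delay=0.05):
    # Hypothetical helper for test_number_of_threads: report which thread
    # handled the job; the delay keeps all workers busy long enough that
    # every one of them processes at least one job
    sleep(delay)
    q.put(threading.get_ident())

The sentinel-based shutdown is only one common design choice; the real pool may instead use a stop flag or another signalling mechanism.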
Example #9
class CallbackManager(object):
    def __init__(self,
                 db_session,
                 retries=3,
                 retry_period=120,
                 nthreads=10,
                 recover_db=True):
        # Dictionary containing all callbacks, indexed by (txid, addr)
        self._callbacks = {}

        # Queue of callback ids to retry, ordered by last retry time
        self._retry_q = collections.deque()

        # SQLAlchemy session
        self._db_session = db_session

        # Queue where ThreadPool threads store the ids of sent callbacks
        self._sent_q = queue.Queue()

        # Lock for everything except DB access
        self._lock = threading.Lock()

        # DB access lock
        self._db_lock = threading.Lock()

        # Max number of callback retries
        self.retries = retries

        # Wait between successive unacknowledged callback retries
        self.retry_period = retry_period

        # Start request thread pool
        self._thread_pool = ThreadPool(nthreads=nthreads,
                                       func=CallbackManager._send_thread_func,
                                       args=(self._sent_q, ))

        # Flag used to notify update_thread to stop
        self._close_flag = threading.Event()

        # Start update thread
        self._update_thread = threading.Thread(
            target=CallbackManager._update_func, args=(self, ), daemon=True)
        self._update_thread.start()

        # Recover unfinished callbacks from DB
        if not recover_db:
            return

        # Load unfinished callbacks from DB
        with self._db_lock:
            with make_session_scope(self._db_session) as session:
                pending = session.query(Callback).filter(
                    Callback.acknowledged == False,
                    Callback.retries > 0).all()

        # Add unfinished to retry queue.
        pending = sorted(pending, key=lambda c: c.last_retry)
        with self._lock:
            for cback in pending:
                record = CallbackRecord(cback.id, cback.retries,
                                        cback.last_retry)
                self._callbacks[record.id] = record
                self._retry_q.append(record.id)

    def _get_session(self):
        return self._db_session()

    @staticmethod
    def _send_thread_func(job, sent_q):
        """Function used by ThreadPool to send callbacks, one sent
        it places its id on sent_q"""
        callback_id, json, url = job

        try:
            requests.post(url, json=json, timeout=CALLBACK_REQUEST_TIMEOUT)
        except requests.RequestException:
            pass
        except Exception:
            pass

        sent_q.put(callback_id)

    def ack_callback(self, callback_id):
        """Mark callback as acknolewdged, return False if it didn't exist
        True otherwise"""

        # The callback is removed from the callback dictionary to
        # signify it was acknowledged
        with self._lock:
            callback = self._callbacks.pop(callback_id, None)

            # Check there was a callback with the given id
            if callback is None:
                return False

        with self._db_lock:
            with make_session_scope(self._db_session) as session:
                session.query(Callback).filter_by(id=callback_id)\
                        .update({'acknowledged': True})

        return True

    def new_callback(self, callback):
        """Add new callback to queue"""
        callback = Callback.from_callback_data(callback,
                                               retries=self.retries + 1)

        with self._db_lock:
            with make_session_scope(self._db_session) as session:
                session.add(callback)

        with self._lock:
            record = CallbackRecord(callback.id, callback.retries,
                                    callback.last_retry)
            self._callbacks[callback.id] = record
            # Set callback as next for delivery
            self._retry_q.appendleft(callback.id)

    def close(self, timeout=None):
        """Close all allocated resources"""
        self._thread_pool.close()
        self._close_flag.set()  # To notify update thread
        self._update_thread.join(timeout)
        self._db_session.close()

    def _next_sent(self):
        """Deque an return first callback record ready for delivery or None"""
        retry_limit = datetime.utcnow() - timedelta(seconds=self.retry_period)
        with self._lock:
            callback_id = self._retry_q[0]

            # Check if callback was acknowledged
            callback_record = self._callbacks.get(callback_id, None)
            if callback_record is None:
                return None

            # Check the callback at the head of the queue is ready to be sent
            if callback_record.last_retry > retry_limit:
                return None

            callback_id = self._retry_q.popleft()

        return callback_id

    def _send_ready(self):
        """Enqueue ready to send callbacks into thread_pool job queue"""

        # Send callbacks ready for a retry
        while len(self._retry_q):

            callback_id = self._next_sent()
            if callback_id is None:
                break

            # Construct callback request json
            with self._db_lock:
                with make_session_scope(self._db_session) as session:
                    callback = session.query(Callback).get(callback_id)
                    json = callback.to_request()
                    url = callback.subscription.callback_url

            # Add to thread pool job queue
            try:
                job = (callback_id, json, url)
                self._thread_pool.add_job(job, block=False)
            except queue.Full:
                with self._lock:
                    # If the queue is full, wait until the next update
                    self._retry_q.appendleft(callback_id)
                    break

    def _process_sent(self):
        """Process callbacks marked as sent by the thread_pool"""
        # Process all callbacks marked as sent by thread_pool
        while True:
            try:
                callback_id = self._sent_q.get(block=True, timeout=1)

                with self._lock:
                    # If callback was acknowledged while being sent discard it
                    # and keep looping
                    record = self._callbacks.get(callback_id, None)
                    if not record:
                        continue

                    # Update callback and enqueue for a retry if there are any remaining,
                    # otherwise discard it.
                    if record.retries > 0:
                        record = CallbackRecord(callback_id,
                                                record.retries - 1,
                                                datetime.utcnow())
                        self._callbacks[callback_id] = record
                        self._retry_q.append(callback_id)
                    else:
                        del self._callbacks[callback_id]

                # Save all changes to db
                with self._db_lock:
                    with make_session_scope(self._db_session) as session:
                        update_fields = {
                            'retries': record.retries,
                            'last_retry': record.last_retry
                        }
                        session.query(Callback).filter_by(id=record.id)\
                            .update(update_fields)

            except queue.Empty:
                # No callbacks remaining in the queue
                break

    @staticmethod
    def _update_func(callback_manager):
        """Function used by periodic update thread"""
        while True:
            if callback_manager._close_flag.is_set():
                break
            callback_manager._send_ready()
            callback_manager._process_sent()

    def __len__(self):
        return len(self._callbacks)
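
CallbackRecord and make_session_scope are used throughout the class but are not part of this listing. Based purely on how they are called above, plausible sketches look like the following; the real definitions may well differ:

import collections
from contextlib import contextmanager

# In-memory record mirroring the fields the manager tracks per callback
CallbackRecord = collections.namedtuple('CallbackRecord',
                                        ['id', 'retries', 'last_retry'])


@contextmanager
def make_session_scope(session_factory):
    """Transactional scope: commit on success, roll back on error."""
    # session_factory is assumed to be callable (e.g. a scoped_session)
    session = session_factory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

A namedtuple fits the replace-on-update pattern seen in _process_sent, where a fresh CallbackRecord is created rather than mutating the existing one.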