Code Example #1
class ThreadTestCase(testtools.TestCase):
    def setUp(self):
        super(ThreadTestCase, self).setUp()
        self.got_items = Queue()  # _func() below puts received items here
        self.got_args_kwargs = Queue()
        self.starting_thread_count = threading.active_count()

    def _func(self, q_item, *args, **kwargs):
        self.got_items.put(q_item)
        self.got_args_kwargs.put((args, kwargs))

        if q_item == 'go boom':
            raise Exception('I went boom!')
        if q_item == 'c boom':
            raise ClientException(
                'Client Boom', http_scheme='http', http_host='192.168.22.1',
                http_port=80, http_path='/booze', http_status=404,
                http_reason='too much', http_response_content='no sir!')

        return 'best result EVAR!'

    def assertQueueContains(self, queue, expected_contents):
        got_contents = []
        try:
            while True:
                got_contents.append(queue.get(timeout=0.1))
        except Empty:
            pass
        if isinstance(expected_contents, set):
            got_contents = set(got_contents)
        self.assertEqual(expected_contents, got_contents)
Code Example #2
File: handler.py Project: swift-nav/libsbp
    class _SBPQueueIterator(six.Iterator):
        """
        Class for upstream iterators.  Implements callable interface for adding
        messages into the queue, and iterable interface for getting them out.
        """

        def __init__(self, maxsize):
            self._queue = Queue(maxsize)
            self._broken = False

        def __iter__(self):
            return self

        def __call__(self, msg, **metadata):
            self._queue.put((msg, metadata), False)

        def breakiter(self):
            self._broken = True
            self._queue.put(None, True, 1.0)

        def __next__(self):
            if self._broken and self._queue.empty():
                raise StopIteration
            m = self._queue.get(True)
            if self._broken and m is None:
                raise StopIteration
            return m
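The producer/consumer pattern above is easier to see in isolation. Below is a hedged, self-contained sketch (Python 3 only, dropping the six dependency; the Demo* names are illustrative and not part of libsbp) of how such a queue-backed iterator is driven:

from queue import Queue
from threading import Thread
import time

class DemoQueueIterator:
    # Same callable-producer / iterable-consumer pattern as above.
    def __init__(self, maxsize=0):
        self._queue = Queue(maxsize)
        self._broken = False

    def __iter__(self):
        return self

    def __call__(self, msg, **metadata):
        self._queue.put((msg, metadata), False)

    def breakiter(self):
        # Mark the stream finished, then enqueue a sentinel so a blocked
        # consumer wakes up and sees the flag.
        self._broken = True
        self._queue.put(None, True, 1.0)

    def __next__(self):
        if self._broken and self._queue.empty():
            raise StopIteration
        m = self._queue.get(True)
        if self._broken and m is None:
            raise StopIteration
        return m

it = DemoQueueIterator()

def produce():
    for i in range(3):
        it('msg-%d' % i, seq=i)  # the callable interface enqueues
        time.sleep(0.01)
    it.breakiter()               # ends the for-loop below

Thread(target=produce).start()
for msg, meta in it:             # the iterable interface dequeues
    print(msg, meta)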
Code Example #3
File: pool.py Project: izderadicka/imap_detach
class Pool(object):
    class Error(Exception):
        pass
    
    def __init__(self, threads, host, port, ssl, user, password):
        self._threads=[]
        self._queue = Queue(maxsize=1000)
        count=0
        while len(self._threads) < threads and count < 3* threads:
            try:
                count+=1
                w=Downloader(self._queue, host, port, ssl, user, password)
                w.start()
                self._threads.append(w)
            except SOFT_ERRORS as e:
                log.warn('Cannot create downloader thread: %s', e)
                
        if len(self._threads) != threads:
            log.error('Cannot create enough workers')
            raise Pool.Error('Cannot create enough workers')
        
    def wait_finish(self):
        self._queue.join()
        
    def stop(self):
        for t in self._threads:
            t.stop()
        
    def download(self, **kwargs):
        kwargs['retry']=0
        self._queue.put(kwargs)
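Pool depends on a Downloader thread class and a SOFT_ERRORS tuple that are not shown here. A minimal sketch of the shape Pool appears to expect (an assumption for illustration, not the imap_detach implementation):

import threading
from queue import Queue, Empty

SOFT_ERRORS = (OSError,)  # assumption: transient errors worth tolerating

class Downloader(threading.Thread):
    """Consumes kwargs dicts from the shared queue; calling task_done()
    for each item is what lets Pool.wait_finish() (queue.join()) return."""

    def __init__(self, queue, host, port, ssl, user, password):
        super(Downloader, self).__init__()
        self.daemon = True
        self._queue = queue
        self._stopped = False
        # a real implementation would open its IMAP connection here

    def stop(self):
        self._stopped = True

    def run(self):
        while not self._stopped:
            try:
                kwargs = self._queue.get(timeout=0.5)
            except Empty:
                continue
            try:
                pass  # fetch one message here, retrying via kwargs['retry']
            finally:
                self._queue.task_done()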
Code Example #4
File: __init__.py Project: MahatmaCane/iris
    def lines(self, fuseki_process):
        """
        Provides an iterator generating the encoded string representation
        of each member of this metarelate mapping translation.

        Returns:
            An iterator of string.

        """
        msg = '\tGenerating phenomenon translation {!r}.'
        print(msg.format(self.mapping_name))
        lines = ['\n%s = {\n' % self.mapping_name]
        # Retrieve encodings for the collection of mapping instances.
        # Retrieval is threaded as it is heavily bound by resource resolution
        # over http.
        # Queue for metarelate mapping instances
        mapenc_queue = Queue()
        for mapping in self.mappings:
            mapenc_queue.put(mapping)
        # deque to contain the results of the jobs processed from the queue
        mapencs = deque()
        # run worker threads
        for i in range(MAXTHREADS):
            MappingEncodeWorker(mapenc_queue, mapencs, fuseki_process).start()
        # block progress until the queue is empty
        mapenc_queue.join()
        # end of threaded retrieval process.

        # now sort the payload
        payload = [mapenc.encoding for mapenc in mapencs]
        payload.sort(key=self._key)
        lines.extend(payload)
        lines.append('    }\n')
        return iter(lines)
Code Example #5
class ThreadTestCase(testtools.TestCase):
    def setUp(self):
        super(ThreadTestCase, self).setUp()
        self.got_items = Queue()
        self.got_args_kwargs = Queue()
        self.starting_thread_count = threading.active_count()

    def _func(self, conn, item, *args, **kwargs):
        self.got_items.put((conn, item))
        self.got_args_kwargs.put((args, kwargs))

        if item == 'sleep':
            sleep(1)
        if item == 'go boom':
            raise Exception('I went boom!')

        return 'success'

    def _create_conn(self):
        return "This is a connection"

    def _create_conn_fail(self):
        raise Exception("This is a failed connection")

    def assertQueueContains(self, queue, expected_contents):
        got_contents = []
        try:
            while True:
                got_contents.append(queue.get(timeout=0.1))
        except Empty:
            pass
        if isinstance(expected_contents, set):
            got_contents = set(got_contents)
        self.assertEqual(expected_contents, got_contents)
Code Example #6
File: search.py Project: Magica-Chen/pyro
class Search(TracePosterior):
    """
    Trace and Poutine-based implementation of systematic search.

    :param callable model: Probabilistic model defined as a function.
    :param int max_tries: The maximum number of times to try completing a trace from the queue.
    """
    def __init__(self, model, max_tries=1e6):
        """
        Constructor. Defaults `max_tries` to something sensible: 1e6.

        :param callable model: Probabilistic model defined as a function.
        :param int max_tries: The maximum number of times to try completing a trace from the queue.
        """
        self.model = model
        self.max_tries = int(max_tries)

    def _traces(self, *args, **kwargs):
        """
        Algorithm entry point. Running until the queue is empty and collecting
        the marginal histogram amounts to performing exact inference.

        :returns: Iterator of traces from the posterior.
        :rtype: Generator[:class:`pyro.Trace`]
        """
        # currently only using the standard library queue
        self.queue = Queue()
        self.queue.put(poutine.Trace())

        p = poutine.trace(
            poutine.queue(self.model, queue=self.queue, max_tries=self.max_tries))
        while not self.queue.empty():
            tr = p.get_trace(*args, **kwargs)
            yield (tr, tr.log_pdf())
Code Example #7
File: test_monitor.py Project: piotrmaslanka/satella
    def test_acquire_contextmanager(self):
        class TestedClass(Monitor):
            def __init__(self, cqueue):
                self.cqueue = cqueue
                Monitor.__init__(self)

            @Monitor.synchronized
            def execute(self):
                self.cqueue.put(1)
                sleep(1)
                self.cqueue.get()

        class TesterThread(Thread):
            def __init__(self, tc):
                self.tc = tc
                Thread.__init__(self)

            def run(self):
                self.tc.execute()

        cq = Queue()
        cq.put(1)
        tc = TestedClass(cq)
        tt = TesterThread(tc)

        with Monitor.acquire(tc):
            tt.start()
            sleep(0.4)
            self.assertEqual(cq.qsize(), 1)
Code Example #8
File: VideoBuffer.py Project: esitarski/CrossMgr
class FrameSaver( threading.Thread ):
	def __init__( self ):
		threading.Thread.__init__( self )
		self.daemon = True
		self.name = 'FrameSaver'
		self.reset()
	
	def reset( self ):
		self.queue = Queue()
	
	def run( self ):
		self.reset()
		while 1:
			message = self.queue.get()
			if   message[0] == 'Save':
				cmd, fileName, bib, t, frame = message
				#sys.stderr.write( 'save' )
				PhotoFinish.SavePhoto( fileName, bib, t, frame )
				self.queue.task_done()
			elif message[0] == 'Terminate':
				self.queue.task_done()
				self.reset()
				break
	
	def stop( self ):
		self.queue.put( ['Terminate'] )
		self.join()
	
	def save( self, fileName, bib, t, frame ):
		self.queue.put( ['Save', fileName, bib, t, frame] )
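A hedged usage sketch, assuming the imports FrameSaver needs (import threading, from queue import Queue) and a PhotoFinish module providing SavePhoto(fileName, bib, t, frame) are in scope:

fs = FrameSaver()
fs.start()
fs.save('finish.jpg', bib=11, t=12.34, frame=b'<jpeg bytes>')  # placeholder frame data
fs.stop()   # enqueues 'Terminate' and joins the thread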
Code Example #9
File: utils.py Project: ProstoKSI/distributed-queue
class StoppableThread(threading.Thread):
    """This is thread can be stopped.

    Note: Thread by default does not return function result in any case,
    which is why I've implemented this workaroung with built-in Queue.
    """
    def __init__(self, **kwargs):
        super(StoppableThread, self).__init__(**kwargs)
        self.__target = kwargs.get('target')
        self.__args = kwargs.get('args')
        if self.__args is None:
            self.__args = ()
        self.__kwargs = kwargs.get('kwargs')
        if self.__kwargs is None:
            self.__kwargs = {}
        self.__result_queue = Queue()
        self.__stopped = threading.Event()

    def stop(self):
        """Stop the thread. It will not terminate code, but set the flag that
        should be handled in executed function.
        """
        self.__stopped.set()

    def is_stopped(self):
        """Check the status of the thread. It only monitors the flag state. If
        task is stopped you have to pay attention to `.is_alive()`.
        """
        return self.__stopped.is_set()

    def run(self):
        """Run the target function, check expected result and propagate
        exceptions.
        """
        try:
            self.__kwargs['_is_stopped'] = self.__stopped.is_set
            func_result = None  # stays None when no target was given
            try:
                if self.__target:
                    func_result = self.__target(*self.__args, **self.__kwargs)
            finally:
                # Avoid a refcycle if the thread is running a function with
                # an argument that has a member that points to the thread.
                del self.__target, self.__args, self.__kwargs
            if func_result is None:
                func_result = {}
            elif not isinstance(func_result, dict):
                raise TypeError("Task has to return a dict or None.")
        except Exception: # pylint: disable=W0703
            self.__result_queue.put(traceback.format_exc())
        else:
            self.__result_queue.put(func_result)

    def get_result(self):
        """Return results of target function execution.
        """
        self.join()
        try:
            return self.__result_queue.get_nowait()
        except Empty:  # queue.Empty; the Queue class has no .Empty attribute
            return None
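A hedged usage sketch, assuming the class above plus the imports it needs (import threading, traceback and from queue import Queue, Empty). The target must accept the injected _is_stopped callable and return a dict or None:

import time

def task(n, _is_stopped=None):
    done = 0
    for _ in range(n):
        if _is_stopped():        # cooperative cancellation point
            break
        time.sleep(0.1)
        done += 1
    return {'done': done}

t = StoppableThread(target=task, args=(50,))
t.start()
time.sleep(0.35)
t.stop()                         # sets the Event the task polls
print(t.get_result())            # e.g. {'done': 3}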
Code Example #10
File: stress.py Project: dvska/splash
    def run(self):
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)

        starttime = time.time()
        q, p = Queue(), Queue()
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()

        outputs = []
        for _ in six.moves.range(self.requests):
            outputs.append(p.get())

        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)
        print("Elapsed time  : %.3fs" % elapsed)
        print("Avg time p/req: %.3fs" % (elapsed / len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print("  %s: %d" % (c, n))
Code Example #11
File: executor.py Project: tamland/python-actors
class Executor(object):
    _INTERRUPT = object()

    def __init__(self, num_workers=1):
        super(Executor, self).__init__()
        self._queue = Queue()
        self._workers = []

        for _ in range(num_workers):
            th = Thread(target=self._work)
            th.start()
            self._workers.append(th)

    def submit(self, task):
        self._queue.put(task)

    def shutdown(self):
        for _ in self._workers:
            self._queue.put(self._INTERRUPT)

    def join(self):
        for worker in self._workers:
            worker.join()

    def _work(self):
        while True:
            task = self._queue.get(block=True)
            if task is self._INTERRUPT:
                break
            try:
                task()
            except BaseException as e:
                logger.exception(e)
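A hedged usage sketch, assuming the imports the class needs (from queue import Queue, from threading import Thread, and a module-level logger) are in scope:

ex = Executor(num_workers=2)
for i in range(5):
    ex.submit(lambda i=i: print('task', i))  # tasks are zero-argument callables
ex.shutdown()   # enqueues one _INTERRUPT sentinel per worker
ex.join()       # blocks until every worker has drained the queue and exited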
Code Example #12
File: TagGroup.py Project: esitarski/CrossMgr
class TagGroup( object ):
	'''
		Process groups of tag reads and return the best time estimated using quadratic regression.
		Stray reads are also detected if there is no quiet period for the tag.
		The first read time of each stray read is returned.
	'''
	def __init__( self ):
		self.q = Queue()
		self.tagInfo = {}
		
	def add( self, antenna, tag, t, db ):
		self.q.put((antenna, tag, t, db))

	def flush( self ):
		# Process all waiting reads.
		while 1:
			try:
				antenna, tag, t, db = self.q.get(False)
			except Empty:
				break
			try:
				self.tagInfo[tag].add( antenna, t, db )
			except KeyError:
				self.tagInfo[tag] = TagGroupEntry( antenna, t, db )
			self.q.task_done()
			
	def getReadsStrays( self, tNow=None, method=QuadraticRegressionMethod, antennaChoice=MostReadsChoice, removeOutliers=True ):
		'''
			Returns two lists:
				reads = [(tag1, t1, sampleSize1, antennaID1), (tag2, t2, sampleSize2, antennaID2), ...]
				strays = [(tagA, tFirstReadA), (tagB, tFirstReadB), ...]
				
			Each stray will be reported as a read the first time it is detected.
		'''
		self.flush()
		
		trNow = datetimeToTr( tNow or datetime.now() )
		reads, strays = [], []
		toDelete = []
		
		for tag, tge in six.iteritems(self.tagInfo):
			if trNow - tge.lastReadMax >= tQuiet:				# Tag has left read range.
				if not tge.isStray:
					t, sampleSize, antennaID = tge.getBestEstimate(method, antennaChoice, removeOutliers)
					reads.append( (tag, t, sampleSize, antennaID) )
				toDelete.append( tag )
			elif tge.lastReadMax - tge.firstReadMin >= tStray:	# This is a stray.
				t = trToDatetime( tge.firstReadMin )
				if not tge.isStray:
					tge.setStray()
					reads.append( (tag, t, 1, 0) )				# Report stray first read time.
				strays.append( (tag, t) )
				
		for tag in toDelete:
			del self.tagInfo[tag]
		
		reads.sort( key=operator.itemgetter(1,0))
		strays.sort( key=operator.itemgetter(1,0) )
		return reads, strays
Code Example #13
File: log_printer_test.py Project: aronahl/compose
    def test_item_is_stop_with_cascade_stop(self):
        """Return the name of the container that caused the cascade_stop"""
        queue = Queue()
        for item in QueueItem.stop('foobar-1'), QueueItem.new('a'), QueueItem.new('b'):
            queue.put(item)

        generator = consume_queue(queue, True)
        assert next(generator) == 'foobar-1'  # '==', not 'is': string identity is not guaranteed
Code Example #14
File: log_printer_test.py Project: 9923/compose
    def test_item_is_stop_without_cascade_stop(self):
        queue = Queue()
        for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'):
            queue.put(item)

        generator = consume_queue(queue, False)
        assert next(generator) == 'a'
        assert next(generator) == 'b'
Code Example #15
File: main.py Project: developmentseed/sentinel-s3
def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None,
                   num_worker_threads=1):
    """ Extra metadata for all products in a specific date """

    threaded = False

    counter = {
        'products': 0,
        'saved_tiles': 0,
        'skipped_tiles': 0,
        'skipped_tiles_paths': []
    }

    if num_worker_threads > 1:
        threaded = True
        queue = Queue()

    # create folders
    year_dir = os.path.join(dst_folder, str(year))
    month_dir = os.path.join(year_dir, str(month))
    day_dir = os.path.join(month_dir, str(day))

    product_list = get_products_metadata_path(year, month, day)

    logger.info('There are %s products in %s-%s-%s' % (len(list(iterkeys(product_list))),
                                                       year, month, day))

    for name, product in iteritems(product_list):
        product_dir = os.path.join(day_dir, name)

        if threaded:
            queue.put([product, product_dir, counter, writers, geometry_check])
        else:
            counter = product_metadata(product, product_dir, counter, writers, geometry_check)

    if threaded:
        def worker():
            while not queue.empty():
                args = queue.get()
                try:
                    product_metadata(*args)
                except Exception:
                    exc = sys.exc_info()
                    logger.error('%s tile skipped due to error: %s' % (threading.current_thread().name,
                                                                       exc[1].__str__()))
                    args[2]['skipped_tiles'] += 1
                queue.task_done()

        threads = []
        for i in range(num_worker_threads):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        queue.join()

    return counter
Code Example #16
File: autorunner.py Project: htmue/python-autocheck
class AutocheckEventHandler(RegexMatchingEventHandler):

    def __init__(self, filepattern=DEFAULT_FILEPATTERN):
        self.queue = Queue()
        super(AutocheckEventHandler, self).__init__(
            regexes=[filepattern], ignore_directories=True,
            case_sensitive=False)

    def on_any_event(self, event):
        self.queue.put(event)
Code Example #17
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Code Example #18
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {}: {!r}'
                .format(sizeof_fmt(self.maxitemsize), truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Code Example #19
File: pubnub.py Project: pubnub/python
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    def wait_for_connect(self):
        if not self.connected_event.is_set():
            self.connected_event.wait()
        else:
            raise Exception("the instance is already connected")

    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            self.disconnected_event.wait()
        else:
            raise Exception("the instance is already disconnected")

    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.message_queue.get()
            self.message_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue

    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.presence_queue.get()
            self.presence_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue
Code Example #20
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
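The Worker class consumed here (and by the identical pools in Code Examples #24 and #25) is not shown. The conventional companion implementation looks roughly like this (a sketch, not necessarily the projects' exact code):

from queue import Queue
from threading import Thread

class Worker(Thread):
    """Thread executing (func, args, kargs) tuples from a shared queue."""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True   # don't keep the interpreter alive at exit
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception as e:
                print(e)     # a real pool would log this instead
            finally:
                self.tasks.task_done()   # required for wait_completion()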
Code Example #21
File: log_printer_test.py Project: shin-/compose
    def test_item_is_an_exception(self):
        class Problem(Exception):
            pass

        queue = Queue()
        error = Problem("oops")
        for item in QueueItem.new("a"), QueueItem.new("b"), QueueItem.exception(error):
            queue.put(item)

        generator = consume_queue(queue, False)
        assert next(generator) == "a"
        assert next(generator) == "b"
        with pytest.raises(Problem):
            next(generator)
Code Example #22
class GenericCaller(DaemonTask):
    def __init__(self, interval=0, threads=1, name=None):
        DaemonTask.__init__(self, self.call, interval=interval, threads=threads, name=name)
        self.q = Queue()

    def call(self):
        try:
            func, args, kwargs = self.q.get(timeout=config['thread_wait_interval'])
            func(*args, **kwargs)
        except Empty:
            pass

    def defer(self, func, *args, **kwargs):
        self.q.put([func, args, kwargs])
Code Example #23
File: ftdi.py Project: nccgroup/umap2
class USBFtdiInterface(USBInterface):
    name = 'FtdiInterface'

    def __init__(self, app, phy, interface_number):
        super(USBFtdiInterface, self).__init__(
            app=app,
            phy=phy,
            interface_number=interface_number,
            interface_alternate=0,
            interface_class=USBClass.VendorSpecific,
            interface_subclass=0xff,
            interface_protocol=0xff,
            interface_string_index=0,
            endpoints=[
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=1,
                    direction=USBEndpoint.direction_out,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_data_available
                ),
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=3,
                    direction=USBEndpoint.direction_in,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_ep3_buffer_available  # at this point, we don't send data to the host
                )
            ],
        )
        self.txq = Queue()

    def handle_data_available(self, data):
        self.debug('received string (%d): %s' % (len(data), data))
        reply = b'\x01\x00' + data
        self.txq.put(reply)

    def handle_ep3_buffer_available(self):
        if not self.txq.empty():
            self.send_on_endpoint(3, self.txq.get())
Code Example #24
File: threadpool.py Project: PeterJCLaw/tools
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
Code Example #25
File: utils.py Project: MortalCatalyst/lektor
class WorkerPool(object):

    def __init__(self, num_threads=None):
        if num_threads is None:
            num_threads = multiprocessing.cpu_count()
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    def wait_for_completion(self):
        self.tasks.join()
Code Example #26
File: test_container.py Project: candlerb/pato
def test_thread_safe_object_creation(c):
    """
    If two threads try to fetch the object at the same time,
    only one instance should be created.
    This also tests assigning an existing function as a service.
    """
    cin = Queue()
    cout = Queue()
    def test_factory(username, password):
        cout.put("ready")
        cin.get()
        res = libtest.sample.Foo(username, password)
        cout.put("done")
        return res

    c['test_factory'] = test_factory
    c.load_yaml("""
a:
    :: <test_factory>
    username: abc
    password: xyz
""")
    def run(q):
        q.put("starting")
        q.put(c['a'])
    q1 = Queue()
    t1 = Thread(target=run, kwargs={"q":q1})
    t1.start()
    assert cout.get(True, 2) == "ready"
    assert q1.get(True, 2) == "starting"
    # Now t1 is waiting inside factory method

    q2 = Queue()
    t2 = Thread(target=run, kwargs={"q":q2})
    t2.start()
    assert q2.get(True, 2) == "starting"

    cin.put("go")
    assert cout.get(True, 2) == "done"
    t1.join(2)
    t2.join(2)
    assert cout.empty()

    res1 = q1.get(True, 2)
    res2 = q2.get(True, 2)
    # This also implies that test_factory was only called once
    # because otherwise t2 would hang waiting on cin
    assert isinstance(res1, libtest.sample.Foo)
    assert res1 is res2
Code Example #27
File: ftdi.py Project: agdlgv/sahara_emulator
class USBFtdiInterface(USBInterface):
    name = 'FtdiInterface'

    def __init__(self, app, phy, interface_number):
        super(USBFtdiInterface, self).__init__(
            app=app,
            phy=phy,
            interface_number=interface_number,
            interface_alternate=0,
            interface_class=USBClass.VendorSpecific,
            interface_subclass=0xff,
            interface_protocol=0xff,
            interface_string_index=0,
            endpoints=[
                USBEndpoint(app=app,
                            phy=phy,
                            number=1,
                            direction=USBEndpoint.direction_out,
                            transfer_type=USBEndpoint.transfer_type_bulk,
                            sync_type=USBEndpoint.sync_type_none,
                            usage_type=USBEndpoint.usage_type_data,
                            max_packet_size=0x40,
                            interval=0,
                            handler=self.handle_data_available),
                USBEndpoint(
                    app=app,
                    phy=phy,
                    number=3,
                    direction=USBEndpoint.direction_in,
                    transfer_type=USBEndpoint.transfer_type_bulk,
                    sync_type=USBEndpoint.sync_type_none,
                    usage_type=USBEndpoint.usage_type_data,
                    max_packet_size=0x40,
                    interval=0,
                    handler=self.handle_ep3_buffer_available  # at this point, we don't send data to the host
                )
            ],
        )
        self.txq = Queue()

    def handle_data_available(self, data):
        self.debug('received string (%d): %s' % (len(data), data))
        reply = b'\x01\x00' + data
        self.txq.put(reply)

    def handle_ep3_buffer_available(self):
        if not self.txq.empty():
            self.send_on_endpoint(3, self.txq.get())
Code Example #28
class RS485(object):
    """ Replicates the pyserial interface. """
    def __init__(self, serial):
        # type: (Serial) -> None
        """ Initialize a rs485 connection using the serial port. """
        self._serial = serial
        fileno = serial.fileno()
        if fileno is not None:
            flags_rs485 = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND
            serial_rs485 = struct.pack('hhhhhhhh', flags_rs485, 0, 0, 0, 0, 0,
                                       0, 0)
            fcntl.ioctl(fileno, TIOCSRS485, serial_rs485)

        self._serial.timeout = None
        self._running = False
        self._thread = BaseThread(name='rS485read', target=self._reader)
        self._thread.daemon = True
        # TODO why does this stream byte by byte?
        self.read_queue = Queue()  # type: Queue[bytearray]

    def start(self):
        # type: () -> None
        if not self._running:
            self._running = True
            self._thread.start()

    def stop(self):
        # type: () -> None
        self._running = False

    def write(self, data):
        # type: (bytes) -> None
        """ Write data to serial port """
        self._serial.write(data)

    def _reader(self):
        # type: () -> None
        try:
            while self._running:
                data = bytearray(self._serial.read(1))
                if len(data) == 1:
                    self.read_queue.put(data[:1])
                size = self._serial.inWaiting()
                if size > 0:
                    data = bytearray(self._serial.read(size))
                    for i in range(size):
                        self.read_queue.put(data[i:i + 1])
        except Exception as ex:
            print('Error in reader: {0}'.format(ex))
Code Example #29
File: test_poutines.py Project: zippeurfou/pyro
class QueueHandlerMixedTest(TestCase):
    def setUp(self):

        # Simple model with 1 continuous + 1 discrete + 1 continuous variable.
        def model():
            p = torch.tensor([0.5])
            loc = torch.zeros(1)
            scale = torch.ones(1)

            x = pyro.sample("x",
                            Normal(loc,
                                   scale))  # Before the discrete variable.
            y = pyro.sample("y", Bernoulli(p))
            z = pyro.sample("z", Normal(loc,
                                        scale))  # After the discrete variable.
            return dict(x=x, y=y, z=z)

        self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
        self.model = model
        self.queue = Queue()
        self.queue.put(poutine.Trace())

    def test_queue_single(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        tr = f.get_trace()
        for name in self.sites:
            assert name in tr

    def test_queue_enumerate(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        trs = []
        while not self.queue.empty():
            trs.append(f.get_trace())
        assert len(trs) == 2

        values = [{
            name: tr.nodes[name]['value'].view(-1).item()
            for name in tr.nodes.keys() if tr.nodes[name]['type'] == 'sample'
        } for tr in trs]

        expected_ys = set([0, 1])
        actual_ys = set([value["y"] for value in values])
        assert actual_ys == expected_ys

        # Check that x was sampled the same on each path.
        assert values[0]["x"] == values[1]["x"]

        # Check that z was sampled differently on each path.
        assert values[0]["z"] != values[1]["z"]  # Almost surely true.
Code Example #30
class DnsCachingResolver(Thread):

    def __init__(self, cache_time=600.0, cache_fail_time=30.0):
        super(DnsCachingResolver, self).__init__()
        self._cache = {}
        self._cache_time = cache_time
        self._cache_fail_time = cache_fail_time
        self._resolve_queue = Queue()
        self.daemon = True
        self.start()

    def run(self):
        while True:
            hostname, attempt = self._resolve_queue.get()
            ips = self._do_resolve(hostname)
            if ips:
                self._cache[hostname] = (time.time(), ips)
            else:
                if attempt < 10:
                    self.resolve_async(hostname, attempt + 1)
                    time.sleep(1)

    def resolve(self, hostname):
        current_time = time.time()
        cached_time, ips = self._cache.get(hostname, (0, []))
        time_passed = current_time - cached_time
        if time_passed > self._cache_time or (not ips and time_passed > self._cache_fail_time):
            new_ips = self._do_resolve(hostname)
            if new_ips:
                self._cache[hostname] = (current_time, new_ips)
                ips = new_ips
        return ips

    def resolve_async(self, hostname, attempt=0):
        self._resolve_queue.put((hostname, attempt))

    @staticmethod
    def _do_resolve(hostname):
        try:
            ret = set()
            for r in socket.getaddrinfo(hostname, 0, 0, 0, socket.IPPROTO_TCP):
                if r[0] == socket.AF_INET6:
                    ret.add('[{0}]'.format(r[4][0]))
                else:
                    ret.add(r[4][0])
            return list(ret)
        except socket.gaierror:
            logger.warning('failed to resolve host %s', hostname)
            return []
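A hedged usage sketch, assuming the imports the class needs (import socket, time, from threading import Thread, from queue import Queue, and a module-level logger) are in scope:

resolver = DnsCachingResolver(cache_time=600.0, cache_fail_time=30.0)
print(resolver.resolve('localhost'))   # blocking lookup; result is cached
resolver.resolve_async('example.com')  # queued; the daemon thread fills the cache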
Code Example #31
File: threaded.py Project: sn-donbenjamin/grab
class ThreadedTransport(object):
    def __init__(self, spider, thread_number):
        self.spider = spider
        self.thread_number = thread_number
        self.task_queue = Queue()
        self.result_queue = Queue()
        #self.registry = {}

        self.workers = []
        self.freelist = []
        for x in six.moves.range(self.thread_number):
            th = Thread(target=worker_thread,
                        args=[
                            self.task_queue, self.result_queue, self.freelist,
                            self.spider.shutdown_event
                        ])
            th.daemon = True
            self.workers.append(th)
            self.freelist.append(1)
            th.start()

    def ready_for_task(self):
        return len(self.freelist)

    def get_free_threads_number(self):
        return len(self.freelist)

    def get_active_threads_number(self):
        return self.thread_number - len(self.freelist)

    def start_task_processing(self, task, grab, grab_config_backup):
        self.task_queue.put((task, grab, grab_config_backup))

    def process_handlers(self):
        pass

    def iterate_results(self):
        while True:
            try:
                result = self.result_queue.get(block=True, timeout=0.1)
            except Empty:
                break
            else:
                # FORMAT: {ok, grab, grab_config_backup, task, emsg, error_abbr}

                #grab.doc.error_code = None
                #grab.doc.error_msg = None

                yield result
Code Example #32
class Caller(DaemonTask):
    def __init__(self, func, interval=0, threads=1, name=None):
        self.q = Queue()
        DaemonTask.__init__(self, self.call, interval=interval, threads=threads, name=name or func.__name__)
        self.callee = func

    def call(self):
        try:
            args, kwargs = self.q.get(timeout=config['thread_wait_interval'])
            self.callee(*args, **kwargs)
        except Empty:
            pass

    def defer(self, *args, **kwargs):
        self.q.put([args, kwargs])
Code Example #33
    def test_item_is_an_exception(self):

        class Problem(Exception):
            pass

        queue = Queue()
        error = Problem('oops')
        for item in QueueItem.new('a'), QueueItem.new('b'), QueueItem.exception(error):
            queue.put(item)

        generator = consume_queue(queue, False)
        assert next(generator) == 'a'
        assert next(generator) == 'b'
        with pytest.raises(Problem):
            next(generator)
Code Example #34
File: scheduler.py Project: xiaomo-shu/my_project
class Scheduler(object):
    def __init__(self):
        self.queue = Queue()

    def add_request(self, request):
        self.queue.put(request)

    def get_request(self):
        return self.queue.get()

    def senn_request(self):
        # deduplicate request objects
        pass
Code Example #35
class FluentdEvent(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()

    def init_app(self, app):
        tag_prefix = app.config.get("FLUENTD_EVENT_TAG_PREFIX",
                                    "flask.fluentd_event")
        host = app.config.get("FLUENTD_EVENT_HOST", "localhost")
        port = int(app.config.get("FLUENTD_EVENT_PORT", 24224))
        self._sender = sender.FluentSender(tag_prefix, host=host, port=port)

        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, "teardown_appcontext"):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, tag, event):
        self.queue.put((tag, event))

    def send_events(self, exception):
        """
        Makes a best-effort to send all the events that it pushed during a
        request but capable of missing some
        """
        pumping = True
        while pumping:
            try:
                tag, event = self.queue.get_nowait()
                self._sender.emit(tag, event)
                self.queue.task_done()
            except Empty:
                pumping = False
            except Exception as e:
                # This is bad but it's worse to foul the request because
                # of a logging issue
                logging.exception(e)
                self.queue.task_done()

        return exception
Code Example #36
File: test_poutines.py Project: lewisKit/pyro
class QueueHandlerMixedTest(TestCase):

    def setUp(self):

        # Simple model with 1 continuous + 1 discrete + 1 continuous variable.
        def model():
            p = torch.tensor([0.5])
            loc = torch.zeros(1)
            scale = torch.ones(1)

            x = pyro.sample("x", Normal(loc, scale))  # Before the discrete variable.
            y = pyro.sample("y", Bernoulli(p))
            z = pyro.sample("z", Normal(loc, scale))  # After the discrete variable.
            return dict(x=x, y=y, z=z)

        self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
        self.model = model
        self.queue = Queue()
        self.queue.put(poutine.Trace())

    def test_queue_single(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        tr = f.get_trace()
        for name in self.sites:
            assert name in tr

    def test_queue_enumerate(self):
        f = poutine.trace(poutine.queue(self.model, queue=self.queue))
        trs = []
        while not self.queue.empty():
            trs.append(f.get_trace())
        assert len(trs) == 2

        values = [
            {name: tr.nodes[name]['value'].view(-1).item() for name in tr.nodes.keys()
             if tr.nodes[name]['type'] == 'sample'}
            for tr in trs
        ]

        expected_ys = set([0, 1])
        actual_ys = set([value["y"] for value in values])
        assert actual_ys == expected_ys

        # Check that x was sampled the same on each path.
        assert values[0]["x"] == values[1]["x"]

        # Check that z was sampled differently on each path.
        assert values[0]["z"] != values[1]["z"]  # Almost surely true.
Code Example #37
class BackgroundConsumer(object):
    """
    A consumer that runs in the background. The BackgroundConsumer does not provide get()
    but does a callback to a function whenever a message was consumed.
    """
    def __init__(
        self, command, cid, callback
    ):  # type: (CoreCommandSpec, int, Callable[[Dict[str, Any]], None]) -> None
        """
        Create a background consumer using a cmd, cid and callback.

        :param command: the CoreCommand to consume.
        :param cid: the communication id.
        :param callback: function to call when an instance was found.
        """
        self.cid = cid
        self.command = command
        self._callback = callback
        self._queue = Queue()  # type: Queue[Dict[str, Any]]

        self._callback_thread = BaseThread(name='coredelivery',
                                           target=self._consumer)
        self._callback_thread.setDaemon(True)
        self._callback_thread.start()

    def _consumer(self):
        while True:
            try:
                self.deliver()
            except Exception:
                logger.exception(
                    'Unexpected exception delivering background consumer data')
                time.sleep(1)

    def get_hash(self):  # type: () -> int
        """ Get an identification hash for this consumer. """
        return Toolbox.hash(CoreCommunicator.START_OF_REPLY +
                            bytearray([self.cid]) +
                            self.command.response_instruction)

    def consume(self, payload):  # type: (bytearray) -> None
        """ Consume payload. """
        data = self.command.consume_response_payload(payload)
        self._queue.put(data)

    def deliver(self):
        """ Deliver data to the callback functions. """
        self._callback(self._queue.get())
Code Example #38
class Scheduler(object):
    def __init__(self):
        self.queue = Queue()

    def add_request(self, request):
        """添加请求对象"""
        self.queue.put(request)

    def get_request(self):
        """获取一个请求对象并返回"""
        request = self.queue.get()
        return request

    def _filter_request(self):
        """请求去重"""
        pass
Code Example #39
File: settings_view.py Project: zk20/piksi_tools
class WorkQueue():
    def __init__(self, settings_view):
        self._settings_view = settings_view
        self._work_queue = Queue()
        self._worker = threading.Thread(target=self._work_thd)
        self._worker.daemon = True
        self._worker.start()

    def put(self, func, *argv):
        self._work_queue.put((func, argv))

    def _work_thd(self):
        while True:
            (func, argv) = self._work_queue.get(block=True)
            func(*argv)
            self._work_queue.task_done()
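A hedged usage sketch (assuming import threading and from queue import Queue in scope). The queueing logic never touches settings_view, so None is enough for a demo:

wq = WorkQueue(settings_view=None)
wq.put(print, 'ran on the worker thread')  # func plus positional args
wq._work_queue.join()                      # block until queued work is done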
Code Example #40
class Scheduler(object):
    def __init__(self):
        # initialize the queue
        self.queue = Queue()

    # 1. enqueue a request
    def add_request(self, request):
        self.queue.put(request)

    # 2. dequeue a request
    def get_request(self):
        return self.queue.get()

    # 3. deduplicate requests
    def filter_request(self, request):
        pass
Code Example #41
File: koji_source.py Project: rohanpm/pushsource
    def __iter__(self):
        # Queue holding all requests we need to make to koji.
        # We try to fetch as much as we can early to make efficient use
        # of multicall.
        koji_queue = Queue()

        # We'll need to obtain all RPMs referenced by filename
        for rpm_filename in self._rpm:
            koji_queue.put(GetRpmCommand(ident=rpm_filename))

        # We'll need to obtain all builds from which we want modules,
        # as well as the archives from those
        for build_id in self._module_build:
            koji_queue.put(GetBuildCommand(ident=build_id, list_archives=True))

        # Put some threads to work on the queue.
        fetch_exceptions = []
        fetch_threads = [
            Thread(
                name="koji-%s-fetch-%s" % (id(self), i),
                target=self._do_fetch,
                args=(koji_queue, fetch_exceptions),
            ) for i in range(0, self._threads)
        ]

        # Wait for all fetches to finish
        for t in fetch_threads:
            t.start()
        for t in fetch_threads:
            t.join(self._timeout)

        # Re-raise exceptions, if any.
        # If we got more than one, we're only propagating the first.
        if fetch_exceptions:
            raise fetch_exceptions[0]

        # The queue must be empty now
        assert koji_queue.empty()

        push_items_fs = self._modulemd_futures() + self._rpm_futures()

        completed_fs = futures.as_completed(push_items_fs,
                                            timeout=self._timeout)
        for f in completed_fs:
            # If an exception occurred, this is where it will be raised.
            for pushitem in f.result():
                yield pushitem
Code Example #42
File: etcd.py Project: AtScaleInc/patroni
class DnsCachingResolver(Thread):
    def __init__(self, cache_time=600.0, cache_fail_time=30.0):
        super(DnsCachingResolver, self).__init__()
        self._cache = {}
        self._cache_time = cache_time
        self._cache_fail_time = cache_fail_time
        self._resolve_queue = Queue()
        self.daemon = True
        self.start()

    def run(self):
        while True:
            (host, port), attempt = self._resolve_queue.get()
            response = self._do_resolve(host, port)
            if response:
                self._cache[(host, port)] = (time.time(), response)
            else:
                if attempt < 10:
                    self.resolve_async(host, port, attempt + 1)
                    time.sleep(1)

    def resolve(self, host, port):
        current_time = time.time()
        cached_time, response = self._cache.get((host, port), (0, []))
        time_passed = current_time - cached_time
        if time_passed > self._cache_time or (
                not response and time_passed > self._cache_fail_time):
            new_response = self._do_resolve(host, port)
            if new_response:
                self._cache[(host, port)] = (current_time, new_response)
                response = new_response
        return response

    def resolve_async(self, host, port, attempt=0):
        self._resolve_queue.put(((host, port), attempt))

    def remove(self, host, port):
        self._cache.pop((host, port), None)

    @staticmethod
    def _do_resolve(host, port):
        try:
            return socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
                                      socket.IPPROTO_TCP)
        except Exception as e:
            logger.warning('failed to resolve host %s: %s', host, e)
            return []
Code Example #43
def fetch_adblock_list():
    queue = Queue()
    for url in FILTER_URLS:
        queue.put(url)

    worker_count = min(len(FILTER_URLS), multiprocessing.cpu_count())
    workers = []
    for _ in range(worker_count):
        worker = Worker(queue)
        worker.start()
        workers.append(worker)

    queue.join()
    hosts_str = '\n'
    for worker in workers:
        hosts_str += worker.result
    return hosts_str
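The Worker consumed here differs from the thread-pool one above: each thread accumulates downloaded text in .result. A hedged sketch of a compatible class (an assumption, not the original code), with a placeholder FILTER_URLS:

from queue import Queue, Empty
from threading import Thread
from urllib.request import urlopen

FILTER_URLS = ['https://example.com/filter.txt']  # placeholder list

class Worker(Thread):
    def __init__(self, queue):
        Thread.__init__(self)
        self.daemon = True
        self.queue = queue
        self.result = ''

    def run(self):
        while True:
            try:
                url = self.queue.get_nowait()
            except Empty:
                return
            try:
                self.result += urlopen(url).read().decode('utf-8')
            finally:
                self.queue.task_done()   # lets queue.join() return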
Code Example #44
def main():
    volumes = 900
    num_worker_threads = 25
    task = Queue()
    poems = Queue()
    for i in range(num_worker_threads):
        t = Thread(target=worker, args=(task, poems))
        t.daemon = True
        t.start()
    write_thread = Thread(target=write_poems, args=('./data/poems.txt', poems))
    write_thread.start()
    for js in range(1, volumes + 1):
        task.put(js)
    task.join()
    poems.join()
    poems.put(None)
    write_thread.join()
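main() relies on worker and write_poems helpers that are not shown; a hedged sketch of their expected shape (the real scraping is replaced by a placeholder):

def worker(task, poems):
    while True:
        volume = task.get()
        poems.put('poem from volume %d' % volume)  # real code would scrape here
        task.task_done()

def write_poems(path, poems):
    with open(path, 'w') as f:
        while True:
            poem = poems.get()
            if poem is None:       # sentinel enqueued after poems.join()
                poems.task_done()
                return
            f.write(poem + '\n')
            poems.task_done()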
Code Example #45
class SimpleSensor(Sensor):
    """
    A simple sensor that uses a single thread to prefetch data.
    """
    def _pre_forward(self):
        super(SimpleSensor, self)._pre_forward()
        if threading.active_count() == 1:
            # Only the main thread is alive. Raise exception.
            raise DataPrefetchThreadsDeadEvent

    def _setup_index_queue(self):
        # Set up a queue.
        self._index_queue = Queue(self.queue_size)
        # Start loading data from source according to mode.
        ## Put indices to prefetch.
        for i in range(self.queue_size):
            try:
                self._index_queue.put(
                    self.sampler.next(self.batch_size_dict[self.mode]))
            except EpochCompletedEvent:
                self._index_queue.put(
                    self.sampler.next(self.batch_size_dict[self.mode]))

    @property
    def index_queue(self):
        return self._index_queue

    def _setup_data_queue(self):
        # Set up a data queue.
        self._data_queue = Queue(self.queue_size)
        # Start loading data from source according to mode.
        ## Start the workers to fetch data.
        self.done_event = threading.Event()
        self.worker_thread = threading.Thread(
            target=_data_fetching_worker,
            args=(self.source, self._index_queue, self._data_queue,
                  self.done_event))
        self.worker_thread.start()

    def _teardown_data_queue(self):
        self.done_event.set()
        self.worker_thread.join()

    @property
    def data_queue(self):
        return self._data_queue
Code Example #46
class Scheduler(object):
    def __init__(self):
        self.queue = Queue()

    def add_request(self, request):
        self.queue.put(request)

    def get_request(self):
        # request = self.queue.get_nowait()
        try:
            request = self.queue.get(False)
            return request
        except Exception as e:
            logger.info(e)

    def filter_request(self, request):
        pass
Code Example #47
File: scheduler.py Project: zhaoduoyu/demo
class Scheduler():
    def __init__(self):
        self.q = Queue()

    def add_request(self, request):
        # enqueue a request
        self.q.put(request)

    def get_request(self):
        # take a request out and return it
        request = self.q.get()
        return request

    def _filter_request(self):
        '''Deduplicate requests.'''
        # not implemented for now
        pass
Code Example #48
File: flask_fluentd.py Project: thread/flask-fluentd
class Fluentd(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()
        tag_label = app.config.get('EVENT_TAG_PREFIX', 'flask.fluentd')
        self._sender = sender.FluentSender(tag_label)

    def init_app(self, app):
        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, pair):
        tag, evt = pair
        self.queue.put((tag, evt))

    def send_events(self, exception):
        """
        Makes a best-effort to send all the events that it pushed during a
        request but capable of missing some
        """
        pumping = True
        while pumping:
            try:
                tag, evt = self.queue.get_nowait()
                self._sender.emit(tag, evt)
                self.queue.task_done()
            except Empty:
                pumping = False
            except Exception as e:
                # This is bad but it's worse to foul the request because
                # of a logging issue
                logging.exception(e)
                self.queue.task_done()

        return exception
Code Example #49
File: thread.py Project: new07/pypeln
class _InputQueue(object):

    def __init__(self, maxsize, total_done, pipeline_namespace, **kwargs):
        
        self.queue = Queue(maxsize = maxsize, **kwargs)
        self.lock = Lock()
        self.namespace = _get_namespace()
        self.namespace.remaining = total_done

        self.pipeline_namespace = pipeline_namespace

    def __iter__(self):

        while not self.is_done():
            x = self.get()

            if self.pipeline_namespace.error:
                return

            if not utils.is_continue(x):
                yield x

    def get(self):
        
        try:
            x = self.queue.get(timeout = utils.TIMEOUT)
        except (Empty, Full):
            return utils.CONTINUE
        
        if not utils.is_done(x):
            return x
        else:
            with self.lock:
                self.namespace.remaining -= 1
            
            return utils.CONTINUE

    def is_done(self):
        return self.namespace.remaining == 0 and self.queue.empty()

    def put(self, x):
        self.queue.put(x)

    def done(self):
        self.queue.put(utils.DONE)
Code Example #50
def _build_droot_impact(destroy_handler):
    droot = {}   # destroyed view + nonview variables -> foundation
    impact = {}  # destroyed nonview variable -> it + all views of it
    root_destroyer = {}  # root -> destroyer apply

    for app in destroy_handler.destroyers:
        for output_idx, input_idx_list in app.op.destroy_map.items():
            if len(input_idx_list) != 1:
                raise NotImplementedError()
            input_idx = input_idx_list[0]
            input = app.inputs[input_idx]

            # Find non-view variable which is ultimately viewed by input.
            view_i = destroy_handler.view_i
            _r = input
            while _r is not None:
                r = _r
                _r = view_i.get(r)
            input_root = r

            if input_root in droot:
                raise InconsistencyError(
                    "Multiple destroyers of %s" % input_root)
            droot[input_root] = input_root
            root_destroyer[input_root] = app

            # The code here adds all the variables that are views of r into
            # an OrderedSet input_impact.
            input_impact = OrderedSet()
            queue = Queue()
            queue.put(input_root)
            while not queue.empty():
                v = queue.get()
                for n in destroy_handler.view_o.get(v, []):
                    input_impact.add(n)
                    queue.put(n)

            for v in input_impact:
                assert v not in droot
                droot[v] = input_root

            impact[input_root] = input_impact
            impact[input_root].add(input_root)

    return droot, impact, root_destroyer
コード例 #51
0
ファイル: destroyhandler.py プロジェクト: 12190143/Theano
def _build_droot_impact(destroy_handler):
    droot = {}   # destroyed view + nonview variables -> foundation
    impact = {}  # destroyed nonview variable -> it + all views of it
    root_destroyer = {}  # root -> destroyer apply

    for app in destroy_handler.destroyers:
        for output_idx, input_idx_list in app.op.destroy_map.items():
            if len(input_idx_list) != 1:
                raise NotImplementedError()
            input_idx = input_idx_list[0]
            input = app.inputs[input_idx]

            # Find the non-view variable that is ultimately viewed by input.
            view_i = destroy_handler.view_i
            _r = input
            while _r is not None:
                r = _r
                _r = view_i.get(r)
            input_root = r

            if input_root in droot:
                raise InconsistencyError(
                    "Multiple destroyers of %s" % input_root)
            droot[input_root] = input_root
            root_destroyer[input_root] = app

            # The code here adds all the variables that are views of
            # input_root into the OrderedSet input_impact.
            input_impact = OrderedSet()
            queue = Queue()
            queue.put(input_root)
            while not queue.empty():
                v = queue.get()
                for n in destroy_handler.view_o.get(v, []):
                    input_impact.add(n)
                    queue.put(n)

            for v in input_impact:
                assert v not in droot
                droot[v] = input_root

            impact[input_root] = input_impact
            impact[input_root].add(input_root)

    return droot, impact, root_destroyer
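
A standalone sketch of the inner view-graph walk above, assuming a toy view_o mapping (variable -> list of its direct views) instead of a real Theano destroy_handler:

from queue import Queue

view_o = {'root': ['a', 'b'], 'a': ['c']}

impact = set()
queue = Queue()
queue.put('root')
while not queue.empty():
    v = queue.get()
    for n in view_o.get(v, []):  # enqueue every direct view of v
        impact.add(n)
        queue.put(n)
impact.add('root')  # the root counts as part of its own impact

print(sorted(impact))  # ['a', 'b', 'c', 'root']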
コード例 #53
0
ファイル: WriteLog.py プロジェクト: rkantos/RaceDB
def writeLog( message ):
	global logThread, messageQ
	
	if not logThread:
		messageQ = Queue()
		logThread = threading.Thread( target=messageWriter )
		logThread.daemon = True
		logThread.start()
		writeLog( '****** Log Initialized *****' )

	dt = datetime.datetime.now()
	dt = dt.replace( microsecond = 0 )
	messageQ.put( '{} ({}) {}{}'.format(
			dt.isoformat(),
			PlatformName,
			message, '\n' if not message or message[-1] != '\n' else '',
		),
	)
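
messageWriter is not shown above; a minimal sketch of what such a consumer might look like, assuming messages are appended to a log file (the file name is hypothetical):

def messageWriter():
	# Consume formatted lines from the global messageQ forever;
	# the daemon flag on logThread lets the process exit anyway.
	with open('RaceDB.log', 'a') as f:
		while True:
			f.write(messageQ.get())
			f.flush()
			messageQ.task_done()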
コード例 #54
0
ファイル: scheduler.py プロジェクト: ChenQi960219/spider_my
class Scheduler(object):

    def __init__(self):

        self.queue = Queue()
        self.total_response_nums = 0

    def add_request(self, request):

        self.queue.put(request)
        # NOTE: despite its name, this counter tracks how many requests
        # have been queued, not how many responses were received.
        self.total_response_nums += 1

    def get_request(self):

        return self.queue.get()

    def filter_request(self):
        # Deduplication hook; left unimplemented here.
        pass
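
Usage sketch for the scheduler above, passing URL strings where the real spider would pass request objects:

scheduler = Scheduler()
scheduler.add_request('http://example.com/page/1')
scheduler.add_request('http://example.com/page/2')
print(scheduler.get_request())        # http://example.com/page/1 (FIFO order)
print(scheduler.total_response_nums)  # 2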
コード例 #55
0
ファイル: MultiCast.py プロジェクト: esitarski/CrossMgr
def SendTrigger( message ):
	#
	# message is a tuple: (cmd, d)
	# d is an optional dict for the command.
	#
	# cmd can be:
	#
	# trigger    - broadcast the trigger
	# terminate  - stop the sender thread.
	#
	# if cmd = trigger, dict must contain d['ts'] which is the timestamp of the trigger.
	#
	global qTrigger, sender
	if not qTrigger:
		qTrigger = Queue()
		sender = MultiCastSender( qTrigger )
		sender.start()
	qTrigger.put( message )
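
The MultiCastSender class is not shown above, but the protocol documented in the comments implies calls like these:

import datetime

SendTrigger(('trigger', {'ts': datetime.datetime.now()}))  # broadcast a trigger
SendTrigger(('terminate', None))                           # stop the sender thread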
コード例 #56
0
    def download(
            self,
            destination,  # type: Union[str, fs.base.FS]
            condition=None,  # type: Optional[Callable[[dict], bool]]
            media_count=None,  # type: Optional[int]
            timeframe=None,  # type: Optional[_Timeframe]
            new_only=False,  # type: bool
            pgpbar_cls=None,  # type: Optional[Type[ProgressBar]]
            dlpbar_cls=None,  # type: Optional[Type[ProgressBar]]
    ):
        # type: (...) -> int
        """Download the refered post to the destination.

        See `InstaLooter.download` for argument reference.

        Note:
            This function, as opposed to other *looter* implementations,
            will not spawn new threads, but simply uses the main thread to
            download the files.

            Since a worker is in charge of downloading a *media* at a time
            (and not a *file*), there would be no point in spawning more.

        """
        destination, close_destination = self._init_destfs(destination)

        queue = Queue()  # type: Queue[Dict]
        medias_queued = self._fill_media_queue(queue, destination,
                                               iter(self.medias()),
                                               media_count, new_only,
                                               condition)
        queue.put(None)

        worker = InstaDownloader(queue=queue,
                                 destination=destination,
                                 namegen=self.namegen,
                                 add_metadata=self.add_metadata,
                                 dump_json=self.dump_json,
                                 dump_only=self.dump_only,
                                 pbar=None,
                                 session=self.session)
        worker.run()

        return medias_queued
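
A usage sketch, assuming `looter` is an already-constructed instance of the class this method belongs to:

downloaded = looter.download(
    destination='backup/',
    media_count=10,
    new_only=True,
)
print('{} medias queued'.format(downloaded))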
コード例 #57
0
ファイル: cos_threadpool.py プロジェクト: lewzylu/coscmd
class SimpleThreadPool:

    def __init__(self, num_threads=5):
        self._num_threads = num_threads
        self._queue = Queue(2000)
        self._lock = Lock()
        self._active = False
        self._workers = []
        self._finished = False

    def add_task(self, func, *args, **kwargs):
        if not self._active:
            with self._lock:
                if not self._active:
                    self._workers = []
                    self._active = True
                    for i in range(self._num_threads):
                        w = WorkerThread(self._queue)
                        self._workers.append(w)
                        w.start()

        self._queue.put((func, args, kwargs))

    def release(self):
        while not self._queue.empty():
            time.sleep(1)

    def wait_completion(self):
        self._queue.join()
        self._finished = True
        # All tasks are finished; make the worker threads exit so nothing hangs.
        for i in range(self._num_threads):
            self._queue.put((None, None, None))

        self._active = False

    def complete(self):
        self._finished = True

    def get_result(self):
        assert self._finished
        detail = [worker.get_result() for worker in self._workers]
        succ_all = all([tp[1] == 0 for tp in detail])
        return {'success_all': succ_all, 'detail': detail}
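
Usage sketch for the pool above (WorkerThread is not shown; it is assumed to pull (func, args, kwargs) tuples off the queue, run them, and treat a (None, None, None) tuple as the exit signal):

def upload(part):
    return len(part)

pool = SimpleThreadPool(num_threads=4)
for part in ['a', 'bb', 'ccc']:
    pool.add_task(upload, part)
pool.wait_completion()       # joins the queue and sends exit signals
print(pool.get_result())     # per-worker success details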
コード例 #58
0
ファイル: test_image_json.py プロジェクト: bjlittle/iris
    def test_resolve(self):
        repo_fname = os.path.join(os.path.dirname(__file__), "results", "imagerepo.json")
        with open(repo_fname, "rb") as fi:
            repo = json.load(codecs.getreader("utf-8")(fi))
        uris = list(chain.from_iterable(six.itervalues(repo)))
        uri_list = deque()
        exceptions = deque()
        uri_queue = Queue()
        for uri in uris:
            if uri.startswith("https://scitools.github.io"):
                uri_queue.put(uri)
            else:
                msg = "{} is not a valid resource.".format(uri)
                exceptions.append(ValueError(msg))

        for i in range(MAXTHREADS):
            _ResolveWorkerThread(uri_queue, uri_list, exceptions).start()
        uri_queue.join()
        self.assertEqual(deque(), exceptions)
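
_ResolveWorkerThread and MAXTHREADS are defined elsewhere in the test module; a minimal sketch of what such a worker might look like, assuming each URI is checked with an HTTP HEAD request:

import threading
import requests

class _ResolveWorkerThread(threading.Thread):
    def __init__(self, uri_queue, uri_list, exceptions):
        super(_ResolveWorkerThread, self).__init__()
        self.daemon = True
        self.uri_queue = uri_queue
        self.uri_list = uri_list
        self.exceptions = exceptions

    def run(self):
        while True:
            uri = self.uri_queue.get()
            try:
                # Raise for 4xx/5xx so unreachable resources are recorded.
                requests.head(uri).raise_for_status()
                self.uri_list.append(uri)
            except Exception as exc:
                self.exceptions.append(exc)
            finally:
                self.uri_queue.task_done()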
コード例 #59
0
ファイル: event_queue.py プロジェクト: pymedusa/SickRage
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        # http://stackoverflow.com/a/20598791
        self.daemon = False
        self.callback = callback
        self.name = 'EVENT-QUEUE'
        self.stop = threading.Event()

    def put(self, event_type):
        self.queue.put(event_type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    event_type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(event_type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    event_type = None

            # exiting thread
            self.stop.clear()
        except Exception as error:
            log.error(u'Exception generated in thread %s: %s',
                      self.name, ex(error))
            log.debug(repr(traceback.format_exc()))

    # System Events
    class SystemEvent(Event):
        RESTART = 'RESTART'
        SHUTDOWN = 'SHUTDOWN'
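
Usage sketch (the Event base class and ex() helper come from elsewhere in the project): the thread hands each queued event type to the callback, and the 1-second get timeout lets it notice the stop flag promptly.

def handle(event_type):
    print('handling', event_type)

events = Events(handle)
events.start()
events.put(Events.SystemEvent.RESTART)
events.stop.set()  # request shutdown; run() exits after the current poll
events.join()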