Example #1
class GCThreadPool(object):
	def __init__(self):
		self._lock = GCLock()
		self._notify = GCEvent()
		self._token = 0
		self._token_time = {}
		self._token_desc = {}
		self._log = logging.getLogger('thread_pool')
		self._ex_collector = ExceptionCollector(self._log)

	def wait_and_drop(self, timeout = None):
		while True:
			self._lock.acquire()
			try:
				t_current = time.time()
				# discard stale threads
				for token in list(self._token_time):
					if timeout and (t_current - self._token_time.get(token, 0) > timeout):
						self._token_time.pop(token, None)
						self._token_desc.pop(token, None)
				if not self._token_time: # no active threads
					return True
				# drop all threads if timeout is reached
				if (timeout is not None) and (timeout <= 0):
					self._token_time = {}
					self._token_desc = {}
					return False
			finally:
				self._lock.release()
			# wait for thread to finish and adapt timeout for next round
			self._notify.wait(timeout)
			if timeout is not None:
				timeout -= time.time() - t_current

	def start_thread(self, desc, fun, *args, **kwargs):
		self._lock.acquire()
		try:
			self._token += 1
			token = self._token  # capture the token while holding the lock
			self._token_time[token] = time.time()
			self._token_desc[token] = desc
		finally:
			self._lock.release()
		# use the captured token - re-reading self._token here could race with other callers
		start_thread(desc, self._run_thread, token, fun, args, kwargs)

	def _run_thread(self, token, fun, args, kwargs):
		try:
			fun(*args, **kwargs)
		except Exception:
			self._lock.acquire()
			try:
				self._ex_collector.collect(logging.ERROR, 'Exception in thread %r', self._token_desc[token], exc_info = get_current_exception())
			finally:
				self._lock.release()
		self._lock.acquire()
		try:
			self._token_time.pop(token, None)
			self._token_desc.pop(token, None)
		finally:
			self._lock.release()
		self._notify.set()
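
The common machinery in all of these snippets is the collector itself: collect() stores the exception that is currently being handled instead of letting it propagate, and raise_any(ex) rethrows later - wrapped in the supplied exception - once the surrounding loop has finished. The following is only a minimal sketch of such a collector (an assumption for illustration, not grid-control's actual implementation; note the call in _run_thread above also passes logging arguments to collect()):

import logging
import sys

class ExceptionCollector(object):
	# Minimal sketch: remember exceptions as they happen, rethrow on demand
	def __init__(self, log=None):
		self._exc_info_list = []
		self._log = log or logging.getLogger('exception_collector')

	def collect(self, *log_args, **kwargs):
		# store the active exception instead of propagating it
		exc_info = kwargs.pop('exc_info', None) or sys.exc_info()
		self._exc_info_list.append(exc_info)
		if log_args:  # optional level, message and arguments as in _run_thread above
			self._log.log(log_args[0], *log_args[1:], exc_info=exc_info)

	def raise_any(self, value):
		# if anything was collected, clear the backlog and raise the given exception
		if self._exc_info_list:
			self._exc_info_list = []
			raise value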
Example #2
def tchain(iterables, timeout = None): # Combines multiple threaded generators into a single generator
	import time
	from grid_control.utils.thread_tools import start_thread, GCQueue
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator):
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue) # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))

	if timeout is not None:
		t_end = time.time() + timeout
	try:
		while len(threads):
			if timeout is not None:
				timeout = max(0, t_end - time.time())
			try:
				tmp = result.get(timeout)
			except IndexError: # Empty queue after waiting for timeout
				break
			if tmp == GCQueue:
				threads.pop() # which thread is irrelevant - only used as counter
			else:
				yield tmp
	except Exception:
		result.finish()  # shut down the queue before letting the error propagate
		raise
	ec.raise_any(NestedException('Caught exception during threaded chain'))
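
Consuming tchain looks just like itertools.chain, except that items arrive in completion order because every iterable is drained by its own thread. A hypothetical usage (slow_range is made up for illustration):

import time

def slow_range(count, delay):
	for idx in range(count):
		time.sleep(delay)
		yield (delay, idx)

# items from the faster generator interleave ahead of the slower one
for item in tchain([slow_range(3, 0.2), slow_range(3, 0.1)], timeout=5):
	print(item)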
Example #3
    def getBlocks(self, show_stats):
        statsProcessor = NullDataProcessor(config=None, onChange=None)
        if show_stats:
            statsProcessor = self._stats
        if self._cache_block is None:
            ec = ExceptionCollector()

            def getAllBlocks():
                for provider in self._providerList:
                    try:
                        for block in provider.getBlocksNormed():
                            yield block
                    except Exception:
                        ec.collect()
                    if utils.abort():
                        raise DatasetError('Could not retrieve all datasets!')

            try:
                self._cache_block = list(
                    statsProcessor.process(
                        self._datasetProcessor.process(getAllBlocks())))
            except Exception:
                raise DatasetError(
                    'Unable to run datasets through processing pipeline!')
            ec.raise_any(DatasetError('Could not retrieve all datasets!'))
        return self._cache_block
Example #4
def tchain(iterable_iter, timeout=None, max_concurrent=None,
		ex_cls=NestedException, ex_msg='Caught exception during threaded chain'):
	# Combines multiple threaded generators into a single generator
	threads = []
	result = GCQueue()
	exc = ExceptionCollector()
	iterable_list = list(iterable_iter)

	def _start_generators():
		while iterable_list and ((max_concurrent is None) or (len(threads) < max_concurrent)):
			iterable = iterable_list.pop(0)
			threads.append(start_daemon('tchain generator thread (%s)' % repr(iterable)[:50],
				_tchain_thread, exc, iterable, result))
	_start_generators()

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError:  # Empty queue after waiting for timeout
			clear_current_exception()
			break
		if tmp == GCQueue:
			threads.pop()  # which thread is irrelevant - only used as counter
			_start_generators()
		else:
			yield tmp
	exc.raise_any(ex_cls(ex_msg))
Example #5
	def __new__(cls, config, name):
		def _create_backend(wms):
			try:
				backend_cls = WMS.get_class(wms)
			except Exception:
				raise BackendError('Unable to load backend class %s' % repr(wms))
			wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
			return WMS.create_instance(wms, wms_config, name)
		wms = config.get('wms', '')
		if wms:
			return _create_backend(wms)
		exc = ExceptionCollector()
		(wms_search_dict, wms_search_order) = config.get_dict('wms search list',
			default={'sacct': 'SLURM', 'sgepasswd': 'OGE', 'pbs-config': 'PBS', 'qsub': 'OGE',
				'condor_q': 'Condor', 'bsub': 'LSF', 'job_slurm': 'JMS'},
			default_order=['sacct', 'sgepasswd', 'pbs-config', 'qsub', 'condor_q', 'bsub', 'job_slurm'])
		for cmd in wms_search_order:
			try:
				resolve_install_path(cmd)
			except Exception:
				exc.collect()
				continue
			return _create_backend(wms_search_dict[cmd])
		# at this point all backends have failed!
		exc.raise_any(BackendError('No valid local backend found!'))
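
The search loop above relies on resolve_install_path raising when a command is not available, so one collected failure per missing tool is expected and only surfaces if no backend can be found at all. A simplified stand-in with the same contract (an illustration, not the real grid-control helper):

import os

def resolve_install_path(cmd):
	# search PATH for an executable and fail loudly, like the helper used above
	for path in os.environ.get('PATH', '').split(os.pathsep):
		fn = os.path.join(path, cmd)
		if os.path.isfile(fn) and os.access(fn, os.X_OK):
			return fn
	raise OSError('%r not found on PATH' % cmd)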
Example #6
def tchain(iterable_iter, timeout=None, max_concurrent=None,
		ex_cls=NestedException, ex_msg='Caught exception during threaded chain'):
	# Combines multiple threaded generators into a single generator
	threads = []
	result = GCQueue()
	exc = ExceptionCollector()
	iterable_list = list(iterable_iter)

	def _start_generators():
		while iterable_list and ((max_concurrent is None) or (len(threads) < max_concurrent)):
			iterable = iterable_list.pop(0)
			threads.append(start_daemon('tchain generator thread (%s)' % repr(iterable)[:50],
				_tchain_thread, exc, iterable, result))
	_start_generators()

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError:  # Empty queue after waiting for timeout
			clear_current_exception()
			break
		if tmp == GCQueue:
			threads.pop()  # which thread is irrelevant - only used as counter
			_start_generators()
		else:
			yield tmp
	exc.raise_any(ex_cls(ex_msg))
Example #7
	def __init__(self, limit=None):
		self._lock = GCLock()
		self._notify = GCEvent()
		(self._limit, self._queue) = (limit, [])
		(self._token, self._token_time, self._token_desc) = (0, {}, {})
		self._log = logging.getLogger('thread_pool')
		self._exc = ExceptionCollector(self._log)
Example #8
def tchain(iterables, timeout = None):
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator): # TODO: Python 3.5 hiccup related to PEP 479?
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue) # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError: # Empty queue after waiting for timeout
			break
		if tmp == GCQueue:
			threads.pop() # which thread is irrelevant - only used as counter
		else:
			yield tmp
	ec.raise_any(NestedException('Caught exception during threaded chain'))
Example #9
    def __init__(self, config, workflow):
        GUI.__init__(self, config, workflow)
        install_console_reset()
        self._console_lock = GCLock(threading.RLock())  # terminal output lock
        self._exc = ExceptionCollector()
        (self._redraw_thread, self._redraw_shutdown) = (None, False)
        (self._redraw_event,
         self._immediate_redraw_event) = (GCEvent(rlock=True),
                                          GCEvent(rlock=True))
        self._redraw_interval = config.get_float('gui redraw interval',
                                                 0.1,
                                                 on_change=None)
        self._redraw_delay = config.get_float('gui redraw delay',
                                              0.05,
                                              on_change=None)
        element = config.get_composited_plugin('gui element',
                                               'report activity log',
                                               'MultiGUIElement',
                                               cls=GUIElement,
                                               on_change=None,
                                               bind_kwargs={'inherit': True},
                                               pargs=(workflow,
                                                      self._redraw_event,
                                                      sys.stdout))
        self._element = FrameGUIElement(config, 'gui', workflow,
                                        self._redraw_event, sys.stdout,
                                        self._immediate_redraw_event, element)
Example #10
	def __init__(self):
		self._lock = GCLock()
		self._notify = GCEvent()
		self._token = 0
		self._token_time = {}
		self._token_desc = {}
		self._log = logging.getLogger('thread_pool')
		self._ex_collector = ExceptionCollector(self._log)
		def patlist2pathlist(value, mustExist):
			ec = ExceptionCollector()
			for pattern in value:
				try:
					for fn in utils.resolvePaths(pattern, self._configView.pathDict.get('search_paths', []), mustExist, ConfigError):
						yield fn
				except Exception:
					ec.collect()
			ec.raise_any(ConfigError('Error resolving paths'))
Example #12
		def _patlist2pathlist(value, must_exist):
			exc = ExceptionCollector()
			search_path_list = self._config_view.config_vault.get('path:search', [])
			for pattern in value:
				try:
					for fn in resolve_paths(pattern, search_path_list, must_exist, ConfigError):
						yield fn
				except Exception:
					exc.collect()
			exc.raise_any(ConfigError('Error resolving paths'))
Example #13
	def getBlocksInternal(self):
		ec = ExceptionCollector()
		for provider in self.subprovider:
			try:
				for block in provider.getBlocks():
					yield block
			except Exception:
				ec.collect()
			if utils.abort():
				raise DatasetError('Could not retrieve all datasets!')
		ec.raise_any(DatasetError('Could not retrieve all datasets!'))
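
A subtlety in this and the following provider examples: raise_any sits at the end of a generator, so a collected failure only surfaces once the caller exhausts the iterator - all blocks from healthy providers are yielded first. A toy demonstration of that behavior, assuming the collector sketch shown after Example #1:

def _iter_values(provider_list):
	ec = ExceptionCollector()
	for provider in provider_list:
		try:
			for value in provider():
				yield value
		except Exception:
			ec.collect()
	ec.raise_any(RuntimeError('Some providers failed'))

def _good():
	return [1, 2]

def _bad():
	raise ValueError('broken provider')

collected = []
try:
	for value in _iter_values([_good, _bad]):
		collected.append(value)
except RuntimeError:
	print('failure surfaced at exhaustion; yielded %r first' % (collected,))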
Example #14
	def getDatasets(self):
		if self._cache_dataset is None:
			self._cache_dataset = []
			ec = ExceptionCollector()
			for provider in self._providerList:
				try:
					self._cache_dataset.extend(provider.getDatasets())
				except Exception:
					ec.collect()
				if utils.abort():
					raise DatasetError('Could not retrieve all datasets!')
			ec.raise_any(DatasetError('Could not retrieve all datasets!'))
		return self._cache_dataset
Example #15
	def getBlocks(self, silent = True):
		if self._cache_block is None:
			ec = ExceptionCollector()
			def getAllBlocks():
				for provider in self._providerList:
					try:
						for block in provider.getBlocks(silent):
							yield block
					except Exception:
						ec.collect()
					if utils.abort():
						raise DatasetError('Could not retrieve all datasets!')
			self._cache_block = list(self._stats.process(self._datasetProcessor.process(getAllBlocks())))
			ec.raise_any(DatasetError('Could not retrieve all datasets!'))
			logging.getLogger('user').info('Summary: Running over %d block(s) containing %s', *self._stats.getStats())
		return self._cache_block
Example #16
	def __init__(self, limit=None):
		self._lock = GCLock()
		self._notify = GCEvent()
		(self._limit, self._queue) = (limit, [])
		(self._token, self._token_time, self._token_desc) = (0, {}, {})
		self._log = logging.getLogger('thread_pool')
		self._exc = ExceptionCollector(self._log)
Example #17
	def execute(self, wmsIDs): # yields list of (wmsID, job_status, job_info)
		ec = ExceptionCollector()
		for wmsID in wmsIDs:
			try:
				job_info = utils.filterDict(dict(self._status_fun(wmsID)), vF = lambda v: v not in ['', '0'])
				job_info[CheckInfo.RAW_STATUS] = job_info.pop('status', '').lower()
				if 'destination' in job_info:
					try:
						dest_info = job_info['destination'].split('/', 1)
						job_info[CheckInfo.SITE] = dest_info[0].strip()
						job_info[CheckInfo.QUEUE] = dest_info[1].strip()
					except Exception:
						clear_current_exception()
				yield (wmsID, self._status_map.get(job_info[CheckInfo.RAW_STATUS], Job.UNKNOWN), job_info)
			except Exception:
				ec.collect()
				if utils.abort():
					break
		ec.raise_any(BackendError('Encountered errors while checking job status'))
Example #18
	def getBlocks(self, show_stats):
		statsProcessor = NullDataProcessor(config = None, onChange = None)
		if show_stats:
			statsProcessor = self._stats
		if self._cache_block is None:
			ec = ExceptionCollector()
			def getAllBlocks():
				for provider in self._providerList:
					try:
						for block in provider.getBlocksNormed():
							yield block
					except Exception:
						ec.collect()
					if utils.abort():
						raise DatasetError('Could not retrieve all datasets!')
			try:
				self._cache_block = list(statsProcessor.process(self._datasetProcessor.process(getAllBlocks())))
			except Exception:
				raise DatasetError('Unable to run datasets through processing pipeline!')
			ec.raise_any(DatasetError('Could not retrieve all datasets!'))
		return self._cache_block
Example #19
    def execute(self,
                wms_id_list):  # yields list of (wms_id, job_status, job_info)
        exc = ExceptionCollector()
        for wms_id in wms_id_list:
            try:
                job_info = filter_dict(
                    dict(self._status_fun(wms_id)),
                    value_filter=lambda v: v not in ['', '0'])
                job_info[CheckInfo.RAW_STATUS] = job_info.pop('status',
                                                              '').lower()
                if 'destination' in job_info:
                    try:
                        dest_info = job_info['destination'].split('/', 1)
                        job_info[CheckInfo.SITE] = dest_info[0].strip()
                        job_info[CheckInfo.QUEUE] = dest_info[1].strip()
                    except Exception:
                        clear_current_exception()
                yield (wms_id,
                       self._status_map.get(job_info[CheckInfo.RAW_STATUS],
                                            Job.UNKNOWN), job_info)
            except Exception:
                exc.collect()
                if abort():
                    break
        exc.raise_any(
            BackendError('Encountered errors while checking job status'))
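
filter_dict with a value_filter (vF in the older spelling of Example #17) simply drops entries whose values fail the predicate; here it removes empty and '0' status fields before the raw state is mapped. A simplified stand-in (illustrative, not the exact grid-control helper):

def filter_dict(mapping, value_filter=lambda v: True):
	# keep only the entries whose value passes the filter
	return dict((key, value) for (key, value) in mapping.items() if value_filter(value))

job_info = filter_dict({'status': 'RUN', 'exit_code': '0', 'queue': ''},
	value_filter=lambda v: v not in ['', '0'])
print(job_info)  # {'status': 'RUN'}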
Example #20
    def __new__(cls, config, name):
        def _create_backend(wms):
            try:
                backend_cls = WMS.get_class(wms)
            except Exception:
                raise BackendError('Unable to load backend class %s' %
                                   repr(wms))
            wms_config = config.change_view(view_class='TaggedConfigView',
                                            set_classes=[backend_cls])
            return WMS.create_instance(wms, wms_config, name)

        wms = config.get('wms', '')
        if wms:
            return _create_backend(wms)
        exc = ExceptionCollector()
        for cmd, wms in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'),
                         ('pbs-config', 'PBS'), ('qsub', 'OGE'),
                         ('condor_q', 'Condor'), ('bsub', 'LSF'),
                         ('job_slurm', 'JMS')]:
            try:
                resolve_install_path(cmd)
            except Exception:
                exc.collect()
                continue
            return _create_backend(wms)
        # at this point all backends have failed!
        exc.raise_any(BackendError('No valid local backend found!'))
Example #21
    def getBlocks(self, silent=True):
        if self._cache_block is None:
            ec = ExceptionCollector()

            def getAllBlocks():
                for provider in self._providerList:
                    try:
                        for block in provider.getBlocks(silent):
                            yield block
                    except Exception:
                        ec.collect()
                    if utils.abort():
                        raise DatasetError('Could not retrieve all datasets!')

            self._cache_block = list(
                self._stats.process(
                    self._datasetProcessor.process(getAllBlocks())))
            ec.raise_any(DatasetError('Could not retrieve all datasets!'))
            logging.getLogger('user').info(
                'Summary: Running over %s distributed over %d blocks.',
                *self._stats.getStats())
        return self._cache_block
Example #22
	def __new__(cls, config, name):
		def _create_backend(wms):
			try:
				backend_cls = WMS.get_class(wms)
			except Exception:
				raise BackendError('Unable to load backend class %s' % repr(wms))
			wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
			return WMS.create_instance(wms, wms_config, name)
		wms = config.get('wms', '')
		if wms:
			return _create_backend(wms)
		exc = ExceptionCollector()
		for cmd, wms in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
				('qsub', 'OGE'), ('condor_q', 'Condor'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]:
			try:
				resolve_install_path(cmd)
			except Exception:
				exc.collect()
				continue
			return _create_backend(wms)
		# at this point all backends have failed!
		exc.raise_any(BackendError('No valid local backend found!'))
Example #23
		def patlist2pathlist(value, mustExist):
			ec = ExceptionCollector()
			for pattern in value:
				try:
					for fn in utils.resolvePaths(pattern, self._configView.pathDict.get('search_paths', []), mustExist, ConfigError):
						yield fn
				except Exception:
					ec.collect()
			ec.raise_any(ConfigError('Error resolving paths'))
Example #24
	def get_dataset_name_list(self):
		if self._cache_dataset is None:
			self._cache_dataset = set()
			exc = ExceptionCollector()
			for provider in self._provider_list:
				try:
					self._cache_dataset.update(provider.get_dataset_name_list())
				except Exception:
					exc.collect()
			exc.raise_any(DatasetError('Could not retrieve all datasets!'))
		return list(self._cache_dataset)
Example #25
    def _patlist2pathlist(value, must_exist):
        exc = ExceptionCollector()
        search_path_list = self._config_view.config_vault.get(
            'path:search', [])
        for pattern in value:
            try:
                for fn in resolve_paths(pattern, search_path_list,
                                        must_exist, ConfigError):
                    yield fn
            except Exception:
                exc.collect()
        exc.raise_any(ConfigError('Error resolving paths'))
Example #26
    def getDatasets(self):
        if self._cache_dataset is None:
            self._cache_dataset = []
            ec = ExceptionCollector()
            for provider in self._providerList:
                try:
                    self._cache_dataset.extend(provider.getDatasets())
                except Exception:
                    ec.collect()
                if utils.abort():
                    raise DatasetError('Could not retrieve all datasets!')
            ec.raise_any(DatasetError('Could not retrieve all datasets!'))
        return self._cache_dataset
Example #27
	def __new__(cls, config, name):
		ec = ExceptionCollector()
		for cmd, wms in [('sgepasswd', 'OGE'), ('pbs-config', 'PBS'), ('qsub', 'OGE'), ('bsub', 'LSF'), ('job_slurm', 'SLURM')]:
			try:
				utils.resolveInstallPath(cmd)
			except Exception:
				ec.collect()
				continue
			try:
				wmsCls = WMS.getClass(wms)
			except Exception:
				raise BackendError('Unable to load backend class %s' % repr(wms))
			config_wms = config.changeView(viewClass = 'TaggedConfigView', setClasses = [wmsCls])
			return WMS.createInstance(wms, config_wms, name)
		ec.raise_any(BackendError('No valid local backend found!')) # at this point all backends have failed!
Example #28
    def __new__(cls, config, name):
        def _create_backend(wms):
            try:
                backend_cls = WMS.get_class(wms)
            except Exception:
                raise BackendError('Unable to load backend class %s' %
                                   repr(wms))
            wms_config = config.change_view(view_class='TaggedConfigView',
                                            set_classes=[backend_cls])
            return WMS.create_instance(wms, wms_config, name)

        wms = config.get('wms', '')
        if wms:
            return _create_backend(wms)
        exc = ExceptionCollector()
        (wms_search_dict,
         wms_search_order) = config.get_dict('wms search list',
                                             default={
                                                 'sacct': 'SLURM',
                                                 'sgepasswd': 'OGE',
                                                 'pbs-config': 'PBS',
                                                 'qsub': 'OGE',
                                                 'condor_q': 'Condor',
                                                 'bsub': 'LSF',
                                                 'job_slurm': 'JMS'
                                             },
                                             default_order=[
                                                 'sacct', 'sgepasswd',
                                                 'pbs-config', 'qsub',
                                                 'condor_q', 'bsub',
                                                 'job_slurm'
                                             ])
        for cmd in wms_search_order:
            try:
                resolve_install_path(cmd)
            except Exception:
                exc.collect()
                continue
            return _create_backend(wms_search_dict[cmd])
        # at this point all backends have failed!
        exc.raise_any(BackendError('No valid local backend found!'))
Example #29
class ANSIGUI(GUI):
    alias_list = ['ansi']

    def __new__(cls, config, workflow):
        if is_dumb_terminal():
            return GUI.create_instance('BasicConsoleGUI', config, workflow)
        return GUI.__new__(cls)

    def __init__(self, config, workflow):
        GUI.__init__(self, config, workflow)
        install_console_reset()
        self._console_lock = GCLock(threading.RLock())  # terminal output lock
        self._exc = ExceptionCollector()
        (self._redraw_thread, self._redraw_shutdown) = (None, False)
        (self._redraw_event,
         self._immediate_redraw_event) = (GCEvent(rlock=True),
                                          GCEvent(rlock=True))
        self._redraw_interval = config.get_float('gui redraw interval',
                                                 0.1,
                                                 on_change=None)
        self._redraw_delay = config.get_float('gui redraw delay',
                                              0.05,
                                              on_change=None)
        element = config.get_composited_plugin('gui element',
                                               'report activity log',
                                               'MultiGUIElement',
                                               cls=GUIElement,
                                               on_change=None,
                                               bind_kwargs={'inherit': True},
                                               pargs=(workflow,
                                                      self._redraw_event,
                                                      sys.stdout))
        self._element = FrameGUIElement(config, 'gui', workflow,
                                        self._redraw_event, sys.stdout,
                                        self._immediate_redraw_event, element)

    def end_interface(self):  # lots of try ... except ... finally - for clean console state restore
        def _end_interface():
            try:
                self._finish_drawing()
            finally:
                GCStreamHandler.set_global_lock()
                Console.reset_console()

        rethrow(GUIException('GUI shutdown exception'), _end_interface)
        self._exc.raise_any(GUIException('GUI drawing exception'))

    def start_interface(self):
        GCStreamHandler.set_global_lock(self._console_lock)
        with_lock(self._console_lock, self._element.draw_startup)
        self._redraw_shutdown = False  # start redraw thread
        self._redraw_thread = start_daemon('GUI draw thread', self._redraw)

    def _finish_drawing(self):
        def _final_draw():
            try:
                self._element.make_dirty()
            finally:
                self._redraw_shutdown = True  # stop redraw thread
                self._redraw_event.set()

        try:
            try:
                with_lock(self._console_lock, _final_draw)  # last redraw
            finally:
                if self._redraw_thread:
                    self._redraw_thread.join(5 + self._redraw_interval)
        finally:
            with_lock(self._console_lock,
                      self._element.draw_finish)  # draw finish

    def _redraw(self):
        try:
            while not self._redraw_shutdown:
                self._redraw_event.wait(timeout=self._redraw_interval)
                self._immediate_redraw_event.wait(timeout=self._redraw_delay)
                with_lock(self._console_lock, self._element.redraw)
                self._immediate_redraw_event.clear()
                self._redraw_event.clear()
        except Exception:
            self._exc.collect()
            abort(True)
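
The GUI demonstrates the cross-thread use of the collector: the redraw thread stores its failure via collect() in _redraw, and end_interface rethrows it on the main thread wrapped as a GUIException. The handoff in isolation, again assuming the collector sketch shown after Example #1:

import threading

exc = ExceptionCollector()

def _background_redraw():
	try:
		raise IOError('redraw failed')  # stand-in for the failing redraw call
	except Exception:
		exc.collect()

worker = threading.Thread(target=_background_redraw)
worker.start()
worker.join()
exc.raise_any(RuntimeError('GUI drawing exception'))  # rethrown on the main thread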
Example #30
class GCThreadPool(object):
	# Class to manage a collection of threads
	def __init__(self, limit=None):
		self._lock = GCLock()
		self._notify = GCEvent()
		(self._limit, self._queue) = (limit, [])
		(self._token, self._token_time, self._token_desc) = (0, {}, {})
		self._log = logging.getLogger('thread_pool')
		self._exc = ExceptionCollector(self._log)

	def start_daemon(self, desc, fun, *args, **kwargs):
		self._queue.append((desc, fun, args, kwargs))
		with_lock(self._lock, self._queue_update)

	def wait_and_drop(self, timeout=None):
		while True:
			result = with_lock(self._lock, self._monitor_token, timeout)
			if result is not None:
				return result
			t_current = time.time()
			self._notify.wait(timeout)  # wait for thread to finish and adapt timeout for next round
			if timeout is not None:
				timeout -= time.time() - t_current
			with_lock(self._lock, self._queue_update)

	def _collect_exc(self, token, exc_info):
		self._exc.collect(logging.ERROR, 'Exception in thread %r',
			self._token_desc[token], exc_info=exc_info)

	def _monitor_token(self, timeout):
		t_current = time.time()
		# discard stale threads
		for token in list(self._token_time):
			if timeout and (t_current - self._token_time.get(token, 0) > timeout):
				self._unregister_token(token)
		if not self._token_time:  # no active threads
			return True
		# drop all threads if timeout is reached
		if (timeout is not None) and (timeout <= 0):
			self._token_time = {}
			self._token_desc = {}
			return False

	def _queue_update(self):
		while self._queue and ((self._limit is None) or (len(self._token_time) < self._limit)):
			(desc, fun, args, kwargs) = self._queue.pop(0)
			_start_thread(desc=desc, daemon=True, fun=self._run_thread,
				args=(self._register_token(desc), fun, args, kwargs), kwargs={})
		self._notify.clear()

	def _register_token(self, desc):
		self._token += 1
		self._token_time[self._token] = time.time()
		self._token_desc[self._token] = desc
		return self._token

	def _run_thread(self, token, fun, args, kwargs):
		trace_fun = get_trace_fun()
		if trace_fun:
			sys.settrace(trace_fun)
		try:
			fun(*args, **kwargs)
		except Exception:
			with_lock(self._lock, self._collect_exc, token, sys.exc_info())
		with_lock(self._lock, self._unregister_token, token)
		with_lock(self._lock, self._notify.set)

	def _unregister_token(self, token):
		self._token_time.pop(token, None)
		self._token_desc.pop(token, None)
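
Driving this pool is straightforward: queue work with start_daemon and block in wait_and_drop, which returns True once all workers are done and False if the timeout dropped stragglers; worker exceptions end up in the pool's collector. A hypothetical usage (work_fn is made up for illustration):

import time

def work_fn(idx):
	time.sleep(0.1 * idx)

pool = GCThreadPool(limit=4)  # at most four worker threads at a time
for idx in range(10):
	pool.start_daemon('worker %d' % idx, work_fn, idx)
if not pool.wait_and_drop(timeout=60):
	print('some workers were still running after the timeout and were dropped')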
Example #31
	def get_block_list_cached(self, show_stats):
		exc = ExceptionCollector()
		result = self._create_block_cache(show_stats, lambda: self._iter_all_blocks(exc))
		exc.raise_any(DatasetError('Could not retrieve all datasets!'))
		return result
Example #32
class GCThreadPool(object):
	# Class to manage a collection of threads
	def __init__(self, limit=None):
		self._lock = GCLock()
		self._notify = GCEvent()
		(self._limit, self._queue) = (limit, [])
		(self._token, self._token_time, self._token_desc) = (0, {}, {})
		self._log = logging.getLogger('thread_pool')
		self._exc = ExceptionCollector(self._log)

	def start_daemon(self, desc, fun, *args, **kwargs):
		self._queue.append((desc, fun, args, kwargs))
		with_lock(self._lock, self._queue_update)

	def wait_and_drop(self, timeout=None):
		while True:
			result = with_lock(self._lock, self._monitor_token, timeout)
			if result is not None:
				return result
			t_current = time.time()
			self._notify.wait(timeout)  # wait for thread to finish and adapt timeout for next round
			if timeout is not None:
				timeout -= time.time() - t_current
			with_lock(self._lock, self._queue_update)

	def _collect_exc(self, token, exc_info):
		self._exc.collect(logging.ERROR, 'Exception in thread %r',
			self._token_desc[token], exc_info=exc_info)

	def _monitor_token(self, timeout):
		t_current = time.time()
		# discard stale threads
		for token in list(self._token_time):
			if timeout and (t_current - self._token_time.get(token, 0) > timeout):
				self._unregister_token(token)
		if not self._token_time:  # no active threads
			return True
		# drop all threads if timeout is reached
		if (timeout is not None) and (timeout <= 0):
			self._token_time = {}
			self._token_desc = {}
			return False

	def _queue_update(self):
		while self._queue and ((self._limit is None) or (len(self._token_time) < self._limit)):
			(desc, fun, args, kwargs) = self._queue.pop(0)
			_start_thread(desc=desc, daemon=True, fun=self._run_thread,
				args=(self._register_token(desc), fun, args, kwargs), kwargs={})
		self._notify.clear()

	def _register_token(self, desc):
		self._token += 1
		self._token_time[self._token] = time.time()
		self._token_desc[self._token] = desc
		return self._token

	def _run_thread(self, token, fun, args, kwargs):
		trace_fun = get_trace_fun()
		if trace_fun:
			sys.settrace(trace_fun)
		try:
			fun(*args, **kwargs)
		except Exception:
			with_lock(self._lock, self._collect_exc, token, get_current_exception())
		with_lock(self._lock, self._unregister_token, token)
		with_lock(self._lock, self._notify.set)

	def _unregister_token(self, token):
		self._token_time.pop(token, None)
		self._token_desc.pop(token, None)