Example #1
    def __new__(cls, config, name):
        def _create_backend(wms):
            try:
                backend_cls = WMS.get_class(wms)
            except Exception:
                raise BackendError('Unable to load backend class %s' %
                                   repr(wms))
            wms_config = config.change_view(view_class='TaggedConfigView',
                                            set_classes=[backend_cls])
            return WMS.create_instance(wms, wms_config, name)

        wms = config.get('wms', '')
        if wms:
            return _create_backend(wms)
        exc = ExceptionCollector()
        for cmd, wms in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'),
                         ('pbs-config', 'PBS'), ('qsub', 'OGE'),
                         ('condor_q', 'Condor'), ('bsub', 'LSF'),
                         ('job_slurm', 'JMS')]:
            try:
                resolve_install_path(cmd)
            except Exception:
                exc.collect()
                continue
            return _create_backend(wms)
        # at this point all backends have failed!
        exc.raise_any(BackendError('No valid local backend found!'))
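Every example on this page relies on the same two-method contract: collect() records the exception currently being handled and lets the loop continue, while raise_any(exc) raises the given exception at the end if anything was recorded. A minimal sketch of that contract (a stand-in for illustration, not grid-control's actual hpfwk implementation, which also chains the collected tracebacks into the raised exception):

import sys

class ExceptionCollector(object):
	# Minimal stand-in: record exceptions as they occur, defer raising
	def __init__(self, log=None):
		(self._exc_info_list, self._log) = ([], log)

	def collect(self):
		# Must be called from an except block, so sys.exc_info() is populated
		self._exc_info_list.append(sys.exc_info())
		if self._log is not None:
			self._log.debug('collected exception', exc_info=True)

	def raise_any(self, exc):
		# Only raise if at least one exception was collected
		if self._exc_info_list:
			self._exc_info_list = []
			raise exc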
Example #2
 def __init__(self, config, workflow):
     GUI.__init__(self, config, workflow)
     install_console_reset()
     self._console_lock = GCLock(threading.RLock())  # terminal output lock
     self._exc = ExceptionCollector()
     (self._redraw_thread, self._redraw_shutdown) = (None, False)
     (self._redraw_event,
      self._immediate_redraw_event) = (GCEvent(rlock=True),
                                       GCEvent(rlock=True))
     self._redraw_interval = config.get_float('gui redraw interval',
                                              0.1,
                                              on_change=None)
     self._redraw_delay = config.get_float('gui redraw delay',
                                           0.05,
                                           on_change=None)
     element = config.get_composited_plugin('gui element',
                                            'report activity log',
                                            'MultiGUIElement',
                                            cls=GUIElement,
                                            on_change=None,
                                            bind_kwargs={'inherit': True},
                                            pargs=(workflow,
                                                   self._redraw_event,
                                                   sys.stdout))
     self._element = FrameGUIElement(config, 'gui', workflow,
                                     self._redraw_event, sys.stdout,
                                     self._immediate_redraw_event, element)
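The constructor above only wires up the two redraw events and the interval/delay settings; the loop that consumes them is not shown on this page. A generic sketch of how such a loop could combine them, written with plain standard-library primitives rather than grid-control's GCEvent (names and structure are illustrative assumptions):

import time

def redraw_loop(redraw_event, immediate_redraw_event, shutdown_event, draw):
	(interval, delay) = (0.1, 0.05)  # cf. 'gui redraw interval' / 'gui redraw delay'
	while not shutdown_event.is_set():
		# Wake up on an explicit redraw request or after the redraw interval
		triggered = redraw_event.wait(timeout=interval)
		if triggered and not immediate_redraw_event.is_set():
			time.sleep(delay)  # coalesce bursts of updates into a single redraw
		redraw_event.clear()
		immediate_redraw_event.clear()
		draw()  # the actual render callable, e.g. the GUI element's redraw
# events would be threading.Event() instances in this sketch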
Example #3
def tchain(iterables, timeout=None):  # Combines multiple threaded generators into a single generator
	import time
	from grid_control.utils.thread_tools import start_thread, GCQueue
	from hpfwk import ExceptionCollector, NestedException  # exception helpers used below
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator):
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue) # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))

	if timeout is not None:
		t_end = time.time() + timeout
	try:
		while len(threads):
			if timeout is not None:
				timeout = max(0, t_end - time.time())
			try:
				tmp = result.get(timeout)
			except IndexError: # Empty queue after waiting for timeout
				break
			if tmp == GCQueue:
				threads.pop() # which thread is irrelevant - only used as counter
			else:
				yield tmp
	except Exception:
		result.finish()
	ec.raise_any(NestedException('Caught exception during threaded chain'))
Example #4
def tchain(iterables, timeout=None):
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator):  # TODO: Python 3.5 hiccup related to PEP 479?
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue) # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError: # Empty queue after waiting for timeout
			break
		if tmp == GCQueue:
			threads.pop() # which thread is irrelevant - only used as counter
		else:
			yield tmp
	ec.raise_any(NestedException('Caught exception during threaded chain'))
Example #5
    def getBlocks(self, show_stats):
        statsProcessor = NullDataProcessor(config=None, onChange=None)
        if show_stats:
            statsProcessor = self._stats
        if self._cache_block is None:
            ec = ExceptionCollector()

            def getAllBlocks():
                for provider in self._providerList:
                    try:
                        for block in provider.getBlocksNormed():
                            yield block
                    except Exception:
                        ec.collect()
                    if utils.abort():
                        raise DatasetError('Could not retrieve all datasets!')

            try:
                self._cache_block = list(
                    statsProcessor.process(
                        self._datasetProcessor.process(getAllBlocks())))
            except Exception:
                raise DatasetError(
                    'Unable to run datasets through processing pipeline!')
            ec.raise_any(DatasetError('Could not retrieve all datasets!'))
        return self._cache_block
Example #6
def tchain(iterable_iter, timeout=None, max_concurrent=None,
		ex_cls=NestedException, ex_msg='Caught exception during threaded chain'):
	# Combines multiple, threaded generators into single generator
	threads = []
	result = GCQueue()
	exc = ExceptionCollector()
	iterable_list = list(iterable_iter)

	def _start_generators():
		while iterable_list and ((max_concurrent is None) or (len(threads) < max_concurrent)):
			iterable = iterable_list.pop(0)
			threads.append(start_daemon('tchain generator thread (%s)' % repr(iterable)[:50],
				_tchain_thread, exc, iterable, result))
	_start_generators()

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError:  # Empty queue after waiting for timeout
			clear_current_exception()
			break
		if tmp == GCQueue:
			threads.pop()  # which thread is irrelevant - only used as counter
			_start_generators()
		else:
			yield tmp
	exc.raise_any(ex_cls(ex_msg))
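A small usage sketch for tchain: each iterable is consumed in its own daemon thread and items are yielded in arrival order on the shared queue, so output from the producers interleaves (the producer functions below are made up for illustration):

import time

def _slow_source():  # hypothetical producer
	for idx in range(3):
		time.sleep(0.1)
		yield 'slow-%d' % idx

def _fast_source():  # hypothetical producer
	for idx in range(3):
		yield 'fast-%d' % idx

# 'fast-*' items typically arrive before the remaining 'slow-*' items
for item in tchain([_slow_source(), _fast_source()], timeout=5, max_concurrent=2):
	print(item)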
Example #7
	def __init__(self, limit=None):
		self._lock = GCLock()
		self._notify = GCEvent()
		(self._limit, self._queue) = (limit, [])
		(self._token, self._token_time, self._token_desc) = (0, {}, {})
		self._log = logging.getLogger('thread_pool')
		self._exc = ExceptionCollector(self._log)
Example #8
 def execute(self,
             wms_id_list):  # yields list of (wms_id, job_status, job_info)
     exc = ExceptionCollector()
     for wms_id in wms_id_list:
         try:
             job_info = filter_dict(
                 dict(self._status_fun(wms_id)),
                 value_filter=lambda v: v not in ['', '0'])
             job_info[CheckInfo.RAW_STATUS] = job_info.pop('status',
                                                           '').lower()
             if 'destination' in job_info:
                 try:
                     dest_info = job_info['destination'].split('/', 1)
                     job_info[CheckInfo.SITE] = dest_info[0].strip()
                     job_info[CheckInfo.QUEUE] = dest_info[1].strip()
                 except Exception:
                     clear_current_exception()
             yield (wms_id,
                    self._status_map.get(job_info[CheckInfo.RAW_STATUS],
                                         Job.UNKNOWN), job_info)
         except Exception:
             exc.collect()
             if abort():
                 break
     exc.raise_any(
         BackendError('Encountered errors while checking job status'))
Example #9
	def __init__(self):
		self._lock = GCLock()
		self._notify = GCEvent()
		self._token = 0
		self._token_time = {}
		self._token_desc = {}
		self._log = logging.getLogger('thread_pool')
		self._ex_collector = ExceptionCollector(self._log)
Example #10
		def patlist2pathlist(value, mustExist):
			ec = ExceptionCollector()
			for pattern in value:
				try:
					for fn in utils.resolvePaths(pattern, self._configView.pathDict.get('search_paths', []), mustExist, ConfigError):
						yield fn
				except Exception:
					ec.collect()
			ec.raise_any(ConfigError('Error resolving paths'))
Example #11
	def get_dataset_name_list(self):
		if self._cache_dataset is None:
			self._cache_dataset = set()
			exc = ExceptionCollector()
			for provider in self._provider_list:
				try:
					self._cache_dataset.update(provider.get_dataset_name_list())
				except Exception:
					exc.collect()
			exc.raise_any(DatasetError('Could not retrieve all datasets!'))
		return list(self._cache_dataset)
Example #12
 def _patlist2pathlist(value, must_exist):
     exc = ExceptionCollector()
     search_path_list = self._config_view.config_vault.get(
         'path:search', [])
     for pattern in value:
         try:
             for fn in resolve_paths(pattern, search_path_list,
                                     must_exist, ConfigError):
                 yield fn
         except Exception:
             exc.collect()
     exc.raise_any(ConfigError('Error resolving paths'))
Example #13
 def getDatasets(self):
     if self._cache_dataset is None:
         self._cache_dataset = []
         ec = ExceptionCollector()
         for provider in self._providerList:
             try:
                 self._cache_dataset.extend(provider.getDatasets())
             except Exception:
                 ec.collect()
             if utils.abort():
                 raise DatasetError('Could not retrieve all datasets!')
         ec.raise_any(DatasetError('Could not retrieve all datasets!'))
     return self._cache_dataset
Example #14
	def __new__(cls, config, name):
		ec = ExceptionCollector()
		for cmd, wms in [('sgepasswd', 'OGE'), ('pbs-config', 'PBS'), ('qsub', 'OGE'), ('bsub', 'LSF'), ('job_slurm', 'SLURM')]:
			try:
				utils.resolveInstallPath(cmd)
			except Exception:
				ec.collect()
				continue
			try:
				wmsCls = WMS.getClass(wms)
			except Exception:
				raise BackendError('Unable to load backend class %s' % repr(wms))
			config_wms = config.changeView(viewClass='TaggedConfigView', setClasses=[wmsCls])
			return WMS.createInstance(wms, config_wms, name)
		ec.raise_any(BackendError('No valid local backend found!')) # at this point all backends have failed!
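Examples #1 and #14 auto-detect the batch system by probing for a characteristic command of each workload management system; the probe only has to raise when the command is missing. A rough stand-in for utils.resolveInstallPath built on a plain $PATH scan (assumption: the real grid-control helper does more, e.g. caching and user-configurable search paths):

import os

def resolve_install_path(cmd):
	# Return the full path of cmd, raising if no executable is found on $PATH
	for dn in os.environ.get('PATH', '').split(os.pathsep):
		fn = os.path.join(dn, cmd)
		if os.path.isfile(fn) and os.access(fn, os.X_OK):
			return fn
	raise Exception('%r not found in $PATH' % cmd)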
Example #15
    def __new__(cls, config, name):
        def _create_backend(wms):
            try:
                backend_cls = WMS.get_class(wms)
            except Exception:
                raise BackendError('Unable to load backend class %s' %
                                   repr(wms))
            wms_config = config.change_view(view_class='TaggedConfigView',
                                            set_classes=[backend_cls])
            return WMS.create_instance(wms, wms_config, name)

        wms = config.get('wms', '')
        if wms:
            return _create_backend(wms)
        exc = ExceptionCollector()
        (wms_search_dict,
         wms_search_order) = config.get_dict('wms search list',
                                             default={
                                                 'sacct': 'SLURM',
                                                 'sgepasswd': 'OGE',
                                                 'pbs-config': 'PBS',
                                                 'qsub': 'OGE',
                                                 'condor_q': 'Condor',
                                                 'bsub': 'LSF',
                                                 'job_slurm': 'JMS'
                                             },
                                             default_order=[
                                                 'sacct', 'sgepasswd',
                                                 'pbs-config', 'qsub',
                                                 'condor_q', 'bsub',
                                                 'job_slurm'
                                             ])
        for cmd in wms_search_order:
            try:
                resolve_install_path(cmd)
            except Exception:
                exc.collect()
                continue
            return _create_backend(wms_search_dict[cmd])
        # at this point all backends have failed!
        exc.raise_any(BackendError('No valid local backend found!'))
Example #16
    def getBlocks(self, silent=True):
        if self._cache_block is None:
            ec = ExceptionCollector()

            def getAllBlocks():
                for provider in self._providerList:
                    try:
                        for block in provider.getBlocks(silent):
                            yield block
                    except Exception:
                        ec.collect()
                    if utils.abort():
                        raise DatasetError('Could not retrieve all datasets!')

            self._cache_block = list(
                self._stats.process(
                    self._datasetProcessor.process(getAllBlocks())))
            ec.raise_any(DatasetError('Could not retrieve all datasets!'))
            logging.getLogger('user').info(
                'Summary: Running over %s distributed over %d blocks.',
                *self._stats.getStats())
        return self._cache_block
Example #17
	def get_block_list_cached(self, show_stats):
		exc = ExceptionCollector()
		result = self._create_block_cache(show_stats, lambda: self._iter_all_blocks(exc))
		exc.raise_any(DatasetError('Could not retrieve all datasets!'))
		return result
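One subtlety shared by the block-listing examples (#5, #10, #16 and the one above): when collect() happens inside a generator, raise_any() only sees the collected errors after the generator has been fully consumed, which is why the consuming list(...) call must come before raise_any(). A condensed illustration of that ordering (the failing source is fabricated):

def _iter_items(source_list, exc):
	for source in source_list:
		try:
			for item in source():
				yield item
		except Exception:
			exc.collect()  # remember the failure, keep draining the other sources

exc = ExceptionCollector()
# Consume the generator completely *before* checking for collected errors
items = list(_iter_items([lambda: iter('ab'), lambda: 1 // 0], exc))
assert items == ['a', 'b']
exc.raise_any(Exception('Could not retrieve all items!'))  # raises here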