def getBlocks(self, show_stats):
	statsProcessor = NullDataProcessor(config=None, onChange=None)
	if show_stats:
		statsProcessor = self._stats
	if self._cache_block is None:
		ec = ExceptionCollector()

		def getAllBlocks():
			for provider in self._providerList:
				try:
					for block in provider.getBlocksNormed():
						yield block
				except Exception:
					ec.collect()
				if utils.abort():
					raise DatasetError('Could not retrieve all datasets!')

		try:
			self._cache_block = list(statsProcessor.process(
				self._datasetProcessor.process(getAllBlocks())))
		except Exception:
			raise DatasetError('Unable to run datasets through processing pipeline!')
		ec.raise_any(DatasetError('Could not retrieve all datasets!'))
	return self._cache_block
def __new__(cls, config, name):
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return _create_backend(wms)
	exc = ExceptionCollector()
	(wms_search_dict, wms_search_order) = config.get_dict('wms search list',
		default={'sacct': 'SLURM', 'sgepasswd': 'OGE', 'pbs-config': 'PBS', 'qsub': 'OGE',
			'condor_q': 'Condor', 'bsub': 'LSF', 'job_slurm': 'JMS'},
		default_order=['sacct', 'sgepasswd', 'pbs-config', 'qsub', 'condor_q', 'bsub', 'job_slurm'])
	for cmd in wms_search_order:
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			continue
		return _create_backend(wms_search_dict[cmd])
	# at this point all backends have failed!
	exc.raise_any(BackendError('No valid local backend found!'))
def tchain(iterable_iter, timeout=None, max_concurrent=None,
		ex_cls=NestedException, ex_msg='Caught exception during threaded chain'):
	# Combines multiple, threaded generators into single generator
	threads = []
	result = GCQueue()
	exc = ExceptionCollector()
	iterable_list = list(iterable_iter)

	def _start_generators():
		while iterable_list and ((max_concurrent is None) or (len(threads) < max_concurrent)):
			iterable = iterable_list.pop(0)
			threads.append(start_daemon('tchain generator thread (%s)' % repr(iterable)[:50],
				_tchain_thread, exc, iterable, result))
	_start_generators()

	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError:  # Empty queue after waiting for timeout
			clear_current_exception()
			break
		if tmp == GCQueue:
			threads.pop()  # which thread is irrelevant - only used as counter
			_start_generators()
		else:
			yield tmp
	exc.raise_any(ex_cls(ex_msg))
def tchain(iterables, timeout=None):
	# Combines multiple, threaded generators into single generator
	import time
	from grid_control.utils.thread_tools import start_thread, GCQueue
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator):
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue)  # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))
	if timeout is not None:
		t_end = time.time() + timeout
	try:
		while len(threads):
			if timeout is not None:
				timeout = max(0, t_end - time.time())
			try:
				tmp = result.get(timeout)
			except IndexError:  # Empty queue after waiting for timeout
				break
			if tmp == GCQueue:
				threads.pop()  # which thread is irrelevant - only used as counter
			else:
				yield tmp
	except Exception:
		result.finish()
	ec.raise_any(NestedException('Caught exception during threaded chain'))
def tchain(iterables, timeout=None):
	threads = []
	result = GCQueue()
	ec = ExceptionCollector()
	for idx, it in enumerate(iterables):
		def generator_thread(iterator):
			# TODO: Python 3.5 hiccup related to pep 479?
			try:
				try:
					for item in iterator:
						result.put(item)
				finally:
					result.put(GCQueue)  # Use GCQueue as end-of-generator marker
			except Exception:
				ec.collect()
		threads.append(start_thread('generator thread %d' % idx, generator_thread, it))
	if timeout is not None:
		t_end = time.time() + timeout
	while len(threads):
		if timeout is not None:
			timeout = max(0, t_end - time.time())
		try:
			tmp = result.get(timeout)
		except IndexError:  # Empty queue after waiting for timeout
			break
		if tmp == GCQueue:
			threads.pop()  # which thread is irrelevant - only used as counter
		else:
			yield tmp
	ec.raise_any(NestedException('Caught exception during threaded chain'))
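# A minimal usage sketch for the tchain variants above. The two generators here
# are hypothetical; tchain, start_thread and GCQueue are assumed to be importable
# from grid_control.utils.thread_tools, as in the snippets themselves.
def _count_up():
	for value in range(3):
		yield value

def _count_down():
	for value in range(3, 0, -1):
		yield value

# Items arrive interleaved as the generator threads produce them; after the
# 10 second timeout the chain stops waiting and raises any collected exceptions.
for item in tchain([_count_up(), _count_down()], timeout=10):
	print(item)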
def __new__(cls, config, name):
	def createWMS(wms):
		try:
			wmsCls = WMS.getClass(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.changeView(viewClass='TaggedConfigView', setClasses=[wmsCls])
		return WMS.createInstance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return createWMS(wms)
	ec = ExceptionCollector()
	for cmd, wms in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
			('qsub', 'OGE'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]:
		try:
			utils.resolveInstallPath(cmd)
		except Exception:
			ec.collect()
			continue
		return createWMS(wms)
	ec.raise_any(BackendError('No valid local backend found!'))  # at this point all backends have failed!
def patlist2pathlist(value, mustExist):
	ec = ExceptionCollector()
	for pattern in value:
		try:
			for fn in utils.resolvePaths(pattern,
					self._configView.pathDict.get('search_paths', []), mustExist, ConfigError):
				yield fn
		except Exception:
			ec.collect()
	ec.raise_any(ConfigError('Error resolving paths'))
def _patlist2pathlist(value, must_exist):
	exc = ExceptionCollector()
	search_path_list = self._config_view.config_vault.get('path:search', [])
	for pattern in value:
		try:
			for fn in resolve_paths(pattern, search_path_list, must_exist, ConfigError):
				yield fn
		except Exception:
			exc.collect()
	exc.raise_any(ConfigError('Error resolving paths'))
def get_dataset_name_list(self):
	if self._cache_dataset is None:
		self._cache_dataset = set()
		exc = ExceptionCollector()
		for provider in self._provider_list:
			try:
				self._cache_dataset.update(provider.get_dataset_name_list())
			except Exception:
				exc.collect()
		exc.raise_any(DatasetError('Could not retrieve all datasets!'))
	return list(self._cache_dataset)
def getBlocksInternal(self):
	ec = ExceptionCollector()
	for provider in self.subprovider:
		try:
			for block in provider.getBlocks():
				yield block
		except Exception:
			ec.collect()
		if utils.abort():
			raise DatasetError('Could not retrieve all datasets!')
	ec.raise_any(DatasetError('Could not retrieve all datasets!'))
def getDatasets(self):
	if self._cache_dataset is None:
		self._cache_dataset = []
		ec = ExceptionCollector()
		for provider in self._providerList:
			try:
				self._cache_dataset.extend(provider.getDatasets())
			except Exception:
				ec.collect()
			if utils.abort():
				raise DatasetError('Could not retrieve all datasets!')
		ec.raise_any(DatasetError('Could not retrieve all datasets!'))
	return self._cache_dataset
def __new__(cls, config, name):
	ec = ExceptionCollector()
	for cmd, wms in [('sgepasswd', 'OGE'), ('pbs-config', 'PBS'), ('qsub', 'OGE'),
			('bsub', 'LSF'), ('job_slurm', 'SLURM')]:
		try:
			utils.resolveInstallPath(cmd)
		except Exception:
			ec.collect()
			continue
		try:
			wmsCls = WMS.getClass(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		config_wms = config.changeView(viewClass='TaggedConfigView', setClasses=[wmsCls])
		return WMS.createInstance(wms, config_wms, name)
	ec.raise_any(BackendError('No valid local backend found!'))  # at this point all backends have failed!
def getBlocks(self, silent=True):
	if self._cache_block is None:
		ec = ExceptionCollector()

		def getAllBlocks():
			for provider in self._providerList:
				try:
					for block in provider.getBlocks(silent):
						yield block
				except Exception:
					ec.collect()
				if utils.abort():
					raise DatasetError('Could not retrieve all datasets!')

		self._cache_block = list(self._stats.process(self._datasetProcessor.process(getAllBlocks())))
		ec.raise_any(DatasetError('Could not retrieve all datasets!'))
		logging.getLogger('user').info('Summary: Running over %d block(s) containing %s',
			*self._stats.getStats())
	return self._cache_block
def execute(self, wmsIDs):  # yields list of (wmsID, job_status, job_info)
	ec = ExceptionCollector()
	for wmsID in wmsIDs:
		try:
			job_info = utils.filterDict(dict(self._status_fun(wmsID)), vF=lambda v: v not in ['', '0'])
			job_info[CheckInfo.RAW_STATUS] = job_info.pop('status', '').lower()
			if 'destination' in job_info:
				try:
					dest_info = job_info['destination'].split('/', 1)
					job_info[CheckInfo.SITE] = dest_info[0].strip()
					job_info[CheckInfo.QUEUE] = dest_info[1].strip()
				except Exception:
					clear_current_exception()
			yield (wmsID, self._status_map.get(job_info[CheckInfo.RAW_STATUS], Job.UNKNOWN), job_info)
		except Exception:
			ec.collect()
		if utils.abort():
			break
	ec.raise_any(BackendError('Encountered errors while checking job status'))
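# For illustration only - a hypothetical tuple as yielded by execute() above,
# assuming a raw 'destination' field of the form 'site/queue'; the concrete
# values are made up, not taken from any real backend output:
# ('wms.example.org:1234', Job.RUNNING,
#  {CheckInfo.RAW_STATUS: 'running', CheckInfo.SITE: 'wms.example.org',
#   CheckInfo.QUEUE: 'short', 'destination': 'wms.example.org/short'})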
def getBlocks(self, silent=True):
	if self._cache_block is None:
		ec = ExceptionCollector()

		def getAllBlocks():
			for provider in self._providerList:
				try:
					for block in provider.getBlocks(silent):
						yield block
				except Exception:
					ec.collect()
				if utils.abort():
					raise DatasetError('Could not retrieve all datasets!')

		self._cache_block = list(self._stats.process(self._datasetProcessor.process(getAllBlocks())))
		ec.raise_any(DatasetError('Could not retrieve all datasets!'))
		logging.getLogger('user').info('Summary: Running over %s distributed over %d blocks.',
			*self._stats.getStats())
	return self._cache_block
def __new__(cls, config, name):
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return _create_backend(wms)
	exc = ExceptionCollector()
	for cmd, wms in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
			('qsub', 'OGE'), ('condor_q', 'Condor'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]:
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			continue
		return _create_backend(wms)
	# at this point all backends have failed!
	exc.raise_any(BackendError('No valid local backend found!'))
class ANSIGUI(GUI):
	alias_list = ['ansi']

	def __new__(cls, config, workflow):
		if is_dumb_terminal():
			return GUI.create_instance('BasicConsoleGUI', config, workflow)
		return GUI.__new__(cls)

	def __init__(self, config, workflow):
		GUI.__init__(self, config, workflow)
		install_console_reset()
		self._console_lock = GCLock(threading.RLock())  # terminal output lock
		self._exc = ExceptionCollector()
		(self._redraw_thread, self._redraw_shutdown) = (None, False)
		(self._redraw_event, self._immediate_redraw_event) = (GCEvent(rlock=True), GCEvent(rlock=True))
		self._redraw_interval = config.get_float('gui redraw interval', 0.1, on_change=None)
		self._redraw_delay = config.get_float('gui redraw delay', 0.05, on_change=None)
		element = config.get_composited_plugin('gui element', 'report activity log', 'MultiGUIElement',
			cls=GUIElement, on_change=None, bind_kwargs={'inherit': True},
			pargs=(workflow, self._redraw_event, sys.stdout))
		self._element = FrameGUIElement(config, 'gui', workflow,
			self._redraw_event, sys.stdout, self._immediate_redraw_event, element)

	def end_interface(self):  # lots of try ... except ... finally - for clean console state restore
		def _end_interface():
			try:
				self._finish_drawing()
			finally:
				GCStreamHandler.set_global_lock()
				Console.reset_console()
		rethrow(GUIException('GUI shutdown exception'), _end_interface)
		self._exc.raise_any(GUIException('GUI drawing exception'))

	def start_interface(self):
		GCStreamHandler.set_global_lock(self._console_lock)
		with_lock(self._console_lock, self._element.draw_startup)
		self._redraw_shutdown = False  # start redraw thread
		self._redraw_thread = start_daemon('GUI draw thread', self._redraw)

	def _finish_drawing(self):
		def _final_draw():
			try:
				self._element.make_dirty()
			finally:
				self._redraw_shutdown = True  # stop redraw thread
				self._redraw_event.set()
		try:
			try:
				with_lock(self._console_lock, _final_draw)  # last redraw
			finally:
				if self._redraw_thread:
					self._redraw_thread.join(5 + self._redraw_interval)
		finally:
			with_lock(self._console_lock, self._element.draw_finish)  # draw finish

	def _redraw(self):
		try:
			while not self._redraw_shutdown:
				self._redraw_event.wait(timeout=self._redraw_interval)
				self._immediate_redraw_event.wait(timeout=self._redraw_delay)
				with_lock(self._console_lock, self._element.redraw)
				self._immediate_redraw_event.clear()
				self._redraw_event.clear()
		except Exception:
			self._exc.collect()
			abort(True)
def get_block_list_cached(self, show_stats):
	exc = ExceptionCollector()
	result = self._create_block_cache(show_stats, lambda: self._iter_all_blocks(exc))
	exc.raise_any(DatasetError('Could not retrieve all datasets!'))
	return result
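# Every snippet above follows the same two-call contract: collect() is invoked
# inside an 'except' block to record the active exception and keep iterating,
# and raise_any(exc) raises the given summary exception at the end if anything
# was recorded. A minimal sketch of that contract for illustration - this is an
# assumption, not the grid-control implementation, and the
# 'nested_exception_list' attribute is hypothetical:
import sys

class MinimalExceptionCollector(object):
	def __init__(self):
		self._exc_list = []

	def collect(self):
		# record the exception currently being handled and continue
		self._exc_list.append(sys.exc_info()[1])

	def raise_any(self, exc):
		# raise the summary exception only if anything was collected
		if self._exc_list:
			exc.nested_exception_list = self._exc_list  # hypothetical attribute
			self._exc_list = []
			raise exc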