def _gc_create_workflow(config, do_freeze=True, **kwargs):
    # Build the global workflow object from the given config and perform the
    # early setup steps (signal handlers, logging, plugin paths, user actions).
    # Returns a (workflow, gui) tuple; may call sys.exit() for help/cancel/reset.
    # Install handlers for debug session (SIGURG) and abort (SIGINT) requests
    signal.signal(signal.SIGURG, handle_debug_interrupt)
    signal.signal(signal.SIGINT, handle_abort_interrupt)
    start_daemon('debug watchdog', _debug_watchdog)

    # Logging is configured first so later setup steps report through it
    logging_setup(config.change_view(set_sections=['logging']))

    global_view = config.change_view(set_sections=['global'])
    _setup_work_path(global_view)
    for pkg_path in global_view.get_dn_list('package paths', [], on_change=None):
        init_hpf_plugins(pkg_path)

    # These settings have to be read before the config gets frozen below
    show_cfg = global_view.get_state('display', detail='config')
    show_min_cfg = global_view.get_state('display', detail='minimal config')
    action_view = config.change_view(set_sections=['action'])
    cancel_selector = action_view.get(['delete', 'cancel'], '', on_change=None)
    reset_selector = action_view.get('reset', '', on_change=None)

    # Instantiate workflow and GUI plugins, then freeze the config if requested
    workflow = global_view.get_plugin('workflow', 'Workflow:global',
        cls='Workflow', pkwargs=kwargs)
    gui = config.get_plugin('gui', 'BasicConsoleGUI', cls=GUI,
        on_change=None, pargs=(workflow,))
    if do_freeze:
        config.factory.freeze(
            write_config=config.get_state('init', detail='config'))

    # Config help was requested - dump the settings and leave
    if show_cfg or show_min_cfg:
        config.write(sys.stdout, print_default=show_cfg, print_unused=False,
            print_minimal=show_min_cfg, print_source=show_cfg)
        sys.exit(os.EX_OK)

    # User requested cancellation / reset of jobs - perform the action and leave
    if cancel_selector:
        workflow.job_manager.cancel(workflow.task, workflow.backend, cancel_selector)
        sys.exit(os.EX_OK)
    if reset_selector:
        workflow.job_manager.reset(workflow.task, workflow.backend, reset_selector)
        sys.exit(os.EX_OK)
    return (workflow, gui)
def _gc_create_workflow(config, do_freeze=True, **kwargs):
    # Create workflow from config and do initial processing steps.
    # Returns (workflow, gui); exits the process for config-help or
    # cancel/reset actions instead of returning.
    # NOTE(review): duplicate of another _gc_create_workflow definition in this
    # file - the later definition shadows the earlier one; confirm which is live.
    # set up signal handler for interrupts and debug session or stack dump requests
    signal.signal(signal.SIGURG, handle_debug_interrupt)
    signal.signal(signal.SIGINT, handle_abort_interrupt)
    start_daemon('debug watchdog', _debug_watchdog)
    # Configure logging settings
    logging_setup(config.change_view(set_sections=['logging']))
    global_config = config.change_view(set_sections=['global'])
    _setup_work_path(global_config)
    # Register additional plugin package paths before plugins are looked up
    for package_paths in global_config.get_dn_list('package paths', [], on_change=None):
        init_hpf_plugins(package_paths)
    # Query config settings before config is frozen
    help_cfg = global_config.get_state('display', detail='config')
    help_scfg = global_config.get_state('display', detail='minimal config')
    action_config = config.change_view(set_sections=['action'])
    # 'delete' and 'cancel' are treated as aliases for the cancel action
    action_cancel = action_config.get(['delete', 'cancel'], '', on_change=None)
    action_reset = action_config.get('reset', '', on_change=None)
    # Create workflow and freeze config settings
    workflow = global_config.get_plugin('workflow', 'Workflow:global', cls='Workflow', pkwargs=kwargs)
    gui = config.get_plugin('gui', 'BasicConsoleGUI', cls=GUI, on_change=None, pargs=(workflow,))
    if do_freeze:
        config.factory.freeze(write_config=config.get_state('init', detail='config'))
    # Give config help
    if help_cfg or help_scfg:
        config.write(sys.stdout, print_default=help_cfg, print_unused=False,
            print_minimal=help_scfg, print_source=help_cfg)
        sys.exit(os.EX_OK)
    # Check if user requested deletion / reset of jobs
    if action_cancel:
        workflow.job_manager.cancel(workflow.task, workflow.backend, action_cancel)
        sys.exit(os.EX_OK)
    if action_reset:
        workflow.job_manager.reset(workflow.task, workflow.backend, action_reset)
        sys.exit(os.EX_OK)
    return (workflow, gui)
def download_single_file(opts, jobnum, fi_idx, fi, status_mon):
    # Download one job output file from its storage element, reporting the
    # outcome through status_mon; finishes with a hash check of the local copy.
    (source_se_path, target_se_path, local_se_path) = get_fi_path_tuple(opts, fi)
    show_file_info(jobnum, fi_idx, fi)
    # Copy files to local folder - skip files hosted on blacklisted SEs
    if not accepted_se(opts, fi):
        return status_mon.register_file_result(jobnum, fi_idx, 'skipping file on blacklisted SE',
            FileDownloadStatus.FILE_SE_BLACKLIST)
    activity_check = Activity('Checking file existance')
    try:
        # Optionally skip files that already exist at the target location
        if opts.skip_existing and (se_exists(target_se_path).status(timeout=10, terminate=True) == 0):
            return status_mon.register_file_result(jobnum, fi_idx, 'skipping already existing file',
                FileDownloadStatus.FILE_EXISTS)
    finally:
        activity_check.finish()
    # Create the target directory if it does not exist yet
    if se_exists(os.path.dirname(target_se_path)).status(timeout=10, terminate=True) != 0:
        activity = Activity('Creating target directory')
        try:
            mkdir_proc = se_mkdir(os.path.dirname(target_se_path))
            if mkdir_proc.status(timeout=10, terminate=True) != 0:
                return status_mon.register_file_result(jobnum, fi_idx, 'unable to create target dir',
                    FileDownloadStatus.FILE_MKDIR_FAILED, proc=mkdir_proc)
        finally:
            activity.finish()
    # Local targets are monitored directly at the final destination
    if 'file://' in target_se_path:
        local_se_path = target_se_path
    # Start a monitor thread that can flag a transfer timeout while copying
    copy_timeout_event = GCEvent()
    copy_ended_event = GCEvent()
    monitor_thread = start_daemon('Download monitor %s' % jobnum, download_monitor,
        jobnum, fi_idx, fi, local_se_path, copy_ended_event, copy_timeout_event)
    cp_proc = se_copy(source_se_path, target_se_path, tmp=local_se_path)
    # Poll the copy process until it finishes or the monitor signals a timeout
    while (cp_proc.status(timeout=0) is None) and not copy_timeout_event.wait(timeout=0.1):
        pass
    copy_ended_event.set()
    monitor_thread.join()
    if copy_timeout_event.is_set():
        cp_proc.terminate(timeout=1)
        return status_mon.register_file_result(jobnum, fi_idx, 'Transfer timeout',
            FileDownloadStatus.FILE_TIMEOUT)
    elif cp_proc.status(timeout=0, terminate=True) != 0:
        # NOTE(review): transfer *errors* are registered with FILE_TIMEOUT as
        # well - looks like a copy-paste; confirm whether a dedicated failure
        # status exists in FileDownloadStatus and should be used here.
        return status_mon.register_file_result(jobnum, fi_idx, 'Transfer error',
            FileDownloadStatus.FILE_TIMEOUT, proc=cp_proc)
    # Verify the checksum of the downloaded file
    return hash_verify(opts, status_mon, local_se_path, jobnum, fi_idx, fi)
def _start_watcher(self, desc, daemon, pid, *args):
    # Launch a watcher for the managed process - as a daemon or a regular
    # thread depending on the 'daemon' flag; returns the started thread.
    # The label carries the pid and command line for easier debugging.
    label = '%s (%d:%r)' % (desc, pid, [self._cmd] + self._args)
    if daemon:
        return start_daemon(label, *args)
    return start_thread(label, *args)
def _trigger_debug_signal(duration):
    # Schedule delivery of SIGURG to this process after 'duration' seconds,
    # which triggers the debug console via the installed signal handler.
    def _delayed_kill():
        # Runs in a background daemon so the caller is not blocked
        time.sleep(duration)
        os.kill(os.getpid(), signal.SIGURG)
    start_daemon('debug console trigger', _delayed_kill)
def download_single_file(opts, jobnum, fi_idx, fi, status_mon):
    # Retrieve a single job output file from its storage element and report
    # the result via status_mon; ends with a hash check of the local copy.
    (src_se_path, tgt_se_path, local_path) = get_fi_path_tuple(opts, fi)
    show_file_info(jobnum, fi_idx, fi)
    # Files on blacklisted storage elements are skipped outright
    if not accepted_se(opts, fi):
        return status_mon.register_file_result(jobnum, fi_idx,
            'skipping file on blacklisted SE', FileDownloadStatus.FILE_SE_BLACKLIST)
    check_activity = Activity('Checking file existance')
    try:
        # Honor --skip-existing: nothing to do if the target is already there
        if opts.skip_existing:
            if se_exists(tgt_se_path).status(timeout=10, terminate=True) == 0:
                return status_mon.register_file_result(jobnum, fi_idx,
                    'skipping already existing file', FileDownloadStatus.FILE_EXISTS)
    finally:
        check_activity.finish()
    # Make sure the target directory exists before copying
    tgt_dir = os.path.dirname(tgt_se_path)
    if se_exists(tgt_dir).status(timeout=10, terminate=True) != 0:
        mkdir_activity = Activity('Creating target directory')
        try:
            mkdir_proc = se_mkdir(tgt_dir)
            if mkdir_proc.status(timeout=10, terminate=True) != 0:
                return status_mon.register_file_result(jobnum, fi_idx,
                    'unable to create target dir',
                    FileDownloadStatus.FILE_MKDIR_FAILED, proc=mkdir_proc)
        finally:
            mkdir_activity.finish()
    # Local targets are monitored directly at their final destination
    if 'file://' in tgt_se_path:
        local_path = tgt_se_path
    # Run a monitor daemon that may flag a transfer timeout while we copy
    timeout_event = GCEvent()
    ended_event = GCEvent()
    monitor = start_daemon('Download monitor %s' % jobnum, download_monitor,
        jobnum, fi_idx, fi, local_path, ended_event, timeout_event)
    cp_proc = se_copy(src_se_path, tgt_se_path, tmp=local_path)
    # Poll the copy process; stop when it finishes or the monitor flags timeout
    while True:
        if cp_proc.status(timeout=0) is not None:
            break
        if timeout_event.wait(timeout=0.1):
            break
    ended_event.set()
    monitor.join()
    if timeout_event.is_set():
        cp_proc.terminate(timeout=1)
        return status_mon.register_file_result(jobnum, fi_idx,
            'Transfer timeout', FileDownloadStatus.FILE_TIMEOUT)
    if cp_proc.status(timeout=0, terminate=True) != 0:
        return status_mon.register_file_result(jobnum, fi_idx,
            'Transfer error', FileDownloadStatus.FILE_TIMEOUT, proc=cp_proc)
    # Finally verify the checksum of the downloaded file
    return hash_verify(opts, status_mon, local_path, jobnum, fi_idx, fi)
def start_interface(self):
    # Bring up the console GUI: route log output through the console lock,
    # draw the initial screen and launch the background redraw loop.
    console_lock = self._console_lock
    GCStreamHandler.set_global_lock(console_lock)
    with_lock(console_lock, self._element.draw_startup)
    # Clear the shutdown flag before the redraw thread starts polling it
    self._redraw_shutdown = False
    self._redraw_thread = start_daemon('GUI draw thread', self._redraw)