# Imports assumed by the helpers below (the original file's import block is not
# shown in this excerpt):
import sys

from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils import get_loop
from osrf_pycommon.process_utils.async_execute_process import async_execute_process


def close_asyncio_loop():
    try:
        get_loop().close()
    except RuntimeError as exc:
        # A runtime error can occur if the loop was not used.
        if 'There is no current event loop in thread' not in str(exc):
            raise
def run_until_complete(coroutine):
    # Get event loop
    loop = get_loop()
    loop.slow_callback_duration = 1.0

    # Run jobs
    return loop.run_until_complete(coroutine)
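# Usage sketch (illustrative, not from the original module): run_until_complete()
# simply drives any coroutine to completion on the shared loop from get_loop().
# `example_coroutine` is a hypothetical coroutine used only for this example.
@asyncio.coroutine
def example_coroutine():
    yield from asyncio.sleep(0.1)
    return 'done'

result = run_until_complete(example_coroutine())  # result == 'done'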
def _run(cmd, exit_on_error=True, **kwargs):
    def create_protocol(*args, **kwargs):
        return MyProtocol(cmd, exit_on_error, *args, **kwargs)

    @asyncio.coroutine
    def run_coroutine(future):
        kwargs['emulate_tty'] = True
        transport, protocol = yield from async_execute_process(
            create_protocol, cmd, **kwargs)
        returncode = yield from protocol.complete
        future.set_result(returncode)

    future = asyncio.Future()
    get_loop().run_until_complete(run_coroutine(future))
    retcode = future.result()

    if exit_on_error and retcode != 0:
        sys.exit(retcode)
    return retcode
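# Usage sketch (illustrative; the command is arbitrary and MyProtocol is defined
# elsewhere in the original module): _run() blocks until the command finishes and,
# by default, exits the current process when the return code is nonzero.
retcode = _run(['cmake', '--version'], exit_on_error=False)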
def async_job(verb, job, threadpool, locks, event_queue, log_path):
    """Run a sequence of Stages from a Job and collect their output.

    :param job: A Job instance
    :param threadpool: A thread pool executor for blocking stages
    :param event_queue: A queue for asynchronous events
    """

    # Initialize success flag
    all_stages_succeeded = True

    # Jobs start by occupying a jobserver job slot
    occupying_job = True

    # Load environment for this job
    job_env = job.getenv(os.environ)

    # Execute each stage of this job
    for stage in job.stages:
        # Logger reference in this scope for error reporting
        logger = None

        # Abort the job if one of the stages has failed and we should not continue
        if not job.continue_on_failure and not all_stages_succeeded:
            break

        # Check for stage synchronization lock
        if stage.locked_resource is not None:
            lock = locks.setdefault(stage.locked_resource, asyncio.Lock())
            yield asyncio.From(lock)
        else:
            lock = FakeLock()

        try:
            # If the stage doesn't require a job token, release it temporarily
            if stage.occupy_job:
                if not occupying_job:
                    while job_server.try_acquire() is None:
                        yield asyncio.From(asyncio.sleep(0.05))
                    occupying_job = True
            else:
                if occupying_job:
                    job_server.release()
                    occupying_job = False

            # Notify stage started
            event_queue.put(ExecutionEvent(
                'STARTED_STAGE',
                job_id=job.jid,
                stage_label=stage.label))

            if type(stage) is CommandStage:
                try:
                    # Initiate the command
                    while True:
                        try:
                            # Update the environment for this stage (respects overrides)
                            stage.update_env(job_env)

                            # Get the logger
                            protocol_type = stage.logger_factory(verb, job.jid, stage.label, event_queue, log_path)

                            # Start asynchronous execution
                            transport, logger = yield asyncio.From(
                                async_execute_process(
                                    protocol_type,
                                    **stage.async_execute_process_kwargs))
                            break
                        except OSError as exc:
                            if 'Text file busy' in str(exc):
                                # This is a transient error, try again shortly
                                # TODO: report the file causing the problem (exc.filename)
                                yield asyncio.From(asyncio.sleep(0.01))
                                continue
                            raise

                    # Notify that a subprocess has been created
                    event_queue.put(ExecutionEvent(
                        'SUBPROCESS',
                        job_id=job.jid,
                        stage_label=stage.label,
                        stage_repro=stage.get_reproduction_cmd(verb, job.jid),
                        **stage.async_execute_process_kwargs))

                    # Asynchronously yield until this command is completed
                    retcode = yield asyncio.From(logger.complete)
                except:  # noqa: E722
                    # A bare except is used because the errors a CommandStage might
                    # raise are unbounded; capture the traceback in the log files.
                    logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                    logger.err(str(traceback.format_exc()))
                    retcode = 3

            elif type(stage) is FunctionStage:
                logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                try:
                    # Asynchronously yield until this function is completed
                    retcode = yield asyncio.From(get_loop().run_in_executor(
                        threadpool,
                        stage.function,
                        logger,
                        event_queue))
                except:  # noqa: E722
                    # A bare except is used because the errors a FunctionStage might
                    # raise are unbounded; capture the traceback in the log files.
                    logger.err('Stage `{}` failed with arguments:'.format(stage.label))
                    for arg_val in stage.args:
                        logger.err('  {}'.format(arg_val))
                    for arg_name, arg_val in stage.kwargs.items():
                        logger.err('  {}: {}'.format(arg_name, arg_val))
                    logger.err(str(traceback.format_exc()))
                    retcode = 3
            else:
                raise TypeError("Bad Job Stage: {}".format(stage))

            # Set whether this stage succeeded
            stage_succeeded = (retcode == 0)

            # Update success tracker from this stage
            all_stages_succeeded = all_stages_succeeded and stage_succeeded

            # Store the results from this stage
            event_queue.put(ExecutionEvent(
                'FINISHED_STAGE',
                job_id=job.jid,
                stage_label=stage.label,
                succeeded=stage_succeeded,
                stdout=logger.get_stdout_log(),
                stderr=logger.get_stderr_log(),
                interleaved=logger.get_interleaved_log(),
                logfile_filename=logger.unique_logfile_name,
                repro=stage.get_reproduction_cmd(verb, job.jid),
                retcode=retcode))

            # Close logger
            logger.close()
        finally:
            lock.release()

    # Finally, return whether all stages of the job completed
    raise asyncio.Return(job.jid, all_stages_succeeded)
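# Sketch of a minimal FakeLock (an assumption; the real definition is not shown in
# this excerpt): async_job() only needs an object that mimics the asyncio.Lock
# interface without doing any synchronization, since release() is called in the
# finally block even when no resource lock was acquired.
class FakeLock(object):
    """No-op stand-in for asyncio.Lock used when a stage locks no resource."""

    def locked(self):
        return False

    def acquire(self):
        return True

    def release(self):
        pass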
def async_job(verb, job, threadpool, locks, event_queue, log_path):
    """Run a sequence of Stages from a Job and collect their output.

    :param job: A Job instance
    :param threadpool: A thread pool executor for blocking stages
    :param event_queue: A queue for asynchronous events
    """

    # Initialize success flag
    all_stages_succeeded = True

    # Jobs start by occupying a jobserver job slot
    occupying_job = True

    # Execute each stage of this job
    for stage in job.stages:
        # Logger reference in this scope for error reporting
        logger = None

        # Abort the job if one of the stages has failed and we should not continue
        if not job.continue_on_failure and not all_stages_succeeded:
            break

        # Check for stage synchronization lock
        if stage.locked_resource is not None:
            lock = locks.setdefault(stage.locked_resource, asyncio.Lock())
            yield from lock
        else:
            lock = FakeLock()

        try:
            # If the stage doesn't require a job token, release it temporarily
            if stage.occupy_job:
                if not occupying_job:
                    while job_server.try_acquire() is None:
                        yield from asyncio.sleep(0.05)
                    occupying_job = True
            else:
                if occupying_job:
                    job_server.release()
                    occupying_job = False

            # Notify stage started
            event_queue.put(
                ExecutionEvent('STARTED_STAGE',
                               job_id=job.jid,
                               stage_label=stage.label))

            if type(stage) is CommandStage:
                try:
                    # Initiate the command
                    while True:
                        try:
                            # Update the environment for this stage (respects overrides)
                            stage.update_env(job.env)

                            # Get the logger
                            protocol_type = stage.logger_factory(
                                verb, job.jid, stage.label, event_queue, log_path)

                            # Start asynchronous execution
                            transport, logger = yield from (
                                async_execute_process(
                                    protocol_type,
                                    **stage.async_execute_process_kwargs))
                            break
                        except OSError as exc:
                            if 'Text file busy' in str(exc):
                                # This is a transient error, try again shortly
                                # TODO: report the file causing the problem (exc.filename)
                                yield from asyncio.sleep(0.01)
                                continue
                            raise

                    # Notify that a subprocess has been created
                    event_queue.put(
                        ExecutionEvent('SUBPROCESS',
                                       job_id=job.jid,
                                       stage_label=stage.label,
                                       stage_repro=stage.get_reproduction_cmd(
                                           verb, job.jid),
                                       **stage.async_execute_process_kwargs))

                    # Asynchronously yield until this command is completed
                    retcode = yield from logger.complete
                except:  # noqa: E722
                    # A bare except is permissible here because the set of errors which the
                    # CommandStage might raise is unbounded. We capture the traceback here
                    # and save it to the build's log files.
                    logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                    logger.err(str(traceback.format_exc()))
                    retcode = 3

            elif type(stage) is FunctionStage:
                logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                try:
                    # Asynchronously yield until this function is completed
                    retcode = yield from get_loop().run_in_executor(
                        threadpool, stage.function, logger, event_queue)
                except:  # noqa: E722
                    # A bare except is permissible here because the set of errors which the
                    # FunctionStage might raise is unbounded. We capture the traceback here
                    # and save it to the build's log files.
                    logger.err('Stage `{}` failed with arguments:'.format(
                        stage.label))
                    for arg_val in stage.args:
                        logger.err('  {}'.format(arg_val))
                    for arg_name, arg_val in stage.kwargs.items():
                        logger.err('  {}: {}'.format(arg_name, arg_val))
                    logger.err(str(traceback.format_exc()))
                    retcode = 3
            else:
                raise TypeError("Bad Job Stage: {}".format(stage))

            # Set whether this stage succeeded
            stage_succeeded = (retcode == 0)

            # Update success tracker from this stage
            all_stages_succeeded = all_stages_succeeded and stage_succeeded

            # Store the results from this stage
            event_queue.put(
                ExecutionEvent('FINISHED_STAGE',
                               job_id=job.jid,
                               stage_label=stage.label,
                               succeeded=stage_succeeded,
                               stdout=logger.get_stdout_log(),
                               stderr=logger.get_stderr_log(),
                               interleaved=logger.get_interleaved_log(),
                               logfile_filename=logger.unique_logfile_name,
                               repro=stage.get_reproduction_cmd(verb, job.jid),
                               retcode=retcode))

            # Close logger
            logger.close()
        finally:
            lock.release()

    # Finally, return whether all stages of the job completed
    return (job.jid, all_stages_succeeded)
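# Usage sketch (assumptions: `jobs`, `event_queue`, and `log_path` are supplied by
# the caller, and job tokens have already been acquired from the job server): one
# way the async_job() coroutines above could be driven to completion on the shared loop.
from concurrent.futures import ThreadPoolExecutor

def run_jobs(verb, jobs, event_queue, log_path, max_workers=4):
    threadpool = ThreadPoolExecutor(max_workers=max_workers)
    locks = {}
    tasks = [async_job(verb, job, threadpool, locks, event_queue, log_path)
             for job in jobs]
    results = get_loop().run_until_complete(asyncio.gather(*tasks))
    # Each result is a (job id, all_stages_succeeded) pair
    return dict(results)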
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop

# allow module to be importable for --cover-inclusive
try:
    from osrf_pycommon.process_utils.async_execute_process_trollius import From
except ImportError:
    TROLLIUS_FOUND = False
else:
    TROLLIUS_FOUND = True
    from osrf_pycommon.process_utils.async_execute_process_trollius import Return

from .impl_aep_protocol import create_protocol

loop = get_loop()


@asyncio.coroutine
def run(cmd, **kwargs):
    transport, protocol = yield From(
        async_execute_process(create_protocol(), cmd, **kwargs))
    retcode = yield From(protocol.complete)
    raise Return(protocol.stdout_buffer, protocol.stderr_buffer, retcode)
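# Usage sketch (illustrative only, and only meaningful when TROLLIUS_FOUND is True):
# driving the trollius-style run() coroutine above to completion on the module's loop.
def run_blocking(cmd, **kwargs):
    stdout, stderr, retcode = loop.run_until_complete(run(cmd, **kwargs))
    return stdout, stderr, retcode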