def test_FDReadStreamConnector_close_on_no_istream_data(istream, ostream):
    """Hitting EOF on the input stream must close both ends of the connector."""
    istream.data = b''

    with mock.patch.object(ostream, 'close') as ostream_close, \
            mock.patch.object(istream, 'close') as istream_close:
        connector = FDReadStreamConnector(istream, ostream)
        # read() reports 0 bytes when the input stream is exhausted
        assert connector.read() == 0

    ostream_close.assert_called_once()
    istream_close.assert_called_once()
    assert ostream.data == b''
def _setup_streams(task_inputs, inputs, task_outputs, outputs, tempdir,
                   job_mgr, progress_pipe):
    """
    Build the list of stream connectors for a docker task.

    Returns a 2 tuple of input and output pipe mappings. The first element is
    a dict mapping input file descriptors to the corresponding stream adapters,
    the second is a dict mapping output file descriptors to the corresponding
    stream adapters. This also handles the special cases of STDIN, STDOUT, and
    STDERR mappings, and in the case of non-streaming standard IO pipes, will
    create default bindings for those as well.
    """
    stream_connectors = []

    def stream_pipe_path(spec_id, spec, bindings):
        """
        Helper to check for a valid streaming IO spec. Returns the absolute
        named-pipe path under ``tempdir`` for a streaming spec, or None when
        the spec is not a streaming filepath binding.
        """
        is_stream = (spec.get('stream') and spec_id in bindings and
                     spec.get('target') == 'filepath')
        if not is_stream:
            return None

        pipe_path = spec.get('path', spec_id)
        # Absolute paths would escape the task's temp dir
        if pipe_path.startswith('/'):
            raise Exception('Streaming filepaths must be relative.')
        return os.path.join(tempdir, pipe_path)

    # Streaming inputs: data is fetched and written into a named pipe.
    for spec_id, spec in task_inputs.iteritems():
        pipe_path = stream_pipe_path(spec_id, spec, inputs)
        if pipe_path is None:
            continue
        pipe_writer = NamedPipeWriter(NamedPipe(pipe_path))
        stream_connectors.append(FDWriteStreamConnector(
            make_stream_fetch_adapter(inputs[spec_id]), pipe_writer))
        # Don't open from this side, must be opened for reading first!

    # Streaming outputs: data read from a named pipe is pushed out.
    for spec_id, spec in task_outputs.iteritems():
        pipe_path = stream_pipe_path(spec_id, spec, outputs)
        if pipe_path is None:
            continue
        pipe_reader = NamedPipeReader(NamedPipe(pipe_path))
        stream_connectors.append(FDReadStreamConnector(
            pipe_reader, make_stream_push_adapter(outputs[spec_id])))

    # Special stream output used to report job progress back to girder.
    if progress_pipe and job_mgr:
        progress_pipe = ProgressPipe(os.path.join(tempdir, '.girder_progress'))
        stream_connectors.append(progress_pipe.open())

    return stream_connectors
def transform(self, **kwargs):
    """
    Resolve this connection into a concrete stream connector.

    The direction is decided by the pipe type: writing into a container's
    named input pipe, or reading from a container's named output pipe.

    :raises TypeError: if neither endpoint is a named pipe.
    """
    from girder_worker.docker.io import (
        FDWriteStreamConnector,
        FDReadStreamConnector,
    )
    source = _maybe_transform(self._input, **kwargs)
    sink = _maybe_transform(self._output, **kwargs)

    if isinstance(self._output, NamedInputPipe):
        return FDWriteStreamConnector(source, sink)
    if isinstance(self._input, NamedOutputPipe):
        return FDReadStreamConnector(source, sink)
    raise TypeError('A NamedInputPipe or NamedOutputPipe must be provided.')
def _run_select_loop(task, container, read_stream_connectors,
                     write_stream_connectors):
    """
    Attach to a running container's stdout/stderr sockets and pump all
    configured stream connectors until the container exits or the task is
    canceled.

    :param task: the celery task (checked for cancellation).
    :param container: the docker-py container object.
    :param read_stream_connectors: connectors whose data is read from the
        container side; any ContainerStdOut/ContainerStdErr placeholders are
        replaced here with real attached sockets.
    :param write_stream_connectors: connectors that write data into the
        container.
    :raises DockerException: if the container exits with a non-zero code
        (unless the task was canceled), or if stopping the container fails.
    """
    import time

    stdout = None
    stderr = None
    try:
        # Attach to the container's standard output/error streams
        stdout = container.attach_socket(params={
            'stdout': True,
            'logs': True,
            'stream': True
        })

        stderr = container.attach_socket(params={
            'stderr': True,
            'logs': True,
            'stream': True
        })

        def exit_condition():
            container.reload()
            return container.status in {'exited', 'dead'} or task.canceled

        # Look for ContainerStdOut and ContainerStdErr instances that need
        # to be replaced with the real container streams.
        stdout_connected = False
        for read_stream_connector in read_stream_connectors:
            if isinstance(read_stream_connector.input, ContainerStdOut):
                stdout_reader = _SocketReader(stdout)
                read_stream_connector.output = DockerStreamPushAdapter(
                    read_stream_connector.output)
                read_stream_connector.input = stdout_reader
                stdout_connected = True
                break

        stderr_connected = False
        for read_stream_connector in read_stream_connectors:
            if isinstance(read_stream_connector.input, ContainerStdErr):
                stderr_reader = _SocketReader(stderr)
                read_stream_connector.output = DockerStreamPushAdapter(
                    read_stream_connector.output)
                read_stream_connector.input = stderr_reader
                stderr_connected = True
                break

        # If no stdout/stderr connection has been provided, just forward the
        # container streams to this process's sys.stdout/sys.stderr.
        if not stdout_connected:
            stdout_reader = _SocketReader(stdout)
            connector = FDReadStreamConnector(
                stdout_reader,
                DockerStreamPushAdapter(StdStreamWriter(sys.stdout)))
            read_stream_connectors.append(connector)

        if not stderr_connected:
            stderr_reader = _SocketReader(stderr)
            connector = FDReadStreamConnector(
                stderr_reader,
                DockerStreamPushAdapter(StdStreamWriter(sys.stderr)))
            read_stream_connectors.append(connector)

        # Run select loop
        utils.select_loop(exit_condition=exit_condition,
                          readers=read_stream_connectors,
                          writers=write_stream_connectors)

        if task.canceled:
            try:
                container.stop()
            # Catch the ReadTimeout from requests and wait for the container
            # to exit. See https://github.com/docker/docker-py/issues/1374
            # for more details.
            except ReadTimeout:
                tries = 10
                while tries > 0:
                    container.reload()
                    if container.status == 'exited':
                        break
                    # BUG FIX: tries was never decremented and there was no
                    # delay, so a container that never exited made this an
                    # infinite busy loop. Poll at most 10 times, once per
                    # second.
                    tries -= 1
                    time.sleep(1)

                if container.status != 'exited':
                    msg = 'Unable to stop container: %s' % container.id
                    logger.error(msg)
            except DockerException as dex:
                logger.error(dex)
                raise

        container.reload()
        exit_code = container.attrs['State']['ExitCode']
        if not task.canceled and exit_code != 0:
            raise DockerException(
                'Non-zero exit code from docker container (%d).' % exit_code)
    finally:
        # Close our stdout and stderr sockets
        if stdout:
            stdout.close()
        if stderr:
            stderr.close()
def run(task, inputs, outputs, task_inputs, task_outputs, **kwargs):
    """
    Run a docker task: expand container arguments, wire up streaming IO
    connectors, launch the container via _docker_run, then resolve filepath
    outputs back into the ``outputs`` bindings.

    :param task: the task spec (reads docker_image, pull_image, progress_pipe,
        entrypoint, container_args, docker_run_args).
    :param inputs: input bindings keyed by input id.
    :param outputs: output bindings keyed by output id; filepath outputs get
        their ``script_data`` set to the resolved host path.
    :param task_inputs: input specs from the task definition.
    :param task_outputs: output specs from the task definition.
    :param kwargs: worker-supplied context (_celery_task, _rm_container,
        _tempdir, _job_manager).
    :raises Exception: if an $output token cannot be expanded or a declared
        output filepath does not exist after the container finishes.
    """
    image = task['docker_image']
    celery_task = kwargs.get('_celery_task')
    pull_image = task.get('pull_image', True)
    progress_pipe = task.get('progress_pipe', False)
    remove_container = kwargs.get('_rm_container', False)
    # Allow run args to be overridden per-task
    extra_run_kwargs = task.get('docker_run_args', {})
    tempdir = kwargs.get('_tempdir')
    job_mgr = kwargs.get('_job_manager')

    args = _expand_args(task.get('container_args', []), inputs, task_inputs,
                        outputs, tempdir)

    stream_connectors = _setup_streams(task_inputs, inputs, task_outputs,
                                       outputs, tempdir, job_mgr,
                                       progress_pipe)

    stdout_fetch_adapter, stderr_fetch_adapter = _setup_std_streams(
        task_outputs, outputs)

    # Connect up stdout and stderr. ContainerStdOut() and ContainerStdErr()
    # are placeholders that will be replaced with the real streams when the
    # container is started.
    if stdout_fetch_adapter is not None:
        stream_connectors.append(
            FDReadStreamConnector(ContainerStdOut(), stdout_fetch_adapter))

    if stderr_fetch_adapter is not None:
        stream_connectors.append(
            FDReadStreamConnector(ContainerStdErr(), stderr_fetch_adapter))

    entrypoint = None
    if 'entrypoint' in task:
        # Normalize to a list as docker-py expects
        if isinstance(task['entrypoint'], (list, tuple)):
            entrypoint = task['entrypoint']
        else:
            entrypoint = [task['entrypoint']]

    # Mount the task temp dir into the container as the data volume
    volumes = {
        tempdir: {
            'bind': DATA_VOLUME,
            'mode': 'rw'
        }
    }

    add_input_volumes(inputs, volumes)

    _docker_run(
        celery_task, image, pull_image=pull_image, entrypoint=entrypoint,
        container_args=args, volumes=volumes,
        remove_container=remove_container,
        stream_connectors=stream_connectors, **extra_run_kwargs)

    # Resolve non-streaming filepath outputs to host paths after the run
    for name, spec in task_outputs.iteritems():
        if spec.get('target') == 'filepath' and not spec.get('stream'):
            path = spec.get('path', '$output{%s}' % name)

            # Expand any $output{id} tokens embedded in the path
            for outputId in _outputRe.findall(path):
                if outputId in outputs and 'name' in outputs[outputId]:
                    path = path.replace('$output{%s}' % outputId,
                                        outputs[outputId]['name'])
                elif 'path' in spec:
                    raise Exception('Could not expand token: $output{%s}.'
                                    % outputId)
                else:
                    path = name

            if not path.startswith('/'):
                # Assume relative paths are relative to the data volume
                path = os.path.join(DATA_VOLUME, path)

            # Convert data volume refs to the temp dir on the host
            path = path.replace(DATA_VOLUME, tempdir, 1)
            if not os.path.exists(path):
                raise Exception('Output filepath %s does not exist.' % path)
            outputs[name]['script_data'] = path
def test_FDReadStreamConnector_open_calls_istream_open(istream, ostream):
    """Opening the connector should delegate to the input stream's open()."""
    with mock.patch.object(istream, 'open', create=True) as mocked_open:
        connector = FDReadStreamConnector(istream, ostream)
        connector.open()

    mocked_open.assert_called_once()
def test_FDReadStreamConnector_read_istream_data_to_ostream(istream, ostream):
    """A read() should pump the input stream's bytes into the output stream."""
    payload = b'Test Data'
    istream.data = payload

    FDReadStreamConnector(istream, ostream).read()

    assert ostream.data == payload
def test_FDReadStreamConnector_fileno_same_as_input_fileno(istream, ostream):
    """The connector's fileno() must expose the input stream's descriptor."""
    connector = FDReadStreamConnector(istream, ostream)

    assert connector.fileno() == istream.fileno()