def before_execute(self, block_store):
    """Prepare the C executor: record the entry point and fetch the libraries.

    Pulls the 'lib' references and 'entry_point' name out of the task
    arguments, announces the fetch on the worker event bus, and resolves
    the shared-object references to local filenames.
    """
    entry_point = self.args["entry_point"]
    so_refs = self.args["lib"]
    self.entry_point_name = entry_point
    self.so_refs = so_refs
    ciel.log.error("Running C executor for entry point: %s" % entry_point,
                   "CEXEC", logging.DEBUG)
    ciel.engine.publish("worker_event", "C-exec: fetching SOs")
    self.so_filenames = retrieve_filenames_for_refs(so_refs, self.task_record)
def before_execute(self):
    """Prepare the .NET executor: record the target class and fetch the DLLs.

    Reads the 'lib' references and 'class' name from the task arguments,
    announces the fetch on the worker event bus, and resolves the DLL
    references to local filenames.
    """
    dll_refs = self.args['lib']
    class_name = self.args['class']
    self.dll_refs = dll_refs
    self.class_name = class_name
    ciel.log.error("Running Dotnet executor for class: %s" % class_name,
                   "DOTNET", logging.DEBUG)
    ciel.engine.publish("worker_event", "Dotnet: fetching DLLs")
    self.dll_filenames = retrieve_filenames_for_refs(dll_refs, self.task_record)
def before_execute(self):
    """Prepare the Java executor: record the target class and fetch the JARs.

    Reads the 'lib' references and 'class' name from the task arguments,
    announces the fetch on the worker event bus, and resolves the JAR
    references to local filenames.
    """
    jar_refs = self.args["lib"]
    class_name = self.args["class"]
    self.jar_refs = jar_refs
    self.class_name = class_name
    ciel.log.error("Running Java executor for class: %s" % class_name,
                   "JAVA", logging.DEBUG)
    ciel.engine.publish("worker_event", "Java: fetching JAR")
    self.jar_filenames = retrieve_filenames_for_refs(jar_refs, self.task_record)
def before_execute(self, block_store):
    """Prepare the C executor: record the entry point and fetch the libraries.

    Reads the 'lib' references and 'entry_point' name from the task
    arguments, publishes a progress event, and resolves the shared-object
    references to local filenames for the run.
    """
    self.so_refs = self.args['lib']
    self.entry_point_name = self.args['entry_point']
    ciel.log.error(
        "Running C executor for entry point: %s" % self.entry_point_name,
        "CEXEC",
        logging.DEBUG,
    )
    ciel.engine.publish("worker_event", "C-exec: fetching SOs")
    self.so_filenames = retrieve_filenames_for_refs(
        self.so_refs,
        self.task_record,
    )
def before_execute(self):
    """Prepare the Java executor: record the target class and fetch the JARs.

    Reads the 'lib' references and 'class' name from the task arguments,
    publishes a progress event, and resolves the JAR references to local
    filenames for the run.
    """
    self.jar_refs = self.args["lib"]
    self.class_name = self.args["class"]
    ciel.log.error(
        "Running Java executor for class: %s" % self.class_name,
        "JAVA",
        logging.DEBUG,
    )
    ciel.engine.publish("worker_event", "Java: fetching JAR")
    self.jar_filenames = retrieve_filenames_for_refs(
        self.jar_refs,
        self.task_record,
    )
def guarded_execute(self):
    """Run the external process: fetch inputs, spawn, await, publish outputs.

    Optional task arguments read from ``self.args`` (a mapping):
      inputs            -- list of input references (default ``[]``).
      stream_output     -- prepublish stream refs for outputs (default False).
      pipe_output       -- allow outputs to be named pipes (default False).
      eager_fetch       -- fetch all inputs to files up front (default False).
      stream_chunk_size -- chunk size for streaming fetches (default 64 MiB).
      make_sweetheart   -- a ref, or list of refs, to mark (default ``[]``).

    Raises:
        OSError: if the child process exits with a non-zero return code.
    """
    # Idiomatic replacement for the original try/except-KeyError ladder:
    # every argument is optional with an explicit default.
    args = self.args
    self.input_refs = args.get('inputs', [])
    self.stream_output = args.get('stream_output', False)
    self.pipe_output = args.get('pipe_output', False)
    self.eager_fetch = args.get('eager_fetch', False)
    self.stream_chunk_size = args.get('stream_chunk_size', 67108864)
    self.make_sweetheart = args.get('make_sweetheart', [])
    if not isinstance(self.make_sweetheart, list):
        # A single ref is accepted and normalised to a one-element list.
        self.make_sweetheart = [self.make_sweetheart]

    file_inputs = None
    push_threads = None
    if self.eager_fetch:
        # Block here until every input is available as a local file.
        file_inputs = retrieve_filenames_for_refs(self.input_refs, self.task_record)
    else:
        # Stream inputs in the background; register each fetch with the
        # context manager so it is cleaned up when the task finishes.
        push_threads = [OngoingFetch(ref,
                                     chunk_size=self.stream_chunk_size,
                                     task_record=self.task_record,
                                     must_block=True)
                        for ref in self.input_refs]
        for thread in push_threads:
            self.context_mgr.add_context(thread)

    # TODO: Make these use OngoingOutputs and the context manager.
    with list_with([make_local_output(output_id, may_pipe=self.pipe_output)
                    for output_id in self.output_ids]) as out_file_contexts:

        if self.stream_output:
            # Publish provisional refs so consumers can start reading
            # the outputs while this process is still running.
            stream_refs = [ctx.get_stream_ref() for ctx in out_file_contexts]
            self.task_record.prepublish_refs(stream_refs)

        # We do these last, as these are the calls which can lead to stalls
        # whilst we await a stream's beginning or end.
        if file_inputs is None:
            file_inputs = []
            for thread in push_threads:
                (filename, is_blocking) = thread.get_filename()
                if is_blocking is not None:
                    # must_block=True was requested above, so any reported
                    # blocking state can only be True.
                    assert is_blocking is True
                file_inputs.append(filename)

        file_outputs = [filename for (filename, _)
                        in (ctx.get_filename_or_fd() for ctx in out_file_contexts)]

        self.proc = self.start_process(file_inputs, file_outputs)
        add_running_child(self.proc)
        rc = self.await_process(file_inputs, file_outputs)
        remove_running_child(self.proc)
        self.proc = None

        # if "trace_io" in self.debug_opts:
        #     transfer_ctx.log_traces()

        if rc != 0:
            raise OSError()

    # NOTE(review): the collapsed source does not show whether the block
    # below sat inside the `with`; completing outputs after the contexts
    # are closed matches the upstream layout -- confirm against history.
    for i, output in enumerate(out_file_contexts):
        self.output_refs[i] = output.get_completed_ref()

    ciel.engine.publish("worker_event", "Executor: Done")
def guarded_execute(self):
    """Run the external process: fetch inputs, spawn, await, publish outputs.

    Optional task arguments read from ``self.args`` (a mapping):
      inputs            -- list of input references (default ``[]``).
      stream_output     -- prepublish stream refs for outputs (default False).
      pipe_output       -- allow outputs to be named pipes (default False).
      eager_fetch       -- fetch all inputs to files up front (default False).
      stream_chunk_size -- chunk size for streaming fetches (default 64 MiB).
      make_sweetheart   -- a ref, or list of refs, to mark (default ``[]``).

    Raises:
        OSError: if the child process exits with a non-zero return code.
    """
    # Idiomatic replacement for the original try/except-KeyError ladder:
    # every argument is optional with an explicit default.
    args = self.args
    self.input_refs = args.get('inputs', [])
    self.stream_output = args.get('stream_output', False)
    self.pipe_output = args.get('pipe_output', False)
    self.eager_fetch = args.get('eager_fetch', False)
    self.stream_chunk_size = args.get('stream_chunk_size', 67108864)
    self.make_sweetheart = args.get('make_sweetheart', [])
    if not isinstance(self.make_sweetheart, list):
        # A single ref is accepted and normalised to a one-element list.
        self.make_sweetheart = [self.make_sweetheart]

    file_inputs = None
    push_threads = None
    if self.eager_fetch:
        # Block here until every input is available as a local file.
        file_inputs = retrieve_filenames_for_refs(self.input_refs, self.task_record)
    else:
        # Stream inputs in the background; register each fetch with the
        # context manager so it is cleaned up when the task finishes.
        push_threads = [OngoingFetch(ref,
                                     chunk_size=self.stream_chunk_size,
                                     task_record=self.task_record,
                                     must_block=True)
                        for ref in self.input_refs]
        for thread in push_threads:
            self.context_mgr.add_context(thread)

    # TODO: Make these use OngoingOutputs and the context manager.
    with list_with([make_local_output(output_id, may_pipe=self.pipe_output)
                    for output_id in self.output_ids]) as out_file_contexts:

        if self.stream_output:
            # Publish provisional refs so consumers can start reading
            # the outputs while this process is still running.
            stream_refs = [ctx.get_stream_ref() for ctx in out_file_contexts]
            self.task_record.prepublish_refs(stream_refs)

        # We do these last, as these are the calls which can lead to stalls
        # whilst we await a stream's beginning or end.
        if file_inputs is None:
            file_inputs = []
            for thread in push_threads:
                (filename, is_blocking) = thread.get_filename()
                if is_blocking is not None:
                    # must_block=True was requested above, so any reported
                    # blocking state can only be True.
                    assert is_blocking is True
                file_inputs.append(filename)

        file_outputs = [filename for (filename, _)
                        in (ctx.get_filename_or_fd() for ctx in out_file_contexts)]

        self.proc = self.start_process(file_inputs, file_outputs)
        add_running_child(self.proc)
        rc = self.await_process(file_inputs, file_outputs)
        remove_running_child(self.proc)
        self.proc = None

        # if "trace_io" in self.debug_opts:
        #     transfer_ctx.log_traces()

        if rc != 0:
            raise OSError()

    # NOTE(review): the collapsed source does not show whether the block
    # below sat inside the `with`; completing outputs after the contexts
    # are closed matches the upstream layout -- confirm against history.
    for i, output in enumerate(out_file_contexts):
        self.output_refs[i] = output.get_completed_ref()

    ciel.engine.publish("worker_event", "Executor: Done")