Example #1
 def _run_command(self, job, environ):
     start_time = time.time()
     slug = slugify(job.id)
     output_writer = CommandOutputWriter(
         stdout_path=os.path.join(self._jobs_io_log_dir,
                                  "{}.stdout".format(slug)),
         stderr_path=os.path.join(self._jobs_io_log_dir,
                                  "{}.stderr".format(slug)))
     io_log_gen = IOLogRecordGenerator()
     log = os.path.join(self._jobs_io_log_dir, "{}.record.gz".format(slug))
     with gzip.open(log, mode='wb') as gzip_stream, io.TextIOWrapper(
             gzip_stream, encoding='UTF-8') as record_stream:
         writer = IOLogRecordWriter(record_stream)
         io_log_gen.on_new_record.connect(writer.write_record)
         delegate = extcmd.Chain([
             self._job_runner_ui_delegate, io_log_gen,
             self._command_io_delegate, output_writer
         ])
         ecmd = extcmd.ExternalCommandWithDelegate(delegate)
         return_code = self.execute_job(job, environ, ecmd, self._stdin)
         io_log_gen.on_new_record.disconnect(writer.write_record)
     if return_code == 0:
         outcome = IJobResult.OUTCOME_PASS
     elif return_code < 0:
         outcome = IJobResult.OUTCOME_CRASH
     else:
         outcome = IJobResult.OUTCOME_FAIL
     return JobResultBuilder(outcome=outcome,
                             return_code=return_code,
                             io_log_filename=log,
                             execution_duration=time.time() - start_time)
Example #2
 def _prepare_io_handling(self, job, config):
     ui_io_delegate = self._command_io_delegate
     # NOTE: deprecated
     # If there is no UI delegate specified, create a simple
     # delegate that logs all output to the console
     if ui_io_delegate is None:
         ui_io_delegate = FallbackCommandOutputPrinter(job.id)
     # Compute a shared base filename for all logging activity associated
     # with this job (aka: the slug)
     slug = slugify(job.id)
     # Create a delegate that writes all IO to disk
     output_writer = CommandOutputWriter(
         stdout_path=os.path.join(self._jobs_io_log_dir,
                                  "{}.stdout".format(slug)),
         stderr_path=os.path.join(self._jobs_io_log_dir,
                                  "{}.stderr".format(slug)))
     # Create a delegate for converting regular IO to IOLogRecords.
     # It takes no arguments as all the interesting stuff is added as a
     # signal listener.
     io_log_gen = IOLogRecordGenerator()
     # FIXME: this description is probably inaccurate and definitely doesn't
     # take self._job_runner_ui_delegate into account.
     #
     # Create the delegate for routing IO
     #
     # Split the stream of data into three parts (each part is expressed as
     # an element of extcmd.Chain()).
     #
     # Send the first copy of the data through bytes->text decoder and
     # then to the UI delegate. This could be something provided by the
     # higher level caller or the default FallbackCommandOutputPrinter.
     #
     # Send the second copy of the data to the IOLogRecordGenerator instance
     # that converts raw bytes into neat IOLogRecord objects. This generator
     # has an on_new_record signal that can be used to do stuff when a new
     # record is generated.
     #
     # Send the third copy to the output writer that writes everything to
     # disk.
     delegate = extcmd.Chain([
         self._job_runner_ui_delegate, ui_io_delegate, io_log_gen,
         output_writer
     ])
     logger.debug(_("job[%s] extcmd delegate: %r"), job.id, delegate)
     # Attach listeners to io_log_gen (the IOLogRecordGenerator instance)
     # One listener appends each record to an array
     return delegate, io_log_gen
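The long comment above boils down to a fan-out: one stream of process output is delivered unchanged, in order, to several independent consumers (UI printer, record generator, disk writer), none of which needs to know about the others, which is why a missing UI delegate can simply be replaced by a fallback printer. The sketch below shows that pattern with stand-in classes; the single on_line method is a simplification for illustration, not extcmd's real delegate interface:

class FanOut:
    """Stand-in for extcmd.Chain: forward each chunk of output to every delegate."""

    def __init__(self, delegates):
        self.delegates = list(delegates)

    def on_line(self, stream_name, line):
        # Deliver the same piece of output to every delegate, in order.
        for delegate in self.delegates:
            delegate.on_line(stream_name, line)


class ConsolePrinter:
    """Stand-in for FallbackCommandOutputPrinter: echo output to the console."""

    def __init__(self, prefix):
        self.prefix = prefix

    def on_line(self, stream_name, line):
        print("{} ({}): {}".format(self.prefix, stream_name, line.rstrip()))


class DiskWriter:
    """Stand-in for CommandOutputWriter: append each stream to its own file."""

    def __init__(self, stdout_path, stderr_path):
        self.paths = {'stdout': stdout_path, 'stderr': stderr_path}

    def on_line(self, stream_name, line):
        with open(self.paths[stream_name], 'a', encoding='UTF-8') as stream:
            stream.write(line)


chain = FanOut([ConsolePrinter("job"), DiskWriter("job.stdout", "job.stderr")])
chain.on_line('stdout', 'hello world\n')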
Example #3
 def test_chain(self):
     obj = extcmd.Chain([Dummy()])
     self.assertEqual(repr(obj),
                      "<Chain [<SafeDelegate wrapping <Dummy>>]>")