Example #1
0
 def test_smoke_write(self):
     """Writing a record produces the expected text; close() closes the stream."""
     buf = io.StringIO()
     record_writer = IOLogRecordWriter(buf)
     record_writer.write_record(self._RECORD)
     self.assertEqual(buf.getvalue(), self._TEXT)
     record_writer.close()
     with self.assertRaises(ValueError):
         # The underlying stream was closed along with the writer, so
         # getvalue() now raises ValueError.
         buf.getvalue()
Example #2
0
def make_io_log(io_log, io_log_dir):
    """
    Serialize the given IO log records to a gzipped file on disk.

    The file is created inside *io_log_dir* with a ``.record.gz`` suffix
    and is deliberately not deleted on close.

    WARNING: The caller has to remove the file once done with it!

    :returns: pathname of the saved file
    """
    byte_stream = NamedTemporaryFile(
        delete=False, suffix='.record.gz', dir=io_log_dir)
    # Layer gzip compression and UTF-8 text encoding on top of the raw
    # temporary file; exiting the with-block flushes and closes each layer
    # from the innermost outwards.
    with byte_stream, \
            GzipFile(fileobj=byte_stream, mode='wb') as gzip_stream, \
            TextIOWrapper(gzip_stream, encoding='UTF-8') as text_stream:
        record_writer = IOLogRecordWriter(text_stream)
        for each_record in io_log:
            record_writer.write_record(each_record)
    return byte_stream.name
 def test_smoke_write(self):
     """Serialized output matches the fixture; closing the writer closes the stream."""
     target = io.StringIO()
     log_writer = IOLogRecordWriter(target)
     log_writer.write_record(self._RECORD)
     self.assertEqual(target.getvalue(), self._TEXT)
     log_writer.close()
     # After close() the StringIO is no longer usable
     with self.assertRaises(ValueError):
         target.getvalue()
Example #4
0
    def _run_command(self, job, job_state, config, ctrl):
        """
        Run the shell command associated with the specified job.

        :returns: (return_code, record_path) where return_code is the number
        returned by the exiting child process while record_path is a pathname
        of a gzipped content readable with :class:`IOLogRecordReader`
        """
        # A job without a command cannot be executed at all
        if job.command is None:
            raise ValueError(_("job {0} has no command to run").format(job.id))
        # Obtain the extcmd delegate that observes all of the IO we need
        io_delegate, log_generator = self._prepare_io_handling(job, config)
        # Jobs can opt into chunked IO via an explicit flag
        popen_flags = 0
        if 'use-chunked-io' in job.get_flag_set():
            popen_flags |= extcmd.CHUNKED_IO
        # Build a subprocess.Popen()-like object that routes all IO through
        # the delegate system as it occurs, in real time.
        extcmd_popen = extcmd.ExternalCommandWithDelegate(
            io_delegate, flags=popen_flags)
        # Every IOLogRecord entry gets streamed to this gzipped file on disk
        record_pathname = os.path.join(
            self._jobs_io_log_dir,
            "{}.record.gz".format(slugify(job.id)))
        with gzip.open(record_pathname, mode='wb') as gzip_stream, \
                io.TextIOWrapper(
                    gzip_stream, encoding='UTF-8') as record_stream:
            log_writer = IOLogRecordWriter(record_stream)
            log_generator.on_new_record.connect(log_writer.write_record)
            try:
                # Launch the process and wait for it to finish, collecting
                # the result code. A number of callbacks fire while the
                # process runs and a few threads are spawned, but all
                # callbacks come from a single (non-main) thread.
                logger.debug(
                    _("job[%s] starting command: %s"), job.id, job.command)
                exit_code = self._run_extcmd(
                    job, job_state, config, extcmd_popen, ctrl)
                # Always return something useful (even with PermissionError)
                if exit_code is None:
                    exit_code = -1
                logger.debug(
                    _("job[%s] command return code: %r"), job.id, exit_code)
            finally:
                # Detach the writer before its stream gets closed
                log_generator.on_new_record.disconnect(log_writer.write_record)
        return exit_code, record_pathname
Example #5
0
    def _run_command(self, job, config):
        """
        Run the shell command associated with the specified job.

        :returns: (return_code, record_path) where return_code is the number
        returned by the exiting child process while record_path is a pathname
        of a gzipped content readable with :class:`IOLogRecordReader`
        """
        # Bail early if there is nothing to do
        if job.command is None:
            return None, ()
        # Create an equivalent of the CHECKBOX_DATA directory used by
        # some jobs to store logs and other files that may later be used
        # by other jobs.
        self._checkbox_data_dir = os.path.join(self._session_dir,
                                               "CHECKBOX_DATA")
        if not os.path.isdir(self._checkbox_data_dir):
            os.makedirs(self._checkbox_data_dir)
        # Get an extcmd delegate for observing all the IO the way we need
        delegate, io_log_gen = self._prepare_io_handling(job, config)
        # Create a subprocess.Popen() like object that uses the delegate
        # system to observe all IO as it occurs in real time.
        extcmd_popen = extcmd.ExternalCommandWithDelegate(delegate)
        # Stream all IOLogRecord entries to disk
        record_path = os.path.join(self._jobs_io_log_dir,
                                   "{}.record.gz".format(slugify(job.name)))
        with gzip.open(record_path, mode='wb') as gzip_stream, \
                io.TextIOWrapper(
                    gzip_stream, encoding='UTF-8') as record_stream:
            writer = IOLogRecordWriter(record_stream)
            io_log_gen.on_new_record.connect(writer.write_record)
            try:
                # Start the process and wait for it to finish getting the
                # result code. This will actually call a number of callbacks
                # while the process is running. It will also spawn a few
                # threads although all callbacks will be fired from a single
                # thread (which is _not_ the main thread)
                logger.debug("job[%s] starting command: %s", job.name,
                             job.command)
                # Run the job command using extcmd
                return_code = self._run_extcmd(job, config, extcmd_popen)
                logger.debug("job[%s] command return code: %r", job.name,
                             return_code)
            finally:
                # BUGFIX: always disconnect the writer, even if _run_extcmd
                # raises. Previously an exception would leave write_record
                # connected to the signal while its underlying stream was
                # being closed by the with-block.
                io_log_gen.on_new_record.disconnect(writer.write_record)
        return return_code, record_path
Example #6
0
    def run_qml_job(self, job, job_state, config):
        """
        Method called to run a job with plugin field equal to 'qml'.

        The 'qml' job implements the following scenario:

        * Maybe display the description to the user
        * Run qmlscene with provided test and wait for it to finish
        * Decide on the outcome based on the result object returned by qml
          shell
        * The method ends here

        .. note::
            QML jobs are fully manual jobs with graphical user interface
            implemented in QML. They implement proposal described in CEP-5.
        """
        if job.plugin != "qml":
            # TRANSLATORS: please keep 'plugin' untranslated
            raise ValueError(_("bad job plugin value"))
        try:
            ctrl = self._get_ctrl_for_job(job)
        except LookupError:
            return JobResultBuilder(
                outcome=IJobResult.OUTCOME_NOT_SUPPORTED,
                # BUGFIX: the closing parenthesis was previously inside the
                # translatable message ("...available)"), garbling the text
                # shown to the user.
                comments=_('No suitable execution controller is available')
            ).get_result()
        # Run the embedded command
        start_time = time.time()
        delegate, io_log_gen = self._prepare_io_handling(job, config)
        # Create a subprocess.Popen() like object that uses the delegate
        # system to observe all IO as it occurs in real time.
        delegate_cls = extcmd.ExternalCommandWithDelegate
        extcmd_popen = delegate_cls(delegate)
        # Stream all IOLogRecord entries to disk
        record_path = self.get_record_path_for_job(job)
        with gzip.open(record_path, mode='wb') as gzip_stream, \
                io.TextIOWrapper(
                    gzip_stream, encoding='UTF-8') as record_stream:
            writer = IOLogRecordWriter(record_stream)
            io_log_gen.on_new_record.connect(writer.write_record)
            try:
                # Start the process and wait for it to finish getting the
                # result code. This will actually call a number of callbacks
                # while the process is running. It will also spawn a few
                # threads although all callbacks will be fired from a single
                # thread (which is _not_ the main thread)
                logger.debug(
                    _("job[%s] starting qml shell: %s"), job.id, job.qml_file)
                # Run the job command using extcmd
                ctrl.on_leftover_files.connect(self.on_leftover_files)
                try:
                    return_code, result = ctrl.execute_job_with_result(
                        job, job_state, config, self._session_dir,
                        extcmd_popen)
                finally:
                    ctrl.on_leftover_files.disconnect(self.on_leftover_files)
                logger.debug(
                    _("job[%s] shell return code: %r"), job.id, return_code)
            finally:
                io_log_gen.on_new_record.disconnect(writer.write_record)
        execution_duration = time.time() - start_time
        # A non-zero exit or a missing result object both count as failure;
        # otherwise trust the outcome reported by the qml shell.
        if return_code != 0 or result is None:
            outcome = IJobResult.OUTCOME_FAIL
        else:
            outcome = result['outcome']
        # Create a result object and return it
        return JobResultBuilder(
            outcome=outcome,
            return_code=return_code,
            io_log_filename=record_path,
            execution_duration=execution_duration
        ).get_result()