def test_lpadmin(self):
    """ Verify the error for a failure in lpadmin.

    The failure is reported as CUPS_LPADMIN_FAILURE.

    @raises TestFail: If the error code from debugd is incorrect.

    """
    ppd = self.load_ppd(_GENERIC_PPD)
    # Each tuple: (printer name, device URI, failure message format).
    # Both configurations are invalid in a way that lpadmin rejects.
    rejected_configs = [
        ('CUPS rejects names with spaces',
         'socket://127.0.0.1/ipp/fake_printer',
         'lpadmin - Names with spaces should be rejected by CUPS '
         '%d'),
        ('UnrecognizedProtocol',
         'badbadbad://127.0.0.1/ipp/fake_printer',
         'lpadmin - Unrecognized protocols should be rejected by '
         'CUPS. %d'),
    ]
    for printer_name, uri, failure_message in rejected_configs:
        code = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
            printer_name, uri, ppd)
        if code != _CUPS_LPADMIN_ERROR:
            raise error.TestFail(failure_message % code)
def validate_perf_data_in_feedback_logs(self):
    """ Validate that feedback logs contain valid perf data.

    Fetches the big feedback logs from debugd through a pipe,
    extracts the 'perf-data' entry, base64-decodes and
    xz-decompresses it, and fails if the resulting profile looks
    empty or records a capture failure.

    @raises TestFail: If perf data is missing or malformed.

    """
    pipe_r, pipe_w = os.pipe()
    # GetBigFeedbackReport transfers large content through the pipe. We
    # need to read from the pipe off-thread to prevent a deadlock.
    pipe_reader = PipeReader(pipe_r)
    thread = Thread(target=pipe_reader.read)
    thread.start()
    # Use 180-sec timeout because GetBigFeedbackLogs runs arc-bugreport,
    # which takes a while to finish.
    debugd_util.iface().GetBigFeedbackLogs(dbus.types.UnixFd(pipe_w),
                                           signature='h', timeout=180)
    # pipe_w is dup()'d in calling dbus. Close in this process.
    os.close(pipe_w)
    thread.join()

    # Decode into a dictionary.
    logs = json.loads(pipe_reader.result)
    if not logs:
        raise error.TestFail('GetBigFeedbackLogs() returned no data')
    logging.info('GetBigFeedbackLogs() returned %d elements.', len(logs))

    # Use .get() so a missing 'perf-data' key reports a TestFail below
    # instead of escaping as an unexplained KeyError.
    perf_data = logs.get('perf-data')
    if perf_data is None:
        raise error.TestFail('perf-data not found in feedback logs')

    BLOB_START_TOKEN = '<base64>: '
    try:
        blob_start = perf_data.index(BLOB_START_TOKEN)
    except ValueError:
        # str.index raises ValueError when the token is absent; a bare
        # except here would also have masked unrelated failures.
        raise error.TestFail("perf-data doesn't include base64 encoded "
                             "data")
    # Skip description text and BLOB_START_TOKEN
    perf_data = perf_data[blob_start + len(BLOB_START_TOKEN):]
    logging.info('base64 perf data: %d bytes', len(perf_data))

    # This raises TypeError if input is invalid base64-encoded data.
    compressed_data = base64.b64decode(perf_data)
    protobuff = self.xz_decompress_string(compressed_data)
    if len(protobuff) < 10:
        raise error.TestFail('Perf output too small (%d bytes)' %
                             len(protobuff))
    # quipper emits this marker when the perf subprocess failed.
    if protobuff.startswith('<process exited with status: '):
        raise error.TestFail('Failed to capture a profile: %s' %
                             protobuff)
def test_ppd_error(self):
    """ Validates that malformed PPDs are rejected.

    The expected error code is CUPS_INVALID_PPD error.

    @raises TestFail: If the test failed.

    """
    bad_ppd = dbus.ByteArray('This is not a valid ppd')
    # The PPD content is garbage, so debugd must answer with the
    # dedicated invalid-PPD error code.
    error_code = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
        'ManualPrinterBreaks',
        'socket://127.0.0.1/ipp/fake_printer',
        bad_ppd)
    if error_code != _CUPS_INVALID_PPD_ERROR:
        raise error.TestFail('Incorrect error code received %d' %
                             error_code)
def run_once(self, *args, **kwargs):
    """ Primary autotest function. """
    # Setup: cache the debugd D-Bus interface for the sub-tests.
    self.dbus_iface = debugd_util.iface()
    # Run normal cases first, then the error cases, in a fixed order.
    for case in (self.test_full_duration,
                 self.test_start_after_previous_finished,
                 self.test_stop_perf,
                 self.test_stop_without_start,
                 self.test_stop_using_wrong_id,
                 self.test_start_2nd_time):
        case()
def test_autoconf(self):
    """ Attempt to add an unreachable autoconfigured printer.

    Verifies that upon autoconf failure, the error code is
    CUPS_AUTOCONF_FAILURE.

    @raises TestFail: If the test failed.

    """
    # Nothing listens at this address, so the autoconf handshake must
    # fail and debugd must report CUPS_AUTOCONF_FAILURE.
    returned_code = debugd_util.iface().CupsAddAutoConfiguredPrinter(
        'AutoconfPrinter', 'ipp://127.0.0.1/ipp/print')
    if returned_code != _CUPS_AUTOCONF_FAILURE:
        raise error.TestFail('Incorrect error code received: %i' %
                             returned_code)
def add_a_printer(self, ppd_path):
    """ Add a printer manually given ppd file.

    Args:
    @param ppd_path: path to ppd file

    @raises: error.TestFail if could not setup a printer
    """
    logging.info('add printer from ppd:' + ppd_path)
    contents = self.load_ppd(ppd_path)
    status = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
        _FAKE_PRINTER_ID, 'socket://127.0.0.1/', contents)
    # Anything other than success means the printer never registered.
    if status != _CUPS_SUCCESS:
        raise error.TestFail('valid_config - Could not setup valid '
                             'printer %d' % status)
def test_valid_config(self):
    """ Validates that a printer can be installed.

    Verifies that given a valid configuration and a well formed PPD,
    DebugDaemon reports a CUPS_SUCCESS error code indicating
    success.

    @raises TestFail: If the result from debugd was not CUPS_SUCCESS.

    """
    ppd = self.load_ppd(_GENERIC_PPD)
    # The printer does not need to be reachable: installation only
    # validates the configuration and PPD, so CUPS_SUCCESS is expected.
    code = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
        'ManualPrinterGood',
        'socket://127.0.0.1/ipp/fake_printer',
        ppd)
    if code != _CUPS_SUCCESS:
        raise error.TestFail('Could not setup valid printer %d' % code)
def _PPD_test_procedure(self, ppd_name, ppd_content, port):
    """ Test procedure for single PPD file.

    It tries to run the following steps:
    1. Starts an instance of FakePrinter
    2. Configures CUPS printer
    3. For each test document run the following steps:
        3a. Sends tests documents to the CUPS printer
        3b. Fetches the raw document from the FakePrinter
        3c. Parse CUPS logs and check for any errors
        3d. If self._pipeline_dir is set, extract the executed CUPS
            pipeline, rerun it in bash console and verify every step
            and final output
        3e. If self._path_output_directory is set, save the raw
            document and all intermediate steps in the provided
            directory
        3f. If the digest is available, verify a digest of an output
            documents
    4. Removes CUPS printer and stops FakePrinter
    If the test fails this method throws an exception.

    @param ppd_name: a name of the PPD file
    @param ppd_content: a content of the PPD file
    @param port: a port for the printer

    @throws Exception when the test fails

    """
    # Create work directory for external pipelines and save the PPD
    # file there (if needed).
    path_ppd = None
    if self._pipeline_dir is not None:
        path_pipeline_ppd_dir = os.path.join(self._pipeline_dir, ppd_name)
        os.makedirs(path_pipeline_ppd_dir)
        path_ppd = os.path.join(path_pipeline_ppd_dir, ppd_name)
        with open(path_ppd, 'wb') as file_ppd:
            file_ppd.write(ppd_content)
        # Gzipped PPDs are unpacked on disk so the pipeline can read
        # them directly; drop the '.gz' suffix from the saved path.
        if path_ppd.endswith('.gz'):
            subprocess.call(['gzip', '-d', path_ppd])
            path_ppd = path_ppd[0:-3]
    try:
        # Starts the fake printer; it is stopped automatically at the
        # end of this "with" statement.
        with fake_printer.FakePrinter(port) as printer:
            # Add a CUPS printer manually with given ppd file.
            # The name embeds the port so concurrent runs don't clash.
            cups_printer_id = '%s_at_%05d' % (_FAKE_PRINTER_ID, port)
            result = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
                cups_printer_id,
                'socket://127.0.0.1:%d' % port,
                dbus.ByteArray(ppd_content))
            if result != _CUPS_SUCCESS:
                raise Exception('valid_config - Could not setup valid '
                                'printer %d' % result)
            # Prints all test documents.
            try:
                for doc_name in self._docs:
                    # Omit exceptions: known-bad (doc, PPD) pairs are
                    # skipped; a placeholder digest keeps the digest
                    # table complete when output is being archived.
                    if (doc_name in _EXCEPTIONS and ppd_name in
                            _EXCEPTIONS[doc_name]):
                        if self._path_output_directory is not None:
                            self._new_digests[doc_name][ppd_name] = (
                                helpers.calculate_digest('\x00'))
                        continue
                    # Full path to the test document.
                    path_doc = os.path.join(self._location_of_test_docs,
                                            doc_name)
                    # Sends test document to printer via lp(1).
                    argv = ['lp', '-d', cups_printer_id]
                    argv += [path_doc]
                    subprocess.call(argv)
                    # Prepare a workdir for the pipeline (if needed).
                    path_pipeline_workdir_temp = None
                    if self._pipeline_dir is not None:
                        path_pipeline_workdir = os.path.join(
                            path_pipeline_ppd_dir, doc_name)
                        path_pipeline_workdir_temp = os.path.join(
                            path_pipeline_workdir, 'temp')
                        os.makedirs(path_pipeline_workdir_temp)
                    # Gets the output document from the fake printer
                    # and fingerprint it for later comparison.
                    doc = printer.fetch_document(_FAKE_PRINTER_TIMEOUT)
                    digest = helpers.calculate_digest(doc)
                    # Retrive data from the log file: filter status,
                    # raw logs and (optionally) the extracted pipeline.
                    no_errors, logs, pipeline = \
                        self._log_reader.extract_result(
                            cups_printer_id, path_ppd, path_doc,
                            path_pipeline_workdir_temp)
                    # Archive obtained results in the output directory.
                    if self._path_output_directory is not None:
                        self._archivers[doc_name].save_file(
                            ppd_name, '.out', doc, apply_gzip=True)
                        self._archivers[doc_name].save_file(
                            ppd_name, '.log', logs)
                        if pipeline is not None:
                            self._archivers[doc_name].save_file(
                                ppd_name, '.sh', pipeline)
                        # Set new digest.
                        self._new_digests[doc_name][ppd_name] = digest
                    # Fail if any of CUPS filters failed.
                    if not no_errors:
                        raise Exception('One of the CUPS filters failed')
                    # Reruns the pipeline and dump intermediate
                    # outputs; the workdir is removed on success.
                    if self._pipeline_dir is not None:
                        self._rerun_whole_pipeline(
                            pipeline, path_pipeline_workdir,
                            ppd_name, doc_name, digest)
                        shutil.rmtree(path_pipeline_workdir)
                    # Check document's digest (if known).
                    if ppd_name in self._digests[doc_name]:
                        digest_expected = self._digests[doc_name][ppd_name]
                        if digest_expected != digest:
                            message = 'Document\'s digest does not match'
                            raise Exception(message)
                    else:
                        # Simple validation: without a reference digest
                        # we only require non-trivial output.
                        if len(doc) < 16:
                            raise Exception('Empty output')
            finally:
                # Remove CUPS printer.
                debugd_util.iface().CupsRemovePrinter(cups_printer_id)
        # The fake printer is stopped at the end of "with" statement.
    finally:
        # Finalize archivers and cleaning.
        if self._path_output_directory is not None:
            for doc_name in self._docs:
                self._archivers[doc_name].finalize_prefix(ppd_name)
        # Clean the pipelines' working directories.
        if self._pipeline_dir is not None:
            shutil.rmtree(path_pipeline_ppd_dir)