def fetch(self, test_job):
    try:
        data = self.__get_job_details__(test_job.job_id)
        if data['status'] in self.complete_statuses:
            data['results'] = self.__get_testjob_results_yaml__(test_job.job_id)

            # fetch logs
            logs = ""
            try:
                logs = self.__get_job_logs__(test_job.job_id)
            except Exception:
                self.log_warn(("Logs for job %s are not available" % test_job.job_id)
                              + "\n" + traceback.format_exc())

            return self.__parse_results__(data, test_job) + (logs, )
    except xmlrpc.client.ProtocolError as error:
        raise TemporaryFetchIssue(str(error))
    except xmlrpc.client.Fault as fault:
        if fault.faultCode // 100 == 5:
            # assume HTTP errors 5xx are temporary issues
            raise TemporaryFetchIssue(str(fault))
        else:
            raise FetchIssue(str(fault))
    except ssl.SSLError as fault:
        raise FetchIssue(str(fault))
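A hedged note on the `+ (logs, )` return: assuming __parse_results__ yields a (status, completed, metadata, tests, metrics) tuple, appending the log text produces the same six-element shape that parse_build_results returns further below. That tuple layout is an assumption, not confirmed by the source; `backend` in the snippet is a hypothetical instance.

# Assumed shape: (status, completed, metadata, tests, metrics) + (logs, )
status, completed, metadata, tests, metrics, logs = backend.fetch(test_job)
if completed:
    print(status, len(tests), len(metrics))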
def fetch(self, test_job):
    try:
        data = self.__get_job_details__(test_job.job_id)
        status_key = 'status'
        if not self.use_xml_rpc:
            status_key = 'state'
        if data[status_key] in self.complete_statuses:
            # fill in start and end datetime for the job
            start_time = data.get('start_time', None)
            end_time = data.get('end_time', None)
            # convert to datetime
            if isinstance(start_time, str):
                try:
                    start_time = isoparse(start_time)
                except ValueError:
                    start_time = None
            if isinstance(end_time, str):
                try:
                    end_time = isoparse(end_time)
                except ValueError:
                    end_time = None
            test_job.started_at = start_time
            test_job.ended_at = end_time
            test_job.failure = None
            test_job.save()

            data['results'] = self.__get_testjob_results_yaml__(test_job.job_id)

            # fetch logs
            raw_logs = BytesIO()
            try:
                raw_logs = BytesIO(self.__download_full_log__(test_job.job_id))
            except Exception:
                self.log_warn(("Logs for job %s are not available" % test_job.job_id)
                              + "\n" + traceback.format_exc())

            return self.__parse_results__(data, test_job, raw_logs)
    except xmlrpc.client.ProtocolError as error:
        raise TemporaryFetchIssue(self.url_remove_token(str(error)))
    except xmlrpc.client.Fault as fault:
        if fault.faultCode // 100 == 5:
            # assume HTTP errors 5xx are temporary issues
            raise TemporaryFetchIssue(self.url_remove_token(str(fault)))
        else:
            raise FetchIssue(self.url_remove_token(str(fault)))
    except ssl.SSLError as fault:
        raise FetchIssue(self.url_remove_token(str(fault)))
    except requests.exceptions.RequestException as fault:
        if isinstance(fault, requests.exceptions.Timeout):
            # assume timeouts are temporary issues
            raise TemporaryFetchIssue(self.url_remove_token(str(fault)))
        else:
            raise FetchIssue(self.url_remove_token(str(fault)))
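The url_remove_token helper is referenced above but not shown. A minimal sketch of what it could do follows, assuming the backend keeps its API token on self.data.token; the attribute name and the redaction format are assumptions, not the project's actual implementation.

def url_remove_token(self, text):
    # Hypothetical helper: redact the backend token anywhere it appears in
    # upstream error text (for example inside URLs), so it is not persisted
    # in TestJob.failure. self.data.token is an assumed attribute.
    token = getattr(self.data, 'token', None)
    if token:
        return text.replace(token, '***')
    return text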
def test_counts_attempts_with_temporary_exceptions(self, fetch_method):
    attempts = self.test_job.fetch_attempts
    fetch_method.side_effect = TemporaryFetchIssue("ERROR")

    fetch.apply(args=[self.test_job.id])

    self.test_job.refresh_from_db()
    self.assertEqual(attempts + 1, self.test_job.fetch_attempts)
def test_temporary_exception_when_fetching(self, fetch_method):
    fetch_method.side_effect = TemporaryFetchIssue("ERROR")

    fetch.apply(args=[self.test_job.id])

    self.test_job.refresh_from_db()
    self.assertEqual("ERROR", self.test_job.failure)
    self.assertFalse(self.test_job.fetched)
def fetch_url(self, *urlbits):
    url = reduce(urljoin, urlbits)

    try:
        response = requests.get(url)
    except Exception as e:
        raise TemporaryFetchIssue(f"Can't retrieve from {url}: {e}")

    return response
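Illustration only (not from the source): reduce folds urljoin over the URL bits left to right, so trailing slashes decide whether a segment is extended or replaced. That is why parse_build_results below appends '/' to download_url before joining 'config'.

from functools import reduce
from urllib.parse import urljoin

# Trailing slashes: each segment is kept and the next bit is appended.
assert reduce(urljoin, ('https://example.com/api/', 'builds/', '42')) == 'https://example.com/api/builds/42'
# No trailing slash: urljoin replaces the last segment ('api') instead.
assert reduce(urljoin, ('https://example.com/api', 'builds')) == 'https://example.com/builds'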
def test_clear_exception_after_successful_fetch(self, job_url, fetch_method):
    fetch_method.side_effect = TemporaryFetchIssue("ERROR")

    fetch.apply(args=[self.test_job.id])

    self.test_job.refresh_from_db()
    self.assertEqual("ERROR", self.test_job.failure)
    self.assertFalse(self.test_job.fetched)

    fetch_method.side_effect = FetchTest.mock_backend_fetch
    job_url.side_effect = lambda a: 'test'

    fetch.apply(args=[self.test_job.id])

    self.test_job.refresh_from_db()
    fetch_method.assert_called_with(self.test_job)
    self.assertIsNone(self.test_job.failure)
    self.assertTrue(self.test_job.fetched)
def parse_build_results(self, test_job, job_url, results, settings):
    required_keys = ['build_status', 'warnings_count', 'download_url', 'retry']
    self.__check_required_keys__(required_keys, results)

    # Generate generic test/metric name
    test_name = results.get('build_name') or self.generate_test_name(results)
    test_job.name = test_name

    build_status = results['build_status']
    if build_status == 'error' and results['retry'] < 2:
        # SQUAD should retry fetching the build until retry == 2
        raise TemporaryFetchIssue(results.get('status_message', 'TuxSuite Error'))

    # Make metadata
    metadata_keys = settings.get('BUILD_METADATA_KEYS', [])
    metadata = {k: results.get(k) for k in metadata_keys}
    metadata['job_url'] = job_url
    metadata['config'] = urljoin(results.get('download_url') + '/', 'config')

    # Create tests and metrics
    tests = {}
    metrics = {}
    completed = True

    if results['retry'] >= 2:
        # This indicates that TuxSuite gave up trying to work on this build
        status = 'Incomplete'
        tests[f'build/{test_name}'] = 'skip'
        logs = ''
    else:
        status = 'Complete'
        tests[f'build/{test_name}'] = build_status
        metrics[f'build/{test_name}-warnings'] = results['warnings_count']
        logs = self.fetch_url(results['download_url'], 'build.log').text

        try:
            metrics[f'build/{test_name}-duration'] = results['tuxmake_metadata']['results']['duration']['build']
        except KeyError:
            raise FetchIssue('Missing duration from build results')

    return status, completed, metadata, tests, metrics, logs
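The __check_required_keys__ helper called at the top of parse_build_results is not included above. A plausible sketch follows, under the assumption that it simply aborts the fetch when the TuxSuite payload lacks fields the parser depends on; the exact message and exception choice are guesses.

def __check_required_keys__(self, required_keys, results):
    # Hypothetical sketch: fail parsing if any expected field is missing
    # from the TuxSuite results payload.
    missing = [key for key in required_keys if key not in results]
    if missing:
        raise FetchIssue('Missing build results keys: %s' % ', '.join(missing))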