def get_appropriate_filename(path):
    # The jobs endpoint is keyed by push id, page number, and how many times this
    # push/page combination has already been served, so tests can return different
    # canned responses to repeated polls.
    has_page = "&page=" in path
    i1 = path.index(EXPECTEDPATH_JOBS) + len(EXPECTEDPATH_JOBS)
    i2 = path.index("&page=") if has_page else len(path)
    push_id = path[i1:i2]
    page = path[path.index("&page=") + len("&page="):] if has_page else "1"

    key = push_id + "_" + page
    seen_counter = find_and_increment_seen_counter(key)
    key += "_" + seen_counter

    log("Checking for push_id", push_id, "page", page, "seen", seen_counter, level=LogLevel.Debug)
    if key not in PUSH_IDS:
        # No response registered for this specific repetition; fall back to the
        # first-response ("A") key for this push/page.
        key = push_id + "_" + page + "_" + "A"
        log("Response-specific key missing, checking for key ", key, level=LogLevel.Debug)
        if key not in PUSH_IDS:
            assert False, "Could not find either key in PUSH_IDS"

    return PUSH_IDS[key]
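# Illustrative sketch (not part of the original file): the table consulted above is
# keyed "<push_id>_<page>_<counter>", where the counter distinguishes repeated hits
# on the same push/page and "A" is the first-hit fallback. A test wanting different
# canned jobs responses for successive polls could register entries like the
# hypothetical dict below; the real PUSH_IDS contents and filenames live elsewhere
# in this module, and the counter values here are assumed to be letters like the
# "A" used in the fallback path.
EXAMPLE_PUSH_IDS = {
    "123_1_A": "jobs_push123_page1_running.txt",   # first poll of push 123, page 1
    "123_1_B": "jobs_push123_page1_done.txt",      # second poll of the same push/page
    "123_2_A": "jobs_push123_page2.txt",           # every poll of page 2 gets this file
}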
def do_GET(self):
    self.send_response(200)
    self.send_header("Content-type", "application/json")
    self.end_headers()

    # Canned responses live under tests/treeherder_api_responses/, whether the
    # tests are run from the repository root or from the tests/ directory.
    file_prefix = "tests/" if not os.getcwd().endswith("tests") else ""
    file_prefix += "treeherder_api_responses/"

    if EXPECTEDPATH_PUSH in self.path:
        revision = self.path[self.path.index(EXPECTEDPATH_PUSH) + len(EXPECTEDPATH_PUSH):]
        log("MockTreeherderServer (push): Looking for revision %s" % revision, level=LogLevel.Info)
        if revision not in TRY_REVISIONS:
            assert False, "MockTreeherderServer: Could not find that revision"
        self.wfile.write(TRY_REVISIONS[revision].encode())
        return
    elif EXPECTEDPATH_FAILURECLASSIFICATION in self.path:
        self.wfile.write(FAILURE_CLASSIFICATIONS.encode())
        return
    elif EXPECTEDPATH_PUSHHEALTH in self.path:
        revision = self.path[self.path.index(EXPECTEDPATH_PUSHHEALTH) + len(EXPECTEDPATH_PUSHHEALTH):]
        log("MockTreeherderServer (push health): Looking for revision %s" % revision, level=LogLevel.Info)
        if revision not in HEALTH_REVISIONS:
            assert False, "MockTreeherderServer: Could not find that revision"
        filename = HEALTH_REVISIONS[revision]
    else:
        if EXPECTEDPATH_JOBS in self.path:
            log("MockTreeherderServer (jobs): Got path %s" % self.path, level=LogLevel.Info)
            filename = get_appropriate_filename(self.path)
        elif EXPECTEDPATH_ACTIONSJSON in self.path:
            log("MockTreeherderServer (actionsjson)", level=LogLevel.Info)
            filename = "actionsjson.txt"
        else:
            assert False, "MockTreeherderServer GET got a path it didn't expect: " + self.path

    assert filename, "MockTreeherderServer somehow got a blank filename"
    log("MockTreeherderServer: Streaming %s" % filename, level=LogLevel.Info)

    with open(file_prefix + filename, "rb") as f:
        for line in f:
            self.wfile.write(line)
def do_POST(self):
    if EXPECTEDPATH_RETRIGGER in self.path:
        log("MockTreeherderServer (retrigger): streaming standard retrigger response", level=LogLevel.Info)
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        self.wfile.write(RETRIGGER_RESPONSE.encode())
    else:
        assert False, "MockTreeherderServer POST got a path it didn't expect: " + self.path
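# Illustrative sketch (not part of the original file): one way the handlers above
# could be hosted during a test, assuming they live on a BaseHTTPRequestHandler
# subclass exported as MockTreeherderServer (as the test imports suggest). Only
# standard-library pieces (http.server, threading) are used; the helper name and
# shutdown pattern are illustrative, not the project's actual test harness.
def _start_mock_server_sketch(handler_class, port=0):
    import threading
    from http.server import HTTPServer

    httpd = HTTPServer(("localhost", port), handler_class)
    # Serve requests on a daemon thread so the test body can keep running;
    # port=0 asks the OS for a free port, available afterwards as httpd.server_port.
    threading.Thread(target=httpd.serve_forever, daemon=True).start()
    return httpd  # caller should call httpd.shutdown() when the test finishes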
def _check_jobs(u, library_filter, expected_values, status, outcome):
    tc = unittest.TestCase()

    for lib in u.libraryProvider.get_libraries(u.config_dictionary['General']['gecko-path']):
        if library_filter not in lib.name:
            continue
        for task in lib.tasks:
            if task.type != 'vendoring':
                continue

            j = u.dbProvider.get_job(lib, expected_values.library_version_id)

            log("In _check_jobs looking for status %s and outcome %s" % (status, outcome))

            tc.assertNotEqual(j, None)
            tc.assertEqual(lib.name, j.library_shortname)
            tc.assertEqual(expected_values.library_version_id, j.version)
            tc.assertEqual(status, j.status,
                           "Expected status %s, got status %s" % (status.name, j.status.name))
            tc.assertEqual(outcome, j.outcome,
                           "Expected outcome %s, got outcome %s" % (outcome.name, j.outcome.name))
            tc.assertEqual(expected_values.filed_bug_id, j.bugzilla_id)
            tc.assertEqual(expected_values.phab_revision, j.phab_revision)

            tc.assertTrue(len(j.try_runs) <= 2)
            tc.assertEqual('initial platform', j.try_runs[0].purpose)
            tc.assertEqual(expected_values.try_revision_1, j.try_runs[0].revision)

            if len(j.try_runs) == 2:
                tc.assertEqual('more platforms', j.try_runs[1].purpose)
                tc.assertEqual(expected_values.try_revision_2, j.try_runs[1].revision,
                               "Did not match the second try run's revision")
            elif len(j.try_runs) == 1 and j.status > JOBSTATUS.DONE:
                # Only check in the DONE status because this test may set try_revision_2
                # (so the expected value is non-null), but we're performing this check
                # before we've submitted a second try run.
                tc.assertEqual(expected_values.try_revision_2, None)
from components.dbmodels import JOBSTATUS, JOBOUTCOME
from components.mach_vendor import VendorProvider
from components.hg import MercurialProvider
from components.scmprovider import SCMProvider
from apis.taskcluster import TaskclusterProvider
from apis.phabricator import PhabricatorProvider

from tests.mock_commandprovider import TestCommandProvider
from tests.mock_libraryprovider import MockLibraryProvider
from tests.mock_treeherder_server import MockTreeherderServer
from tests.database import transform_db_config_to_tmp_db

try:
    from localconfig import localconfig
except ImportError:
    log("Unit tests require a local database configuration to be defined.")
    sys.exit(1)


def TRY_OUTPUT(revision):
    return """
warning: 'mach try auto' is experimental, results may vary!
Test configuration changed. Regenerating backend.
Creating temporary commit for remote...
A try_task_config.json
pushing to ssh://hg.mozilla.org/try
searching for changes
remote: adding changesets
remote: adding manifests
remote: adding file changes
remote: recorded push in pushlog