def build_ref_data_names(project, build_system):
    '''Map unique task keys to their reference data names for a project.

    Covers every task that runs on ``project``. For example:
    * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1"
    * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"

    ``build_system`` filters by the job's build system type; pass ``'*'``
    to accept every build system.
    '''
    skipped = []
    names_by_key = {}

    for runnable in list_runnable_jobs(project)['results']:
        testtype = job_testtype(runnable)  # e.g. web-platform-tests-4

        if _ignore(testtype):
            skipped.append(runnable['ref_data_name'])
            continue

        key = unique_key(testtype=testtype,
                         buildtype=runnable['platform_option'],
                         platform=runnable['platform'])

        # '*' is a wildcard matching every build system type
        if build_system == '*' or runnable['build_system_type'] == build_system:
            names_by_key[key] = runnable['ref_data_name']

    for skipped_name in sorted(skipped):
        LOG.info('Ignoring {}'.format(skipped_name))

    return names_by_key
def list(self, request, project):
    """ GET method implementation for list of all runnable buildbot jobs """
    try:
        task_id = request.query_params.get('decision_task_id')
        return Response(list_runnable_jobs(project, task_id))
    except Exception as ex:
        # surface any failure to the client as an HTTP 500 payload
        return Response("Exception: {0}".format(ex),
                        status=HTTP_500_INTERNAL_SERVER_ERROR)
def get_reference_data_names(project="autoland", build_system="taskcluster"):
    '''
    We want all reference data names for every task that runs on a specific project.

    For example:
        "test-linux64/opt-mochitest-webgl-e10s-1"

    Returns a dict mapping ``unique_key(testtype, buildtype, platform)`` to
    the task's ``ref_data_name``. Results are cached per
    (project, build_system) to reduce API calls.
    '''
    # we cache the reference data names in order to reduce API calls
    cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system)
    ref_data_names_map = cache.get(cache_key)
    if ref_data_names_map:
        return ref_data_names_map

    logger.debug("We did not hit the cache.")
    # cache expired so re-build the reference data names map; the map
    # contains the ref_data_name of every Treeherder task for this project
    ignored_jobs = []
    ref_data_names = {}

    runnable_jobs = list_runnable_jobs(project)

    for job in runnable_jobs:
        # get testtype e.g. web-platform-tests-4
        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name'],
        )

        if not valid_platform(job['platform']):
            continue

        if is_job_blacklisted(testtype):
            ignored_jobs.append(job['ref_data_name'])
            if testtype:
                logger.debug(
                    'get_reference_data_names: blacklisted testtype {} for job {}'.format(
                        testtype, job
                    )
                )
            continue

        key = unique_key(
            testtype=testtype,
            buildtype=job['platform_option'],
            platform=job['platform']
        )

        # '*' acts as a wildcard accepting every build system type
        if build_system == '*':
            ref_data_names[key] = job['ref_data_name']
        elif job['build_system_type'] == build_system:
            ref_data_names[key] = job['ref_data_name']

    logger.debug('Ignoring %s', ', '.join(sorted(ignored_jobs)))

    # update the cache
    # BUG FIX: previously this stored ``ref_data_names_map``, which is always
    # falsy here (we only reach this point on a cache miss), so the cache was
    # never populated and every call re-queried the API. Store the freshly
    # built map instead.
    cache.set(cache_key, ref_data_names, SETA_REF_DATA_NAMES_CACHE_TIMEOUT)

    return ref_data_names
def list(self, request, project):
    """ GET method implementation for list of all runnable buildbot jobs """
    try:
        decision_task_id = request.query_params.get('decision_task_id')
        runnable = list_runnable_jobs(project, decision_task_id)
        return Response(runnable)
    except Exception as ex:
        # report the failure to the caller rather than letting it propagate
        return Response(
            "Exception: {0}".format(ex),
            status=HTTP_500_INTERNAL_SERVER_ERROR,
        )
def query_sanitized_data(repo_name='mozilla-inbound'):
    """Return sanitized jobs data based on runnable api. None if failed to obtain or no new data.

    We need to find the latest gecko decision task ID (by querying the
    index [1][2]).

    [1] https://index.taskcluster.net/v1/task/gecko.v2.%s.latest.taskgraph.decision/
    [2] Index's data structure:
        {
          "namespace": "gecko.v2.mozilla-inbound.latest.taskgraph.decision",
          "taskId": "Dh9ZvFk5QCSprJ877cgUmw",
          "rank": 0,
          "data": {},
          "expires": "2017-10-06T18:30:18.428Z"
        }
    """
    return _sanitize_data(list_runnable_jobs(repo_name))
def _build_ref_data_names(self, project, build_system):
    '''
    We want all reference data names for every task that runs on a specific project.

    For example:
    * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1"
    * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"

    Returns a dict mapping ``unique_key(testtype, buildtype, platform)`` to
    ``ref_data_name``, filtered to ``build_system`` (``'*'`` accepts all).
    '''
    ignored_jobs = []
    ref_data_names = {}

    runnable_jobs = list_runnable_jobs(project)['results']

    for job in runnable_jobs:
        # get testtype e.g. web-platform-tests-4
        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name']
        )

        if not valid_platform(job['platform']):
            continue

        if is_job_blacklisted(testtype):
            ignored_jobs.append(job['ref_data_name'])
            continue

        key = unique_key(testtype=testtype,
                         buildtype=job['platform_option'],
                         platform=job['platform'])

        if build_system == '*':
            ref_data_names[key] = job['ref_data_name']
        elif job['build_system_type'] == build_system:
            ref_data_names[key] = job['ref_data_name']

    for ref_data_name in sorted(ignored_jobs):
        # Use lazy %-style args (consistent with the sibling implementation)
        # so the string is only formatted when INFO logging is enabled.
        logger.info('Ignoring %s', ref_data_name)

    return ref_data_names
def _build_ref_data_names(self, project, build_system):
    '''Collect the reference data name of every task running on ``project``.

    Examples of reference data names:
    * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1"
    * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"

    ``build_system`` restricts the result to one build system type;
    ``'*'`` accepts them all.
    '''
    blacklisted = []
    name_map = {}

    for job in list_runnable_jobs(project):
        # derive the testtype, e.g. web-platform-tests-4
        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name'])

        if not valid_platform(job['platform']):
            continue

        if is_job_blacklisted(testtype):
            blacklisted.append(job['ref_data_name'])
            continue

        key = unique_key(testtype=testtype,
                         buildtype=job['platform_option'],
                         platform=job['platform'])

        # '*' is a wildcard matching any build system type
        if build_system == '*' or job['build_system_type'] == build_system:
            name_map[key] = job['ref_data_name']

    for name in sorted(blacklisted):
        logger.info('Ignoring %s', name)

    return name_map
def _query_runnable_jobs(self, repo_name, task_id):
    """Thin delegate to ``list_runnable_jobs`` for *repo_name* / *task_id*."""
    runnable = list_runnable_jobs(repo_name, task_id)
    return runnable