Example #1
0
    def _process(self, project, build_system, job_priorities):
        '''Map each JobPriority to its ref_data_name and return them as a list.'''
        if not job_priorities:
            raise SetaError("Call docker-compose run backend ./manage.py initialize_seta")

        ref_data_names_map = get_reference_data_names(project, build_system)
        accepted = []

        # Walk the JobPriority rows, keeping only those still valid for SETA.
        for priority in job_priorities:
            # Entries no longer supported (bad platform or blacklisted testtype) are skipped.
            if not valid_platform(priority.platform) or is_job_blacklisted(priority.testtype):
                continue

            identifier = priority.unique_identifier()
            if identifier in ref_data_names_map:
                # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
                accepted.append(ref_data_names_map[identifier])
            else:
                logger.warning(
                    'Job priority key %s for (%s) not found in accepted jobs list',
                    identifier,
                    priority,
                )

        return accepted
    def _process(self, project, build_system, job_priorities):
        '''Return list of ref_data_name for job_priorities.

        The reference data names for the project/build system pair are cached
        to reduce API calls.  JobPriority entries that SETA no longer supports
        (invalid platform or blacklisted testtype) are silently skipped; any
        entry whose key is missing from the runnable-jobs map is logged.
        '''
        jobs = []

        # we cache the reference data names in order to reduce API calls
        cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system)
        ref_data_names_map = cache.get(cache_key)
        if not ref_data_names_map:
            # cache expired so re-build the reference data names map; the map
            # contains the ref_data_name of every treeherder *test* job for this project
            ref_data_names_map = self._build_ref_data_names(project, build_system)
            # update the cache
            cache.set(cache_key, ref_data_names_map, SETA_REF_DATA_NAMES_CACHE_TIMEOUT)

        # now check the JobPriority table against the list of valid runnable
        for jp in job_priorities:
            # if this JobPriority entry is no longer supported in SETA then ignore it
            if not valid_platform(jp.platform):
                continue
            if is_job_blacklisted(jp.testtype):
                continue

            key = jp.unique_identifier()
            if key in ref_data_names_map:
                # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
                jobs.append(ref_data_names_map[key])
            else:
                # Lazy %-args: the message is only formatted when the record is
                # actually emitted (consistent with the rest of the module).
                logger.warning('Job priority (%s) not found in accepted jobs list', jp)

        return jobs
def _sanitize_data(runnable_jobs_data):
    """We receive data from runnable jobs api and return the sanitized data that meets our needs.

    This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
    By doing this, it allows us to have a single database query

    It returns sanitized_list which will contain a subset which excludes:
    * jobs that don't specify the platform
    * jobs that don't specify the testtype
    * if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
      under both 'buildbot' and 'taskcluster', its build_system_type will be '*'
    """
    # Maps unique key -> (build_system_type seen first, the dict appended to
    # sanitized_list).  Keeping a direct reference to the stored dict replaces
    # the previous O(n) ``sanitized_list.index(new_job)`` lookup, which also
    # raised ValueError when the same key appeared a third time after the
    # stored entry had already been rewritten to '*'.
    first_seen = {}
    sanitized_list = []
    for job in runnable_jobs_data:
        if not valid_platform(job['platform']):
            logger.debug('Invalid platform %s', job['platform'])
            continue

        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name'],
        )

        if not testtype:
            continue

        # NOTE: This is *all* the data we need from the runnable API
        new_job = {
            'build_system_type': job['build_system_type'],  # e.g. {buildbot,taskcluster,*}
            'platform': job['platform'],  # e.g. windows8-64
            'platform_option': job['platform_option'],  # e.g. {opt,debug}
            'testtype': testtype,  # e.g. web-platform-tests-1
        }
        key = _unique_key(new_job)

        # Build a map of all the jobs; if a key is duplicated under a different
        # build system, rewrite the stored entry's build_system_type to '*'.
        if key not in first_seen:
            first_seen[key] = (job['build_system_type'], new_job)
            sanitized_list.append(new_job)
        else:
            original_type, stored_job = first_seen[key]
            if job['build_system_type'] != original_type:
                # This guarantees that we don't have duplicates
                stored_job['build_system_type'] = '*'

    return sanitized_list
def _sanitize_data(runnable_jobs_data):
    """We receive data from runnable jobs api and return the sanitized data that meets our needs.

    This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
    By doing this, it allows us to have a single database query

    It returns sanitized_list which will contain a subset which excludes:
    * jobs that don't specify the platform
    * jobs that don't specify the testtype
    * if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
      under both 'buildbot' and 'taskcluster', its build_system_type will be '*'
    """
    # Maps unique key -> (build_system_type seen first, the dict appended to
    # sanitized_list).  Holding a direct reference to the stored dict replaces
    # the previous O(n) ``sanitized_list.index(new_job)`` lookup, which also
    # raised ValueError when the same key appeared a third time after the
    # stored entry had already been rewritten to '*'.
    first_seen = {}
    sanitized_list = []
    for job in runnable_jobs_data:
        if not valid_platform(job['platform']):
            logger.info('Invalid platform %s', job['platform'])
            continue

        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name']
        )

        if not testtype:
            continue

        # NOTE: This is *all* the data we need from the runnable API
        new_job = {
            'build_system_type': job['build_system_type'],  # e.g. {buildbot,taskcluster,*}
            'platform': job['platform'],  # e.g. windows8-64
            'platform_option': job['platform_option'],  # e.g. {opt,debug}
            'testtype': testtype,  # e.g. web-platform-tests-1
        }
        key = _unique_key(new_job)

        # Build a map of all the jobs; if a key is duplicated under a different
        # build system, rewrite the stored entry's build_system_type to '*'.
        if key not in first_seen:
            first_seen[key] = (job['build_system_type'], new_job)
            sanitized_list.append(new_job)
        else:
            original_type, stored_job = first_seen[key]
            if job['build_system_type'] != original_type:
                # This guarantees that we don't have duplicates
                stored_job['build_system_type'] = '*'

    return sanitized_list
    def _build_ref_data_names(self, project, build_system):
        '''
        Collect the reference data name of every runnable task on a project.

        For example:
            * Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1"
            * TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"
        '''
        ref_data_names = {}
        skipped = []

        for job in list_runnable_jobs(project)['results']:
            # get testtype e.g. web-platform-tests-4
            testtype = parse_testtype(
                build_system_type=job['build_system_type'],
                job_type_name=job['job_type_name'],
                platform_option=job['platform_option'],
                ref_data_name=job['ref_data_name']
            )

            if not valid_platform(job['platform']):
                continue

            if is_job_blacklisted(testtype):
                skipped.append(job['ref_data_name'])
                continue

            key = unique_key(
                testtype=testtype,
                buildtype=job['platform_option'],
                platform=job['platform'],
            )

            # A build_system of '*' accepts jobs from every build system type.
            if build_system == '*' or job['build_system_type'] == build_system:
                ref_data_names[key] = job['ref_data_name']

        for ref_data_name in sorted(skipped):
            logger.info('Ignoring {}'.format(ref_data_name))

        return ref_data_names
def _process(project, build_system, job_priorities):
    '''Return list of ref_data_name for job_priorities'''
    # This map contains the ref_data_name of every Treeherder *test* job for this project
    ref_data_names_map = build_ref_data_names(project, build_system)

    accepted = []
    for priority in job_priorities:
        # Skip JobPriority entries that SETA no longer supports.
        if not valid_platform(priority.platform) or is_job_blacklisted(priority.testtype):
            continue

        identifier = priority.unique_identifier()
        try:
            # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
            accepted.append(ref_data_names_map[identifier])
        except KeyError:
            logger.warning('We did not find job priority ({}) in the list of accepted jobs'.format(priority))

    return accepted
Example #7
0
def _process(project, build_system, job_priorities):
    '''Return list of ref_data_name for job_priorities'''
    # Every Treeherder *test* job's ref_data_name for this project, keyed by
    # the same unique identifier that JobPriority rows produce.
    ref_data_names_map = build_ref_data_names(project, build_system)

    jobs = []
    for jp in job_priorities:
        # Entries no longer supported in SETA are ignored.
        if not valid_platform(jp.platform):
            continue
        if is_job_blacklisted(jp.testtype):
            continue

        key = jp.unique_identifier()
        if key not in ref_data_names_map:
            logger.warning(
                'We did not find job priority ({}) in the list of accepted jobs'
                .format(jp))
            continue

        # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
        jobs.append(ref_data_names_map[key])

    return jobs