Example #1
def load_preseed(validate=False):
    """ Update JobPriority information from preseed.json"""
    logger.info("About to load preseed.json")

    preseed = preseed_data()
    if validate:
        logger.info("We are going to validate the values from preseed.json")
        ref_names = get_reference_data_names()
    for job in preseed:
        if validate:
            validate_preseed_entry(job, ref_names)

        logger.debug("Processing %s",
                     (job["testtype"], job["buildtype"], job["platform"]))
        queryset = JobPriority.objects.all()

        for field in ('testtype', 'buildtype', 'platform'):
            if job[field] != '*':
                # The JobPriority table does not contain the raw
                # testtype value seen in the preseed.json file. We
                # must convert the job[field] value to the appropriate
                # value before performing the query.
                field_value = (convert_job_type_name_to_testtype(job[field])
                               if field == 'testtype' else job[field])
                queryset = queryset.filter(**{field: field_value})

        # Deal with the case where we have a new entry in preseed
        if not queryset:
            create_new_entry(job)
        else:
            # We can have wildcards, so loop on all returned values in data
            for jp in queryset:
                process_job_priority(jp, job)
    logger.debug("Finished")
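To illustrate how the wildcard filtering above narrows the query, here is a minimal, self-contained sketch that uses plain dictionaries instead of the Django ORM. The sample entry and the build_filter_kwargs helper are hypothetical; they only mirror the loop over ('testtype', 'buildtype', 'platform'), where '*' means "match everything" and any other value becomes an exact-match filter.

def build_filter_kwargs(job):
    """Collect exact-match filters, skipping wildcard ('*') fields."""
    kwargs = {}
    for field in ('testtype', 'buildtype', 'platform'):
        if job[field] != '*':
            kwargs[field] = job[field]
    return kwargs

if __name__ == '__main__':
    # Hypothetical preseed-style entry: platform is a wildcard
    job = {'testtype': 'reftest', 'buildtype': 'opt', 'platform': '*'}
    print(build_filter_kwargs(job))
    # {'testtype': 'reftest', 'buildtype': 'opt'}
    # In load_preseed these kwargs feed JobPriority.objects.filter(**kwargs)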
Example #2
    def _process(self, project, build_system, job_priorities):
        '''Return list of ref_data_name for job_priorities'''
        if not job_priorities:
            raise SetaError("Call docker-compose run backend ./manage.py initialize_seta")

        jobs = []

        ref_data_names_map = get_reference_data_names(project, build_system)

        # now check the JobPriority table against the list of valid runnable
        for jp in job_priorities:
            # if this JobPriority entry is no longer supported in SETA then ignore it
            if not valid_platform(jp.platform):
                continue
            if is_job_blacklisted(jp.testtype):
                continue

            key = jp.unique_identifier()
            if key in ref_data_names_map:
                # e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
                jobs.append(ref_data_names_map[key])
            else:
                logger.warning(
                    'Job priority key %s for (%s) not found in accepted jobs list', key, jp
                )

        return jobs
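As a rough illustration of the lookup that _process performs, the sketch below builds a ref_data_names_map keyed the way unique_identifier() might key it and keeps only the priorities that appear in the map. The FakeJobPriority class, the assumed key shape, and the sample data are hypothetical stand-ins, not part of the SETA code.

class FakeJobPriority:
    def __init__(self, testtype, buildtype, platform):
        self.testtype = testtype
        self.buildtype = buildtype
        self.platform = platform

    def unique_identifier(self):
        # Assumed key shape: one tuple per (testtype, buildtype, platform)
        return (self.testtype, self.buildtype, self.platform)

if __name__ == '__main__':
    ref_data_names_map = {
        ('reftest-13', 'opt', 'linux64'): 'desktop-test-linux64-pgo/opt-reftest-13',
    }
    priorities = [
        FakeJobPriority('reftest-13', 'opt', 'linux64'),   # known job -> kept
        FakeJobPriority('mochitest-1', 'debug', 'win64'),  # unknown job -> skipped
    ]
    jobs = [ref_data_names_map[jp.unique_identifier()]
            for jp in priorities
            if jp.unique_identifier() in ref_data_names_map]
    print(jobs)  # ['desktop-test-linux64-pgo/opt-reftest-13']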
Example #3
def load_preseed(validate=False):
    """ Update JobPriority information from preseed.json

    The preseed data has these fields: buildtype, testtype, platform, priority, expiration_date
    The expiration_date field defaults to 2 weeks when inserted in the table
    The expiration_date field has the format "YYYY-MM-DD"; it can also be "*" to indicate that the entry never expires
    The default priority is 1; however, a different priority can be set if we want to force coalescing
    The fields buildtype, testtype and platform can be "*", which makes the entry match all flavors of
    that field. For example, (linux64, pgo, *) matches all Linux 64 pgo tests
    """
    logger.info("About to load preseed.json")

    preseed = preseed_data()
    if validate:
        logger.info("We are going to validate the values from preseed.json")
        ref_names = get_reference_data_names()
    for job in preseed:
        if validate:
            validate_preseed_entry(job, ref_names)

        logger.debug("Processing %s", (job["testtype"], job["buildtype"], job["platform"]))
        queryset = JobPriority.objects.all()

        for field in ('testtype', 'buildtype', 'platform'):
            if job[field] != '*':
                queryset = queryset.filter(**{field: job[field]})

        # Deal with the case where we have a new entry in preseed
        if not queryset:
            create_new_entry(job)
        else:
            # We can have wildcards, so loop on all returned values in data
            for jp in queryset:
                process_job_priority(jp, job)
    logger.debug("Finished")
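The docstring above lists the preseed fields, but the module does not show the file itself, so here is a hypothetical entry shaped after those fields and the (linux64, pgo, *) example; the exact keys and values in the real preseed.json may differ.

import json

# Hypothetical preseed.json content: one wildcard entry matching all Linux 64 pgo tests
sample_preseed = json.loads("""
[
    {
        "buildtype": "pgo",
        "testtype": "*",
        "platform": "linux64",
        "priority": 1,
        "expiration_date": "*"
    }
]
""")
print(sample_preseed[0]["platform"])  # linux64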