Example #1
    def check_results(self, testname=""):
        """Return True if there already exist unrejected results for this device,
        build and test.
        """

        if not self._resulturl:
            return False

        # Create JSON to send to webserver
        query = {
            "phoneid": self.phone.id,
            "test": testname,
            "revision": self.build.revision,
            "product": self.build.app_name,
        }

        self.loggerdeco.debug("check_results for: %s" % query)

        url = self._resulturl + "check/?" + urllib.urlencode(query)
        response = utils.get_remote_json(url)
        self.loggerdeco.debug("check_results: content: %s" % response)
        if response:
            return response["result"]

        self.loggerdeco.warning(
            "check_results: could not check: "
            "phoneid: %s, test: %s, revision: %s, product: %s"
            % (query["phoneid"], query["test"], query["revision"], query["product"])
        )
        return False
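Every example on this page funnels its HTTP traffic through utils.get_remote_json. The helper itself is not shown here; judging by how it is called (an optional params keyword in a later example, and callers that treat a falsy return as failure), a minimal sketch might look like the following. The requests usage and the 60-second timeout are assumptions, not the original implementation.

import logging

import requests

LOGGER = logging.getLogger(__name__)


def get_remote_json(url, params=None):
    # Minimal sketch only: fetch url and return the parsed JSON body,
    # or None on any network or decoding failure, which matches how the
    # callers on this page test the result for truthiness.
    try:
        response = requests.get(url, params=params, timeout=60)  # timeout assumed
        response.raise_for_status()
        return response.json()
    except (requests.RequestException, ValueError):
        LOGGER.exception('get_remote_json: failed to fetch %s', url)
        return None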
Example #2
    def get_treeherder_privatebuild_info(self, project, job):
        url = '%s/api/jobdetail/?repository=%s&job_id=%s' % (
            self.treeherder_url, project, job['id'])
        data = utils.get_remote_json(url)
        LOGGER.debug("get_treeherder_privatebuild_info: data: %s", data)

        privatebuild_keys = set(
            ['build_url', 'config_file', 'chunk', 'builder_type'])
        info = {}
        for detail in data['results']:
            if detail['title'] in privatebuild_keys:
                # the build_url property is too long to fit into "value"
                if detail.get('url'):
                    value = detail['url']
                else:
                    value = detail['value']
                info[detail['title']] = value

        # If we're missing a privatebuild key for some reason
        # return None to avoid errors.
        missing_keys = privatebuild_keys - set(info.keys())
        if missing_keys:
            LOGGER.warning(
                "get_treeherder_privatebuild_info: %s "
                "missing keys: %s "
                "job: %s", url, missing_keys, job)
            return None

        return info
Example #3
    def check_results(self, testname=''):
        """Return True if there already exist unrejected results for this device,
        build and test.
        """

        if not self._resulturl:
            return False

        # Create JSON to send to webserver
        query = {
            'phoneid': self.phone.id,
            'test': testname,
            'revision': self.build.changeset,
            'product': self.build.app_name
        }

        self.loggerdeco.debug('check_results for: %s' % query)

        url = self._resulturl + 'check/?' + urllib.urlencode(query)
        response = utils.get_remote_json(url)
        self.loggerdeco.debug('check_results: content: %s' % response)
        if response:
            return response['result']

        self.loggerdeco.warning(
            'check_results: could not check: '
            'phoneid: %s, test: %s, revision: %s, product: %s' % (
                query['phoneid'], query['test'],
                query['revision'], query['product']))
        return False
Example #4
def get_job_bugzilla_suggestions_json(args,
                                      repo,
                                      job_id,
                                      include_related_bugs=False,
                                      update_cache=False):
    """get_job_bugzilla_suggestions_json

    Retrieve job_bugzilla_suggestions given args, and job_id

    """
    cache_attributes = ['treeherder', repo, 'bugzilla_suggestions']

    suggestions_data = cache.load(cache_attributes, job_id)
    if suggestions_data and not update_cache:
        suggestions = json.loads(suggestions_data)
    else:
        bugzilla_suggestions_url = '%s/api/project/%s/jobs/%s/bug_suggestions/' % (
            URL, repo, job_id)

        suggestions = utils.get_remote_json(bugzilla_suggestions_url)
        cache.save(cache_attributes, job_id, json.dumps(suggestions, indent=2))

    if args.test_failure_pattern:
        bugzilla_suggestions = [
            suggestion for suggestion in suggestions
            if args.test_failure_pattern.search(suggestion['search'])
        ]
    else:
        bugzilla_suggestions = suggestions

    if not include_related_bugs:
        for bug_data in bugzilla_suggestions:
            del bug_data['bugs']

    return bugzilla_suggestions
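This and several later examples pair the remote fetch with cache.load / cache.save. That module is not included on this page; a minimal file-backed sketch consistent with the call sites (a list of attribute strings plus a name as the key, plain strings as values, None when absent) could be:

import os

CACHE_ROOT = os.path.expanduser('~/.cache/th-examples')  # assumed location


def _cache_path(attributes, name):
    # attributes is a list such as ['treeherder', repo, 'bugzilla_suggestions'].
    return os.path.join(CACHE_ROOT, os.path.join(*attributes), str(name))


def load(attributes, name):
    # Return the cached string, or None if this key was never saved.
    path = _cache_path(attributes, name)
    if not os.path.exists(path):
        return None
    with open(path) as f:
        return f.read()


def save(attributes, name, data):
    # data is already a JSON-encoded string at every call site above.
    path = _cache_path(attributes, name)
    directory = os.path.dirname(path)
    if not os.path.isdir(directory):
        os.makedirs(directory)
    with open(path, 'w') as f:
        f.write(data)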
Example #5
def get_failure_count_json(args, repo, bug_id, start_date, end_date):
    """get_failure_count_json

    Retrieve list of objects by repo/project, bug and date range.
    [
        {
            "date": "2019-07-10",
            "test_runs": 154,
            "failure_count": 0
        },
    ]

    """

    if isinstance(start_date, datetime.datetime):
        start_date = start_date.strftime('%Y-%m-%d')
    if isinstance(end_date, datetime.datetime):
        end_date = end_date.strftime('%Y-%m-%d')

    failure_count_url = '%s/api/failurecount/?startday=%s&endday=%s&tree=%s&bug=%s' % (
        URL, start_date, end_date, repo, bug_id)

    failure_count_json = utils.get_remote_json(failure_count_url)

    return failure_count_json
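A usage sketch: Example #21 below sums the per-day failure_count fields over a two-week window, and a standalone call would look much the same. The bug id is borrowed from that example; args is unused by the body shown above, so None suffices here.

import datetime

start = datetime.datetime(2019, 7, 1)
end = start + datetime.timedelta(days=15)
counts = get_failure_count_json(None, 'trunk', 1559260, start, end)
if counts:
    # Each element is {'date': ..., 'test_runs': ..., 'failure_count': ...}.
    total = sum(day['failure_count'] for day in counts)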
Example #6
    def check_results(self, testname=''):
        """Return True if there already exist unrejected results for this device,
        build and test.
        """

        if not self._resulturl:
            return False

        # Create JSON to send to webserver
        query = {
            'phoneid': self.phone.id,
            'test': testname,
            'revision': self.build.revision,
            'product': self.build.app_name
        }

        self.loggerdeco.debug('check_results for: %s' % query)

        url = self._resulturl + 'check/?' + urllib.urlencode(query)
        response = utils.get_remote_json(url)
        self.loggerdeco.debug('check_results: content: %s' % response)
        if response:
            return response['result']

        self.loggerdeco.warning(
            'check_results: could not check: '
            'phoneid: %s, test: %s, revision: %s, product: %s' % (
                query['phoneid'], query['test'],
                query['revision'], query['product']))
        return False
Example #7
    def get_treeherder_privatebuild_info(self, project, job):
        logger = utils.getLogger()
        url = '%s/api/jobdetail/?repository=%s&job_id=%s' % (
            self.treeherder_url, project, job['id'])
        data = utils.get_remote_json(url)
        logger.debug("get_treeherder_privatebuild_info: data: %s", data)

        privatebuild_keys = set(['build_url', 'config_file', 'chunk',
                                 'builder_type'])
        info = {}
        for detail in data['results']:
            if detail['title'] in privatebuild_keys:
                # the build_url property is too long to fit into "value"
                if detail.get('url'):
                    value = detail['url']
                else:
                    value = detail['value']
                info[detail['title']] = value

        # If we're missing a privatebuild key for some reason
        # return None to avoid errors.
        missing_keys = privatebuild_keys - set(info.keys())
        if missing_keys:
            logger.debug("get_treeherder_privatebuild_info: %s "
                         "missing keys: %s "
                         "job: %s", url, missing_keys, job)
            return None

        return info
Example #8
 def get_treeherder_privatebuild_artifact(self, job):
     if job:
         for artifact in job['artifacts']:
             if artifact['name'] == 'privatebuild':
                 url = '%s%s' % (
                     self.treeherder_url, artifact['resource_uri'])
                 return utils.get_remote_json(url)
     return None
Example #9
def get_revision_timestamps(repo, first_revision, last_revision):
    """Returns a tuple containing timestamps for the revisions from
    the given repo.

    arguments:
    repo            - name of repository. For example, one of
                      mozilla-central, mozilla-aurora, mozilla-beta,
                      mozilla-inbound, fx-team, b2g-inbound
    first_revision  - string.
    last_revision   - string.

    returns: first_timestamp, last_timestamp.
    """
    prefix = '%sjson-pushes?changeset=' % repo_urls[repo]
    first = utils.get_remote_json('%s%s' % (prefix, first_revision))
    last = utils.get_remote_json('%s%s' % (prefix, last_revision))

    return first[first.keys()[0]]['date'], last[last.keys()[0]]['date']
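The 'date' values that json-pushes returns are Unix timestamps, so a caller would normally convert them; for instance (the revision strings here are placeholders, not real changesets):

import datetime

first_ts, last_ts = get_revision_timestamps('mozilla-central',
                                            '0123456789ab',  # placeholder
                                            'ba9876543210')  # placeholder
first_dt = datetime.datetime.utcfromtimestamp(first_ts)
last_dt = datetime.datetime.utcfromtimestamp(last_ts)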
Example #10
def validate(request):
    """
    POST /validate

    Validate GeoJSON data in POST body
    """

    testing = request.GET.get('testing')

    if request.method == 'POST':
        stringy_json = request.raw_post_data
    else:  # GET
        try:
            remote_url = request.GET['url']
            stringy_json = get_remote_json(remote_url)
        except KeyError:  # The "url" URL parameter was missing
            return _geojson_error(
                'When validating via GET, a "url" URL parameter is required.',
                status=400)
        except NonFetchableURLException:
            return _geojson_error('The URL passed could not be fetched.')

    try:
        test_geojson = json.loads(stringy_json)
        if not isinstance(test_geojson, dict):
            return _geojson_error('Data was not a JSON object.', testing)
    except (TypeError, ValueError):
        return _geojson_error('Data was not JSON serializable.', testing)

    if 'type' not in test_geojson:
        return _geojson_error(
            'The "type" member is required and was not found.', testing)

    try:
        validate_geojson(test_geojson)
    except GeoJSONValidationException as e:
        return _geojson_error(str(e), testing)

    # Everything checked out. Return 'ok'.
    track_validate()
    resp = {
        'status': 'ok',
    }
    return HttpResponse(json.dumps(resp), mimetype='application/json')
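For reference, the GET path of this view can be exercised as below; the host and port are assumptions for a local development server, not part of the example:

import requests

resp = requests.get('http://localhost:8000/validate',  # assumed dev server
                    params={'url': 'http://example.com/data.geojson'})
print(resp.json())  # {'status': 'ok'} when the fetched GeoJSON validates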
Example #11
def get_bug_job_map_json(args, repo, job_id, update_cache=False):
    """get_bug_job_map_json

    Retrieve bug_job_map given args, repo and job_id

    """
    cache_attributes = ['treeherder', repo, 'bug-job-map']

    bug_job_map_url = '%s/api/project/%s/bug-job-map/?job_id=%s' % (
        URL, repo, job_id)

    bug_job_map_data = cache.load(cache_attributes, job_id)
    if bug_job_map_data and not update_cache:
        bug_job_map = json.loads(bug_job_map_data)
        bug_job_map_data = None
    else:
        bug_job_map = utils.get_remote_json(bug_job_map_url)
        cache.save(cache_attributes, job_id, json.dumps(bug_job_map, indent=2))

    return bug_job_map
Example #12
def validate(request):
    """
    POST /validate

    Validate GeoJSON data in POST body
    """

    testing = request.GET.get('testing')

    if request.method == 'POST':
        stringy_json = request.raw_post_data
    else:  # GET
        try:
            remote_url = request.GET['url']
            stringy_json = get_remote_json(remote_url)
        except KeyError:  # The "url" URL parameter was missing
            return _geojson_error('When validating via GET, a "url" URL parameter is required.', status=400)
        except NonFetchableURLException:
            return _geojson_error('The URL passed could not be fetched.')

    try:
        test_geojson = json.loads(stringy_json)
        if not isinstance(test_geojson, dict):
            return _geojson_error('Data was not a JSON object.', testing)
    except (TypeError, ValueError):
        return _geojson_error('Data was not JSON serializable.', testing)

    if 'type' not in test_geojson:
        return _geojson_error('The "type" member is required and was not found.', testing)

    try:
        validate_geojson(test_geojson)
    except GeoJSONValidationException as e:
        return _geojson_error(str(e), testing)

    # Everything checked out. Return 'ok'.
    resp = {
        'status': 'ok',
    }
    return HttpResponse(json.dumps(resp), mimetype='application/json')
Example #13
    def handle_taskcompleted(self, data, message):
        if self.verbose:
            LOGGER.debug(
                'handle_taskcompleted:\n'
                '\tdata   : %s\n'
                '\tmessage: %s', json.dumps(data, sort_keys=True, indent=4),
                json.dumps(message.__dict__, sort_keys=True, indent=4))
        artifact_data = {}
        task_id = data['status']['taskId']
        run_id = data['runId']
        task_definition = self.taskcluster_queue.task(task_id)
        LOGGER.debug('handle_taskcompleted: task_definition: %s',
                     task_definition)
        # Test the repo early in order to prevent unnecessary IO for irrelevant branches.
        try:
            MH_BRANCH = task_definition['payload']['env']['MH_BRANCH']
            if MH_BRANCH not in self.trees:
                LOGGER.debug(
                    'handle_taskcompleted: task_id: %s, run_id: %s: '
                    'skip task_definition MH_BRANCH %s', task_id, run_id,
                    MH_BRANCH)
                return
        except KeyError:
            pass
        worker_type = task_definition['workerType']
        builder_type = 'buildbot' if worker_type == 'buildbot' else 'taskcluster'

        build_data = None
        artifact_data = {}
        artifacts = utils.taskcluster_artifacts(task_id, run_id)
        while True:
            try:
                artifact = artifacts.next()
            except StopIteration:
                break
            key = artifact['name'].replace('public/build/', '')
            artifact_data[
                key] = 'https://queue.taskcluster.net/v1/task/%s/runs/%s/artifacts/%s' % (
                    task_id, run_id, artifact['name'])
            if key == 'target.apk':
                build_data = utils.get_build_data(artifact_data[key],
                                                  builder_type=builder_type)
                if not build_data:
                    LOGGER.warning(
                        'handle_taskcompleted: task_id: %s, run_id: %s: '
                        'could not get %s', task_id, run_id,
                        artifact_data[key])
                    return
                tier = get_treeherder_tier(build_data['repo'], task_id, run_id)
                if builder_type != 'buildbot' and tier != 1:
                    LOGGER.debug(
                        'handle_taskcompleted: ignoring worker_type: %s, tier: %s',
                        worker_type, tier)
                    return
                build_data['app_name'] = 'fennec'
                build_data['builder_name'] = 'unknown'

        if not build_data:
            LOGGER.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: '
                'no build found', task_id, run_id)
            return

        if 'id' not in build_data or 'build_type' not in build_data:
            LOGGER.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: '
                'skipping build due to missing id or build_type %s.', task_id,
                run_id, build_data)
            return

        LOGGER.debug(
            'handle_taskcompleted: task_id: %s, run_id: %s: build_data: %s',
            task_id, run_id, build_data)
        if build_data['repo'] not in self.trees:
            LOGGER.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: skip repo %s',
                task_id, run_id, build_data['repo'])
            return
        if build_data['platform'] not in self.platforms:
            return
        if build_data['build_type'] not in self.buildtypes:
            LOGGER.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: skip build_type %s',
                task_id, run_id, build_data['build_type'])
            return

        rev_json_url = build_data['changeset'].replace('/rev/', '/json-rev/')
        rev_json = utils.get_remote_json(rev_json_url)
        if rev_json:
            build_data['comments'] = rev_json['desc']
        else:
            build_data['comments'] = 'unknown'
            LOGGER.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: could not get %s',
                task_id, run_id, rev_json_url)

        if build_data['repo'] == 'try' and 'autophone' not in build_data[
                'comments']:
            LOGGER.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: skip %s %s',
                task_id, run_id, build_data['repo'], build_data['comments'])
            return

        self.build_callback(build_data)
Example #14
    def handle_taskcompleted(self, data, message):
        logger = utils.getLogger()
        if self.verbose:
            logger.debug(
                'handle_taskcompleted:\n'
                '\tdata   : %s\n'
                '\tmessage: %s', json.dumps(data, sort_keys=True, indent=4),
                json.dumps(message.__dict__, sort_keys=True, indent=4))
        artifact_data = {}
        task_id = data['status']['taskId']
        run_id = data['runId']
        task_definition = utils.get_taskcluster_task_definition(task_id)
        logger.debug('handle_taskcompleted: task_definition: %s',
                     task_definition)
        # Test the repo early in order to prevent unnecessary IO for irrelevant branches.
        try:
            MH_BRANCH = task_definition['payload']['env']['MH_BRANCH']
            if MH_BRANCH not in self.trees:
                logger.debug(
                    'handle_taskcompleted: task_id: %s, run_id: %s: '
                    'skip task_definition MH_BRANCH %s', task_id, run_id,
                    MH_BRANCH)
                return
        except KeyError:
            pass
        worker_type = task_definition['workerType']
        builder_type = 'buildbot' if worker_type == 'buildbot' else 'taskcluster'

        build_data = None
        artifact_data = {}
        artifacts = utils.taskcluster_artifacts(task_id, run_id)
        # Save temporary holders for the build_url and app_name in order
        # that we may safely override the values returned from fennec in the
        # case that we are actually returning the geckoview_example build.
        build_url = None
        app_name = None
        while True:
            try:
                artifact = artifacts.next()
            except StopIteration:
                break
            key = artifact['name'].replace('public/build/', '')
            artifact_data[
                key] = 'https://queue.taskcluster.net/v1/task/%s/runs/%s/artifacts/%s' % (
                    task_id, run_id, artifact['name'])
            logger.debug('handle_taskcompleted: artifact: %s', artifact)
            if key == 'target.apk':
                build_data = utils.get_build_data(artifact_data[key],
                                                  builder_type=builder_type)
                if not build_data:
                    logger.warning(
                        'handle_taskcompleted: task_id: %s, run_id: %s: '
                        'could not get build data for %s', task_id, run_id,
                        artifact_data[key])
                    return

                if build_data['repo'] not in self.trees:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip repo %s not in %s',
                        task_id, run_id, build_data['repo'], self.trees)
                    return

                if build_data['platform'] not in self.platforms:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip platform %s not in %s',
                        task_id, run_id, build_data['platform'],
                        self.platforms)
                    return

                if build_data['build_type'] not in self.buildtypes:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip build_type %s not in %s',
                        task_id, run_id, build_data['build_type'],
                        self.buildtypes)
                    return

                if 'id' not in build_data or 'build_type' not in build_data:
                    logger.warning(
                        'handle_taskcompleted: task_id: %s, run_id: %s: '
                        'skip build due to missing id or build_type %s.',
                        task_id, run_id, build_data)
                    return

                build_url = build_data['url']
                if not app_name:
                    app_name = 'org.mozilla.fennec'
                logger.debug('handle_taskcompleted: got target.apk')
            elif key == 'geckoview_example.apk':
                # The geckoview_example app is built from the same source
                # as the corresponding fennec so we don't need to perform the
                # build_data look ups here but we will record the app_name
                # and the build_url
                logger.debug('handle_taskcompleted: got geckoview_example.apk')
                app_name = 'org.mozilla.geckoview_example'

        if not build_data:
            logger.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: '
                'could not get build_data', task_id, run_id)
            return

        # We are totally ignoring the gradle build of fennec in favor
        # of the geckoview_example.apk. If in the future we need to test
        # both, then we will have to change this to do the call back on
        # the fennec build and to download the geckoview_example.apk when
        # we download the fennec build.

        if app_name == 'org.mozilla.geckoview_example':
            build_url = build_url.replace('target.apk',
                                          'geckoview_example.apk')

        build_data['app_name'] = app_name
        build_data['url'] = build_url
        build_data['builder_name'] = 'unknown'

        logger.debug(
            'handle_taskcompleted: task_id: %s, run_id: %s: build_data: %s',
            task_id, run_id, build_data)

        rev_json_url = build_data['changeset'].replace('/rev/', '/json-rev/')
        rev_json = utils.get_remote_json(rev_json_url)
        if rev_json:
            build_data['comments'] = rev_json['desc']
        else:
            build_data['comments'] = 'unknown'
            logger.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: could not get %s',
                task_id, run_id, rev_json_url)

        if build_data['repo'] == 'try' and 'autophone' not in build_data[
                'comments']:
            logger.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: skip %s %s',
                task_id, run_id, build_data['repo'], build_data['comments'])
            return

        self.build_callback(build_data)
Example #15
def get_pushes_jobs_job_details_json(args, repo, update_cache=False):
    """get_pushes_jobs_job_details_json

    Retrieve nested pushes, jobs, job details matching args set via
    push_args parser and job_args parser.

    """
    if hasattr(args, 'update_cache'):
        update_cache = args.update_cache

    cache_attributes = ['treeherder', repo, 'job_details']

    pushes = get_pushes_jobs_json(args, repo, update_cache=update_cache)

    for push in pushes:
        for job in push['jobs']:
            # job['job_guid'] contains a slash followed by the run number.
            # Convert this into a value which can be used as a file name
            # by replacing / with _.
            job_guid_path = job['job_guid'].replace('/', '_')
            job_details_data = cache.load(cache_attributes, job_guid_path)
            if job_details_data and not update_cache:
                job['job_details'] = json.loads(job_details_data)
            else:
                job['job_details'] = []
                # We can get all of the job details from CLIENT.get_job_details while
                # get_job_log_url only gives us live_backing.log and live.log.
                job['job_details'] = retry_client_request(
                    CLIENT.get_job_details, 3, job_guid=job['job_guid'])
                if job['job_details'] is None:
                    logger.warning("Unable to get job_details for job_guid %s",
                                   job['job_guid'])
                    continue
                cache.save(cache_attributes, job_guid_path,
                           json.dumps(job['job_details'], indent=2))

            if hasattr(args, 'add_resource_usage') and args.add_resource_usage:
                for attempt in range(3):
                    try:
                        for job_detail in job['job_details']:
                            if job_detail['value'] == 'resource-usage.json':
                                resource_usage_name = job_guid_path + '-' + job_detail[
                                    'value']
                                job_detail_resource_usage_data = cache.load(
                                    cache_attributes, resource_usage_name)
                                if job_detail_resource_usage_data and not update_cache:
                                    job['resource_usage'] = json.loads(
                                        job_detail_resource_usage_data)
                                    job_detail_resource_usage_data = None
                                else:
                                    job['resource_usage'] = utils.get_remote_json(
                                        job_detail['url'])
                                    cache.save(
                                        cache_attributes, resource_usage_name,
                                        json.dumps(job['resource_usage'],
                                                   indent=2))
                                break
                        break
                    except requests.HTTPError as e:
                        if '503 Server Error' not in str(e):
                            raise
                        logger.exception(
                            'get_job_details resource usage attempt %s', attempt)
                    except requests.ConnectionError:
                        logger.exception(
                            'get_job_details resource usage attempt %s', attempt)
                    if attempt != 2:
                        time.sleep(30)
                if attempt == 2:
                    logger.warning("Unable to get job_details for job_guid %s",
                                   job['job_guid'])
                    continue
    return pushes
Example #16
 def get_treeherder_job(self, project, job_id):
     url = '%s/api/project/%s/jobs/%s' % (
         self.treeherder_url, project, job_id)
     return utils.get_remote_json(url)
Example #17
    def handle_taskcompleted(self, data, message):
        logger = utils.getLogger()
        if self.verbose:
            logger.debug(
                'handle_taskcompleted:\n'
                '\tdata   : %s\n'
                '\tmessage: %s', json.dumps(data, sort_keys=True, indent=4),
                json.dumps(message.__dict__, sort_keys=True, indent=4))
        artifact_data = {}
        task_id = data['status']['taskId']
        run_id = data['runId']
        task_definition = utils.get_taskcluster_task_definition(task_id)
        logger.debug('handle_taskcompleted: task_definition: %s',
                     task_definition)
        # Test the repo early in order to prevent unnecessary IO for irrelevant branches.
        try:
            MH_BRANCH = task_definition['payload']['env']['MH_BRANCH']
            if MH_BRANCH not in self.trees:
                logger.debug(
                    'handle_taskcompleted: task_id: %s, run_id: %s: '
                    'skip task_definition MH_BRANCH %s', task_id, run_id,
                    MH_BRANCH)
                return
        except KeyError:
            pass
        worker_type = task_definition['workerType']
        builder_type = 'buildbot' if worker_type == 'buildbot' else 'taskcluster'

        build_data = None
        artifact_data = {}
        artifacts = utils.taskcluster_artifacts(task_id, run_id)
        # Process the artifacts for the task looking for app build artifacts
        # with which to test. These currently are limited to target.apk
        # for fennec and geckoview_example.apk. target.apk is used
        # to obtain meta data for the build.
        # app_data is used to map the app name to the url to the apk file.
        # app_data[app_name] == build_url
        app_data = {}
        while True:
            try:
                artifact = artifacts.next()
            except StopIteration:
                break
            key = artifact['name'].replace('public/build/', '')
            artifact_data[
                key] = 'https://queue.taskcluster.net/v1/task/%s/runs/%s/artifacts/%s' % (
                    task_id, run_id, artifact['name'])
            build_url = artifact_data[key]
            logger.debug('handle_taskcompleted: artifact: %s', artifact)
            if key == 'target.apk':
                # The actual app name may be slightly different depending on the repository.
                app_data['org.mozilla.fennec'] = build_url
                build_data = utils.get_build_data(build_url,
                                                  builder_type=builder_type)
                if not build_data:
                    logger.warning(
                        'handle_taskcompleted: task_id: %s, run_id: %s: '
                        'could not get build data for %s', task_id, run_id,
                        build_url)
                    return

                tier = get_treeherder_tier(build_data['repo'], task_id, run_id)
                if builder_type != 'buildbot' and tier != 1:
                    logger.debug(
                        'handle_taskcompleted: ignoring worker_type: %s, tier: %s',
                        worker_type, tier)
                    return

                if build_data['repo'] not in self.trees:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip repo %s not in %s',
                        task_id, run_id, build_data['repo'], self.trees)
                    return

                if build_data['platform'] not in self.platforms:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip platform %s not in %s',
                        task_id, run_id, build_data['platform'],
                        self.platforms)
                    return

                if build_data['build_type'] not in self.buildtypes:
                    logger.debug(
                        'handle_taskcompleted: task_id: %s, run_id: %s: skip build_type %s not in %s',
                        task_id, run_id, build_data['build_type'],
                        self.buildtypes)
                    return

                if 'id' not in build_data or 'build_type' not in build_data:
                    logger.warning(
                        'handle_taskcompleted: task_id: %s, run_id: %s: '
                        'skip build due to missing id or build_type %s.',
                        task_id, run_id, build_data)
                    return

                logger.debug('handle_taskcompleted: got target.apk')
            elif key == 'geckoview_example.apk':
                # The geckoview_example app is built from the same source
                # as the corresponding fennec so we don't need to perform the
                # build_data look ups here but we will record the app_name
                # and the build_url
                logger.debug('handle_taskcompleted: got geckoview_example.apk')
                app_data['org.mozilla.geckoview_example'] = build_url

        if not build_data:
            logger.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: '
                'could not get build_data', task_id, run_id)
            return

        build_data['builder_name'] = 'unknown'
        # Save the app_data to the build_data object to be used in the build_callback.
        build_data['app_data'] = app_data

        logger.debug(
            'handle_taskcompleted: task_id: %s, run_id: %s: build_data: %s',
            task_id, run_id, build_data)

        rev_json_url = build_data['changeset'].replace('/rev/', '/json-rev/')
        rev_json = utils.get_remote_json(rev_json_url)
        if rev_json:
            build_data['comments'] = rev_json['desc']
        else:
            build_data['comments'] = 'unknown'
            logger.warning(
                'handle_taskcompleted: task_id: %s, run_id: %s: could not get %s',
                task_id, run_id, rev_json_url)

        if build_data['repo'] == 'try' and 'autophone' not in build_data[
                'comments']:
            logger.debug(
                'handle_taskcompleted: task_id: %s, run_id: %s: skip %s %s',
                task_id, run_id, build_data['repo'], build_data['comments'])
            return

        self.build_callback(build_data)
Example #18
    def publish_results(self, starttime=0, tstrt=0, tstop=0,
                        testname='', cache_enabled=True,
                        rejected=False):
        # Create JSON to send to webserver
        author = None
        if self.build.tree == 'try':
            rev_json_url = self.build.changeset.replace('/rev/', '/json-rev/')
            rev_json = utils.get_remote_json(rev_json_url)
            if rev_json:
                author = rev_json['pushuser']

        blddate = float(convert_datetime_to_string(self.build.date, TIMESTAMP))
        self.loggerdeco.debug('publish_results: build.id: %s, build.date: %s, blddate: %s' % (
            self.build.id, self.build.date, blddate))

        resultdata = {
            'phoneid': self.phone.id,
            'testname': testname,
            'starttime': starttime,
            'throbberstart': tstrt,
            'throbberstop': tstop,
            'blddate': blddate,
            'cached': cache_enabled,
            'rejected': rejected,
            'revision': self.build.changeset,
            'author': author,
            'productname': self.build.app_name,
            'productversion': self.build.version,
            'osver': self.phone.osver,
            'bldtype': self.build.type,
            'machineid': self.phone.machinetype
        }

        result = {'data': resultdata}
        # Upload
        if self._signer:
            encoded_result = jwt.encode(result, signer=self._signer)
            content_type = 'application/jwt'
        else:
            encoded_result = json.dumps(result)
            content_type = 'application/json; charset=utf-8'
        req = urllib2.Request(self._resulturl + 'add/', encoded_result,
                              {'Content-Type': content_type})
        max_attempts = 10
        wait_time = 10
        for attempt in range(1, max_attempts+1):
            try:
                f = urllib2.urlopen(req)
                f.read()
                f.close()
                return
            except Exception, e:
                # Retry submission if the exception is due to a
                # timeout and if we haven't exceeded the maximum
                # number of attempts.
                if attempt < max_attempts:
                    self.loggerdeco.warning('PerfTest.publish_results: '
                                            'Attempt %d/%d error %s sending '
                                            'results to server' % (
                                                attempt, max_attempts,
                                                e))
                    time.sleep(wait_time)
                    continue
                self.loggerdeco.exception('Error sending results to server')
                self.worker_subprocess.mailer.send(
                    '%s attempt %s/%s Error sending %s results for phone %s, '
                    'build %s' % (utils.host(), attempt, max_attempts,
                                  self.name, self.phone.id, self.build.id),
                    'There was an error attempting to send test results '
                    'to the result server %s.\n'
                    '\n'
                    'Host       %s\n'
                    'Job        %s\n'
                    'Test       %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n'
                    'Exception  %s\n'
                    'Result     %s\n' %
                    (self.result_server,
                     utils.host(),
                     self.job_url,
                     self.name,
                     self.phone.id,
                     self.build.tree,
                     self.build.id,
                     self.build.changeset,
                     e,
                     json.dumps(resultdata, sort_keys=True, indent=2)))
                message = 'Error sending results to phonedash server'
                self.add_failure(self.name, TestStatus.TEST_UNEXPECTED_FAIL,
                                 message, TreeherderStatus.EXCEPTION)
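The send loop above (repeated in Example #20) is a fixed-interval retry. Distilled into a standalone helper it would read roughly as follows; the helper name and signature are illustrative, not part of the original code, and the last error is re-raised here instead of being mailed out.

import time


def retry(func, max_attempts=10, wait_time=10, logger=None):
    # Call func() until it succeeds or max_attempts is exhausted,
    # sleeping wait_time seconds between attempts, as the loop above does.
    for attempt in range(1, max_attempts + 1):
        try:
            return func()
        except Exception as e:
            if attempt == max_attempts:
                raise
            if logger:
                logger.warning('Attempt %d/%d failed: %s',
                               attempt, max_attempts, e)
            time.sleep(wait_time)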
Example #19
 def get_treeherder_job(self, project, job_id):
     url = '%s/api/project/%s/jobs/%s/' % (self.treeherder_url, project,
                                           job_id)
     return utils.get_remote_json(url)
Example #20
    def publish_results(self, starttime=0, tstrt=0, tstop=0,
                        testname='', cache_enabled=True,
                        rejected=False):
        # Create JSON to send to webserver
        author = None
        if self.build.tree == 'try':
            rev_json_url = self.build.changeset.replace('/rev/', '/json-rev/')
            rev_json = utils.get_remote_json(rev_json_url)
            if rev_json:
                author = rev_json['pushuser']

        blddate = float(convert_datetime_to_string(self.build.date, TIMESTAMP))
        self.loggerdeco.debug('publish_results: build.id: %s, build.date: %s, blddate: %s' % (
            self.build.id, self.build.date, blddate))

        resultdata = {
            'phoneid': self.phone.id,
            'testname': testname,
            'starttime': starttime,
            'throbberstart': tstrt,
            'throbberstop': tstop,
            'blddate': blddate,
            'cached': cache_enabled,
            'rejected': rejected,
            'revision': self.build.changeset,
            'author': author,
            'productname': self.build.app_name,
            'productversion': self.build.version,
            'osver': self.phone.osver,
            'bldtype': self.build.type,
            'machineid': self.phone.machinetype
        }

        result = {'data': resultdata}
        # Upload
        if self._signer:
            encoded_result = jwt.encode(result, signer=self._signer)
            content_type = 'application/jwt'
        else:
            encoded_result = json.dumps(result)
            content_type = 'application/json; charset=utf-8'
        req = urllib2.Request(self._resulturl + 'add/', encoded_result,
                              {'Content-Type': content_type})
        max_attempts = 10
        wait_time = 10
        for attempt in range(1, max_attempts+1):
            try:
                f = urllib2.urlopen(req)
                f.read()
                f.close()
                return
            except Exception, e:
                # Retry submission if the exception is due to a
                # timeout and if we haven't exceeded the maximum
                # number of attempts.
                if attempt < max_attempts:
                    self.loggerdeco.warning('PerfTest.publish_results: '
                                            'Attempt %d/%d error %s sending '
                                            'results to server' % (
                                                attempt, max_attempts,
                                                e))
                    time.sleep(wait_time)
                    continue
                self.loggerdeco.exception('Error sending results to server')
                self.worker_subprocess.mailer.send(
                    '%s attempt %s/%s Error sending %s results for phone %s, '
                    'build %s' % (utils.host(), attempt, max_attempts,
                                  self.name, self.phone.id, self.build.id),
                    'There was an error attempting to send test results '
                    'to the result server %s.\n'
                    '\n'
                    'Host       %s\n'
                    'Job        %s\n'
                    'Test       %s\n'
                    'Phone      %s\n'
                    'Repository %s\n'
                    'Build      %s\n'
                    'Revision   %s\n'
                    'Exception  %s\n'
                    'Result     %s\n' %
                    (self.result_server,
                     utils.host(),
                     self.job_url,
                     self.name,
                     self.phone.id,
                     self.build.tree,
                     self.build.id,
                     self.build.changeset,
                     e,
                     json.dumps(resultdata, sort_keys=True, indent=2)))
                message = 'Error sending results to server'
                self.status = PhoneTest.EXCEPTION
                self.message = message
                self.update_status(message=message)
Example #21
def get_test_isolation_bugzilla_data(args):
    """Query Bugzilla for bugs marked with [test isolation] in the
    whiteboard.  Return a dictionary keyed by revision url containing
    the bug id and summary.

    """
    cache_attributes = ['test-isolation']

    bugzilla_data = cache.load(cache_attributes, 'bugzilla.json')
    if bugzilla_data and not args.update_cache:
        return json.loads(bugzilla_data)

    now = datetime.datetime.now()

    data = {}

    re_logview = re.compile(
        r'https://treeherder.mozilla.org/logviewer.html#\?job_id=([0-9]+)&repo=([a-z-]+)'
    )
    re_pushlog_url = re.compile(r'(https://.*)$\n', re.MULTILINE)

    query = BUGZILLA_URL + 'bug?'
    query_terms = {
        'include_fields': 'id,creation_time,whiteboard',
        'creation_time': args.bug_creation_time,
        'whiteboard': args.whiteboard,
        'limit': 100,
        'offset': 0,
    }
    if args.bugs:
        query_terms['id'] = ','.join([str(id) for id in args.bugs])
    else:
        query_terms['creation_time'] = args.bug_creation_time

    while True:
        response = utils.get_remote_json(query, params=query_terms)
        if 'error' in response:
            logger.error('Bugzilla({}, {}): {}'.format(query, query_terms,
                                                       response))
            return

        if len(response['bugs']) == 0:
            break

        # update query terms for next iteration of the loop.
        query_terms['offset'] += query_terms['limit']

        for bug in response['bugs']:
            #https://bugzilla.mozilla.org/rest/bug/1559260/comment

            if args.bugs_after and bug['id'] <= args.bugs_after:
                continue

            if args.whiteboard not in bug['whiteboard']:
                # The query performs an all words not substring
                # query, so restrict to the substring.
                continue

            if args.bugs and bug['id'] not in args.bugs:
                continue

            query2 = BUGZILLA_URL + 'bug/%s' % bug['id']
            response2 = utils.get_remote_json(query2)
            if 'error' in response2:
                logger.error('Bugzilla({}): {}'.format(query2, response2))
                return

            bug_summary = response2['bugs'][0]['summary']
            munged_bug_summary = bugzilla_summary_munge_failure(bug_summary)

            query3 = BUGZILLA_URL + 'bug/%s/comment' % bug['id']
            response3 = utils.get_remote_json(query3)
            if 'error' in response3:
                logger.error('Bugzilla({}): {}'.format(query, response3))
                return

            raw_text = response3['bugs'][str(
                bug['id'])]['comments'][0]['raw_text']

            match = re_logview.search(raw_text)
            if match:
                # Get push associated with this failed job.
                job_id = int(match.group(1))
                repo = match.group(2)
                job = get_job_by_repo_job_id_json(
                    args, repo, job_id, update_cache=args.update_cache)
                push_id = job['push_id']
                push = get_push_json(args,
                                     repo,
                                     push_id,
                                     update_cache=args.update_cache)
                repository = get_repository_by_id(
                    push['revisions'][0]['repository_id'])
                revision = push['revisions'][0]['revision']
                revision_url = '%s/rev/%s' % (repository['url'], revision)

                new_args = copy.deepcopy(args)
                new_args.revision_url = revision_url
                (new_args.repo, _,
                 new_args.revision) = new_args.revision_url.split('/')[-3:]
                new_args.add_bugzilla_suggestions = True
                new_args.state = 'completed'
                new_args.result = 'success|testfailed'
                #new_args.job_type_name = '^test-'
                new_args.job_type_name = job['job_type_name']
                new_args.test_failure_pattern = TEST_FAILURE_PATTERN
                pushes_args.compile_filters(new_args)
                jobs_args.compile_filters(new_args)

                if revision_url not in data:
                    data[revision_url] = []

                mozharness_failure = match_bug_summary_to_mozharness_failure(
                    bug_summary, raw_text)

                test = None
                if mozharness_failure:
                    test = get_test(mozharness_failure)
                    pattern = convert_failure_to_pattern(mozharness_failure)
                if not test:
                    test = get_test(munged_bug_summary)
                    pattern = convert_failure_to_pattern(munged_bug_summary)
                if not test:
                    logger.warning('Unable to obtain test for '
                                   'bug {} {} failure {}'.format(
                                       bug['id'], bug_summary,
                                       mozharness_failure))

                bug_data = {
                    'bug_id': bug['id'],
                    'bug_summary': bug_summary,
                    'munged_bug_summary': munged_bug_summary,
                    'job_type_name': job['job_type_name'],
                    'test': test,
                    'mozharness_failure': mozharness_failure,
                    'job_id': job_id,
                    'push_id': push_id,
                    'repository': repository['name'],
                    'revision_url': revision_url,
                    'bugzilla_suggestions': get_job_bugzilla_suggestions_json(
                        new_args,
                        new_args.repo,
                        job_id,
                        update_cache=args.update_cache),
                    'bug_job_map': get_bug_job_map_json(
                        new_args,
                        new_args.repo,
                        job_id,
                        update_cache=args.update_cache),
                    'pattern': pattern,
                }

                data[revision_url].append(bug_data)

                # Get failure counts for trunk for this bug for the two weeks following
                # the creation of the bug. Ignore failure counts for bugs who are less
                # than 2 weeks old.
                # TODO: Allow in place updating of bugzilla.json so that we can reprocess
                # the failure counts without having to query the full set of bugs.
                start_date = datetime.datetime.strptime(
                    bug['creation_time'].rstrip('Z'),
                    '%Y-%m-%dT%H:%M:%S') - datetime.timedelta(days=1)
                end_date = start_date + datetime.timedelta(days=15)
                failure_count_json = get_failure_count_json(
                    args, 'trunk', bug['id'], start_date, end_date)
                if now - start_date < datetime.timedelta(days=15):
                    failure_count = None
                else:
                    failure_count = 0
                    for failures in failure_count_json:
                        failure_count += failures['failure_count']
                bug_data['failure_count'] = failure_count

            elif args.whiteboard and False:  # Disabled: this branch is buggy.
                # This run has specified the test, or this is a bug
                # that was not filed via Treeherder. If it was marked
                # via the whiteboard then we are interested in the
                # pushes for this bug.  Since we can't really tell
                # which is which, we can include all of the pushes,
                # since only those with test isolation jobs will
                # matter.  The problem is this bug does not
                # necessarily have a bug_summary referencing a test
                # failure...
                test = None  # We don't have a failure in this case.
                comments = response3['bugs'][str(bug['id'])]['comments']
                for comment in comments:
                    if not comment['raw_text'].startswith('Pushed by'):
                        continue
                    # Get the last revision in the comment as the head of the push.
                    revision_url = None
                    pushlog_url_match = re_pushlog_url.search(
                        comment['raw_text'])
                    while pushlog_url_match:
                        revision_url = pushlog_url_match.group(1)
                        pushlog_url_match = re_pushlog_url.search(
                            comment['raw_text'], pushlog_url_match.end(1))
                    if revision_url:
                        # revision_url from Bugzilla has the 12 character revision.
                        new_args = copy.deepcopy(args)
                        new_args.revision_url = revision_url
                        (new_args.repo, _, new_args.revision
                         ) = new_args.revision_url.split('/')[-3:]
                        new_args.add_bugzilla_suggestions = True
                        new_args.state = 'completed'
                        new_args.job_type_name = '^test-'
                        new_args.test_failure_pattern = TEST_FAILURE_PATTERN
                        pushes_args.compile_filters(new_args)
                        jobs_args.compile_filters(new_args)

                        pushes = get_pushes_jobs_json(
                            new_args,
                            new_args.repo,
                            update_cache=args.update_cache)
                        if len(pushes):
                            # Convert the revision url to 40 characters.
                            push = pushes[0]
                            repository = get_repository_by_id(
                                push['revisions'][0]['repository_id'])
                            revision = push['revisions'][0]['revision']
                            revision_url = '%s/rev/%s' % (repository['url'],
                                                          revision)
                            new_args.revision_url = revision_url
                            (new_args.repo, _, new_args.revision
                             ) = new_args.revision_url.split('/')[-3:]

                            if revision_url not in data:
                                data[revision_url] = []

                            push_id = push['id']
                            repository = get_repository_by_id(
                                push['revisions'][0]['repository_id'])
                            # Only the original job is of interest for collecting the bugzilla data.
                            # The others are the retriggers.
                            #  There shouldn't be a bug_job_map or bugzilla_suggestions for non-classified bugs.
                            job_id = push['jobs'][0]

                            bug_data = {
                                'bug_id': bug['id'],
                                'bug_summary': bug_summary,
                                'test': test,
                                'job_id': job_id,
                                'push_id': push_id,
                                'repository': repository['name'],
                                'revision_url': revision_url,
                                'bugzilla_suggestions': [],
                                'bug_job_map': [],
                                'pattern':
                                convert_failure_to_pattern(bug_summary),
                            }
                            data[revision_url].append(bug_data)

                            # Get failure counts for trunk for this bug for the two weeks following
                            # the creation of the bug. Ignore failure counts for bugs who are less
                            # than 2 weeks old. Use the previous day for the start date and 15 days
                            # to account for timezone issues.
                            # TODO: Allow in place updating of bugzilla.json so that we can reprocess
                            # the failure counts without having to query the full set of bugs.
                            start_date = datetime.datetime.strptime(
                                bug['creation_time'].rstrip('Z'),
                                '%Y-%m-%dT%H:%M:%S') - datetime.timedelta(
                                    days=1)
                            end_date = start_date + datetime.timedelta(days=15)
                            failure_count_json = get_failure_count_json(
                                args, 'trunk', bug['id'], start_date, end_date)
                            if now - start_date < datetime.timedelta(days=15):
                                failure_count = None
                            else:
                                failure_count = 0
                                for failures in failure_count_json:
                                    failure_count += failures['failure_count']
                            bug_data['failure_count'] = failure_count

    cache.save(cache_attributes, 'bugzilla.json', json.dumps(data, indent=2))

    return data
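The first while loop in this example pages through the Bugzilla REST search by advancing offset by limit until an empty page comes back. The same pattern in isolation, reusing BUGZILLA_URL and utils.get_remote_json from the snippet above; packaging it as a generator is an illustrative choice, not how the original is structured:

def iter_bugs(query_terms, limit=100):
    # Yield bugs one page at a time; stops on a fetch error, an API
    # error payload, or the first empty page, like the loop above.
    query_terms = dict(query_terms, limit=limit, offset=0)
    while True:
        response = utils.get_remote_json(BUGZILLA_URL + 'bug?',
                                         params=query_terms)
        if not response or 'error' in response or not response['bugs']:
            return
        for bug in response['bugs']:
            yield bug
        query_terms['offset'] += limit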
Example #22
args = parser.parse_args()

start_date = args.date
if not args.date:
    start_date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')

pushes_base_url = 'https://hg.mozilla.org/'

if args.repo in ('mozilla-beta', 'mozilla-aurora', 'mozilla-release'):
    pushes_base_url += 'releases/'
elif args.repo not in ('mozilla-central', 'try'):
    pushes_base_url += 'integration/'

pushes_base_url += args.repo + '/json-pushes?startdate=%s&enddate=%s'

start = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end = start + datetime.timedelta(days=1)
end_date = datetime.datetime.strftime(end, '%Y-%m-%d')

pushes_url = pushes_base_url % (start_date, end_date)
pushes_json = utils.get_remote_json(pushes_url)
if pushes_json:
    keys = pushes_json.keys()
    keys.sort()
    print int(keys[-1]) - int(keys[0]) + 1
Example #23
            robocop_url = urlparse.urljoin(buildurl, 'robocop.apk')
            robocop_path = os.path.join(cache_build_dir, 'robocop.apk')
            if force or not os.path.exists(robocop_path):
                tmpf = tempfile.NamedTemporaryFile(delete=False)
                tmpf.close()
                try:
                    urllib.urlretrieve(robocop_url, tmpf.name)
                except IOError:
                    os.unlink(tmpf.name)
                    err = 'IO Error retrieving robocop.apk: %s.' % robocop_url
                    logger.exception(err)
                    return {'success': False, 'error': err}
                shutil.move(tmpf.name, robocop_path)
            test_packages_url = re.sub(r'\.apk$', '.test_packages.json', buildurl)
            logger.info('downloading test package json %s' % test_packages_url)
            test_packages = utils.get_remote_json(test_packages_url)
            if not test_packages:
                logger.warning('test package json %s not found' %
                               test_packages_url)
                test_packages_url = urlparse.urljoin(buildurl,
                                                     'test_packages.json')
                logger.info('falling back to test package json %s' %
                            test_packages_url)
                test_packages = utils.get_remote_json(test_packages_url)

            # The test_packages.json file contains keys for each
            # test category but they all point to the same tests
            # zip file. This will change when
            # https://bugzilla.mozilla.org/show_bug.cgi?id=917999
            # goes into production, but using a set allows us to
            # easily eliminate duplicate file names.
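
The snippet breaks off here, but the file layout the comment describes is roughly the following (filenames invented for illustration): before bug 917999 every test category keys the same archive, which is why collecting the values into a set collapses the duplicates.

    {
        "common": ["fennec-38.0.en-US.android-arm.tests.zip"],
        "mochitest": ["fennec-38.0.en-US.android-arm.tests.zip"],
        "robocop": ["fennec-38.0.en-US.android-arm.tests.zip"]
    }
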
Example #24
 # XXX: assumes a fixed buildurl -> fennec_ids.txt mapping
 fennec_ids_url = urlparse.urljoin(buildurl, 'fennec_ids.txt')
 fennec_ids_path = os.path.join(cache_build_dir, 'fennec_ids.txt')
 if force or not os.path.exists(fennec_ids_path):
     tmpf = tempfile.NamedTemporaryFile(delete=False)
     tmpf.close()
     try:
         urllib.urlretrieve(fennec_ids_url, tmpf.name)
     except IOError:
         os.unlink(tmpf.name)
         err = 'IO Error retrieving fennec_ids.txt: %s.' % \
             fennec_ids_url
         logger.exception(err)
         return {'success': False, 'error': err}
     shutil.move(tmpf.name, fennec_ids_path)
 test_packages = utils.get_remote_json(
     urlparse.urljoin(buildurl, 'test_packages.json'))
 # The test_packages.json file contains keys for each
 # test category but they all point to the same tests
 # zip file. This will change when
 # https://bugzilla.mozilla.org/show_bug.cgi?id=917999
 # goes into production, but using a set allows us to
 # easily eliminate duplicate file names.
 test_package_files = set()
 if test_package_names and test_packages:
     logger.debug('test_packages: %s' % json.dumps(test_packages))
     for test_package_name in test_package_names:
         logger.debug('test_package_name: %s' % test_package_name)
         test_package_files.update(set(test_packages[test_package_name]))
 else:
      # XXX: assumes a fixed buildurl -> tests_url mapping
     logger.debug('default test package')
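
A quick interactive check of the set-based dedup above, with an invented two-category mapping that points both names at the same archive:

    >>> test_packages = {'mochitest': ['tests.zip'], 'robocop': ['tests.zip']}
    >>> files = set()
    >>> for name in ('mochitest', 'robocop'):
    ...     files.update(test_packages[name])
    ...
    >>> files
    set(['tests.zip'])
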
Example #25
    def handle_taskcompleted(self, data, message):
        logger = utils.getLogger()
        if self.verbose:
            logger.debug(
                'handle_taskcompleted:\n'
                '\tdata   : %s\n'
                '\tmessage: %s',
                json.dumps(data, sort_keys=True, indent=4),
                json.dumps(message.__dict__, sort_keys=True, indent=4))
        task_id = data['status']['taskId']
        run_id = data['runId']
        task_definition = utils.get_taskcluster_task_definition(task_id)
        logger.debug('handle_taskcompleted: task_definition: %s', task_definition)
        # Test the repo early in order to prevent unnecessary IO for irrelevant branches.
        try:
            MH_BRANCH = task_definition['payload']['env']['MH_BRANCH']
            if MH_BRANCH not in self.trees:
                logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: '
                             'skip task_definition MH_BRANCH %s',
                             task_id, run_id, MH_BRANCH)
                return
        except KeyError:
            pass
        worker_type = task_definition['workerType']
        builder_type = 'buildbot' if worker_type == 'buildbot' else 'taskcluster'

        build_data = None
        artifact_data = {}
        artifacts = utils.taskcluster_artifacts(task_id, run_id)
        # Process the task's artifacts looking for app build artifacts to
        # test. These are currently limited to target.apk for fennec and
        # geckoview_example.apk; target.apk is also used to obtain metadata
        # for the build. app_data maps each app name to the url of its apk:
        # app_data[app_name] == build_url
        app_data = {}
        for artifact in artifacts:
            key = artifact['name'].replace('public/build/', '')
            artifact_data[key] = 'https://queue.taskcluster.net/v1/task/%s/runs/%s/artifacts/%s' % (
                task_id, run_id, artifact['name'])
            build_url = artifact_data[key]
            logger.debug('handle_taskcompleted: artifact: %s', artifact)
            if key == 'target.apk':
                # The actual app name may be slightly different depending on the repository.
                app_data['org.mozilla.fennec'] = build_url
                build_data = utils.get_build_data(build_url, builder_type=builder_type)
                if not build_data:
                    logger.warning('handle_taskcompleted: task_id: %s, run_id: %s: '
                                   'could not get build data for %s', task_id, run_id, build_url)
                    return

                tier = get_treeherder_tier(build_data['repo'], task_id, run_id)
                if builder_type != 'buildbot' and tier != 1:
                    logger.debug('handle_taskcompleted: ignoring worker_type: %s, tier: %s',
                                 worker_type, tier)
                    return

                # Guard against missing keys before they are used below.
                if 'id' not in build_data or 'build_type' not in build_data:
                    logger.warning('handle_taskcompleted: task_id: %s, run_id: %s: '
                                   'skip build due to missing id or build_type %s.',
                                   task_id, run_id, build_data)
                    return

                if build_data['repo'] not in self.trees:
                    logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: skip repo %s not in %s',
                                 task_id, run_id, build_data['repo'], self.trees)
                    return

                if build_data['platform'] not in self.platforms:
                    logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: skip platform %s not in %s',
                                 task_id, run_id, build_data['platform'], self.platforms)
                    return

                if build_data['build_type'] not in self.buildtypes:
                    logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: skip build_type %s not in %s',
                                 task_id, run_id, build_data['build_type'], self.buildtypes)
                    return

                logger.debug('handle_taskcompleted: got target.apk')
            elif key == 'geckoview_example.apk':
                # The geckoview_example app is built from the same source as
                # the corresponding fennec, so we don't need to repeat the
                # build_data lookups here; we just record the app_name and
                # the build_url.
                logger.debug('handle_taskcompleted: got geckoview_example.apk')
                app_data['org.mozilla.geckoview_example'] = build_url

        if not build_data:
            logger.warning('handle_taskcompleted: task_id: %s, run_id: %s: '
                           'could not get build_data', task_id, run_id)
            return

        build_data['builder_name'] = 'unknown'
        # Save the app_data to the build_data object to be used in the build_callback.
        build_data['app_data'] = app_data

        logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: build_data: %s',
                     task_id, run_id, build_data)

        rev_json_url = build_data['changeset'].replace('/rev/', '/json-rev/')
        rev_json = utils.get_remote_json(rev_json_url)
        if rev_json:
            build_data['comments'] = rev_json['desc']
        else:
            build_data['comments'] = 'unknown'
            logger.warning('handle_taskcompleted: task_id: %s, run_id: %s: could not get %s',
                           task_id, run_id, rev_json_url)

        if build_data['repo'] == 'try' and 'autophone' not in build_data['comments']:
            logger.debug('handle_taskcompleted: task_id: %s, run_id: %s: skip %s %s',
                         task_id, run_id, build_data['repo'], build_data['comments'])
            return

        self.build_callback(build_data)
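
For reference, the json-rev endpoint consulted above returns a JSON object whose desc field carries the commit message; a trimmed example with invented values:

    {
        "node": "abcdef0123456789abcdef0123456789abcdef01",
        "desc": "Bug 1234567 - Do the thing; try: -b o -p android-api-16 autophone",
        "user": "developer@example.com"
    }

The try check above then simply looks for the substring 'autophone' in that commit message to decide whether a try push opted in to Autophone testing.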