Example #1
def post_collection(
    project, th_collection, status=None, expect_errors=False,
    consumer_key=None, consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure handling
    if consumer_key:
        credentials['consumer_key'] = consumer_key

    if consumer_secret:
        credentials['consumer_secret'] = consumer_secret

    tr = TreeherderRequest(
        protocol='http',
        host='localhost',
        project=project,
        oauth_key=credentials['consumer_key'],
        oauth_secret=credentials['consumer_secret']
        )

    signed_uri = tr.get_signed_uri(
        th_collection.to_json(), tr.get_uri(th_collection)
        )

    response = TestApp(application).post_json(
        str(signed_uri), params=th_collection.get_collection_data(),
        status=status, expect_errors=expect_errors
        )

    return response
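A short usage sketch for the helper above, assuming the thclient package provides the collection classes used throughout these examples; the project name, revision hash, and job guid are illustrative assumptions.

from thclient import TreeherderJobCollection

tjc = TreeherderJobCollection()
tj = tjc.get_job()
tj.add_project('mozilla-central')         # hypothetical project name
tj.add_revision_hash('c80dc6ffe865')      # hypothetical revision hash
tj.add_job_guid('f1c75261017c680200469')  # hypothetical job guid
tj.add_state('completed')
tjc.add(tj)

response = post_collection('mozilla-central', tjc)
assert response.status_int == 200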
Example #2
    def _post_json_data(url, data):

        th_collection = data[jm.project]

        OAuthCredentials.set_credentials(SampleData.get_credentials())
        credentials = OAuthCredentials.get_credentials(jm.project)

        tr = TreeherderRequest(
            protocol='http',
            host='localhost',
            project=jm.project,
            oauth_key=credentials['consumer_key'],
            oauth_secret=credentials['consumer_secret']
            )
        signed_uri = tr.oauth_client.get_signed_uri(
            th_collection.to_json(),
            tr.get_uri(th_collection.endpoint_base),
            "POST"
            )

        response = TestApp(application).post_json(
            str(signed_uri), params=th_collection.get_collection_data()
            )

        response.getcode = lambda: response.status_int
        return response
Example #3
    def load(self, th_collections):
        errors = []
        for project in th_collections:

            credentials = OAuthCredentials.get_credentials(project)

            th_request = TreeherderRequest(
                protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
                host=settings.TREEHERDER_REQUEST_HOST,
                project=project,
                oauth_key=credentials.get('consumer_key', None),
                oauth_secret=credentials.get('consumer_secret', None)
            )

            logger.info(
                "collection loading request: {0}".format(
                    th_request.get_uri(th_collections[project].endpoint_base)))
            response = th_request.post(th_collections[project])

            if not response or response.status != 200:
                errors.append({
                    "project": project,
                    "url": th_collections[project].endpoint_base,
                    "message": response.read() if response else "no response"
                })
        if errors:
            raise CollectionNotLoadedException(errors)
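A minimal sketch of driving load() from Example #3; the owning class name and project are hypothetical, and the job fields shown are illustrative, not a complete schema.

import uuid

from thclient import TreeherderJobCollection

tjc = TreeherderJobCollection()
tj = tjc.get_job()
tj.add_project('mozilla-inbound')   # hypothetical project name
tj.add_job_guid(str(uuid.uuid4()))
tj.add_state('completed')
tjc.add(tj)

loader = TreeherderCollectionLoader()   # hypothetical owning class
try:
    # load() expects a mapping of project name -> collection
    loader.load({'mozilla-inbound': tjc})
except CollectionNotLoadedException as e:
    print('load failed: %s' % e)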
Example #4
def post_job_data(project, uri, data, status=None, expect_errors=False):

    # Since the uri is passed in, it isn't generated by the treeherder
    # request or collection, so it's missing the protocol and host. Add
    # those missing elements here.
    uri = 'http://localhost{0}'.format(uri)

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    tr = TreeherderRequest(protocol='http',
                           host='localhost',
                           project=project,
                           oauth_key=credentials['consumer_key'],
                           oauth_secret=credentials['consumer_secret'])

    signed_uri = tr.get_signed_uri(json.dumps(data), uri)

    response = TestApp(application).post_json(str(signed_uri),
                                              params=data,
                                              status=status,
                                              expect_errors=expect_errors)

    return response
Example #5
def post_job_data(
    project, uri, data, status=None, expect_errors=False):

    # Since the uri is passed in, it isn't generated by the treeherder
    # request or collection, so it's missing the protocol and host. Add
    # those missing elements here.
    uri = 'http://localhost{0}'.format(uri)

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    tr = TreeherderRequest(
        protocol='http',
        host='localhost',
        project=project,
        oauth_key=credentials['consumer_key'],
        oauth_secret=credentials['consumer_secret']
        )

    signed_uri = tr.get_signed_uri(
        json.dumps(data), uri
        )

    response = TestApp(application).post_json(
        str(signed_uri), params=data, status=status,
        expect_errors=expect_errors
        )

    return response
Example #6
def post_log_artifacts(project,
                       job_guid,
                       job_log_url,
                       retry_task,
                       extract_artifacts_cb,
                       check_errors=False):
    """Post a list of artifacts to a job."""
    def _retry(e):
        # Initially retry after 1 minute, then for each subsequent retry
        # lengthen the retry time by another minute.
        retry_task.retry(exc=e,
                         countdown=(1 + retry_task.request.retries) * 60)
        # .retry() raises a RetryTaskError exception,
        # so nothing after this function will be executed

    log_description = "%s %s (%s)" % (project, job_guid, job_log_url['url'])
    logger.debug("Downloading/parsing log for %s", log_description)

    credentials = OAuthCredentials.get_credentials(project)
    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )

    try:
        artifact_list = extract_artifacts_cb(job_log_url['url'], job_guid,
                                             check_errors)
    except Exception as e:
        update_parse_status(req, job_log_url, 'failed')
        if isinstance(e, urllib2.HTTPError) and e.code == 404:
            logger.debug("Log not found for %s", log_description)
            return
        logger.error("Failed to download/parse log for %s: %s",
                     log_description, e)
        _retry(e)

    # store the artifacts generated
    tac = TreeherderArtifactCollection()
    for artifact in artifact_list:
        ta = tac.get_artifact({
            "job_guid": artifact[0],
            "name": artifact[1],
            "type": artifact[2],
            "blob": artifact[3]
        })
        tac.add(ta)

    try:
        req.post(tac)
        update_parse_status(req, job_log_url, 'parsed')
        logger.debug("Finished posting artifact for %s %s", project, job_guid)
    except Exception as e:
        logger.error("Failed to upload parsed artifact for %s: %s",
                     log_description, e)
        _retry(e)
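A sketch of how post_log_artifacts is typically wired up as a Celery task, so that retry_task can reschedule the work; the decorator import, task name, and max_retries value are assumptions, and extract_log_artifacts is the callback used by the related examples below.

from celery import task

@task(name='parse-log', max_retries=10)
def parse_log(project, job_log_url, job_guid, check_errors=False):
    # Pass the task itself as retry_task so _retry() can reschedule it.
    post_log_artifacts(project,
                       job_guid,
                       job_log_url,
                       parse_log,
                       extract_log_artifacts,
                       check_errors)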
Example #7
def post_log_artifacts(project,
                       job_guid,
                       job_log_url,
                       retry_task,
                       extract_artifacts_cb,
                       check_errors=False):
    """Post a list of artifacts to a job."""
    def _retry(e):
        # Initially retry after 1 minute, then for each subsequent retry
        # lengthen the retry time by another minute.
        retry_task.retry(exc=e, countdown=(1 + retry_task.request.retries) * 60)
        # .retry() raises a RetryTaskError exception,
        # so nothing after this function will be executed

    log_description = "%s %s (%s)" % (project, job_guid, job_log_url['url'])
    logger.debug("Downloading/parsing log for %s", log_description)

    credentials = OAuthCredentials.get_credentials(project)
    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )

    try:
        artifact_list = extract_artifacts_cb(job_log_url['url'],
                                             job_guid, check_errors)
    except Exception as e:
        update_parse_status(req, job_log_url, 'failed')
        if isinstance(e, urllib2.HTTPError) and e.code == 404:
            logger.debug("Log not found for %s", log_description)
            return
        logger.error("Failed to download/parse log for %s: %s", log_description, e)
        _retry(e)

    # store the artifacts generated
    tac = TreeherderArtifactCollection()
    for artifact in artifact_list:
        ta = tac.get_artifact({
            "job_guid": artifact[0],
            "name": artifact[1],
            "type": artifact[2],
            "blob": artifact[3]
        })
        tac.add(ta)

    try:
        req.post(tac)
        update_parse_status(req, job_log_url, 'parsed')
        logger.debug("Finished posting artifact for %s %s", project, job_guid)
    except Exception as e:
        logger.error("Failed to upload parsed artifact for %s: %s", log_description, e)
        _retry(e)
Example #8
    def post_request(self, job_collection):
        self.worker.loggerdeco.debug('AutophoneTreeherder.post_request: %s' % job_collection.__dict__)
        if not self.url or not self.worker.build.revision_hash:
            self.worker.loggerdeco.debug('AutophoneTreeherder.post_request: no url/revision hash')
            return

        req = TreeherderRequest(
            protocol=self.protocol,
            host=self.server,
            project=self.worker.build.tree,
            oauth_key=self.credentials[self.worker.build.tree]['consumer_key'],
            oauth_secret=self.credentials[self.worker.build.tree]['consumer_secret']
            )

        try:
            for attempt in range(1, self.retries + 1):
                response = req.post(job_collection)
                # Read the body once; an httplib response body can only
                # be consumed a single time.
                body = response.read()
                self.worker.loggerdeco.debug('AutophoneTreeherder.post_request attempt %d: '
                                             'body: %s headers: %s msg: %s status: %s '
                                             'reason: %s' % (
                                                 attempt,
                                                 body,
                                                 response.getheaders(),
                                                 response.msg,
                                                 response.status,
                                                 response.reason))
                if response.reason == 'OK':
                    break
                msg = ('Attempt %d to post result to Treeherder failed.\n\n'
                       'Response:\n'
                       'body: %s\n'
                       'headers: %s\n'
                       'msg: %s\n'
                       'status: %s\n'
                       'reason: %s\n' % (
                           attempt,
                           body, response.getheaders(),
                           response.msg, response.status,
                           response.reason))
                self.worker.loggerdeco.error(msg)
                self.worker.mailer.send('Attempt %d for Phone %s failed to post to Treeherder' %
                                        (attempt, self.worker.phone.id), msg)
                time.sleep(self.retry_wait)
        except Exception as e:
            self.worker.loggerdeco.exception('Error submitting request to Treeherder')
            self.worker.mailer.send('Error submitting request to Treeherder',
                                    'Phone: %s\n'
                                    'TreeherderClientError: %s\n'
                                    'TreeherderJobCollection %s\n' % (
                                        self.worker.phone.id,
                                        e,
                                        job_collection.to_json()))
Example #9
    def test_send_without_oauth(
        self, mock_HTTPConnection, mock_time, mock_generate_nonce):

        """Can send data to the server."""
        mock_time.return_value = 1342229050
        mock_generate_nonce.return_value = "46810593"

        host = 'host'

        req = TreeherderRequest(
            protocol='http',
            host=host,
            project='project',
            oauth_key=None,
            oauth_secret=None,
            )

        mock_conn = mock_HTTPConnection.return_value
        mock_request = mock_conn.request
        mock_response = mock_conn.getresponse.return_value

        tjc = TreeherderJobCollection()

        for job in self.job_data:

            tjc.add(tjc.get_job(job))
            break

        response = req.post(tjc)

        self.assertEqual(mock_HTTPConnection.call_count, 1)
        self.assertEqual(mock_HTTPConnection.call_args[0][0], host)
        self.assertEqual(mock_response, response)
        self.assertEqual(mock_request.call_count, 1)

        uri = req.get_uri(tjc)

        method, path, data, header = mock_request.call_args[0]
        self.assertEqual(method, "POST")

        deserialized_data = json.loads(data)
        self.assertEqual(
            deserialized_data,
            tjc.get_collection_data()
            )

        self.assertEqual(
            header['Content-Type'],
            'application/json',
            )
Example #10
    def test_send_without_oauth(self, mock_HTTPConnection, mock_time,
                                mock_generate_nonce):
        """Can send data to the server."""
        mock_time.return_value = 1342229050
        mock_generate_nonce.return_value = "46810593"

        host = 'host'

        req = TreeherderRequest(
            protocol='http',
            host=host,
            project='project',
            oauth_key=None,
            oauth_secret=None,
        )

        mock_conn = mock_HTTPConnection.return_value
        mock_request = mock_conn.request
        mock_response = mock_conn.getresponse.return_value

        tjc = TreeherderJobCollection()

        for job in self.job_data:

            tjc.add(tjc.get_job(job))
            break

        response = req.post(tjc)

        self.assertEqual(mock_HTTPConnection.call_count, 1)
        self.assertEqual(mock_HTTPConnection.call_args[0][0], host)
        self.assertEqual(mock_response, response)
        self.assertEqual(mock_request.call_count, 1)

        uri = req.get_uri(tjc)

        method, path, data, header = mock_request.call_args[0]
        self.assertEqual(method, "POST")

        deserialized_data = json.loads(data)
        self.assertEqual(deserialized_data, tjc.get_collection_data())

        self.assertEqual(
            header['Content-Type'],
            'application/json',
        )
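Both versions of this test receive mock objects as arguments, which implies a stack of mock.patch decorators on the test method. A sketch of the likely stack; the test class name and the patch target paths are assumptions, not confirmed against the thclient source.

import unittest

from mock import patch

class TreeherderRequestTest(unittest.TestCase):  # hypothetical class name

    # Decorators apply bottom-up, so the innermost patch (HTTPConnection)
    # supplies the first mock argument.
    @patch('thclient.client.oauth.generate_nonce')    # assumed target path
    @patch('thclient.client.time.time')               # assumed target path
    @patch('thclient.client.httplib.HTTPConnection')  # assumed target path
    def test_send_without_oauth(self, mock_HTTPConnection, mock_time,
                                mock_generate_nonce):
        pass  # test body as in the examples above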
Example #11
    def load(self, th_collections):

        for project in th_collections:

            credentials = OAuthCredentials.get_credentials(project)

            th_request = TreeherderRequest(
                protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
                host=settings.TREEHERDER_REQUEST_HOST,
                project=project,
                oauth_key=credentials.get('consumer_key', None),
                oauth_secret=credentials.get('consumer_secret', None))

            response = th_request.post(th_collections[project])

            if not response or response.status != 200:
                message = response.read() if response else "no response"
                logger.error("collection loading failed: {0}".format(message))
Example #12
    def load(self, th_collections):

        for project in th_collections:

            credentials = OAuthCredentials.get_credentials(project)

            th_request = TreeherderRequest(
                protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
                host=settings.TREEHERDER_REQUEST_HOST,
                project=project,
                oauth_key=credentials.get('consumer_key', None),
                oauth_secret=credentials.get('consumer_secret', None)
            )

            response = th_request.post(th_collections[project])

            if not response or response.status != 200:
                message = response.read() if response else "no response"
                logger.error("collection loading failed: {0}".format(message))
Example #13
    def test_send_artifact_collection(self, mock_send):
        """Can add an artifact collection to a TreeherderRequest."""

        tac = TreeherderArtifactCollection()

        for artifact in self.artifact_data:

            tac.add(tac.get_artifact(artifact))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
        )

        req.post(tac)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(tac.to_json(), mock_send.call_args_list[0][1]["data"])
Example #14
    def test_send_result_collection(self, mock_send):
        """Can add a treeherder collection to a TreeherderRequest."""

        trc = TreeherderResultSetCollection()

        for resultset in self.resultset_data:

            trc.add(trc.get_resultset(resultset))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
        )

        req.post(trc)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(trc.to_json(), mock_send.call_args_list[0][1]['data'])
Example #15
    def test_send_job_collection(self, mock_send):
        """Can add a treeherder collection to a TreeherderRequest."""

        tjc = TreeherderJobCollection()

        for job in self.job_data:

            tjc.add(tjc.get_job(job))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
        )

        req.post(tjc)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(tjc.to_json(), mock_send.call_args_list[0][1]['data'])
Example #16
    def load(self, th_collections):
        for project in th_collections:

            credentials = OAuthCredentials.get_credentials(project)

            th_request = TreeherderRequest(
                protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
                host=settings.TREEHERDER_REQUEST_HOST,
                project=project,
                oauth_key=credentials.get('consumer_key', None),
                oauth_secret=credentials.get('consumer_secret', None)
            )

            logger.info(
                "collection loading request: {0}".format(
                    th_request.get_uri(th_collections[project].endpoint_base)))
            response = th_request.post(th_collections[project])

            if not response or response.status != 200:
                message = response.read() if response else "no response"
                logger.error('[{0}] Error posting data to {1}: {2}'.format(
                    project, th_collections[project].endpoint_base, message))
Example #17
    def test_send_artifact_collection(self, mock_send):
        """Can add an artifact collection to a TreeherderRequest."""

        tac = TreeherderArtifactCollection()

        for artifact in self.artifact_data:

            tac.add(tac.get_artifact(artifact))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
        )

        req.post(tac)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(
            tac.to_json(),
            mock_send.call_args_list[0][1]["data"]
        )
Example #18
    def test_send_result_collection(self, mock_send):
        """Can add a treeherder collection to a TreeherderRequest."""

        trc = TreeherderResultSetCollection()

        for resultset in self.resultset_data:

            trc.add(trc.get_resultset(resultset))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
            )

        req.post(trc)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(
            trc.to_json(),
            mock_send.call_args_list[0][1]['data']
            )
Example #19
    def test_send_job_collection(self, mock_send):
        """Can add a treeherder collection to a TreeherderRequest."""

        tjc = TreeherderJobCollection()

        for job in self.job_data:

            tjc.add(tjc.get_job(job))

        req = TreeherderRequest(
            protocol='http',
            host='host',
            project='project',
            oauth_key='key',
            oauth_secret='secret',
            )

        req.post(tjc)

        self.assertEqual(mock_send.call_count, 1)
        self.assertEqual(
            tjc.to_json(),
            mock_send.call_args_list[0][1]['data']
            )
Example #20
def post_collection(project,
                    th_collection,
                    status=None,
                    expect_errors=False,
                    consumer_key=None,
                    consumer_secret=None):

    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())

    credentials = OAuthCredentials.get_credentials(project)

    # The only time the credentials should be overridden is when
    # a client needs to test authentication failure handling
    if consumer_key:
        credentials['consumer_key'] = consumer_key

    if consumer_secret:
        credentials['consumer_secret'] = consumer_secret

    tr = TreeherderRequest(protocol='http',
                           host='localhost',
                           project=project,
                           oauth_key=credentials['consumer_key'],
                           oauth_secret=credentials['consumer_secret'])

    signed_uri = tr.oauth_client.get_signed_uri(
        th_collection.to_json(), tr.get_uri(th_collection.endpoint_base),
        'POST')

    response = TestApp(application).post_json(
        str(signed_uri),
        params=th_collection.get_collection_data(),
        status=status,
        expect_errors=expect_errors)

    return response
Example #21
def parse_log(project, job_log_url, job_guid, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """

    # if parse_status is not available, consider it pending
    parse_status = job_log_url.get("parse_status", "pending")
    # don't parse a log if it's already been parsed
    if parse_status == "parsed":
        return

    # Create the request and endpoint before the try block so the except
    # handler can still report a failed parse if anything below raises.
    credentials = OAuthCredentials.get_credentials(project)
    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )
    update_endpoint = 'job-log-url/{0}/update_parse_status'.format(
        job_log_url['id'])

    try:

        logger.debug("Downloading and extracting log information for guid "
                     "'%s' (from %s)" % (job_guid, job_log_url['url']))

        artifact_list = extract_log_artifacts(job_log_url['url'], job_guid,
                                              check_errors)
        # store the artifacts generated
        tac = TreeherderArtifactCollection()
        for artifact in artifact_list:
            ta = tac.get_artifact({
                "job_guid": artifact[0],
                "name": artifact[1],
                "type": artifact[2],
                "blob": artifact[3]
            })
            tac.add(ta)

        logger.debug("Finished downloading and processing artifact for guid "
                     "'%s'" % job_guid)

        req.post(tac)

        # send an update to job_log_url
        # the job_log_url status changes from pending to parsed
        current_timestamp = time.time()
        req.send(update_endpoint,
                 method='POST',
                 data={
                     'parse_status': 'parsed',
                     'parse_timestamp': current_timestamp
                 })

        logger.debug("Finished posting artifact for guid '%s'" % job_guid)

    except Exception as e:
        # send an update to job_log_url
        # the job_log_url status changes from pending/running to failed
        logger.warn("Failed to download and/or parse artifact for guid '%s'" %
                    job_guid)
        current_timestamp = time.time()
        req.send(update_endpoint,
                 method='POST',
                 data={
                     'parse_status': 'failed',
                     'parse_timestamp': current_timestamp
                 })
        # Initially retry after 1 minute, then for each subsequent retry
        # lengthen the retry time by another minute.
        parse_log.retry(exc=e, countdown=(1 + parse_log.request.retries) * 60)
Example #22
    def post_to_treeherder(self, tests):
        version = mozversion.get_version(
            binary=self.bin, sources=self.sources, dm_type="adb", device_serial=self.device_serial
        )

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get("device_id")
        device_firmware_version_release = version.get("device_firmware_version_release")

        if not device:
            self.logger.error("Submitting to Treeherder is currently limited " "to devices.")
            return

        try:
            group = DEVICE_GROUP_MAP[device][device_firmware_version_release]
            job.add_group_name(group["name"])
            job.add_group_symbol(group["symbol"])
            job.add_job_name("Gaia Python Integration Test (%s)" % group["symbol"])
            job.add_job_symbol("Gip")
        except KeyError:
            self.logger.error(
                "Unknown device id: %s or device firmware "
                "version: %s. Unable to determine Treeherder "
                "group. Supported devices: %s"
                % (
                    device,
                    device_firmware_version_release,
                    ["%s: %s" % (k, [fw for fw in v.keys()]) for k, v in DEVICE_GROUP_MAP.iteritems()],
                )
            )
            return

        # Determine revision hash from application revision
        revision = version["application_changeset"]
        project = version["application_repository"].split("/")[-1]
        lookup_url = urljoin(self.treeherder_url, "api/project/%s/revision-lookup/?revision=%s" % (project, revision))
        self.logger.debug("Getting revision hash from: %s" % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), (
            "Unable to determine revision hash for %s. Perhaps it has not "
            "been ingested by Treeherder?" % revision
        )
        revision_hash = response.json()[revision]["revision_hash"]
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name("b2g")
        job.add_state("completed")

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result("testfailed")
        else:
            job.add_result("success")

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info("b2g", "b2g-device-image", "x86")
        job.add_machine_info("b2g", "b2g-device-image", "x86")

        # All B2G device builds are currently opt builds
        job.add_option_collection({"opt": True})

        date_format = "%d %b %Y %H:%M:%S"
        job_details = [
            {
                "content_type": "link",
                "title": "Gaia revision:",
                "url": "https://github.com/mozilla-b2g/gaia/commit/%s" % version.get("gaia_changeset"),
                "value": version.get("gaia_changeset"),
            },
            {
                "content_type": "text",
                "title": "Gaia date:",
                "value": version.get("gaia_date")
                and time.strftime(date_format, time.localtime(int(version.get("gaia_date")))),
            },
            {"content_type": "text", "title": "Device identifier:", "value": version.get("device_id")},
            {
                "content_type": "text",
                "title": "Device firmware (date):",
                "value": version.get("device_firmware_date")
                and time.strftime(date_format, time.localtime(int(version.get("device_firmware_date")))),
            },
            {
                "content_type": "text",
                "title": "Device firmware (incremental):",
                "value": version.get("device_firmware_version_incremental"),
            },
            {
                "content_type": "text",
                "title": "Device firmware (release):",
                "value": version.get("device_firmware_version_release"),
            },
        ]

        ci_url = os.environ.get("BUILD_URL")
        if ci_url:
            job_details.append({"url": ci_url, "value": ci_url, "content_type": "link", "title": "CI build:"})

        # Attach logcat
        adb_device = ADBDevice(self.device_serial)
        with tempfile.NamedTemporaryFile(suffix="logcat.txt") as f:
            f.writelines(adb_device.get_logcat())
            self.logger.debug("Logcat stored in: %s" % f.name)
            try:
                url = self.upload_to_s3(f.name)
                job_details.append({"url": url, "value": "logcat.txt", "content_type": "link", "title": "Log:"})
            except S3UploadError:
                job_details.append({"value": "Failed to upload logcat.txt", "content_type": "text", "title": "Error:"})

        # Attach log files
        handlers = [
            handler
            for handler in self.logger.handlers
            if isinstance(handler, StreamHandler) and os.path.exists(handler.stream.name)
        ]
        for handler in handlers:
            path = handler.stream.name
            filename = os.path.split(path)[-1]
            try:
                url = self.upload_to_s3(path)
                job_details.append({"url": url, "value": filename, "content_type": "link", "title": "Log:"})
                # Add log reference
                if (
                    type(handler.formatter) is TbplFormatter
                    or type(handler.formatter) is LogLevelFilter
                    and type(handler.formatter.inner) is TbplFormatter
                ):
                    job.add_log_reference(filename, url)
            except S3UploadError:
                job_details.append(
                    {"value": "Failed to upload %s" % filename, "content_type": "text", "title": "Error:"}
                )

        # Attach reports
        for report in [self.html_output]:
            if report is not None:
                filename = os.path.split(report)[-1]
                try:
                    url = self.upload_to_s3(report)
                    job_details.append({"url": url, "value": filename, "content_type": "link", "title": "Report:"})
                except S3UploadError:
                    job_details.append(
                        {"value": "Failed to upload %s" % filename, "content_type": "text", "title": "Error:"}
                    )

        if job_details:
            job.add_artifact("Job Info", "json", {"job_details": job_details})

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=os.environ.get("TREEHERDER_KEY"),
            oauth_secret=os.environ.get("TREEHERDER_SECRET"),
        )
        self.logger.debug("Sending results to Treeherder: %s" % job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug("Response: %s" % response.read())
        assert response.status == 200, "Failed to send results!"
        self.logger.info(
            "Results are available to view at: %s"
            % (urljoin(self.treeherder_url, "/ui/#/jobs?repo=%s&revision=%s" % (project, revision)))
        )
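upload_to_s3 and S3UploadError are defined elsewhere in this class; a hedged sketch of what they might look like with boto, assuming a hypothetical S3_UPLOAD_BUCKET environment variable for the bucket name.

import os

import boto
from boto.s3.key import Key

class S3UploadError(Exception):
    pass

def upload_to_s3(self, path):
    # Sketch only: uploads a file and returns a public URL.
    try:
        conn = boto.connect_s3()  # credentials from AWS_* env vars
        bucket = conn.get_bucket(os.environ['S3_UPLOAD_BUCKET'])
        key = Key(bucket)
        key.key = os.path.basename(path)
        key.set_contents_from_filename(path)
        key.set_acl('public-read')
        return key.generate_url(expires_in=0, query_auth=False)
    except Exception:
        raise S3UploadError('Failed to upload %s' % path)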
Example #23
    def post_to_treeherder(self, tests):
        self.logger.info('\nTREEHERDER\n----------')
        version = mozversion.get_version(
            binary=self.bin, sources=self.sources,
            dm_type='adb', device_serial=self.device_serial)

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get('device_id')
        if not device:
            self.logger.error('Submitting to Treeherder is currently limited '
                              'to devices.')
            return

        try:
            group = DEVICE_GROUP_MAP[device]
            job.add_group_name(group['name'])
            job.add_group_symbol(group['symbol'])
            job.add_job_name('Gaia Python Integration Test (%s)' % device)
            job.add_job_symbol('Gip')
        except KeyError:
            self.logger.error('Unknown device id: %s, unable to determine '
                              'Treeherder group. Supported device ids: %s' % (
                                  device, DEVICE_GROUP_MAP.keys()))
            return

        # Determine revision hash from application revision
        revision = version['application_changeset']
        project = version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            self.treeherder_url,
            'api/project/%s/revision-lookup/?revision=%s' % (
                project, revision))
        self.logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result('testfailed')
        else:
            job.add_result('success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        # All B2G device builds are currently opt builds
        job.add_option_collection({'opt': True})

        # TODO: Add log reference
        # job.add_log_reference()

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   version.get('gaia_changeset'),
            'value': version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': version.get('device_firmware_date') and time.strftime(
                date_format, time.localtime(int(
                    version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': version.get('device_firmware_version_release')
        }]

        if self.ci_url:
            job_details.append({
                'url': self.ci_url,
                'value': self.ci_url,
                'content_type': 'link',
                'title': 'CI build:'})

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        # TODO: Add XML/HTML reports as artifacts
        # job.add_artifact()

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=self.treeherder_key,
            oauth_secret=self.treeherder_secret)
        self.logger.debug('Sending results to Treeherder: %s' %
                          job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug('Response: %s' % response.read())
        assert response.status == 200, 'Failed to send results!'
        self.logger.info('Results are available to view at: %s' % (
            urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' % (
                project, revision))))
Example #24
    def post_to_treeherder(self, tests):
        version = mozversion.get_version(
            binary=self.bin, sources=self.sources,
            dm_type='adb', device_serial=self.device_serial)

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get('device_id')
        device_firmware_version_release = \
            version.get('device_firmware_version_release')

        if not device:
            self.logger.error('Submitting to Treeherder is currently limited '
                              'to devices.')
            return

        try:
            group = DEVICE_GROUP_MAP[device][device_firmware_version_release]
            job.add_group_name(group['name'])
            job.add_group_symbol(group['symbol'])
            job.add_job_name('Gaia Python Integration Test (%s)' % group['symbol'])
            job.add_job_symbol('Gip')
        except KeyError:
            self.logger.error('Unknown device id: %s or device firmware '
                              'version: %s. Unable to determine Treeherder '
                              'group. Supported devices: %s'
                              % (device, device_firmware_version_release,
                                 ['%s: %s' % (k, [fw for fw in v.keys()])
                                  for k, v in DEVICE_GROUP_MAP.iteritems()]))
            return

        # Determine revision hash from application revision
        revision = version['application_changeset']
        project = version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            self.treeherder_url,
            'api/project/%s/revision-lookup/?revision=%s' % (
                project, revision))
        self.logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result('testfailed')
        else:
            job.add_result('success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        # All B2G device builds are currently opt builds
        job.add_option_collection({'opt': True})

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   version.get('gaia_changeset'),
            'value': version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': version.get('device_firmware_date') and time.strftime(
                date_format, time.localtime(int(
                    version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': version.get('device_firmware_version_release')
        }]

        ci_url = os.environ.get('BUILD_URL')
        if ci_url:
            job_details.append({
                'url': ci_url,
                'value': ci_url,
                'content_type': 'link',
                'title': 'CI build:'})

        # Attach logcat
        adb_device = ADBDevice(self.device_serial)
        with tempfile.NamedTemporaryFile(suffix='logcat.txt') as f:
            f.writelines(adb_device.get_logcat())
            self.logger.debug('Logcat stored in: %s' % f.name)
            try:
                url = self.upload_to_s3(f.name)
                job_details.append({
                    'url': url,
                    'value': 'logcat.txt',
                    'content_type': 'link',
                    'title': 'Log:'})
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload logcat.txt',
                    'content_type': 'text',
                    'title': 'Error:'})

        # Attach log files
        handlers = [handler for handler in self.logger.handlers
                    if isinstance(handler, StreamHandler) and
                    os.path.exists(handler.stream.name)]
        for handler in handlers:
            path = handler.stream.name
            filename = os.path.split(path)[-1]
            try:
                url = self.upload_to_s3(path)
                job_details.append({
                    'url': url,
                    'value': filename,
                    'content_type': 'link',
                    'title': 'Log:'})
                # Add log reference
                if type(handler.formatter) is TbplFormatter or \
                        type(handler.formatter) is LogLevelFilter and \
                        type(handler.formatter.inner) is TbplFormatter:
                    job.add_log_reference(filename, url)
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload %s' % filename,
                    'content_type': 'text',
                    'title': 'Error:'})

        # Attach reports
        for report in [self.html_output, self.xml_output]:
            if report is not None:
                filename = os.path.split(report)[-1]
                try:
                    url = self.upload_to_s3(report)
                    job_details.append({
                        'url': url,
                        'value': filename,
                        'content_type': 'link',
                        'title': 'Report:'})
                except S3UploadError:
                    job_details.append({
                        'value': 'Failed to upload %s' % filename,
                        'content_type': 'text',
                        'title': 'Error:'})

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=os.environ.get('TREEHERDER_KEY'),
            oauth_secret=os.environ.get('TREEHERDER_SECRET'))
        self.logger.debug('Sending results to Treeherder: %s' %
                          job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug('Response: %s' % response.read())
        assert response.status == 200, 'Failed to send results!'
        self.logger.info('Results are available to view at: %s' % (
            urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' % (
                project, revision))))
Example #25
def main():
    # argv values arrive as strings; the timestamps should be numeric
    submit_time, start_time, end_time = [int(arg) for arg in argv[1:4]]

    config = get_config()

    app_revision, app_repository = get_app_information(config)
    files = get_files(config)
    build_version = get_build_version(os.path.basename(files[0]))
    push_time = int(os.stat(files[0]).st_ctime)
    results = steepleparse.parse(config['system']['logfile'])
    result_set_hash = create_revision_hash()

    trsc = TreeherderResultSetCollection()
    trs = trsc.get_resultset()

    trs.add_revision_hash(result_set_hash)
    trs.add_author('Firefox Nightly')
    trs.add_push_timestamp(push_time)

    tr = trs.get_revision()

    tr.add_revision(app_revision)
    tr.add_author('Firefox Nightly')
    tr.add_comment(build_version)
    tr.add_files([os.path.basename(f) for f in files])
    tr.add_repository(app_repository)

    trs.add_revision(tr)
    trsc.add(trs)

    tjc = TreeherderJobCollection()
    tj = tjc.get_job()

    tj.add_revision_hash(result_set_hash)
    tj.add_project(config['repo']['project'])
    tj.add_job_guid(str(uuid.uuid4()))

    tj.add_group_name('WebRTC QA Tests')
    tj.add_group_symbol('WebRTC')
    tj.add_job_name('Endurance')
    tj.add_job_symbol('end')

    tj.add_build_info('linux', 'linux64', 'x86_64')
    tj.add_machine_info('linux', 'linux64', 'x86_64')
    tj.add_description('WebRTC Sunny Day')
    tj.add_option_collection({'opt': True})  # must not be {}!
    tj.add_reason('testing')
    tj.add_who('Mozilla Platform QA')


    tj.add_submit_timestamp(submit_time)
    tj.add_start_timestamp(start_time)
    tj.add_end_timestamp(end_time)

    tj.add_state('completed')
    tj.add_machine(socket.gethostname())

    result_string = get_result_string(results)
    tj.add_result(result_string)
    if result_string != 'busted':
        summary = get_result_summary(results)
        tj.add_artifact('Job Info', 'json', summary)

    tj.add_artifact('Results', 'json', results)

    tjc.add(tj)

    print 'trsc = ' + json.dumps(json.loads(trsc.to_json()), sort_keys=True,
                                 indent=4, separators=(',', ': '))

    print 'tjc = ' + json.dumps(json.loads(tjc.to_json()), sort_keys=True,
                                indent=4, separators=(',', ': '))

    req = TreeherderRequest(
        protocol='http',
        host=config['repo']['host'],
        project=config['repo']['project'],
        oauth_key=config['credentials']['key'],
        oauth_secret=config['credentials']['secret']
    )

    req.post(trsc)
    req.post(tjc)
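get_config is not shown; a minimal sketch of the ini-backed shape this script assumes. The filename is hypothetical, the section and key names are taken from the lookups in main(), and the values are illustrative.

import ConfigParser  # Python 2, matching the print statements above

def get_config(path='webrtc.ini'):  # hypothetical config filename
    parser = ConfigParser.ConfigParser()
    parser.read(path)
    return dict((section, dict(parser.items(section)))
                for section in parser.sections())

# Expected sections and keys:
#   [system]      logfile = <path to steepleparse log>
#   [repo]        host = <treeherder host>
#                 project = <project name>
#   [credentials] key = <oauth consumer key>
#                 secret = <oauth consumer secret>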
Example #26
    def post_to_treeherder(self, tests):
        self.logger.info('\nTREEHERDER\n----------')
        version = mozversion.get_version(binary=self.bin,
                                         sources=self.sources,
                                         dm_type='adb',
                                         device_serial=self.device_serial)

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get('device_id')
        if not device:
            self.logger.error('Submitting to Treeherder is currently limited '
                              'to devices.')
            return

        try:
            group = DEVICE_GROUP_MAP[device]
            job.add_group_name(group['name'])
            job.add_group_symbol(group['symbol'])
            job.add_job_name('Gaia Python Integration Test (%s)' % device)
            job.add_job_symbol('Gip')
        except KeyError:
            self.logger.error('Unknown device id: %s, unable to determine '
                              'Treeherder group. Supported device ids: %s' %
                              (device, DEVICE_GROUP_MAP.keys()))
            return

        # Determine revision hash from application revision
        revision = version['application_changeset']
        project = version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            self.treeherder_url,
            'api/project/%s/revision-lookup/?revision=%s' %
            (project, revision))
        self.logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result('testfailed')
        else:
            job.add_result('success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        # All B2G device builds are currently opt builds
        job.add_option_collection({'opt': True})

        # TODO: Add log reference
        # job.add_log_reference()

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   version.get('gaia_changeset'),
            'value': version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': version.get('device_firmware_date') and time.strftime(
                date_format, time.localtime(int(
                    version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': version.get('device_firmware_version_release')
        }]

        if self.ci_url:
            job_details.append({
                'url': self.ci_url,
                'value': self.ci_url,
                'content_type': 'link',
                'title': 'CI build:'
            })

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        # TODO: Add XML/HTML reports as artifacts
        # job.add_artifact()

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(protocol=url.scheme,
                                    host=url.hostname,
                                    project=project,
                                    oauth_key=self.treeherder_key,
                                    oauth_secret=self.treeherder_secret)
        self.logger.debug('Sending results to Treeherder: %s' %
                          job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug('Response: %s' % response.read())
        assert response.status == 200, 'Failed to send results!'
        self.logger.info(
            'Results are available to view at: %s' %
            (urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' %
                     (project, revision))))
Example #27
def post_log_artifacts(project,
                       job_guid,
                       job_log_url,
                       retry_task,
                       extract_artifacts_cb,
                       check_errors=False):
    """Post a list of artifacts to a job."""
    def _retry(e):
        # Initially retry after 1 minute, then for each subsequent retry
        # lengthen the retry time by another minute.
        retry_task.retry(exc=e, countdown=(1 + retry_task.request.retries) * 60)
        # .retry() raises a RetryTaskError exception,
        # so nothing after this function will be executed

    credentials = OAuthCredentials.get_credentials(project)
    update_endpoint = 'job-log-url/{0}/update_parse_status'.format(
        job_log_url['id']
    )

    log_description = "%s %s (%s)" % (project, job_guid, job_log_url['url'])
    logger.debug("Downloading/parsing log for %s", log_description)

    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )

    try:
        artifact_list = extract_artifacts_cb(job_log_url['url'],
                                             job_guid, check_errors)
    except Exception as e:
        logger.error("Failed to download/parse log for %s: %s", log_description, e)
        current_timestamp = time.time()
        req.send(
            update_endpoint,
            method='POST',
            data={
                'parse_status': 'failed',
                'parse_timestamp': current_timestamp
            }
        )
        _retry(e)

    # store the artifacts generated
    tac = TreeherderArtifactCollection()
    for artifact in artifact_list:
        ta = tac.get_artifact({
            "job_guid": artifact[0],
            "name": artifact[1],
            "type": artifact[2],
            "blob": artifact[3]
        })
        tac.add(ta)

    try:
        req.post(tac)

        # send an update to job_log_url
        # the job_log_url status changes from pending to parsed
        current_timestamp = time.time()
        req.send(
            update_endpoint,
            method='POST',
            data={
                'parse_status': 'parsed',
                'parse_timestamp': current_timestamp
            }
        )
        logger.debug("Finished posting artifact for %s %s", project, job_guid)
    except Exception as e:
        logger.error("Failed to upload parsed artifact for %s: %s", log_description, e)
        _retry(e)
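
The callback passed as extract_artifacts_cb is expected to return an iterable of (job_guid, name, type, blob) tuples, which the loop above repackages into a TreeherderArtifactCollection. A minimal sketch of such a callback (hypothetical, for illustration only) could look like this:

import json

def extract_artifacts_cb(log_url, job_guid, check_errors=False):
    # Hypothetical sketch: download and parse the log at log_url, then
    # package each parsed section as a (job_guid, name, type, blob)
    # tuple matching the shape consumed by the loop above.
    artifacts = {'text_log_summary': {'log_url': log_url}}
    if check_errors:
        # A real implementation would collect parsed error lines here.
        artifacts['errors'] = []
    return [(job_guid, name, 'json', json.dumps(blob))
            for name, blob in artifacts.items()]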
Example #28
def parse_log(project, job_log_url, job_guid, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """

    # if parse_status is not available, consider it pending
    parse_status = job_log_url.get("parse_status", "pending")
    # don't parse a log if it's already been parsed
    if parse_status == "parsed":
        return

    try:
        credentials = OAuthCredentials.get_credentials(project)
        req = TreeherderRequest(
            protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
            host=settings.TREEHERDER_REQUEST_HOST,
            project=project,
            oauth_key=credentials.get('consumer_key', None),
            oauth_secret=credentials.get('consumer_secret', None),
        )
        update_endpoint = 'job-log-url/{0}/update_parse_status'.format(
            job_log_url['id'])

        artifact_list = extract_log_artifacts(job_log_url['url'], job_guid,
                                              check_errors)
        # store the artifacts generated
        tac = TreeherderArtifactCollection()
        for artifact in artifact_list:
            ta = tac.get_artifact({
                "job_guid": artifact[0],
                "name": artifact[1],
                "type": artifact[2],
                "blob": artifact[3]
            })
            tac.add(ta)

        req.post(tac)

        # send an update to job_log_url
        # the job_log_url status changes from pending to parsed
        current_timestamp = time.time()
        req.send(update_endpoint,
                 method='POST',
                 data={
                     'parse_status': 'parsed',
                     'parse_timestamp': current_timestamp
                 })
    except Exception as e:
        # send an update to job_log_url
        # the job_log_url status changes from pending/running to failed
        current_timestamp = time.time()
        req.send(update_endpoint,
                 method='POST',
                 data={
                     'parse_status': 'failed',
                     'parse_timestamp': current_timestamp
                 })
        # for every retry, set the countdown to 10 minutes
        # .retry() raises a RetryTaskError exception,
        # so nothing below this line will be executed.
        parse_log.retry(exc=e, countdown=10 * 60)
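
The bare parse_log.retry(exc=e, countdown=10 * 60) call only works because parse_log is registered as a task on a retrying task queue. A minimal sketch of that wiring, assuming a Celery 3-style module-level task decorator (the task name and max_retries value here are illustrative, not taken from the original):

from celery import task

@task(name='parse-log', max_retries=10)
def parse_log(project, job_log_url, job_guid, check_errors=False):
    # Body as in the example above. Calling parse_log.retry(exc=e,
    # countdown=10 * 60) re-queues the task and raises RetryTaskError,
    # so nothing after the .retry() call executes.
    pass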
Example #29
    def post_to_treeherder(self, script, treeherder_url):
        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()
        job.add_group_name(self.device_properties['name'])
        job.add_group_symbol(self.device_properties['symbol'])
        job.add_job_name('Orangutan Monkey Script (%s)' %
                         self.device_properties.get('symbol'))
        job.add_job_symbol('Om')

        # Determine revision hash from application revision
        revision = self.version['application_changeset']
        project = self.version['application_repository'].split('/')[-1]
        lookup_url = urljoin(treeherder_url,
                             'api/project/%s/revision-lookup/?revision=%s' % (
                                 project, revision))
        self._logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')
        job.add_result('testfailed' if self.runner.crashed else 'success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        if self.is_debug:
            job.add_option_collection({'debug': True})
        else:
            job.add_option_collection({'opt': True})

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   self.version.get('gaia_changeset'),
            'value': self.version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': self.version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(
                    self.version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': self.version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': self.version.get('device_firmware_date') and
                     time.strftime(date_format, time.localtime(int(
                         self.version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': self.version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': self.version.get('device_firmware_version_release')
        }]

        ci_url = os.environ.get('BUILD_URL')
        if ci_url:
            job_details.append({
                'url': ci_url,
                'value': ci_url,
                'content_type': 'link',
                'title': 'CI build:'})

        # Attach log files
        handlers = [handler for handler in self._logger.handlers
                    if isinstance(handler, StreamHandler) and
                    os.path.exists(handler.stream.name)]
        for handler in handlers:
            path = handler.stream.name
            filename = os.path.split(path)[-1]
            try:
                url = self.upload_to_s3(path)
                job_details.append({
                    'url': url,
                    'value': filename,
                    'content_type': 'link',
                    'title': 'Log:'})
                # Add log reference
                if (type(handler.formatter) is TbplFormatter or
                        (type(handler.formatter) is LogLevelFilter and
                         type(handler.formatter.inner) is TbplFormatter)):
                    job.add_log_reference(filename, url)
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload %s' % filename,
                    'content_type': 'text',
                    'title': 'Error:'})

        # Attach script
        filename = os.path.split(script)[-1]
        try:
            url = self.upload_to_s3(script)
            job_details.append({
                'url': url,
                'value': filename,
                'content_type': 'link',
                'title': 'Script:'})
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload %s' % filename,
                'content_type': 'text',
                'title': 'Error:'})

        # Attach logcat
        filename = '%s.log' % self.runner.device.dm._deviceSerial
        path = os.path.join(self.temp_dir, filename)
        try:
            url = self.upload_to_s3(path)
            job_details.append({
                'url': url,
                'value': filename,
                'content_type': 'link',
                'title': 'Logcat:'})
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload %s' % filename,
                'content_type': 'text',
                'title': 'Error:'})

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        # Attach crash dumps
        if self.runner.crashed:
            crash_dumps = os.listdir(self.crash_dumps_path)
            for filename in crash_dumps:
                path = os.path.join(self.crash_dumps_path, filename)
                try:
                    url = self.upload_to_s3(path)
                    job_details.append({
                        'url': url,
                        'value': filename,
                        'content_type': 'link',
                        'title': 'Crash:'})
                except S3UploadError:
                    job_details.append({
                        'value': 'Failed to upload %s' % filename,
                        'content_type': 'text',
                        'title': 'Error:'})

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=os.environ.get('TREEHERDER_KEY'),
            oauth_secret=os.environ.get('TREEHERDER_SECRET'))
        self._logger.info('Sending results to Treeherder: %s' % treeherder_url)
        self._logger.debug('Job collection: %s' %
                           job_collection.to_json())
        response = request.post(job_collection)
        if response.status == 200:
            self._logger.debug('Response: %s' % response.read())
            self._logger.info('Results are available to view at: %s' % (
                urljoin(treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' % (
                    project, revision))))
        else:
            self._logger.error('Failed to send results to Treeherder! '
                               'Response: %s' % response.read())
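
The bare assert above fails outright if Treeherder has not yet ingested the revision. One way to make this more forgiving (a sketch, not part of the original code) is to poll the revision-lookup endpoint a few times before giving up:

import time

import requests

def lookup_revision_hash(lookup_url, revision, attempts=5, delay=30):
    # Hypothetical helper: retry the revision-lookup API, since
    # ingestion by Treeherder may lag slightly behind the push.
    for _ in range(attempts):
        response = requests.get(lookup_url)
        response.raise_for_status()
        data = response.json()
        if revision in data:
            return data[revision]['revision_hash']
        time.sleep(delay)
    raise Exception('Revision %s has not been ingested by Treeherder'
                    % revision)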
Example #30
def parse_log(project, job_log_url, job_guid, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """
    credentials = OAuthCredentials.get_credentials(project)
    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )
    update_endpoint = 'job-log-url/{0}/update_parse_status'.format(
        job_log_url['id'])

    try:
        log_url = job_log_url['url']
        bug_suggestions = []
        bugscache_uri = '{0}{1}'.format(settings.API_HOSTNAME,
                                        reverse("bugscache-list"))
        terms_requested = {}

        if log_url:
            # parse a log given its url
            artifact_bc = ArtifactBuilderCollection(log_url,
                                                    check_errors=check_errors)
            artifact_bc.parse()

            artifact_list = []
            for name, artifact in artifact_bc.artifacts.items():
                artifact_list.append(
                    (job_guid, name, 'json', json.dumps(artifact)))
            if check_errors:
                all_errors = artifact_bc.artifacts.get(
                    'Structured Log', {}).get('step_data',
                                              {}).get('all_errors', [])

                for err in all_errors:
                    # remove the mozharness prefix
                    clean_line = get_mozharness_substring(err['line'])
                    # get a meaningful search term out of the error line
                    search_term = get_error_search_term(clean_line)
                    bugs = dict(open_recent=[], all_others=[])

                    # collect open recent and all other bugs suggestions
                    if search_term:
                        if search_term not in terms_requested:
                            # retrieve the list of suggestions from the api
                            bugs = get_bugs_for_search_term(
                                search_term, bugscache_uri)
                            terms_requested[search_term] = bugs
                        else:
                            bugs = terms_requested[search_term]

                    if not bugs or not (bugs['open_recent']
                                        or bugs['all_others']):
                        # no suggestions, try to use
                        # the crash signature as search term
                        crash_signature = get_crash_signature(clean_line)
                        if crash_signature:
                            if crash_signature not in terms_requested:
                                bugs = get_bugs_for_search_term(
                                    crash_signature, bugscache_uri)
                                terms_requested[crash_signature] = bugs
                            else:
                                bugs = terms_requested[crash_signature]

                    bug_suggestions.append({
                        "search": clean_line,
                        "bugs": bugs
                    })

            artifact_list.append((job_guid, 'Bug suggestions', 'json',
                                  json.dumps(bug_suggestions)))

            # store the artifacts generated
            tac = TreeherderArtifactCollection()
            for artifact in artifact_list:
                ta = tac.get_artifact({
                    "job_guid": artifact[0],
                    "name": artifact[1],
                    "type": artifact[2],
                    "blob": artifact[3]
                })
                tac.add(ta)

            req.post(tac)

            # send an update to job_log_url
            # the job_log_url status changes
            # from pending to parsed
            current_timestamp = time.time()
            status = 'parsed'
            req.send(update_endpoint,
                     method='POST',
                     data={
                         'parse_status': status,
                         'parse_timestamp': current_timestamp
                     })

    except Exception as e:
        # send an update to job_log_url
        # the job_log_url status changes
        # from pending/running to failed
        current_timestamp = time.time()
        status = 'failed'
        req.send(update_endpoint,
                 method='POST',
                 data={
                     'parse_status': status,
                     'parse_timestamp': current_timestamp
                 })
        # .retry() raises a RetryTaskError exception, so it must run
        # after the status update; it re-queues the task and leaves a
        # trace in the log.
        parse_log.retry(exc=e)
Example #31
def parse_log(project, log_url, job_guid, resultset, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """
    mozharness_pattern = re.compile(
        r'^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?'
    )

    bugs_cache = {'open': {}, 'closed': {}}
    bug_suggestions = {'open': {}, 'closed': {}}

    status_publisher = JobStatusPublisher(settings.BROKER_URL)
    failure_publisher = JobFailurePublisher(settings.BROKER_URL)

    try:
        # return the resultset with the job id to identify if the UI wants
        # to fetch the whole thing.

        bugscache_uri = '{0}{1}'.format(
            settings.API_HOSTNAME,
            reverse("bugscache-list")
        )

        credentials = OAuthCredentials.get_credentials(project)

        if log_url:
            # parse a log given its url
            artifact_bc = ArtifactBuilderCollection(
                log_url,
                check_errors=check_errors,
            )
            artifact_bc.parse()

            artifact_list = []
            for name, artifact in artifact_bc.artifacts.items():
                artifact_list.append((job_guid, name, 'json', json.dumps(artifact)))

            if check_errors:
                all_errors = artifact_bc.artifacts['Structured Log']['step_data']['all_errors']
                for err in all_errors:
                    # remove the mozharness prefix
                    clean_line = mozharness_pattern.sub('', err['line']).strip()
                    # get a meaningful search term out of the error line
                    search_term = get_error_search_term(clean_line)
                    # collect open and closed bugs suggestions
                    for status in ('open', 'closed'):
                        if not search_term:
                            bug_suggestions[status][clean_line] = []
                            continue
                        if search_term not in bugs_cache[status]:
                            # retrieve the list of suggestions from the api
                            bugs_cache[status][search_term] = get_bugs_for_search_term(
                                search_term,
                                status,
                                bugscache_uri
                            )
                            # no suggestions, try to use the crash signature as search term
                            if not bugs_cache[status][search_term]:
                                crash_signature = get_crash_signature(search_term)
                                if crash_signature:
                                    bugs_cache[status][search_term] = get_bugs_for_search_term(
                                        crash_signature,
                                        status,
                                        bugscache_uri
                                    )
                        bug_suggestions[status][clean_line] = bugs_cache[status][search_term]

                artifact_list.append((job_guid, 'Open bugs', 'json', json.dumps(bug_suggestions['open'])))
                artifact_list.append((job_guid, 'Closed bugs', 'json', json.dumps(bug_suggestions['closed'])))

            # store the artifacts generated
            tac = TreeherderArtifactCollection()
            for artifact in artifact_list:
                ta = tac.get_artifact({
                    "job_guid": artifact[0],
                    "name": artifact[1],
                    "type": artifact[2],
                    "blob": artifact[3]
                })
                tac.add(ta)
            req = TreeherderRequest(
                protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
                host=settings.TREEHERDER_REQUEST_HOST,
                project=project,
                oauth_key=credentials.get('consumer_key', None),
                oauth_secret=credentials.get('consumer_secret', None),
            )
            req.send(tac)

        status_publisher.publish(job_guid, resultset, project, 'processed')
        if check_errors:
            failure_publisher.publish(job_guid, project)

    finally:
        status_publisher.disconnect()
        failure_publisher.disconnect()
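
Both this variant and the previous ones memoize bugscache lookups per search term so that repeated error lines only hit the API once. The same idea as a small standalone helper (a sketch using the three-argument get_bugs_for_search_term form from this example):

_bugs_cache = {}

def cached_bugs_for_term(search_term, status, bugscache_uri):
    # Memoize get_bugs_for_search_term per (status, term): repeated
    # error lines with the same search term reuse the cached result.
    key = (status, search_term)
    if key not in _bugs_cache:
        _bugs_cache[key] = get_bugs_for_search_term(
            search_term, status, bugscache_uri)
    return _bugs_cache[key]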
Example #32
def parse_log(project, job_log_url, job_guid, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """

    # if parse_status is not available, consider it pending
    parse_status = job_log_url.get("parse_status", "pending")
    # don't parse a log if it's already been parsed
    if parse_status == "parsed":
        return

    try:
        credentials = OAuthCredentials.get_credentials(project)
        req = TreeherderRequest(
            protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
            host=settings.TREEHERDER_REQUEST_HOST,
            project=project,
            oauth_key=credentials.get('consumer_key', None),
            oauth_secret=credentials.get('consumer_secret', None),
        )
        update_endpoint = 'job-log-url/{0}/update_parse_status'.format(
            job_log_url['id']
        )

        artifact_list = extract_log_artifacts(job_log_url['url'],
                                              job_guid, check_errors)
        # store the artifacts generated
        tac = TreeherderArtifactCollection()
        for artifact in artifact_list:
            ta = tac.get_artifact({
                "job_guid": artifact[0],
                "name": artifact[1],
                "type": artifact[2],
                "blob": artifact[3]
            })
            tac.add(ta)

        req.post(tac)

        # send an update to job_log_url
        # the job_log_url status changes from pending to parsed
        current_timestamp = time.time()
        req.send(
            update_endpoint,
            method='POST',
            data={
                'parse_status': 'parsed',
                'parse_timestamp': current_timestamp
            }
        )
    except Exception as e:
        # send an update to job_log_url
        # the job_log_url status changes from pending/running to failed
        current_timestamp = time.time()
        req.send(
            update_endpoint,
            method='POST',
            data={
                'parse_status': 'failed',
                'parse_timestamp': current_timestamp
            }
        )
        # for every retry, set the countdown to 10 minutes
        # .retry() raises a RetryTaskError exception,
        # so nothing below this line will be executed.
        parse_log.retry(exc=e, countdown=10 * 60)
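
As with the variant in Example #28, this function is meant to run asynchronously on a task queue; a typical invocation (a sketch, assuming Celery task registration and an illustrative routing key) would be:

parse_log.apply_async(
    args=[project, job_log_url, job_guid],
    kwargs={'check_errors': True},
    routing_key='parse_log'
)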
Example #33
    def post_to_treeherder(self, script, treeherder_url):
        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()
        job.add_group_name(self.device_properties['name'])
        job.add_group_symbol(self.device_properties['symbol'])
        job.add_job_name('Orangutan Monkey Script (%s)' %
                         self.device_properties.get('symbol'))
        job.add_job_symbol('Om')

        # Determine revision hash from application revision
        revision = self.version['application_changeset']
        project = self.version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            treeherder_url, 'api/project/%s/revision-lookup/?revision=%s' %
            (project, revision))
        self._logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')
        job.add_result('testfailed' if self.runner.crashed else 'success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        if self.is_debug:
            job.add_option_collection({'debug': True})
        else:
            job.add_option_collection({'opt': True})

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   self.version.get('gaia_changeset'),
            'value': self.version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': self.version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(
                    self.version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': self.version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': self.version.get('device_firmware_date') and
                     time.strftime(date_format, time.localtime(int(
                         self.version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': self.version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': self.version.get('device_firmware_version_release')
        }]

        ci_url = os.environ.get('BUILD_URL')
        if ci_url:
            job_details.append({
                'url': ci_url,
                'value': ci_url,
                'content_type': 'link',
                'title': 'CI build:'
            })

        # Attach log files
        handlers = [
            handler for handler in self._logger.handlers
            if isinstance(handler, StreamHandler)
            and os.path.exists(handler.stream.name)
        ]
        for handler in handlers:
            path = handler.stream.name
            filename = os.path.split(path)[-1]
            try:
                url = self.upload_to_s3(path)
                job_details.append({
                    'url': url,
                    'value': filename,
                    'content_type': 'link',
                    'title': 'Log:'
                })
                # Add log reference
                if (type(handler.formatter) is TbplFormatter or
                        (type(handler.formatter) is LogLevelFilter and
                         type(handler.formatter.inner) is TbplFormatter)):
                    job.add_log_reference(filename, url)
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload %s' % filename,
                    'content_type': 'text',
                    'title': 'Error:'
                })

        # Attach script
        filename = os.path.split(script)[-1]
        try:
            url = self.upload_to_s3(script)
            job_details.append({
                'url': url,
                'value': filename,
                'content_type': 'link',
                'title': 'Script:'
            })
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload %s' % filename,
                'content_type': 'text',
                'title': 'Error:'
            })

        # Attach logcat
        filename = '%s.log' % self.runner.device.dm._deviceSerial
        path = os.path.join(self.temp_dir, filename)
        try:
            url = self.upload_to_s3(path)
            job_details.append({
                'url': url,
                'value': filename,
                'content_type': 'link',
                'title': 'Logcat:'
            })
        except S3UploadError:
            job_details.append({
                'value': 'Failed to upload %s' % filename,
                'content_type': 'text',
                'title': 'Error:'
            })

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        # Attach crash dumps
        if self.runner.crashed:
            crash_dumps = os.listdir(self.crash_dumps_path)
            for filename in crash_dumps:
                path = os.path.join(self.crash_dumps_path, filename)
                try:
                    url = self.upload_to_s3(path)
                    job_details.append({
                        'url': url,
                        'value': filename,
                        'content_type': 'link',
                        'title': 'Crash:'
                    })
                except S3UploadError:
                    job_details.append({
                        'value': 'Failed to upload %s' % filename,
                        'content_type': 'text',
                        'title': 'Error:'
                    })

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=os.environ.get('TREEHERDER_KEY'),
            oauth_secret=os.environ.get('TREEHERDER_SECRET'))
        self._logger.info('Sending results to Treeherder: %s' % treeherder_url)
        self._logger.debug('Job collection: %s' % job_collection.to_json())
        response = request.post(job_collection)
        if response.status == 200:
            self._logger.debug('Response: %s' % response.read())
            self._logger.info(
                'Results are available to view at: %s' % (urljoin(
                    treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' %
                    (project, revision))))
        else:
            self._logger.error('Failed to send results to Treeherder! '
                               'Response: %s' % response.read())
Example #34
    def post_to_treeherder(self, tests):
        version = mozversion.get_version(binary=self.bin,
                                         sources=self.sources,
                                         dm_type='adb',
                                         device_serial=self.device_serial)

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get('device_id')
        device_firmware_version_release = \
            version.get('device_firmware_version_release')

        if not device:
            self.logger.error('Submitting to Treeherder is currently limited '
                              'to devices.')
            return

        try:
            group = DEVICE_GROUP_MAP[device][device_firmware_version_release]
            job.add_group_name(group['name'])
            job.add_group_symbol(group['symbol'])
            job.add_job_name('Gaia Python Integration Test (%s)' %
                             group['symbol'])
            job.add_job_symbol('Gip')
        except KeyError:
            self.logger.error('Unknown device id: %s or device firmware '
                              'version: %s. Unable to determine Treeherder '
                              'group. Supported devices: %s' %
                              (device, device_firmware_version_release, [
                                  '%s: %s' % (k, [fw for fw in v.keys()])
                                  for k, v in DEVICE_GROUP_MAP.iteritems()
                              ]))
            return

        # Determine revision hash from application revision
        revision = version['application_changeset']
        project = version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            self.treeherder_url,
            'api/project/%s/revision-lookup/?revision=%s' %
            (project, revision))
        self.logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result('testfailed')
        else:
            job.add_result('success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        # All B2G device builds are currently opt builds
        job.add_option_collection({'opt': True})

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   version.get('gaia_changeset'),
            'value': version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': version.get('device_firmware_date') and time.strftime(
                date_format, time.localtime(int(
                    version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': version.get('device_firmware_version_release')
        }]

        ci_url = os.environ.get('BUILD_URL')
        if ci_url:
            job_details.append({
                'url': ci_url,
                'value': ci_url,
                'content_type': 'link',
                'title': 'CI build:'
            })

        # Attach logcat
        adb_device = ADBDevice(self.device_serial)
        with tempfile.NamedTemporaryFile(suffix='logcat.txt') as f:
            f.writelines(adb_device.get_logcat())
            self.logger.debug('Logcat stored in: %s' % f.name)
            try:
                url = self.upload_to_s3(f.name)
                job_details.append({
                    'url': url,
                    'value': 'logcat.txt',
                    'content_type': 'link',
                    'title': 'Log:'
                })
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload logcat.txt',
                    'content_type': 'text',
                    'title': 'Error:'
                })

        # Attach log files
        handlers = [
            handler for handler in self.logger.handlers
            if isinstance(handler, StreamHandler)
            and os.path.exists(handler.stream.name)
        ]
        for handler in handlers:
            path = handler.stream.name
            filename = os.path.split(path)[-1]
            try:
                url = self.upload_to_s3(path)
                job_details.append({
                    'url': url,
                    'value': filename,
                    'content_type': 'link',
                    'title': 'Log:'
                })
                # Add log reference
                if (type(handler.formatter) is TbplFormatter or
                        (type(handler.formatter) is LogLevelFilter and
                         type(handler.formatter.inner) is TbplFormatter)):
                    job.add_log_reference(filename, url)
            except S3UploadError:
                job_details.append({
                    'value': 'Failed to upload %s' % filename,
                    'content_type': 'text',
                    'title': 'Error:'
                })

        # Attach reports
        for report in [self.html_output, self.xml_output]:
            if report is not None:
                filename = os.path.split(report)[-1]
                try:
                    url = self.upload_to_s3(report)
                    job_details.append({
                        'url': url,
                        'value': filename,
                        'content_type': 'link',
                        'title': 'Report:'
                    })
                except S3UploadError:
                    job_details.append({
                        'value': 'Failed to upload %s' % filename,
                        'content_type': 'text',
                        'title': 'Error:'
                    })

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=os.environ.get('TREEHERDER_KEY'),
            oauth_secret=os.environ.get('TREEHERDER_SECRET'))
        self.logger.debug('Sending results to Treeherder: %s' %
                          job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug('Response: %s' % response.read())
        assert response.status == 200, 'Failed to send results!'
        self.logger.info(
            'Results are available to view at: %s' %
            (urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' %
                     (project, revision))))
Example #35
def parse_log(project, job_log_url, job_guid, check_errors=False):
    """
    Call ArtifactBuilderCollection on the given job.
    """
    credentials = OAuthCredentials.get_credentials(project)
    req = TreeherderRequest(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        project=project,
        oauth_key=credentials.get('consumer_key', None),
        oauth_secret=credentials.get('consumer_secret', None),
    )
    update_endpoint = 'job-log-url/{0}/update_parse_status'.format(job_log_url['id'])

    try:
        log_url = job_log_url['url']
        bug_suggestions = []
        bugscache_uri = '{0}{1}'.format(
            settings.API_HOSTNAME,
            reverse("bugscache-list")
        )
        terms_requested = {}

        if log_url:
            # parse a log given its url
            artifact_bc = ArtifactBuilderCollection(log_url,
                                                    check_errors=check_errors)
            artifact_bc.parse()

            artifact_list = []
            for name, artifact in artifact_bc.artifacts.items():
                artifact_list.append((job_guid, name, 'json',
                                      json.dumps(artifact)))
            if check_errors:
                all_errors = artifact_bc.artifacts.get(
                    'Structured Log', {}).get(
                    'step_data', {}).get('all_errors', [])

                for err in all_errors:
                    # remove the mozharness prefix
                    clean_line = get_mozharness_substring(err['line'])
                    # get a meaningful search term out of the error line
                    search_term = get_error_search_term(clean_line)
                    bugs = dict(open_recent=[], all_others=[])

                    # collect open recent and all other bugs suggestions
                    if search_term:
                        if search_term not in terms_requested:
                            # retrieve the list of suggestions from the api
                            bugs = get_bugs_for_search_term(
                                search_term,
                                bugscache_uri
                            )
                            terms_requested[search_term] = bugs
                        else:
                            bugs = terms_requested[search_term]

                    if not bugs or not (bugs['open_recent']
                                        or bugs['all_others']):
                        # no suggestions, try to use
                        # the crash signature as search term
                        crash_signature = get_crash_signature(clean_line)
                        if crash_signature:
                            if crash_signature not in terms_requested:
                                bugs = get_bugs_for_search_term(
                                    crash_signature,
                                    bugscache_uri
                                )
                                terms_requested[crash_signature] = bugs
                            else:
                                bugs = terms_requested[crash_signature]

                    bug_suggestions.append({
                        "search": clean_line,
                        "bugs": bugs
                    })

            artifact_list.append((job_guid, 'Bug suggestions', 'json', json.dumps(bug_suggestions)))

            # store the artifacts generated
            tac = TreeherderArtifactCollection()
            for artifact in artifact_list:
                ta = tac.get_artifact({
                    "job_guid": artifact[0],
                    "name": artifact[1],
                    "type": artifact[2],
                    "blob": artifact[3]
                })
                tac.add(ta)

            req.post(tac)

            # send an update to job_log_url
            # the job_log_url status changes
            # from pending to parsed
            current_timestamp = time.time()
            status = 'parsed'
            req.send(
                update_endpoint,
                method='POST',
                data={
                    'parse_status': status,
                    'parse_timestamp': current_timestamp
                }
            )

    except Exception as e:
        # send an update to job_log_url
        # the job_log_url status changes
        # from pending/running to failed
        current_timestamp = time.time()
        status = 'failed'
        req.send(
            update_endpoint,
            method='POST',
            data={
                'parse_status': status,
                'parse_timestamp': current_timestamp
            }
        )
        # .retry() raises a RetryTaskError exception, so it must run
        # after the status update; it re-queues the task and leaves a
        # trace in the log.
        parse_log.retry(exc=e)
Example #36
    def post_to_treeherder(self, tests):
        self.logger.info('\nTREEHERDER\n----------')
        version = mozversion.get_version(
            binary=self.bin, sources=self.sources,
            dm_type='adb', device_serial=self.device_serial)

        job_collection = TreeherderJobCollection()
        job = job_collection.get_job()

        device = version.get('device_id')
        if not device:
            self.logger.error('Submitting to Treeherder is currently limited '
                              'to devices.')
            return

        try:
            group = DEVICE_GROUP_MAP[device]
            job.add_group_name(group['name'])
            job.add_group_symbol(group['symbol'])
            job.add_job_name('Gaia Python Integration Test (%s)' % device)
            job.add_job_symbol('Gip')
        except KeyError:
            self.logger.error('Unknown device id: %s, unable to determine '
                              'Treeherder group. Supported device ids: %s' % (
                                  device, DEVICE_GROUP_MAP.keys()))
            return

        # Determine revision hash from application revision
        revision = version['application_changeset']
        project = version['application_repository'].split('/')[-1]
        lookup_url = urljoin(
            self.treeherder_url,
            'api/project/%s/revision-lookup/?revision=%s' % (
                project, revision))
        self.logger.debug('Getting revision hash from: %s' % lookup_url)
        response = requests.get(lookup_url)
        response.raise_for_status()
        assert response.json(), 'Unable to determine revision hash for %s. ' \
                                'Perhaps it has not been ingested by ' \
                                'Treeherder?' % revision
        revision_hash = response.json()[revision]['revision_hash']
        job.add_revision_hash(revision_hash)
        job.add_project(project)
        job.add_job_guid(str(uuid.uuid4()))
        job.add_product_name('b2g')
        job.add_state('completed')

        # Determine test result
        if self.failed or self.unexpected_successes:
            job.add_result('testfailed')
        else:
            job.add_result('success')

        job.add_submit_timestamp(int(self.start_time))
        job.add_start_timestamp(int(self.start_time))
        job.add_end_timestamp(int(self.end_time))

        job.add_machine(socket.gethostname())
        job.add_build_info('b2g', 'b2g-device-image', 'x86')
        job.add_machine_info('b2g', 'b2g-device-image', 'x86')

        # All B2G device builds are currently opt builds
        job.add_option_collection({'opt': True})

        # TODO: Add log reference
        # job.add_log_reference()

        date_format = '%d %b %Y %H:%M:%S'
        job_details = [{
            'content_type': 'link',
            'title': 'Gaia revision:',
            'url': 'https://github.com/mozilla-b2g/gaia/commit/%s' %
                   version.get('gaia_changeset'),
            'value': version.get('gaia_changeset'),
        }, {
            'content_type': 'text',
            'title': 'Gaia date:',
            'value': version.get('gaia_date') and time.strftime(
                date_format, time.localtime(int(version.get('gaia_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device identifier:',
            'value': version.get('device_id')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (date):',
            'value': version.get('device_firmware_date') and time.strftime(
                date_format, time.localtime(int(
                    version.get('device_firmware_date')))),
        }, {
            'content_type': 'text',
            'title': 'Device firmware (incremental):',
            'value': version.get('device_firmware_version_incremental')
        }, {
            'content_type': 'text',
            'title': 'Device firmware (release):',
            'value': version.get('device_firmware_version_release')
        }]

        if self.ci_url:
            job_details.append({
                'url': self.ci_url,
                'value': self.ci_url,
                'content_type': 'link',
                'title': 'CI build:'})

        artifacts = [self.html_output, self.xml_output]
        if any(artifacts):
            required_envs = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
            upload_artifacts = all([os.environ.get(v) for v in required_envs])

            if upload_artifacts:
                conn = boto.connect_s3()
                bucket = conn.create_bucket(
                    os.environ.get('S3_UPLOAD_BUCKET', 'gaiatest'))
                bucket.set_acl('public-read')

                for artifact in artifacts:
                    if artifact and os.path.exists(artifact):
                        h = hashlib.sha512()
                        with open(artifact, 'rb') as f:
                            for chunk in iter(lambda: f.read(1024 ** 2), b''):
                                h.update(chunk)
                        _key = h.hexdigest()
                        key = bucket.get_key(_key)
                        if not key:
                            key = bucket.new_key(_key)
                        key.set_contents_from_filename(artifact)
                        key.set_acl('public-read')
                        blob_url = key.generate_url(expires_in=0,
                                                    query_auth=False)
                        job_details.append({
                            'url': blob_url,
                            'value': artifact,
                            'content_type': 'link',
                            'title': 'Artifact:'})
                        self.logger.info('Artifact %s uploaded to: %s' % (
                            artifact, blob_url))
            else:
                self.logger.info(
                    'Artifacts will not be included with the report. Please '
                    'set the following environment variables to enable '
                    'uploading of artifacts: %s' % ', '.join([
                        v for v in required_envs if not os.environ.get(v)]))

        if job_details:
            job.add_artifact('Job Info', 'json', {'job_details': job_details})

        job_collection.add(job)

        # Send the collection to Treeherder
        url = urlparse(self.treeherder_url)
        request = TreeherderRequest(
            protocol=url.scheme,
            host=url.hostname,
            project=project,
            oauth_key=self.treeherder_key,
            oauth_secret=self.treeherder_secret)
        self.logger.debug('Sending results to Treeherder: %s' %
                          job_collection.to_json())
        response = request.post(job_collection)
        self.logger.debug('Response: %s' % response.read())
        assert response.status == 200, 'Failed to send results!'
        self.logger.info('Results are available to view at: %s' % (
            urljoin(self.treeherder_url, '/ui/#/jobs?repo=%s&revision=%s' % (
                project, revision))))
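
Several of the examples above call self.upload_to_s3(path) and catch S3UploadError without showing either. A standalone sketch following the boto pattern used inline in this example (the bucket name, error class, and helper name are assumptions for illustration):

import hashlib

import boto
from boto.exception import BotoClientError, BotoServerError

class S3UploadError(Exception):
    pass

def upload_to_s3(path, bucket_name='gaiatest'):
    try:
        conn = boto.connect_s3()
        bucket = conn.create_bucket(bucket_name)
        # Content-address the object by the SHA-512 of the file so
        # identical artifacts are stored only once, mirroring the
        # inline boto code in this example.
        h = hashlib.sha512()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1024 ** 2), b''):
                h.update(chunk)
        key = bucket.get_key(h.hexdigest()) or bucket.new_key(h.hexdigest())
        key.set_contents_from_filename(path)
        key.set_acl('public-read')
        # Return a public, non-expiring URL for the uploaded artifact.
        return key.generate_url(expires_in=0, query_auth=False)
    except (BotoClientError, BotoServerError) as e:
        raise S3UploadError(str(e))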
Example #37
def main():
    result_revision_hash = create_revision_hash()

    trsc = TreeherderResultSetCollection()

    trs = trsc.get_resultset()

    # self.required_properties = {
    #     'revision_hash':{ 'len':50, 'cb':self.validate_existence },
    #     'revisions':{ 'type':list, 'cb':self.validate_existence },
    #     'author':{ 'len':150, 'cb':self.validate_existence }
    #     }

    trs.add_revision_hash(result_revision_hash)
    trs.add_author('WebRTC QA Tests')
    trs.add_push_timestamp(int(time.time()))

    tr = trs.get_revision()

    # self.required_properties = {
    #     'revision':{ 'len':50, 'cb':self.validate_existence },
    #     'repository':{ 'cb':self.validate_existence },
    #     'files':{ 'type':list, 'cb':self.validate_existence },
    #     }

    tr.add_revision(create_revision_hash()[:12])
    tr.add_author('Firefox Nightly')
    tr.add_comment('firefox-33.0a1.en-US')
    tr.add_files(['firefox-33.0a1.en-US.linux-i686.tar.bz2',
                  'firefox-33.0a1.en-US.linux-x86_64.tests.zip'])
    tr.add_repository(
        'ftp://ftp.mozilla.org/pub/firefox/nightly/latest-mozilla-central/')
    trs.add_revision(tr)

    trsc.add(trs)

    tjc = TreeherderJobCollection()
    tj = tjc.get_job()

    # self.required_properties = {
    #     'revision_hash':{ 'len':50, 'cb':self.validate_existence },
    #     'project':{ 'cb':self.validate_existence },
    #     'job':{ 'type':dict, 'cb':self.validate_existence },
    #     'job.job_guid':{ 'len':50, 'cb':self.validate_existence }
    # }

    tj.add_revision_hash(result_revision_hash)
    tj.add_project('qa-try')
    tj.add_job_guid(str(uuid.uuid4()))

    tj.add_build_info('linux', 'linux64', 'x86_64')
    tj.add_description('WebRTC Sunny Day')
    tj.add_machine_info('linux', 'linux64', 'x86_64')
    tj.add_end_timestamp(int(time.time()) - 5)
    tj.add_start_timestamp(int(time.time()) - 3600 * 3 - 5)
    tj.add_submit_timestamp(int(time.time()) - 3600 * 3 - 10)
    tj.add_state('completed')
    tj.add_machine('webrtc-server')
    tj.add_option_collection({'opt': True})  # must not be {}!
    tj.add_reason('testing')
    tj.add_result('success')  # must be success/testfailed/busted
    tj.add_who('*****@*****.**')
    tj.add_group_name('WebRTC QA Tests')
    tj.add_group_symbol('WebRTC')
    tj.add_job_symbol('end')
    tj.add_job_name('Endurance')

    tj.add_artifact('Job Info', 'json', {
        "job_details": [
            {
                'title': 'Iterations:',
                'value': '10782',
                'content_type': 'text'
            },
            {
                'title': 'Errors:',
                'value': '5',
                'content_type': 'text'
            },
            {
                'title': 'Longest Pass Duration:',
                'value': '2:58:36.5',
                'content_type': 'text'
            }
        ],
    })

    tjc.add(tj)

    key, secret = get_oauth_creds()
    project, host = get_repo_details()

    req = TreeherderRequest(
        protocol='http',
        host=host,
        project=project,
        oauth_key=key,
        oauth_secret=secret
    )

    print 'trsc = ' + json.dumps(json.loads(trsc.to_json()), sort_keys=True,
                                 indent=4, separators=(',', ': '))

    print 'tjc = ' + json.dumps(json.loads(tjc.to_json()), sort_keys=True,
                                indent=4, separators=(',', ': '))

    # print 'req.oauth_key = ' + req.oauth_key
    # print 'req.oauth_secret = ' + req.oauth_secret

    # uri = req.get_uri(trsc)
    # print 'req.get_uri() = ' + uri
    # print 'req.oauth_client.get_signed_uri() = ' +
    # req.oauth_client.get_signed_uri(trsc.to_json(), uri)

    req.post(trsc)
    req.post(tjc)
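
The example above posts both collections without inspecting the responses. A more cautious variant (a sketch, using the same response API as the earlier examples) would check each status before continuing:

for name, collection in (('resultset', trsc), ('job', tjc)):
    response = req.post(collection)
    if response.status != 200:
        raise Exception('Failed to post %s collection: %s'
                        % (name, response.read()))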