Example #1
    def _get_text_log_summary_artifact(self, job, job_guid):
        # We can only have one text_log_summary artifact,
        # so pick the first log with steps to create it.

        if "logs" in job:
            for log in job["logs"]:
                if "steps" in log:
                    all_errors = []
                    old_steps = log["steps"]
                    new_steps = []

                    for idx, step in enumerate(old_steps):
                        errors = step.get("errors", [])
                        error_count = len(errors)
                        if error_count:
                            all_errors.extend(errors)

                        started = to_timestamp(step["timeStarted"])
                        finished = to_timestamp(step["timeFinished"])
                        new_steps.append({
                            "name": step["name"],
                            "result": self._get_step_result(job, step["result"]),
                            "started": started,
                            "finished": finished,
                            "started_linenumber": step["lineStarted"],
                            "finished_linenumber": step["lineFinished"],
                            "errors": errors,
                            "error_count": error_count,
                            "duration": finished - started,
                            "order": idx
                        })

                    return {
                        "blob": {
                            "step_data": {
                                "all_errors": all_errors,
                                "steps": new_steps,
                                "errors_truncated": log.get("errorsTruncated")
                            },
                            "logurl": log["url"]
                        },
                        "type": "json",
                        "name": "text_log_summary",
                        "job_guid": job_guid
                    }
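
These examples lean on a to_timestamp helper that is never shown. A minimal
sketch, assuming it converts the ISO 8601 UTC strings returned by the GitHub
and Taskcluster APIs into Unix epoch seconds (the real implementation may
differ):

    import calendar
    from datetime import datetime

    def to_timestamp(datestamp):
        # Parse an ISO 8601 UTC string such as "2017-04-25T17:33:28Z" and
        # return Unix epoch seconds. calendar.timegm interprets the
        # struct_time as UTC, unlike time.mktime.
        parsed = datetime.strptime(datestamp, "%Y-%m-%dT%H:%M:%SZ")
        return calendar.timegm(parsed.timetuple())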
Example #2
    def fetch_resultset(self, url, repository, sha=None):
        params = {"sha": sha} if sha else {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching resultset details: {}".format(url))
        try:
            commits = self.get_cleaned_commits(fetch_json(url, params))
            head_commit = commits[-1]
            resultset = {
                "revision": head_commit["sha"],
                "push_timestamp": to_timestamp(
                    head_commit["commit"]["author"]["date"]),
                "author": head_commit["commit"]["author"]["email"],
            }

            revisions = []
            for commit in commits:
                revisions.append({
                    "comment": commit["commit"]["message"],
                    "repository": repository,
                    "author": "{} <{}>".format(
                        commit["commit"]["author"]["name"],
                        commit["commit"]["author"]["email"]),
                    "revision": commit["sha"]
                })

            resultset["revisions"] = revisions
            return resultset

        except Exception as ex:
            logger.exception("Error fetching commits")
            newrelic.agent.record_exception(ex, params={
                "url": url, "repository": repository, "sha": sha
                })
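
Worth noting: on failure this version logs the exception and records it in
New Relic but then falls off the end of the except block, so it implicitly
returns None. A caller would presumably need to guard for that; a
hypothetical usage (the object and sha are illustrative):

    resultset = source.fetch_resultset(url, repository, sha="abc1234")
    if resultset is None:
        # The fetch failed; the error has already been logged and
        # reported to New Relic, so skip this push.
        return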
Example #3
    def fetch_push(self, url, repository):
        params = {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching push details: %s", url)

        commits = self.get_cleaned_commits(fetch_json(url, params))
        head_commit = commits[-1]
        push = {
            "revision": head_commit["sha"],
            "push_timestamp": to_timestamp(
                head_commit["commit"]["author"]["date"]),
            "author": head_commit["commit"]["author"]["email"],
        }

        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "author": u"{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"]
            })

        push["revisions"] = revisions
        return push
Example #4
    def fetch_push(self, url, repository):
        params = {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching push details: %s", url)

        commits = self.get_cleaned_commits(fetch_json(url, params))
        head_commit = commits[-1]
        push = {
            "revision": head_commit["sha"],
            "push_timestamp": to_timestamp(
                head_commit["commit"]["author"]["date"]),
            "author": head_commit["commit"]["author"]["email"],
        }

        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "author": u"{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"]
            })

        push["revisions"] = revisions
        return push
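
For orientation, a hedged usage sketch: the endpoint URL and repository name
below are hypothetical, and the return shape follows directly from the code
above:

    push = source.fetch_push(
        "https://api.github.com/repos/<owner>/<repo>/commits", "myrepo")

    # push now looks like:
    # {
    #     "revision": <sha of the newest commit>,
    #     "push_timestamp": 1493141608,   # Unix epoch seconds
    #     "author": <head commit author's email>,
    #     "revisions": [{"comment": ..., "author": u"Name <email>",
    #                    "revision": <sha>}, ...]
    # }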
Example #5
    def fetch_resultset(self, url, repository, sha=None):
        params = {"sha": sha} if sha else {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching resultset details: {}".format(url))
        newrelic.agent.add_custom_parameter("sha", sha)

        commits = self.get_cleaned_commits(fetch_json(url, params))
        head_commit = commits[-1]
        resultset = {
            "revision": head_commit["sha"],
            "push_timestamp": to_timestamp(
                head_commit["commit"]["author"]["date"]),
            "author": head_commit["commit"]["author"]["email"],
        }

        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "author": u"{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"]
            })

        resultset["revisions"] = revisions
        return resultset
Example #6
    def fetch_push(self, url, repository):
        params = {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching push details: %s", url)

        commits = self.get_cleaned_commits(fetch_json(url, params))
        head_commit = commits[-1]
        push = {
            "revision": head_commit["sha"],
            # A push can be co-authored. The author's date is when the
            # code was committed locally by the author; the committer's
            # date is when the PR was merged (committed) into master.
            "push_timestamp": to_timestamp(
                head_commit["commit"]["committer"]["date"]),
            # We want the original author's email to show up in the UI.
            "author": head_commit["commit"]["author"]["email"],
        }

        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "author": u"{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"]
            })

        push["revisions"] = revisions
        return push
Example #7
    def process_push(self, push_data):
        commits = self.get_cleaned_commits(push_data)
        head_commit = commits[-1]
        push = {
            "revision": head_commit["sha"],
            # A push can be co-authored. The author's date is when the
            # code was committed locally by the author; the committer's
            # date is when the PR was merged (committed) into master.
            "push_timestamp": to_timestamp(
                head_commit["commit"]["committer"]["date"]),
            # We want the original author's email to show up in the UI.
            "author": head_commit["commit"]["author"]["email"],
        }

        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "author": u"{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"],
            })

        push["revisions"] = revisions
        return push
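
The comments above draw a distinction between the author date and the
committer date. A small illustration of where the two fields live in a
GitHub-style commit payload (all values fabricated for the example):

    commit = {
        "sha": "<40-char hex sha>",
        "commit": {
            "author": {
                "name": "Jane Doe",
                "email": "jane@example.com",
                # When the change was committed locally by the author.
                "date": "2017-04-20T10:00:00Z",
            },
            "committer": {
                "name": "GitHub",
                "email": "noreply@github.com",
                # When the PR was merged (committed) into master.
                "date": "2017-04-25T17:33:28Z",
            },
            "message": "Fix the thing",
        },
    }

    # process_push takes push_timestamp from the committer date but keeps
    # the author's email, so the original author shows up in the UI.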
Example #8
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]

        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "superseded": pulse_job.get("coalesced", []),
            "revision": pulse_job["origin"]["revision"]
        }

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get(
            "buildMachine",
            pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        try:
            (real_task_id, retry_id) = task_and_retry_ids(job_guid)
            x["job"].update({
                "taskcluster_task_id": real_task_id,
                "taskcluster_retry_id": int(retry_id)
            })
        # TODO: Figure out what exception types we actually expect here.
        except Exception:
            pass

        return x
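
Example #8 calls a task_and_retry_ids helper that is not shown, but Examples
#12 and #13 below inline what looks like the same logic, so a sketch can be
grounded in them: the guid stores the task id in the uncompressed UUID form
of a slug, then a slash, then the retry id.

    import uuid

    import slugid

    def task_and_retry_ids(job_guid):
        # e.g. job_guid = "e62fee30-50eb-4ebd-8f52-babe5a38a35d/0".
        # slugid.encode turns the UUID back into its compressed slug form
        # (see https://github.com/taskcluster/slugid).
        (decoded_task_id, retry_id) = job_guid.split("/")
        real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
        return (real_task_id, retry_id)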
Example #9
    def _get_text_log_summary_artifact(self, job, job_guid):
        # We can only have one text_log_summary artifact,
        # so pick the first log with steps to create it.

        if "logs" in job:
            for log in job["logs"]:
                if "steps" in log:
                    all_errors = []
                    old_steps = log["steps"]
                    new_steps = []

                    for idx, step in enumerate(old_steps):
                        errors = step.get("errors", [])
                        error_count = len(errors)
                        if error_count:
                            all_errors.extend(errors)

                        started = to_timestamp(step["timeStarted"])
                        finished = to_timestamp(step["timeFinished"])
                        new_steps.append({
                            "name": step["name"],
                            "result": self._get_step_result(job, step["result"]),
                            "started": started,
                            "finished": finished,
                            "started_linenumber": step["lineStarted"],
                            "finished_linenumber": step["lineFinished"],
                            "errors": errors,
                            "error_count": error_count,
                            "duration": finished - started,
                            "order": idx
                        })

                    return {
                        "blob": {
                            "step_data": {
                                "all_errors": all_errors,
                                "steps": new_steps,
                                "errors_truncated": log.get("errorsTruncated")
                            },
                            "logurl": log["url"]
                        },
                        "type": "json",
                        "name": "text_log_summary",
                        "job_guid": job_guid
                    }
Example #10
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]
        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "coalesced": pulse_job.get("coalesced", []),
            "revision": pulse_job["origin"]["revision"]
        }

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get(
            "buildMachine",
            pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        return x
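
The loops over TIME_FIELD_MAP and PLATFORM_FIELD_MAP imply class-level dicts
mapping Treeherder job keys to pulse job keys. None of the examples show
them; a plausible sketch, where the class name and the right-hand pulse
field names are educated guesses rather than taken from the source:

    class JobLoader(object):
        # Hypothetical mappings; the pulse field names are assumptions,
        # not confirmed by the examples above.
        TIME_FIELD_MAP = {
            "submit_timestamp": "timeScheduled",
            "start_timestamp": "timeStarted",
            "end_timestamp": "timeCompleted",
        }
        PLATFORM_FIELD_MAP = {
            "build_platform": "buildMachine",
            "machine_platform": "runMachine",
        }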
Example #11
    def fetch_resultset(self, url, repository, sha=None):
        params = {"sha": sha} if sha else {}
        params.update(self.CREDENTIALS)

        logger.info("Fetching resultset details: {}".format(url))
        try:
            commits = self.get_cleaned_commits(fetch_json(url, params))
            head_commit = commits[-1]
            resultset = {
                "revision": head_commit["sha"],
                "push_timestamp": to_timestamp(
                    head_commit["commit"]["author"]["date"]),
                "author": head_commit["commit"]["author"]["email"],
            }

            revisions = []
            for commit in commits:
                revisions.append({
                    "comment": commit["commit"]["message"],
                    "repository": repository,
                    "author": "{} <{}>".format(
                        commit["commit"]["author"]["name"],
                        commit["commit"]["author"]["email"]),
                    "revision": commit["sha"]
                })

            resultset["revisions"] = revisions
            return resultset

        except Exception as ex:
            logger.exception("Error fetching commits")
            newrelic.agent.record_exception(ex, params={
                "url": url, "repository": repository, "sha": sha
            })
Example #12
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]

        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "coalesced": pulse_job.get("coalesced", []),
            "revision": pulse_job["origin"]["revision"]
        }

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get(
            "buildMachine",
            pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        # add some taskcluster metadata if it's available
        # currently taskcluster doesn't pass the taskId directly, so we
        # have to derive it from the guid, where it is stored in the
        # uncompressed guid form of a slug (see:
        # https://github.com/taskcluster/slugid)
        # FIXME: add support for processing the taskcluster information
        # properly, when it's available:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1323110#c7
        try:
            (decoded_task_id, retry_id) = job_guid.split('/')
            real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
            x["job"].update({
                "taskcluster_task_id": real_task_id,
                "taskcluster_retry_id": int(retry_id)
            })
        except Exception:
            pass

        return x
Example #13
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]

        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "superseded": pulse_job.get("coalesced", []),
            "revision": pulse_job["origin"]["revision"]
        }

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get("buildMachine",
                                         pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        # add some taskcluster metadata if it's available
        # currently taskcluster doesn't pass the taskId directly, so we
        # have to derive it from the guid, where it is stored in the
        # uncompressed guid form of a slug (see:
        # https://github.com/taskcluster/slugid)
        # FIXME: add support for processing the taskcluster information
        # properly, when it's available:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1323110#c7
        try:
            (decoded_task_id, retry_id) = job_guid.split('/')
            real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
            x["job"].update({
                "taskcluster_task_id": real_task_id,
                "taskcluster_retry_id": int(retry_id)
            })
        # TODO: Figure out what exception types we actually expect here.
        except Exception:
            pass

        return x
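
On the TODO above: everything in the try block fails in predictable ways,
which suggests what a narrower clause could look like (a sketch, not
necessarily what the authors settled on). Unpacking split('/') raises
ValueError when there is no slash (or more than one), uuid.UUID raises
ValueError on a malformed hex string, and int() raises ValueError on a
non-numeric retry id:

        try:
            (decoded_task_id, retry_id) = job_guid.split('/')
            real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
            x["job"].update({
                "taskcluster_task_id": real_task_id,
                "taskcluster_retry_id": int(retry_id)
            })
        except ValueError:
            # Not a "<uuid>/<retry>" style guid; skip the taskcluster
            # metadata rather than failing the whole transform.
            pass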
Example #14
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]
        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "coalesced": pulse_job.get("coalesced", [])
        }

        # It is possible there will be either a revision or a revision_hash
        # At some point we will ONLY get revisions and no longer receive
        # revision_hashes and then this check can be removed.
        revision = pulse_job["origin"].get("revision", None)
        if revision:
            x["revision"] = revision
        else:
            x["revision_hash"] = pulse_job["origin"]["revision_hash"]
            logger.warning(
                "Pulse job had revision_hash instead of revision: {}:{}".format(
                    pulse_job["origin"]["project"],
                    x["revision_hash"]
                ))
            params = {
                "project": pulse_job["origin"]["project"],
                "revision_hash": x["revision_hash"]
            }
            newrelic.agent.record_custom_event("revision_hash_usage",
                                               params=params)

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get("buildMachine",
                                         pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        return x
Example #15
    def transform(self, pulse_job):
        """
        Transform a pulse job into a job that can be written to disk.  Log
        References and artifacts will also be transformed and loaded with the
        job.

        We can rely on the structure of ``pulse_job`` because it will
        already have been validated against the JSON Schema at this point.
        """
        job_guid = pulse_job["taskId"]
        x = {
            "job": {
                "job_guid": job_guid,
                "name": pulse_job["display"].get("jobName", "unknown"),
                "job_symbol": self._get_job_symbol(pulse_job),
                "group_name": pulse_job["display"].get("groupName", "unknown"),
                "group_symbol": pulse_job["display"].get("groupSymbol"),
                "product_name": pulse_job.get("productName", "unknown"),
                "state": pulse_job["state"],
                "result": self._get_result(pulse_job),
                "reason": pulse_job.get("reason", "unknown"),
                "who": pulse_job.get("owner", "unknown"),
                "build_system_type": pulse_job["buildSystem"],
                "tier": pulse_job.get("tier", 1),
                "machine": self._get_machine(pulse_job),
                "option_collection": self._get_option_collection(pulse_job),
                "log_references": self._get_log_references(pulse_job),
                "artifacts": self._get_artifacts(pulse_job, job_guid),
            },
            "coalesced": pulse_job.get("coalesced", [])
        }

        # It is possible there will be either a revision or a revision_hash
        # At some point we will ONLY get revisions and no longer receive
        # revision_hashes and then this check can be removed.
        revision = pulse_job["origin"].get("revision", None)
        if revision:
            x["revision"] = revision
        else:
            x["revision_hash"] = pulse_job["origin"]["revision_hash"]
            logger.warning(
                "Pulse job had revision_hash instead of revision: {}:{}".format(
                    pulse_job["origin"]["project"],
                    x["revision_hash"]
                ))
            params = {
                "project": pulse_job["origin"]["project"],
                "revision_hash": x["revision_hash"]
            }
            newrelic.agent.record_custom_event("revision_hash_usage", params=params)

        # some or all the time fields may not be present in some cases
        for k, v in self.TIME_FIELD_MAP.items():
            if v in pulse_job:
                x["job"][k] = to_timestamp(pulse_job[v])

        # if only one platform is given, use it.
        default_platform = pulse_job.get(
            "buildMachine",
            pulse_job.get("runMachine", {}))

        for k, v in self.PLATFORM_FIELD_MAP.items():
            platform_src = pulse_job[v] if v in pulse_job else default_platform
            x["job"][k] = self._get_platform(platform_src)

        return x