def generate_request_body(self):
    """
    Create the data structure that will be sent to ElasticSearch.

    Looks up the job, its option collections and its result-set
    revisions, then stores the assembled payload on ``self.body``.
    """
    jobs = JobsModel(self.project)
    try:
        job = jobs.get_job(self.job_id)[0]
        option_collections = jobs.refdata_model.get_all_option_collections()
        revisions = jobs.get_resultset_revisions_list(job["result_set_id"])
        buildapi = jobs.get_job_artifact_list(0, 1, {
            'job_id': set([("=", self.job_id)]),
            'name': set([("=", "buildapi")])
        })
        # OrangeFactor needs a buildname to be set or it skips the failure
        # classification, so we make one up for non-buildbot jobs.
        buildname = 'non-buildbot %s test %s' % (
            job["platform"], job["job_type_name"])
        if buildapi:
            buildname = buildapi[0]["blob"]["buildername"]
    finally:
        jobs.disconnect()

    self.body = {
        "buildname": buildname,
        "machinename": job["machine_name"],
        "os": job["platform"],
        # I'm using the request time date here, as start time is not
        # available for pending jobs
        "date": datetime.fromtimestamp(
            int(job["submit_timestamp"])).strftime("%Y-%m-%d"),
        "type": job["job_type_name"],
        "buildtype": option_collections[
            job["option_collection_hash"]]["opt"],
        # Intentionally using strings for starttime, bug, timestamp for
        # compatibility with TBPL's legacy output format.
        "starttime": str(job["start_timestamp"]),
        "tree": self.project,
        "rev": revisions[0]["revision"],
        "bug": str(self.bug_id),
        "who": self.who,
        "timestamp": str(self.submit_timestamp),
        "logfile": "00000000"
    }
def generate_request_body(self):
    """
    Create the data structure required by tbpl's starcomment.php script

    Collects the job, its option collection and its result-set
    revisions, then stores the assembled payload on ``self.body``.
    """
    jm = JobsModel(self.project)
    try:
        buildapi_artifact = jm.get_job_artifact_list(0, 1, {
            'job_id': set([("=", self.job_id)]),
            'name': set([("=", "buildapi")])
        })
        job_data = jm.get_job(self.job_id)[0]
        option_collection = jm.refdata_model.get_all_option_collections()
        revision_list = jm.get_resultset_revisions_list(
            job_data["result_set_id"])
        if buildapi_artifact:
            buildname = buildapi_artifact[0]["blob"]["buildername"]
        else:
            # Jobs that did not come from buildbot have no "buildapi"
            # artifact; indexing [0] unconditionally raised IndexError
            # for them. Fall back to a synthetic buildername instead.
            buildname = 'non-buildbot %s test %s' % (
                job_data["platform"], job_data["job_type_name"])
    finally:
        jm.disconnect()

    self.body = {
        "buildname": buildname,
        "machinename": job_data["machine_name"],
        "os": job_data["platform"],
        # I'm using the request time date here, as start time is not
        # available for pending jobs
        "date": datetime.fromtimestamp(int(
            job_data["submit_timestamp"])).strftime("%Y-%m-%d"),
        "type": job_data["job_type_name"],
        "buildtype": option_collection[
            job_data["option_collection_hash"]]["opt"],
        "starttime": int(job_data["start_timestamp"]),
        # "logfile": "",
        "tree": self.project,
        "rev": revision_list[0]["revision"],
        "comment": "Bug {0}".format(self.bug_id),
        "who": self.who,
        "timestamp": self.submit_timestamp,
        "logfile": "00000000"
    }
def generate_request_body(self):
    """
    Create the data structure required by tbpl's starcomment.php script

    Collects the job, its option collection and its result-set
    revisions, then stores the assembled payload on ``self.body``.
    """
    jm = JobsModel(self.project)
    try:
        buildapi_artifact = jm.get_job_artifact_list(0, 1, {
            'job_id': set([("=", self.job_id)]),
            'name': set([("=", "buildapi")])
        })
        job_data = jm.get_job(self.job_id)[0]
        option_collection = jm.refdata_model.get_all_option_collections()
        revision_list = jm.get_resultset_revisions_list(
            job_data["result_set_id"])
        if buildapi_artifact:
            buildname = buildapi_artifact[0]["blob"]["buildername"]
        else:
            # Jobs that did not come from buildbot have no "buildapi"
            # artifact; indexing [0] unconditionally raised IndexError
            # for them. Fall back to a synthetic buildername instead.
            buildname = 'non-buildbot %s test %s' % (
                job_data["platform"], job_data["job_type_name"])
    finally:
        jm.disconnect()

    self.body = {
        "buildname": buildname,
        "machinename": job_data["machine_name"],
        "os": job_data["platform"],
        # I'm using the request time date here, as start time is not
        # available for pending jobs
        "date": datetime.fromtimestamp(
            int(job_data["submit_timestamp"])).strftime("%Y-%m-%d"),
        "type": job_data["job_type_name"],
        "buildtype": option_collection[
            job_data["option_collection_hash"]
        ]["opt"],
        "starttime": int(job_data["start_timestamp"]),
        # "logfile": "",
        "tree": self.project,
        "rev": revision_list[0]["revision"],
        "comment": "Bug {0}".format(self.bug_id),
        "who": self.who,
        "timestamp": self.submit_timestamp,
        "logfile": "00000000"
    }
def generate_request_body(self):
    """
    Create the data structure required by tbpl's submitBugzillaComment.php script
    This is triggered by a new bug-job association.
    """
    jobs = JobsModel(self.project)
    try:
        job = jobs.get_job(self.job_id)[0]
        suggestion_artifacts = jobs.get_job_artifact_list(0, 1, {
            'job_id': set([('=', job['id'])]),
            'name': set([('=', 'Bug suggestions')]),
        })
        # A bug suggestion artifact blob looks like this:
        # [{ "search": "my-error-line", "bugs": ....}]
        error_lines = [
            line["search"]
            for artifact in suggestion_artifacts
            for line in artifact["blob"]
        ]
        bug_job_map = jobs.get_bug_job_map_detail(self.job_id, self.bug_id)
        revisions = jobs.get_resultset_revisions_list(job["result_set_id"])
        buildapi_info = jobs.get_job_artifact_list(0, 1, {
            'job_id': set([("=", self.job_id)]),
            'name': set([("=", "buildapi")])
        })
    finally:
        jobs.disconnect()

    # Mask "@" and "." in the submitter's address.
    who = bug_job_map["who"].replace("@", "[at]").replace(".", "[dot]")
    # Drop sub-second precision from the association's submit time.
    submit_date = datetime.fromtimestamp(
        bug_job_map["submit_timestamp"]).replace(microsecond=0).isoformat()

    job_description = {
        'repository': self.project,
        'who': who,
        'submit_timestamp': submit_date,
        'log': "{0}{1}/logviewer.html#?repo={2}&job_id={3}".format(
            settings.SITE_URL, settings.UI_PREFIX, self.project,
            self.job_id),
        'machine': job["machine_name"],
        'revision': revisions[0]["revision"],
    }
    # Only buildbot jobs carry a buildapi artifact with a buildername.
    if buildapi_info:
        job_description['buildname'] = buildapi_info[0]["blob"]["buildername"]

    description_lines = ["{0}: {1}".format(field, value)
                         for field, value in job_description.items()]
    body_comment = ('\n'.join(description_lines) + '\n\n'
                    + '\n'.join(error_lines))

    self.body = {
        "id": self.bug_id,
        "comment": body_comment
    }