def _post(self, *args, **kwargs):
    """Create a new lab document from the POSTed JSON data.

    A POST carrying an "id" keyword is rejected: lab updates must be
    performed with a PUT request.

    :return A `HandlerResponse` object (201 on creation).
    """
    response = hresponse.HandlerResponse(201)
    if kwargs.get("id", None):
        # An ID in the URL means the client is trying to update.
        response.status_code = 400
        response.reason = "To update a lab, perform a PUT request"
    else:
        json_obj = kwargs["json_obj"]
        # Validate the mandatory contact data before touching the db.
        valid_contact, reason = \
            validator.is_valid_lab_contact_data(json_obj)
        if valid_contact:
            response = handlers.common.lab.create_lab(
                json_obj, self.db, self.request.uri)
        else:
            response.status_code = 400
            if reason:
                response.reason = reason
    return response
def _get_one(self, doc_id, **kwargs):
    """Get just one single document from the collection.

    Subclasses should override this method and implement their own
    search functionalities. This is a general one.

    It should return a `HandlerResponse` object, with the `result`
    attribute set with the operation results.

    :return A `HandlerResponse` object.
    """
    response = None
    try:
        obj_id = bson.objectid.ObjectId(doc_id)
    except bson.errors.InvalidId, ex:
        # The provided ID cannot be parsed as a mongodb ObjectId.
        self.log.exception(ex)
        self.log.error("Provided doc ID '%s' is not valid", doc_id)
        response = hresponse.HandlerResponse()
        response.status_code = 400
        response.reason = "Wrong ID value provided"
    # NOTE(review): the success path (lookup using obj_id) and the final
    # `return response` are not visible in this chunk — the function
    # appears truncated here; confirm against the full file.
def _execute_callback(self, lab_name, **kwargs):
    """Handle a LAVA callback coming from the given lab.

    Only the "test" action is supported: it schedules an asynchronous
    Celery chain to process the results. Any other action gets a 404.

    :param lab_name: Name of the lab the callback comes from.
    :type lab_name: str
    :return A `HandlerResponse` object (202 when accepted).
    """
    action = kwargs["action"]
    response = hresponse.HandlerResponse()
    response.status_code = 202
    response.reason = "Request accepted and being processed"
    if action == "test":
        # Chain: store the LAVA test results, push them to kcidb,
        # then look for test regressions.
        tasks = [
            taskqueue.tasks.callback.lava_test.s(
                self.json_obj, self.job_meta, lab_name),
            taskqueue.tasks.kcidb.push_tests.s(),
            taskqueue.tasks.test.find_regression.s(),
        ]
        celery.chain(tasks).apply_async(
            link_error=taskqueue.tasks.error_handler.s())
    else:
        response.status_code = 404
        response.reason = "Unsupported LAVA action: {}".format(action)
    return response
def create_lab(json_obj, database, request_uri):
    """Create a new lab document in the database.

    :param json_obj: The JSON object with the data to create the new lab.
    :type json_obj: dict
    :param database: The database connection.
    :param request_uri: The request URI, used to build the Location header.
    :type request_uri: str
    :return A HandlerResponse.
    """
    response = hresponse.HandlerResponse(201)
    lab_name = json_obj.get(models.NAME_KEY)
    # Lab names must be unique.
    prev_lab = utils.db.find_one2(
        database[models.LAB_COLLECTION], {models.NAME_KEY: lab_name})
    if prev_lab:
        response.status_code = 400
        response.reason = "Lab '%s' already exists" % lab_name
    else:
        lab_doc = models.lab.LabDocument.from_json(json_obj)
        lab_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
        ret_val, token_id, token = _get_or_create_token(lab_doc, database)
        if all([ret_val != 200, ret_val != 201]):
            # Token creation/retrieval failed: record the error but still
            # save the lab — presumably token_id is None here; confirm
            # against _get_or_create_token.
            response.errors = \
                "Error saving or retrieving lab token: no token associated"
        lab_doc.token = token_id
        ret_val, lab_id = utils.db.save(database, lab_doc, manipulate=True)
        if ret_val == 201:
            response.result = {
                models.ID_KEY: lab_id,
                models.NAME_KEY: lab_name,
                models.TOKEN_KEY: token
            }
            response.headers = {"Location": request_uri + "/" + str(lab_id)}
        else:
            response.status_code = ret_val
            response.reason = "Error saving new lab '%s'" % lab_name
    return response
def execute_get(self, *args, **kwargs):
    """Perform the actual GET operation.

    Kept separate so that subclasses can plug in a different token
    authorization scheme if necessary.
    """
    valid_token, token = self.validate_req_token("GET")
    if not valid_token:
        return hresponse.HandlerResponse(403)

    kwargs["token"] = token
    get_id = kwargs.get("id", None)
    if get_id:
        return self._get_one(get_id, **kwargs)
    return self._get(**kwargs)
def _save_files(self, path):
    """Parse the request and for each file, save it.

    :param path: The directory path where to save the files.
    :type str
    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    req_files = self.request.files
    if not req_files:
        response.status_code = 400
        response.reason = "No files provided"
        return response

    saved = []
    for u_file in req_files.itervalues():
        f_data = u_file[0]
        saved.append(
            utils.upload.create_or_update_file(
                path,
                f_data["filename"],
                f_data["content_type"],
                f_data["body"]))
    response.result = saved
    return response
def _delete(self, doc_id, **kwargs):
    """Delete a test set and the resources tied to it.

    Deletes the test set document, then its test cases, and finally
    removes the test set reference from the test suite documents.

    :param doc_id: The ID of the test set to delete.
    :type doc_id: str
    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    try:
        set_id = bson.objectid.ObjectId(doc_id)
        if utils.db.find_one2(self.collection, set_id):
            response.status_code = utils.db.delete(self.collection, set_id)
            if response.status_code == 200:
                response.reason = "Resource '%s' deleted" % doc_id
                # Cascade: drop all the test cases belonging to this set.
                ret_val = utils.db.delete(
                    self.db[models.TEST_CASE_COLLECTION],
                    {models.TEST_SET_ID_KEY: set_id})
                if ret_val != 200:
                    response.errors = (
                        "Error deleting test cases with "
                        "test_set_id '%s'" % doc_id)
                # Remove test set reference from test_suite collection.
                ret_val = utils.db.update(
                    self.db[models.TEST_SUITE_COLLECTION],
                    {models.TEST_SET_KEY: set_id},
                    {models.TEST_SET_KEY: [set_id]},
                    operation="$pullAll"
                )
                if ret_val != 200:
                    response.errors = \
                        "Error removing test set reference from test suite"
            else:
                response.reason = "Error deleting resource '%s'" % doc_id
        else:
            response.status_code = 404
            response.reason = self._get_status_message(404)
    except bson.errors.InvalidId, ex:
        self.log.exception(ex)
        self.log.error("Invalid ID specified: %s", doc_id)
        response.status_code = 400
        response.reason = "Wrong ID specified"
    # NOTE(review): no `return response` visible in this chunk — the
    # function appears truncated here; confirm against the full file.
def _get(self, **kwargs):
    """Get all the documents in the collection.

    The returned results can be tweaked with the supported query
    arguments.

    Subclasses should override this method and implement their own
    search functionalities. This is a general one.

    It should return a `HandlerResponse` object, with the `result`
    attribute set with the operation results.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    spec, sort, fields, skip, limit, aggregate = self._get_query_args()

    if aggregate:
        response.result = utils.db.aggregate(
            self.collection,
            aggregate,
            match=spec,
            sort=sort,
            fields=fields,
            limit=limit)
    else:
        result, count = utils.db.find_and_count(
            self.collection,
            limit,
            skip,
            spec=spec,
            fields=fields,
            sort=sort)
        # An empty list, not None, when nothing matched.
        response.result = result if count > 0 else []
        response.skip = skip
        response.count = count
        response.limit = limit
    return response
def _post(self, *args, **kwargs):
    """Create a new test case document.

    A POST carrying an "id" keyword is rejected: test case updates must
    use a PUT request.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    set_id = kwargs.get("id", None)
    if set_id:
        response.status_code = 400
        response.reason = "To update a test case, use a PUT request"
    else:
        test_case_json = kwargs.get("json_obj", None)
        j_get = test_case_json.get
        suite_id = j_get(models.TEST_SUITE_ID_KEY)
        case_name = j_get(models.NAME_KEY)
        # The test case must reference an existing, valid test suite.
        suite_oid, suite_name, err_msg = \
            self._check_and_get_test_suite(suite_id)
        if suite_oid:
            other_args = {"mail_options": self.settings["mailoptions"]}
            ret_val, doc_id, err_msg = tests_import.import_test_case(
                test_case_json, suite_oid, suite_name, self.db,
                self.settings["dboptions"], **other_args)
            response.status_code = ret_val
            if ret_val == 201:
                response.result = {models.ID_KEY: doc_id}
                response.reason = "Test case '%s' created" % case_name
                response.headers = {
                    "Location": "/test/case/%s" % str(doc_id)
                }
            else:
                response.reason = "Error saving test case '%s'" % case_name
                response.errors = err_msg
        else:
            self.log.error(
                "Test suite '%s' not found or not valid ID", suite_id)
            response.status_code = 400
            response.reason = err_msg
    return response
def _post(self, *args, **kwargs):
    """Update the status of a job and trigger its build logs summary.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    obj = kwargs["json_obj"]
    job = obj.get(models.JOB_KEY)
    kernel = obj.get(models.KERNEL_KEY)
    git_branch = utils.clean_branch_name(obj.get(models.GIT_BRANCH_KEY))
    status = obj.get(models.STATUS_KEY, None)
    if not status:
        status = models.PASS_STATUS

    # Guard: reject unknown status values up front.
    if status not in models.VALID_JOB_STATUS:
        response.status_code = 400
        response.reason = \
            INVALID_STATUS % (status, str(models.VALID_JOB_STATUS))
        return response

    ret_val = utils.db.find_and_update(
        self.collection,
        {
            models.GIT_BRANCH_KEY: git_branch,
            models.JOB_KEY: job,
            models.KERNEL_KEY: kernel
        },
        {models.STATUS_KEY: status})

    if ret_val == 404:
        response.status_code = 404
        response.reason = JOB_NOT_FOUND % (job, kernel, git_branch)
    elif ret_val == 500:
        response.status_code = 500
        response.reason = INTERNAL_ERROR % (job, kernel, git_branch)
    else:
        response.reason = JOB_UPDATED % (job, kernel, git_branch, status)
        # Create the build logs summary file.
        taskb.create_build_logs_summary.apply_async(
            [job, kernel, git_branch])
    return response
def _get_one(self, doc_id, **kwargs):
    """Get a single document looked up via its build ID.

    :param doc_id: The build ID to search for.
    :type doc_id: str
    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    result = None
    try:
        obj_id = bson.objectid.ObjectId(doc_id)
        result = utils.db.find_one2(
            self.collection,
            {models.BUILD_ID_KEY: obj_id},
            fields=handlers.common.query.get_query_fields(
                self.get_query_arguments))
        if result:
            # result here is returned as a dictionary from mongodb
            response.result = result
        else:
            response.status_code = 404
            response.reason = "Resource '%s' not found" % doc_id
    except bson.errors.InvalidId, ex:
        self.log.exception(ex)
        self.log.error("Provided doc ID '%s' is not valid", doc_id)
        response.status_code = 400
        response.reason = "Wrong ID value provided"
    # NOTE(review): no `return response` visible in this chunk — the
    # function appears truncated here; confirm against the full file.
def find_regressions(doc_id, database):
    """Look for the regressions of a boot report.

    :param doc_id: The id of the boot report to look for regressions.
    :type doc_id: ObjectId
    :param database: The database connection.
    :return HandlerResponse A HandlerResponse object.
    """
    response = hresponse.HandlerResponse()
    # First make sure we have a valid boot_id value.
    boot_doc = utils.db.find_one2(
        database[models.BOOT_COLLECTION], doc_id)
    if boot_doc:
        # The by-boot index document maps this boot report to its
        # regressions document.
        regr_idx_doc = utils.db.find_one2(
            database[models.BOOT_REGRESSIONS_BY_BOOT_COLLECTION],
            {models.BOOT_ID_KEY: doc_id})
        if regr_idx_doc:
            spec = {
                models.ID_KEY: regr_idx_doc[models.BOOT_REGRESSIONS_ID_KEY]
            }
            result = utils.db.find_one2(
                database[models.BOOT_REGRESSIONS_COLLECTION],
                spec,
                fields=[models.REGRESSIONS_KEY])
            if result:
                response.result = get_regressions_by_key(
                    create_regressions_key(boot_doc),
                    result[models.REGRESSIONS_KEY])
                response.count = len(response.result)
        # No regression index found still yields a 200 with no result.
    else:
        # The boot report itself does not exist.
        response.status_code = 404
        response.reason = "Resource '{:s}' not found".format(str(doc_id))
    return response
def _put(self, *args, **kwargs):
    """Update an existing document with the provided JSON data.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    update_doc = kwargs.get("json_obj")
    doc_id = kwargs.get("id")
    try:
        set_id = bson.objectid.ObjectId(doc_id)
        if utils.db.find_one2(self.collection, set_id):
            update_val = utils.db.update(
                self.collection, {models.ID_KEY: set_id}, update_doc)
            if update_val == 200:
                response.reason = "Resource '%s' updated" % doc_id
            else:
                response.status_code = update_val
                response.reason = "Error updating resource '%s'" % doc_id
        else:
            response.status_code = 404
            response.reason = self._get_status_message(404)
    except bson.errors.InvalidId, ex:
        self.log.exception(ex)
        self.log.error("Invalid ID specified: %s", doc_id)
        response.status_code = 400
        response.reason = "Wrong ID specified"
    # NOTE(review): no `return response` visible in this chunk — the
    # function appears truncated here; confirm against the full file.
def _get_bisect(self, collection, spec, fields=None):
    """Retrieve the bisect data.

    :param collection: The name of the collection to operate on.
    :type collection: str
    :param spec: The search spec; may contain a compare_to key.
    :type spec: dict
    :param fields: A `fields` data structure with the fields to return or
    exclude. Default to None.
    :type fields: list or dict
    :return A `HandlerResponse` object.
    """
    response = None
    if collection in models.BISECT_VALID_COLLECTIONS:
        # Only the build collection is handled in this code path.
        if collection == models.BUILD_COLLECTION:
            bisect_func = execute_build_bisect
            if spec.get(models.COMPARE_TO_KEY, None):
                bisect_func = execute_build_bisect_compared_to
            else:
                # Force the compare_to field to None (null in mongodb)
                # so that we can search correctly otherwise we can get
                # multiple results out. This is due to how we store the
                # bisect calculations in the db.
                spec[models.COMPARE_TO_KEY] = None
            response = self._bisect(
                models.BUILD_ID_KEY, spec, bisect_func, fields=fields)
    else:
        response = hresponse.HandlerResponse(400)
        response.reason = (
            "Provided bisect collection '%s' is not valid" % collection)
    return response
def _post(self, *args, **kwargs):
    """Schedule boot and/or build email reports for a job/branch/kernel.

    The request is deduplicated via a redis-backed hash so that the same
    report is not scheduled twice within 24 hours; duplicates get a 409.

    Fixes applied:
    - `response.errrors` typo corrected to `response.errors` (the
      misspelled attribute silently dropped the warning message);
    - removed a stray duplicate `email_type.append("boot")` inside the
      redis lock (the value is already appended before the lock and the
      list is not read again on that path).

    :return A `HandlerResponse` object (202 when scheduled).
    """
    response = hresponse.HandlerResponse(202)
    json_obj = kwargs["json_obj"]
    j_get = json_obj.get
    job = j_get(models.JOB_KEY)
    kernel = j_get(models.KERNEL_KEY)
    branch = utils.clean_branch_name(j_get(models.GIT_BRANCH_KEY))
    lab_name = j_get(models.LAB_NAME_KEY, None)
    countdown = j_get(models.DELAY_KEY, self.settings["senddelay"])
    if countdown is None:
        countdown = self.settings["senddelay"]
    try:
        send_boot = bool(j_get(models.SEND_BOOT_REPORT_KEY, False))
        send_build = bool(j_get(models.SEND_BUILD_REPORT_KEY, False))
        email_format = j_get(models.EMAIL_FORMAT_KEY, None)
        email_format, email_errors = _check_email_format(email_format)
        response.errors = email_errors
        boot_errors = False
        build_errors = False
        if send_boot or send_build:
            # Normalize the delay: positive and capped at MAX_DELAY.
            countdown = int(countdown)
            if countdown < 0:
                countdown = abs(countdown)
                response.errors = (
                    "Negative value specified for the '%s' key, "
                    "its positive value will be used instead (%ds)" %
                    (models.DELAY_KEY, countdown))
            if countdown > MAX_DELAY:
                response.errors = (
                    "Delay value specified out of range (%ds), "
                    "maximum delay permitted (%ds) will be used instead" %
                    (countdown, MAX_DELAY))
                countdown = MAX_DELAY
            when = (datetime.datetime.now(tz=bson.tz_util.utc) +
                    datetime.timedelta(seconds=countdown))
            schedule_data = {
                "countdown": countdown,
                "boot_emails": j_get(models.BOOT_REPORT_SEND_TO_KEY, None),
                "boot_cc_emails": j_get(models.BOOT_REPORT_SEND_CC_KEY, None),
                "boot_bcc_emails": j_get(
                    models.BOOT_REPORT_SEND_BCC_KEY, None),
                "build_emails": j_get(models.BUILD_REPORT_SEND_TO_KEY, None),
                "build_cc_emails": j_get(
                    models.BUILD_REPORT_SEND_CC_KEY, None),
                "build_bcc_emails": j_get(
                    models.BUILD_REPORT_SEND_BCC_KEY, None),
                "generic_emails": j_get(models.REPORT_SEND_TO_KEY, None),
                "generic_cc_emails": j_get(models.REPORT_CC_KEY, None),
                "generic_bcc_emails": j_get(models.REPORT_BCC_KEY, None),
                "in_reply_to": j_get(models.IN_REPLY_TO_KEY, None),
                "subject": j_get(models.SUBJECT_KEY, None),
                "db_options": self.settings["dboptions"],
            }
            email_type = []
            if send_boot:
                email_type.append("boot")
            if send_build:
                email_type.append("build")
            self.log.info(
                TRIGGER_RECEIVED,
                self.request.remote_ip,
                job, branch, kernel,
                datetime.datetime.utcnow(),
                str(email_type))
            # Hash of every scheduling parameter: used as a redis key to
            # deduplicate identical report requests.
            hashable_str = "{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}".format(
                job,
                branch,
                kernel,
                str(schedule_data["boot_emails"]),
                str(schedule_data["boot_cc_emails"]),
                str(schedule_data["boot_bcc_emails"]),
                str(schedule_data["build_emails"]),
                str(schedule_data["build_cc_emails"]),
                str(schedule_data["build_bcc_emails"]),
                str(schedule_data["generic_emails"]),
                str(schedule_data["generic_cc_emails"]),
                str(schedule_data["generic_bcc_emails"]),
                schedule_data["in_reply_to"],
                schedule_data["subject"],
                str(email_type),
                str(email_format))
            schedule_hash = hashlib.sha1(hashable_str).hexdigest()
            try:
                lock_key = \
                    "email-{}-{}-{}-{}".format(
                        str(email_type), job, branch, kernel)
                with redis.lock.Lock(self.redisdb, lock_key, timeout=2):
                    if not self.redisdb.exists(schedule_hash):
                        self.redisdb.set(schedule_hash, "schedule", ex=86400)
                        if send_boot:
                            # Note: the original code appended "boot" to
                            # email_type a second time here; removed as
                            # the list is not read again on this path.
                            boot_errors, response.errors = \
                                self._schedule_boot_report(
                                    job, branch, kernel, lab_name,
                                    email_format, schedule_data)
                        if send_build:
                            build_errors, response.errors = \
                                self._schedule_build_report(
                                    job, branch, kernel,
                                    email_format, schedule_data)
                        response.reason, response.status_code = \
                            _check_status(
                                send_boot,
                                send_build, boot_errors, build_errors, when)
                    else:
                        self.log.warn(
                            TRIGGER_RECEIVED_ALREADY,
                            job, branch, kernel, str(email_type))
                        taskq.send_multiple_emails_error.apply_async([
                            job,
                            branch,
                            kernel,
                            datetime.datetime.utcnow(),
                            email_format,
                            email_type,
                            schedule_data
                        ])
                        response.status_code = 409
                        response.reason = ERR_409_MESSAGE
            except redis.lock.LockError:
                # Probably only reached during the unit tests.
                pass
        else:
            response.status_code = 400
            response.reason = (
                "Don't know which report to send: either specify "
                " '%s' or '%s'" %
                (models.SEND_BOOT_REPORT_KEY, models.SEND_BUILD_REPORT_KEY))
    except (TypeError, ValueError):
        response.status_code = 400
        response.reason = ("Wrong value specified for 'delay': %s" %
                           countdown)
    return response
def _post(self, *args, **kwargs):
    """Create a new test group via the kci_test import path.

    A POST carrying an "id" keyword is rejected: test group updates must
    use a PUT request.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    group_id = kwargs.get("id", None)
    if group_id:
        response.status_code = 400
        response.reason = "To update a test group, use a PUT request"
    else:
        # TODO: double check the token with its lab name, we need to make
        # sure people are sending test reports with a token lab with the
        # correct lab name value.
        group_json = kwargs.get("json_obj", None)
        group_pop = group_json.pop
        group_get = group_json.get
        group_name = group_get(models.NAME_KEY)
        # TODO: move name validation into the initial json validation.
        if utils.valid_test_name(group_name):
            if group_get(models.LOG_KEY):
                # Store the raw log on disk and reference its file name
                # in the document instead of keeping the blob in mongodb.
                path_parts = (utils.BASE_PATH,
                              group_get(models.JOB_KEY),
                              group_get(models.GIT_BRANCH_KEY),
                              group_get(models.KERNEL_KEY),
                              group_get(models.ARCHITECTURE_KEY),
                              group_get(models.DEFCONFIG_FULL_KEY),
                              group_get(models.BUILD_ENVIRONMENT_KEY),
                              group_get(models.LAB_NAME_KEY))
                directory_path = os.path.join(*path_parts)
                name = "-".join((group_get(models.NAME_KEY),
                                 group_get(models.DEVICE_TYPE_KEY)))
                ext = 'txt'
                filename = "{}.{}".format(name, ext)
                kci_test._add_test_log(directory_path, filename,
                                       group_get(models.LOG_KEY))
                group_json[models.BOOT_LOG_KEY] = filename
            dboptions = self.settings["dboptions"]
            (ret_val,
             group_id,
             errors) = kci_test.import_and_save_kci_tests(group_json,
                                                          dboptions)
            if ret_val == 201:
                response.status_code = ret_val
                response.result = {models.ID_KEY: group_id}
                response.reason = (
                    "Test group '%s' created" % group_name)
                response.headers = {
                    "Location": "/test/group/%s" % str(group_id)}
            else:
                # NOTE(review): `errors` from the import call is not
                # attached to the response here — confirm if intended.
                response.status_code = ret_val
                response.reason = (
                    "Error saving test group '%s'" % group_name)
        else:
            response.status_code = 400
            response.reason = "Test group name not valid"
    return response
def update_lab(doc_id, json_obj, valid_keys, database):
    """Update a lab document based on the provided values.

    :param doc_id: The ID of the lab document to update.
    :type doc_id: str
    :param json_obj: The JSON object with the data to update.
    :type json_obj: dict
    :param valid_keys: The list of valid keys that should be in the JSON
    data.
    :type valid_keys: list
    :param database: The database connection.
    :return A HandlerResponse.
    """
    response = hresponse.HandlerResponse(200)
    response.reason = "Lab document updated"
    errors = []
    bson_doc_id = bson.objectid.ObjectId(doc_id)
    old_lab = utils.db.find_one2(
        database[models.LAB_COLLECTION], {models.ID_KEY: bson_doc_id})
    if old_lab:
        new_lab = copy.deepcopy(json_obj)
        # Keep only recognized keys whose value actually changed.
        for key, val in json_obj.iteritems():
            if key not in valid_keys:
                new_lab.pop(key)
                errors.append("Unrecognized key '%s' will be dropped" % key)
                continue
            if old_lab[key] == val:
                new_lab.pop(key)
        if new_lab:
            is_valid = True
            if models.CONTACT_KEY in new_lab.viewkeys():
                is_valid, reason = validator.is_valid_lab_contact_data(new_lab)
                new_contact = new_lab[models.CONTACT_KEY]
                # Update the old token email contact address.
                # NOTE(review): this token update runs even when the
                # contact data failed validation above — confirm whether
                # it should be guarded by is_valid.
                ret_val = utils.db.find_and_update(
                    database[models.TOKEN_COLLECTION],
                    {models.ID_KEY: old_lab[models.TOKEN_KEY]},
                    {models.EMAIL_KEY: new_contact[models.EMAIL_KEY]}
                )
                if ret_val != 200:
                    errors.append("Error updating token with new email")
            if is_valid:
                new_lab[models.UPDATED_KEY] = datetime.datetime.now(
                    tz=bson.tz_util.utc)
                if models.TOKEN_KEY in new_lab.viewkeys():
                    _, local_errors = _update_lab_token(
                        old_lab, new_lab, database)
                    errors.extend(local_errors)
                ret_val = utils.db.find_and_update(
                    database[models.LAB_COLLECTION],
                    {models.ID_KEY: bson_doc_id},
                    new_lab)
                if ret_val != 200:
                    response.status_code = ret_val
                    response.reason = "Error updating lab document"
            else:
                # `reason` comes from the contact validation above; this
                # branch is only reachable when that validation ran.
                response.status_code = 400
                response.reason = reason
        else:
            response.reason = "No new data to update, lab not modified"
    else:
        response.status_code = 404
        response.reason = "Provided lab ID does not exists"
    response.errors = errors
    return response
def test_response_setter_valid(self):
    """Setting a valid status code must be reflected by the getter."""
    resp = hresponse.HandlerResponse(1)
    resp.status_code = 200
    self.assertEqual(200, resp.status_code)
def _post(self, *args, **kwargs):
    """Schedule an email report of the requested type.

    The report type comes from the JSON payload (with the deprecated
    send_boot/send_build keys still honored). Requests are deduplicated
    via a redis-backed hash; duplicates within 24 hours get a 409.

    Fixes applied:
    - `response.errrors` typo corrected to `response.errors` (the
      misspelled attribute silently dropped the warning message);
    - removed the unused `schedule_errors` local.

    :return A `HandlerResponse` object (202 when scheduled).
    """
    response = hresponse.HandlerResponse(202)
    json_obj = kwargs["json_obj"]
    j_get = json_obj.get

    # Mandatory keys
    job = j_get(models.JOB_KEY)
    kernel = j_get(models.KERNEL_KEY)
    branch = utils.clean_branch_name(j_get(models.GIT_BRANCH_KEY))

    # Optional keys
    report_type = j_get(models.REPORT_TYPE_KEY)
    countdown = j_get(models.DELAY_KEY)
    if countdown is None:
        countdown = self.settings["senddelay"]

    # Deprecated - ToDo: use report_type only in client code
    if j_get(models.SEND_BOOT_REPORT_KEY):
        report_type = 'boot'
    elif j_get(models.SEND_BUILD_REPORT_KEY):
        report_type = 'build'

    report_keys = REPORT_TYPE_KEYS.get(report_type)
    if not report_keys:
        response.status_code = 400
        response.reason = (
            "Invalid report type: {}. Valid values are: {}".format(
                report_type, ", ".join(REPORT_TYPE_KEYS.keys())))
        return response

    report_data = {k: j_get(k) for k in report_keys}
    email_format = j_get(models.EMAIL_FORMAT_KEY, None)
    email_format, email_errors = _check_email_format(email_format)
    response.errors = email_errors

    try:
        # Normalize the delay: positive and capped at MAX_DELAY.
        countdown = int(countdown)
        if countdown < 0:
            countdown = abs(countdown)
            response.errors = (
                "Negative value specified for the '%s' key, "
                "its positive value will be used instead (%ds)" %
                (models.DELAY_KEY, countdown))
        if countdown > MAX_DELAY:
            response.errors = (
                "Delay value specified out of range (%ds), "
                "maximum delay permitted (%ds) will be used instead" %
                (countdown, MAX_DELAY))
            countdown = MAX_DELAY
        when = (datetime.datetime.now(tz=bson.tz_util.utc) +
                datetime.timedelta(seconds=countdown))

        def j_get_list(key):
            # Always return a list, even for missing or scalar values.
            value = j_get(key)
            if value is None:
                value = []
            elif not isinstance(value, list):
                value = [value]
            return value

        email_opts = {
            "to": j_get_list(models.REPORT_SEND_TO_KEY),
            "cc": j_get_list(models.REPORT_CC_KEY),
            "bcc": j_get_list(models.REPORT_BCC_KEY),
            "in_reply_to": j_get(models.IN_REPLY_TO_KEY),
            "subject": j_get(models.SUBJECT_KEY),
            "format": email_format,
        }
        report_type_or_plan = j_get(models.PLAN_KEY, report_type)
        self.log.info(
            TRIGGER_RECEIVED,
            self.request.remote_ip,
            job, branch, kernel,
            datetime.datetime.utcnow(),
            report_type_or_plan)
        # Hash of every scheduling parameter: used as a redis key to
        # deduplicate identical report requests.
        hashable_str = ''.join(
            str(x) for x in [
                job,
                branch,
                kernel,
                email_opts["to"],
                email_opts["cc"],
                email_opts["bcc"],
                email_opts["in_reply_to"],
                email_opts["subject"],
                report_type_or_plan,
                str(email_format),
            ])
        schedule_hash = hashlib.sha1(hashable_str).hexdigest()
        try:
            lock_key = '-'.join(
                ['email', report_type, job, branch, kernel])
            with redis.lock.Lock(self.redisdb, lock_key, timeout=2):
                if not self.redisdb.exists(schedule_hash):
                    self.redisdb.set(schedule_hash, "schedule", ex=86400)
                    # Dispatch to _schedule_boot_report /
                    # _schedule_build_report / etc. by report type.
                    schedule_method = getattr(
                        self, "_schedule_{}_report".format(report_type))
                    errors, response.errors = schedule_method(
                        report_data, email_opts, countdown)
                    response.reason, response.status_code = \
                        _check_status(report_type, errors, when)
                else:
                    self.log.warn(
                        TRIGGER_RECEIVED_ALREADY,
                        job, branch, kernel, report_type_or_plan)
                    taskq.send_multiple_emails_error.apply_async([
                        job,
                        branch,
                        kernel,
                        datetime.datetime.utcnow(),
                        email_format,
                        report_type,
                        email_opts
                    ])
                    response.status_code = 409
                    response.reason = ERR_409_MESSAGE
        except redis.lock.LockError:
            # Probably only reached during the unit tests.
            pass
    except (TypeError, ValueError):
        response.status_code = 400
        response.reason = ("Wrong value specified for 'delay': %s" %
                           countdown)
    return response
def _post(self, *args, **kwargs):
    """Create a new test group document, importing its test cases.

    A POST carrying an "id" keyword is rejected: test group updates must
    use a PUT request.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    group_id = kwargs.get("id", None)
    if group_id:
        response.status_code = 400
        response.reason = "To update a test group, use a PUT request"
    else:
        # TODO: double check the token with its lab name, we need to make
        # sure people are sending test reports with a token lab with the
        # correct lab name value.
        group_json = kwargs.get("json_obj", None)
        group_pop = group_json.pop
        group_get = group_json.get
        # Remove the test_cases from the JSON and pass it as is.
        cases_list = group_pop(models.TEST_CASES_KEY, [])
        group_name = group_get(models.NAME_KEY)
        # TODO: move name validation into the initial json validation.
        if utils.valid_test_name(group_name):
            # Make sure the *_id values passed are valid.
            ret_val, error = self._check_references(
                group_get(models.BUILD_ID_KEY, None),
                group_get(models.JOB_ID_KEY, None))
            if ret_val == 200:
                test_group = \
                    mtgroup.TestGroupDocument.from_json(group_json)
                test_group.created_on = datetime.datetime.now(
                    tz=bson.tz_util.utc)
                ret_val, group_id = utils.db.save(self.db, test_group,
                                                  manipulate=True)
                if ret_val == 201:
                    response.status_code = ret_val
                    response.result = {models.ID_KEY: group_id}
                    response.reason = ("Test group '%s' created" %
                                       group_name)
                    response.headers = {
                        "Location": "/test/group/%s" % str(group_id)
                    }
                    if cases_list:
                        if isinstance(cases_list, types.ListType):
                            # Cases are imported asynchronously: 202.
                            response.status_code = 202
                            response.messages = (
                                "Test cases will be parsed and imported")
                        else:
                            cases_list = []
                            response.errors = (
                                "Test cases are not wrapped in a "
                                "list; they will not be imported")
                    # Complete the update of the test group and import
                    # everything else.
                    if all([cases_list]):
                        self._import_group_and_cases(
                            group_json, group_id, cases_list, group_name)
                    else:
                        # Just update the test group document.
                        taskq.complete_test_group_import.apply_async([
                            group_json,
                            group_id,
                            group_name,
                            self.settings["dboptions"]
                        ])
                else:
                    response.status_code = ret_val
                    response.reason = ("Error saving test group '%s'" %
                                       group_name)
            else:
                response.status_code = 400
                response.reason = error
        else:
            response.status_code = 400
            response.reason = "Test group name not valid"
    return response
def test_response_headers_setter_valid(self):
    """Assigning a dict to the headers attribute must round-trip."""
    resp = hresponse.HandlerResponse()
    headers = {'foo': 'bar'}
    resp.headers = headers
    self.assertEqual(resp.headers, {'foo': 'bar'})
def execute_delete(self, *args, **kwargs):
    """Execute DELETE pre-operations.

    Not implemented here: always replies with a 501 response.
    """
    response = hresponse.HandlerResponse(501)
    return response
def execute_put(self, *args, **kwargs):
    """Execute PUT pre-operations.

    Not implemented here: always replies with a 501 response.
    """
    response = hresponse.HandlerResponse(501)
    return response
def _get(self, **kwargs):
    """Get boot documents, optionally comparing against other labs.

    With the "compared" query argument, return only the builds that the
    requesting lab has booted but no other lab has; otherwise perform a
    plain find on the collection.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    spec, sort, fields, skip, limit, compared = self._get_query_args()
    if compared:
        lab_name = kwargs.get("lab_name", None)
        if lab_name:
            # The final results and count.
            result = []
            count = 0
            # First get all the defconfigs with the spec as specified
            # in the query parameters.
            all_defconfigs, all_count = utils.db.find_and_count(
                self.collection,
                0,
                0,
                spec=spec,
                fields=[models.ID_KEY],
                sort=sort)
            # If we have defconfigs, search for all the boot reports with
            # almost the same specified query, excluding the boots
            # performed by the querying lab, and looking only for the
            # defconfings similar to the ones retrieved above.
            if all_count > 0:
                all_distinct_def = all_defconfigs.distinct(models.ID_KEY)
                # Make a copy of the spec used to retrieve the defconfing
                # since we need it later as well.
                boot_spec = copy.deepcopy(spec)
                # Remove possible query arguments that are not in the boot
                # schema.
                boot_spec.pop(models.GIT_BRANCH_KEY, None)
                boot_spec.pop(models.GIT_COMMIT_KEY, None)
                boot_spec.pop(models.GIT_DESCRIBE_KEY, None)
                boot_spec.pop(models.ID_KEY, None)
                # Inject the lab name and the previous defconfigs.
                boot_spec[models.LAB_NAME_KEY] = {"$ne": lab_name}
                boot_spec[models.BUILD_ID_KEY] = {"$in": all_distinct_def}
                already_booted, booted_count = utils.db.find_and_count(
                    self.db[models.BOOT_COLLECTION],
                    0,
                    0,
                    spec=boot_spec,
                    fields=[models.BUILD_ID_KEY],
                    sort=[(models.CREATED_KEY, pymongo.DESCENDING)])
                booted_defconfigs = []
                if booted_count > 0:
                    booted_defconfigs = already_booted.distinct(
                        models.BUILD_ID_KEY)
                # Do a set difference to get the not booted ones.
                not_booted = set(all_distinct_def).difference(
                    set(booted_defconfigs))
                if not_booted:
                    spec[models.ID_KEY] = {"$in": list(not_booted)}
                    # These are the final results, what gets back to the
                    # user.
                    result, count = utils.db.find_and_count(
                        self.collection,
                        limit,
                        skip,
                        spec=spec,
                        fields=fields,
                        sort=sort)
            response.result = result
            response.count = count
        else:
            response.status_code = 400
            response.reason = ("Missing lab name to perform a comparison: "
                               "was a lab token used?")
    else:
        result, count = utils.db.find_and_count(
            self.collection,
            limit,
            skip,
            spec=spec,
            fields=fields,
            sort=sort)
        if count > 0:
            response.result = result
        else:
            response.result = []
        response.count = count
        response.limit = limit
    return response
def _post(self, *args, **kwargs):
    """Create a new test suite, importing its test sets and cases.

    A POST carrying an "id" keyword is rejected: test suite updates must
    use a PUT request. Embedded test sets/cases are stripped from the
    JSON and imported asynchronously.

    :return A `HandlerResponse` object.
    """
    response = hresponse.HandlerResponse()
    suite_id = kwargs.get("id", None)
    if suite_id:
        response.status_code = 400
        response.reason = "To update a test suite, use a PUT request"
    else:
        # TODO: double check the token with its lab name, we need to make
        # sure people are sending test reports with a token lab with the
        # correct lab name value. Check the boot handler.
        suite_json = kwargs.get("json_obj", None)
        suite_pop = suite_json.pop
        suite_get = suite_json.get
        # Remove the test_set and test_case from the JSON and pass them
        # as is.
        sets_list = suite_pop(models.TEST_SET_KEY, [])
        cases_list = suite_pop(models.TEST_CASE_KEY, [])
        suite_name = suite_get(models.NAME_KEY)
        # TODO: move name validation into the initial json validation.
        if utils.valid_test_name(suite_name):
            # Make sure the *_id values passed are valid.
            ret_val, error = self._check_references(
                suite_get(models.BUILD_ID_KEY, None),
                suite_get(models.JOB_ID_KEY, None),
                suite_get(models.BOOT_ID_KEY, None)
            )
            if ret_val == 200:
                test_suite = \
                    mtsuite.TestSuiteDocument.from_json(suite_json)
                test_suite.created_on = datetime.datetime.now(
                    tz=bson.tz_util.utc)
                ret_val, suite_id = utils.db.save(
                    self.db, test_suite, manipulate=True)
                if ret_val == 201:
                    response.status_code = ret_val
                    response.result = {models.ID_KEY: suite_id}
                    response.reason = (
                        "Test suite '%s' created" % suite_name)
                    response.headers = {
                        "Location": "/test/suite/%s" % str(suite_id)}
                    if sets_list:
                        if isinstance(sets_list, types.ListType):
                            # Sets are imported asynchronously: 202.
                            response.status_code = 202
                            response.messages = (
                                "Test sets will be parsed and imported")
                        else:
                            sets_list = []
                            response.errors = (
                                "Test sets are not wrapped in a list; "
                                "they will not be imported")
                    if cases_list:
                        if isinstance(cases_list, types.ListType):
                            response.status_code = 202
                            response.messages = (
                                "Test cases will be parsed and imported")
                        else:
                            cases_list = []
                            response.errors = (
                                "Test cases are not wrapped in a "
                                "list; they will not be imported")
                    # Complete the update of the test suite and import
                    # everything else.
                    if all([cases_list, sets_list]):
                        self._import_suite_with_sets_and_cases(
                            suite_json,
                            suite_id, sets_list, cases_list, suite_name)
                    elif all([cases_list, not sets_list]):
                        self._import_suite_and_cases(
                            suite_json, suite_id, cases_list, suite_name)
                    elif all([not cases_list, sets_list]):
                        self._import_suite_and_sets(
                            suite_json, suite_id, sets_list, suite_name)
                    else:
                        # Just update the test suite document.
                        taskq.complete_test_suite_import.apply_async(
                            [
                                suite_json,
                                suite_id,
                                suite_name,
                                self.settings["dboptions"],
                                self.settings["mailoptions"]
                            ]
                        )
                else:
                    response.status_code = ret_val
                    response.reason = (
                        "Error saving test suite '%s'" % suite_name)
            else:
                response.status_code = 400
                response.reason = error
        else:
            response.status_code = 400
            response.reason = "Test suite name not valid"
    return response
def _delete(self, spec_or_id, **kwargs):
    """Delete the resource(s) matching `spec_or_id`.

    :param spec_or_id: The document ID or search spec to delete.
    :return A `HandlerResponse` object with the delete status.
    """
    status = utils.db.delete(self.collection, spec_or_id)
    response = hresponse.HandlerResponse(200)
    response.status_code = status
    response.reason = self._get_status_message(status)
    return response
response = hresponse.HandlerResponse(400) response.reason = "Wrong ID value passed as object ID" else: spec = handlers.common.query.get_query_spec( self.get_query_arguments, self._valid_keys("DELETE")) if spec: response = self._delete(spec) if response.status_code == 200: response.reason = ( "Resources identified with '%s' deleted" % spec) else: response = hresponse.HandlerResponse(400) response.reason = ( "No valid data provided to execute a DELETE") else: response = hresponse.HandlerResponse(403) return response def _valid_boot_delete_token(self, token, boot_doc): """Make sure the token is an actual delete token. This is an extra step in making sure the token is valid. A lab token, token used to send boot reports, can be used to delete boot reports only belonging to its lab. :param token: The req token. :type token: models.token.Token :param boot_doc: The document to delete. :type boot_doc: dict :return True or False.
def execute_put(self, *args, **kwargs):
    """Execute PUT pre-operations.

    Not implemented: always replies with a 501 response.
    """
    return hresponse.HandlerResponse(501)
response.errors = errors else: response = hresponse.HandlerResponse(400) response.reason = "Provided JSON is not valid" response.errors = errors except ValueError, ex: self.log.exception(ex) error = "No JSON data found in the PUT request" self.log.error(error) response = hresponse.HandlerResponse(422) response.reason = error else: response = hresponse.HandlerResponse(400) response.reason = "Missing token ID" else: response = hresponse.HandlerResponse(valid_request) response.reason = \ "Wrong content type, must be '%s'" % self.content_type return response def _new_data(self, json_obj): """Create a new token in the DB. :param json_obj: The JSON object with the paramters. :return A `HandlerResponse` object. """ response = hresponse.HandlerResponse(201) try: new_token = self._token_update_create(json_obj)
def execute_post(self, *args, **kwargs):
    """Not implemented."""
    response = hresponse.HandlerResponse(501)
    return response