async def DELETE_Domain(request):
    """HTTP DELETE method to delete a domain
    """
    log.request(request)
    app = request.app

    if not request.has_body:
        msg = "Expected body in delete domain"
        log.error(msg)
        raise HTTPInternalServerError()
    body = await request.json()

    domain = get_domain(request, body=body)
    log.info("delete domain: {}".format(domain))

    # raises exception if domain not found
    domain_json = await get_metadata_obj(app, domain)
    if domain_json:
        log.debug("got domain json")

    # delete domain
    await delete_metadata_obj(app, domain, notify=True)

    json_rsp = {"domain": domain}

    resp = json_response(json_rsp)
    log.response(request, resp=resp)
    return resp
async def GET_Group(request):
    """HTTP GET method to return JSON for /groups/
    """
    log.request(request)
    app = request.app
    group_id = get_obj_id(request)
    log.info("GET group: {}".format(group_id))

    if not isValidUuid(group_id, obj_class="group"):
        log.error("Unexpected group_id: {}".format(group_id))
        raise HTTPInternalServerError()

    group_json = await get_metadata_obj(app, group_id)

    resp_json = {}
    resp_json["id"] = group_json["id"]
    resp_json["root"] = group_json["root"]
    resp_json["created"] = group_json["created"]
    resp_json["lastModified"] = group_json["lastModified"]
    resp_json["linkCount"] = len(group_json["links"])
    resp_json["attributeCount"] = len(group_json["attributes"])

    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def check_metadata_obj(app, obj_id, bucket=None):
    """ Return False if obj does not exist """
    validateObjId(obj_id, bucket)
    if isValidDomain(obj_id):
        bucket = getBucketForDomain(obj_id)
    try:
        validateInPartition(app, obj_id)
    except KeyError:
        log.error("Domain not in partition")
        raise HTTPInternalServerError()

    deleted_ids = app['deleted_ids']
    if obj_id in deleted_ids:
        msg = f"{obj_id} has been deleted"
        log.info(msg)
        return False

    meta_cache = app['meta_cache']
    if obj_id in meta_cache:
        found = True
    else:
        # not in cache, check if the s3 obj exists
        s3_key = getS3Key(obj_id)
        log.debug(f"check_metadata_obj({s3_key})")
        # does key exist?
        found = await isS3Obj(app, s3_key, bucket=bucket)
    return found
async def GET_Dataset(request):
    """HTTP GET method to return JSON for /datasets/
    """
    log.request(request)
    app = request.app
    dset_id = get_obj_id(request)

    if not isValidUuid(dset_id, obj_class="dataset"):
        log.error("Unexpected dataset id: {}".format(dset_id))
        raise HTTPInternalServerError()

    dset_json = await get_metadata_obj(app, dset_id)

    resp_json = {}
    resp_json["id"] = dset_json["id"]
    resp_json["root"] = dset_json["root"]
    resp_json["created"] = dset_json["created"]
    resp_json["lastModified"] = dset_json["lastModified"]
    resp_json["type"] = dset_json["type"]
    resp_json["shape"] = dset_json["shape"]
    resp_json["attributeCount"] = len(dset_json["attributes"])
    if "creationProperties" in dset_json:
        resp_json["creationProperties"] = dset_json["creationProperties"]
    if "layout" in dset_json:
        resp_json["layout"] = dset_json["layout"]

    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def get_info(app, url):
    """ Invoke the /info request on the indicated url and return the response
    """
    req = url + "/info"
    log.info(f"get_info({url})")
    try:
        rsp_json = await http_get(app, req)
        if "node" not in rsp_json:
            log.error("Unexpected response from node")
            return None
    except OSError as ose:
        log.warn("OSError for req: {}: {}".format(req, str(ose)))
        return None
    except HTTPInternalServerError as hpe:
        log.warn(f"HTTPInternalServerError for req {req}: {hpe}")
        # node has gone away?
        return None
    except HTTPNotFound as nfe:
        log.warn(f"HTTPNotFound error for req {req}: {nfe}")
        # node has gone away?
        return None
    except TimeoutError as toe:
        log.warn("Timeout error for req: {}: {}".format(req, str(toe)))
        # node has gone away?
        return None

    return rsp_json
async def PUT_Object(request):
    """HTTP method to notify creation/update of objid"""
    log.request(request)
    app = request.app
    pending_set = app["pending"]
    objid = request.match_info.get('id')
    if not objid:
        log.error("PUT_Object with no id")
        raise HTTPBadRequest()

    log.info(f"PUT_Object/{objid}")
    if not isValidUuid(objid):
        log.warn(f"Invalid id: {objid}, ignoring")
        raise HTTPBadRequest()

    if isSchema2Id(objid):
        rootid = getRootObjId(objid)
        log.debug(f"adding root: {rootid} to pending queue for objid: {objid}")
        pending_set.add(rootid)

    resp_json = {}
    resp = json_response(resp_json, status=201)
    log.response(request, resp=resp)
    return resp
async def doFlush(app, root_id):
    """ Return when all DN nodes have written any pending changes to S3 """
    log.info(f"doFlush {root_id}")
    params = {"flush": 1}
    client = get_http_client(app)
    dn_urls = getDataNodeUrls(app)
    log.debug(f"dn_urls: {dn_urls}")

    try:
        tasks = []
        for dn_url in dn_urls:
            req = dn_url + "/groups/" + root_id
            task = asyncio.ensure_future(client.put(req, params=params))
            tasks.append(task)
        done, pending = await asyncio.wait(tasks)
        if pending:
            # should be empty since we didn't use return_when parameter
            log.error("Got pending tasks")
            raise HTTPInternalServerError()
        for task in done:
            log.info(f"task: {task}")
            if task.exception():
                log.warn(f"task had exception: {type(task.exception())}")
                raise HTTPInternalServerError()
            clientResponse = task.result()
            if clientResponse.status != 204:
                log.warn(f"expected 204 but got: {clientResponse.status}")
                raise HTTPInternalServerError()
    except ClientError as ce:
        log.error(f"Error for http_put('/groups/{root_id}'): {str(ce)}")
        raise HTTPInternalServerError()
    except CancelledError as cle:
        log.warn(f"CancelledError '/groups/{root_id}'): {str(cle)}")
        raise HTTPInternalServerError()
async def getDatasetDetails(app, dset_id, root_id):
    """ Get extra information about the given dataset """
    # Gather additional info on the domain
    log.debug(f"getDatasetDetails {dset_id}")
    if not isSchema2Id(root_id):
        log.info(
            f"dataset details not available for schema v1 id: {root_id}, returning null results"
        )
        return None

    root_info = await getRootInfo(app, root_id)
    if not root_info:
        log.warn(f"info.json not found for root: {root_id}")
        return None
    if "datasets" not in root_info:
        log.error("datasets key not found in root_info")
        return None
    datasets = root_info["datasets"]
    if dset_id not in datasets:
        log.warn(f"dataset id: {dset_id} not found in root_info")
        return None
    log.debug(f"returning datasetDetails: {datasets[dset_id]}")
    return datasets[dset_id]
def loadPasswordFile(password_file):
    log.info("using password file: {}".format(password_file))
    line_number = 0
    user_db = {}
    try:
        with open(password_file) as f:
            for line in f:
                line_number += 1
                s = line.strip()
                if not s:
                    continue
                if s[0] == '#':
                    # comment line
                    continue
                fields = s.split(':')
                if len(fields) < 2:
                    msg = "line: {} is not valid".format(line_number)
                    log.warn(msg)
                    continue
                username = fields[0]
                passwd = fields[1]
                if len(username) < 3 or len(passwd) < 3:
                    msg = "line: {} is not valid, username and password must be at least 3 characters long".format(line_number)
                    log.warn(msg)
                    continue
                if username in user_db:
                    msg = "line: {}, username is repeated".format(line_number)
                    log.warn(msg)
                    continue
                user_db[username] = {"pwd": passwd}
                log.info("added user: {}".format(username))
    except FileNotFoundError:
        log.error("unable to open password file")

    return user_db
async def getHeadUrl(app):
    head_url = None
    if "head_url" in app:
        head_url = app["head_url"]
    elif config.get("head_endpoint"):
        head_url = config.get("head_endpoint")
    else:
        # pull url from the headnode object in the bucket
        headnode_key = getHeadNodeS3Key()
        head_state = None
        # With Minio, reading from S3 may trigger an error if the minio container
        # is initializing.  Cycle around till we get a valid response.
        while not head_state:
            try:
                head_state = await getS3JSONObj(app, headnode_key)
            except ClientError as ce:
                log.warn("ClientError: {} for health check".format(str(ce)))
                await asyncio.sleep(1)
            except HTTPInternalServerError as he:
                log.warn(f"HTTPInternalServerError <{he}> for health check")
                await asyncio.sleep(1)
            except HTTPNotFound:
                log.warn("headnode not found, sleeping")
                await asyncio.sleep(1)
        if "head_url" not in head_state:
            msg = "head_url not found in head_state"
            log.error(msg)
        else:
            head_url = head_state["head_url"]
            app["head_url"] = head_url  # so we don't need to check S3 next time
    log.debug("head_url: {}".format(head_url))
    return head_url
async def getRootInfo(app, root_id):
    """ Get extra information about the root collection. """
    # Gather additional info on the domain
    log.debug(f"getRootInfo {root_id}")

    if not isSchema2Id(root_id):
        log.info(
            f"root info not available for schema v1 id: {root_id}, returning null results"
        )
        return None

    s3_key = getS3Key(root_id)

    parts = s3_key.split('/')
    # the root key is expected to have three parts: db/<root>/<suffix>
    # get the key for the root info object as: db/<root>/.info.json
    if len(parts) != 3:
        log.error(f"Unexpected s3key format: {s3_key}")
        return None

    info_key = f"db/{parts[1]}/.info.json"

    try:
        info_json = await getS3JSONObj(app, info_key)
    except HTTPNotFound:
        log.warn(f"info.json not found for key: {info_key}")
        return None

    return info_json
async def delete_object(self, key, bucket=None):
    """ Deletes the object at the given key
    """
    if not bucket:
        log.error("delete_object - bucket not set")
        raise HTTPInternalServerError()

    buckets = self._client
    if bucket not in buckets:
        msg = f"s3_bucket: {bucket} not found"
        log.info(msg)
        raise HTTPNotFound()
    bucket_map = buckets[bucket]
    if key not in bucket_map:
        msg = f"key: {key} not found in bucket: {bucket}"
        log.info(msg)
        raise HTTPNotFound()

    start_time = time.time()
    log.debug(f"memClient.delete_object({bucket}/{key}) start: {start_time}")
    try:
        await asyncio.sleep(0)  # 0 sec sleep to make the function async
        del bucket_map[key]
    except Exception as e:
        msg = f"Unexpected Exception {type(e)} deleting s3 obj {key}: {e}"
        log.error(msg)
        raise HTTPInternalServerError()
async def http_post(app, url, data=None, params=None):
    log.info("http_post('{}', data: {})".format(url, data))
    client = get_http_client(app)
    rsp_json = None
    timeout = config.get("timeout")

    try:
        async with client.post(url, json=data, params=params, timeout=timeout) as rsp:
            log.info("http_post status: {}".format(rsp.status))
            if rsp.status == 200:
                pass  # ok
            elif rsp.status == 201:
                pass  # also ok
            elif rsp.status == 204:
                # no data
                return None
            elif rsp.status == 404:
                log.info(f"POST request HTTPNotFound error for url: {url}")
            elif rsp.status == 410:
                log.info(f"POST request HTTPGone error for url: {url}")
            else:
                log.warn(
                    f"POST request error for url: {url} - status: {rsp.status}"
                )
                raise HTTPInternalServerError()
            rsp_json = await rsp.json()
            log.debug("http_post({}) response: {}".format(url, rsp_json))
    except ClientError as ce:
        log.error("Error for http_post({}): {} ".format(url, str(ce)))
        raise HTTPInternalServerError()
    except CancelledError as cle:
        log.error(f"CancelledError for http_post({url}): {cle}")
        raise HTTPInternalServerError()
    return rsp_json
async def putS3JSONObj(app, key, json_obj, bucket=None):
    """ Store JSON data as S3 object with given key
    """
    client = getS3Client(app)
    if not bucket:
        bucket = app['bucket_name']
    if key[0] == '/':
        key = key[1:]  # no leading slash
    log.info(f"putS3JSONObj(s3://{bucket}/{key})")
    s3_stats_increment(app, "put_count")
    data = json.dumps(json_obj)
    data = data.encode('utf8')
    start_time = time.time()
    try:
        rsp = await client.put_object(Bucket=bucket, Key=key, Body=data)
        finish_time = time.time()
        log.info(
            f"s3Util.putS3JSONObj({key} bucket={bucket}) start={start_time:.4f} finish={finish_time:.4f} elapsed={finish_time-start_time:.4f} bytes={len(data)}"
        )
        s3_rsp = {
            "etag": rsp["ETag"],
            "size": len(data),
            "lastModified": int(finish_time)
        }
    except ClientError as ce:
        s3_stats_increment(app, "error_count")
        msg = f"Error putting s3 obj {key}: {ce}"
        log.error(msg)
        raise HTTPInternalServerError()
    if data and len(data) > 0:
        s3_stats_increment(app, "bytes_out", inc=len(data))
    log.debug(f"putS3JSONObj {key} complete, s3_rsp: {s3_rsp}")
    return s3_rsp
async def DELETE_Object(request):
    log.request(request)
    app = request.app
    delete_set = app["delete_set"]

    objid = request.match_info.get('id')
    if not isValidUuid(objid):
        log.warn(f"Invalid id: {objid}")
        raise HTTPBadRequest()

    if isSchema2Id(objid):
        # get rootid for this id
        collection = getCollectionForId(objid)
        if collection == "datasets":
            delete_set.add(objid)
        elif collection == "groups":
            # only need to do anything if this is the root group
            if isRootObjId(objid):
                log.info(f"adding root group: {objid} to delete_set")
                delete_set.add(objid)
            else:
                log.info(f"ignoring delete non-root group: {objid}")
        elif collection == "datatypes":
            log.info(f"ignoring delete for datatype object: {objid}")
        else:
            log.error(f"Unexpected collection type: {collection}")

    resp_json = {}
    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def removeKeys(app, objid):
    # iterate through all s3 keys under the given root or dataset id and delete them
    #
    # Note: not re-entrant!  Only one scanRoot can be run at a time per app.
    log.debug(f"removeKeys: {objid}")
    if not isSchema2Id(objid):
        log.warn("ignoring non-schema2 id")
        raise KeyError("Invalid key")
    s3key = getS3Key(objid)
    log.debug(f"got s3key: {s3key}")
    expected_suffixes = (".dataset.json", ".group.json")
    s3prefix = None
    for suffix in expected_suffixes:
        if s3key.endswith(suffix):
            s3prefix = s3key[:-len(suffix)]
    if not s3prefix:
        log.error("unexpected s3key for delete_set")
        raise KeyError("unexpected key suffix")
    log.info(f"delete for {objid} searching for s3prefix: {s3prefix}")
    app["objDelete_prefix"] = s3prefix
    try:
        await getS3Keys(app, prefix=s3prefix, include_stats=False, callback=objDeleteCallback)
    except ClientError as ce:
        log.error(f"getS3Keys failed: {ce}")
    # reset the prefix
    app["objDelete_prefix"] = None
async def pendingCheck(app):
    """ Periodic method to check pending updates
    """
    log.info("pendingCheck start")

    async_sleep_time = config.get("async_sleep_time")
    log.info("async_sleep_time: {}".format(async_sleep_time))

    # update/initialize root object before starting node updates
    while True:
        if app["node_state"] != "READY":
            log.info("pendingCheck waiting for Node state to be READY")
            await asyncio.sleep(1)
            continue  # wait for READY state

        try:
            await processPending(app)
        except Exception as e:
            log.warn("pendingCheck - got exception from processPending: {}".format(e))

        await asyncio.sleep(async_sleep_time)

    # shouldn't ever get here
    log.error("pendingCheck terminating unexpectedly")
def getChunkCoverage(chunk_id, slices, layout):
    """ Get chunk-relative selection of the given chunk and selection.
    """
    chunk_index = getChunkIndex(chunk_id)
    chunk_sel = getChunkSelection(chunk_id, slices, layout)
    rank = len(layout)
    sel = []
    for dim in range(rank):
        s = chunk_sel[dim]
        w = layout[dim]
        offset = chunk_index[dim] * w
        start = s.start - offset
        if start < 0:
            msg = "Unexpected chunk selection"
            log.error(msg)
            raise ValueError(msg)
        stop = s.stop - offset
        if stop > w:
            msg = "Unexpected chunk selection"
            log.error(msg)
            raise ValueError(msg)
        step = s.step
        sel.append(slice(start, stop, step))
    return sel
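# Minimal, self-contained sketch (not part of the service code) illustrating the
# chunk-relative selection arithmetic used by getChunkCoverage above. All values
# here are made-up example inputs: a 1-D dataset with chunk layout [100], where
# chunk index 2 covers global elements 200..299 and the portion of the global
# selection falling in that chunk is elements 250..299.
layout = [100]                      # chunk shape per dimension
chunk_index = [2]                   # index of this chunk along each dimension
chunk_sel = [slice(250, 300, 1)]    # selection clipped to this chunk (global coords)

sel = []
for dim in range(len(layout)):
    s = chunk_sel[dim]
    offset = chunk_index[dim] * layout[dim]   # first global element held by this chunk
    start = s.start - offset                  # shift into chunk-relative coordinates
    stop = s.stop - offset
    if start < 0 or stop > layout[dim]:
        raise ValueError("selection does not lie within the chunk")
    sel.append(slice(start, stop, s.step))

print(sel)  # [slice(50, 100, 1)] -> elements 50..99 within the chunk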
async def GET_Datatype(request):
    """HTTP GET method to return JSON for /datatypes/
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    ctype_id = get_obj_id(request)

    if not isValidUuid(ctype_id, obj_class="type"):
        log.error(f"Unexpected type_id: {ctype_id}")
        raise HTTPInternalServerError()
    if "bucket" in params:
        bucket = params["bucket"]
    else:
        bucket = None

    ctype_json = await get_metadata_obj(app, ctype_id, bucket=bucket)

    resp_json = {}
    resp_json["id"] = ctype_json["id"]
    resp_json["root"] = ctype_json["root"]
    resp_json["created"] = ctype_json["created"]
    resp_json["lastModified"] = ctype_json["lastModified"]
    resp_json["type"] = ctype_json["type"]
    resp_json["attributeCount"] = len(ctype_json["attributes"])
    if "include_attrs" in params and params["include_attrs"]:
        resp_json["attributes"] = ctype_json["attributes"]

    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def DELETE_Domain(request):
    """HTTP DELETE method to delete a domain
    """
    log.request(request)
    app = request.app
    domain = get_domain(request)
    bucket = getBucketForDomain(domain)
    log.info(f"delete domain: {domain}")

    if not bucket:
        log.error(f"expected bucket to be used in domain: {domain}")
        raise HTTPInternalServerError()
    log.debug(f"using bucket: {bucket}")

    # raises exception if domain not found
    domain_json = await get_metadata_obj(app, domain)
    if domain_json:
        log.debug("got domain json")

    # delete domain
    await delete_metadata_obj(app, domain, notify=True)

    json_rsp = {"domain": domain}

    resp = json_response(json_rsp)
    log.response(request, resp=resp)
    return resp
async def DELETE_Dataset(request):
    """HTTP DELETE method for dataset
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    dset_id = request.match_info.get('id')
    log.info("DELETE dataset: {}".format(dset_id))

    if not isValidUuid(dset_id, obj_class="dataset"):
        log.error("Unexpected dataset id: {}".format(dset_id))
        raise HTTPInternalServerError()

    # verify the id exists
    obj_found = await check_metadata_obj(app, dset_id)
    if not obj_found:
        raise HTTPNotFound()

    log.debug("deleting dataset: {}".format(dset_id))

    notify = True
    if "Notify" in params and not params["Notify"]:
        notify = False
    await delete_metadata_obj(app, dset_id, notify=notify)

    resp_json = {}
    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def check_metadata_obj(app, obj_id):
    """ Return False if obj does not exist """
    if not isValidDomain(obj_id) and not isValidUuid(obj_id):
        msg = "Invalid obj id: {}".format(obj_id)
        log.error(msg)
        raise HTTPInternalServerError()

    try:
        validateInPartition(app, obj_id)
    except KeyError:
        log.error("Domain not in partition")
        raise HTTPInternalServerError()

    deleted_ids = app['deleted_ids']
    if obj_id in deleted_ids:
        msg = "{} has been deleted".format(obj_id)
        log.info(msg)
        return False

    meta_cache = app['meta_cache']
    if obj_id in meta_cache:
        found = True
    else:
        # not in cache, check if the s3 obj exists
        s3_key = getS3Key(obj_id)
        log.debug("check_metadata_obj({})".format(s3_key))
        # does key exist?
        found = await isS3Obj(app, s3_key)
    return found
async def register(app):
    """ register node with headnode
    OK to call idempotently (e.g. if the headnode seems to have forgotten us)
    """
    head_url = await getHeadUrl(app)
    if not head_url:
        log.warn("head_url is not set, can not register yet")
        return
    req_reg = head_url + "/register"
    log.info("register: {}".format(req_reg))

    body = {
        "id": app["id"],
        "port": app["node_port"],
        "node_type": app["node_type"]
    }
    app['register_time'] = int(time.time())
    try:
        log.debug("register req: {} body: {}".format(req_reg, body))
        rsp_json = await http_post(app, req_reg, data=body)
        if rsp_json is not None:
            log.debug("register response: {}".format(rsp_json))
            app["node_number"] = rsp_json["node_number"]
            app["node_count"] = rsp_json["node_count"]
            log.info("setting node_state to WAITING")
            app["node_state"] = "WAITING"  # wait for other nodes to be active
    except OSError:
        log.error("failed to register")
async def putS3JSONObj(app, key, json_obj):
    """ Store JSON data as S3 object with given key
    """
    client = getS3Client(app)
    bucket = app['bucket_name']
    if key[0] == '/':
        key = key[1:]  # no leading slash
    log.info("putS3JSONObj({})".format(key))
    s3_stats_increment(app, "put_count")
    data = json.dumps(json_obj)
    data = data.encode('utf8')
    try:
        rsp = await client.put_object(Bucket=bucket, Key=key, Body=data)
        now = int(time.time())
        s3_rsp = {"etag": rsp["ETag"], "size": len(data), "lastModified": now}
    except ClientError as ce:
        s3_stats_increment(app, "error_count")
        msg = "Error putting s3 obj: " + str(ce)
        log.error(msg)
        raise HTTPInternalServerError()
    if data and len(data) > 0:
        s3_stats_increment(app, "bytes_out", inc=len(data))
    log.debug("putS3JSONObj complete, s3_rsp: {}".format(s3_rsp))
    return s3_rsp
async def DELETE_Group(request):
    """HTTP DELETE method for /groups/
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    group_id = get_obj_id(request)
    log.info("DELETE group: {}".format(group_id))

    if not isValidUuid(group_id, obj_class="group"):
        log.error("Unexpected group_id: {}".format(group_id))
        raise HTTPInternalServerError()

    # verify the id exists
    obj_found = await check_metadata_obj(app, group_id)
    if not obj_found:
        log.debug(f"delete called on non-existent obj: {group_id}")
        raise HTTPNotFound()

    log.debug("deleting group: {}".format(group_id))

    notify = True
    if "Notify" in params and not params["Notify"]:
        notify = False
    await delete_metadata_obj(app, group_id, notify=notify)

    resp_json = {}
    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def deleteS3Obj(app, key):
    """ Delete S3 object identified by given key """
    client = getS3Client(app)
    bucket = app['bucket_name']
    if key[0] == '/':
        key = key[1:]  # no leading slash
    log.info("deleteS3Obj({})".format(key))
    s3_stats_increment(app, "delete_count")
    try:
        await client.delete_object(Bucket=bucket, Key=key)
    except ClientError as ce:
        # key does not exist?
        key_found = await isS3Obj(app, key)
        if not key_found:
            log.warn(f"delete on s3key {key} but not found")
            raise HTTPNotFound()
        # else some other error
        s3_stats_increment(app, "error_count")
        msg = "Error deleting s3 obj: " + str(ce)
        log.error(msg)
        raise HTTPInternalServerError()
    log.debug("deleteS3Obj complete")
async def verifyDomain(domain):
    """ create domain if it doesn't already exist
    """
    params = {"host": domain}
    headers = getRequestHeaders()
    client = globals["client"]
    req = getEndpoint() + '/'
    root_id = None
    log.info("GET " + req)
    timeout = config.get("timeout")
    async with client.get(req, headers=headers, timeout=timeout, params=params) as rsp:
        if rsp.status == 200:
            domain_json = await rsp.json()
        else:
            log.info("got status: {}".format(rsp.status))
    if rsp.status == 200:
        root_id = domain_json["root"]
    elif rsp.status == 404:
        # create the domain
        setupDomain(domain)
        async with client.get(req, headers=headers, timeout=timeout, params=params) as rsp:
            if rsp.status == 200:
                domain_json = await rsp.json()
                root_id = domain_json["root"]
            else:
                log.error("got status: {} for GET req: {}".format(rsp.status, req))
                raise HttpProcessingError(code=rsp.status, message="Service error")
    globals["root"] = root_id
async def getS3ObjStats(app, key):
    """ Return etag, size, and last modified time for given object
    """
    client = getS3Client(app)
    bucket = app['bucket_name']
    stats = {}

    if key[0] == '/':
        # key = key[1:]  # no leading slash
        msg = "key with leading slash: {}".format(key)
        log.error(msg)
        raise KeyError(msg)

    log.info("getS3ObjStats({})".format(key))

    s3_stats_increment(app, "list_count")
    try:
        resp = await client.list_objects(Bucket=bucket, MaxKeys=1, Prefix=key)
    except ClientError as ce:
        # key does not exist?
        s3_stats_increment(app, "error_count")
        msg = "Error listing s3 obj: " + str(ce)
        log.error(msg)
        raise HTTPInternalServerError()
    if 'Contents' not in resp:
        msg = "key: {} not found".format(key)
        log.info(msg)
        raise HTTPInternalServerError()
    contents = resp['Contents']
    log.debug("s3_contents: {}".format(contents))

    found = False
    if len(contents) > 0:
        item = contents[0]
        if item["Key"] == key:
            # if the key is a S3 folder, the key will be the first object in the folder,
            # not the requested object
            found = True
            if item["ETag"]:
                etag = item["ETag"]
                if len(etag) > 2 and etag[0] == '"' and etag[-1] == '"':
                    # S3 returning extra quotes around etag?
                    etag = etag[1:-1]
                stats["ETag"] = etag
            else:
                if "Owner" in item and "ID" in item["Owner"] and item["Owner"]["ID"] == "minio":
                    pass  # minio is not creating ETags...
                else:
                    log.warn("No ETag for key: {}".format(key))
                # If no ETAG put in a fake one
                stats["ETag"] = "9999"
            stats["Size"] = item["Size"]
            stats["LastModified"] = int(item["LastModified"].timestamp())
    if not found:
        msg = "key: {} not found".format(key)
        log.info(msg)
        raise HTTPNotFound()

    return stats
async def GET_Group(request):
    """HTTP GET method to return JSON for /groups/
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    group_id = get_obj_id(request)
    if "bucket" in params:
        bucket = params["bucket"]
    else:
        bucket = None

    log.info(f"GET group: {group_id} bucket: {bucket}")
    if not isValidUuid(group_id, obj_class="group"):
        log.error("Unexpected group_id: {}".format(group_id))
        raise HTTPInternalServerError()

    group_json = await get_metadata_obj(app, group_id, bucket=bucket)

    resp_json = {}
    resp_json["id"] = group_json["id"]
    resp_json["root"] = group_json["root"]
    resp_json["created"] = group_json["created"]
    resp_json["lastModified"] = group_json["lastModified"]
    resp_json["linkCount"] = len(group_json["links"])
    resp_json["attributeCount"] = len(group_json["attributes"])
    if "include_links" in params and params["include_links"]:
        resp_json["links"] = group_json["links"]
    if "include_attrs" in params and params["include_attrs"]:
        resp_json["attributes"] = group_json["attributes"]

    resp = json_response(resp_json)
    log.response(request, resp=resp)
    return resp
async def GET_Attribute(request):
    """HTTP GET method to return JSON for /(obj)/<id>/attributes/<name>
    """
    log.request(request)
    app = request.app

    obj_id = get_obj_id(request)
    attr_name = request.match_info.get('name')
    validateAttributeName(attr_name)

    obj_json = await get_metadata_obj(app, obj_id)
    log.info("GET attribute obj_id: {} name: {}".format(obj_id, attr_name))
    log.debug(f"got obj_json: {obj_json}")

    if "attributes" not in obj_json:
        log.error("unexpected obj data for id: {}".format(obj_id))
        raise HTTPInternalServerError()

    attributes = obj_json["attributes"]
    if attr_name not in attributes:
        msg = f"Attribute '{attr_name}' not found for id: {obj_id}"
        log.warn(msg)
        raise HTTPNotFound()

    attr_json = attributes[attr_name]

    resp = json_response(attr_json)
    log.response(request, resp=resp)
    return resp