def __init__(self, **kwargs):
    # Set translator
    from aiida.restapi.translator.node import NodeTranslator
    self.trans = NodeTranslator(**kwargs)

    from aiida.orm import Node
    self.tclass = Node

    # Configure utils
    utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
    self.utils_confs = {k: kwargs[k] for k in utils_conf_keys
                        if k in kwargs}
    self.utils = Utils(**self.utils_confs)
def get_downloadable_data(node, format=None):
    """
    Return the content of the file stored in the repository of the node,
    so that it can be downloaded.

    :param node: node object whose file has to be downloaded
    :param format: file extension format
    :returns: a dictionary with the response status, the file content and
        the filename (or an explanatory message if no file is present)
    """
    response = {}

    if node.folder.exists():
        folder_node = node._get_folder_pathsubfolder
        filename = node.filename
        try:
            content = NodeTranslator.get_file_content(folder_node, filename)
        except IOError as e:
            error = "Error in getting {} content".format(filename)
            raise RestInputValidationError(error)

        response["status"] = 200
        response["data"] = content
        response["filename"] = filename
    else:
        response["status"] = 200
        response["data"] = "file does not exist"

    return response
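# Usage sketch for the function above: a minimal example, assuming a node that
# exposes ``.folder`` and ``.filename`` (e.g. a single-file data node) has
# already been stored; the pk passed to load_node is purely illustrative.
from aiida.orm import load_node

node = load_node(1234)  # hypothetical pk
response = get_downloadable_data(node)

if response["status"] == 200 and "filename" in response:
    content = response["data"]
    # The content may be str or bytes depending on the AiiDA version.
    mode = "wb" if isinstance(content, bytes) else "w"
    with open(response["filename"], mode) as handle:
        handle.write(content)
else:
    print(response["data"])  # e.g. "file does not exist"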
def __init__(self, **kwargs):
    # Set translator
    from aiida.restapi.translator.node import NodeTranslator
    self.trans = NodeTranslator(**kwargs)

    from aiida.orm import Node as tNode
    self.tclass = tNode

    # Parse a uuid pattern in the URL path (not a pk)
    self.parse_pk_uuid = 'uuid'

    # Configure utils
    utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
    self.utils_confs = {k: kwargs[k] for k in utils_conf_keys
                        if k in kwargs}
    self.utils = Utils(**self.utils_confs)
    self.method_decorators = {'get': kwargs.get('get_decorators', [])}
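# Configuration sketch for the constructor above: the keyword names are the
# ones actually read by __init__ (the utils_conf_keys plus 'get_decorators');
# the concrete values and the require_auth decorator are assumptions added
# only for illustration.
def require_auth(func):
    """Hypothetical decorator that could be injected via 'get_decorators'."""
    def wrapper(*args, **kwargs):
        # e.g. verify a token here before serving the GET request
        return func(*args, **kwargs)
    return wrapper

resource_kwargs = {
    'PREFIX': '/api/v2',               # assumed URL prefix forwarded to Utils
    'PERPAGE_DEFAULT': 20,             # assumed default results per page
    'LIMIT_DEFAULT': 400,              # assumed default upper bound on results
    'get_decorators': [require_auth],  # wraps the resource's get() method
}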
def get_retrieved_outputs(node, filename=None, rtype=None):
    """
    Get the retrieved output files for a job calculation.

    :param node: aiida node
    :return: the retrieved output files of the job calculation
    """
    if node.dbnode.type.startswith("calculation.job."):
        retrieved_folder = node.out.retrieved
        response = {}

        if retrieved_folder is None:
            response["status"] = 200
            response["data"] = "This node does not have retrieved folder"
            return response

        output_folder = retrieved_folder._get_folder_pathsubfolder

        if filename is not None:
            if rtype is None:
                rtype = "download"

            if rtype == "download":
                try:
                    content = NodeTranslator.get_file_content(output_folder,
                                                              filename)
                except IOError as e:
                    error = "Error in getting {} content".format(filename)
                    raise RestInputValidationError(error)

                response["status"] = 200
                response["data"] = content
                response["filename"] = filename.replace("/", "_")
            else:
                raise RestInputValidationError("rtype is not supported")

            return response

        # if filename is not provided, return list of all retrieved files
        retrieved = CalculationTranslator.get_files_list(output_folder)
        return retrieved

    return []
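# Usage sketch for the helper above, assuming a finished job calculation has
# been stored; the pk and the file name are purely illustrative.
from aiida.orm import load_node

calc = load_node(5678)  # hypothetical pk of a job calculation

# Without a filename: list every file in the retrieved folder.
file_list = get_retrieved_outputs(calc)

# With a filename (rtype defaults to "download"): fetch that file's content.
single_file = get_retrieved_outputs(calc, filename="aiida.out")
if single_file and single_file.get("status") == 200:
    print(single_file["filename"], len(single_file["data"]))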
class Node(Resource):
    ## Differs from BaseResource in trans.set_query() mostly because it takes
    # query_type as an input

    def __init__(self, **kwargs):
        # Set translator
        from aiida.restapi.translator.node import NodeTranslator
        self.trans = NodeTranslator(**kwargs)

        from aiida.orm import Node
        self.tclass = Node

        # Configure utils
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {k: kwargs[k] for k in utils_conf_keys
                            if k in kwargs}
        self.utils = Utils(**self.utils_confs)

    def get(self, pk=None, page=None):
        """
        Get method for the Node resource.

        :return:
        """
        ## Decode url parts
        path = unquote(request.path)
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, pk, query_type) = self.utils.parse_path(path)

        (limit, offset, perpage, orderby, filters, alist, nalist, elist,
         nelist) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(limit=limit, offset=offset,
                                    perpage=perpage, page=page,
                                    query_type=query_type,
                                    is_querystring_defined=(bool(query_string)))

        ## Treat the schema case which does not imply access to the DataBase
        if query_type == 'schema':

            ## Retrieve the schema
            results = self.trans.get_schema()

            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)

        elif query_type == "statistics":
            (limit, offset, perpage, orderby, filters, alist, nalist, elist,
             nelist) = self.utils.parse_query_string(query_string)
            headers = self.utils.build_headers(url=request.url, total_count=0)
            if len(filters) > 0:
                usr = filters["user"]["=="]
            else:
                usr = []
            results = self.trans.get_statistics(self.tclass, usr)

        elif query_type == "tree":
            if len(filters) > 0:
                depth = filters["depth"]["=="]
            else:
                depth = None
            results = self.trans.get_io_tree(pk, depth)
            headers = self.utils.build_headers(url=request.url, total_count=0)

        else:
            ## Instantiate a translator and initialize it
            self.trans.set_query(filters=filters, orders=orderby,
                                 query_type=query_type, pk=pk, alist=alist,
                                 nalist=nalist, elist=elist, nelist=nelist)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset, rel_pages) = self.utils.paginate(page, perpage,
                                                                 total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

            ## Retrieve results
            results = self.trans.get_results()

        ## Build response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=path,
                    pk=pk,
                    query_string=query_string,
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)
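# Registration sketch for the resource above, assuming it is mounted through
# Flask-RESTful (as the AiiDA REST API does); the URL rules and configuration
# values are assumptions, only the forwarding of resource_class_kwargs to
# __init__ is relied upon.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

api.add_resource(
    Node,
    '/nodes/',                  # assumed collection endpoint
    '/nodes/page/<int:page>/',  # assumed paginated endpoint
    '/nodes/<int:pk>/',         # assumed single-node endpoint (pk based)
    resource_class_kwargs={
        'PREFIX': '/api/v2',
        'PERPAGE_DEFAULT': 20,
        'LIMIT_DEFAULT': 400,
    })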
class Node(Resource):
    """
    Differs from BaseResource in trans.set_query() mostly because it takes
    query_type as an input and the presence of additional result types like
    "tree"
    """

    def __init__(self, **kwargs):
        # Set translator
        from aiida.restapi.translator.node import NodeTranslator
        self.trans = NodeTranslator(**kwargs)

        from aiida.orm import Node as tNode
        self.tclass = tNode

        # Parse a uuid pattern in the URL path (not a pk)
        self.parse_pk_uuid = 'uuid'

        # Configure utils
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            k: kwargs[k] for k in utils_conf_keys if k in kwargs
        }
        self.utils = Utils(**self.utils_confs)
        self.method_decorators = {'get': kwargs.get('get_decorators', [])}

    #pylint: disable=too-many-locals,too-many-statements
    #pylint: disable=redefined-builtin,invalid-name,too-many-branches
    def get(self, id=None, page=None):
        """
        Get method for the Node resource.

        :return:
        """
        ## Decode url parts
        path = unquote(request.path)
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, id, query_type) = self.utils.parse_path(
            path, parse_pk_uuid=self.parse_pk_uuid)

        (limit, offset, perpage, orderby, filters, alist, nalist, elist,
         nelist, downloadformat, visformat, filename,
         rtype) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the schema case which does not imply access to the DataBase
        if query_type == 'schema':

            ## Retrieve the schema
            results = self.trans.get_schema()

            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)

        ## Treat the statistics
        elif query_type == "statistics":
            (limit, offset, perpage, orderby, filters, alist, nalist, elist,
             nelist, downloadformat, visformat, filename,
             rtype) = self.utils.parse_query_string(query_string)
            headers = self.utils.build_headers(url=request.url, total_count=0)
            if filters:
                usr = filters["user"]["=="]
            else:
                usr = []
            results = self.trans.get_statistics(usr)

        # TODO Might need to be improved
        elif query_type == "tree":
            headers = self.utils.build_headers(url=request.url, total_count=0)
            results = self.trans.get_io_tree(id)

        else:
            ## Initialize the translator
            self.trans.set_query(filters=filters,
                                 orders=orderby,
                                 query_type=query_type,
                                 id=id,
                                 alist=alist,
                                 nalist=nalist,
                                 elist=elist,
                                 nelist=nelist,
                                 downloadformat=downloadformat,
                                 visformat=visformat,
                                 filename=filename,
                                 rtype=rtype)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset, rel_pages) = self.utils.paginate(
                    page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                if query_type == "download" and results:
                    if results["download"]["status"] == 200:
                        data = results["download"]["data"]
                        response = make_response(data)
                        response.headers['content-type'] = \
                            'application/octet-stream'
                        response.headers['Content-Disposition'] = \
                            'attachment; filename="{}"'.format(
                                results["download"]["filename"])
                        return response
                    else:
                        results = results["download"]["data"]

                if query_type in ["retrieved_inputs", "retrieved_outputs"] \
                        and results:
                    try:
                        status = results[query_type]["status"]
                    except (KeyError, TypeError):
                        status = ""

                    if status == 200:
                        data = results[query_type]["data"]
                        response = make_response(data)
                        response.headers['content-type'] = \
                            'application/octet-stream'
                        response.headers['Content-Disposition'] = \
                            'attachment; filename="{}"'.format(
                                results[query_type]["filename"])
                        return response
                    elif status == 500:
                        results = results[query_type]["data"]

                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

        ## Build response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=path,
                    id=id,
                    query_string=query_string,
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)
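# Client-side sketch for the resource above, assuming a REST server exposing
# it is running locally; the base URL, the 'X-Total-Count' header and the
# layout of the JSON payload are assumptions for illustration only.
import requests

base_url = 'http://localhost:5000/api/v2'  # assumed host, port and prefix

# Paginated listing: perpage and orderby map to the query-string options
# handled by parse_query_string() above.
resp = requests.get(base_url + '/nodes/page/1?perpage=10&orderby=-ctime')
print(resp.headers.get('X-Total-Count'))  # assumed pagination header
print(resp.json()['data'])                # 'data' mirrors the dict built in get()

# Single node addressed by uuid (parse_pk_uuid == 'uuid' in this resource).
resp = requests.get(
    base_url + '/nodes/4a2b3c1d-0000-0000-0000-000000000000')  # hypothetical uuid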
def __init__(self):
    from aiida.restapi.translator.node import NodeTranslator
    self.trans = NodeTranslator()

    from aiida.orm import Node
    self.tclass = Node