class Node(Resource):
    """
    Differs from BaseResource in trans.set_query() mostly because it takes
    query_type as an input.
    """

    def __init__(self, **kwargs):
        """Set up the node translator and the request-parsing utils."""
        # Set translator
        from aiida.restapi.translator.node import NodeTranslator
        self.trans = NodeTranslator(**kwargs)

        # Alias the ORM class so it does not shadow this resource class
        # (consistent with the other Node resource in this file).
        from aiida.orm import Node as tNode
        self.tclass = tNode

        # Configure utils: forward only the keys Utils understands.
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            k: kwargs[k] for k in utils_conf_keys if k in kwargs
        }
        self.utils = Utils(**self.utils_confs)

    def get(self, pk=None, page=None):
        """
        Get method for the Node resource.

        :param pk: node primary key (overwritten by the value parsed from the path)
        :param page: page number, used for pagination (overwritten likewise)
        :return: http response built by ``Utils.build_response``
        """
        ## Decode url parts
        path = unquote(request.path)
        # NOTE(review): request.query_string is bytes under Flask on Python 3,
        # and unquote() on bytes raises -- presumably this targets Python 2 /
        # an older Flask. Confirm before reuse.
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, pk, query_type) = self.utils.parse_path(path)
        (limit, offset, perpage, orderby, filters, alist, nalist, elist,
         nelist) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the schema case which does not imply access to the DataBase
        if query_type == 'schema':
            ## Retrieve the schema
            results = self.trans.get_schema()
            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)

        elif query_type == "statistics":
            # The original code re-parsed the same query string here; the
            # values parsed above are identical, so the redundant call was
            # removed.
            headers = self.utils.build_headers(url=request.url, total_count=0)
            if filters:
                # assumes a 'user' '==' filter is present whenever any filter
                # is given -- TODO confirm against the URL grammar
                usr = filters["user"]["=="]
            else:
                usr = []
            results = self.trans.get_statistics(self.tclass, usr)

        elif query_type == "tree":
            if filters:
                # assumes a 'depth' '==' filter is present whenever any filter
                # is given -- TODO confirm against the URL grammar
                depth = filters["depth"]["=="]
            else:
                depth = None
            results = self.trans.get_io_tree(pk, depth)
            headers = self.utils.build_headers(url=request.url, total_count=0)

        else:
            ## Instantiate a translator and initialize it
            self.trans.set_query(filters=filters,
                                 orders=orderby,
                                 query_type=query_type,
                                 pk=pk,
                                 alist=alist,
                                 nalist=nalist,
                                 elist=elist,
                                 nelist=nelist)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset,
                 rel_pages) = self.utils.paginate(page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

            ## Retrieve results
            results = self.trans.get_results()

        ## Build response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=path,
                    pk=pk,
                    query_string=query_string,
                    resource_type=resource_type,
                    data=results)
        return self.utils.build_response(status=200, headers=headers, data=data)
class BaseResource(Resource):
    """
    Each derived class will instantiate a different type of translator.
    This is the only difference in the classes.
    """

    def __init__(self, **kwargs):
        self.trans = None

        # Forward only the configuration keys that Utils understands.
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            key: kwargs[key] for key in utils_conf_keys if key in kwargs
        }
        self.utils = Utils(**self.utils_confs)

    def get(self, pk=None, page=None):
        """
        Get method for the Computer resource

        :return:
        """
        # Decode the various parts of the incoming url
        path = unquote(request.path)
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        # Parse path and query string; the path supplies the authoritative
        # page/pk values (overwriting the keyword arguments)
        resource_type, page, pk, query_type = self.utils.parse_path(path)
        (limit, offset, perpage, orderby, filters, alist, nalist, elist,
         nelist) = self.utils.parse_query_string(query_string)

        # Validate what was parsed before touching the database
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        if query_type == 'schema':
            # The schema does not require any database access
            results = self.trans.get_schema()
            headers = self.utils.build_headers(url=request.url, total_count=1)
        else:
            # Set the query and initialize the underlying qb object
            self.trans.set_query(filters=filters, orders=orderby, pk=pk)

            total_count = self.trans.get_total_count()

            # Apply pagination when a page number was given
            if page is None:
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)
            else:
                limit, offset, rel_pages = self.utils.paginate(
                    page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)

            results = self.trans.get_results()

        # Assemble and return the response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=request.path,
                    pk=pk,
                    query_string=request.query_string,
                    resource_type=resource_type,
                    data=results)
        return self.utils.build_response(status=200, headers=headers, data=data)
class BaseResource(Resource):
    """
    Each derived class will instantiate a different type of translator.
    This is the only difference in the classes.
    """

    def __init__(self, **kwargs):
        # Translator is provided by the derived class
        self.trans = None

        # Flag to tell the path parser whether to expect a pk or a uuid pattern
        self.parse_pk_uuid = None

        # Configure utils: forward only the keys Utils understands
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            k: kwargs[k]
            for k in utils_conf_keys if k in kwargs
        }
        self.utils = Utils(**self.utils_confs)
        # flask-restful style per-method decorators, taken from kwargs
        self.method_decorators = {'get': kwargs.get('get_decorators', [])}

    # pylint: disable=too-many-locals,redefined-builtin,invalid-name
    def get(self, id=None, page=None):
        """
        Get method for the Computer resource

        :param id: identifier (overwritten by the value parsed from the path)
        :param page: page number for pagination (overwritten likewise)
        :return:
        """
        ## Decode url parts
        path = unquote(request.path)
        # NOTE(review): request.query_string is bytes under Flask on Python 3;
        # unquote() on bytes would raise -- confirm the targeted Flask/Python
        # version (the later revision of this class decodes to utf-8 first).
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, id,
         query_type) = self.utils.parse_path(path,
                                             parse_pk_uuid=self.parse_pk_uuid)

        # Many of the parsed values are unused by the base resource; the
        # leading underscore marks them as intentionally ignored.
        (limit, offset, perpage, orderby, filters, _alist, _nalist, _elist,
         _nelist, _downloadformat, _visformat, _filename,
         _rtype) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the schema case which does not imply access to the DataBase
        if query_type == 'schema':
            ## Retrieve the schema
            results = self.trans.get_schema()
            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)
        else:
            ## Set the query, and initialize qb object
            self.trans.set_query(filters=filters, orders=orderby, id=id)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset,
                 rel_pages) = self.utils.paginate(page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

            ## Retrieve results
            results = self.trans.get_results()

        ## Build response and return it
        # NOTE: path and query_string are reported raw (request.*), not the
        # unquoted local copies used above.
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=request.path,
                    id=id,
                    query_string=request.query_string,
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)
class Node(Resource):
    """
    Differs from BaseResource in trans.set_query() mostly because it takes
    query_type as an input and the presence of additional result types like "tree"
    """

    def __init__(self, **kwargs):
        # Set translator
        from aiida.restapi.translator.node import NodeTranslator
        self.trans = NodeTranslator(**kwargs)
        # Alias avoids shadowing this resource class with the ORM class
        from aiida.orm import Node as tNode
        self.tclass = tNode

        # Parse a uuid pattern in the URL path (not a pk)
        self.parse_pk_uuid = 'uuid'

        # Configure utils: forward only the keys Utils understands
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            k: kwargs[k]
            for k in utils_conf_keys if k in kwargs
        }
        self.utils = Utils(**self.utils_confs)
        # flask-restful style per-method decorators, taken from kwargs
        self.method_decorators = {'get': kwargs.get('get_decorators', [])}

    #pylint: disable=too-many-locals,too-many-statements
    #pylint: disable=redefined-builtin,invalid-name,too-many-branches
    def get(self, id=None, page=None):
        """
        Get method for the Node resource.

        :param id: node identifier (overwritten by the value parsed from the path)
        :param page: page number for pagination (overwritten likewise)
        :return:
        """
        ## Decode url parts
        path = unquote(request.path)
        # NOTE(review): request.query_string is bytes under Flask on Python 3;
        # unquote() on bytes would raise -- confirm the targeted Flask/Python
        # version (the later revision of this class decodes to utf-8 first).
        query_string = unquote(request.query_string)
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, id,
         query_type) = self.utils.parse_path(path,
                                             parse_pk_uuid=self.parse_pk_uuid)

        (limit, offset, perpage, orderby, filters, alist, nalist, elist,
         nelist, downloadformat, visformat, filename,
         rtype) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the schema case which does not imply access to the DataBase
        if query_type == 'schema':
            ## Retrieve the schema
            results = self.trans.get_schema()
            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)
        ## Treat the statistics
        elif query_type == "statistics":
            # NOTE(review): this re-parses the same query string already
            # parsed above; the result is identical, so the call looks
            # redundant -- candidate for removal.
            (limit, offset, perpage, orderby, filters, alist, nalist, elist,
             nelist, downloadformat, visformat, filename,
             rtype) = self.utils.parse_query_string(query_string)
            headers = self.utils.build_headers(url=request.url, total_count=0)
            if filters:
                # assumes a 'user' '==' filter is present whenever any filter
                # is given -- TODO confirm against the URL grammar
                usr = filters["user"]["=="]
            else:
                usr = []
            results = self.trans.get_statistics(usr)
        # TODO Might need to be improved
        elif query_type == "tree":
            headers = self.utils.build_headers(url=request.url, total_count=0)
            results = self.trans.get_io_tree(id)
        else:
            ## Initialize the translator
            self.trans.set_query(filters=filters,
                                 orders=orderby,
                                 query_type=query_type,
                                 id=id,
                                 alist=alist,
                                 nalist=nalist,
                                 elist=elist,
                                 nelist=nelist,
                                 downloadformat=downloadformat,
                                 visformat=visformat,
                                 filename=filename,
                                 rtype=rtype)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset,
                 rel_pages) = self.utils.paginate(page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                # A successful file download short-circuits the normal JSON
                # response and returns the raw bytes as an attachment.
                if query_type == "download" and results:
                    if results["download"]["status"] == 200:
                        data = results["download"]["data"]
                        response = make_response(data)
                        response.headers[
                            'content-type'] = 'application/octet-stream'
                        response.headers[
                            'Content-Disposition'] = 'attachment; filename="{}"'.format(
                                results["download"]["filename"])
                        return response
                    else:
                        # Non-200: fall through and report the payload (an
                        # error description) in the standard JSON envelope.
                        results = results["download"]["data"]

                if query_type in ["retrieved_inputs", "retrieved_outputs"
                                  ] and results:
                    # results may not carry a status (or not be subscriptable);
                    # treat both cases as "no downloadable payload".
                    try:
                        status = results[query_type]["status"]
                    except KeyError:
                        status = ""
                    except TypeError:
                        status = ""

                    if status == 200:
                        data = results[query_type]["data"]
                        response = make_response(data)
                        response.headers[
                            'content-type'] = 'application/octet-stream'
                        response.headers[
                            'Content-Disposition'] = 'attachment; filename="{}"'.format(
                                results[query_type]["filename"])
                        return response
                    elif status == 500:
                        results = results[query_type]["data"]

                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

        ## Build response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=path,
                    id=id,
                    query_string=query_string,
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)
class BaseResource(Resource):  # pylint: disable=fixme
    """
    Each derived class will instantiate a different type of translator.
    This is the only difference in the classes.
    """
    from aiida.restapi.translator.base import BaseTranslator

    # Translator type; derived classes override this with their own translator
    _translator_class = BaseTranslator
    _parse_pk_uuid = None  # Flag to tell the path parser whether to expect a pk or a uuid pattern

    method_decorators = [close_session
                         ]  # Close SQLA session after any method call

    ## TODO add the caching support. I cache total count, results, and possibly

    def __init__(self, **kwargs):
        self.trans = self._translator_class(**kwargs)

        # Configure utils: forward only the keys Utils understands
        utils_conf_keys = ('PREFIX', 'PERPAGE_DEFAULT', 'LIMIT_DEFAULT')
        self.utils_confs = {
            k: kwargs[k]
            for k in utils_conf_keys if k in kwargs
        }
        self.utils = Utils(**self.utils_confs)

        # HTTP Request method decorators (only applied when a proper
        # container of decorators was supplied)
        if 'get_decorators' in kwargs and isinstance(kwargs['get_decorators'],
                                                     (tuple, list, set)):
            self.method_decorators = {'get': list(kwargs['get_decorators'])}

    @classproperty
    def parse_pk_uuid(cls):  # pylint: disable=no-self-argument
        # Read-only class-level accessor for the pk/uuid parsing flag
        return cls._parse_pk_uuid

    def _load_and_verify(self, node_id=None):
        """Load node and verify it is of the required type"""
        from aiida.orm import load_node
        node = load_node(node_id)

        if not isinstance(node, self.trans._aiida_class):  # pylint: disable=protected-access,isinstance-second-argument-not-valid-type
            raise RestInputValidationError(
                f'node {node_id} is not of the required type {self.trans._aiida_class}'  # pylint: disable=protected-access
            )

        return node

    def get(self, id=None, page=None):
        # pylint: disable=redefined-builtin,invalid-name,unused-argument
        # pylint: disable=too-many-locals
        """
        Get method for the resource
        :param id: node identifier
        :param page: page no, used for pagination
        :return: http response
        """
        ## Decode url parts
        path = unquote(request.path)
        query_string = unquote(request.query_string.decode('utf-8'))
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, node_id,
         query_type) = self.utils.parse_path(path,
                                             parse_pk_uuid=self.parse_pk_uuid)
        # pylint: disable=unused-variable
        (limit, offset, perpage, orderby, filters, download_format, download,
         filename, tree_in_limit, tree_out_limit, attributes,
         attributes_filter, extras, extras_filter,
         full_type) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the projectable_properties case which does not imply access to the DataBase
        if query_type == 'projectable_properties':
            ## Retrieve the projectable properties
            projectable_properties, ordering = self.trans.get_projectable_properties(
            )
            results = dict(fields=projectable_properties, ordering=ordering)
            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)
        else:
            ## Set the query, and initialize qb object
            self.trans.set_query(filters=filters,
                                 orders=orderby,
                                 node_id=node_id)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset,
                 rel_pages) = self.utils.paginate(page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)
                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

            ## Retrieve results
            results = self.trans.get_results()

        ## Build response and return it
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=request.path,
                    id=node_id,
                    query_string=request.query_string.decode('utf-8'),
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)
class Node(BaseResource):
    """
    Differs from BaseResource in trans.set_query() mostly because it takes
    query_type as an input and the presence of additional result types like "tree"
    """
    from aiida.restapi.translator.nodes.node import NodeTranslator

    _translator_class = NodeTranslator
    _parse_pk_uuid = 'uuid'  # Parse a uuid pattern in the URL path (not a pk)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        from aiida.orm import Node as tNode
        self.tclass = tNode

        # NOTE: utils configuration already happens in BaseResource.__init__,
        # so the duplicated re-configuration was removed. The method
        # decorators are still overridden here because, unlike the base class
        # (which only accepts tuple/list/set), they are set unconditionally
        # from the 'get_decorators' keyword.
        self.method_decorators = {'get': kwargs.get('get_decorators', [])}

    def get(self, id=None, page=None):
        # pylint: disable=redefined-builtin,invalid-name,unused-argument
        # pylint: disable=too-many-locals,too-many-statements,too-many-branches,fixme,unused-variable
        """
        Get method for the Node resource.

        :param id: node identifier
        :param page: page no, used for pagination
        :return: http response
        """
        ## Decode url parts
        path = unquote(request.path)
        query_string = unquote(request.query_string.decode('utf-8'))
        url = unquote(request.url)
        url_root = unquote(request.url_root)

        ## Parse request
        (resource_type, page, node_id,
         query_type) = self.utils.parse_path(path,
                                             parse_pk_uuid=self.parse_pk_uuid)

        (limit, offset, perpage, orderby, filters, download_format, download,
         filename, tree_in_limit, tree_out_limit, attributes,
         attributes_filter, extras, extras_filter,
         full_type) = self.utils.parse_query_string(query_string)

        ## Validate request
        self.utils.validate_request(
            limit=limit,
            offset=offset,
            perpage=perpage,
            page=page,
            query_type=query_type,
            is_querystring_defined=(bool(query_string)))

        ## Treat the projectable properties case which does not imply access to the DataBase
        if query_type == 'projectable_properties':
            ## Retrieve the projectable properties
            projectable_properties, ordering = self.trans.get_projectable_properties(
            )
            results = dict(fields=projectable_properties, ordering=ordering)
            ## Build response and return it
            headers = self.utils.build_headers(url=request.url, total_count=1)
        ## Treat the statistics
        elif query_type == 'statistics':
            headers = self.utils.build_headers(url=request.url, total_count=0)
            if filters:
                # assumes a 'user' '==' filter is present whenever any filter
                # is given -- TODO confirm against the URL grammar
                usr = filters['user']['==']
            else:
                usr = None
            results = self.trans.get_statistics(usr)
        elif query_type == 'full_types':
            headers = self.utils.build_headers(url=request.url, total_count=0)
            results = self.trans.get_namespace()
        # TODO improve the performance of tree endpoint by getting the data from database faster
        # TODO add pagination for this endpoint (add default max limit)
        elif query_type == 'tree':
            headers = self.utils.build_headers(url=request.url, total_count=0)
            results = self.trans.get_io_tree(node_id, tree_in_limit,
                                             tree_out_limit)
        elif node_id is None and query_type == 'download_formats':
            headers = self.utils.build_headers(url=request.url, total_count=0)
            results = self.trans.get_all_download_formats(full_type)
        else:
            ## Initialize the translator
            self.trans.set_query(filters=filters,
                                 orders=orderby,
                                 query_type=query_type,
                                 node_id=node_id,
                                 download_format=download_format,
                                 download=download,
                                 filename=filename,
                                 attributes=attributes,
                                 attributes_filter=attributes_filter,
                                 extras=extras,
                                 extras_filter=extras_filter,
                                 full_type=full_type)

            ## Count results
            total_count = self.trans.get_total_count()

            ## Pagination (if required)
            if page is not None:
                (limit, offset,
                 rel_pages) = self.utils.paginate(page, perpage, total_count)
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                headers = self.utils.build_headers(rel_pages=rel_pages,
                                                   url=request.url,
                                                   total_count=total_count)
            else:
                self.trans.set_limit_offset(limit=limit, offset=offset)

                ## Retrieve results
                results = self.trans.get_results()

                # Repository file contents are returned as a raw attachment,
                # short-circuiting the normal JSON response.
                if query_type == 'repo_contents' and results:
                    response = make_response(results)
                    response.headers[
                        'content-type'] = 'application/octet-stream'
                    response.headers[
                        'Content-Disposition'] = 'attachment; filename="{}"'.format(
                            filename)
                    return response

                # Actual downloads (unless explicitly disabled via
                # download=false) are returned as attachments too.
                if query_type == 'download' and download not in [
                        'false', 'False', False
                ] and results:
                    if results['download']['status'] == 200:
                        data = results['download']['data']
                        response = make_response(data)
                        response.headers[
                            'content-type'] = 'application/octet-stream'
                        response.headers[
                            'Content-Disposition'] = 'attachment; filename="{}"'.format(
                                results['download']['filename'])
                        return response
                    # Non-200: report the payload in the JSON envelope instead
                    results = results['download']['data']

                headers = self.utils.build_headers(url=request.url,
                                                   total_count=total_count)

            # Fold flat 'attributes.<name>' projections back into a nested
            # 'attributes' dict on every returned node.
            # NOTE(review): assumes each projected key is present on every
            # node -- a missing projection would raise KeyError; confirm the
            # translator guarantees this.
            if attributes_filter is not None and attributes:
                for node in results['nodes']:
                    node['attributes'] = {}
                    if not isinstance(attributes_filter, list):
                        attributes_filter = [attributes_filter]
                    for attr in attributes_filter:
                        node['attributes'][str(attr)] = node['attributes.' +
                                                             str(attr)]
                        del node['attributes.' + str(attr)]

            # Same folding for 'extras.<name>' projections.
            if extras_filter is not None and extras:
                for node in results['nodes']:
                    node['extras'] = {}
                    if not isinstance(extras_filter, list):
                        extras_filter = [extras_filter]
                    for extra in extras_filter:
                        node['extras'][str(extra)] = node['extras.' +
                                                          str(extra)]
                        del node['extras.' + str(extra)]

        ## Build response
        data = dict(method=request.method,
                    url=url,
                    url_root=url_root,
                    path=path,
                    id=node_id,
                    query_string=request.query_string.decode('utf-8'),
                    resource_type=resource_type,
                    data=results)

        return self.utils.build_response(status=200, headers=headers, data=data)