def list_credentials(page_token=None, page_size=None, sort_by=None, filter=None):  # noqa: E501
    """list_credentials

    :param page_token:
    :type page_token: str
    :param page_size:
    :type page_size: int
    :param sort_by: Can be format of \"field_name\", \"field_name asc\" or
        \"field_name desc\". Ascending by default.
    :type sort_by: str
    :param filter: A string-serialized JSON dictionary containing key-value pairs
        with the name of the object property to apply the filter on and the value
        of the respective property.
    :type filter: str

    :rtype: ApiListCredentialsResponse
    """
    if page_size == 0:
        return {}, 200

    # TODO: do not misuse page_token as MySQL result offset
    offset = int(page_token) if page_token and page_token.isdigit() else 0

    filter_dict = json.loads(filter) if filter else None

    api_credentials: [ApiCredential] = load_data(ApiCredential,
                                                 filter_dict=filter_dict,
                                                 sort_by=sort_by,
                                                 count=page_size,
                                                 offset=offset)

    next_page_token = (offset + page_size
                       if len(api_credentials) == page_size else None)

    total_size = num_rows(ApiCredential)

    if total_size == next_page_token:
        next_page_token = None

    secrets = list_secrets(name_prefix=secret_name_prefix)
    # TODO: consolidate Kubernetes secrets with MLX-registered credentials (i.e. add a status field?)

    comp_list = ApiListCredentialsResponse(credentials=api_credentials,
                                           total_size=total_size,
                                           next_page_token=next_page_token)
    return comp_list, 200
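
# Illustrative only: a minimal sketch (not part of this module) of how a caller
# could page through credentials using the contract above: `page_token` is
# (mis)used as the result offset and comes back as an integer `next_page_token`,
# and `filter` is a string-serialized JSON dictionary. The field names
# "created_at" and "name" are placeholders, not guaranteed ApiCredential fields.
def _iter_all_credentials(page_size=20):
    page_token = None
    while True:
        response, _status = list_credentials(
            page_token=page_token,
            page_size=page_size,
            sort_by="created_at desc",
            filter=json.dumps({"name": "my-credential"}))
        for credential in response.credentials:
            yield credential
        if response.next_page_token is None:
            break
        # the handler only accepts the token as a string of digits
        page_token = str(response.next_page_token)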
def list_notebooks(page_token=None, page_size=None, sort_by=None, filter=None):  # noqa: E501
    """list_notebooks

    :param page_token:
    :type page_token: str
    :param page_size:
    :type page_size: int
    :param sort_by: Can be format of \"field_name\", \"field_name asc\" or
        \"field_name desc\". Ascending by default.
    :type sort_by: str
    :param filter: A string-serialized JSON dictionary containing key-value pairs
        with the name of the object property to apply the filter on and the value
        of the respective property.
    :type filter: str

    :rtype: ApiListNotebooksResponse
    """
    if page_size == 0:
        return {}, 200

    # TODO: do not misuse page_token as MySQL result offset
    offset = int(page_token) if page_token and page_token.isdigit() else 0

    filter_dict = json.loads(filter) if filter else None

    api_notebooks: [ApiNotebook] = load_data(ApiNotebook,
                                             filter_dict=filter_dict,
                                             sort_by=sort_by,
                                             count=page_size,
                                             offset=offset)

    next_page_token = (offset + page_size
                       if len(api_notebooks) == page_size else None)

    total_size = num_rows(ApiNotebook)

    if total_size == next_page_token:
        next_page_token = None

    notebooks = ApiListNotebooksResponse(notebooks=api_notebooks,
                                         total_size=total_size,
                                         next_page_token=next_page_token)
    return notebooks, 200
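
# Illustrative only: `load_data` and `num_rows` are implemented elsewhere in
# this package. The hypothetical helper below sketches the kind of SQL query
# that the filter_dict/sort_by/count/offset arguments used by these handlers
# are assumed to translate into; none of the identifiers or query details are
# guaranteed to match the real implementation.
def _build_select(table_name, filter_dict=None, sort_by=None, count=None, offset=0):
    clauses = [f"SELECT * FROM {table_name}"]
    params = []
    if filter_dict:
        # one equality condition per filtered property, values passed as query parameters
        conditions = " AND ".join(f"{column} = %s" for column in filter_dict)
        clauses.append(f"WHERE {conditions}")
        params.extend(filter_dict.values())
    if sort_by:
        # accepts "field_name", "field_name asc" or "field_name desc"
        clauses.append(f"ORDER BY {sort_by}")
    if count:
        clauses.append(f"LIMIT {int(count)} OFFSET {int(offset)}")
    return " ".join(clauses), params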
def list_pipelines(page_token=None, page_size=None, sort_by=None, filter=None):  # noqa: E501
    """list_pipelines

    :param page_token:
    :type page_token: str
    :param page_size:
    :type page_size: int
    :param sort_by: Can be format of \"field_name\", \"field_name asc\" or
        \"field_name desc\". Ascending by default.
    :type sort_by: str
    :param filter: A string-serialized JSON dictionary containing key-value pairs
        with the name of the object property to apply the filter on and the value
        of the respective property.
    :type filter: str

    :rtype: ApiListPipelinesResponse
    """
    if page_size == 0:
        return {}, 200

    # TODO: do not misuse page_token as MySQL result offset
    offset = int(page_token) if page_token and page_token.isdigit() else 0

    filter_dict = json.loads(filter) if filter else {}

    # TODO: add filter_categories to ApiPipelineExtension (and give users a way
    #   to add category labels to pipelines); until then, remove categories from the filter
    if "filter_categories" in filter_dict:
        del filter_dict["filter_categories"]

    api_pipelines: [ApiPipeline] = load_data(ApiPipelineExtended,
                                             filter_dict=filter_dict,
                                             sort_by=sort_by,
                                             count=page_size,
                                             offset=offset)

    next_page_token = (offset + page_size
                       if len(api_pipelines) == page_size else None)

    total_size = num_rows(ApiPipeline)

    if total_size == next_page_token:
        next_page_token = None

    pipeline_list = ApiListPipelinesResponse(pipelines=api_pipelines,
                                             total_size=total_size,
                                             next_page_token=next_page_token)
    return pipeline_list, 200
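
# Illustrative only: the three list_* handlers above share the same pagination
# and filter handling. A hypothetical refactoring could pull that flow into a
# single helper like the sketch below (not part of this module); each handler
# would then only wrap the result in its ApiList*Response type. Note that
# list_pipelines loads ApiPipelineExtended while counting ApiPipeline rows,
# which this simplified sketch glosses over.
def _list_resources(api_class, page_token=None, page_size=None, sort_by=None,
                    filter=None, drop_filter_keys=()):
    if page_size == 0:
        return [], 0, None

    # page_token is (mis)used as the MySQL result offset, as in the handlers above
    offset = int(page_token) if page_token and page_token.isdigit() else 0

    filter_dict = json.loads(filter) if filter else {}
    for key in drop_filter_keys:  # e.g. "filter_categories" for pipelines
        filter_dict.pop(key, None)

    items = load_data(api_class, filter_dict=filter_dict, sort_by=sort_by,
                      count=page_size, offset=offset)

    next_page_token = offset + page_size if len(items) == page_size else None
    total_size = num_rows(api_class)

    if total_size == next_page_token:
        next_page_token = None

    return items, total_size, next_page_token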