def _store_pipeline(yaml_file_content: AnyStr, name=None, description=None):

    yaml_dict = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    template_metadata = yaml_dict.get("metadata") or dict()
    annotations = template_metadata.get("annotations", {})
    pipeline_spec = json.loads(annotations.get("pipelines.kubeflow.org/pipeline_spec", "{}"))

    name = name or template_metadata["name"]
    description = description or pipeline_spec.get("description", "").strip()
    namespace = pipeline_spec.get("namespace", "").strip()
    pipeline_id = "-".join([generate_id(length=l) for l in [8, 4, 4, 4, 12]])
    created_at = datetime.now()

    parameters = [ApiParameter(name=p.get("name"), description=p.get("description"),
                               default=p.get("default"), value=p.get("value"))
                  for p in yaml_dict["spec"].get("params", [])]

    api_pipeline = ApiPipeline(id=pipeline_id,
                               created_at=created_at,
                               name=name,
                               description=description,
                               parameters=parameters,
                               namespace=namespace)

    uuid = store_data(api_pipeline)

    api_pipeline.id = uuid

    store_file(bucket_name="mlpipeline", prefix="pipelines/",
               file_name=f"{pipeline_id}", file_content=yaml_file_content)

    enable_anonymous_read_access(bucket_name="mlpipeline", prefix="pipelines/*")

    return api_pipeline
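

# Hedged usage sketch (not part of the original module): shows how _store_pipeline() might
# be exercised, e.g. from a test or an upload endpoint. The file name "sample_pipeline.yaml"
# and the pipeline name are hypothetical.
def _example_store_pipeline(path: str = "sample_pipeline.yaml"):
    with open(path, "rb") as f:
        api_pipeline = _store_pipeline(f.read(), name="sample-pipeline")
    # the returned object carries the UUID assigned by store_data()
    return api_pipeline.id
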
def _upload_model_yaml(yaml_file_content: AnyStr, name=None, existing_id=None):

    model_def = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    api_model = ApiModel(
        id=existing_id or model_def.get("model_identifier") or generate_id(name=name or model_def["name"]),
        created_at=datetime.now(),
        name=name or model_def["name"],
        description=model_def["description"].strip(),
        domain=model_def.get("domain") or "",
        labels=model_def.get("labels") or dict(),
        framework=model_def["framework"],
        filter_categories=model_def.get("filter_categories") or dict(),
        trainable=model_def.get("train", {}).get("trainable") or False,
        trainable_tested_platforms=model_def.get("train", {}).get("tested_platforms") or [],
        trainable_credentials_required=model_def.get("train", {}).get("credentials_required") or False,
        trainable_parameters=model_def.get("train", {}).get("input_params") or [],
        servable=model_def.get("serve", {}).get("servable") or False,
        servable_tested_platforms=model_def.get("serve", {}).get("tested_platforms") or [],
        servable_credentials_required=model_def.get("serve", {}).get("credentials_required") or False,
        servable_parameters=model_def.get("serve", {}).get("input_params") or [])

    # convert comma-separated strings to lists
    if isinstance(api_model.trainable_tested_platforms, str):
        api_model.trainable_tested_platforms = api_model.trainable_tested_platforms.replace(", ", ",").split(",")

    if isinstance(api_model.servable_tested_platforms, str):
        api_model.servable_tested_platforms = api_model.servable_tested_platforms.replace(", ", ",").split(",")

    uuid = store_data(api_model)

    api_model.id = uuid

    store_file(bucket_name="mlpipeline", prefix=f"models/{api_model.id}/",
               file_name="template.yaml", file_content=yaml_file_content,
               content_type="text/yaml")

    enable_anonymous_read_access(bucket_name="mlpipeline", prefix="models/*")

    return api_model, 201
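

# Hedged usage sketch (not part of the original module): _upload_model_yaml() returns a
# (ApiModel, HTTP status) tuple, so a caller would unpack it. The file name below is
# hypothetical.
def _example_upload_model(path: str = "sample_model.yaml"):
    with open(path, "rb") as f:
        api_model, status = _upload_model_yaml(f.read())
    assert status == 201
    return api_model
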
def _upload_component_yaml(yaml_file_content: AnyStr, name=None, existing_id=None):

    yaml_dict = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    template_metadata = yaml_dict.get("metadata") or dict()

    component_id = existing_id or generate_id(name=name or yaml_dict["name"])
    created_at = datetime.now()
    name = name or yaml_dict["name"]
    description = (yaml_dict.get("description") or name).strip()[:255]
    filter_categories = yaml_dict.get("filter_categories") or dict()

    metadata = ApiMetadata(annotations=template_metadata.get("annotations"),
                           labels=template_metadata.get("labels"),
                           tags=template_metadata.get("tags"))

    parameters = [ApiParameter(name=p.get("name"), description=p.get("description"),
                               default=p.get("default"), value=p.get("value"))
                  for p in yaml_dict.get("inputs", [])]

    api_component = ApiComponent(id=component_id,
                                 created_at=created_at,
                                 name=name,
                                 description=description,
                                 metadata=metadata,
                                 parameters=parameters,
                                 filter_categories=filter_categories)

    uuid = store_data(api_component)

    api_component.id = uuid

    store_file(bucket_name="mlpipeline", prefix=f"components/{component_id}/",
               file_name="template.yaml", file_content=yaml_file_content,
               content_type="text/yaml")

    enable_anonymous_read_access(bucket_name="mlpipeline", prefix="components/*")

    return api_component, 201
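

# Hedged usage sketch (not part of the original module): uploads a component template and
# returns the stored ApiComponent. The file name "sample_component.yaml" is hypothetical.
def _example_upload_component(path: str = "sample_component.yaml"):
    with open(path, "rb") as f:
        api_component, status = _upload_component_yaml(f.read())
    assert status == 201
    return api_component
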
def generate_notebook_run_script(api_notebook: ApiNotebook,
                                 parameters: dict = {},
                                 run_name: str = None,
                                 hide_secrets: bool = True):

    if "dataset_pvc" in parameters:
        template_file = "run_notebook_with_dataset.TEMPLATE.py"
    else:
        template_file = "run_notebook.TEMPLATE.py"

    with open(join(CODE_TEMPLATE_DIR, template_file), 'r') as f:
        template_raw = f.read()

    notebook_file = api_notebook.url.split("/")[-1]

    requirements_url = get_object_url(bucket_name="mlpipeline",
                                      prefix=f"notebooks/{api_notebook.id}/",
                                      file_extensions=[".txt"],
                                      file_name_filter="requirements")

    cos_dependencies_archive_url = get_object_url(bucket_name="mlpipeline",
                                                  prefix=f"notebooks/{api_notebook.id}/",
                                                  file_extensions=[".tar.gz"],
                                                  file_name_filter="elyra-dependencies-archive")

    if not cos_dependencies_archive_url:

        tar, bytes_io = create_tarfile(bucket_name="mlpipeline",
                                       prefix=f"notebooks/{api_notebook.id}/",
                                       file_extensions=[".ipynb"])

        cos_dependencies_archive_url = store_file(bucket_name="mlpipeline",
                                                  prefix=f"notebooks/{api_notebook.id}/",
                                                  file_name="elyra-dependencies-archive.tar.gz",
                                                  file_content=bytes_io.getvalue())

    cos_dependencies_archive = cos_dependencies_archive_url.split("/")[-1]

    # TODO: move this into an ApiNotebook.image field as opposed to parsing the YAML here
    yaml_file_content = retrieve_file_content(bucket_name="mlpipeline",
                                              prefix=f"notebooks/{api_notebook.id}/",
                                              file_extensions=[".yaml", ".yml"])
    metadata_yaml = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    image = metadata_yaml["implementation"]["github"].get("image", "tensorflow/tensorflow:latest")

    # TODO: elyra-ai/kfp-notebook generates the output notebook with an "-output.ipynb" suffix:
    #   https://github.com/elyra-ai/kfp-notebook/blob/c8f1298/etc/docker-scripts/bootstrapper.py#L188-L190
    #   so here we may consider renaming the generated file with a datetimestamp
    # output_folder = f"notebooks/{api_notebook.id}/runs/{datetime.now().strftime('%Y%m%d-%H%M%S')}"
    # output_file_name = notebook_file_name.replace(r'.ipynb', '-output.ipynb')
    # output_file_path = f"{output_folder}/{output_file_name}"
    # output_file_url = f"http://{minio_host}:{minio_port}/mlpipeline/{output_file_path}"

    kfp_url = f"'{_pipeline_service_url}'" if "POD_NAMESPACE" not in os.environ else ""

    substitutions = {
        "name": api_notebook.name,
        "notebook": notebook_file,
        "cos_bucket": "mlpipeline",
        "cos_directory": f"notebooks/{api_notebook.id}/",
        "cos_dependencies_archive": cos_dependencies_archive,
        "cos_endpoint": "***",
        "cos_username": "******",
        "cos_password": "******",
        "requirements_url": requirements_url or "",
        "image": image,
        "pipeline_server": kfp_url,
        "run_name": run_name or api_notebook.name
    }

    # TODO: make the `dataset_pvc` and `mount_path` parameters part of the Swagger spec?
    if "dataset_pvc" in parameters:
        substitutions.update({
            "dataset_pvc": parameters["dataset_pvc"],
            "mount_path": parameters.get("mount_path", "/tmp/data")
        })

    if not hide_secrets:
        substitutions.update({
            "cos_endpoint": f"http://{minio_host}:{minio_port}/minio",
            "cos_username": minio_access_key,
            "cos_password": minio_secret_key
        })

    run_script = Template(template_raw).substitute(substitutions)

    return run_script
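

# Hedged usage sketch (not part of the original module): renders the run script for an
# already registered notebook, optionally mounting a dataset PVC. The PVC name "my-data-pvc"
# is hypothetical; credentials stay masked unless hide_secrets=False.
def _example_generate_run_script(api_notebook: ApiNotebook):
    return generate_notebook_run_script(api_notebook,
                                        parameters={"dataset_pvc": "my-data-pvc"},
                                        run_name=f"{api_notebook.name} test run",
                                        hide_secrets=True)
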
def _upload_notebook_yaml(yaml_file_content: AnyStr, name=None, access_token=None, existing_id=None):

    yaml_dict = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    template_metadata = yaml_dict.get("metadata") or dict()

    notebook_id = existing_id or generate_id(name=name or yaml_dict["name"])
    created_at = datetime.now()
    name = name or yaml_dict["name"]
    description = yaml_dict["description"].strip()
    url = yaml_dict["implementation"]["github"]["source"]
    requirements = yaml_dict["implementation"]["github"].get("requirements")

    metadata = ApiMetadata(annotations=template_metadata.get("annotations"),
                           labels=template_metadata.get("labels"),
                           tags=template_metadata.get("tags"))

    notebook_content = _download_notebook(url, enterprise_github_api_token=access_token)

    # parameters = _extract_notebook_parameters(notebook_content)
    # TODO: not using Papermill any longer, are notebook parameters still valid?
    #   kfp-notebook has inputs and outputs?
    parameters = dict()

    api_notebook = ApiNotebook(id=notebook_id,
                               created_at=created_at,
                               name=name,
                               description=description,
                               url=url,
                               metadata=metadata,
                               parameters=parameters)

    uuid = store_data(api_notebook)

    api_notebook.id = uuid

    store_file(bucket_name="mlpipeline", prefix=f"notebooks/{notebook_id}/",
               file_name="template.yaml", file_content=yaml_file_content)

    s3_url = store_file(bucket_name="mlpipeline",
                        prefix=f"notebooks/{notebook_id}/",
                        file_name=url.split("/")[-1].split("?")[0],
                        file_content=json.dumps(notebook_content).encode())

    if requirements:
        if _is_url(requirements):
            requirements_url = requirements
            requirements_txt = download_file_content_from_url(requirements_url).decode()
        else:
            requirements_txt = "\n".join(requirements.split(","))

        # TODO: remove this after fixing the elyra-ai/kfp-notebook runner so that
        #   Elyra installs its own requirements in addition to the provided requirements
        requirements_elyra_url = "https://github.com/elyra-ai/kfp-notebook/blob/master/etc/requirements-elyra.txt"
        requirements_elyra_txt = download_file_content_from_url(requirements_elyra_url).decode()
        requirements_elyra = "\n".join([line for line in requirements_elyra_txt.split("\n")
                                        if not line.startswith("#")])

        requirements_all = f"# Required packages for {api_notebook.name}:\n" \
                           f"{requirements_txt}\n" \
                           f"# Requirements from {requirements_elyra_url}:\n" \
                           f"{requirements_elyra}"

        store_file(bucket_name="mlpipeline", prefix=f"notebooks/{notebook_id}/",
                   file_name="requirements.txt", file_content=requirements_all.encode())

    # if the url included an access token, replace the original url with the s3 url
    if "?token=" in url or "github.ibm.com" in url:
        api_notebook.url = s3_url
        update_multiple(ApiNotebook, [notebook_id], "url", s3_url)

    enable_anonymous_read_access(bucket_name="mlpipeline", prefix="notebooks/*")

    return api_notebook, 201
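

# Hedged usage sketch (not part of the original module): uploads a notebook template,
# passing an optional enterprise GitHub token so _download_notebook() can fetch private
# sources. The file name and token value are hypothetical.
def _example_upload_notebook(path: str = "sample_notebook.yaml", token: str = None):
    with open(path, "rb") as f:
        api_notebook, status = _upload_notebook_yaml(f.read(), access_token=token)
    assert status == 201
    return api_notebook
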
def _upload_dataset_yaml(yaml_file_content: AnyStr, name=None, existing_id=None):

    yaml_dict = yaml.load(yaml_file_content, Loader=yaml.FullLoader)

    name = name or yaml_dict["name"]
    description = yaml_dict["description"]
    dataset_id = existing_id or generate_id(name=yaml_dict.get("id", name))
    created_at = datetime.now()

    # if yaml_dict.get("id") != dataset_id:
    #     raise ValueError(f"Dataset.id contains non k8s character: {yaml_dict.get('id')}")

    # TODO: re-evaluate if we should use the dataset update time as our MLX "created_at" time
    if "updated" in yaml_dict:
        created_at = datetime.strptime(str(yaml_dict["updated"]), "%Y-%m-%d")
    elif "created" in yaml_dict:
        created_at = datetime.strptime(str(yaml_dict["created"]), "%Y-%m-%d")

    license_name = yaml_dict["license"]["name"]
    domain = yaml_dict["domain"]
    format_type = yaml_dict["format"][0]["type"]
    size = yaml_dict["content"][0].get("size")
    version = yaml_dict["version"]

    # # extract number of records and convert thousand separators based on locale
    # num_records_str = yaml_dict["statistics"]["number_of_records"]
    # num_records_number_str = num_records_str.split()[0]. \
    #     replace("~", ""). \
    #     replace("+", ""). \
    #     replace("k", "000"). \
    #     replace(",", "")  # assumes thousand separators in locale.en_US.UTF-8
    # # locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')  # setting locale does not work reliably in Docker
    # # number_of_records = locale.atoi(num_records_number_str)
    # number_of_records = int(num_records_number_str)
    number_of_records = yaml_dict["content"][0].get("records", 0)

    related_assets = [a["application"].get("asset_id")
                      for a in yaml_dict.get("related_assets", [])
                      if "MLX" in a.get("application", {}).get("name", "")
                      and "asset_id" in a.get("application", {})]

    template_metadata = yaml_dict.get("metadata") or dict()
    metadata = ApiMetadata(annotations=template_metadata.get("annotations"),
                           labels=template_metadata.get("labels"),
                           tags=template_metadata.get("tags") or yaml_dict.get("seo_tags"))

    # TODO: add "version" to ApiDataset
    api_dataset = ApiDataset(id=dataset_id,
                             created_at=created_at,
                             name=name,
                             description=description,
                             domain=domain,
                             format=format_type,
                             size=size,
                             number_of_records=number_of_records,
                             license=license_name,
                             metadata=metadata,
                             related_assets=related_assets)

    uuid = store_data(api_dataset)

    api_dataset.id = uuid

    store_file(bucket_name="mlpipeline", prefix=f"datasets/{api_dataset.id}/",
               file_name="template.yaml", file_content=yaml_file_content)

    enable_anonymous_read_access(bucket_name="mlpipeline", prefix="datasets/*")

    return api_dataset, 201
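

# Hedged usage sketch (not part of the original module): uploads a dataset descriptor and
# returns its generated id. The file name "sample_dataset.yaml" is hypothetical.
def _example_upload_dataset(path: str = "sample_dataset.yaml"):
    with open(path, "rb") as f:
        api_dataset, status = _upload_dataset_yaml(f.read())
    assert status == 201
    return api_dataset.id
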