def run(self):
    self.logger.debug("------------- Kale Start Run -------------")
    try:
        # validate provided metadata
        self.validate_metadata()

        # convert notebook to nx graph
        pipeline_graph, pipeline_parameters_code_block = parser.parse_notebook(
            self.source_path, self.nbformat_version)

        pipeline_parameters_dict = dep_analysis.pipeline_parameters_detection(
            pipeline_parameters_code_block)

        # run static analysis over the source code
        dep_analysis.variables_dependencies_detection(
            pipeline_graph,
            ignore_symbols=set(pipeline_parameters_dict.keys()))

        # TODO: Run a static analysis over every step to check that pipeline
        #  parameters are not assigned with new values.

        # in case the user did not specify a custom docker image, use the
        # same base image of the current Notebook Server
        if self.docker_base_image == '':
            try:
                self.docker_base_image = pod_utils.get_docker_base_image()
            except ConfigException:
                # no K8s config found, use the kfp default image
                pass
            except Exception:
                raise

        # generate full kfp pipeline definition
        kfp_code = generate_code.gen_kfp_code(
            nb_graph=pipeline_graph,
            experiment_name=self.experiment_name,
            pipeline_name=self.pipeline_name,
            pipeline_description=self.pipeline_description,
            pipeline_parameters=pipeline_parameters_dict,
            docker_base_image=self.docker_base_image,
            volumes=self.volumes,
            deploy_pipeline=self.run_pipeline,
            working_dir=self.abs_working_dir)

        # save kfp generated code
        self.save_pipeline(kfp_code)

        # deploy pipeline to KFP instance
        if self.upload_pipeline or self.run_pipeline:
            return self.deploy_pipeline_to_kfp(self.output_path)
    except Exception as e:
        self.logger.debug(e, exc_info=True)
        self.logger.error(e)
        self.logger.error("To see full traceback run Kale with --debug "
                          "flag or have a look at kale.log logfile")
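
# The docker-image fallback inside run() is a pattern worth isolating:
# detect the Notebook Server's image via the Kubernetes in-cluster config,
# and degrade gracefully when Kale runs outside a cluster. A minimal
# sketch; `resolve_base_image` and `DEFAULT_IMAGE` are hypothetical names,
# and the `kale.utils.pod_utils` import path is an assumption.
from kubernetes.config import ConfigException

from kale.utils import pod_utils

DEFAULT_IMAGE = ""  # empty string lets KFP pick its own default image


def resolve_base_image(user_image=""):
    """Return the user-provided image, else detect the Notebook Server's."""
    if user_image:
        return user_image
    try:
        return pod_utils.get_docker_base_image()
    except ConfigException:
        # no K8s config found: fall back to the kfp default image
        return DEFAULT_IMAGE
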
def detect_environment(self):
    """Detect local configs to preserve reproducibility in pipeline steps."""
    # When running inside a Kubeflow Notebook Server we can detect the
    # running docker image and use it as default in the pipeline steps.
    if not self.pipeline_metadata['docker_image']:
        docker_image = ""
        try:
            # will fail in case the in-cluster config is not found
            docker_image = get_docker_base_image()
        except ConfigException:
            # no K8s config found, use the kfp default image
            pass
        except Exception:
            # some other exception
            raise
        self.pipeline_metadata["docker_image"] = docker_image
def detect_environment(self):
    """Detect local configs to preserve reproducibility of the dev
    environment in the pipeline steps.
    """
    # used to set the container step's working dir to the same path as the
    # current environment
    self.pipeline_metadata['abs_working_dir'] = os.path.dirname(
        os.path.abspath(self.source_path))

    # When running inside a Kubeflow Notebook Server we can detect the
    # running docker image and use it as default in the pipeline steps.
    if not self.pipeline_metadata['docker_image']:
        try:
            # will fail in case the in-cluster config is not found
            self.pipeline_metadata['docker_image'] = get_docker_base_image()
        except ConfigException:
            # no K8s config found, use the kfp default image
            pass
        except Exception:
            # some other exception
            raise
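
# Both detect_environment() variants depend on get_docker_base_image().
# Below is a minimal sketch of how such a helper can be written with the
# official kubernetes client; it is an illustrative reconstruction, not
# Kale's actual implementation. Resolving the pod name from HOSTNAME and
# the namespace from the service-account mount are assumptions about the
# Notebook Server environment.
import os

from kubernetes import client, config

NAMESPACE_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"


def get_docker_base_image():
    """Return the image of the pod this process is running in."""
    # raises kubernetes.config.ConfigException when not inside a cluster,
    # which the callers above catch to fall back to the kfp default image
    config.load_incluster_config()
    pod_name = os.environ["HOSTNAME"]  # pod name defaults to the hostname
    with open(NAMESPACE_FILE) as f:
        namespace = f.read().strip()
    pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace)
    return pod.spec.containers[0].image
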
def get_base_image(request):
    """Get the current pod's docker base image."""
    return pod_utils.get_docker_base_image()
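
# A minimal usage sketch for the RPC handler above: it ignores `request`
# and simply proxies pod_utils, so mocking that call lets the handler run
# outside a cluster. The `kale.rpc.nb` import path and the image tag are
# assumptions for illustration.
from unittest import mock

from kale.rpc.nb import get_base_image  # assumed handler location


def test_get_base_image_proxies_pod_utils():
    # patching the pod_utils module attribute works because the handler
    # resolves pod_utils.get_docker_base_image at call time
    with mock.patch("kale.utils.pod_utils.get_docker_base_image",
                    return_value="gcr.io/example/notebook:v1"):
        assert get_base_image(request=None) == "gcr.io/example/notebook:v1"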