def submit(db_session: Session, data):
    """Submit a run (or a scheduled run) described by the request body.

    ``data`` is the parsed request JSON and must contain a ``task`` object plus
    either a ``function`` object (an inline runtime spec) or a ``functionUrl``
    (a URL or an mlrun function URI); the URL may also come from
    ``task.spec.function``.

    Returns ``{"data": response}`` where ``response`` is the run dict, or a
    ``{"schedule": ..., "id": ...}`` dict when a schedule was requested.
    Raises via ``log_and_raise`` on bad input, a missing function, or any
    runtime error.
    """
    task = data.get("task")
    function = data.get("function")
    url = data.get("functionUrl")
    if not url and task:
        # Fall back to the function reference embedded in the task spec.
        url = get_in(task, "spec.function")
    if not (function or url) or not task:
        # NOTE: use .value for consistency with the other handlers in this file.
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value,
            reason="bad JSON, need to include function/url and task objects",
        )

    # TODO: block exec for function["kind"] in ["", "local"] (must be a
    # remote/container runtime)

    response = None
    try:
        if function and not url:
            # Inline function spec only - build the runtime directly from it.
            fn = new_function(runtime=function)
        else:
            if "://" in url:
                # Full URL - import the function object from the remote source.
                fn = import_function(url=url)
            else:
                # mlrun function URI - look it up in the database.
                project, name, tag, hash_key = parse_function_uri(url)
                runtime = get_db().get_function(
                    db_session, name, project, tag, hash_key
                )
                if not runtime:
                    # A missing stored function is a 404, not a bad request
                    # (matches _parse_submit_run_body).
                    log_and_raise(
                        HTTPStatus.NOT_FOUND.value,
                        reason="runtime error: function {} not found".format(url),
                    )
                fn = new_function(runtime=runtime)

            if function:
                # The request also carried a function object: use it to
                # override selected spec attributes of the stored/imported
                # function without modifying the stored version.
                fn2 = new_function(runtime=function)
                for attr in [
                    "volumes",
                    "volume_mounts",
                    "env",
                    "resources",
                    "image_pull_policy",
                    "replicas",
                ]:
                    val = getattr(fn2.spec, attr, None)
                    if val:
                        setattr(fn.spec, attr, val)

        run_db = get_run_db_instance(db_session)
        fn.set_db_connection(run_db, True)
        logger.info("func:\n{}".format(fn.to_yaml()))
        # fn.spec.rundb = "http://mlrun-api:8080"
        schedule = data.get("schedule")
        if schedule:
            # Register the task with the scheduler instead of running now.
            args = (task,)
            job_id = get_scheduler().add(schedule, fn, args)
            get_db().store_schedule(db_session, data)
            response = {"schedule": schedule, "id": job_id}
        else:
            # Fire-and-forget: don't block the API waiting for completion.
            run = fn.run(task, watch=False)
            if run:
                response = run.to_dict()
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value, reason="runtime error: {}".format(err)
        )

    logger.info("response: %s", response)
    return {
        "data": response,
    }
def _build_function(
    db_session,
    auth_info: mlrun.api.schemas.AuthInfo,
    function,
    with_mlrun=True,
    skip_deployed=False,
    mlrun_version_specifier=None,
    builder_env=None,
):
    """Build (or deploy) the runtime described by ``function``.

    For nuclio-family kinds the function is deployed asynchronously and
    ``ready`` is returned as ``False`` (callers poll the get-status API);
    for all other kinds ``build_runtime`` is invoked and its readiness
    result is returned.

    :param db_session: DB session used for persisting the function and
        for the model-monitoring setup.
    :param auth_info: auth context forwarded to deploy/build.
    :param function: runtime spec (dict/object accepted by ``new_function``).
    :param with_mlrun: forwarded to ``build_runtime``.
    :param skip_deployed: forwarded to ``build_runtime``.
    :param mlrun_version_specifier: forwarded to ``build_runtime``.
    :param builder_env: forwarded to ``build_runtime``.
    :returns: ``(fn, ready)`` - the function object and its readiness flag.
    :raises HTTPException: (via ``log_and_raise``) with 400 on any failure
        to construct, deploy or build the function.
    """
    fn = None
    ready = None
    # Constructing the runtime object can fail on a malformed spec - surface
    # that as a 400 before touching the DB.
    try:
        fn = new_function(runtime=function)
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")
    try:
        run_db = get_run_db_instance(db_session)
        fn.set_db_connection(run_db)
        # Persist an unversioned copy first so the function exists even if the
        # build/deploy below fails; a versioned save follows on success.
        fn.save(versioned=False)
        if fn.kind in RuntimeKinds.nuclio_runtimes():
            # Enrich the function with auth and service-account settings
            # before handing it to nuclio.
            mlrun.api.api.utils.ensure_function_has_auth_set(fn, auth_info)
            mlrun.api.api.utils.process_function_service_account(fn)
            if fn.kind == RuntimeKinds.serving:
                # Handle model monitoring. Best effort: a failure here is
                # logged but does not block the deploy itself.
                try:
                    if fn.spec.track_models:
                        logger.info("Tracking enabled, initializing model monitoring")
                        _init_serving_function_stream_args(fn=fn)
                        # NOTE(review): presumably fetches/creates the project's
                        # monitoring access key secret - confirm semantics in
                        # _process_model_monitoring_secret.
                        model_monitoring_access_key = _process_model_monitoring_secret(
                            db_session,
                            fn.metadata.project,
                            "MODEL_MONITORING_ACCESS_KEY",
                        )
                        _create_model_monitoring_stream(project=fn.metadata.project)
                        mlrun.api.crud.ModelEndpoints().deploy_monitoring_functions(
                            project=fn.metadata.project,
                            model_monitoring_access_key=model_monitoring_access_key,
                            db_session=db_session,
                            auth_info=auth_info,
                        )
                except Exception as exc:
                    logger.warning(
                        "Failed deploying model monitoring infrastructure for the project",
                        project=fn.metadata.project,
                        exc=exc,
                        traceback=traceback.format_exc(),
                    )
            deploy_nuclio_function(fn, auth_info=auth_info)
            # deploy only start the process, the get status API is used to check readiness
            ready = False
        else:
            ready = build_runtime(
                auth_info,
                fn,
                with_mlrun,
                mlrun_version_specifier,
                skip_deployed,
                builder_env=builder_env,
            )
        # Build/deploy succeeded - store the versioned record.
        fn.save(versioned=True)
        logger.info("Fn:\n %s", fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")
    return fn, ready
def _parse_submit_run_body(db_session: Session, data):
    """Resolve the function and task objects out of a submit-run request body.

    ``data`` must contain a ``task`` object plus either a ``function`` dict
    (inline runtime spec) or a ``functionUrl`` (URL or mlrun function URI,
    possibly taken from ``task.spec.function``). When both are supplied, the
    function dict overrides selected spec attributes of the resolved function.

    :param db_session: DB session used to look up stored functions by URI.
    :param data: parsed request JSON.
    :returns: ``(function, task)`` - the resolved runtime object and the task.
    :raises HTTPException: (via ``log_and_raise``) 400 on missing inputs,
        404 when a referenced stored function is not found.
    """
    task = data.get("task")
    function_dict = data.get("function")
    function_url = data.get("functionUrl")
    if not function_url and task:
        # Fall back to the function reference embedded in the task spec.
        function_url = get_in(task, "spec.function")
    if not (function_dict or function_url) or not task:
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value,
            reason="bad JSON, need to include function/url and task objects",
        )

    # TODO: block exec for function["kind"] in ["", "local"] (must be a
    # remote/container runtime)

    if function_dict and not function_url:
        # Inline spec only - construct the runtime directly from it.
        function = new_function(runtime=function_dict)
    else:
        if "://" in function_url:
            # Full URL - import the function object from the remote source.
            function = import_function(url=function_url)
        else:
            # mlrun function URI - look the stored record up in the database.
            project, name, tag, hash_key = parse_function_uri(function_url)
            function_record = get_db().get_function(
                db_session, name, project, tag, hash_key
            )
            if not function_record:
                log_and_raise(
                    HTTPStatus.NOT_FOUND.value,
                    reason="runtime error: function {} not found".format(function_url),
                )
            function = new_function(runtime=function_record)

        if function_dict:
            # The purpose of the function dict is to enable the user to override configurations of the existing function
            # without modifying it - to do that we're creating a function object from the request function dict and
            # assign values from it to the main function object
            override_function = new_function(runtime=function_dict, kind=function.kind)
            for attribute in [
                "volumes",
                "volume_mounts",
                "env",
                "resources",
                "image_pull_policy",
                "replicas",
            ]:
                override_value = getattr(override_function.spec, attribute, None)
                if override_value:
                    if attribute == "env":
                        # Env vars have names, so merge per-variable instead of
                        # replacing the whole list.
                        for env_dict in override_value:
                            function.set_env(env_dict["name"], env_dict["value"])
                    elif attribute == "volumes":
                        function.spec.update_vols_and_mounts(override_value, [])
                    elif attribute == "volume_mounts":
                        # volume mounts don't have a well defined identifier (like name for volume) so we can't merge,
                        # only override
                        function.spec.volume_mounts = override_value
                    elif attribute == "resources":
                        # don't override if there are limits and requests keys but both are empty
                        if override_value.get("limits", {}) or override_value.get(
                            "requests", {}
                        ):
                            setattr(function.spec, attribute, override_value)
                    else:
                        setattr(function.spec, attribute, override_value)
    return function, task