def show(self, format=None):
    """show the data object content in Jupyter

    :param format: format to use (when there is no/wrong suffix), e.g. 'png'
    """
    if not is_ipython:
        logger.warning(
            "Jupyter/IPython was not detected, .show() will only display inside Jupyter"
        )
        return

    from IPython import display

    suffix = self.suffix.lower()
    if format:
        suffix = "." + format

    if suffix in [".jpg", ".png", ".gif"]:
        display.display(display.Image(self.get(), format=suffix[1:]))
    elif suffix in [".htm", ".html"]:
        display.display(display.HTML(self.get(encoding="utf-8")))
    elif suffix in [".csv", ".pq", ".parquet"]:
        display.display(self.as_df())
    elif suffix in [".yaml", ".txt", ".py"]:
        display.display(display.Pretty(self.get(encoding="utf-8")))
    elif suffix == ".json":
        display.display(display.JSON(orjson.loads(self.get())))
    elif suffix == ".md":
        display.display(display.Markdown(self.get(encoding="utf-8")))
    else:
        logger.error(f"unsupported show() format {suffix} for {self.url}")
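# Hedged usage sketch (added, not from the source): the self.suffix / self.get / self.as_df
# attributes suggest show() is a method of an mlrun data item. Inside a Jupyter cell,
# rendering a stored image might then look roughly like this; the artifact URL below is
# hypothetical.
import mlrun

item = mlrun.get_dataitem("store://artifacts/my-project/confusion-matrix")  # hypothetical URL
item.show(format="png")  # explicit format is used when the suffix is missing or wrong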
def _init_endpoint_record(graph_server, model: V2ModelServer):
    logger.info("Initializing endpoint records")
    try:
        project, uri, tag, hash_key = parse_versioned_object_uri(
            graph_server.function_uri
        )

        if model.version:
            versioned_model_name = f"{model.name}:{model.version}"
        else:
            versioned_model_name = f"{model.name}:latest"

        model_endpoint = ModelEndpoint(
            metadata=ModelEndpointMetadata(project=project, labels=model.labels),
            spec=ModelEndpointSpec(
                function_uri=graph_server.function_uri,
                model=versioned_model_name,
                model_class=model.__class__.__name__,
                model_uri=model.model_path,
                stream_path=config.model_endpoint_monitoring.store_prefixes.default.format(
                    project=project, kind="stream"
                ),
                active=True,
            ),
            status=ModelEndpointStatus(),
        )

        db = mlrun.get_run_db()

        db.create_or_patch_model_endpoint(
            project=project,
            endpoint_id=model_endpoint.metadata.uid,
            model_endpoint=model_endpoint,
        )
    except Exception as e:
        logger.error("Failed to create endpoint record", exc=e)
def _start_function(function, auth_info: mlrun.api.schemas.AuthInfo):
    db_session = mlrun.api.db.session.create_session()
    try:
        resource = runtime_resources_map.get(function.kind)
        if "start" not in resource:
            log_and_raise(
                HTTPStatus.BAD_REQUEST.value,
                reason="runtime error: 'start' not supported by this runtime",
            )

        try:
            run_db = get_run_db_instance(db_session)
            function.set_db_connection(run_db)
            mlrun.api.api.utils.ensure_function_has_auth_set(function, auth_info)
            mlrun.api.api.utils.process_function_service_account(function)
            # resp = resource["start"](fn)  # TODO: handle resp?
            resource["start"](function)
            function.save(versioned=False)
            logger.info("Fn:\n %s", function.to_yaml())
        except Exception as err:
            logger.error(traceback.format_exc())
            log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")
    finally:
        mlrun.api.db.session.close_session(db_session)
def _build_function(
    db_session,
    auth_info: mlrun.api.schemas.AuthInfo,
    function,
    with_mlrun,
    skip_deployed,
    mlrun_version_specifier,
):
    fn = None
    ready = None
    try:
        fn = new_function(runtime=function)

        run_db = get_run_db_instance(db_session, auth_info.session)
        fn.set_db_connection(run_db)
        fn.save(versioned=False)
        if fn.kind in RuntimeKinds.nuclio_runtimes():
            mlrun.api.api.utils.ensure_function_has_auth_set(fn, auth_info)
            deploy_nuclio_function(fn)
            # deploy only starts the process; the get-status API is used to check readiness
            ready = False
        else:
            ready = build_runtime(fn, with_mlrun, mlrun_version_specifier, skip_deployed)
        fn.save(versioned=True)
        logger.info("Fn:\n %s", fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")
    return fn, ready
def function_status():
    try:
        data = request.get_json(force=True)
    except ValueError:
        return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')

    logger.info('function_status:\n{}'.format(data))
    selector = data.get('selector')
    kind = data.get('kind')
    if not selector or not kind:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: selector or runtime kind not specified',
        )

    resource = runtime_resources_map.get(kind)
    if 'status' not in resource:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: "status" not supported by this runtime',
        )

    try:
        resp = resource['status'](selector)
        logger.info('status: %s', resp)
    except Exception as err:
        logger.error(traceback.format_exc())
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: {}'.format(err),
        )

    return jsonify(ok=True, data=resp)
def _start_function(db_session, data):
    logger.info("start_function:\n{}".format(data))
    url = data.get("functionUrl")
    if not url:
        log_and_raise(
            HTTPStatus.BAD_REQUEST, reason="runtime error: functionUrl not specified"
        )

    project, name, tag, hash_key = parse_function_uri(url)
    runtime = get_db().get_function(db_session, name, project, tag, hash_key)
    if not runtime:
        log_and_raise(
            HTTPStatus.BAD_REQUEST,
            reason="runtime error: function {} not found".format(url),
        )

    fn = new_function(runtime=runtime)
    resource = runtime_resources_map.get(fn.kind)
    if "start" not in resource:
        log_and_raise(
            HTTPStatus.BAD_REQUEST,
            reason="runtime error: 'start' not supported by this runtime",
        )

    try:
        run_db = get_run_db_instance(db_session)
        fn.set_db_connection(run_db)
        # resp = resource["start"](fn)  # TODO: handle resp?
        resource["start"](fn)
        fn.save(versioned=False)
        logger.info("Fn:\n %s", fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(
            HTTPStatus.BAD_REQUEST, reason="runtime error: {}".format(err)
        )

    return fn
def is_not_none(field: Any, dict_path: List[str]):
    if field is not None:
        return True
    logger.error(
        f"Expected event field is missing: {field} [Event -> {''.join(dict_path)}]"
    )
    return False
def build_function():
    try:
        data = request.get_json(force=True)
    except ValueError:
        return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')

    logger.info('build_function:\n{}'.format(data))
    function = data.get('function')
    with_mlrun = strtobool(data.get('with_mlrun', 'on'))
    try:
        fn = new_function(runtime=function)
        fn.set_db_connection(_db)
        fn.save(versioned=False)
        ready = build_runtime(fn, with_mlrun)
        fn.save(versioned=False)
        logger.info('Fn:\n %s', fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: {}'.format(err),
        )

    return jsonify(ok=True, data=fn.to_dict(), ready=ready)
def _get_function_status(data):
    logger.info("function_status:\n{}".format(data))
    selector = data.get("selector")
    kind = data.get("kind")
    if not selector or not kind:
        log_and_raise(
            HTTPStatus.BAD_REQUEST,
            reason="runtime error: selector or runtime kind not specified",
        )

    resource = runtime_resources_map.get(kind)
    if "status" not in resource:
        log_and_raise(
            HTTPStatus.BAD_REQUEST,
            reason="runtime error: 'status' not supported by this runtime",
        )

    resp = None
    try:
        resp = resource["status"](selector)
        logger.info("status: %s", resp)
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(
            HTTPStatus.BAD_REQUEST, reason="runtime error: {}".format(err)
        )
def _submit_run(
    db_session: Session, auth_info: mlrun.api.schemas.AuthInfo, data
) -> typing.Tuple[str, str, str, typing.Dict]:
    """
    :return: Tuple with:
        1. str of the project of the run
        2. str of the kind of the function of the run
        3. str of the uid of the run that started execution (None when it was scheduled)
        4. dict of the response info
    """
    run_uid = None
    project = None
    try:
        fn, task = _parse_submit_run_body(db_session, auth_info, data)
        run_db = get_run_db_instance(db_session, auth_info.session)
        fn.set_db_connection(run_db, True)
        logger.info("Submitting run", function=fn.to_dict(), task=task)
        # fn.spec.rundb = "http://mlrun-api:8080"
        schedule = data.get("schedule")
        if schedule:
            cron_trigger = schedule
            if isinstance(cron_trigger, dict):
                cron_trigger = schemas.ScheduleCronTrigger(**cron_trigger)
            schedule_labels = task["metadata"].get("labels")
            get_scheduler().create_schedule(
                db_session,
                auth_info,
                task["metadata"]["project"],
                task["metadata"]["name"],
                schemas.ScheduleKinds.job,
                data,
                cron_trigger,
                schedule_labels,
            )
            project = task["metadata"]["project"]

            response = {
                "schedule": schedule,
                "project": task["metadata"]["project"],
                "name": task["metadata"]["name"],
            }
        else:
            run = fn.run(task, watch=False)
            run_uid = run.metadata.uid
            project = run.metadata.project
            if run:
                response = run.to_dict()
    except HTTPException:
        logger.error(traceback.format_exc())
        raise
    except mlrun.errors.MLRunHTTPStatusError:
        raise
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")

    logger.info("Run submission succeeded", response=response)
    return project, fn.kind, run_uid, {"data": response}
def _get_launcher(self, name, namespace=None):
    pods = self.get_pods(name, namespace, launcher=True)
    if not pods:
        logger.error("no pod matches that job name")
        return
    # TODO: Why was this here?
    # k8s = self._get_k8s()
    return list(pods.items())[0]
def is_list_of_numerics(
    field: List[Union[int, float, dict, list]], dict_path: List[str]
):
    if all(isinstance(x, int) or isinstance(x, float) for x in field):
        return True
    logger.error(
        f"Expected event field is missing: {field} [Event -> {''.join(dict_path)}]"
    )
    return False
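# Minimal illustration (added, not from the source): is_list_of_numerics() returns True
# only when every element is an int or a float; otherwise it logs the offending field
# together with its path inside the event dict and returns False.
assert is_list_of_numerics([1, 2.5, 3], ["request", "inputs"])
assert not is_list_of_numerics([1, "two"], ["request", "inputs"])  # logs an error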
def is_list_of_numerics(
    self, field: List[Union[int, float, dict, list]], dict_path: List[str]
):
    if all(isinstance(x, int) or isinstance(x, float) for x in field):
        return True
    logger.error(
        f"List does not consist of only numeric values: {field} [Event -> {','.join(dict_path)}]"
    )
    return False
def build_status(name: str = "", project: str = "", tag: str = "", offset: int = 0, logs: str = "on", db_session: Session = Depends(deps.get_db_session)): logs = strtobool(logs) fn = get_db().get_function(db_session, name, project, tag) if not fn: log_and_raise(HTTPStatus.NOT_FOUND, name=name, project=project, tag=tag) state = get_in(fn, "status.state", "") pod = get_in(fn, "status.build_pod", "") image = get_in(fn, "spec.build.image", "") out = b"" if not pod: if state == "ready": image = image or get_in(fn, "spec.image") return Response(content=out, media_type="text/plain", headers={ "function_status": state, "function_image": image, "builder_pod": pod }) logger.info("get pod {} status".format(pod)) state = get_k8s().get_pod_status(pod) logger.info("pod state={}".format(state)) if state == "succeeded": logger.info("build completed successfully") state = "ready" if state in ["failed", "error"]: logger.error("build {}, watch the build pod logs: {}".format( state, pod)) if logs and state != "pending": resp = get_k8s().logs(pod) if resp: out = resp.encode()[offset:] update_in(fn, "status.state", state) if state == "ready": update_in(fn, "spec.image", image) get_db().store_function(db_session, fn, name, project, tag) return Response(content=out, media_type="text/plain", headers={ "function_status": state, "function_image": image, "builder_pod": pod })
def build_status():
    name = request.args.get('name', '')
    project = request.args.get('project', '')
    tag = request.args.get('tag', '')
    offset = int(request.args.get('offset', '0'))
    logs = strtobool(request.args.get('logs', 'on'))

    fn = _db.get_function(name, project, tag)
    if not fn:
        return json_error(HTTPStatus.NOT_FOUND, name=name, project=project, tag=tag)

    state = get_in(fn, 'status.state', '')
    pod = get_in(fn, 'status.build_pod', '')
    image = get_in(fn, 'spec.build.image', '')
    out = b''
    if not pod:
        if state == 'ready':
            image = image or get_in(fn, 'spec.image')
        return Response(out, mimetype='text/plain', headers={
            "function_status": state,
            "function_image": image,
            "builder_pod": pod,
        })

    logger.info('get pod {} status'.format(pod))
    state = _k8s.get_pod_status(pod)
    logger.info('pod state={}'.format(state))

    if state == 'succeeded':
        logger.info('build completed successfully')
        state = 'ready'
    if state in ['failed', 'error']:
        logger.error('build {}, watch the build pod logs: {}'.format(state, pod))

    if logs and state != 'pending':
        resp = _k8s.logs(pod)
        if resp:
            out = resp.encode()[offset:]

    update_in(fn, 'status.state', state)
    if state == 'ready':
        update_in(fn, 'spec.image', image)
    _db.store_function(fn, name, project, tag)

    return Response(out, mimetype='text/plain', headers={
        "function_status": state,
        "function_image": image,
        "builder_pod": pod,
    })
def _submit(data):
    task = data.get('task')
    function = data.get('function')
    url = data.get('functionUrl')
    if not url and task:
        url = get_in(task, 'spec.function')

    if not (function or url) or not task:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='bad JSON, need to include function/url and task objects',
        )

    # TODO: block exec for function['kind'] in ['', 'local'] (must be a
    # remote/container runtime)

    try:
        if function:
            fn = new_function(runtime=function)
        else:
            if '://' in url:
                fn = import_function(url=url)
            else:
                project, name, tag = parse_function_uri(url)
                runtime = _db.get_function(name, project, tag)
                if not runtime:
                    return json_error(
                        HTTPStatus.BAD_REQUEST,
                        reason='runtime error: function {} not found'.format(url),
                    )
                fn = new_function(runtime=runtime)

        fn.set_db_connection(_db, True)
        logger.info('func:\n{}'.format(fn.to_yaml()))
        # fn.spec.rundb = 'http://mlrun-api:8080'
        schedule = data.get('schedule')
        if schedule:
            args = (task, )
            job_id = _scheduler.add(schedule, fn, args)
            _db.save_schedule(data)
            resp = {'schedule': schedule, 'id': job_id}
        else:
            resp = fn.run(task, watch=False)
            logger.info('resp: %s', resp.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: {}'.format(err),
        )

    if not isinstance(resp, dict):
        resp = resp.to_dict()
    return jsonify(ok=True, data=resp)
def post_init(self):
    try:
        self.v3io.stream.create(
            container=self.stream_container,
            stream_path=self.stream_path,
            shard_count=1,
        )
    except Exception as e:
        if "ResourceInUseException" in str(e):
            logger.error("Stream already exists...")
        else:
            raise e
def _submit_mpijob(self, job, namespace=None):
    mpi_group, mpi_version, mpi_plural = self._get_crd_info()

    k8s = self._get_k8s()
    namespace = k8s.resolve_namespace(namespace)
    try:
        resp = k8s.crdapi.create_namespaced_custom_object(
            mpi_group, mpi_version, namespace=namespace,
            plural=mpi_plural, body=job)
        name = get_in(resp, 'metadata.name', 'unknown')
        logger.info('MpiJob {} created'.format(name))
        return resp
    except client.rest.ApiException as e:
        logger.error("Exception when creating MPIJob: %s" % e)
        raise RunError("Exception when creating MPIJob: %s" % e)
def _build_function(db_session, function, with_mlrun):
    fn = None
    ready = None
    try:
        fn = new_function(runtime=function)
        run_db = get_run_db_instance(db_session)
        fn.set_db_connection(run_db)
        fn.save(versioned=False)
        ready = build_runtime(fn, with_mlrun)
        fn.save(versioned=False)
        logger.info("Fn:\n %s", fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(
            HTTPStatus.BAD_REQUEST, reason="runtime error: {}".format(err)
        )
    return fn, ready
def start_function():
    try:
        data = request.get_json(force=True)
    except ValueError:
        return json_error(HTTPStatus.BAD_REQUEST, reason='bad JSON body')

    logger.info('start_function:\n{}'.format(data))

    url = data.get('functionUrl')
    if not url:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: functionUrl not specified',
        )

    project, name, tag = parse_function_uri(url)
    runtime = _db.get_function(name, project, tag)
    if not runtime:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: function {} not found'.format(url),
        )

    fn = new_function(runtime=runtime)
    resource = runtime_resources_map.get(fn.kind)
    if 'start' not in resource:
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: "start" not supported by this runtime',
        )

    try:
        fn.set_db_connection(_db)
        # resp = resource['start'](fn)  # TODO: handle resp?
        resource['start'](fn)
        fn.save(versioned=False)
        logger.info('Fn:\n %s', fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        return json_error(
            HTTPStatus.BAD_REQUEST,
            reason='runtime error: {}'.format(err),
        )

    return jsonify(ok=True, data=fn.to_dict())
def _submit_mpijob(self, job, namespace=None):
    mpi_group, mpi_version, mpi_plural = self._get_crd_info()

    k8s = self._get_k8s()
    namespace = k8s.resolve_namespace(namespace)
    try:
        resp = k8s.crdapi.create_namespaced_custom_object(
            mpi_group,
            mpi_version,
            namespace=namespace,
            plural=mpi_plural,
            body=job,
        )
        name = get_in(resp, "metadata.name", "unknown")
        logger.info(f"MpiJob {name} created")
        return resp
    except client.rest.ApiException as exc:
        logger.error(f"Exception when creating MPIJob: {exc}")
        raise RunError(f"Exception when creating MPIJob: {exc}")
def _get_function_status(data, auth_info: mlrun.api.schemas.AuthInfo):
    logger.info(f"function_status:\n{data}")
    selector = data.get("selector")
    kind = data.get("kind")
    if not selector or not kind:
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value,
            reason="runtime error: selector or runtime kind not specified",
        )
    project, name = data.get("project"), data.get("name")
    # Only after 0.6.6 does the client start sending the project and name; as long as
    # 0.6.6 is a valid version we'll need to try and resolve them from the selector.
    # TODO: remove this when 0.6.6 is not relevant anymore
    if not project or not name:
        project, name, _ = mlrun.runtimes.utils.parse_function_selector(selector)
    mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
        mlrun.api.schemas.AuthorizationResourceTypes.function,
        project,
        name,
        mlrun.api.schemas.AuthorizationAction.read,
        auth_info,
    )

    resource = runtime_resources_map.get(kind)
    if "status" not in resource:
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value,
            reason="runtime error: 'status' not supported by this runtime",
        )

    resp = None
    try:
        resp = resource["status"](selector)
        logger.info("status: %s", resp)
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(HTTPStatus.BAD_REQUEST.value, reason=f"runtime error: {err}")
def _build_function(db_session, function, with_mlrun):
    fn = None
    ready = None
    try:
        fn = new_function(runtime=function)
        run_db = get_run_db_instance(db_session)
        fn.set_db_connection(run_db)
        fn.save(versioned=False)
        if fn.kind in RuntimeKinds.nuclio_runtimes():
            deploy_nuclio_function(fn)
            # deploy only starts the process; the get-status API is used to check readiness
            ready = False
        else:
            ready = build_runtime(fn, with_mlrun)
        fn.save(versioned=True)
        logger.info("Fn:\n %s", fn.to_yaml())
    except Exception as err:
        logger.error(traceback.format_exc())
        log_and_raise(
            HTTPStatus.BAD_REQUEST.value, reason="runtime error: {}".format(err)
        )
    return fn, ready
async def _process_event(self, event):
    # async implementation (with storey)
    body = self._get_event_or_body(event)
    method, url, headers, body = self._generate_request(event, body)
    kwargs = {}
    if self.timeout:
        kwargs["timeout"] = aiohttp.ClientTimeout(total=self.timeout)
    try:
        resp = await self._client_session.request(
            method, url, headers=headers, data=body, ssl=False, **kwargs
        )
        if resp.status >= 500:
            text = await resp.text()
            raise RuntimeError(f"bad http response {resp.status}: {text}")
        return resp
    except asyncio.TimeoutError as exc:
        logger.error(f"http request to {url} timed out in RemoteStep {self.name}")
        raise exc
def _start_function(function):
    db_session = mlrun.api.db.session.create_session()
    try:
        resource = runtime_resources_map.get(function.kind)
        if "start" not in resource:
            log_and_raise(
                HTTPStatus.BAD_REQUEST.value,
                reason="runtime error: 'start' not supported by this runtime",
            )

        try:
            run_db = get_run_db_instance(db_session)
            function.set_db_connection(run_db)
            # resp = resource["start"](fn)  # TODO: handle resp?
            resource["start"](function)
            function.save(versioned=False)
            logger.info("Fn:\n %s", function.to_yaml())
        except Exception as err:
            logger.error(traceback.format_exc())
            log_and_raise(
                HTTPStatus.BAD_REQUEST.value, reason="runtime error: {}".format(err)
            )
    finally:
        mlrun.api.db.session.close_session(db_session)
def run(
    cls,
    project,
    workflow_spec: WorkflowSpec,
    name=None,
    workflow_handler=None,
    secrets=None,
    artifact_path=None,
    namespace=None,
) -> _PipelineRunStatus:
    pipeline_context.set(project, workflow_spec)
    workflow_handler = _PipelineRunner._get_handler(
        workflow_handler, workflow_spec, project, secrets
    )

    workflow_id = uuid.uuid4().hex
    pipeline_context.workflow_id = workflow_id
    pipeline_context.workflow_artifact_path = artifact_path
    project.notifiers.push_start_message(project.metadata.name, id=workflow_id)
    try:
        workflow_handler(**workflow_spec.args)
        state = mlrun.run.RunStatuses.succeeded
    except Exception as e:
        trace = traceback.format_exc()
        logger.error(trace)
        project.notifiers.push(
            f"Workflow {workflow_id} run failed!, error: {e}\n{trace}"
        )
        state = mlrun.run.RunStatuses.failed
    mlrun.run.wait_for_runs_completion(pipeline_context.runs_map.values())
    project.notifiers.push_run_results(
        pipeline_context.runs_map.values(), state=state
    )
    pipeline_context.clear()
    return _PipelineRunStatus(
        workflow_id, cls, project=project, workflow=workflow_spec, state=state
    )
def _init_endpoint_record(context, model_logger: Optional[_ModelLogPusher]):
    if model_logger is None or isinstance(model_logger.output_stream, _DummyStream):
        return

    try:
        project, uri, tag, hash_key = parse_versioned_object_uri(
            model_logger.function_uri
        )

        if model_logger.model.version:
            model = f"{model_logger.model.name}:{model_logger.model.version}"
        else:
            model = model_logger.model.name

        model_endpoint = ModelEndpoint(
            metadata=ModelEndpointMetadata(
                project=project, labels=model_logger.model.labels
            ),
            spec=ModelEndpointSpec(
                function_uri=model_logger.function_uri,
                model=model,
                model_class=model_logger.model.__class__.__name__,
                model_uri=model_logger.model.model_path,
                stream_path=model_logger.stream_path,
                active=True,
            ),
            status=ModelEndpointStatus(),
        )

        db = mlrun.get_run_db()

        db.create_or_patch(
            project=project,
            endpoint_id=model_endpoint.metadata.uid,
            model_endpoint=model_endpoint,
        )
    except Exception as e:
        logger.error("Failed to create endpoint record", exc=e)
def log_and_raise(status=HTTPStatus.BAD_REQUEST.value, **kw):
    logger.error(str(kw))
    raise HTTPException(status_code=status, detail=kw)
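# Hedged usage sketch (added): callers log the failure context and surface it as an
# HTTPException with a structured detail payload in one call, as the handlers above do.
# The reason/name/project values here are illustrative only.
from http import HTTPStatus

log_and_raise(
    HTTPStatus.NOT_FOUND.value,
    reason="function not found",
    name="my-func",
    project="my-project",
)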
def build_status( name: str = "", project: str = "", tag: str = "", offset: int = 0, logs: bool = True, last_log_timestamp: float = 0.0, verbose: bool = False, db_session: Session = Depends(deps.get_db_session), ): fn = get_db().get_function(db_session, name, project, tag) if not fn: log_and_raise(HTTPStatus.NOT_FOUND.value, name=name, project=project, tag=tag) # nuclio deploy status if fn.get("kind") in RuntimeKinds.nuclio_runtimes(): ( state, address, nuclio_name, last_log_timestamp, text, ) = get_nuclio_deploy_status(name, project, tag, last_log_timestamp=last_log_timestamp, verbose=verbose) if state == "ready": logger.info("Nuclio function deployed successfully", name=name) if state == "error": logger.error(f"Nuclio deploy error, {text}", name=name) update_in(fn, "status.nuclio_name", nuclio_name) update_in(fn, "status.state", state) update_in(fn, "status.address", address) versioned = False if state == "ready": # Versioned means the version will be saved in the DB forever, we don't want to spam # the DB with intermediate or unusable versions, only successfully deployed versions versioned = True get_db().store_function(db_session, fn, name, project, tag, versioned=versioned) return Response( content=text, media_type="text/plain", headers={ "x-mlrun-function-status": state, "x-mlrun-last-timestamp": str(last_log_timestamp), "x-mlrun-address": address, "x-mlrun-name": nuclio_name, }, ) # job deploy status state = get_in(fn, "status.state", "") pod = get_in(fn, "status.build_pod", "") image = get_in(fn, "spec.build.image", "") out = b"" if not pod: if state == "ready": image = image or get_in(fn, "spec.image") return Response( content=out, media_type="text/plain", headers={ "function_status": state, "function_image": image, "builder_pod": pod, }, ) logger.info("get pod {} status".format(pod)) state = get_k8s().get_pod_status(pod) logger.info("pod state={}".format(state)) if state == "succeeded": logger.info("build completed successfully") state = "ready" if state in ["failed", "error"]: logger.error("build {}, watch the build pod logs: {}".format( state, pod)) if logs and state != "pending": resp = get_k8s().logs(pod) if resp: out = resp.encode()[offset:] update_in(fn, "status.state", state) if state == "ready": update_in(fn, "spec.image", image) versioned = False if state == "ready": versioned = True get_db().store_function(db_session, fn, name, project, tag, versioned=versioned) return Response( content=out, media_type="text/plain", headers={ "x-mlrun-function-status": state, "function_status": state, "function_image": image, "builder_pod": pod, }, )
def json_error(status=HTTPStatus.BAD_REQUEST, **kw):
    kw.setdefault('ok', False)
    logger.error(str(kw))
    reply = jsonify(**kw)
    reply.status_code = status
    return reply