def __exit__(self, exc_type, exc_value, exc_traceback):
    """Context-manager exit hook for the drum runtime.

    Decides whether to suppress or propagate an exception raised inside
    the `with` body, logging it and — in server mode, when requested —
    starting an 'error server' that reports the failure over HTTP.

    Returns:
        True to suppress the exception, False to propagate it further.
    """
    if not exc_type:
        return True  # no exception, just return
    if not self.options:
        # exception occurred before args were parsed
        return False  # propagate exception further

    # log exception
    exc_msg_lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    exc_msg = "".join(exc_msg_lines)
    run_mode = RunMode(self.options.subparser_name)
    if (
        not hasattr(self.options, "show_stacktrace")
        or self.options.show_stacktrace
        or (run_mode == RunMode.SERVER and self.options.with_error_server)
    ):
        logger.error(exc_msg)
    else:
        # FIX: `issubclass` instead of `exc_type == DrumCommonException`:
        # the original strict type-equality let subclasses of
        # DrumCommonException fall through to the full-traceback path.
        if issubclass(exc_type, DrumCommonException):
            print(exc_value)
            return True
        else:
            return False

    if run_mode != RunMode.SERVER:
        # drum is not run in server mode
        return False  # propagate exception further
    if getattr(self.options, "docker", None):
        # when run in docker mode,
        # drum is started from docker with the same options except `--docker`.
        # thus error server is started in docker as well.
        # return here to avoid starting error server a 2nd time.
        return False  # propagate exception further
    if not self.options.with_error_server:
        # force start is not set
        return False  # propagate exception further
    if self.initialization_succeeded:
        # pipeline initialization was successful.
        # exceptions that occur during pipeline running
        # must be propagated further
        return False  # propagate exception further

    # start 'error server'; address is "host" or "host:port"
    host_port_list = self.options.address.split(":", 1)
    host = host_port_list[0]
    port = int(host_port_list[1]) if len(host_port_list) == 2 else None
    with verbose_stdout(self.options.verbose):
        run_error_server(host, port, exc_value)

    return False  # propagate exception further
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Suppress or propagate an exception leaving the runtime context.

    Returns True (suppress) only when no exception occurred. Otherwise
    returns False (propagate), after spinning up the 'error server' when
    every server-mode precondition holds.
    """
    if exc_type is None:
        # clean exit — nothing to handle
        return True

    options = self.options
    if not options:
        # failure happened before argument parsing; nothing more to do
        return False

    # The error server applies only when drum runs as a server...
    if RunMode(options.subparser_name) != RunMode.SERVER:
        return False
    # TODO: add docker support
    if getattr(options, "docker", None):
        # running 'error server' in docker mode is not supported
        return False
    # ...the user explicitly asked for it via --with-error-server...
    if not options.with_error_server:
        return False
    # ...and the failure occurred during initialization — runtime
    # exceptions after a successful init must always propagate.
    if self.initialization_succeeded:
        return False

    # Launch the 'error server'; address is "host" or "host:port".
    address_parts = options.address.split(":", 1)
    host = address_parts[0]
    port = int(address_parts[1]) if len(address_parts) == 2 else None
    with verbose_stdout(options.verbose):
        run_error_server(host, port, exc_value)

    return False
def _run_fit_and_predictions_pipelines_in_mlpiper(self):
    """Build and execute the mlpiper infrastructure pipeline for the current run mode.

    Handles SERVER (prediction server), SCORE (batch scoring) and FIT modes;
    any other mode raises DrumCommonException. Wraps pipeline execution with
    a StatsCollector (disabled in SERVER mode or when --show-perf is off) and,
    in SCORE mode without an explicit output path, prints the predictions read
    back from a temporary file.
    """
    if self.run_mode == RunMode.SERVER:
        run_language = self._check_artifacts_and_get_run_language()
        # in prediction server mode infra pipeline == prediction server runner pipeline
        infra_pipeline_str = self._prepare_prediction_server_or_batch_pipeline(run_language)
    elif self.run_mode == RunMode.SCORE:
        run_language = self._check_artifacts_and_get_run_language()
        tmp_output_filename = None
        # if output is not provided, output into tmp file and print
        if not self.options.output:
            # keep object reference so it will be destroyed only in the end of the process
            # (NamedTemporaryFile removes the file when the object is finalized)
            __tmp_output_file = tempfile.NamedTemporaryFile(mode="w")
            self.options.output = tmp_output_filename = __tmp_output_file.name
        # in batch prediction mode infra pipeline == predictor pipeline
        infra_pipeline_str = self._prepare_prediction_server_or_batch_pipeline(run_language)
    elif self.run_mode == RunMode.FIT:
        run_language = self._get_fit_run_language()
        infra_pipeline_str = self._prepare_fit_pipeline(run_language)
    else:
        error_message = "{} mode is not supported here".format(self.run_mode)
        print(error_message)
        raise DrumCommonException(error_message)

    # Configure a local, standalone mlpiper executor for the prepared pipeline.
    config = ExecutorConfig(
        pipeline=infra_pipeline_str,
        pipeline_file=None,
        run_locally=True,
        comp_root_path=CMRunnerUtils.get_components_repo(),
        mlpiper_jar=None,
        spark_jars=None,
    )

    _pipeline_executor = Executor(config).standalone(True).set_verbose(self.options.verbose)
    # assign logger with the name drum.mlpiper.Executor to mlpiper Executor
    _pipeline_executor.set_logger(
        logging.getLogger(LOGGER_NAME_PREFIX + "." + _pipeline_executor.logger_name())
    )

    self.logger.info(
        ">>> Start {} in the {} mode".format(ArgumentsOptions.MAIN_COMMAND, self.run_mode.value)
    )

    # Performance stats are collected only for non-server runs with --show-perf set.
    sc = StatsCollector(
        disable_instance=(
            not hasattr(self.options, "show_perf")
            or not self.options.show_perf
            or self.run_mode == RunMode.SERVER
        )
    )
    sc.register_report("Full time", "end", StatsOperation.SUB, "start")
    sc.register_report("Init time (incl model loading)", "init", StatsOperation.SUB, "start")
    sc.register_report("Run time (incl reading CSV)", "run", StatsOperation.SUB, "init")

    with verbose_stdout(self.options.verbose):
        sc.enable()
        try:
            sc.mark("start")
            _pipeline_executor.init_pipeline()
            # flag lets __exit__ distinguish init failures from runtime failures
            self.runtime.initialization_succeeded = True
            sc.mark("init")
            _pipeline_executor.run_pipeline(cleanup=False)
            sc.mark("run")
        finally:
            # always clean up the pipeline and close the timing report
            _pipeline_executor.cleanup_pipeline()
            sc.mark("end")
            sc.disable()

    self.logger.info(
        "<<< Finish {} in the {} mode".format(ArgumentsOptions.MAIN_COMMAND, self.run_mode.value)
    )
    sc.print_reports()

    if self.run_mode == RunMode.SCORE:
        # print result if output is not provided
        if tmp_output_filename:
            print(pd.read_csv(tmp_output_filename))