def model_start(model_name):
    """
    Start real-time prediction for the given model.
    """
    global g_storage

    params = {
        'save_prediction': get_bool_arg('save_prediction'),
        'datasink': request.args.get('datasink'),
        'detect_anomalies': get_bool_arg('detect_anomalies'),
    }

    model = g_storage.load_model(model_name)
    if not model.is_trained:
        raise errors.ModelNotTrained()

    model.set_run_params(params)
    model.set_run_state(None)
    g_storage.save_model(model)

    params['from_date'] = get_date_arg('from')

    try:
        _model_start(model, params)
    except errors.LoudMLException as exn:
        # Roll back the persisted run parameters if the start fails
        model.set_run_params(None)
        g_storage.save_model(model)
        raise exn

    return "real-time prediction started", 200
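# The handler above relies on small request-parsing helpers defined elsewhere
# in this module. A minimal sketch of what they could look like, assuming
# Flask's `request` proxy and a hypothetical `errors.Invalid` exception (the
# actual implementations may differ), using `_sketch` names to avoid any
# confusion with the real helpers:

def get_bool_arg_sketch(arg, default=False):
    """Parse a boolean query-string argument (illustrative sketch)."""
    val = request.args.get(arg)
    if val is None:
        return default
    if val.lower() in ('true', '1', 'yes'):
        return True
    if val.lower() in ('false', '0', 'no'):
        return False
    raise errors.Invalid("invalid value for boolean argument '%s'" % arg)


def get_date_arg_sketch(arg, default=None):
    """Return a date query-string argument as given (illustrative sketch)."""
    return request.args.get(arg, default)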
def _done_cb(self, result):
    """
    Callback executed when job is done
    """
    super()._done_cb(result)

    if self.state == 'done' and self.autostart:
        logging.info("scheduling autostart for model '%s'", self.model_name)

        model = g_storage.load_model(self.model_name)
        params = self._kwargs_start.copy()
        params.pop('from_date')
        model.set_run_params(params)
        g_storage.save_model(model)

        try:
            _model_start(model, self._kwargs_start)
        except errors.LoudMLException:
            model.set_run_params(None)
            g_storage.save_model(model)
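# `_model_start` is defined elsewhere in this module; judging from
# `model_stop` below, it registers a timer in `g_running_models` that
# periodically triggers a prediction run. A hypothetical sketch of that
# scheduling loop (the interval, the predict call and the exact locking are
# assumptions, not the actual implementation):

import threading

def _model_start_sketch(model, params):
    def _tick():
        try:
            # One prediction pass; `model.predict` is a placeholder here
            model.predict(**params)
        finally:
            _schedule()  # reschedule the next run

    def _schedule():
        timer = threading.Timer(model.interval, _tick)
        with g_lock:
            g_running_models[model.name] = timer
        timer.start()

    _schedule()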
def model_stop(model_name):
    """
    Stop real-time prediction for the given model.
    """
    global g_running_models
    global g_storage

    g_lock.acquire()
    timer = g_running_models.get(model_name)

    if timer is None:
        g_lock.release()
        return "model is not active", 404

    # Cancel the periodic prediction timer and forget the running model
    timer.cancel()
    del g_running_models[model_name]
    g_lock.release()

    logging.info("model '%s' deactivated", model_name)

    model = g_storage.load_model(model_name)
    model.set_run_params(None)
    model.set_run_state(None)
    g_storage.save_model(model)
    return "model deactivated"
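# Example of driving these handlers over HTTP. The route paths, HTTP method
# and listen address below are assumptions for illustration; the real
# bindings are declared elsewhere in the server module:

import requests

BASE_URL = "http://localhost:8077"  # hypothetical listen address

# Start real-time prediction, saving predictions and flagging anomalies
resp = requests.post(
    BASE_URL + "/models/my-model/_start",
    params={'save_prediction': 'true', 'detect_anomalies': 'true'},
)
print(resp.status_code, resp.text)

# Stop it again
resp = requests.post(BASE_URL + "/models/my-model/_stop")
print(resp.status_code, resp.text)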