Example #1
def model_start(model_name):
    global g_storage

    params = {
        'save_prediction': get_bool_arg('save_prediction'),
        'datasink': request.args.get('datasink'),
        'detect_anomalies': get_bool_arg('detect_anomalies'),
    }

    model = g_storage.load_model(model_name)
    if not model.is_trained:
        raise errors.ModelNotTrained()

    model.set_run_params(params)
    model.set_run_state(None)
    g_storage.save_model(model)

    params['from_date'] = get_date_arg('from')
    try:
        _model_start(model, params)
    except errors.LoudMLException as exn:
        model.set_run_params(None)
        g_storage.save_model(model)
        raise  # bare raise re-raises the exception and preserves the traceback

    return "real-time prediction started", 200
Example #2
def model_stop(model_name):
    global g_running_models
    global g_storage

    g_lock.acquire()
    timer = g_running_models.get(model_name)
    if timer is None:
        g_lock.release()
        return "model is not active", 404

    timer.cancel()
    del g_running_models[model_name]
    g_lock.release()
    logging.info("model '%s' deactivated", model_name)

    model = g_storage.load_model(model_name)
    model.set_run_params(None)
    model.set_run_state(None)
    g_storage.save_model(model)

    return "model deactivated"
Example #3
    def predict(self,
                model_name,
                save_run_state=True,
                save_prediction=False,
                detect_anomalies=False,
                datasink=None,
                **kwargs):
        """
        Ask model for a prediction
        """

        model = self.storage.load_model(model_name)
        src_settings = self.config.get_datasource(model.default_datasource)
        source = loudml.datasource.load_datasource(src_settings)

        if model.type in ['timeseries', 'donut']:
            mse_rtol = self.config.server['mse_rtol']
            _state = model.get_run_state()
            if detect_anomalies:
                prediction = model.predict2(
                    source,
                    mse_rtol=mse_rtol,
                    _state=_state,
                    num_cpus=self.config.inference['num_cpus'],
                    num_gpus=self.config.inference['num_gpus'],
                    **kwargs)
            else:
                prediction = model.predict(
                    source,
                    num_cpus=self.config.inference['num_cpus'],
                    num_gpus=self.config.inference['num_gpus'],
                    **kwargs)

            logging.info("job[%s] predicted values for %d time buckets",
                         self.job_id, len(prediction.timestamps))
            if detect_anomalies:
                hooks = self.storage.load_model_hooks(
                    model.settings,
                    source,
                )
                model.detect_anomalies(prediction, hooks)
            if save_run_state:
                model.set_run_state(_state)
                self.storage.save_state(model)
            if save_prediction:
                self._save_timeseries_prediction(
                    model,
                    prediction,
                    source,
                    datasink,
                )

            fmt = kwargs.get('format', 'series')

            if fmt == 'buckets':
                return prediction.format_buckets()
            elif fmt == 'series':
                return prediction.format_series()
            else:
                raise errors.Invalid('unknown requested format')

        else:
            # Only 'timeseries' and 'donut' models take the prediction
            # path above; other model types fall through with a log line.
            logging.info("job[%s] prediction done", self.job_id)