def _updateAnomalyLikelihoodParams(cls, conn, metricId, modelParamsJson,
                                   likelihoodParams):
  """Update and save anomaly_params with the given likelihoodParams if the
  metric is ACTIVE.

  :param conn: Transactional SQLAlchemy connection object
  :type conn: sqlalchemy.engine.base.Connection
  :param metricId: Metric uid
  :param modelParamsJson: Model params JSON object (from model_params metric
    column)
  :param likelihoodParams: anomaly likelihood params dict

  :raises: htmengine.exceptions.MetricNotActiveError if metric's status is not
    MetricStatus.ACTIVE
  """
  lockedRow = repository.getMetricWithUpdateLock(
      conn, metricId, fields=[schema.metric.c.status])

  if lockedRow.status != MetricStatus.ACTIVE:
    raise MetricNotActiveError(
        "_updateAnomalyLikelihoodParams failed because metric=%s is not "
        "ACTIVE; status=%s" % (metricId, lockedRow.status,))

  modelParams = json.loads(modelParamsJson)
  modelParams["anomalyLikelihoodParams"] = likelihoodParams

  repository.updateMetricColumns(conn, metricId,
                                 {"model_params": json.dumps(modelParams)})
def _updateAnomalyLikelihoodParams(cls, conn, metricId, modelParamsJson,
                                   likelihoodParams):
  """Update and save anomaly_params with the given likelihoodParams if the
  metric is ACTIVE.

  :param conn: Transactional SQLAlchemy connection object
  :type conn: sqlalchemy.engine.base.Connection
  :param metricId: Metric uid
  :param modelParamsJson: Model params JSON object (from model_params metric
    column)
  :param likelihoodParams: anomaly likelihood params dict

  :raises: htmengine.exceptions.MetricNotActiveError if metric's status is not
    MetricStatus.ACTIVE
  """
  lockedRow = repository.getMetricWithUpdateLock(
      conn, metricId, fields=[schema.metric.c.status])

  if lockedRow.status != MetricStatus.ACTIVE:
    raise MetricNotActiveError(
        "_updateAnomalyLikelihoodParams failed because metric=%s is not "
        "ACTIVE; status=%s" % (
            metricId,
            lockedRow.status,
        ))

  modelParams = json.loads(modelParamsJson)
  modelParams["anomalyLikelihoodParams"] = likelihoodParams

  repository.updateMetricColumns(
      conn, metricId, {"model_params": json.dumps(modelParams)})
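# A hypothetical caller sketch for the classmethod above. `connFactory`,
# `anomalyServiceCls`, and `metricRow` are assumptions, not htmengine APIs; the
# point being illustrated is that the row lock, the ACTIVE-status check, and the
# model_params update all run inside a single conn.begin() transaction, so
# concurrent writers serialize on the metric row.
def saveLikelihoodParams(connFactory, anomalyServiceCls, metricRow,
                         likelihoodParams):
  with connFactory() as conn:
    with conn.begin():
      try:
        anomalyServiceCls._updateAnomalyLikelihoodParams(
            conn,
            metricRow.uid,
            metricRow.model_params,
            likelihoodParams)
      except MetricNotActiveError:
        # The metric stopped being ACTIVE before we acquired the lock; skip
        # the update rather than failing the caller.
        pass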
def storeDataWithRetries():
  """
  :returns: a three-tuple <modelInputRows, datasource, metricStatus>;
    modelInputRows: None if the model was in a state not suitable for
      streaming; otherwise a (possibly empty) tuple of ModelInputRow objects
      corresponding to the samples that were stored; ordered by rowid
  """
  with repository.engineFactory(config).connect() as conn:
    with conn.begin():
      # Synchronize with adapter's monitorMetric
      metricObj = repository.getMetricWithUpdateLock(
          conn,
          metricID,
          fields=[schema.metric.c.status,
                  schema.metric.c.last_rowid,
                  schema.metric.c.datasource],
      )

      if (
          metricObj.status != MetricStatus.UNMONITORED and
          metricObj.status != MetricStatus.ACTIVE and
          metricObj.status != MetricStatus.PENDING_DATA and
          metricObj.status != MetricStatus.CREATE_PENDING
      ):
        self._log.error("Can't stream: metric=%s has unexpected status=%s",
                        metricID, metricObj.status)
        modelInputRows = None
      else:
        # TODO: unit-test
        passingSamples = self._scrubDataSamples(data, metricID, conn,
                                                metricObj.last_rowid)
        if passingSamples:
          modelInputRows = self._storeDataSamples(passingSamples, metricID,
                                                  conn)
        else:
          modelInputRows = tuple()

  return (modelInputRows, metricObj.datasource, metricObj.status)
def _startMonitoringWithRetries(self, metricId, modelSpec, swarmParams):
  """ Perform the start-monitoring operation atomically/reliably

  :param metricId: unique identifier of the metric row
  :param modelSpec: same as `modelSpec` from `monitorMetric`
  :param swarmParams: object returned by
    scalar_metric_utils.generateSwarmParams()

  :raises htmengine.exceptions.ObjectNotFoundError: if referenced metric
    doesn't exist
  :raises htmengine.exceptions.MetricNotSupportedError: if requested metric
    isn't supported
  :raises htmengine.exceptions.MetricAlreadyMonitored: if the metric is
    already being monitored
  """
  with self.connectionFactory() as conn:
    with conn.begin():
      # Lock the metric to synchronize with metric streamer; must be first
      # call at start of transaction
      metricObj = repository.getMetricWithUpdateLock(conn, metricId)

      if metricObj.datasource != self._DATASOURCE:
        raise TypeError("Not an HTM metric=%r; modelSpec=%r" %
                        (metricObj, modelSpec))

      if metricObj.status != MetricStatus.UNMONITORED:
        self._log.info(
            "monitorMetric: already monitored; metric=%r", metricObj)
        raise app_exceptions.MetricAlreadyMonitored(
            ("Custom metric=%s is already monitored by model=%r" % (
                metricObj.name,
                metricObj,
            )),
            uid=metricId)

      # Save model specification in metric row
      update = {"parameters": htmengine.utils.jsonEncode(modelSpec)}
      instanceName = self.getInstanceNameForModelSpec(modelSpec)
      if instanceName is not None:
        update["server"] = instanceName

      repository.updateMetricColumns(conn, metricId, update)

      modelStarted = scalar_metric_utils.startMonitoring(
          conn=conn,
          metricId=metricId,
          swarmParams=swarmParams,
          logger=self._log)

      if modelStarted:
        scalar_metric_utils.sendBacklogDataToModel(
            conn=conn,
            metricId=metricId,
            logger=self._log)
def _startMonitoringWithRetries(self, metricId, modelSpec, swarmParams):
  """ Perform the start-monitoring operation atomically/reliably

  :param metricId: unique identifier of the metric row
  :param modelSpec: same as `modelSpec` from `monitorMetric`
  :param swarmParams: object returned by
    scalar_metric_utils.generateSwarmParams()

  :raises htmengine.exceptions.ObjectNotFoundError: if referenced metric
    doesn't exist
  :raises htmengine.exceptions.MetricNotSupportedError: if requested metric
    isn't supported
  :raises htmengine.exceptions.MetricAlreadyMonitored: if the metric is
    already being monitored
  """
  with self.connectionFactory() as conn:
    with conn.begin():
      # Lock the metric to synchronize with metric streamer; must be first
      # call at start of transaction
      metricObj = repository.getMetricWithUpdateLock(conn, metricId)

      if metricObj.datasource != self._DATASOURCE:
        raise TypeError("Not an HTM metric=%r; modelSpec=%r" %
                        (metricObj, modelSpec))

      if metricObj.status != MetricStatus.UNMONITORED:
        self._log.info("monitorMetric: already monitored; metric=%r",
                       metricObj)
        raise app_exceptions.MetricAlreadyMonitored(
            ("Custom metric=%s is already monitored by model=%r" %
             (metricObj.name, metricObj,)),
            uid=metricId)

      # Save model specification in metric row
      update = {"parameters": htmengine.utils.jsonEncode(modelSpec)}
      instanceName = self.getInstanceNameForModelSpec(modelSpec)
      if instanceName is not None:
        update["server"] = instanceName

      repository.updateMetricColumns(conn, metricId, update)

      modelStarted = scalar_metric_utils.startMonitoring(
          conn=conn,
          metricId=metricId,
          swarmParams=swarmParams,
          logger=self._log)

      if modelStarted:
        scalar_metric_utils.sendBacklogDataToModel(
            conn=conn,
            metricId=metricId,
            logger=self._log)
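# A hypothetical caller sketch for the method above. Because
# MetricAlreadyMonitored is raised with the existing metric uid, a public
# monitorMetric() entry point could treat "already monitored" as success. The
# `adapter` object, the swarmParams plumbing, and the assumption that the
# exception exposes the uid it was constructed with are illustrative only.
def monitorMetricSketch(adapter, metricId, modelSpec, swarmParams):
  try:
    adapter._startMonitoringWithRetries(metricId, modelSpec, swarmParams)
  except app_exceptions.MetricAlreadyMonitored as exc:
    # The metric is already being monitored; surface the existing uid
    # instead of failing.
    return exc.uid
  return metricId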
def storeDataWithRetries():
  """
  :returns: a three-tuple <modelInputRows, datasource, metricStatus>;
    modelInputRows: None if the model was in a state not suitable for
      streaming; otherwise a (possibly empty) tuple of ModelInputRow objects
      corresponding to the samples that were stored; ordered by rowid
  """
  with repository.engineFactory(config).connect() as conn:
    with conn.begin():
      # Synchronize with adapter's monitorMetric
      metricObj = repository.getMetricWithUpdateLock(
          conn,
          metricID,
          fields=[
              schema.metric.c.status,
              schema.metric.c.last_rowid,
              schema.metric.c.datasource
          ])

      if (metricObj.status != MetricStatus.UNMONITORED and
          metricObj.status != MetricStatus.ACTIVE and
          metricObj.status != MetricStatus.PENDING_DATA and
          metricObj.status != MetricStatus.CREATE_PENDING):
        self._log.error(
            "Can't stream: metric=%s has unexpected status=%s", metricID,
            metricObj.status)
        modelInputRows = None
      else:
        # TODO: unit-test
        passingSamples = self._scrubDataSamples(
            data, metricID, conn, metricObj.last_rowid)
        if passingSamples:
          modelInputRows = self._storeDataSamples(
              passingSamples, metricID, conn)
        else:
          modelInputRows = tuple()

  return (modelInputRows, metricObj.datasource, metricObj.status)
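# The "WithRetries" naming above implies the transactional closure is re-invoked
# when the database raises transient errors (e.g. deadlocks). A minimal,
# hypothetical retry loop is sketched below for illustration; it is not the
# htmengine retry decorator, and the exception tuple and backoff values are
# assumptions.
import time

def callWithTransientRetries(operation, attempts=3, delaySec=0.5,
                             transientErrors=(Exception,)):
  """Invoke operation() and re-run it on transient errors.

  Hypothetical helper: in practice `transientErrors` would be narrowed to the
  DB driver's deadlock/lock-wait-timeout exceptions rather than Exception.
  """
  for attempt in range(attempts):
    try:
      return operation()
    except transientErrors:
      if attempt == attempts - 1:
        raise
      time.sleep(delaySec)

# Usage sketch:
#   modelInputRows, datasource, metricStatus = callWithTransientRetries(
#       storeDataWithRetries)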