def __init__(self, subscription_key, anomaly_detector_endpoint, data_source=None):
    """Keep the credentials, build the Anomaly Detector client, and remember the data source."""
    self.sub_key = subscription_key
    self.end_point = anomaly_detector_endpoint
    # Create an Anomaly Detector client
    # <client>
    credential = AzureKeyCredential(self.sub_key)
    self.ad_client = AnomalyDetectorClient(credential, self.end_point)
    # </client>
    self.data_source = data_source
def detect_entire_series(self):
    """Detect anomalies across a whole time series, submitted as one batch.

    Reads the key/endpoint from the ANOMALY_DETECTOR_KEY and
    ANOMALY_DETECTOR_ENDPOINT environment variables, loads
    ./sample_data/request-data.csv (timestamp, value columns, no header),
    and prints the index of every point the service flags as anomalous.
    Returns None; on a service error it prints the error and stops.
    """
    SUBSCRIPTION_KEY = os.environ["ANOMALY_DETECTOR_KEY"]
    ANOMALY_DETECTOR_ENDPOINT = os.environ["ANOMALY_DETECTOR_ENDPOINT"]
    TIME_SERIES_DATA_PATH = os.path.join("./sample_data", "request-data.csv")

    # Create an Anomaly Detector client
    # <client>
    client = AnomalyDetectorClient(AzureKeyCredential(SUBSCRIPTION_KEY), ANOMALY_DETECTOR_ENDPOINT)
    # </client>

    # Load in the time series data file
    # <loadDataFile>
    series = []
    data_file = pd.read_csv(TIME_SERIES_DATA_PATH, header=None, encoding='utf-8', parse_dates=[0])
    for index, row in data_file.iterrows():
        series.append(TimeSeriesPoint(timestamp=row[0], value=row[1]))
    # </loadDataFile>

    # Create a request from the data file
    # <request>
    request = DetectRequest(series=series, granularity=TimeGranularity.daily)
    # </request>

    # detect anomalies throughout the entire time series, as a batch
    # <detectAnomaliesBatch>
    print('Detecting anomalies in the entire time series.')
    try:
        response = client.detect_entire_series(request)
    except AnomalyDetectorError as e:
        print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
        # BUG FIX: stop here — falling through would raise UnboundLocalError
        # on `response` below.
        return
    except Exception as e:
        print(e)
        return

    if any(response.is_anomaly):
        print('An anomaly was detected at index:')
        for i, value in enumerate(response.is_anomaly):
            if value:
                print(i)
    else:
        print('No anomalies were detected in the time series.')
    # </detectAnomaliesBatch>
def detect_last_point(self):
    """Detect whether the latest point of a time series is anomalous.

    Reads the key/endpoint from the ANOMALY_DETECTOR_KEY and
    ANOMALY_DETECTOR_ENDPOINT environment variables, loads
    ./sample_data/request-data.csv (timestamp, value columns, no header),
    and prints whether the last point is an anomaly. Returns None; on a
    service error it prints the error and stops.
    """
    SUBSCRIPTION_KEY = os.environ["ANOMALY_DETECTOR_KEY"]
    ANOMALY_DETECTOR_ENDPOINT = os.environ["ANOMALY_DETECTOR_ENDPOINT"]
    TIME_SERIES_DATA_PATH = os.path.join("./sample_data", "request-data.csv")

    # Create an Anomaly Detector client
    # <client>
    client = AnomalyDetectorClient(AzureKeyCredential(SUBSCRIPTION_KEY), ANOMALY_DETECTOR_ENDPOINT)
    # </client>

    # Load in the time series data file
    # <loadDataFile>
    series = []
    data_file = pd.read_csv(TIME_SERIES_DATA_PATH, header=None, encoding='utf-8', parse_dates=[0])
    for index, row in data_file.iterrows():
        series.append(TimeSeriesPoint(timestamp=row[0], value=row[1]))
    # </loadDataFile>

    # Create a request from the data file
    # <request>
    request = DetectRequest(series=series, granularity=TimeGranularity.daily)
    # </request>

    # Detect the anomaly status of the latest data point
    # <latestPointDetection>
    print('Detecting the anomaly status of the latest data point.')
    try:
        response = client.detect_last_point(request)
    except AnomalyDetectorError as e:
        print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
        # BUG FIX: stop here — falling through would raise UnboundLocalError
        # on `response` below.
        return
    except Exception as e:
        print(e)
        return

    if response.is_anomaly:
        print('The latest point is detected as anomaly.')
    else:
        print('The latest point is not detected as anomaly.')
    # </latestPointDetection>
def last_detect(subscription_key):
    """Detect whether the latest point of the sample series is anomalous.

    :param subscription_key: Azure Anomaly Detector subscription key.
    Reads the endpoint from the ANOMALY_DETECTOR_ENDPOINT environment
    variable and builds the request via get_request(). Prints the result;
    on a service error it prints the error and stops.
    """
    print("Sample of detecting whether the latest point in series is anomaly.")
    # Add your Azure Anomaly Detector subscription key to your environment variables.
    endpoint = os.environ["ANOMALY_DETECTOR_ENDPOINT"]

    client = AnomalyDetectorClient(AzureKeyCredential(subscription_key), endpoint)
    request = get_request()

    try:
        response = client.detect_last_point(request)
    except AnomalyDetectorError as e:
        print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
        # BUG FIX: stop here — falling through would raise UnboundLocalError
        # on `response` below.
        return
    except Exception as e:
        print(e)
        return

    if response.is_anomaly:
        print('The latest point is detected as anomaly.')
    else:
        print('The latest point is not detected as anomaly.')
def entire_detect(subscription_key):
    """Detect anomalies throughout the whole sample series, as a batch.

    :param subscription_key: Azure Anomaly Detector subscription key.
    Reads the endpoint from the ANOMALY_DETECTOR_ENDPOINT environment
    variable and builds the request via get_request(). Prints the index of
    every anomalous point; on a service error it prints the error and stops.
    """
    print("Sample of detecting anomalies in the entire series.")
    # Add your Azure Anomaly Detector subscription key to your environment variables.
    endpoint = os.environ["ANOMALY_DETECTOR_ENDPOINT"]

    client = AnomalyDetectorClient(AzureKeyCredential(subscription_key), endpoint)
    request = get_request()

    try:
        response = client.detect_entire_series(request)
    except AnomalyDetectorError as e:
        print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
        # BUG FIX: stop here — falling through would raise UnboundLocalError
        # on `response` below.
        return
    except Exception as e:
        print(e)
        return

    if any(response.is_anomaly):
        print('Anomaly was detected from the series at index:')
        for i, value in enumerate(response.is_anomaly):
            if value:
                print(i)
    else:
        print('No anomalies were detected in the time series.')
class MultivariateSample:
    """Sample wrapper for the multivariate Anomaly Detector API.

    Covers the full model lifecycle: train a model on a data source,
    run detection with it, export it to a zip, and delete it.
    """

    def __init__(self, subscription_key, anomaly_detector_endpoint, data_source=None):
        """Keep the credentials, build the client, and remember the data source URL."""
        self.sub_key = subscription_key
        self.end_point = anomaly_detector_endpoint
        # Create an Anomaly Detector client
        # <client>
        self.ad_client = AnomalyDetectorClient(
            AzureKeyCredential(self.sub_key), self.end_point)
        # </client>
        self.data_source = data_source

    def train(self, start_time, end_time):
        """Train a multivariate model on self.data_source over [start_time, end_time].

        Polls every 10 seconds until the model reaches READY or FAILED.
        Returns the trained model id on success, or None if training failed.
        """
        # Number of models available now
        model_list = list(
            self.ad_client.list_multivariate_model(skip=0, top=10000))
        print("{:d} available models before training.".format(len(model_list)))

        # Use sample data to train the model
        print("Training new model...(it may take a few minutes)")
        data_feed = ModelInfo(start_time=start_time, end_time=end_time, source=self.data_source)
        # The new model id is only exposed in the Location response header;
        # the cls callback surfaces the raw pipeline response so we can read it.
        response_header = \
            self.ad_client.train_multivariate_model(data_feed, cls=lambda *args: [args[i] for i in range(len(args))])[-1]
        trained_model_id = response_header['Location'].split("/")[-1]

        # Wait until the model is ready. It usually takes several minutes
        model_status = None
        model_info = None
        while model_status not in (ModelStatus.READY, ModelStatus.FAILED):
            model_info = self.ad_client.get_multivariate_model(
                trained_model_id).model_info
            model_status = model_info.status
            if model_status not in (ModelStatus.READY, ModelStatus.FAILED):
                # FIX: only sleep while still pending (previously slept once more
                # after the terminal status had already been observed).
                time.sleep(10)

        if model_status == ModelStatus.FAILED:
            print("Creating model failed.")
            print("Errors:")
            if model_info.errors:
                for error in model_info.errors:
                    print("Error code: {}. \nMessage: {}".format(
                        error.code, error.message))
            else:
                print("None")
            return None

        if model_status == ModelStatus.READY:
            # Model list after training
            new_model_list = list(
                self.ad_client.list_multivariate_model(skip=0, top=10000))
            print("Done.\n--------------------")
            print("{:d} available models after training.".format(
                len(new_model_list)))
            # Return the latest model id
        return trained_model_id

    def detect(self, model_id, start_time, end_time):
        """Run anomaly detection with model_id over [start_time, end_time].

        Polls every second until the detection job reaches READY or FAILED.
        Returns the detection result on success, or None on failure.
        Unexpected (non-service) exceptions are re-raised.
        """
        # Detect anomaly in the same data source (but a different interval)
        try:
            detection_req = DetectionRequest(source=self.data_source,
                                             start_time=start_time,
                                             end_time=end_time)
            # The result id is only exposed in the Location response header.
            response_header = self.ad_client.detect_anomaly(
                model_id, detection_req,
                cls=lambda *args: [args[i] for i in range(len(args))])[-1]
            result_id = response_header['Location'].split("/")[-1]

            # Get results (may need a few seconds)
            r = self.ad_client.get_detection_result(result_id)
            print("Get detection result...(it may take a few seconds)")
            while r.summary.status not in (DetectionStatus.READY, DetectionStatus.FAILED):
                # FIX: sleep before re-polling, not after a terminal status.
                time.sleep(1)
                r = self.ad_client.get_detection_result(result_id)

            if r.summary.status == DetectionStatus.FAILED:
                print("Detection failed.")
                print("Errors:")
                if r.summary.errors:
                    for error in r.summary.errors:
                        print("Error code: {}. \nMessage: {}".format(
                            error.code, error.message))
                else:
                    print("None")
                return None
        except HttpResponseError as e:
            print('Error code: {}'.format(e.error.code),
                  'Error message: {}'.format(e.error.message))
            # BUG FIX: return None instead of falling through to `return r`,
            # which raised UnboundLocalError when the request itself failed.
            return None
        except Exception as e:
            raise e

        return r

    def export_model(self, model_id, model_path="model.zip"):
        """Download the trained model as a zip archive to model_path."""
        # Export the model: the client returns a stream of byte chunks.
        model_stream_generator = self.ad_client.export_model(model_id)
        with open(model_path, "wb") as f_obj:
            # FIX: plain iteration replaces the manual next()/StopIteration loop;
            # unexpected exceptions still propagate to the caller.
            for chunk in model_stream_generator:
                f_obj.write(chunk)

    def delete_model(self, model_id):
        """Delete the model and report how many models remain."""
        # Delete the model
        self.ad_client.delete_multivariate_model(model_id)
        model_list_after_delete = list(
            self.ad_client.list_multivariate_model(skip=0, top=10000))
        print("{:d} available models after deletion.".format(
            len(model_list_after_delete)))