import logging
import pickle
from datetime import datetime

# Assumed imports, following the Orion codebase: `analyze` and `_load_pipeline`
# live in orion.analysis, and `load_anomalies` in orion.data. The helpers
# `_load_signal`, `_detrend_signal` and `_parse_confusion_matrix` are assumed
# to be defined elsewhere in this module.
from orion.analysis import _load_pipeline, analyze
from orion.data import load_anomalies

LOGGER = logging.getLogger(__name__)


def _evaluate_signal(pipeline, name, dataset, signal, hyperparameter, metrics,
                     test_split=False, detrend=False):
    train, test = _load_signal(signal, test_split)
    truth = load_anomalies(signal)

    if detrend:
        train = _detrend_signal(train, 'value')
        test = _detrend_signal(test, 'value')

    # Take the timestamp before entering the try block so that `elapsed`
    # is always computable in the except branch.
    start = datetime.utcnow()
    try:
        LOGGER.info("Scoring pipeline %s on signal %s (test split: %s)",
                    name, signal, test_split)

        pipeline = _load_pipeline(pipeline, hyperparameter)
        anomalies = analyze(pipeline, train, test)

        elapsed = datetime.utcnow() - start
        scores = {
            name: scorer(truth, anomalies, test)
            for name, scorer in metrics.items()
        }
        scores['status'] = 'OK'

    except Exception as ex:
        LOGGER.exception(
            "Exception scoring pipeline %s on signal %s (test split: %s), error %s.",
            name, signal, test_split, ex)

        elapsed = datetime.utcnow() - start
        scores = {name: 0 for name in metrics.keys()}

        metric_ = 'confusion_matrix'
        if metric_ in metrics.keys():
            # Every ground-truth anomaly was missed: count them as
            # false negatives.
            fn = len(truth)
            scores[metric_] = (None, 0, fn, 0)  # (tn, fp, fn, tp)

        scores['status'] = 'ERROR'

    scores['elapsed'] = elapsed.total_seconds()
    scores['pipeline'] = name
    scores['split'] = test_split
    scores['dataset'] = dataset
    scores['signal'] = signal

    return scores
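# Example (hypothetical) usage of the older signature above. The pipeline name
# 'lstm_dynamic_threshold' and the signal 'S-1' follow Orion's demo data, but
# the scorer is a placeholder, not one of Orion's real metric functions.
metrics = {
    'placeholder_score': lambda truth, anomalies, test: 0.0,
}
scores = _evaluate_signal(
    'lstm_dynamic_threshold',  # pipeline to load
    'lstm_dynamic_threshold',  # name recorded in the results row
    'NASA',                    # dataset label recorded in the results row
    'S-1',                     # signal to score
    hyperparameter=None,
    metrics=metrics,
)
# `scores` holds one value per metric plus the bookkeeping keys
# 'status', 'elapsed', 'pipeline', 'split', 'dataset' and 'signal'.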
def _evaluate_signal(pipeline, signal, hyperparameter, metrics,
                     test_split=False, detrend=False, pipeline_path=None):
    train, test = _load_signal(signal, test_split)
    truth = load_anomalies(signal)

    if detrend:
        train = _detrend_signal(train, 'value')
        test = _detrend_signal(test, 'value')

    # As above, timestamp before the try block so `elapsed` is always defined.
    start = datetime.utcnow()
    try:
        LOGGER.info("Scoring pipeline %s on signal %s (test split: %s)",
                    pipeline, signal, test_split)

        pipeline = _load_pipeline(pipeline, hyperparameter)
        anomalies = analyze(pipeline, train, test)

        elapsed = datetime.utcnow() - start
        scores = {
            name: scorer(truth, anomalies, test)
            for name, scorer in metrics.items()
        }
        status = 'OK'

    except Exception as ex:
        LOGGER.exception(
            "Exception scoring pipeline %s on signal %s (test split: %s), error %s.",
            pipeline, signal, test_split, ex)

        elapsed = datetime.utcnow() - start
        scores = {name: 0 for name in metrics.keys()}
        status = 'ERROR'

    if 'confusion_matrix' in metrics.keys():
        _parse_confusion_matrix(scores, truth)

    scores['status'] = status
    scores['elapsed'] = elapsed.total_seconds()
    scores['split'] = test_split

    if pipeline_path:
        with open(pipeline_path, 'wb') as f:
            pickle.dump(pipeline, f)

    return scores
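# Example (hypothetical) usage of the newer signature, asking it to pickle the
# fitted pipeline. 'tadgan' is one of Orion's bundled pipelines; the file name
# and the placeholder scorer are illustrative only.
scores = _evaluate_signal(
    'tadgan',
    'S-1',
    hyperparameter=None,
    metrics={'placeholder_score': lambda truth, anomalies, test: 0.0},
    pipeline_path='tadgan_fitted.pkl',
)

# The fitted pipeline can later be restored for reuse without retraining.
with open('tadgan_fitted.pkl', 'rb') as f:
    fitted_pipeline = pickle.load(f)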
def tadgan_pipeline(tadgan_hyperparameters):
    # Load the bundled `tadgan` pipeline with the given hyperparameters.
    pipeline_path = 'tadgan'
    pipeline = _load_pipeline(pipeline_path, tadgan_hyperparameters)
    return pipeline
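# Example (hypothetical): loading TadGAN with custom hyperparameters. The
# primitive name and the 'epochs' setting follow the pattern used in Orion's
# documentation, but are assumptions here, not values taken from this code.
tadgan_hyperparameters = {
    'orion.primitives.tadgan.TadGAN#1': {
        'epochs': 5,
    },
}
pipeline = tadgan_pipeline(tadgan_hyperparameters)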