def test_peak_model_for_cache(self):
    cache = {
        'patternCenter': [1, 6],
        'patternModel': [1, 4, 0],
        'confidence': 2,
        'convolveMax': 8,
        'convolveMin': 7,
        'windowSize': 1,
        'convDelMin': 0,
        'convDelMax': 0,
        'heightMax': 4,
        'heightMin': 4,
    }
    data_val = [2.0, 5.0, 1.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0, 3.0, 7.0, 1.0, 1.0, 1.0]
    dataframe = create_dataframe(data_val)
    segments = [{
        '_id': 'Esl7uetLhx4lCqHa',
        'analyticUnitId': 'opnICRJwOmwBELK8',
        'from': 1523889000010,
        'to': 1523889000012,
        'labeled': True,
        'deleted': False
    }]
    segments = [Segment.from_json(segment) for segment in segments]
    model = models.PeakModel()
    model.state = model.get_state(cache)
    result = model.fit(dataframe, segments, 'test')
    self.assertEqual(len(result.pattern_center), 3)
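# These tests rely on a create_dataframe() helper that is not shown in this excerpt.
# A minimal sketch, assuming it simply pairs each value with a sequential millisecond
# timestamp starting at 1523889000000, so segment 'from'/'to' fields such as
# 1523889000010 line up with indices in data_val:
import pandas as pd

def create_dataframe(data_val: list) -> pd.DataFrame:
    # Build [timestamp, value] rows; the base timestamp matches the one used
    # by the corrupted-dataframe tests below.
    data = [[1523889000000 + i, val] for i, val in enumerate(data_val)]
    return pd.DataFrame(data, columns=['timestamp', 'value'])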
def test_peak_antisegments(self):
    data_val = [
        1.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0,
        5.0, 7.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    ]
    dataframe = create_dataframe(data_val)
    segments = [{
        '_id': 'Esl7uetLhx4lCqHa',
        'analyticUnitId': 'opnICRJwOmwBELK8',
        'from': 1523889000010,
        'to': 1523889000012,
        'labeled': True,
        'deleted': False
    }, {
        '_id': 'Esl7uetLhx4lCqHa',
        'analyticUnitId': 'opnICRJwOmwBELK8',
        'from': 1523889000003,
        'to': 1523889000005,
        'labeled': False,
        'deleted': True
    }]
    segments = [Segment.from_json(segment) for segment in segments]
    try:
        model = models.PeakModel()
        model_name = model.__class__.__name__
        model.state = model.get_state(None)
        model.fit(dataframe, segments, 'test')
    except ValueError:
        self.fail('Model {} raised unexpectedly'.format(model_name))
def test_three_value_segment(self):
    data_val = [
        1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 2.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0,
        9.0, 9.0, 9.0, 9.0, 2.0, 3.0, 4.0, 5.0, 4.0, 2.0, 1.0, 3.0, 4.0
    ]
    dataframe = create_dataframe(data_val)
    segments = [{
        '_id': 'Esl7uetLhx4lCqHa',
        'analyticUnitId': 'opnICRJwOmwBELK8',
        'from': 1523889000004,
        'to': 1523889000006,
        'labeled': True,
        'deleted': False
    }]
    segments = [Segment.from_json(segment) for segment in segments]
    model_instances = [
        models.GeneralModel(),
        models.PeakModel(),
    ]
    try:
        for model in model_instances:
            model_name = model.__class__.__name__
            model.state = model.get_state(None)
            model.fit(dataframe, segments, 'test')
    except ValueError:
        self.fail('Model {} raised unexpectedly'.format(model_name))
def main(model_type: str) -> list:
    table_metric = []
    if model_type == 'peak':
        for data in PEAK_DATASETS:
            dataset = data.frame
            segments = data.get_segments_for_detection(1, 0)
            model = models.PeakModel()
            cache = model.fit(dataset, segments, 'test', {})
            detect_result = model.detect(dataset, 'test', cache)
            peak_metric = Metric(data.get_all_correct_segments(), detect_result)
            table_metric.append((peak_metric.get_amount(), peak_metric.get_accuracy()))
    return table_metric
def resolve_model_by_pattern(pattern: str) -> models.Model:
    if pattern == 'GENERAL':
        return models.GeneralModel()
    if pattern == 'PEAK':
        return models.PeakModel()
    if pattern == 'TROUGH':
        return models.TroughModel()
    if pattern == 'DROP':
        return models.DropModel()
    if pattern == 'JUMP':
        return models.JumpModel()
    if pattern == 'CUSTOM':
        return models.CustomModel()
    raise ValueError('Unknown pattern "%s"' % pattern)
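# Hypothetical usage of the factory above. The pattern string is assumed to come
# from the analytic unit's configuration; any value outside the known set surfaces
# as a ValueError rather than returning a half-initialized model.
model = resolve_model_by_pattern('PEAK')       # -> models.PeakModel instance
try:
    resolve_model_by_pattern('UNKNOWN')
except ValueError as e:
    print(e)                                   # Unknown pattern "UNKNOWN"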
def test_peak_model_for_cache(self):
    cache = {
        'pattern_center': [1, 6],
        'model_peak': [1, 4, 0],
        'confidence': 2,
        'convolve_max': 8,
        'convolve_min': 7,
        'WINDOW_SIZE': 1,
        'conv_del_min': 0,
        'conv_del_max': 0,
    }
    data_val = [2.0, 5.0, 1.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0, 3.0, 7.0, 1.0, 1.0, 1.0]
    dataframe = create_dataframe(data_val)
    segments = [{
        '_id': 'Esl7uetLhx4lCqHa',
        'analyticUnitId': 'opnICRJwOmwBELK8',
        'from': 1523889000010,
        'to': 1523889000012,
        'labeled': True,
        'deleted': False
    }]
    model = models.PeakModel()
    result = model.fit(dataframe, segments, cache)
    self.assertEqual(len(result['pattern_center']), 3)
def test_models_with_corrupted_dataframe(self):
    data = [[1523889000000 + i, float('nan')] for i in range(10)]
    dataframe = pd.DataFrame(data, columns=['timestamp', 'value'])
    segments = []
    model_instances = [
        models.JumpModel(),
        models.DropModel(),
        models.GeneralModel(),
        models.PeakModel(),
        models.TroughModel()
    ]
    try:
        for model in model_instances:
            model_name = model.__class__.__name__
            model.fit(dataframe, segments, dict())
    except ValueError:
        self.fail('Model {} raised unexpectedly'.format(model_name))
def test_models_with_corrupted_dataframe(self):
    data = [[1523889000000 + i, float('nan')] for i in range(10)]
    dataframe = pd.DataFrame(data, columns=['timestamp', 'value'])
    segments = []
    model_instances = [
        models.JumpModel(),
        models.DropModel(),
        models.GeneralModel(),
        models.PeakModel(),
        models.TroughModel()
    ]
    for model in model_instances:
        model_name = model.__class__.__name__
        model.state = model.get_state(None)
        with self.assertRaises(AssertionError):
            model.fit(dataframe, segments, 'test')
def test_random_dataset_for_random_model(self):
    data = create_random_model(random.randint(1, 100))
    data = create_dataframe(data)
    model_instances = [models.PeakModel(), models.TroughModel()]
    cache = {
        'patternCenter': [5, 50],
        'patternModel': [],
        'windowSize': 2,
        'convolveMin': 0,
        'convolveMax': 0,
        'confidence': 0,
        'heightMax': 0,
        'heightMin': 0,
        'convDelMin': 0,
        'convDelMax': 0,
    }
    # Window size: at most half the dataset length (guarded so randint always
    # receives a valid range when the dataset is very short).
    ws = random.randint(1, max(1, len(data['value']) // 2))
    pattern_model = create_random_model(ws)
    convolve = scipy.signal.fftconvolve(pattern_model, pattern_model)
    confidence = 0.2 * (data['value'].max() - data['value'].min())
    cache['windowSize'] = ws
    cache['patternModel'] = pattern_model
    cache['convolveMin'] = max(convolve)
    cache['convolveMax'] = max(convolve)
    cache['confidence'] = confidence
    cache['heightMax'] = data['value'].max()
    cache['heightMin'] = confidence
    try:
        for model in model_instances:
            model_name = model.__class__.__name__
            model.state = model.get_state(cache)
            model.detect(data, 'test')
    except ValueError:
        self.fail(
            'Model {} raised unexpectedly with dataset {} and cache {}'.format(
                model_name, data['value'], cache))
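# create_random_model() is another helper assumed by the test above and not shown
# in this excerpt. A minimal sketch, assuming it simply produces a list of `length`
# random float values to serve as synthetic series or pattern data; the exact range
# and distribution are placeholders, since the test only depends on the shape.
import random

def create_random_model(length: int) -> list:
    return [random.uniform(1.0, 10.0) for _ in range(length)]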