def test_rating_cpu_frames(self):
    """A request_cpu frame with fully matching labels is rated at the expected price."""
    frame_labels = {
        'instance_type': 'large',
        'storage_type': 'hdd',
        'gpu_accel': 'false',
    }
    _, matched_rule = rules.find_match(
        'request_cpu', frame_labels, self.rules['rules'])
    self.assertEqual(rate(matched_rule, {'qty': 42}), 0.0378)
def test_matching_default(self):
    """A label unknown to the ruleset falls back to the default usage_cpu rule."""
    labelset, rule = rules.find_match(
        'usage_cpu', {'cpu_arch': 'z80'}, self.rules)
    expected_rule = {
        'metric': 'usage_cpu',
        'value': 0.0002,
        'unit': 'core-hours',
    }
    self.assertEqual({}, labelset)
    self.assertEqual(expected_rule, rule)
def test_matching_default_with_checked_label(self):
    """A known label key with an unmatched value still yields the default rule."""
    labelset, rule = rules.find_match(
        'usage_cpu', {'instance_type': 'foobar'}, self.rules)
    expected_rule = {
        'metric': 'usage_cpu',
        'value': 0.0002,
        'unit': 'core-hours',
    }
    self.assertEqual({}, labelset)
    self.assertEqual(expected_rule, rule)
def test_matching_labels(self):
    """Only labels known to the ruleset are kept in the returned labelset."""
    provided_labels = {
        'instance_type': 'small',
        'sldjalfksdjf': 'alsdkfjalsdkfj',
    }
    labelset, rule = rules.find_match('usage_cpu', provided_labels, self.rules)
    self.assertEqual({'instance_type': 'small'}, labelset)
    self.assertEqual(
        {
            'metric': 'usage_cpu',
            'value': 0.0015,
            'unit': 'core-hours',
        },
        rule)
def test_rating_no_match(self):
    """Rating against the rule matched for an unknown metric produces None."""
    _, matched_rule = rules.find_match('pokemon', {}, self.rules['rules'])
    rating = rate(matched_rule, {'qty': 42})
    self.assertEqual(rating, None)
def test_matching_wrong_metric(self):
    """An unknown metric matches nothing: both labelset and rule are empty."""
    labelset, rule = rules.find_match(
        'nothing', {'instance_type': 'pokemon'}, self.rules)
    self.assertEqual({}, labelset)
    self.assertEqual({}, rule)
def retrieve_data(rules: Dict, metric_config: Dict, logger: Logger):
    """
    Retrieve and rate data according to rules and metrics configuration.

    Loads frames from the configured presto table, rates each frame against
    the matching rule, then stores the rated frames and touched namespaces.

    :rules (Dict) A dictionary holding the rules to rate the frames.
    :metric_config (Dict) A dictionary holding the metrics configuration.
    :logger (Logger) Logger used to report progress.
    """
    logger.info(f'Loading frames from {metric_config["presto_table"]}..')
    logger.info('checking for labels..')
    labels_name = get_labels_from_table(metric_config['presto_table'],
                                        metric_config['presto_column'])
    potential_labels = ', '.join(labels_name)
    if potential_labels:
        logger.info(f'found labels: {potential_labels}')
        # Prefix with a comma so the labels can be appended directly to the
        # query's column list by get_frames.
        potential_labels = f', {potential_labels}'
    else:
        logger.info('no labels found')
    frames = get_frames(metric_config, potential_labels)
    loaded = len(frames)
    if loaded == 0:
        logger.info('no frames loaded')
        return
    logger.info(f'{loaded} frames loaded')
    rated_frames, rated_namespaces = [], []
    rating_time = dt.utcnow()
    for frame in frames:
        # Every column after presto_column is considered a label.
        frame_labels = extract_frames_labels(frame,
                                             metric_config['presto_column'],
                                             labels_name)
        labels, rule = rs.find_match(metric_config['metric'],
                                     frame_labels,
                                     rules)
        # Convert the raw quantity to the unit the matched rule is priced in.
        converted = rates.convert_metrics_unit(
            metric_config['unit'],
            rule['unit'],
            frame[metric_config['presto_column']])
        rated_frames.append((
            frame['period_start'],                  # frame_begin
            frame['period_end'],                    # frame_end
            frame['namespace'],                     # namespace
            frame['node'],                          # node
            metric_config['metric'],                # metric
            frame['pod'],                           # pod
            converted,                              # quantity
            rates.rate(rule, {'qty': converted}),   # rating
            f'{labels}'))                           # matched labels, stringified
        rated_namespace = frame['namespace']
        if rated_namespace not in rated_namespaces:
            rated_namespaces.append(rated_namespace)
        logger.info('frame processed')
    logger.info('sending data..')
    result = update_rated_data(
        rated_frames,
        rated_namespaces,
        metric_config,
        rating_time.isoformat(sep=' ', timespec='milliseconds'))
    if result:
        logger.info(
            f'updated rated-{metric_config["metric"].replace("_", "-")} object'
        )
    # NOTE: the original source had this literal broken across a line,
    # which is a syntax error; reconstructed as a single-line message.
    logger.info('finished rating instance')