def get_disk_metrics(self, storage_id, selection):
    """Collect RAW performance metrics for every disk of the array.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param selection: requested metric selection, decoded by _get_selection().
    :return: list of constants.metric_struct entries, one per metric per disk.
    """
    disks = self.get_all_disks()
    disk_metrics = []
    select_metrics, select_ids = _get_selection(selection)
    for disk in disks:
        try:
            stat_entries = self._get_metrics(disk['TYPE'], disk['ID'],
                                             select_ids)
            for stat_entry in stat_entries:
                raw_values = stat_entry['CMO_STATISTIC_DATA_LIST'].split(",")
                for position, metric_name in enumerate(select_metrics):
                    reading = int(raw_values[position])
                    # Scale the counters the device reports in seconds
                    # up to milliseconds.
                    if metric_name in consts.CONVERT_TO_MILLI_SECOND_LIST:
                        reading = reading * 1000
                    labels = {
                        'storage_id': storage_id,
                        'resource_type': 'disk',
                        'resource_id': disk['ID'],
                        'type': 'RAW',
                        'unit': consts.DISK_CAP[metric_name]['unit'],
                        'resource_name':
                            disk['MODEL'] + ':' + disk['SERIALNUMBER']
                    }
                    disk_metrics.append(constants.metric_struct(
                        name=metric_name,
                        labels=labels,
                        values=_get_timestamp_values(stat_entry, reading)))
        except Exception as ex:
            # A failure on one disk must not abort collection for the rest.
            msg = "Failed to get metrics for disk:{0} error: {1}"\
                .format(disk['ID'], ex)
            LOG.error(msg)
    return disk_metrics
def get_storage_metrics(self, storage_id, resource_metrics, start_time,
                        end_time, resource_type):
    """Collect array-level RAW performance metrics.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param resource_metrics: map of metric name -> {'unit': ...} to collect.
    :param start_time: collection window start (epoch milliseconds).
    :param end_time: collection window end (epoch milliseconds).
    :param resource_type: delfin resource type string for the labels.
    :return: list of constants.metric_struct entries (empty on bad input).
    """
    metrics = []
    # Validate the time window first, before any backend calls: the
    # original ordering issued get_array()/get_packaging_storage_data()
    # requests even for a window that can never return data.
    if end_time < start_time:
        return metrics
    arrays_id, arrays_name = self.get_array()
    if not arrays_id or not arrays_name:
        return metrics
    packaging_data = self.get_packaging_storage_data(
        end_time, start_time, resource_type)
    if not packaging_data:
        return metrics
    for resource_key in resource_metrics.keys():
        labels = {
            'storage_id': storage_id,
            'resource_type': resource_type,
            'resource_id': arrays_id,
            'resource_name': arrays_name,
            'type': 'RAW',
            'unit': resource_metrics[resource_key]['unit']
        }
        # Gather one {timestamp: value} series for this metric.
        resource_value = {}
        for about_timestamp in packaging_data.keys():
            metrics_data = packaging_data.get(about_timestamp)
            resource_value[about_timestamp] = \
                metrics_data.get(resource_key)
        metrics_res = constants.metric_struct(name=resource_key,
                                              labels=labels,
                                              values=resource_value)
        metrics.append(metrics_res)
    return metrics
def get_controller_metrics(self, storage_id, selection):
    """Collect RAW performance metrics for every controller.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param selection: requested metric selection, decoded by _get_selection().
    :return: list of constants.metric_struct entries.
    """
    controllers = self.get_all_controllers()
    controller_metrics = []
    select_metrics, select_ids = _get_selection(selection)
    for controller in controllers:
        try:
            stat_entries = self._get_metrics(controller['TYPE'],
                                             controller['ID'],
                                             select_ids)
            for stat_entry in stat_entries:
                raw_values = stat_entry['CMO_STATISTIC_DATA_LIST'].split(",")
                for position, metric_name in enumerate(select_metrics):
                    reading = int(raw_values[position])
                    # Scale second-based counters up to milliseconds.
                    if metric_name in consts.CONVERT_TO_MILLI_SECOND_LIST:
                        reading = reading * 1000
                    labels = {
                        'storage_id': storage_id,
                        'resource_type': 'controller',
                        'resource_id': controller['ID'],
                        'resource_name': controller['NAME'],
                        'type': 'RAW',
                        'unit': consts.CONTROLLER_CAP[metric_name]['unit']
                    }
                    controller_metrics.append(constants.metric_struct(
                        name=metric_name,
                        labels=labels,
                        values=_get_timestamp_values(stat_entry, reading)))
        except Exception as ex:
            # Keep collecting from the remaining controllers on failure.
            msg = "Failed to get metrics for controller:{0} error: {1}" \
                .format(controller['NAME'], ex)
            LOG.error(msg)
    return controller_metrics
def get_array_performance_metrics(self, storage_id, start_time, end_time):
    """Get array-level performance metrics from the Unisphere REST API.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param start_time: collection window start.
    :param end_time: collection window end.
    :return: list of constants.metric_struct entries, one per metric in
        constants.DELFIN_ARRAY_METRICS.
    :raises exception.StorageBackendException: on any collection or
        parsing failure (the original exception is logged).
    """
    try:
        # Fetch VMAX array performance data from the REST client.
        # TODO: check whether the array is registered for performance
        # collection in Unisphere.
        perf_data = self.rest.get_array_performance_metrics(
            self.array_id, start_time, end_time)
        # Parse the VMAX REST response into a metric -> values map.
        metrics_value_map = perf_utils.parse_performance_data(perf_data)
        # Labels shared by every array-level sample.
        labels = {'storage_id': storage_id, 'resource_type': 'array'}
        # Map native VMAX metric names onto the unified delfin metrics.
        delfin_metrics = perf_utils.\
            map_array_perf_metrics_to_delfin_metrics(metrics_value_map)
        metrics_array = []
        for key in constants.DELFIN_ARRAY_METRICS:
            m = constants.metric_struct(name=key, labels=labels,
                                        values=delfin_metrics[key])
            metrics_array.append(m)
        return metrics_array
    except Exception as err:
        msg = "Failed to get performance metrics data for VMAX: {}".format(
            err)
        LOG.error(msg)
        raise exception.StorageBackendException(msg)
def get_volume_metrics(self, storage_id, resource_metrics, start_time,
                       end_time, resource_type):
    """Collect volume-level RAW performance metrics.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param resource_metrics: map of metric name -> {'unit': ...} to collect.
    :param start_time: collection window start (epoch milliseconds).
    :param end_time: collection window end (epoch milliseconds).
    :param resource_type: delfin resource type string for the labels.
    :return: list of constants.metric_struct entries (empty on bad input).
    """
    metrics = []
    # Validate the time window before issuing the device request: the
    # original ordering fetched packaging data even for an inverted
    # window that is then discarded.
    if end_time < start_time:
        return metrics
    packaging_data = self.get_packaging_volume_data(
        end_time, resource_type, start_time)
    if not packaging_data:
        return metrics
    for volume_name in packaging_data.keys():
        for resource_key in resource_metrics.keys():
            labels = {
                'storage_id': storage_id,
                'resource_type': resource_type,
                'resource_id': volume_name,
                'resource_name': volume_name,
                'type': 'RAW',
                'unit': resource_metrics[resource_key]['unit']
            }
            # Build the {timestamp: value} series for this volume/metric.
            resource_value = {}
            for volume_metrics in (packaging_data.get(volume_name) or []):
                resource_value[volume_metrics.get('time')] = \
                    volume_metrics.get(resource_key)
            metrics_res = constants.metric_struct(name=resource_key,
                                                  labels=labels,
                                                  values=resource_value)
            metrics.append(metrics_res)
    return metrics
def construct_metrics(storage_id, resource_metrics, unit_map, perf_list):
    """Build delfin metric structs from collected native performance data.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param resource_metrics: map of delfin metric name -> native metric name.
    :param unit_map: map of delfin metric name -> {'unit': ...}.
    :param perf_list: list of per-resource dicts, each carrying a 'metrics'
        list plus 'resource_type'/'resource_id'/'resource_name' fields.
    :return: list of constants.metric_struct entries.
    """
    metrics_list = []
    metrics_values = {}
    for perf in perf_list:
        collected_metrics_list = perf.get('metrics')
        for collected_metrics in collected_metrics_list:
            metrics_map = parse_performance_data(collected_metrics)
            for key, value in resource_metrics.items():
                metrics_map_value = metrics_map.get(value)
                if metrics_map_value:
                    # Merge the {timestamp: value} samples of this native
                    # metric into the accumulated series for 'key'.
                    metrics_values[key] = metrics_values.get(key, {})
                    for k, v in metrics_map_value.items():
                        metrics_values[key][k] = v
        # NOTE(review): metrics_values is created once outside the perf
        # loop, so series accumulated for one resource appear to carry
        # over into the structs built for later resources in perf_list —
        # verify this accumulation is intentional.
        for resource_key, resource_value in metrics_values.items():
            labels = {
                'storage_id': storage_id,
                'resource_type': perf.get('resource_type'),
                'resource_id': perf.get('resource_id'),
                'resource_name': perf.get('resource_name'),
                'type': 'RAW',
                'unit': unit_map[resource_key]['unit']
            }
            metrics_res = constants.metric_struct(name=resource_key,
                                                  labels=labels,
                                                  values=resource_value)
            metrics_list.append(metrics_res)
    return metrics_list
def _get_metric_model(self, metric_list, labels, metric_values, obj_cap,
                      resources_type):
    """Build metric structs from raw value rows for one resource.

    :param metric_list: delfin metric names requested for this resource.
    :param labels: base label dict shared by all metrics of the resource.
    :param metric_values: sequence of raw rows; index 1 of each row holds
        the collection time string, data columns are addressed through
        consts.METRIC_MAP — TODO confirm exact row layout with the parser.
    :param obj_cap: capability map providing the unit per metric name.
    :param resources_type: resource type key into consts.METRIC_MAP.
    :return: list of constants.metric_struct entries.
    """
    metric_model_list = []
    tools = Tools()
    for metric_name in (metric_list or []):
        values = {}
        obj_labels = copy.copy(labels)
        obj_labels['unit'] = obj_cap.get(metric_name).get('unit')
        for metric_value in metric_values:
            metric_value_infos = metric_value
            # Skip metrics that have no column mapping for this type.
            if not consts.METRIC_MAP.get(resources_type, {}).get(
                    metric_name):
                continue
            value = metric_value_infos[
                consts.METRIC_MAP.get(resources_type).get(metric_name)]
            if not value:
                value = '0'
            # Round-trip the raw time string through the coarser
            # COLLECTION_TIME_PATTERN; presumably this truncates the
            # timestamp to the collection granularity — TODO confirm.
            collection_timestamp = tools.time_str_to_timestamp(
                metric_value_infos[1], consts.TIME_PATTERN)
            collection_time_str = tools.timestamp_to_time_str(
                collection_timestamp, consts.COLLECTION_TIME_PATTERN)
            collection_timestamp = tools.time_str_to_timestamp(
                collection_time_str, consts.COLLECTION_TIME_PATTERN)
            # IOPS values are reported as integers, everything else as a
            # float rounded to 6 decimal places.
            if "iops" == obj_cap.get(metric_name).get('unit').lower():
                value = int(float(value))
            else:
                value = float('%.6f' % (float(value)))
            values[collection_timestamp] = value
        # Only emit a struct when at least one sample was collected.
        if values:
            metric_model = constants.metric_struct(name=metric_name,
                                                   labels=obj_labels,
                                                   values=values)
            metric_model_list.append(metric_model)
    return metric_model_list
def packege_data(self, storage_id, resource_type, metrics, metric_map):
    """Package collected samples into metric structs, appended in place
    to ``metrics``.

    NOTE(review): the method name keeps the historical misspelling
    "packege"; renaming would break existing callers.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param resource_type: constants.ResourceType value being packaged.
    :param metrics: output list; metric structs are appended to it.
    :param metric_map: {resource_key: {metric_name: {timestamp: value}}}.
    """
    resource_id = None
    resource_name = None
    unit = None
    for resource_info in metric_map:
        if resource_type == constants.ResourceType.PORT:
            # Port samples are keyed by WWN; resolve it to the delfin
            # port id/name by matching against the discovered FC ports.
            # NOTE(review): str.strip('0x') also removes trailing '0'
            # and 'x' characters, not just a leading "0x" prefix — a
            # WWN ending in '0' would fail to match; verify intent.
            port_info = self.get_fc_port(storage_id)
            if port_info:
                for fc_port in port_info:
                    if resource_info.strip('0x').upper() == fc_port.get(
                            'wwn').upper():
                        resource_id = fc_port.get('native_port_id')
                        resource_name = fc_port.get('name')
                        break
        else:
            # Other resource keys are encoded as "<id>_<name>".
            resource_arr = resource_info.split('_')
            resource_id = resource_arr[0]
            resource_name = resource_arr[1]
        for target in metric_map.get(resource_info):
            # Pick the unit from the capability map of this resource type.
            if resource_type == constants.ResourceType.PORT:
                unit = consts.PORT_CAP[target]['unit']
            elif resource_type == constants.ResourceType.VOLUME:
                unit = consts.VOLUME_CAP[target]['unit']
            elif resource_type == constants.ResourceType.DISK:
                unit = consts.DISK_CAP[target]['unit']
            elif resource_type == constants.ResourceType.CONTROLLER:
                unit = consts.CONTROLLER_CAP[target]['unit']
            if 'responseTime' == target:
                # responseTime arrives as a total; divide by the iops
                # sample at the same timestamp to get a per-IO average
                # (0 when iops is zero/missing), rounded to 3 places.
                for res_time in metric_map.get(resource_info).get(target):
                    for iops_time in metric_map.get(resource_info).get(
                            'iops'):
                        if res_time == iops_time:
                            res_value = metric_map.get(resource_info).get(
                                target).get(res_time)
                            iops_value = metric_map.get(resource_info).get(
                                'iops').get(iops_time)
                            res_value = \
                                res_value / iops_value if iops_value else 0
                            res_value = round(res_value, 3)
                            metric_map[resource_info][target][res_time] = \
                                res_value
                            break
            labels = {
                'storage_id': storage_id,
                'resource_type': resource_type,
                'resource_id': resource_id,
                'resource_name': resource_name,
                'type': 'RAW',
                'unit': unit
            }
            metric_value = constants.metric_struct(
                name=target,
                labels=labels,
                values=metric_map.get(resource_info).get(target))
            metrics.append(metric_value)
def collect_array_metrics(self, ctx, storage_id, interval, is_history):
    """Generate fake array-level metrics for the fake/test driver.

    The full DELFIN_ARRAY_METRICS set is emitted once per simulated
    array instance, all sharing one random sample set.
    """
    rd_array_count = random.randint(MIN_STORAGE, MAX_STORAGE)
    LOG.info("Fake_array_metrics number for %s: %d" % (
        storage_id, rd_array_count))
    labels = {'storage_id': storage_id, 'resource_type': 'array'}
    fake_metrics = self._get_random_performance()
    return [
        constants.metric_struct(name=metric_name,
                                labels=labels,
                                values=fake_metrics[metric_name])
        for _ in range(rd_array_count)
        for metric_name in constants.DELFIN_ARRAY_METRICS
    ]
def _get_metric_model(self, metric_list, labels, metric_values, obj_cap):
    """Build metric structs from raw sample dicts.

    Samples missing a value for a metric are skipped; metrics that end
    up with no samples produce no struct at all.
    """
    models = []
    for name in (metric_list or []):
        tagged = copy.deepcopy(labels)
        tagged['unit'] = obj_cap.get(name).get('unit')
        samples = {}
        for entry in metric_values:
            reading = entry.get(name)
            if reading is None:
                continue
            # Normalize the device timestamp into system time.
            moment = self.convert_to_system_time(
                entry.get('collect_timestamp'))
            samples[moment] = reading
        if samples:
            models.append(constants.metric_struct(name=name,
                                                  labels=tagged,
                                                  values=samples))
    return models
def collect_perf_metrics(self, context, storage_id, resource_metrics,
                         start_time, end_time):
    """Collects performance metric for the given interval (fake driver).

    Emits the full DELFIN_ARRAY_METRICS set once per simulated array,
    all instances sharing one random sample set.
    """
    rd_array_count = random.randint(MIN_STORAGE, MAX_STORAGE)
    LOG.debug("Fake_perf_metrics number for %s: %d" % (storage_id,
                                                       rd_array_count))
    labels = {'storage_id': storage_id, 'resource_type': 'array'}
    fake_metrics = self._get_random_performance()
    return [
        constants.metric_struct(name=metric_name,
                                labels=labels,
                                values=fake_metrics[metric_name])
        for _ in range(rd_array_count)
        for metric_name in constants.DELFIN_ARRAY_METRICS
    ]
def get_resource_perf_metrics(self, storage_id, start_time, end_time,
                              resource_type, metric_list):
    """Produce fake performance samples for every simulated instance of
    the given resource type (fake/test driver)."""
    LOG.info("###########collecting metrics for resource %s: from"
             " storage %s" % (resource_type, self.storage_id))
    resource_metrics = []
    for ordinal in range(RESOURCE_COUNT_DICT[resource_type]):
        fake_metrics = self._get_random_performance(metric_list,
                                                    start_time, end_time)
        for metric_name in metric_list:
            labels = {'storage_id': storage_id,
                      'resource_type': resource_type,
                      'resource_id': resource_type + '_' + str(ordinal),
                      'type': 'RAW',
                      'unit': metric_list[metric_name]['unit']}
            struct = constants.metric_struct(
                name=metric_name,
                labels=labels,
                values=fake_metrics[metric_name])
            # Deep-copy so appended structs never alias shared dicts.
            resource_metrics.append(copy.deepcopy(struct))
    return resource_metrics
def get_perf_value(metrics, storage_id, start_time, end_time, data_info,
                   resource_id, resource_name, resource_type):
    """Build metric structs for one resource from raw performance rows.

    :param metrics: map of resource type -> selected metric names.
    :param storage_id: delfin storage identifier placed in metric labels.
    :param start_time: window start, epoch milliseconds.
    :param end_time: window end, epoch milliseconds.
    :param data_info: raw per-sample dicts, each with a 'timestamp' string.
    :param resource_id: id placed in metric labels.
    :param resource_name: name placed in metric labels.
    :param resource_type: delfin resource type string.
    :return: list of constants.metric_struct entries.
    """
    fs_metrics = []
    selection = metrics.get(resource_type)
    for key in selection:
        labels = {
            'storage_id': storage_id,
            'resource_type': resource_type,
            'resource_id': resource_id,
            'resource_name': resource_name,
            'type': 'RAW',
            'unit': constant.CAP_MAP[key]['unit']
        }
        values = {}
        for perf_info in data_info:
            if perf_info.get('timestamp'):
                # Parse the device time string as local time...
                occur_time = \
                    int(time.mktime(time.strptime(
                        perf_info.get('timestamp'),
                        PerformanceHandler.TIME_TYPE)))
                # ...then shift by the local-vs-UTC offset; presumably
                # the device reports UTC — TODO confirm against device.
                second_offset = \
                    (time.mktime(time.localtime()) -
                     time.mktime(time.gmtime()))
                timestamp = \
                    (occur_time + int(second_offset)) * 1000
                # Keep only samples inside the window that fall exactly
                # on a minute boundary.
                if int(start_time) <= timestamp <= int(end_time) \
                        and timestamp % 60000 == 0:
                    # PERF_MAP gives the two-level lookup path of this
                    # metric inside the raw sample dict.
                    key_list = constant.PERF_MAP.get(key, [])
                    if len(key_list) > 0:
                        value = perf_info.get(key_list[0], {}) \
                            .get(key_list[1], None)
                        if value is not None:
                            value = PerformanceHandler. \
                                get_value(value, key)
                            values[timestamp] = value
        # Only emit a struct when at least one sample survived filtering.
        if values:
            m = constants.metric_struct(name=key,
                                        labels=labels,
                                        values=values)
            fs_metrics.append(m)
    return fs_metrics
def get_port_metrics(self, storage_id, selection):
    """Collect RAW performance metrics for every supported port.

    :param storage_id: delfin storage identifier placed in metric labels.
    :param selection: requested metric selection, decoded by _get_selection().
    :return: list of constants.metric_struct entries.
    """
    ports = self.get_all_ports()
    port_metrics = []
    select_metrics, select_ids = _get_selection(selection)
    for port in ports:
        # ETH_PORT collection not supported
        if port['TYPE'] == 213:
            continue
        try:
            stat_entries = self._get_metrics(port['TYPE'], port['ID'],
                                             select_ids)
            for stat_entry in stat_entries:
                raw_values = stat_entry['CMO_STATISTIC_DATA_LIST'].split(",")
                for position, metric_name in enumerate(select_metrics):
                    reading = int(raw_values[position])
                    # Scale second-based counters up to milliseconds.
                    if metric_name in consts.CONVERT_TO_MILLI_SECOND_LIST:
                        reading = reading * 1000
                    labels = {
                        'storage_id': storage_id,
                        'resource_type': 'port',
                        'resource_id': port['ID'],
                        'resource_name': port['NAME'],
                        'type': 'RAW',
                        'unit': consts.PORT_CAP[metric_name]['unit']
                    }
                    port_metrics.append(constants.metric_struct(
                        name=metric_name,
                        labels=labels,
                        values=_get_timestamp_values(stat_entry, reading)))
        except Exception as ex:
            # Keep collecting from the remaining ports on failure.
            msg = "Failed to get metrics for port:{0} error: {1}" \
                .format(port['NAME'], ex)
            LOG.error(msg)
    return port_metrics
'storage_id': '12345', 'native_port_id': '0', 'location': 'node1_0', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'max_speed': 8589934592, 'native_parent_id': 'node1', 'wwn': '0x50050768021065cb' }] metrics_result = [ constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'IOPS' }, values={1638346330000: 0.0}), constants.metric_struct(name='readIops', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'IOPS' }, values={1638346330000: 0.0}), constants.metric_struct(name='writeIops',
def test_get_storage_performance(self, mock_unisphere_version, mock_version,
                                 mock_array, mock_array_details,
                                 mock_performnace):
    """Verify collect_perf_metrics parses historic and real-time VMAX
    array performance responses and propagates collection failures."""
    # Historic response: three samples in resultList.result.
    vmax_array_perf_resp_historic = {
        "expirationTime": 1600172441701,
        "count": 4321,
        "maxPageSize": 1000,
        "id": "d495891f-1607-42b7-ba8d-44d0786bd335_0",
        "resultList": {
            "result": [{
                "HostIOs": 296.1,
                "HostMBWritten": 0.31862956,
                "ReadResponseTime": 4.4177675,
                "HostMBReads": 0.05016927,
                "HostReads": 14.056666,
                "HostWrites": 25.78,
                "WriteResponseTime": 4.7228317,
                "timestamp": 1598875800000
            }, {
                "HostIOs": 350.22998,
                "HostMBWritten": 0.40306965,
                "ReadResponseTime": 4.396796,
                "HostMBReads": 0.043291014,
                "HostReads": 13.213333,
                "HostWrites": 45.97333,
                "WriteResponseTime": 4.7806735,
                "timestamp": 1598876100000
            }, {
                "HostIOs": 297.63333,
                "HostMBWritten": 0.25046548,
                "ReadResponseTime": 4.3915706,
                "HostMBReads": 0.042753905,
                "HostReads": 13.176666,
                "HostWrites": 28.643333,
                "WriteResponseTime": 4.8760557,
                "timestamp": 1598876400000
            }]
        }
    }
    # Real-time response: a single sample.
    vmax_array_perf_resp_real_time = {
        "expirationTime": 1600172441701,
        "count": 4321,
        "maxPageSize": 1000,
        "id": "d495891f-1607-42b7-ba8d-44d0786bd335_0",
        "resultList": {
            "result": [{
                "HostIOs": 296.1,
                "HostMBWritten": 0.31862956,
                "ReadResponseTime": 4.4177675,
                "HostMBReads": 0.05016927,
                "HostReads": 14.056666,
                "HostWrites": 25.78,
                "WriteResponseTime": 4.7228317,
                "timestamp": 1598875800000
            }]
        }
    }
    # Expected delfin metrics derived from the historic response.
    expected_historic = [
        constants.metric_struct(name='responseTime',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 9.1405992,
                                    1598876400000: 9.2676263,
                                    1598876100000: 9.1774695
                                }),
        constants.metric_struct(name='throughput',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 0.36879882999999997,
                                    1598876400000: 0.293219385,
                                    1598876100000: 0.446360664
                                }),
        constants.metric_struct(name='readThroughput',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 0.05016927,
                                    1598876100000: 0.043291014,
                                    1598876400000: 0.042753905
                                }),
        constants.metric_struct(name='writeThroughput',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 0.31862956,
                                    1598876100000: 0.40306965,
                                    1598876400000: 0.25046548
                                }),
        constants.metric_struct(name='requests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 296.1,
                                    1598876100000: 350.22998,
                                    1598876400000: 297.63333
                                }),
        constants.metric_struct(name='readRequests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 14.056666,
                                    1598876100000: 13.213333,
                                    1598876400000: 13.176666
                                }),
        constants.metric_struct(name='writeRequests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={
                                    1598875800000: 25.78,
                                    1598876100000: 45.97333,
                                    1598876400000: 28.643333
                                })
    ]
    # Expected delfin metrics derived from the real-time response.
    expected_realtime = [
        constants.metric_struct(name='responseTime',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 9.1405992}),
        constants.metric_struct(
            name='throughput',
            labels={
                'storage_id': '12345',
                'resource_type': 'array'
            },
            values={1598875800000: 0.36879882999999997}),
        constants.metric_struct(name='readThroughput',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 0.05016927}),
        constants.metric_struct(name='writeThroughput',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 0.31862956}),
        constants.metric_struct(name='requests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 296.1}),
        constants.metric_struct(name='readRequests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 14.056666}),
        constants.metric_struct(name='writeRequests',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'array'
                                },
                                values={1598875800000: 25.78})
    ]
    kwargs = VMAX_STORAGE_CONF
    mock_version.return_value = ['V9.0.2.7', '90']
    mock_unisphere_version.return_value = ['V9.0.2.7', '90']
    mock_array.return_value = {'symmetrixId': ['00112233']}
    mock_array_details.return_value = {
        'model': 'VMAX250F',
        'ucode': '5978.221.221',
        'display_name': 'VMAX250F-00112233'
    }
    mock_performnace.return_value = 200, vmax_array_perf_resp_historic
    driver = VMAXStorageDriver(**kwargs)
    self.assertEqual(driver.storage_id, "12345")
    self.assertEqual(driver.client.array_id, "00112233")
    # Historic window (start != end).
    ret = driver.collect_perf_metrics(context, '12345', "", 10000000,
                                      10900000)
    self.assertEqual(ret, expected_historic)
    # Real-time window (start == end).
    mock_performnace.return_value = 200, vmax_array_perf_resp_real_time
    ret = driver.collect_perf_metrics(context, '12345', "", 10900000,
                                      10900000)
    self.assertEqual(ret, expected_realtime)
    # A REST failure must surface as a collection exception.
    mock_performnace.side_effect = \
        exception.StoragePerformanceCollectionFailed
    with self.assertRaises(Exception) as exc:
        ret = driver.collect_perf_metrics(context, '12345', "", 10000000,
                                          10900000)
    self.assertIn('Failed to collect performance metrics. Reason',
                  str(exc.exception))
def test_collect_perf_metrics(self, mock_unisphere_version, mock_version,
                              mock_array, mock_array_keys, mock_r_keys,
                              mock_r_metrics):
    """Verify collect_perf_metrics assembles iops structs for storage,
    pool, controller and port resources from mocked REST key/metric
    responses, and raises once the mock side effects are exhausted."""
    expected = [
        constants.metric_struct(name='iops',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'storage',
                                    'resource_id': '00112233',
                                    'resource_name': 'VMAX00112233',
                                    'type': 'RAW',
                                    'unit': 'IOPS'
                                },
                                values={1566550500000: 417.42667}),
        constants.metric_struct(name='iops',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'storagePool',
                                    'resource_id': 'SRP_1',
                                    'resource_name': 'SRP_1',
                                    'type': 'RAW',
                                    'unit': 'IOPS'
                                },
                                values={1566550800000: 304.8}),
        constants.metric_struct(name='iops',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'controller',
                                    'resource_id': 'DF-1C',
                                    'resource_name': 'BEDirector_DF-1C',
                                    'type': 'RAW',
                                    'unit': 'IOPS'
                                },
                                values={1566987000000: 248.40666}),
        constants.metric_struct(name='iops',
                                labels={
                                    'storage_id': '12345',
                                    'resource_type': 'port',
                                    'resource_id': '12',
                                    'resource_name': 'BEPort_DF-1C_12',
                                    'type': 'RAW',
                                    'unit': 'IOPS'
                                },
                                values={1566987000000: 6.693333}),
    ]
    kwargs = VMAX_STORAGE_CONF
    mock_version.return_value = ['V9.0.2.7', '90']
    mock_unisphere_version.return_value = ['V9.0.2.7', '90']
    mock_array.return_value = {'symmetrixId': ['00112233']}
    driver = VMAXStorageDriver(**kwargs)
    self.assertEqual(driver.storage_id, "12345")
    self.assertEqual(driver.client.array_id, "00112233")
    # Per-resource key responses (availability windows).
    ret_array_key = {
        "arrayInfo": [{
            "symmetrixId": "00112233",
            "firstAvailableDate": "1566146400000",
            "lastAvailableDate": "1566550800000",
        }]
    }
    ret_pool_key = {
        "srpInfo": [
            {
                "srpId": "SRP_1",
                "firstAvailableDate": 1567065600000,
                "lastAvailableDate": 1568130900000
            },
        ]
    }
    ret_be_dir_key = {
        "beDirectorInfo": [
            {
                "directorId": "DF-1C",
                "firstAvailableDate": 1566557100000,
                "lastAvailableDate": 1566987300000
            },
        ]
    }
    ret_fe_dir_key = {
        "feDirectorInfo": [
            {
                "directorId": "FA-1D",
                "firstAvailableDate": 1567065600000,
                "lastAvailableDate": 1567093200000
            },
        ]
    }
    ret_rdf_dir_key = {
        "rdfDirectorInfo": [
            {
                "directorId": "RF-1F",
                "firstAvailableDate": 1567065600000,
                "lastAvailableDate": 1567437900000
            },
        ]
    }
    ret_be_port_key = {
        "bePortInfo": [
            {
                "portId": "12",
                "firstAvailableDate": 1566557100000,
                "lastAvailableDate": 1566988500000
            },
        ]
    }
    ret_fe_port_key = {
        "fePortInfo": [
            {
                "firstAvailableDate": 1567065600000,
                "lastAvailableDate": 1567162500000,
                "portId": "4"
            },
        ]
    }
    ret_rdf_port_key = {
        "rdfPortInfo": [{
            "portId": "7",
            "firstAvailableDate": 1567065600000,
            "lastAvailableDate": 1567439100000
        }]
    }
    mock_array_keys.return_value = ret_array_key
    # Key lookups are consumed in driver traversal order: pools, then
    # director types, then each director's ports.
    mock_r_keys.side_effect = [
        ret_pool_key,
        ret_be_dir_key,
        ret_fe_dir_key,
        ret_rdf_dir_key,
        ret_be_dir_key,
        ret_be_port_key,
        ret_fe_dir_key,
        ret_fe_port_key,
        ret_rdf_dir_key,
        ret_rdf_port_key,
    ]
    # Per-resource metric samples.
    ret_array_metric = {
        "HostIOs": 417.42667,
        "HostMBs": 0.0018131511,
        "FEReqs": 23.55,
        "BEIOs": 25.216667,
        "BEReqs": 5.55,
        "PercentCacheWP": 0.031244868,
        "timestamp": 1566550500000
    }
    ret_pool_metric = {
        "HostIOs": 304.8,
        "HostMBs": 0.005192057,
        "FEReqs": 23.04,
        "BEIOs": 22.566668,
        "BEReqs": 4.7733335,
        "PercentCacheWP": 0.018810686,
        "timestamp": 1566550800000
    }
    ret_be_dir_metric = {
        "PercentBusy": 0.025403459,
        "IOs": 248.40666,
        "Reqs": 3.91,
        "MBRead": 1.7852213,
        "MBWritten": 0.37213543,
        "PercentNonIOBusy": 0.0,
        "timestamp": 1566987000000
    }
    ret_fe_dir_metric = {
        "PercentBusy": 2.54652,
        "HostIOs": 3436.9368,
        "HostMBs": 51.7072,
        "Reqs": 3330.5947,
        "ReadResponseTime": 0.12916493,
        "WriteResponseTime": 0.3310084,
        "timestamp": 1567078200000
    }
    ret_rdf_dir_metric = {
        "PercentBusy": 4.8083158,
        "IOs": 1474.2234,
        "WriteReqs": 1189.76,
        "MBWritten": 54.89597,
        "MBRead": 0.4565983,
        "MBSentAndReceived": 55.35257,
        "AvgIOServiceTime": 0.89211756,
        "CopyIOs": 0.0,
        "CopyMBs": 0.0,
        "timestamp": 1567161600000
    }
    ret_be_port_metric = {
        "Reads": 4.7,
        "Writes": 1.9933333,
        "IOs": 6.693333,
        "MBRead": 0.43401042,
        "MBWritten": 0.10486979,
        "MBs": 0.5388802,
        "AvgIOSize": 82.44224,
        "PercentBusy": 0.013356605,
        "timestamp": 1566987000000
    }
    ret_fe_port_metric = {
        "ResponseTime": 0.1263021,
        "ReadResponseTime": 0.1263021,
        "WriteResponseTime": 0.0,
        "Reads": 0.32,
        "Writes": 0.0,
        "IOs": 0.32,
        "MBRead": 4.296875E-4,
        "MBWritten": 0.0,
        "MBs": 4.296875E-4,
        "AvgIOSize": 1.375,
        "SpeedGBs": 16.0,
        "PercentBusy": 2.6226044E-5,
        "timestamp": 1567161600000
    }
    ret_rdf_port_metric = {
        "Reads": 0.0,
        "Writes": 1216.7633,
        "IOs": 1216.7633,
        "MBRead": 0.0,
        "MBWritten": 57.559597,
        "MBs": 57.559597,
        "AvgIOSize": 48.440834,
        "SpeedGBs": 16.0,
        "PercentBusy": 3.5131588,
        "timestamp": 1567161600000
    }
    mock_r_metrics.side_effect = [
        [ret_array_metric],
        [ret_pool_metric],
        [ret_be_dir_metric],
        [ret_fe_dir_metric],
        [ret_rdf_dir_metric],
        [ret_be_port_metric],
        [ret_fe_port_metric],
        [ret_rdf_port_metric],
    ]
    resource_metrics = {
        'storage': {
            'iops': {
                'unit': 'IOPS'
            }
        },
        'storagePool': {
            'iops': {
                'unit': 'IOPS'
            }
        },
        'controller': {
            'iops': {
                'unit': 'IOPS'
            }
        },
        'port': {
            'iops': {
                'unit': 'IOPS'
            }
        },
    }
    ret = driver.collect_perf_metrics(context, driver.storage_id,
                                      resource_metrics, 1000, 2000)
    # Spot-check one struct per resource type by position in the result.
    self.assertEqual(ret[0], expected[0])
    self.assertEqual(ret[2], expected[1])
    self.assertEqual(ret[4], expected[2])
    self.assertEqual(ret[13], expected[3])
    # Second call exhausts the mock side_effect lists and must raise.
    with self.assertRaises(Exception) as exc:
        driver.collect_perf_metrics(context, driver.storage_id,
                                    resource_metrics, 1000, 2000)
    # NOTE(review): assertIn('') is vacuously true for any string —
    # consider asserting a meaningful message substring here.
    self.assertIn('', str(exc.exception))
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
import tempfile
from unittest import TestCase

from delfin.common.constants import metric_struct
from delfin.exporter.prometheus import prometheus

# A single fake metric batch pushed through the exporter.
fake_metrics = [
    metric_struct(name='throughput',
                  labels={
                      'storage_id': '12345',
                      'resource_type': 'storage',
                      'resource_id': 'storage0',
                      'type': 'RAW',
                      'unit': 'MB/s'
                  },
                  values={1622808000000: 61.9388895680357})
]


class TestPrometheusExporter(TestCase):

    def test_push_to_prometheus(self):
        """push_to_prometheus must write a *.prom file to metrics_dir."""
        prometheus_obj = prometheus.PrometheusExporter()
        # Use a fresh temporary directory instead of os.getcwd(): the
        # assertion can no longer be satisfied by stale *.prom files from
        # earlier runs, and the working tree is not polluted with
        # generated files that were never cleaned up.
        with tempfile.TemporaryDirectory() as metrics_dir:
            prometheus_obj.metrics_dir = metrics_dir
            prometheus_obj.push_to_prometheus(fake_metrics)
            self.assertTrue(
                glob.glob(os.path.join(metrics_dir, '*.prom')))
HOST_RESULT = [{ 'name': 'aix_ma', 'storage_id': '12345', 'native_storage_host_id': 'aix_ma', 'os_type': 'Unknown', 'status': 'normal', 'ip_address': '8.44.129.26' }] METRICS_RESULT = [ constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'controller', 'resource_id': '3600485', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1625717816000: 0.0, 1625717875000: 0.0, 1625717936000: 0.73, 1625717996000: 0.0 }), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': 'A-6', 'type': 'RAW', 'unit': 'IOPS' }, values={