def get_data_latest_timestamp(self, storage_id):
    archive_file_list = []
    try:
        tools = Tools()
        archive_name = self.navi_handler.create_archives(storage_id)
        LOG.info("Create archive_name: {}".format(archive_name))
        archive_file_list.append(archive_name)
        archive_name_infos = archive_name.split('.')
        file_path = '%s%s.csv' % (
            self.navi_handler.get_local_file_path(), archive_name_infos[0])
        resource_obj_name = ''
        collection_time = ''
        with open(file_path) as file:
            f_csv = csv.reader(file)
            next(f_csv)
            for row in f_csv:
                if not resource_obj_name or resource_obj_name == row[0]:
                    resource_obj_name = row[0]
                    collection_time = row[1]
                else:
                    break
        latest_time = tools.time_str_to_timestamp(collection_time,
                                                  consts.TIME_PATTERN)
    except Exception as err:
        err_msg = "Failed to get latest perf timestamp " \
                  "from VnxBlockStor: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
    finally:
        self._remove_archive_file(archive_file_list)
    return latest_time
def get_aggregate(self, storage_id):
    agg_list = []
    agg_info = self.ssh_pool.do_exec(
        constant.AGGREGATE_SHOW_DETAIL_COMMAND)
    agg_array = agg_info.split(constant.AGGREGATE_SPLIT_STR)
    agg_map = {}
    for agg in agg_array[1:]:
        Tools.split_value_map(agg, agg_map, split=':')
        status = constant.AGGREGATE_STATUS.get(agg_map['State'])
        pool_model = {
            'name': agg_map['e'],
            'storage_id': storage_id,
            'native_storage_pool_id': agg_map['UUIDString'],
            'description': '',
            'status': status,
            'storage_type': constants.StorageType.UNIFIED,
            'total_capacity':
                int(Tools.get_capacity_size(agg_map['Size'])),
            'used_capacity':
                int(Tools.get_capacity_size(agg_map['UsedSize'])),
            'free_capacity':
                int(Tools.get_capacity_size(agg_map['AvailableSize'])),
        }
        agg_list.append(pool_model)
    return agg_list
def get_pool(self, storage_id):
    pool_list = []
    pool_info = self.ssh_do_exec(constant.POOLS_SHOW_DETAIL_COMMAND)
    pool_map_list = []
    Tools.split_value_map_list(pool_info, pool_map_list, split=':')
    for pool_map in pool_map_list:
        if pool_map and 'StoragePoolName' in pool_map.keys():
            status = constants.StoragePoolStatus.ABNORMAL
            if pool_map['IsPoolHealthy?'] == 'true':
                status = constants.StoragePoolStatus.NORMAL
            pool_model = {
                'name': pool_map['StoragePoolName'],
                'storage_id': storage_id,
                'native_storage_pool_id': pool_map['UUIDofStoragePool'],
                'description': None,
                'status': status,
                'storage_type': constants.StorageType.UNIFIED,
                'total_capacity':
                    self.get_size(pool_map['StoragePoolTotalSize'], True),
                'used_capacity':
                    self.get_size(pool_map['StoragePoolTotalSize'], True) -
                    self.get_size(pool_map['StoragePoolUsableSize'], True),
                'free_capacity':
                    self.get_size(pool_map['StoragePoolUsableSize'], True)
            }
            pool_list.append(pool_model)
    return pool_list
def get_archive_file_name(self, storage_id):
    tools = Tools()
    create_time = tools.timestamp_to_time_str(
        time.time() * units.k, consts.ARCHIVE_FILE_NAME_TIME_PATTERN)
    archive_file_name = consts.ARCHIVE_FILE_NAME % (storage_id, create_time)
    return archive_file_name
def _get_metric_model(self, metric_list, labels, metric_values, obj_cap,
                      resources_type):
    metric_model_list = []
    tools = Tools()
    for metric_name in (metric_list or []):
        values = {}
        obj_labels = copy.copy(labels)
        obj_labels['unit'] = obj_cap.get(metric_name).get('unit')
        for metric_value in metric_values:
            metric_value_infos = metric_value
            if not consts.METRIC_MAP.get(resources_type, {}).get(
                    metric_name):
                continue
            value = metric_value_infos[
                consts.METRIC_MAP.get(resources_type).get(metric_name)]
            if not value:
                value = '0'
            # Normalize the timestamp to the collection-time pattern
            # granularity before using it as the values key.
            collection_timestamp = tools.time_str_to_timestamp(
                metric_value_infos[1], consts.TIME_PATTERN)
            collection_time_str = tools.timestamp_to_time_str(
                collection_timestamp, consts.COLLECTION_TIME_PATTERN)
            collection_timestamp = tools.time_str_to_timestamp(
                collection_time_str, consts.COLLECTION_TIME_PATTERN)
            if "iops" == obj_cap.get(metric_name).get('unit').lower():
                value = int(float(value))
            else:
                value = float('%.6f' % (float(value)))
            values[collection_timestamp] = value
        if values:
            metric_model = constants.metric_struct(name=metric_name,
                                                   labels=obj_labels,
                                                   values=values)
            metric_model_list.append(metric_model)
    return metric_model_list
def cli_log_to_list(self, resource_info):
    obj_list = []
    try:
        tools = Tools()
        # Filter log information for log codes 70-77
        pattern = re.compile(consts.LOG_FILTER_PATTERN)
        obj_infos = resource_info.split('\n')
        for obj_info in obj_infos:
            str_line = obj_info.strip()
            if str_line:
                search_obj = pattern.search(str_line)
                if search_obj:
                    str_line = str_line.replace(
                        'See alerts for details.', '')
                    str_infos = str_line.split(search_obj.group())
                    str_0 = str_infos[0].strip()
                    log_time = str_0[0:str_0.rindex(' ')]
                    event_code = search_obj.group() \
                        .replace('(', '').replace(')', '')
                    obj_model = {
                        'log_time': log_time,
                        'log_time_stamp': tools.time_str_to_timestamp(
                            log_time, consts.TIME_PATTERN),
                        'event_code': event_code,
                        'message': str_infos[1].strip()
                    }
                    obj_list.append(obj_model)
    except Exception as e:
        err_msg = "arrange log info error: %s" % six.text_type(e)
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
    return obj_list
def list_filesystems(self, storage_id):
    try:
        fs_list = []
        fs_info = self.ssh_do_exec([constant.FS_INFO_COMMAND])
        fs_array = self.get_table_data(fs_info)
        status_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])
        status_array = self.get_table_data(status_info)
        status_map = {}
        for status in status_array:
            status_info = status.split()
            if len(status_info) > constant.FS_INDEX['status_len']:
                status_map[status_info[constant.FS_INDEX['id_index']]] = \
                    [status_info[constant.FS_INDEX['pool_index']],
                     status_info[constant.FS_INDEX['status_index']]]
        for fs in fs_array:
            fs_info = list(filter(None, fs.split(' ')))
            if len(fs_info) > constant.FS_INDEX['detail_len']:
                total_capacity = \
                    fs_info[constant.FS_INDEX['total_index']].replace(
                        ' ', '')
                used_capacity = \
                    fs_info[constant.FS_INDEX['used_index']].replace(
                        ' ', '').split('(')[0]
                free_capacity = \
                    fs_info[constant.FS_INDEX['free_index']].replace(
                        ' ', '').split('(')[0]
                total_capacity = Tools.get_capacity_size(total_capacity)
                used_capacity = Tools.get_capacity_size(used_capacity)
                free_capacity = Tools.get_capacity_size(free_capacity)
                volume_type = constants.VolumeType.THICK \
                    if fs_info[constant.FS_INDEX['type_index']] == 'No' \
                    else constants.VolumeType.THIN
                pool_id = status_map.get(fs_info[0])[0] \
                    if status_map.get(fs_info[0]) else None
                status = status_map.get(fs_info[0])[1] \
                    if status_map.get(fs_info[0]) else None
                fs_model = {
                    'name': fs_info[1],
                    'storage_id': storage_id,
                    'native_filesystem_id': fs_info[1],
                    'native_pool_id': pool_id,
                    'status': constant.FS_STATUS_MAP[status],
                    'type': volume_type,
                    'total_capacity': total_capacity,
                    'used_capacity': used_capacity,
                    'free_capacity': free_capacity
                }
                fs_list.append(fs_model)
        return fs_list
    except exception.DelfinException as e:
        err_msg = "Failed to get filesystem from " \
                  "hitachi nas: %s" % (six.text_type(e.msg))
        LOG.error(err_msg)
        raise e
    except Exception as err:
        err_msg = "Failed to get filesystem from " \
                  "hitachi nas: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
def list_volumes(self, storage_id):
    try:
        volume_list = []
        volume_info = self.ssh_do_exec(constant.LUN_SHOW_DETAIL_COMMAND)
        fs_list = self.get_filesystems(storage_id)
        volume_map_list = []
        Tools.split_value_map_list(volume_info, volume_map_list, split=':')
        for volume_map in volume_map_list:
            if volume_map and 'LUNName' in volume_map.keys():
                pool_id = None
                status = 'normal' if volume_map['State'] == 'online' \
                    else 'offline'
                for fs in fs_list:
                    if fs['name'] == volume_map['VolumeName']:
                        pool_id = fs['native_pool_id']
                type = constants.VolumeType.THIN \
                    if volume_map['SpaceAllocation'] == 'enabled' \
                    else constants.VolumeType.THICK
                volume_model = {
                    'name': volume_map['LUNName'],
                    'storage_id': storage_id,
                    'description': None,
                    'status': status,
                    'native_volume_id': volume_map['SerialNumber'],
                    'native_storage_pool_id': pool_id,
                    'wwn': None,
                    'compressed': None,
                    'deduplicated': None,
                    'type': type,
                    'total_capacity':
                        self.get_size(volume_map['LUNSize'], True),
                    'used_capacity':
                        self.get_size(volume_map['UsedSize'], True),
                    'free_capacity':
                        self.get_size(volume_map['LUNSize'], True) -
                        self.get_size(volume_map['UsedSize'], True)
                }
                volume_list.append(volume_model)
        return volume_list
    except exception.DelfinException as e:
        err_msg = "Failed to get storage volume from " \
                  "netapp cmode: %s" % (six.text_type(e))
        LOG.error(err_msg)
        raise e
    except Exception as err:
        err_msg = "Failed to get storage volume from " \
                  "netapp cmode: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
def get_storage(self):
    try:
        raw_capacity = total_capacity = used_capacity = free_capacity = 0
        controller_map_list = []
        system_info = self.ssh_do_exec(constant.CLUSTER_SHOW_COMMAND)
        version_info = self.ssh_do_exec(constant.VERSION_SHOW_COMMAND)
        status_info = self.ssh_do_exec(constant.STORAGE_STATUS_COMMAND)
        controller_info = self.ssh_do_exec(
            constant.CONTROLLER_SHOW_DETAIL_COMMAND)
        Tools.split_value_map_list(controller_info, controller_map_list,
                                   ":")
        version_array = version_info.split("\r\n")
        storage_version = ''
        for version in version_array:
            if 'NetApp' in version:
                storage_version = version.split(":")
                break
        status = self.get_table_data(status_info)
        status = constant.STORAGE_STATUS.get(status[0].split()[0])
        disk_list = self.get_disks(None)
        pool_list = self.list_storage_pools(None)
        storage_map_list = []
        Tools.split_value_map_list(system_info, storage_map_list,
                                   split=':')
        if len(storage_map_list) > 0:
            storage_map = storage_map_list[len(storage_map_list) - 1]
            controller_map = \
                controller_map_list[len(controller_map_list) - 1]
            for disk in disk_list:
                raw_capacity += disk['capacity']
            for pool in pool_list:
                total_capacity += pool['total_capacity']
                free_capacity += pool['free_capacity']
                used_capacity += pool['used_capacity']
            storage_model = {
                "name": storage_map['ClusterName'],
                "vendor": constant.STORAGE_VENDOR,
                "model": controller_map['Model'],
                "status": status,
                "serial_number": storage_map['ClusterSerialNumber'],
                "firmware_version": storage_version[0],
                "location": controller_map['Location'],
                "total_capacity": total_capacity,
                "raw_capacity": raw_capacity,
                "used_capacity": used_capacity,
                "free_capacity": free_capacity
            }
            return storage_model
    except exception.DelfinException as e:
        err_msg = "Failed to get storage from " \
                  "netapp cmode: %s" % (six.text_type(e.msg))
        LOG.error(err_msg)
        raise e
    except Exception as err:
        err_msg = "Failed to get storage from " \
                  "netapp cmode: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
def log_query_time_to_list(self, begin_timestamp, end_timestamp):
    tools = Tools()
    begin_time = tools.timestamp_to_time_str(begin_timestamp,
                                             consts.DATE_PATTERN)
    end_time = tools.timestamp_to_time_str(end_timestamp,
                                           consts.DATE_PATTERN)
    query_time_map = {'begin_time': begin_time, 'end_time': end_time}
    return query_time_map
def get_pool_metrics(self, start_time, end_time):
    start_time_str = Tools.timestamp_to_utc_time_str(
        start_time, consts.REST_COLLEC_TTIME_PATTERN)
    end_time_str = Tools.timestamp_to_utc_time_str(
        end_time, consts.REST_COLLEC_TTIME_PATTERN)
    url = RestHandler.REST_CPGSTATISTICS_URL % (
        start_time_str, end_time_str)
    rejson = self.get_resinfo_call(url, method='GET')
    return rejson
def handle_stats_filename(file_name, file_map):
    name_arr = file_name.split('_')
    file_type = '%s_%s_%s' % (name_arr[0], name_arr[1], name_arr[2])
    file_time = '20%s%s' % (name_arr[3], name_arr[4])
    time_pattern = '%Y%m%d%H%M%S'
    tools = Tools()
    occur_time = tools.time_str_to_timestamp(file_time, time_pattern)
    if file_map.get(file_type):
        file_map[file_type][occur_time] = file_name
    else:
        file_map[file_type] = {occur_time: file_name}
def get_pools_volumes(self, storage_id):
    volumes_list = []
    pool_list = cli_handler((constant.POOL_INFO_COMMAND % {
        'unit_name': self.storage_name
    }))
    pools_array = pool_list.split("\r\n")
    if len(pools_array) > 2:
        for pool in pools_array[2:]:
            pool_array = pool.split()
            pool_info = cli_handler(constant.POOL_DETAIL_INFO_COMMAND % {
                'unit_name': self.storage_name,
                'pool_no': pool_array[0]
            })
            volumes = pool_info.split("Logical Unit")
            if len(volumes) > 1:
                volume_array = volumes[1].split('\r\n')
                for volume_info in volume_array[3:]:
                    volume = volume_info.split()
                    if len(volume) > 9:
                        volume_model = {
                            'name': volume[0],
                            'storage_id': storage_id,
                            'description': '',
                            'status':
                                constants.StoragePoolStatus.NORMAL
                                if volume[9] == 'normal'
                                else constants.StoragePoolStatus.ABNORMAL,
                            'native_volume_id': volume[0],
                            'native_storage_pool_id': pool_array[0],
                            'wwn': '',
                            'compressed': '',
                            'deduplicated': '',
                            'type': constants.VolumeType.THIN,
                            'total_capacity': int(
                                Tools.get_capacity_size(volume[1] + 'GB')),
                            'used_capacity': int(
                                Tools.get_capacity_size(volume[3] + 'GB')),
                            'free_capacity': int(
                                Tools.get_capacity_size(volume[1] + 'GB')) -
                                int(Tools.get_capacity_size(
                                    volume[3] + 'GB'))
                        }
                        volumes_list.append(volume_model)
    return volumes_list
def _get__archive_file(self, start_time, end_time):
    archive_file_list = []
    archives = self.navi_handler.get_archives()
    tools = Tools()
    for archive_info in (archives or []):
        collection_timestamp = tools.time_str_to_timestamp(
            archive_info.get('collection_time'), consts.TIME_PATTERN)
        if collection_timestamp > start_time:
            archive_file_list.append(archive_info.get('archive_name'))
        if collection_timestamp > end_time:
            break
    return archive_file_list
def get_disks(self, storage_id):
    disks_list = []
    physicals_list = []
    disks_info = self.ssh_do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
    physicals_info = self.ssh_do_exec(constant.DISK_SHOW_PHYSICAL_COMMAND)
    error_disk = self.ssh_do_exec(constant.DISK_ERROR_COMMAND)
    error_disk_list = []
    error_disk_array = self.get_table_data(error_disk)
    for error_disk in error_disk_array:
        error_array = error_disk.split()
        if len(error_array) > 2:
            error_disk_list.append(error_array[0])
    disks_map_list = []
    physical_array = self.get_table_data(physicals_info)
    for physical in physical_array:
        physicals_list.append(physical.split())
    Tools.split_value_map_list(disks_info, disks_map_list, split=':')
    for disks_map in disks_map_list:
        if disks_map and 'Disk' in disks_map.keys():
            speed = physical_type = firmware = None
            logical_type = constant.DISK_LOGICAL. \
                get(disks_map['ContainerType'])
            # Map disk physical information
            for physical_info in physicals_list:
                if len(physical_info) > 6 and \
                        physical_info[0] == disks_map['Disk']:
                    physical_type = \
                        constant.DISK_TYPE.get(physical_info[1])
                    speed = physical_info[5] \
                        if physical_info[5] != '-' else 0
                    firmware = physical_info[4]
            status = constants.DiskStatus.NORMAL
            if disks_map['Disk'] in error_disk_list:
                status = constants.DiskStatus.ABNORMAL
            disk_model = {
                'name': disks_map['Disk'],
                'storage_id': storage_id,
                'native_disk_id': disks_map['Disk'],
                'serial_number': disks_map['SerialNumber'],
                'manufacturer': disks_map['Vendor'],
                'model': disks_map['Model'],
                'firmware': firmware,
                'speed': speed,
                'capacity':
                    self.get_size(disks_map['PhysicalSize'], True),
                'status': status,
                'physical_type': physical_type,
                'logical_type': logical_type,
                'native_disk_group_id': disks_map['Aggregate'],
                'location': None,
            }
            disks_list.append(disk_model)
    return disks_list
def list_qtrees(self, storage_id):
    try:
        qt_list = []
        qt_info = self.ssh_do_exec(constant.QTREE_SHOW_DETAIL_COMMAND)
        fs_info = self.ssh_do_exec(constant.FS_SHOW_DETAIL_COMMAND)
        fs_map_list = []
        qt_map_list = []
        Tools.split_value_map_list(fs_info, fs_map_list, split=':')
        Tools.split_value_map_list(qt_info, qt_map_list, split=':')
        for qt_map in qt_map_list:
            if qt_map and 'QtreeName' in qt_map.keys():
                fs_id = self.get_fs_id(qt_map['VserverName'],
                                       qt_map['VolumeName'])
                qtree_path = None
                for fs_map in fs_map_list:
                    if fs_map and 'VserverName' in fs_map.keys() \
                            and fs_id == self.get_fs_id(
                                fs_map['VserverName'],
                                fs_map['VolumeName']) \
                            and fs_map['JunctionPath'] != '-':
                        qtree_path = fs_map['JunctionPath']
                        break
                qt_id = self.get_qt_id(qt_map['VserverName'],
                                       qt_map['VolumeName'],
                                       qt_map['QtreeName'])
                qtree_name = qt_map['QtreeName']
                if qt_map['QtreeName'] and qtree_path:
                    qtree_path += '/' + qt_map['QtreeName']
                    qtree_path = qtree_path.replace('//', '/')
                else:
                    qtree_name = qt_id
                qt_model = {
                    'name': qtree_name,
                    'storage_id': storage_id,
                    'native_qtree_id': qt_id,
                    'path': qtree_path,
                    'native_filesystem_id': fs_id,
                    'security_mode': qt_map['SecurityStyle'],
                }
                qt_list.append(qt_model)
        return qt_list
    except exception.DelfinException as err:
        err_msg = "Failed to get storage qtrees from " \
                  "netapp cmode: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise err
    except Exception as err:
        err_msg = "Failed to get storage qtrees from " \
                  "netapp cmode: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
def get_raid_groups(self, storage_id):
    pool_list = []
    raid_group_map = {}
    raid_group_info = cli_handler((constant.RAIDGROUP_INFO_COMMAND % {
        'unit_name': self.storage_name
    }))
    raid_group_array = raid_group_info.split("\r\n")
    if len(raid_group_array) > 2:
        for raid_array in raid_group_array[2:]:
            raid_group = raid_array.split()
            if len(raid_group) > 2:
                raid_detail = cli_handler(
                    constant.RAIDGROUP_DETAIL_INFO_COMMAND % {
                        'unit_name': self.storage_name,
                        'raidgroup_no': raid_group[0]
                    })
                self.get_detail(raid_detail, raid_group_map, ":", True)
                raid_group_model = {
                    'name': raid_group_map['RAIDGroup'],
                    'storage_id': storage_id,
                    'native_storage_pool_id': raid_group_map['RAIDGroup'],
                    'description': '',
                    'status': constants.StoragePoolStatus.NORMAL
                    if raid_group_map['Status'] == 'Normal'
                    else constants.StoragePoolStatus.OFFLINE,
                    'storage_type': constants.StorageType.BLOCK,
                    'subscribed_capacity': '',
                    'total_capacity': int(
                        Tools.get_capacity_size(
                            raid_group_map['TotalCapacity'])),
                    'used_capacity': int(
                        Tools.get_capacity_size(
                            raid_group_map['TotalCapacity'])) - int(
                        Tools.get_capacity_size(
                            raid_group_map['FreeCapacity'])),
                    'free_capacity': int(
                        Tools.get_capacity_size(
                            raid_group_map['FreeCapacity']))
                }
                pool_list.append(raid_group_model)
    return pool_list
def get_event(events_error_str, query_para):
    events_error_dict = dict()
    events_error_arr = events_error_str.split('\n')
    for events_error_row_str in events_error_arr:
        events_error_row_str = events_error_row_str.strip()
        reg = re.compile(r"(\d{4}-\d{1,2}-\d{1,2})")
        if not re.match(reg, events_error_row_str):
            continue
        error_description_dict = dict()
        time_stamp = Tools().time_str_to_timestamp(
            events_error_row_str[:consts.OCCUR_TIME_RANGE].strip(),
            consts.TIME_PATTERN)
        if query_para is not None:
            try:
                if time_stamp is None or time_stamp \
                        < int(query_para.get('begin_time')) or \
                        time_stamp > int(query_para.get('end_time')):
                    continue
            except Exception as e:
                LOG.error(e)
        severity = events_error_row_str[
            consts.SEVERITY_RANGE_BEGIN:consts.SEVERITY_RANGE_END].strip()
        code = events_error_row_str[
            consts.CODE_RANGE_BEGIN:consts.CODE_RANGE_END].strip()
        description = events_error_row_str[consts.DESCRIPTION_RANGE:] \
            .strip()
        key = '{}{}{}'.format(severity, code, description)
        if events_error_dict.get(key):
            continue
        error_description_dict['severity'] = severity
        error_description_dict['code'] = code
        error_description_dict['description'] = description
        error_description_dict['occur_time'] = time_stamp
        events_error_dict[key] = error_description_dict
    return events_error_dict
def analysis_sp_time(self, resource_info):
    system_time = 0
    try:
        tools = Tools()
        obj_infos = resource_info.split('\n')
        for obj_info in obj_infos:
            str_line = obj_info.strip()
            if "Time on SP A:" in str_line:
                time_str = str_line.replace("Time on SP A:", "").strip()
                system_time = tools.time_str_to_timestamp(
                    time_str, consts.GET_SP_TIME_PATTERN)
    except Exception as e:
        err_msg = "analysis sp time error: %s" % six.text_type(e)
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
    return system_time
def _filter_performance_data(self, archive_file_list, resources_map,
                             start_time, end_time):
    performance_lines_map = {}
    try:
        tools = Tools()
        for archive_file in archive_file_list:
            self.navi_handler.download_archives(archive_file)
            archive_name_infos = archive_file.split('.')
            file_path = '%s%s.csv' % (
                self.navi_handler.get_local_file_path(),
                archive_name_infos[0])
            with open(file_path) as file:
                f_csv = csv.reader(file)
                next(f_csv)
                for row in f_csv:
                    self._package_performance_data(row, resources_map,
                                                   start_time, end_time,
                                                   tools,
                                                   performance_lines_map)
    except Exception as err:
        err_msg = "Failed to filter performance data: %s" % \
                  (six.text_type(err))
        LOG.error(err_msg)
        raise exception.StorageBackendException(err_msg)
    return performance_lines_map
def list_controllers(self, context):
    controllers = self.cli_handler.get_controllers()
    controllers_status = self.cli_handler.common_data_encapsulation(
        consts.GET_STORAGE_CONTROLLER_STATUS)
    controller_list = []
    for controller in (controllers or []):
        name = controller.get('name')
        status = constants.ControllerStatus.FAULT
        if controllers_status and controllers_status.get(name):
            status_value = controllers_status.get(name)
            if status_value and \
                    consts.CONTROLLER_STATUS_NORMAL_KEY in status_value:
                status = constants.ControllerStatus.NORMAL
        controller_model = {
            'name': controller.get('name'),
            'storage_id': self.storage_id,
            'native_controller_id': controller.get('Serial Number'),
            'status': status,
            'location': controller.get('name'),
            'soft_version': controller.get('Hard Revision'),
            'cpu_info': controller.get('CPU Clock'),
            'memory_size': str(int(
                Tools.get_capacity_size(controller.get('Memory Size'))))
        }
        controller_list.append(controller_model)
    return controller_list
def get_disks(self, storage_id):
    disks_list = []
    physicals_list = []
    disks_info = self.ssh_pool.do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
    disks_array = disks_info.split(constant.DISK_SPLIT_STR)
    physicals_info = self.ssh_pool.do_exec(
        constant.DISK_SHOW_PHYSICAL_COMMAND)
    disks_map = {}
    physical_array = physicals_info.split('\r\n')
    for i in range(2, len(physical_array), 2):
        physicals_list.append(physical_array[i].split())
    for disk_str in disks_array[1:]:
        speed = physical_type = firmware = None
        Tools.split_value_map(disk_str, disks_map, split=':')
        logical_type = constant.DISK_LOGICAL. \
            get(disks_map['ContainerType'])
        # Map disk physical information
        for physical_info in physicals_list:
            if len(physical_info) > 6 \
                    and physical_info[0] == disks_map['k']:
                physical_type = constant.DISK_TYPE.get(physical_info[1])
                speed = physical_info[5]
                firmware = physical_info[4]
                break
        status = constants.DiskStatus.ABNORMAL
        if disks_map['Errors:'] is None or disks_map['Errors:'] == "":
            status = constants.DiskStatus.NORMAL
        disk_model = {
            'name': disks_map['k'],
            'storage_id': storage_id,
            'native_disk_id': disks_map['k'],
            'serial_number': disks_map['SerialNumber'],
            'manufacturer': disks_map['Vendor'],
            'model': disks_map['Model'],
            'firmware': firmware,
            'speed': speed,
            'capacity':
                int(Tools.get_capacity_size(disks_map['PhysicalSize'])),
            'status': status,
            'physical_type': physical_type,
            'logical_type': logical_type,
            'health_score': '',
            'native_disk_group_id': disks_map['Aggregate'],
            'location': '',
        }
        disks_list.append(disk_model)
    return disks_list
def list_controllers(self, storage_id):
    try:
        controller_list = []
        controller_info = self.ssh_do_exec(
            constant.CONTROLLER_SHOW_DETAIL_COMMAND)
        controller_ips = self.ssh_do_exec(constant.CONTROLLER_IP_COMMAND)
        ips_array = self.get_table_data(controller_ips)
        ip_map = {}
        for ips in ips_array:
            ip_array = ips.split()
            if len(ip_array) == 4:
                ip_map[ip_array[2]] = ip_array[3]
        controller_map_list = []
        Tools.split_value_map_list(controller_info, controller_map_list,
                                   split=':')
        for controller_map in controller_map_list:
            if controller_map and 'Node' in controller_map.keys():
                status = constants.ControllerStatus.NORMAL \
                    if controller_map['Health'] == 'true' \
                    else constants.ControllerStatus.OFFLINE
                controller_model = {
                    'name': controller_map['Node'],
                    'storage_id': storage_id,
                    'native_controller_id': controller_map['SystemID'],
                    'status': status,
                    'location': controller_map['Location'],
                    'soft_version': None,
                    'cpu_info': None,
                    'memory_size': None,
                    'mgmt_ip': ip_map.get(controller_map['Node'])
                }
                controller_list.append(controller_model)
        return controller_list
    except exception.DelfinException as e:
        err_msg = "Failed to get storage controllers from " \
                  "netapp cmode: %s" % (six.text_type(e))
        LOG.error(err_msg)
        raise e
    except Exception as err:
        err_msg = "Failed to get storage controllers from " \
                  "netapp cmode: %s" % (six.text_type(err))
        LOG.error(err_msg)
        raise exception.InvalidResults(err_msg)
def format_port_group(port_set_info, lif_info, storage_id):
    port_map_list, port_group_list = [], []
    lif_map_list, port_group_relation_list = [], []
    Tools.split_value_map_list(port_set_info, port_map_list, split=':')
    Tools.split_value_map_list(lif_info, lif_map_list, split=':')
    for port_map in port_map_list:
        if 'PortsetName' in port_map:
            port_group_id = "%s-%s-%s" % \
                            (port_map.get('VserverName'),
                             port_map.get('PortsetName'),
                             port_map.get('Protocol'))
            ports = \
                port_map.get('LIFOrTPGName').replace(' ', '').split(',')
            ports_str = ''
            for lif_map in lif_map_list:
                if 'LogicalInterfaceName' in lif_map:
                    if lif_map.get('LogicalInterfaceName') in ports:
                        port_id = "%s_%s" % \
                                  (lif_map['CurrentNode'],
                                   lif_map['CurrentPort'])
                        port_group_relation = {
                            'storage_id': storage_id,
                            'native_port_group_id': port_group_id,
                            'native_port_id': port_id
                        }
                        port_group_relation_list.append(
                            port_group_relation)
                        if ports_str:
                            ports_str = \
                                "{0},{1}".format(ports_str, port_id)
                        else:
                            ports_str = "{0}".format(port_id)
            port_group_model = {
                'native_port_group_id': port_group_id,
                'name': port_map.get('PortsetName'),
                'ports': ports_str,
                'storage_id': storage_id,
            }
            port_group_list.append(port_group_model)
    result = {
        'port_groups': port_group_list,
        'port_grp_port_rels': port_group_relation_list
    }
    return result
def get_alerts(self, query_para):
    alert_list = []
    alert_info = self.ssh_do_exec(constant.ALTER_SHOW_DETAIL_COMMAND)
    alert_map_list = []
    Tools.split_value_map_list(alert_info, alert_map_list, True, split=':')
    for alert_map in alert_map_list:
        if alert_map and 'AlertID' in alert_map.keys():
            occur_time = int(time.mktime(time.strptime(
                alert_map['IndicationTime'],
                constant.ALTER_TIME_TYPE)))
            if not query_para or \
                    (int(query_para['begin_time'])
                     <= occur_time
                     <= int(query_para['end_time'])):
                alert_model = {
                    'alert_id': alert_map['AlertID'],
                    'alert_name': alert_map['AlertID'],
                    'severity': constant.ALERT_SEVERITY[
                        alert_map['PerceivedSeverity']],
                    'category': constants.Category.FAULT,
                    'type': constants.EventType.EQUIPMENT_ALARM,
                    'occur_time': occur_time * 1000,
                    'description': alert_map['Description'],
                    'sequence_number': alert_map['AlertID'],
                    'match_key': hashlib.md5(
                        (alert_map['AlertID'] +
                         alert_map['Node'] +
                         alert_map['AlertingResource']).encode()
                    ).hexdigest(),
                    'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                    'location': alert_map['ProbableCause'] + ':' +
                    alert_map['PossibleEffect']
                }
                alert_list.append(alert_model)
    return alert_list
def get_cifs_shares(self, storage_id, vserver_name,
                    qtree_list, protocol_map):
    shares_list = []
    share_info = self.ssh_do_exec(
        (constant.CIFS_SHARE_SHOW_DETAIL_COMMAND % {
            'vserver_name': vserver_name
        }))
    share_map_list = []
    Tools.split_value_map_list(share_info, share_map_list, split=':')
    for share_map in share_map_list:
        if share_map and 'VolumeName' in share_map.keys() and \
                share_map['VolumeName'] != '-':
            protocol_str = protocol_map.get(share_map['Vserver'])
            fs_id = self.get_fs_id(share_map['Vserver'],
                                   share_map['VolumeName'])
            share_id = fs_id + '_' + share_map['Share'] + '_'
            qtree_id = None
            for qtree in qtree_list:
                name_array = share_map['Path'].split('/')
                if len(name_array) > 0:
                    qtree_name = name_array[len(name_array) - 1]
                    if qtree_name == share_map['VolumeName']:
                        qtree_name = ''
                    qt_id = self.get_qt_id(share_map['Vserver'],
                                           share_map['VolumeName'],
                                           qtree_name)
                else:
                    break
                if qtree['native_qtree_id'] == qt_id:
                    qtree_id = qt_id
                    break
            if constants.ShareProtocol.CIFS in protocol_str:
                share = {
                    'name': share_map['Share'],
                    'storage_id': storage_id,
                    'native_share_id':
                        share_id + constants.ShareProtocol.CIFS,
                    'native_qtree_id': qtree_id,
                    'native_filesystem_id': fs_id,
                    'path': share_map['Path'],
                    'protocol': constants.ShareProtocol.CIFS
                }
                shares_list.append(share)
    return shares_list
def get_unit_size(value, unit):
    if value is None:
        return None
    if value == '0' or value == 0:
        return 0
    unit_array = unit.split('/')
    capacity = Tools.change_capacity_to_bytes(unit_array[0])
    if capacity == 1:
        return value
    return round(int(value) / capacity, 3)
def get_latest_perf_timestamp(self):
    latest_time = 0
    stats_file_command = 'lsdumps -prefix /dumps/iostats'
    file_list = self.exec_ssh_command(stats_file_command)
    file_line = file_list.split('\n')
    for file in islice(file_line, 1, None):
        if file:
            file_arr = ' '.join(file.split()).split(' ')
            if len(file_arr) > 1:
                file_name = file_arr[1]
                # The dump file name carries date (YYMMDD) and time
                # (HHMMSS) fields in its 4th and 5th '_'-separated parts.
                name_arr = file_name.split('_')
                file_time = '20%s%s' % (name_arr[3], name_arr[4])
                time_pattern = '%Y%m%d%H%M%S'
                tools = Tools()
                occur_time = tools.time_str_to_timestamp(
                    file_time, time_pattern)
                if latest_time < occur_time:
                    latest_time = occur_time
    return latest_time
def get_raid_group_volumes(self, storage_id):
    volumes_list = []
    volumes_info = cli_handler((constant.VOLUMES_INFO_COMMAND % {
        'unit_name': self.storage_name
    }))
    volumes_array = volumes_info.split("\r\n")
    if len(volumes_array) > 2:
        for volume_info in volumes_array[2:]:
            volume_array = volume_info.split()
            if len(volume_array) > 4 and volume_array[4] != 'N/A':
                volume_model = {
                    'name': volume_array[0],
                    'storage_id': storage_id,
                    'description': '',
                    'status': constants.VolumeStatus.AVAILABLE
                    if volume_array[13] == 'normal'
                    else constants.VolumeStatus.ERROR,
                    'native_volume_id': volume_array[0],
                    'native_storage_pool_id': volume_array[4],
                    'wwn': '',
                    'compressed': '',
                    'deduplicated': '',
                    'type': constants.VolumeType.THICK,
                    'total_capacity': int(
                        Tools.get_capacity_size(volume_array[1] + 'GB')),
                    'used_capacity': int(
                        Tools.get_capacity_size(volume_array[1] + 'GB')),
                    'free_capacity': 0
                }
                volumes_list.append(volume_model)
    return volumes_list
def get_events(self, query_para):
    event_list = []
    event_info = self.ssh_pool.do_exec(constant.EVENT_SHOW_DETAIL_COMMAND)
    event_array = event_info.split(constant.ALTER_SPLIT_STR)
    event_map = {}
    for event_str in event_array[1:]:
        Tools.split_value_map(event_str, event_map, split=':')
        occur_time = int(time.mktime(time.strptime(
            event_map['Time'], constant.EVENT_TIME_TYPE)))
        if query_para is None or \
                (query_para['begin_time']
                 <= occur_time
                 <= query_para['end_time']):
            alert_model = {
                'alert_id': event_map['Sequence#'],
                'alert_name': event_map['MessageName'],
                'severity': constants.Severity.CRITICAL,
                'category': constants.Category.EVENT,
                'type': constants.EventType.EQUIPMENT_ALARM,
                'occur_time': occur_time,
                'description': event_map['Event'],
                'match_key': hashlib.md5(
                    (event_map['Sequence#'] +
                     str(occur_time)).encode()).hexdigest(),
                'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                'location': event_map['Source']
            }
            event_list.append(alert_model)
    return event_list