Example #1
0
 def test_ssh_pool_create(self):
     """Creating a pooled SSH connection with test credentials must fail
     with an SSH protocol negotiation error."""
     with self.assertRaises(Exception) as raised:
         pool = SSHPool(**ACCESS_INFO)
         pool.create()
     self.assertIn('Exception in SSH protocol negotiation or logic',
                   str(raised.exception))
Example #2
0
 def __init__(self, **kwargs):
     # Build the SSH connection pool from the supplied access settings,
     # forwarded verbatim to SSHPool.
     self.ssh_pool = SSHPool(**kwargs)
Example #3
0
class NetAppHandler(object):
    """SSH-based handler for NetApp FAS arrays: runs CLI commands over a
    pooled SSH connection and maps the text output to delfin models."""

    # TODO: confirm these trap OIDs against the NetApp MIB.
    # SNMP OIDs used when parsing incoming traps.
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'

    # Multiplier for converting second timestamps to milliseconds.
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        # Access settings are forwarded verbatim to the SSH pool.
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def parse_alert(alert):
        """Translate a raw SNMP trap payload into a delfin alert model.

        :param alert: dict of OID -> value as delivered by the trap receiver.
        :return: alert model dict, or None when the trap data contains no
            'name:description' pair with a known severity.
        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            alert_info = alert.get(NetAppHandler.OID_TRAP_DATA)
            alert_arr = alert_info.split(":")
            if len(alert_arr) > 1:
                alert_name = alert_arr[0]
                # NOTE(review): only the first ':'-separated field is kept;
                # descriptions containing ':' get truncated — confirm format.
                description = alert_arr[1]
                if netapp_constants.SEVERITY_MAP.get(alert_name):
                    severity = netapp_constants.SEVERITY_MAP.get(alert_name)
                    a = {
                        'alert_id': '',
                        'alert_name': alert_name,
                        'severity': severity,
                        'category': constants.Category.EVENT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        # occur_time is expressed in milliseconds; apply the
                        # previously unused SECONDS_TO_MS conversion
                        # (time.time() yields seconds).
                        'occur_time': int(time.time() *
                                          NetAppHandler.SECONDS_TO_MS),
                        'description': description,
                        'sequence_number': '',
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': ''
                    }
                    return a
        except exception.DelfinException as e:
            err_msg = "Failed to parse alert from " \
                      "netapp_fas fas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def login(self):
        """Probe the array over SSH with a harmless 'version' command;
        any failure is logged and re-raised."""
        try:
            self.exec_ssh_command('version')
        except Exception as err:
            LOG.error("Failed to login netapp_fas %s" % (six.text_type(err)))
            raise err

    @staticmethod
    def do_exec(command_str, ssh):
        """Run one command on an established SSH channel.

        :param command_str: shell command, validated against SSH injection.
        :param ssh: paramiko-style client; the call is skipped when either
            argument is None.
        :return: decoded stdout if non-empty, otherwise decoded stderr;
            None when the command was not executed.
        :raises exception.InvalidUsernameOrPassword, SSHConnectTimeout,
            InvalidPrivateKey, SSHException: mapped from underlying errors.
        """
        result = None
        try:
            utils.check_ssh_injection(command_str)
            if command_str is not None and ssh is not None:
                stdin, stdout, stderr = ssh.exec_command(command_str)
                # Renamed locals: the original bound the stream choice to a
                # variable named 're', shadowing the stdlib module name.
                out_data, err_data = stdout.read(), stderr.read()
                # Prefer stdout; fall back to stderr so error text survives.
                result = (out_data if out_data else err_data).decode()
        except paramiko.AuthenticationException as ae:
            LOG.error('doexec Authentication error:{}'.format(ae))
            raise exception.InvalidUsernameOrPassword()
        except Exception as e:
            msg = six.text_type(e)
            LOG.error('doexec InvalidUsernameOrPassword error')
            if 'timed out' in msg:
                raise exception.SSHConnectTimeout()
            elif 'No authentication methods available' in msg \
                    or 'Authentication failed' in msg:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in msg:
                raise exception.InvalidPrivateKey()
            else:
                raise exception.SSHException(msg)
        return result

    def exec_ssh_command(self, command):
        """Borrow a pooled SSH connection, run *command*, return its output.

        Any failure is wrapped in exception.SSHException carrying the
        command name.
        """
        try:
            with self.ssh_pool.item() as ssh:
                return NetAppHandler.do_exec(command, ssh)
        except Exception as e:
            msg = "Failed to ssh netapp_fas %s: %s" % \
                  (command, six.text_type(e))
            raise exception.SSHException(msg)

    @staticmethod
    def change_capacity_to_bytes(unit):
        """Return the byte multiplier for a capacity unit suffix.

        Unknown units map to 1 (value already in bytes).
        """
        multipliers = {
            'TB': units.Ti,
            'GB': units.Gi,
            'MB': units.Mi,
            'KB': units.Ki,
        }
        return int(multipliers.get(unit.upper(), 1))

    def parse_string(self, value):
        """Convert a capacity string such as '10', '2.5' or '1.5GB' to bytes.

        :param value: raw capacity text; the last two characters may be a
            unit suffix (KB/MB/GB/TB).
        :return: capacity in bytes as a float; 0 for empty/None input.
        """
        capacity = 0
        if value:
            try:
                # Plain number. The old isdigit() check rejected decimals
                # such as '2.5', which then fell through to the unit branch
                # and were silently mis-parsed.
                capacity = float(value)
            except ValueError:
                unit = value[-2:]
                capacity = float(value[:-2]) * int(
                    self.change_capacity_to_bytes(unit))
        return capacity

    def get_storage(self):
        """Collect cluster-level information and capacity totals.

        :return: storage model dict (name, status, capacities, ...).
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            status_map = {
                'ok': constants.StorageStatus.NORMAL,
                'ok-with-suppressed': constants.StorageStatus.NORMAL,
                'degraded': constants.StorageStatus.ABNORMAL,
                'unreachable': constants.StorageStatus.ABNORMAL
            }
            system_info = self.exec_ssh_command(
                netapp_constants.CLUSTER_SHOW_COMMAND)
            version = self.exec_ssh_command(
                netapp_constants.VERSION_SHOW_COMMAND)
            status_info = self.exec_ssh_command(
                netapp_constants.STORAGE_STATUS_COMMAND)
            # Health state sits on the third line of the status output.
            status = status_map.get(status_info.split("\n")[2])
            storage_map = {}
            self.handle_detail(system_info, storage_map, split=':')
            # Raw capacity from disks; logical capacities from pools.
            raw_capacity = sum(d['capacity'] for d in self.list_disks(None))
            total_capacity = used_capacity = free_capacity = 0
            for pool in self.list_storage_pools(None):
                total_capacity += pool['total_capacity']
                free_capacity += pool['free_capacity']
                used_capacity += pool['used_capacity']
            return {
                "name": storage_map['ClusterName'],
                "vendor": netapp_constants.STORAGE_VENDOR,
                "model": '',
                "status": status,
                "serial_number": storage_map['ClusterSerialNumber'],
                "firmware_version": version.split('\n')[0],
                "location": '',
                "total_capacity": total_capacity,
                "raw_capacity": raw_capacity,
                "used_capacity": used_capacity,
                "free_capacity": free_capacity
            }
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "netapp_fas fas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def handle_detail(system_info, storage_map, split):
        """Parse 'key<split> value' lines of CLI output into storage_map.

        Keys have their spaces removed; lines without a value map to ''.
        """
        for line in system_info.split('\n'):
            if not line:
                continue
            parts = line.split(split + " ")
            storage_map[parts[0].replace(' ', '')] = \
                parts[1] if len(parts) > 1 else ''

    def get_aggregate(self, storage_id):
        """List aggregates as storage-pool model dicts.

        :param storage_id: delfin storage id stamped on every pool dict.
        :return: list of pool dicts built from aggregate-show output.
        """
        STATUS_MAP = {
            'online': constants.StoragePoolStatus.NORMAL,
            'creating': constants.StoragePoolStatus.NORMAL,
            'mounting': constants.StoragePoolStatus.NORMAL,
            'relocating': constants.StoragePoolStatus.NORMAL,
            'quiesced': constants.StoragePoolStatus.OFFLINE,
            'quiescing': constants.StoragePoolStatus.OFFLINE,
            'unmounted': constants.StoragePoolStatus.OFFLINE,
            'unmounting': constants.StoragePoolStatus.OFFLINE,
            'destroying': constants.StoragePoolStatus.ABNORMAL,
            'partial': constants.StoragePoolStatus.ABNORMAL,
            'frozen': constants.StoragePoolStatus.ABNORMAL,
            'reverted': constants.StoragePoolStatus.NORMAL,
            'restricted': constants.StoragePoolStatus.ABNORMAL,
            'inconsistent': constants.StoragePoolStatus.ABNORMAL,
            'iron_restricted': constants.StoragePoolStatus.ABNORMAL,
            'unknown': constants.StoragePoolStatus.OFFLINE,
            'offline': constants.StoragePoolStatus.OFFLINE,
            'failed': constants.StoragePoolStatus.ABNORMAL,
            'remote_cluster': constants.StoragePoolStatus.NORMAL,
        }
        agg_list = []
        agg_info = self.exec_ssh_command(
            netapp_constants.AGGREGATE_SHOW_DETAIL_COMMAND)
        agg_arr = agg_info.split(
            netapp_constants.AGGREGATE_SPLIT_STR)
        for agg in agg_arr[1:]:
            # Fresh map per aggregate: the old shared dict leaked keys from
            # the previous aggregate into the next one (get_eth_port and
            # get_fc_port already reset their maps per item).
            agg_map = {}
            self.handle_detail(agg, agg_map, split=':')
            status = STATUS_MAP.get(agg_map['State'])
            p = {
                # NOTE(review): 'e' looks like the mangled tail of a 'Name'
                # label produced by handle_detail's key munging — verify.
                'name': agg_map['e'],
                'storage_id': storage_id,
                'native_storage_pool_id': agg_map['UUIDString'],
                'description': '',
                'status': status,
                'storage_type': constants.StorageType.UNIFIED,
                'subscribed_capacity': '',
                'total_capacity':
                    int(self.parse_string(agg_map['Size'])),
                'used_capacity':
                    int(self.parse_string(agg_map['UsedSize'])),
                'free_capacity':
                    int(self.parse_string(agg_map['AvailableSize'])),
            }
            agg_list.append(p)
        return agg_list

    def get_pool(self, storage_id):
        """List storage pools as pool model dicts.

        :param storage_id: delfin storage id stamped on every pool dict.
        :return: list of pool dicts built from storage-pool-show output.
        """
        pool_list = []
        pool_info = self.exec_ssh_command(
            netapp_constants.POOLS_SHOW_DETAIL_COMMAND)
        pool_arr = pool_info.split(netapp_constants.POOLS_SPLIT_STR)
        for pool_str in pool_arr[1:]:
            # Fresh map per pool: the old shared dict leaked keys from the
            # previous pool into the next one.
            pool_map = {}
            self.handle_detail(pool_str, pool_map, split=':')
            status = \
                constants.StoragePoolStatus.NORMAL \
                if pool_map['IsPoolHealthy?'] == 'true' \
                else constants.StoragePoolStatus.OFFLINE
            p = {
                # NOTE(review): 'ame' looks like the mangled tail of a
                # 'Name' label produced by handle_detail — verify.
                'name': pool_map['ame'],
                'storage_id': storage_id,
                'native_storage_pool_id': pool_map['UUIDofStoragePool'],
                'description': '',
                'status': status,
                'storage_type': constants.StorageType.BLOCK,
                'subscribed_capacity': '',
                'total_capacity':
                    int(self.parse_string(pool_map['StoragePoolTotalSize'])),
                'used_capacity':
                    int(self.parse_string(pool_map['StoragePoolTotalSize'])) -
                    int(self.parse_string(pool_map['StoragePoolUsableSize'])),
                'free_capacity':
                    int(self.parse_string(pool_map['StoragePoolUsableSize']))
            }
            pool_list.append(p)
        return pool_list

    def list_storage_pools(self, storage_id):
        """Return aggregates followed by storage pools as one list.

        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            pools = self.get_pool(storage_id)
            aggregates = self.get_aggregate(storage_id)
            return aggregates + pools
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """List LUNs as volume model dicts.

        :param storage_id: delfin storage id stamped on every volume dict.
        :return: list of volume dicts; pool id is resolved by matching the
            LUN's volume name against the filesystem list.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            STATUS_MAP = {
                'online': constants.VolumeStatus.AVAILABLE,
                'offline': constants.VolumeStatus.ERROR,
                'nvfail': constants.VolumeStatus.ERROR,
                'space-error': constants.VolumeStatus.ERROR,
                'foreign-lun-error': constants.VolumeStatus.ERROR,
            }
            volume_list = []
            volume_info = self.exec_ssh_command(
                netapp_constants.LUN_SHOW_DETAIL_COMMAND)
            volume_arr = volume_info.split(netapp_constants.LUN_SPLIT_STR)
            fs_list = self.list_filesystems(storage_id)
            for volume_str in volume_arr[1:]:
                # Fresh map per LUN: the old shared dict leaked keys from
                # the previous LUN into the next one.
                volume_map = {}
                self.handle_detail(volume_str, volume_map, split=':')
                # The old guard 'is not None or != {}' was always true;
                # a plain truthiness check is the intended test.
                if volume_map:
                    pool_id = ''
                    status = STATUS_MAP.get(volume_map['State'])
                    for fs in fs_list:
                        if fs['name'] == volume_map['VolumeName']:
                            pool_id = fs['native_pool_id']
                    # 'vol_type' avoids shadowing the builtin 'type'.
                    vol_type = constants.VolumeType.THIN \
                        if volume_map['SpaceAllocation'] == 'enabled' \
                        else constants.VolumeType.THICK
                    v = {
                        'name': volume_map['LUNName'],
                        'storage_id': storage_id,
                        'description': '',
                        'status': status,
                        'native_volume_id': volume_map['LUNUUID'],
                        'native_storage_pool_id': pool_id,
                        'wwn': '',
                        'compressed': '',
                        'deduplicated': '',
                        'type': vol_type,
                        'total_capacity':
                            int(self.parse_string(volume_map['LUNSize'])),
                        'used_capacity':
                            int(self.parse_string(volume_map['UsedSize'])),
                        'free_capacity':
                            int(self.parse_string(volume_map['LUNSize'])) -
                            int(self.parse_string(volume_map['UsedSize']))
                    }
                    volume_list.append(v)
            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_alerts(self, query_para):
        """Gather event-log and alert entries into one alert list.

        The two sources are queried separately and merged into the same
        accumulator by the AlertHandler helpers.
        """
        try:
            alerts = []
            alert_info = self.exec_ssh_command(
                netapp_constants.ALTER_SHOW_DETAIL_COMMAND)
            event_info = self.exec_ssh_command(
                netapp_constants.EVENT_SHOW_DETAIL_COMMAND)
            AlertHandler.list_events(self, event_info, query_para, alerts)
            AlertHandler.list_alerts(self, alert_info, query_para, alerts)
            return alerts
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        """Clear (acknowledge) an alert on the array by its alert_id.

        :param alert: dict containing at least 'alert_id'.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            ssh_command = \
                netapp_constants.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.exec_ssh_command(ssh_command)
        except exception.DelfinException as e:
            # Message fixed: this path clears an alert, it does not fetch one.
            err_msg = "Failed to clear storage alert from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_controllers(self, storage_id):
        """List controller nodes as controller model dicts.

        :param storage_id: delfin storage id stamped on every dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            controller_list = []
            controller_info = self.exec_ssh_command(
                netapp_constants.CONTROLLER_SHOW_DETAIL_COMMAND)
            controller_arr = controller_info.split(
                netapp_constants.CONTROLLER_SPLIT_STR)
            for controller_str in controller_arr[1:]:
                # Fresh map per controller: the old shared dict leaked keys
                # from the previous controller into the next one.
                controller_map = {}
                self.handle_detail(controller_str, controller_map, split=':')
                # The old guard 'is not None or != {}' was always true;
                # a plain truthiness check is the intended test.
                if controller_map:
                    status = constants.ControllerStatus.NORMAL \
                        if controller_map['Health'] == 'true' \
                        else constants.ControllerStatus.OFFLINE
                    c = {
                        # NOTE(review): 'e' looks like the mangled tail of a
                        # 'Name' label produced by handle_detail — verify.
                        'name': controller_map['e'],
                        'storage_id': storage_id,
                        'native_controller_id': controller_map['SystemID'],
                        'status': status,
                        'location': controller_map['Location'],
                        'soft_version': '',
                        'cpu_info': '',
                        'memory_size': '',
                    }
                    # Append inside the guard: the original appended outside
                    # it, which could raise NameError on the first iteration
                    # or re-append a stale dict when the guard failed.
                    controller_list.append(c)
            return controller_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage controllers from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage controllers from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_network_port(self, storage_id):
        """List logical network interfaces as port model dicts.

        :param storage_id: delfin storage id stamped on every port dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            LOGICAL_TYPE_MAP = {
                'data': constants.PortLogicalType.FRONTEND,
                'cluster': constants.PortLogicalType.FRONTEND,
                'node-mgmt': constants.PortLogicalType.MANAGEMENT,
                'cluster-mgmt': constants.PortLogicalType.INTERNAL,
                'intercluster': constants.PortLogicalType.INTERCONNECT,
            }
            ports_list = []
            interfaces_info = self.exec_ssh_command(
                netapp_constants.INTERFACE_SHOW_DETAIL_COMMAND)
            interface_arr = interfaces_info.split(
                netapp_constants.INTERFACE_SPLIT_STR)
            # Traverse every interface to collect its address information.
            for interface_info in interface_arr[1:]:
                # Fresh map and address accumulators per interface: the old
                # shared state leaked the previous interface's keys and
                # accumulated the addresses of ALL ports into every dict.
                interface_map = {}
                ipv4 = ipv4_mask = ipv6 = ipv6_mask = '-'
                self.handle_detail(interface_info, interface_map, split=':')
                logical_type = LOGICAL_TYPE_MAP.get(interface_map['Role'])
                # 'port_type' avoids shadowing the builtin 'type'.
                port_type = interface_map['DataProtocol']
                if interface_map['Addressfamily'] == 'ipv4':
                    ipv4 += interface_map['NetworkAddress'] + ','
                    ipv4_mask += interface_map['Netmask'] + ','
                else:
                    ipv6 += interface_map['NetworkAddress'] + ','
                    ipv6_mask += interface_map['Netmask'] + ','
                p = {
                    'name': interface_map['LogicalInterfaceName'],
                    'storage_id': storage_id,
                    'native_port_id': interface_map['LogicalInterfaceName'],
                    'location': '',
                    'connection_status':
                        constants.PortConnectionStatus.CONNECTED
                        if interface_map['OperationalStatus'] == 'up'
                        else constants.PortConnectionStatus.DISCONNECTED,
                    # NOTE(review): health compares OperationalStatus to
                    # 'healthy' while connection compares the same field to
                    # 'up' — confirm the intended source field.
                    'health_status':
                        constants.PortHealthStatus.NORMAL
                        if interface_map['OperationalStatus'] == 'healthy'
                        else constants.PortHealthStatus.ABNORMAL,
                    'type': port_type,
                    'logical_type': logical_type,
                    'speed': '',
                    'max_speed': '',
                    'native_parent_id': '',
                    'wwn': '',
                    'mac_address': '',
                    'ipv4': ipv4,
                    'ipv4_mask': ipv4_mask,
                    'ipv6': ipv6,
                    'ipv6_mask': ipv6_mask,
                }
                ports_list.append(p)
            return ports_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_eth_port(self, storage_id):
        """List ethernet ports as port model dicts.

        :param storage_id: delfin storage id stamped on every port dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            eth_list = []
            eth_info = self.exec_ssh_command(
                netapp_constants.PORT_SHOW_DETAIL_COMMAND)
            for eth in eth_info.split(netapp_constants.PORT_SPLIT_STR)[1:]:
                eth_map = {}
                self.handle_detail(eth, eth_map, split=':')
                if eth_map['Link'] == 'up':
                    connection = constants.PortConnectionStatus.CONNECTED
                else:
                    connection = constants.PortConnectionStatus.DISCONNECTED
                if eth_map['PortHealthStatus'] == 'healthy':
                    health = constants.PortHealthStatus.NORMAL
                else:
                    health = constants.PortHealthStatus.ABNORMAL
                eth_list.append({
                    'name': eth_map['Port'],
                    'storage_id': storage_id,
                    'native_port_id': eth_map['Port'],
                    'location': '',
                    'connection_status': connection,
                    'health_status': health,
                    'type': constants.PortType.ETH,
                    'logical_type': '',
                    'speed': eth_map['SpeedOperational'],
                    'max_speed': eth_map['MTU'],
                    'native_parent_id': '',
                    'wwn': '',
                    'mac_address': eth_map['MACAddress'],
                    'ipv4': '',
                    'ipv4_mask': '',
                    'ipv6': '',
                    'ipv6_mask': '',
                })
            return eth_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_fc_port(self, storage_id):
        """List FC/FCoE adapters as port model dicts.

        :param storage_id: delfin storage id stamped on every port dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            protocol_to_type = {
                'fibre-channel': constants.PortType.FC,
                'ethernet': constants.PortType.FCOE
            }
            fc_list = []
            fc_info = self.exec_ssh_command(
                netapp_constants.FC_PORT_SHOW_DETAIL_COMMAND)
            for fc in fc_info.split(netapp_constants.PORT_SPLIT_STR)[1:]:
                fc_map = {}
                self.handle_detail(fc, fc_map, split=':')
                if fc_map['AdministrativeStatus'] == 'up':
                    connection = constants.PortConnectionStatus.CONNECTED
                else:
                    connection = constants.PortConnectionStatus.DISCONNECTED
                if fc_map['OperationalStatus'] == 'online':
                    health = constants.PortHealthStatus.NORMAL
                else:
                    health = constants.PortHealthStatus.ABNORMAL
                fc_list.append({
                    'name': fc_map['Adapter'],
                    'storage_id': storage_id,
                    'native_port_id': fc_map['Adapter'],
                    'location': '',
                    'connection_status': connection,
                    'health_status': health,
                    'type': protocol_to_type.get(fc_map['PhysicalProtocol']),
                    'logical_type': '',
                    'speed': fc_map['DataLinkRate(Gbit)'],
                    'max_speed': fc_map['MaximumSpeed'],
                    'native_parent_id': '',
                    'wwn': fc_map['AdapterWWNN'],
                    'mac_address': '',
                    'ipv4': '',
                    'ipv4_mask': '',
                    'ipv6': '',
                    'ipv6_mask': '',
                })
            return fc_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_ports(self, storage_id):
        """Collect network-interface, FC and ethernet ports in one list,
        in that order."""
        ports = []
        for collect in (self.get_network_port,
                        self.get_fc_port,
                        self.get_eth_port):
            ports.extend(collect(storage_id))
        return ports

    def list_disks(self, storage_id):
        """List physical disks, merging detail and physical disk-show views.

        :param storage_id: delfin storage id stamped on every disk dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            # NOTE(review): several entries map to the DiskPhysicalType /
            # DiskLogicalType class itself rather than a member — looks like
            # placeholders; confirm the intended enum values.
            TYPE_MAP = {
                'ATA': constants.DiskPhysicalType.SATA,
                'BSAS': constants.DiskPhysicalType,
                'FCAL': constants.DiskPhysicalType,
                'FSAS': constants.DiskPhysicalType,
                'LUN ': constants.DiskPhysicalType,
                'SAS': constants.DiskPhysicalType.SAS,
                'MSATA': constants.DiskPhysicalType,
                'SSD': constants.DiskPhysicalType.SSD,
                'VMDISK': constants.DiskPhysicalType,
                'unknown': constants.DiskPhysicalType.UNKNOWN,
            }
            LOGICAL_MAP = {
                'aggregate': constants.DiskLogicalType.MEMBER,
                'spare': constants.DiskLogicalType.HOTSPARE,
                'unknown': constants.DiskLogicalType.UNKNOWN,
                'free': constants.DiskLogicalType.FREE,
                'broken': constants.DiskLogicalType,
                'foreign': constants.DiskLogicalType,
                'labelmaint': constants.DiskLogicalType,
                'maintenance': constants.DiskLogicalType,
                'shared': constants.DiskLogicalType,
                'unassigned': constants.DiskLogicalType,
                'unsupported': constants.DiskLogicalType,
                'remote': constants.DiskLogicalType,
                'mediator': constants.DiskLogicalType,
            }
            disks_list = []
            physicals_list = []
            disks_info = self.exec_ssh_command(
                netapp_constants.DISK_SHOW_DETAIL_COMMAND)
            disks_arr = disks_info.split(
                netapp_constants.DISK_SPLIT_STR)
            physicals_info = self.exec_ssh_command(
                netapp_constants.DISK_SHOW_PHYSICAL_COMMAND)
            physical_arr = physicals_info.split('\n')
            # Every other line (from the third) carries one disk's columns.
            for i in range(2, len(physical_arr), 2):
                physicals_list.append(physical_arr[i].split())
            for disk_str in disks_arr[1:]:
                # Fresh map and defaults per disk: the old shared state
                # carried the previous disk's values over whenever a key or
                # matching physical row was missing for this disk.
                disks_map = {}
                speed = physical_type = firmware = '-'
                self.handle_detail(disk_str, disks_map, split=':')
                logical_type = LOGICAL_MAP.get(disks_map['ContainerType'])
                # Map disk physical information by matching the disk name.
                for physical_info in physicals_list:
                    if len(physical_info) > 6:
                        if physical_info[0] == disks_map['k']:
                            physical_type = TYPE_MAP.get(physical_info[1])
                            speed = physical_info[5]
                            firmware = physical_info[4]
                status = constants.DiskStatus.NORMAL \
                    if disks_map['Errors:'] is None \
                    or disks_map['Errors:'] == "" \
                    else constants.DiskStatus.OFFLINE
                d = {
                    # NOTE(review): 'k' looks like the mangled tail of a
                    # 'Disk' label produced by handle_detail — verify.
                    'name': disks_map['k'],
                    'storage_id': storage_id,
                    'native_disk_id': disks_map['k'],
                    'serial_number': disks_map['SerialNumber'],
                    'manufacturer': disks_map['Vendor'],
                    'model': disks_map['Model'],
                    'firmware': firmware,
                    'speed': speed,
                    'capacity':
                        int(self.parse_string(disks_map['PhysicalSize'])),
                    'status': status,
                    'physical_type': physical_type,
                    'logical_type': logical_type,
                    'health_score': '',
                    'native_disk_group_id': disks_map['Aggregate'],
                    'location': '',
                }
                disks_list.append(d)
            return disks_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage disks from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage disks from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_qtrees(self, storage_id):
        """List qtrees as qtree model dicts.

        :param storage_id: delfin storage id stamped on every qtree dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            qt_list = []
            qt_info = self.exec_ssh_command(
                netapp_constants.QTREE_SHOW_DETAIL_COMMAND)
            qt_arr = qt_info.split(netapp_constants.QTREE_SPLIT_STR)
            for qt in qt_arr[1:]:
                # Fresh map per qtree: the old shared dict leaked keys from
                # the previous qtree into the next one.
                qt_map = {}
                self.handle_detail(qt, qt_map, split=':')
                q = {
                    'name': qt_map['QtreeName'],
                    'storage_id': storage_id,
                    'native_qtree_id': qt_map['Actual(Non-Junction)QtreePath'],
                    'native_filesystem_id': qt_map['VolumeName'],
                    'security_mode': qt_map['SecurityStyle'],
                }
                qt_list.append(q)

            return qt_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_shares(self, storage_id):
        """List CIFS shares as share model dicts.

        The protocol table is parsed once up front; each share then looks
        up its protocol by server key.

        :param storage_id: delfin storage id stamped on every share dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            shares_list = []
            cifs_share_info = self.exec_ssh_command(
                netapp_constants.CIFS_SHARE_SHOW_DETAIL_COMMAND)
            cifs_share_arr = cifs_share_info.split(
                netapp_constants.CIFS_SHARE_SPLIT_STR)
            protocol_info = self.exec_ssh_command(
                netapp_constants.SHARE_AGREEMENT_SHOW_COMMAND)
            protocol_map = {}
            protocol_arr = protocol_info.split('\n')
            for protocol in protocol_arr[2:]:
                agr_arr = protocol.split()
                if len(agr_arr) > 1:
                    protocol_map[agr_arr[0]] = agr_arr[1]
            for cifs_share in cifs_share_arr[1:]:
                # Fresh map per share: the old shared dict leaked keys from
                # the previous share into the next one.
                cifs_share_map = {}
                self.handle_detail(cifs_share, cifs_share_map, split=':')
                # NOTE(review): 'r' looks like the mangled tail of a
                # 'Server' label produced by handle_detail — verify.
                protocol = protocol_map.get(cifs_share_map['r'])
                s = {
                    'name': cifs_share_map['Share'],
                    'storage_id': storage_id,
                    'native_share_id': cifs_share_map['Share'],
                    'native_filesystem_id': cifs_share_map['VolumeName'],
                    'path': cifs_share_map['Path'],
                    'protocol': protocol
                }
                shares_list.append(s)
            return shares_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_filesystems(self, storage_id):
        """Return volumes (filesystems) as delfin filesystem dicts.

        :param storage_id: delfin storage identifier stamped on each entry.
        :return: list of filesystem dicts.
        :raises exception.InvalidResults: on any unexpected parsing error.
        """
        try:
            # NOTE(review): 'offline' -> NORMAL while 'restricted' -> FAULTY
            # looks inverted — confirm intended mapping; kept as-is.
            STATUS_MAP = {
                'online': constants.FilesystemStatus.NORMAL,
                'restricted': constants.FilesystemStatus.FAULTY,
                'offline': constants.FilesystemStatus.NORMAL,
                'force-online': constants.FilesystemStatus.FAULTY,
                'force-offline': constants.FilesystemStatus.FAULTY,
            }
            fs_list = []
            fs_info = self.exec_ssh_command(
                netapp_constants.FS_SHOW_DETAIL_COMMAND)
            fs_arr = fs_info.split(
                netapp_constants.FS_SPLIT_STR)
            thin_fs_info = self.exec_ssh_command(
                netapp_constants.THIN_FS_SHOW_COMMAND)
            # Pool name -> native pool id, built once instead of rescanning
            # the pool list for every volume.
            pool_ids = {
                pool['name']: pool['native_storage_pool_id']
                for pool in self.list_storage_pools(storage_id)
            }
            # Names of thin-provisioned volumes (column 1 of the tabular
            # output; rows 0-1 are header and separator), collected once.
            thin_names = set()
            for thin_vol in thin_fs_info.split("\n")[2:]:
                thin_arr = thin_vol.split()
                if len(thin_arr) > 4:
                    thin_names.add(thin_arr[1])
            for fs_str in fs_arr[1:]:
                # Fix: fresh map per volume (the old shared dict leaked
                # stale keys) and a real emptiness test — the original
                # 'is not None or != {}' condition was always true.
                fs_map = {}
                self.handle_detail(fs_str, fs_map, split=':')
                if not fs_map:
                    continue
                pool_id = pool_ids.get(fs_map['AggregateName'], "")
                deduplicated = fs_map['SpaceSavedbyDeduplication'] != '0B'
                # Fix: reset the type for every volume. Previously a single
                # flag was shared across iterations, so one thin volume
                # marked every later volume as thin too.
                fs_type = constants.FSType.THICK
                if fs_map['VolumeName'] in thin_names:
                    fs_type = constants.FSType.THIN
                compressed = \
                    fs_map['VolumeContainsSharedorCompressedData'] != 'false'
                status = STATUS_MAP.get(fs_map['VolumeState'])
                total_capacity = int(self.parse_string(fs_map['VolumeSize']))
                used_capacity = int(self.parse_string(fs_map['UsedSize']))
                f = {
                    'name': fs_map['VolumeName'],
                    'storage_id': storage_id,
                    'native_filesystem_id': fs_map['VolumeName'],
                    'native_pool_id': pool_id,
                    'compressed': compressed,
                    'deduplicated': deduplicated,
                    'worm': fs_map['SnapLockType'],
                    'status': status,
                    'type': fs_type,
                    'total_capacity': total_capacity,
                    'used_capacity': used_capacity,
                    'free_capacity': total_capacity - used_capacity
                }
                fs_list.append(f)
            return fs_list
        except exception.DelfinException as e:
            # Fix: message said "storage volume" — copy-paste from the
            # volume lister; this method collects filesystems.
            err_msg = "Failed to get storage filesystems from " \
                      "netapp_fas fas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage filesystems from " \
                      "netapp_fas fas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def add_trap_config(self, context, trap_config):
        # Trap registration is handled outside this driver; delfin calls
        # this hook unconditionally, so it is an intentional no-op.
        pass

    def remove_trap_config(self, context, trap_config):
        # Counterpart of add_trap_config; nothing to tear down here.
        pass
# Exemple #4
# 0
class SSHHandler(object):
    """SSH-based handler for HPE MSA arrays: runs XML-emitting CLI
    commands and converts the results into delfin resource dicts."""

    def __init__(self, **kwargs):
        # kwargs are the delfin access-info fields (host, credentials, ...)
        # forwarded verbatim to the SSH connection pool.
        self.ssh_pool = SSHPool(**kwargs)

    def login(self):
        """Validate connectivity and credentials against the array."""
        try:
            # Any successful read-only command proves the session works.
            self.ssh_pool.do_exec('show pools')
        except Exception as e:
            detail = six.text_type(e)
            LOG.error("Failed to login msa  %s" % detail)
            raise e

    def get_storage(self, storage_id):
        """Build the delfin storage dict for this MSA array.

        Aggregates 'show system' / 'show version' output with the pool,
        disk and volume listings to compute the capacity figures.

        :param storage_id: delfin storage identifier.
        :return: storage dict, or None when 'show system' yields no data.
        :raises Exception: re-raised after logging on any failure.
        """
        try:
            system_info = self.ssh_pool.do_exec('show system')
            system_data = self.handle_xml_to_dict(system_info, 'system')
            version_info = self.ssh_pool.do_exec('show version')
            version_arr = self.handle_xml_to_json(version_info, 'versions')
            version_id = ""
            if version_arr:
                # Only the first record's bundle version is reported.
                version_id = version_arr[0].get('bundle-version')
            if system_data:
                pools_list = self.list_storage_pools(storage_id)
                total_capacity = 0
                if pools_list:
                    for pool in pools_list:
                        total_capacity += int(pool.get('total_capacity'))
                disks_list = self.list_storage_disks(storage_id)
                raw_capacity = 0
                if disks_list:
                    for disk in disks_list:
                        raw_capacity += int(disk.get('capacity'))
                volumes_list = self.list_storage_volume(storage_id)
                volume_all_size = 0
                if volumes_list:
                    for volume in volumes_list:
                        volume_all_size += int(volume.get('total_capacity'))
                health = system_data.get('health')
                # Any health other than OK/Degraded reports as offline.
                status = constants.StorageStatus.OFFLINE
                if health == 'OK':
                    status = constants.StorageStatus.NORMAL
                elif health == 'Degraded':
                    status = constants.StorageStatus.DEGRADED
                serial_num = system_data.get('midplane-serial-number')
                storage_map = {
                    'name': system_data.get('system-name'),
                    'vendor': consts.StorageVendor.HPE_MSA_VENDOR,
                    'model': system_data.get('product-id'),
                    'status': status,
                    'serial_number': serial_num,
                    'firmware_version': version_id,
                    'location': system_data.get('system-location'),
                    'raw_capacity': int(raw_capacity),
                    'total_capacity': int(total_capacity),
                    # Used is the sum of volume sizes; free is what the
                    # pools still have unprovisioned.
                    'used_capacity': int(volume_all_size),
                    'free_capacity': int(total_capacity - volume_all_size)
                }
                return storage_map
        except Exception as e:
            err_msg = "Failed to get system info : %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def list_storage_disks(self, storage_id):
        """Return physical drives as delfin disk dicts.

        :param storage_id: delfin storage identifier stamped on each entry.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        try:
            disk_info = self.ssh_pool.do_exec('show disks')
            disk_detail = self.handle_xml_to_json(disk_info, 'drives')
            disks_arr = []
            if disk_detail:
                for data in disk_detail:
                    # Anything but health 'OK' is reported as offline.
                    health = data.get('health')
                    status = constants.StoragePoolStatus.OFFLINE
                    if health == 'OK':
                        status = constants.StoragePoolStatus.NORMAL
                    size = self.parse_string_to_bytes(data.get('size'))
                    physical_type = consts.DiskPhysicalType.\
                        DISK_PHYSICAL_TYPE.get(data.get('description'),
                                               constants.DiskPhysicalType.
                                               UNKNOWN)
                    # CLI reports rpm in thousands; scale to actual RPM.
                    rpm = data.get('rpm')
                    if rpm:
                        rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED
                    data_map = {
                        'native_disk_id': data.get('location'),
                        'name': data.get('location'),
                        'physical_type': physical_type,
                        'status': status,
                        'storage_id': storage_id,
                        'native_disk_group_id': data.get('disk-group'),
                        'serial_number': data.get('serial-number'),
                        'manufacturer': data.get('vendor'),
                        'model': data.get('model'),
                        'speed': rpm,
                        'capacity': int(size),
                        # NOTE(review): reuses the status constant as a
                        # health score — confirm expected value type.
                        'health_score': status
                    }
                    disks_arr.append(data_map)
            return disks_arr
        except Exception as e:
            err_msg = "Failed to get storage disk: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def list_storage_ports(self, storage_id):
        """Return host ports as delfin port dicts.

        :param storage_id: delfin storage identifier stamped on each entry.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        try:
            ports_info = self.ssh_pool.do_exec('show ports')
            # Drop the echoed command line and the trailing prompt/status
            # line before parsing the remainder as XML.
            ports_split = ports_info.split('\n')
            ports_array = ports_split[1:len(ports_split) - 1]
            ports_xml_data = ''.join(ports_array)
            xml_element = Et.fromstring(ports_xml_data)
            ports_json = []
            for element_data in xml_element.iter('OBJECT'):
                property_name = element_data.get('basetype')
                # 'status' objects are CLI bookkeeping, not port records.
                if property_name != 'status':
                    msg = {}
                    for child in element_data.iter('PROPERTY'):
                        msg[child.get('name')] = child.text
                    ports_json.append(msg)
            # Merge record pairs: assumes the CLI emits each port record
            # immediately followed by its type-specific detail record —
            # TODO confirm this ordering holds for all firmware versions.
            ports_elements_info = []
            for i in range(0, len(ports_json) - 1, 2):
                port_element = ports_json[i].copy()
                port_element.update(ports_json[i + 1])
                ports_elements_info.append(port_element)
            list_ports = []
            for data in ports_elements_info:
                status = constants.PortHealthStatus.NORMAL
                conn_status = constants.PortConnectionStatus.CONNECTED
                if data.get('health') != 'OK':
                    status = constants.PortHealthStatus.ABNORMAL
                    conn_status = constants.PortConnectionStatus.\
                        DISCONNECTED
                wwn = None
                # Default classification is FC; only an explicit iSCSI
                # port-type switches to ETH. Non-iSCSI ports take their
                # WWN from 'target-id'.
                port_type = constants.PortType.FC
                location_port_type = data.get('port-type')
                if location_port_type:
                    location_port_type = location_port_type.upper()
                if location_port_type == 'ISCSI':
                    port_type = constants.PortType.ETH
                else:
                    target_id = data.get('target-id')
                    if target_id:
                        wwn = target_id
                location = '%s_%s' % (data.get('port'), location_port_type)
                # 'Auto' negotiation means the fixed speed is unknown (0).
                speed = data.get('configured-speed', None)
                max_speed = 0
                if speed != 'Auto' and speed is not None:
                    max_speed = self.parse_string_to_bytes(speed)
                data_map = {
                    'native_port_id': data.get('durable-id'),
                    'name': data.get('port'),
                    'type': port_type,
                    'connection_status': conn_status,
                    'health_status': status,
                    'location': location,
                    'storage_id': storage_id,
                    'speed': max_speed,
                    'max_speed': max_speed,
                    'mac_address': data.get('mac-address'),
                    'ipv4': data.get('ip-address'),
                    'wwn': wwn
                }
                list_ports.append(data_map)
            return list_ports
        except Exception as e:
            err_msg = "Failed to get storage ports: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def list_storage_controller(self, storage_id):
        """Return array controllers as delfin controller dicts.

        :param storage_id: delfin storage identifier stamped on each entry.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        try:
            controller_info = self.ssh_pool\
                .do_exec('show controllers')
            controller_detail = self.handle_xml_to_json(
                controller_info, 'controllers')
            controller_arr = []
            for data in controller_detail:
                # Anything but health 'OK' is reported as offline.
                health = data.get('health')
                status = constants.StoragePoolStatus.OFFLINE
                if health == 'OK':
                    status = constants.StoragePoolStatus.NORMAL
                cpu_info = data.get('sc-cpu-type')
                # The CLI value is a bare number of megabytes; append the
                # unit so parse_string_to_bytes can convert it.
                memory_size = data.get('system-memory-size')
                if memory_size is not None:
                    memory_size += "MB"
                system_memory_size = self.parse_string_to_bytes(memory_size)
                data_map = {
                    'native_controller_id': data.get('controller-id'),
                    'name': data.get('durable-id'),
                    'storage_id': storage_id,
                    'status': status,
                    'location': data.get('position'),
                    'soft_version': data.get('sc-fw'),
                    'cpu_info': cpu_info,
                    'memory_size': int(system_memory_size)
                }
                controller_arr.append(data_map)
            return controller_arr
        except Exception as e:
            err_msg = "Failed to get storage controllers: %s"\
                      % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def list_storage_volume(self, storage_id):
        """Return MSA volumes as delfin volume dicts.

        :param storage_id: delfin storage identifier stamped on each entry.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        try:
            volume_infos = self.ssh_pool.do_exec('show volumes')
            volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')
            pools_info = self.ssh_pool.do_exec('show pools')
            pool_detail = self.handle_xml_to_json(pools_info, 'pools')
            list_volumes = []
            for data in volume_detail:
                # Anything but health 'OK' is reported as offline.
                health = data.get('health')
                status = constants.StoragePoolStatus.OFFLINE
                if health == 'OK':
                    status = constants.StoragePoolStatus.NORMAL
                total_size = self.parse_string_to_bytes(data.get('total-size'))
                allocated_size = self.parse_string_to_bytes(
                    data.get('allocated-size'))
                # Default to the first pool, then prefer an exact match on
                # the owning virtual-disk name.
                native_storage_pool_id = ''
                if pool_detail:
                    native_storage_pool_id = pool_detail[0]. \
                        get('serial-number')
                    for pools in pool_detail:
                        if data.get('virtual-disk-name') == pools.\
                                get('name'):
                            native_storage_pool_id = pools.\
                                get('serial-number')
                # Fix: 'blocks' may be absent; int(None) used to raise a
                # TypeError here. Missing values now default to 0.
                blocks = int(data.get('blocks') or 0)
                volume_map = {
                    'name': data.get('volume-name'),
                    'storage_id': storage_id,
                    'description': data.get('volume-name'),
                    'status': status,
                    'native_volume_id': str(data.get('durable-id')),
                    'native_storage_pool_id': native_storage_pool_id,
                    'wwn': str(data.get('wwn')),
                    'type': data.get('volume-type'),
                    'total_capacity': int(total_size),
                    # Fix: key was misspelled 'free_capacit', so delfin
                    # never received the free-capacity figure.
                    'free_capacity': int(total_size - allocated_size),
                    'used_capacity': int(allocated_size),
                    'blocks': blocks,
                    'compressed': True,
                    'deduplicated': True
                }
                list_volumes.append(volume_map)
            return list_volumes
        except Exception as e:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def list_storage_pools(self, storage_id):
        """Return MSA pools as delfin pool dicts.

        Pool usage is derived by summing the sizes/blocks of the volumes
        whose native_storage_pool_id matches each pool's serial number.

        :param storage_id: delfin storage identifier stamped on each entry.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        try:
            pool_infos = self.ssh_pool.do_exec('show pools')
            pool_detail = self.handle_xml_to_json(pool_infos, 'pools')
            volume_list = self.list_storage_volume(storage_id)
            pools_list = []
            if pool_detail:
                for data in pool_detail:
                    volume_size = 0
                    blocks = 0
                    if volume_list:
                        for volume in volume_list:
                            if volume.get('native_storage_pool_id') == data.\
                                    get('serial-number'):
                                volume_size += volume.get('total_capacity')
                                blocks += volume.get('blocks')
                    # Anything but health 'OK' is reported as offline.
                    health = data.get('health')
                    status = constants.StoragePoolStatus.OFFLINE
                    if health == 'OK':
                        status = constants.StoragePoolStatus.NORMAL
                    total_size = self.parse_string_to_bytes(
                        data.get('total-size'))
                    pool_map = {
                        'name': data.get('name'),
                        'storage_id': storage_id,
                        'native_storage_pool_id': data.get('serial-number'),
                        'status': status,
                        'storage_type': constants.StorageType.BLOCK,
                        'total_capacity': int(total_size),
                        # NOTE(review): block count stands in for the
                        # subscribed capacity — confirm units expected.
                        'subscribed_capacity': int(blocks),
                        'used_capacity': volume_size,
                        'free_capacity': int(total_size - volume_size)
                    }
                    pools_list.append(pool_map)
            return pools_list
        except Exception as e:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    @staticmethod
    def parse_string_to_bytes(value):
        capacity = 0
        if value:
            if value.isdigit():
                capacity = float(value)
            else:
                if value == '0B':
                    capacity = 0
                else:
                    unit = value[-2:]
                    capacity = float(value[:-2]) * int(
                        Tools.change_capacity_to_bytes(unit))
        return capacity

    @staticmethod
    def handle_xml_to_json(detail_info, element):
        detail_arr = []
        detail_data = detail_info.split('\n')
        detail = detail_data[1:len(detail_data) - 1]
        detail_xml = ''.join(detail)
        xml_element = Et.fromstring(detail_xml)
        for children in xml_element.iter('OBJECT'):
            property_name = children.get('basetype')
            if element == property_name:
                msg = {}
                for child in children.iter('PROPERTY'):
                    msg[child.get('name')] = child.text
                detail_arr.append(msg)
        return detail_arr

    def list_alerts(self, query_para):
        """Return current error-level events as delfin alert dicts.

        :param query_para: optional time-range filter consumed by
            alert_util.is_alert_in_time_range.
        :raises Exception: re-raised after logging on CLI/parse failure.
        """
        alert_list = []
        try:
            alert_infos = self.ssh_pool.do_exec('show events error')
            alert_json = self.handle_xml_to_json(alert_infos, 'events')
            for alert_map in alert_json:
                severity = alert_map.get('severity')
                # Skip non-actionable entries before doing any other work.
                if severity == 'Informational' or severity == 'RESOLVED':
                    continue
                now = time.time()
                occur_time = int(
                    round(now * consts.SecondsNumber.SECONDS_TO_MS))
                time_stamp = alert_map.get('time-stamp-numeric')
                if time_stamp is not None:
                    occur_time = int(time_stamp) * consts.SecondsNumber\
                        .SECONDS_TO_MS
                    if not alert_util.is_alert_in_time_range(
                            query_para, occur_time):
                        continue
                event_code = alert_map.get('event-code')
                event_id = alert_map.get('event-id')
                location = alert_map.get('message')
                resource_type = alert_map.get('event-code')
                additional_info = str(alert_map.get('additional-information'))
                # Deduplication key. Fix: start from '' — the previous
                # None start raised TypeError on '+= severity' whenever
                # 'event-code' was missing.
                match_key = ''
                if event_code:
                    match_key += event_code
                if severity:
                    match_key += severity
                if location:
                    match_key += location
                # str() above never yields a falsy value, so this is the
                # description unconditionally (possibly the text 'None').
                description = additional_info
                alert_model = {
                    'alert_id': event_id,
                    'alert_name': event_code,
                    'severity': severity,
                    'category': constants.Category.FAULT,
                    'type': 'EquipmentAlarm',
                    'sequence_number': event_id,
                    'occur_time': occur_time,
                    'description': description,
                    'resource_type': resource_type,
                    'location': location,
                    'match_key': hashlib.md5(match_key.encode()).hexdigest()
                }
                alert_list.append(alert_model)
            # Collapse duplicates down to one entry per dedup key group.
            alert_list_data = SSHHandler.get_last_alert_data(alert_list)
            return alert_list_data
        except Exception as e:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    @staticmethod
    def get_last_alert_data(alert_json):
        alert_list = []
        alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))
        for key, item in groupby(alert_json,
                                 key=itemgetter('alert_name', 'location',
                                                'severity')):
            alert_last_index = 0
            alert_list.append(list(item)[alert_last_index])
        return alert_list

    @staticmethod
    def parse_alert(alert):
        """Translate an SNMP trap variable-binding dict into a delfin
        alert model.

        :param alert: mapping of OID string -> trap value; fields are
            matched by substring against the OIDs in consts.AlertOIDNumber.
        :raises exception.InvalidResults: when building the model fails.
        """
        try:
            alert_model = dict()
            alert_id = None
            description = None
            # Default severity when the trap carries none; '8' presumably
            # maps to an informational level — TODO confirm in consts.
            severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')
            sequence_number = None
            event_type = None
            for alert_key, alert_value in alert.items():
                if consts.AlertOIDNumber.OID_ERR_ID in alert_key:
                    alert_id = str(alert_value)
                elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:
                    event_type = alert_value
                elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:
                    description = alert_value
                elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:
                    severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\
                        .get(alert.get(consts.AlertOIDNumber.OID_SEVERITY),
                             constants.Severity.INFORMATIONAL)
                elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:
                    sequence_number = alert_value
            # If a description is present, the text after the first ':' of
            # its first comma-separated field overrides the OID-provided
            # alert id — presumably the device embeds the id there.
            if description:
                desc_arr = description.split(",")
                if desc_arr:
                    alert_id = SSHHandler.split_by_char_and_number(
                        desc_arr[0], ":", 1)
            alert_model['alert_id'] = str(alert_id)
            alert_model['alert_name'] = event_type
            alert_model['severity'] = severity
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['sequence_number'] = sequence_number
            # Traps carry no usable timestamp here; receipt time is used.
            now = time.time()
            alert_model['occur_time'] = int(
                round(now * consts.SecondsNumber.SECONDS_TO_MS))
            alert_model['description'] = description
            alert_model['location'] = description
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = "Failed to build alert model: %s." % (six.text_type(e))
            raise exception.InvalidResults(msg)

    @staticmethod
    def split_by_char_and_number(split_str, split_char, arr_number):
        split_value = ''
        if split_str:
            tmp_value = split_str.split(split_char, 1)
            if arr_number == 1 and len(tmp_value) > 1:
                split_value = tmp_value[arr_number].strip()
            elif arr_number == 0:
                split_value = tmp_value[arr_number].strip()
        return split_value

    @staticmethod
    def handle_xml_to_dict(xml_info, element):
        msg = {}
        xml_split = xml_info.split('\n')
        xml_data = xml_split[1:len(xml_split) - 1]
        detail_xml = ''.join(xml_data)
        xml_element = Et.fromstring(detail_xml)
        for children in xml_element.iter('OBJECT'):
            property_name = children.get('basetype')
            if element == property_name:
                for child in children.iter('PROPERTY'):
                    msg[child.get('name')] = child.text
        return msg
# Exemple #5
# 0
class SSHHandler(object):
    """SSH-based handler for IBM Storwize/SVC arrays."""

    # SNMP OIDs carried in Storwize trap payloads.
    OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3'
    OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9'
    OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10'
    OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11'
    OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17'
    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'

    # Trap enterprise OID -> delfin severity constant.
    TRAP_SEVERITY_MAP = {
        '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL,
        '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING,
        '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL,
    }

    # Device severity word -> reported severity label.
    SEVERITY_MAP = {
        "warning": "Warning",
        "informational": "Informational",
        "error": "Major"
    }

    # Multiplier converting epoch seconds to milliseconds.
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        # kwargs are the delfin access-info fields (host, credentials, ...)
        # forwarded verbatim to the SSH connection pool.
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def handle_split(split_str, split_char, arr_number):
        split_value = ''
        if split_str is not None and split_str != '':
            tmp_value = split_str.split(split_char, 1)
            if arr_number == 1 and len(tmp_value) > 1:
                split_value = tmp_value[arr_number].strip()
            elif arr_number == 0:
                split_value = tmp_value[arr_number].strip()
        return split_value

    @staticmethod
    def parse_alert(alert):
        """Translate a Storwize SNMP trap variable-binding dict into a
        delfin alert model.

        Trap values are 'name=value' style strings; handle_split extracts
        the part before/after the separator.

        :param alert: mapping of OID string -> trap value.
        :raises exception.InvalidResults: when expected fields are missing
            or the timestamp cannot be parsed.
        """
        try:
            alert_model = dict()
            # OID_ERR_ID carries 'something:alert name'; the part before
            # ':' then yields the numeric id after its '='.
            alert_name = SSHHandler.handle_split(
                alert.get(SSHHandler.OID_ERR_ID), ':', 1)
            error_info = SSHHandler.handle_split(
                alert.get(SSHHandler.OID_ERR_ID), ':', 0)
            alert_id = SSHHandler.handle_split(error_info, '=', 1)
            severity = SSHHandler.TRAP_SEVERITY_MAP.get(
                alert.get(SSHHandler.OID_SEVERITY),
                constants.Severity.INFORMATIONAL)
            alert_model['alert_id'] = str(alert_id)
            alert_model['alert_name'] = alert_name
            alert_model['severity'] = severity
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['sequence_number'] = SSHHandler. \
                handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1)
            # Trap timestamp format, e.g. 'Mon Jan 01 12:00:00 2021'
            # (interpreted in the local timezone via time.mktime).
            timestamp = SSHHandler. \
                handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1)
            time_type = '%a %b %d %H:%M:%S %Y'
            occur_time = int(time.mktime(time.strptime(timestamp, time_type)))
            alert_model['occur_time'] = int(occur_time *
                                            SSHHandler.SECONDS_TO_MS)
            alert_model['description'] = alert_name
            alert_model['resource_type'] = SSHHandler.handle_split(
                alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1)
            alert_model['location'] = SSHHandler.handle_split(
                alert.get(SSHHandler.OID_OBJ_NAME), '=', 1)
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = ("Failed to build alert model as some attributes missing "
                   "in alert message:%s.") % (six.text_type(e))
            raise exception.InvalidResults(msg)

    def login(self):
        """Verify SSH connectivity and credentials against the array."""
        try:
            # Running any read-only command proves the session is usable.
            with self.ssh_pool.item() as ssh:
                SSHHandler.do_exec('lssystem', ssh)
        except Exception as e:
            detail = six.text_type(e)
            LOG.error("Failed to login ibm storwize_svc %s" % detail)
            raise e

    @staticmethod
    def do_exec(command_str, ssh):
        """Execute *command_str* on an open paramiko SSH client.

        Returns stdout if non-empty, otherwise stderr, decoded to text;
        returns '' when command or client is None.

        :raises exception.InvalidUsernameOrPassword: auth failures.
        :raises exception.SSHConnectTimeout: connection timeouts.
        :raises exception.InvalidPrivateKey: bad private key file.
        :raises exception.SSHException: any other SSH-level error.
        """
        # Fix: 'result' was only assigned inside the guard, so a None
        # command or None client raised UnboundLocalError at the return.
        result = ''
        try:
            utils.check_ssh_injection(command_str)
            if command_str is not None and ssh is not None:
                stdin, stdout, stderr = ssh.exec_command(command_str)
                res, err = stdout.read(), stderr.read()
                # Prefer stdout, fall back to stderr. Renamed from 're',
                # which shadowed the stdlib regex module name.
                output = res if res else err
                result = output.decode()
        except paramiko.AuthenticationException as ae:
            LOG.error('doexec Authentication error:{}'.format(ae))
            raise exception.InvalidUsernameOrPassword()
        except Exception as e:
            err = six.text_type(e)
            LOG.error('doexec InvalidUsernameOrPassword error')
            if 'timed out' in err:
                raise exception.SSHConnectTimeout()
            elif 'No authentication methods available' in err \
                    or 'Authentication failed' in err:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in err:
                raise exception.InvalidPrivateKey()
            else:
                raise exception.SSHException(err)
        return result

    def exec_ssh_command(self, command):
        """Run *command* on a pooled SSH connection and return its output.

        :param command: CLI command string.
        :raises exception.SSHException: wrapping any underlying failure.
        """
        try:
            with self.ssh_pool.item() as ssh:
                ssh_info = SSHHandler.do_exec(command, ssh)
            return ssh_info
        except Exception as e:
            msg = "Failed to ssh ibm storwize_svc %s: %s" % \
                  (command, six.text_type(e))
            raise exception.SSHException(msg)

    def change_capacity_to_bytes(self, unit):
        """Return the byte multiplier for a capacity unit suffix
        ('TB'/'GB'/'MB'/'KB', case-insensitive); 1 for anything else."""
        multipliers = {
            'TB': units.Ti,
            'GB': units.Gi,
            'MB': units.Mi,
            'KB': units.Ki,
        }
        return int(multipliers.get(unit.upper(), 1))

    def parse_string(self, value):
        """Convert a size string to bytes: bare digits are a byte count,
        otherwise the trailing two characters are the unit suffix."""
        if not value:
            return 0
        if value.isdigit():
            return float(value)
        return float(value[:-2]) * int(
            self.change_capacity_to_bytes(value[-2:]))

    def get_storage(self):
        """Build the delfin storage dict for this Storwize/SVC system.

        Combines 'lssystem' key/value output with the enclosure listing
        (for the serial number).

        :return: storage dict.
        :raises exception.DelfinException: re-raised after logging.
        :raises exception.InvalidResults: on any other failure.
        """
        try:
            system_info = self.exec_ssh_command('lssystem')
            enclosure_info = self.exec_ssh_command('lsenclosure -delim :')
            enclosure_res = enclosure_info.split('\n')
            # Row 0 is the header; column 7 of the first data row holds
            # the serial — TODO confirm column index across firmware.
            enclosure = enclosure_res[1].split(':')
            serial_number = enclosure[7]
            storage_map = {}
            self.handle_detail(system_info, storage_map, split=' ')

            # NOTE(review): statistics collection state is used as a proxy
            # for overall array status — confirm this is intentional.
            status = 'normal' if storage_map.get('statistics_status') == 'on' \
                else 'offline'
            location = storage_map.get('location')
            free_capacity = self.parse_string(
                storage_map.get('total_free_space'))
            used_capacity = self.parse_string(
                storage_map.get('total_used_capacity'))
            raw_capacity = self.parse_string(
                storage_map.get('total_drive_raw_capacity'))
            subscribed_capacity = self.parse_string(
                storage_map.get('virtual_capacity'))
            # code_level looks like '8.4.0.0 (build ...)'; keep the bare
            # version token.
            firmware_version = ''
            if storage_map.get('code_level') is not None:
                firmware_version = storage_map.get('code_level').split(' ')[0]
            s = {
                'name': storage_map.get('name'),
                'vendor': 'IBM',
                'model': storage_map.get('product_name'),
                'status': status,
                'serial_number': serial_number,
                'firmware_version': firmware_version,
                'location': location,
                'total_capacity': int(free_capacity + used_capacity),
                'raw_capacity': int(raw_capacity),
                'subscribed_capacity': int(subscribed_capacity),
                'used_capacity': int(used_capacity),
                'free_capacity': int(free_capacity)
            }
            return s
        except exception.DelfinException as e:
            err_msg = "Failed to get storage: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def handle_detail(self, deltail_info, detail_map, split):
        """Parse 'key<split>value' lines of CLI detail output into
        detail_map; keys without a value map to ''."""
        for line in deltail_info.split('\n'):
            if not line:
                continue
            parts = line.split(split, 1)
            detail_map[parts[0]] = parts[1] if len(parts) > 1 else ''

    def list_storage_pools(self, storage_id):
        """List all mdisk groups (pools) on the Storwize array.

        Runs 'lsmdiskgrp' to enumerate pool ids, then a per-pool
        'lsmdiskgrp <id>' for details.

        :param storage_id: delfin id of the owning storage.
        :return: list of pool model dicts.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            pool_list = []
            pool_info = self.exec_ssh_command('lsmdiskgrp')
            pool_res = pool_info.split('\n')
            # Row 0 is the CLI header; data rows follow.
            for i in range(1, len(pool_res)):
                if pool_res[i] is None or pool_res[i] == '':
                    continue

                # Collapse repeated whitespace so the pool id is column 0.
                pool_str = ' '.join(pool_res[i].split())
                strinfo = pool_str.split(' ')
                detail_command = 'lsmdiskgrp %s' % strinfo[0]
                deltail_info = self.exec_ssh_command(detail_command)
                pool_map = {}
                self.handle_detail(deltail_info, pool_map, split=' ')
                status = 'normal' if pool_map.get('status') == 'online' \
                    else 'offline'
                total_cap = self.parse_string(pool_map.get('capacity'))
                free_cap = self.parse_string(pool_map.get('free_capacity'))
                used_cap = self.parse_string(pool_map.get('used_capacity'))
                subscribed_capacity = self.parse_string(
                    pool_map.get('virtual_capacity'))
                p = {
                    'name': pool_map.get('name'),
                    'storage_id': storage_id,
                    'native_storage_pool_id': pool_map.get('id'),
                    'description': '',
                    'status': status,
                    'storage_type': constants.StorageType.BLOCK,
                    'subscribed_capacity': int(subscribed_capacity),
                    'total_capacity': int(total_cap),
                    'used_capacity': int(used_cap),
                    'free_capacity': int(free_cap)
                }
                pool_list.append(p)

            return pool_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """List all vdisks (volumes) on the Storwize array.

        Runs 'lsvdisk' to enumerate volumes, then a per-volume
        'lsvdisk -delim : <name>' for details.

        :param storage_id: delfin id of the owning storage.
        :return: list of volume model dicts.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            volume_list = []
            volume_info = self.exec_ssh_command('lsvdisk')
            volume_res = volume_info.split('\n')
            # Row 0 is the CLI header; data rows follow.
            for i in range(1, len(volume_res)):
                if volume_res[i] is None or volume_res[i] == '':
                    continue
                # Collapse whitespace; the volume name is column 1.
                volume_str = ' '.join(volume_res[i].split())
                strinfo = volume_str.split(' ')
                volume_name = strinfo[1]
                detail_command = 'lsvdisk -delim : %s' % volume_name
                deltail_info = self.exec_ssh_command(detail_command)
                volume_map = {}
                self.handle_detail(deltail_info, volume_map, split=':')
                status = 'normal' if volume_map.get('status') == 'online' \
                    else 'offline'
                # A space-efficient (se) copy indicates thin provisioning.
                volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \
                    else 'thick'
                total_capacity = self.parse_string(volume_map.get('capacity'))
                free_capacity = self.parse_string(
                    volume_map.get('free_capacity'))
                used_capacity = self.parse_string(
                    volume_map.get('used_capacity'))
                # Default to True; only an explicit 'no' clears the flags.
                compressed = True
                deduplicated = True
                if volume_map.get('compressed_copy') == 'no':
                    compressed = False
                if volume_map.get('deduplicated_copy') == 'no':
                    deduplicated = False

                v = {
                    'name': volume_map.get('name'),
                    'storage_id': storage_id,
                    'description': '',
                    'status': status,
                    'native_volume_id': str(volume_map.get('id')),
                    'native_storage_pool_id': volume_map.get('mdisk_grp_id'),
                    'wwn': str(volume_map.get('vdisk_UID')),
                    'type': volume_type,
                    'total_capacity': int(total_capacity),
                    'used_capacity': int(used_capacity),
                    'free_capacity': int(free_capacity),
                    'compressed': compressed,
                    'deduplicated': deduplicated
                }
                volume_list.append(v)

            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_alerts(self, query_para):
        """List monitoring events from the Storwize event log.

        Runs 'lseventlog -monitoring yes' to enumerate events, then a
        per-event 'lseventlog <seq>' for details, filtering by the
        optional query time range.

        :param query_para: optional dict with 'begin_time'/'end_time'
            (milliseconds) used by alert_util for range filtering.
        :return: list of alert model dicts.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            alert_list = []
            alert_info = self.exec_ssh_command('lseventlog -monitoring yes')
            alert_res = alert_info.split('\n')
            # Row 0 is the CLI header; data rows follow.
            for i in range(1, len(alert_res)):
                if alert_res[i] is None or alert_res[i] == '':
                    continue
                alert_str = ' '.join(alert_res[i].split())
                strinfo = alert_str.split(' ', 1)
                detail_command = 'lseventlog %s' % strinfo[0]
                deltail_info = self.exec_ssh_command(detail_command)
                alert_map = {}
                self.handle_detail(deltail_info, alert_map, split=' ')
                # Epoch seconds -> milliseconds, as delfin expects.
                occur_time = int(alert_map.get('last_timestamp_epoch')) * \
                    self.SECONDS_TO_MS
                if not alert_util.is_alert_in_time_range(
                        query_para, occur_time):
                    continue
                alert_name = alert_map.get('event_id_text', '')
                event_id = alert_map.get('event_id')
                location = alert_map.get('object_name', '')
                resource_type = alert_map.get('object_type', '')
                # NOTE(review): SEVERITY_MAP is assumed to be a class-level
                # mapping defined earlier in this class (outside this view).
                severity = self.SEVERITY_MAP.get(
                    alert_map.get('notification_type'))

                alert_model = {
                    'alert_id': event_id,
                    'alert_name': alert_name,
                    'severity': severity,
                    'category': constants.Category.FAULT,
                    'type': 'EquipmentAlarm',
                    'sequence_number': alert_map.get('sequence_number'),
                    'occur_time': occur_time,
                    'description': alert_name,
                    'resource_type': resource_type,
                    'location': location
                }
                alert_list.append(alert_model)

            return alert_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
Exemple #6
0
 def test_ssh_pool_put(self):
     """Verify a client can be returned to the pool and then removed."""
     ssh_pool = SSHPool(**ACCESS_INFO)
     ssh = paramiko.SSHClient()
     # Put the client back into the pool, then drop it entirely.
     ssh_pool.put(ssh)
     ssh_pool.remove(ssh)
Exemple #7
0
class NetAppHandler(object):
    """SSH-based handler for NetApp ONTAP (cmode) storage arrays."""

    # SNMP OID of the cluster serial number.
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    # SNMP OID carrying the trap payload text parsed by parse_alert().
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'
    # Key under which the trap's source node name is delivered.
    NODE_NAME = 'controller_name'
    # Multiplier to convert epoch seconds to milliseconds.
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        # Access info is forwarded verbatim to the SSH connection pool.
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def get_table_data(values):
        """Return the rows following the last header line of CLI output.

        The header separator line is located with constant.PATTERN; every
        row after the last match is treated as table data.
        """
        rows = values.split("\r\n")
        header_index = 0
        for index, row in enumerate(rows):
            if constant.PATTERN.search(row):
                header_index = index
        return rows[header_index + 1:]

    @staticmethod
    def get_fs_id(vserver, volume):
        return vserver + '_' + volume

    @staticmethod
    def get_qt_id(vserver, volume, qtree):
        qt_id = vserver + '/' + volume
        if qtree != '':
            qt_id += '/' + qtree
        return qt_id

    def ssh_do_exec(self, command):
        """Run *command* over SSH with a 10-second soft timeout.

        NOTE(review): eventlet.Timeout(10, False) suppresses the timeout
        exception, so when the command exceeds 10 seconds the default ''
        is returned instead of raising -- confirm callers tolerate an
        empty result.

        :param command: CLI command string to execute.
        :return: command output, or '' if the call timed out.
        """
        res = ''
        with eventlet.Timeout(10, False):
            res = self.ssh_pool.do_exec(command)
        return res

    @staticmethod
    def get_size(limit, is_calculate=False):
        if limit == '0B':
            return 0
        if limit == '-':
            return 0 if is_calculate else '-'
        return int(Tools.get_capacity_size(limit))

    @staticmethod
    def parse_alert(alert):
        """Parse an SNMP trap dict into a delfin alert model.

        The trap payload (OID_TRAP_DATA) has the shape
        '<message> [key1 = value1, key2 = value2, ...]'; the bracketed
        key/value pairs are collected into alert_map and enriched with
        static metadata from alert_template keyed by AlertId.

        :param alert: dict of OID/key -> value received with the trap.
        :return: alert model dict; empty dict when the payload has no
            bracketed key/value section.
        :raises exception.InvalidResults: when parsing fails.
        """
        try:
            alert_info = alert.get(NetAppHandler.OID_TRAP_DATA)
            node_name = alert.get(NetAppHandler.NODE_NAME)
            alert_info = alert_info.replace("]", '')
            alert_array = alert_info.split("[")
            alert_model = {}
            alert_map = {}
            if len(alert_array) > 1:
                # BUG FIX: both branches previously yielded RECOVERY, so
                # newly created alerts were mis-categorized. A 'created'
                # trap is a fault; anything else is a recovery.
                category = constants.Category.FAULT \
                    if 'created' in alert_array[0] \
                    else constants.Category.RECOVERY
                alert_values = alert_array[1].split(",")
                for alert_value in alert_values:
                    array = alert_value.split("=")
                    if len(array) > 1:
                        key = array[0].replace(' ', '')
                        value = array[1].replace(' ', '').replace('.', '')
                        alert_map[key] = value
                if alert_map:
                    # Enrich with static template metadata when available.
                    alert_map_info = \
                        alert_template.ALERT_TEMPLATE.get(
                            alert_map.get('AlertId'))
                    severity = description = location = ''
                    if alert_map_info:
                        severity = constant.ALERT_SEVERITY[
                            alert_map_info['severityofAlert']]
                        location = \
                            alert_map_info['probableCause'] +\
                            ':' + alert_map_info['PossibleEffect']
                        description = alert_map_info['description']
                    alert_model = {
                        'alert_id':
                        alert_map.get('AlertId'),
                        'alert_name':
                        alert_map.get('AlertId'),
                        'severity':
                        severity,
                        'category':
                        category,
                        'type':
                        constants.EventType.EQUIPMENT_ALARM,
                        'occur_time':
                        utils.utcnow_ms(),
                        'description':
                        description,
                        # Dedup key: same alert on the same node/resource
                        # maps to the same hash.
                        'match_key':
                        hashlib.md5((alert_map.get('AlertId') + node_name +
                                     alert_map['AlertingResource']
                                     ).encode()).hexdigest(),
                        'resource_type':
                        constants.DEFAULT_RESOURCE_TYPE,
                        'location':
                        location
                    }
            return alert_model
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def login(self):
        """Validate connectivity and credentials with a harmless command.

        :raises exception.InvalidIpOrPort: when the CLI rejects
            'cluster identity show', i.e. this is not an ONTAP shell.
        """
        try:
            result = self.ssh_do_exec('cluster identity show')
            if 'is not a recognized command' in result \
                    or 'command not found' in result:
                raise exception.InvalidIpOrPort()
        except Exception as e:
            LOG.error("Failed to login netapp %s" % (six.text_type(e)))
            raise e

    def get_storage(self):
        """Collect cluster-level information for the ONTAP array.

        Aggregates cluster identity, version, status and controller
        details, and derives capacities from disks and pools.

        :return: storage model dict, or None when the cluster identity
            output yields no key/value maps.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            raw_capacity = total_capacity = used_capacity = free_capacity = 0
            controller_map_list = []
            system_info = self.ssh_do_exec(constant.CLUSTER_SHOW_COMMAND)
            version_info = self.ssh_do_exec(constant.VERSION_SHOW_COMMAND)
            status_info = self.ssh_do_exec(constant.STORAGE_STATUS_COMMAND)
            controller_info = self.ssh_do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            Tools.split_value_map_list(controller_info, controller_map_list,
                                       ":")
            version_array = version_info.split("\r\n")
            storage_version = ''
            # Keep the first line mentioning 'NetApp', split on ':'.
            for version in version_array:
                if 'NetApp' in version:
                    storage_version = version.split(":")
                    break
            status = self.get_table_data(status_info)
            # First token of the first data row maps to a delfin status.
            status = constant.STORAGE_STATUS.get(status[0].split()[0])
            disk_list = self.get_disks(None)
            pool_list = self.list_storage_pools(None)
            storage_map_list = []
            Tools.split_value_map_list(system_info,
                                       storage_map_list,
                                       split=':')
            if len(storage_map_list) > 0:
                # Use the last parsed map of each listing.
                storage_map = storage_map_list[len(storage_map_list) - 1]
                controller_map = \
                    controller_map_list[len(controller_map_list) - 1]
                # Raw capacity comes from disks; logical capacities are
                # summed over all pools/aggregates.
                for disk in disk_list:
                    raw_capacity += disk['capacity']
                for pool in pool_list:
                    total_capacity += pool['total_capacity']
                    free_capacity += pool['free_capacity']
                    used_capacity += pool['used_capacity']
                storage_model = {
                    "name": storage_map['ClusterName'],
                    "vendor": constant.STORAGE_VENDOR,
                    "model": controller_map['Model'],
                    "status": status,
                    "serial_number": storage_map['ClusterSerialNumber'],
                    "firmware_version": storage_version[0],
                    "location": controller_map['Location'],
                    "total_capacity": total_capacity,
                    "raw_capacity": raw_capacity,
                    "used_capacity": used_capacity,
                    "free_capacity": free_capacity
                }
                return storage_model
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_aggregate(self, storage_id):
        """List ONTAP aggregates as delfin pool models.

        :param storage_id: delfin id of the owning storage.
        :return: list of pool model dicts (one per aggregate).
        """
        agg_list = []
        agg_info = self.ssh_do_exec(constant.AGGREGATE_SHOW_DETAIL_COMMAND)
        agg_map_list = []
        Tools.split_value_map_list(agg_info, agg_map_list, split=':')
        for agg_map in agg_map_list:
            # Skip parse fragments that do not describe an aggregate.
            if agg_map and 'Aggregate' in agg_map.keys():
                status = constant.AGGREGATE_STATUS.get(agg_map['State'])
                pool_model = {
                    'name': agg_map['Aggregate'],
                    'storage_id': storage_id,
                    'native_storage_pool_id': agg_map['UUIDString'],
                    'description': None,
                    'status': status,
                    'storage_type': constants.StorageType.UNIFIED,
                    'total_capacity': self.get_size(agg_map['Size'], True),
                    'used_capacity': self.get_size(agg_map['UsedSize'], True),
                    'free_capacity': self.get_size(agg_map['AvailableSize'],
                                                   True),
                }
                agg_list.append(pool_model)
        return agg_list

    def get_pool(self, storage_id):
        """List ONTAP storage pools as delfin pool models.

        :param storage_id: delfin id of the owning storage.
        :return: list of pool model dicts (one per storage pool).
        """
        pool_list = []
        pool_info = self.ssh_do_exec(constant.POOLS_SHOW_DETAIL_COMMAND)
        pool_map_list = []
        Tools.split_value_map_list(pool_info, pool_map_list, split=':')
        for pool_map in pool_map_list:
            # Skip parse fragments that do not describe a storage pool.
            if pool_map and 'StoragePoolName' in pool_map.keys():
                status = constants.StoragePoolStatus.ABNORMAL
                if pool_map['IsPoolHealthy?'] == 'true':
                    status = constants.StoragePoolStatus.NORMAL
                pool_model = {
                    'name':
                    pool_map['StoragePoolName'],
                    'storage_id':
                    storage_id,
                    'native_storage_pool_id':
                    pool_map['UUIDofStoragePool'],
                    'description':
                    None,
                    'status':
                    status,
                    'storage_type':
                    constants.StorageType.UNIFIED,
                    'total_capacity':
                    self.get_size(pool_map['StoragePoolTotalSize'], True),
                    # used = total - usable; only total/usable are reported.
                    'used_capacity':
                    self.get_size(pool_map['StoragePoolTotalSize'], True) -
                    self.get_size(pool_map['StoragePoolUsableSize'], True),
                    'free_capacity':
                    self.get_size(pool_map['StoragePoolUsableSize'], True)
                }
                pool_list.append(pool_model)
        return pool_list

    def list_storage_pools(self, storage_id):
        """Return aggregates and storage pools as one pool list.

        :param storage_id: delfin id of the owning storage.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            pools = self.get_pool(storage_id)
            aggregates = self.get_aggregate(storage_id)
            return aggregates + pools
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """List ONTAP LUNs as delfin volume models.

        The pool id of each LUN is resolved through the filesystem
        (FlexVol) that contains it.

        :param storage_id: delfin id of the owning storage.
        :return: list of volume model dicts.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            volume_list = []
            volume_info = self.ssh_do_exec(constant.LUN_SHOW_DETAIL_COMMAND)
            fs_list = self.get_filesystems(storage_id)
            volume_map_list = []
            Tools.split_value_map_list(volume_info, volume_map_list, split=':')
            for volume_map in volume_map_list:
                # Skip parse fragments that do not describe a LUN.
                if volume_map and 'LUNName' in volume_map.keys():
                    pool_id = None
                    status = 'normal' if volume_map['State'] == 'online' \
                        else 'offline'
                    # Resolve the pool via the containing FlexVol.
                    for fs in fs_list:
                        if fs['name'] == volume_map['VolumeName']:
                            pool_id = fs['native_pool_id']
                    type = constants.VolumeType.THIN \
                        if volume_map['SpaceAllocation'] == 'enabled' \
                        else constants.VolumeType.THICK
                    volume_model = {
                        'name':
                        volume_map['LUNName'],
                        'storage_id':
                        storage_id,
                        'description':
                        None,
                        'status':
                        status,
                        'native_volume_id':
                        volume_map['SerialNumber'],
                        'native_storage_pool_id':
                        pool_id,
                        'wwn':
                        None,
                        'compressed':
                        None,
                        'deduplicated':
                        None,
                        'type':
                        type,
                        'total_capacity':
                        self.get_size(volume_map['LUNSize'], True),
                        'used_capacity':
                        self.get_size(volume_map['UsedSize'], True),
                        'free_capacity':
                        self.get_size(volume_map['LUNSize'], True) -
                        self.get_size(volume_map['UsedSize'], True)
                    }
                    volume_list.append(volume_model)
            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_alerts(self, query_para):
        """List ONTAP system health alerts as delfin alert models.

        :param query_para: optional dict with 'begin_time'/'end_time'
            bounds (epoch seconds) used to filter alerts.
        :return: list of alert model dicts.
        """
        alert_list = []
        alert_info = self.ssh_do_exec(constant.ALTER_SHOW_DETAIL_COMMAND)
        alert_map_list = []
        Tools.split_value_map_list(alert_info, alert_map_list, True, split=':')
        for alert_map in alert_map_list:
            # Skip parse fragments that do not describe an alert.
            if alert_map and 'AlertID' in alert_map.keys():
                # Parse the array-local timestamp into epoch seconds.
                occur_time = int(
                    time.mktime(
                        time.strptime(alert_map['IndicationTime'],
                                      constant.ALTER_TIME_TYPE)))
                if not query_para or \
                        (int(query_para['begin_time'])
                         <= occur_time
                         <= int(query_para['end_time'])):
                    alert_model = {
                        'alert_id':
                        alert_map['AlertID'],
                        'alert_name':
                        alert_map['AlertID'],
                        'severity':
                        constant.ALERT_SEVERITY[
                            alert_map['PerceivedSeverity']],
                        'category':
                        constants.Category.FAULT,
                        'type':
                        constants.EventType.EQUIPMENT_ALARM,
                        # delfin expects milliseconds.
                        'occur_time':
                        occur_time * 1000,
                        'description':
                        alert_map['Description'],
                        'sequence_number':
                        alert_map['AlertID'],
                        # Dedup key: alert + node + resource.
                        'match_key':
                        hashlib.md5((alert_map['AlertID'] + alert_map['Node'] +
                                     alert_map['AlertingResource']
                                     ).encode()).hexdigest(),
                        'resource_type':
                        constants.DEFAULT_RESOURCE_TYPE,
                        'location':
                        alert_map['ProbableCause'] + ':' +
                        alert_map['PossibleEffect']
                    }
                    alert_list.append(alert_model)
        return alert_list

    def list_alerts(self, query_para):
        """Query alarms within the optional time range in query_para.

        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            return self.get_alerts(query_para)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        """Clear (delete) the given alert on the array.

        :param alert: alert model dict; only 'alert_id' is used.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            ssh_command = \
                constant.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.ssh_do_exec(ssh_command)
        except exception.DelfinException as e:
            # BUG FIX: the original message said "get" although this
            # method clears an alert.
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disks(self, storage_id):
        """List physical disks as delfin disk models.

        Combines three CLI listings: logical disk details, physical
        attributes (type/speed/firmware) and the error-disk report used
        to flag abnormal status.

        :param storage_id: delfin id of the owning storage.
        :return: list of disk model dicts.
        """
        disks_list = []
        physicals_list = []
        disks_info = self.ssh_do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
        physicals_info = self.ssh_do_exec(constant.DISK_SHOW_PHYSICAL_COMMAND)
        error_disk = self.ssh_do_exec(constant.DISK_ERROR_COMMAND)
        error_disk_list = []
        # Collect the names (column 0) of disks listed in the error report.
        error_disk_array = self.get_table_data(error_disk)
        for error_disk in error_disk_array:
            error_array = error_disk.split()
            if len(error_array) > 2:
                error_disk_list.append(error_array[0])
        disks_map_list = []
        physical_array = self.get_table_data(physicals_info)
        for physical in physical_array:
            physicals_list.append(physical.split())
        Tools.split_value_map_list(disks_info, disks_map_list, split=':')
        for disks_map in disks_map_list:
            # Skip parse fragments that do not describe a disk.
            if disks_map and 'Disk' in disks_map.keys():
                speed = physical_type = firmware = None
                logical_type = constant.DISK_LOGICAL. \
                    get(disks_map['ContainerType'])
                """Map disk physical information"""
                for physical_info in physicals_list:
                    if len(physical_info) > 6 and \
                            physical_info[0] == disks_map['Disk']:
                        physical_type = \
                            constant.DISK_TYPE.get(physical_info[1])
                        # '-' means the speed is not reported (e.g. SSD).
                        speed = physical_info[5] \
                            if physical_info[5] != '-' else 0
                        firmware = physical_info[4]
                status = constants.DiskStatus.NORMAL
                if disks_map['Disk'] in error_disk_list:
                    status = constants.DiskStatus.ABNORMAL
                disk_model = {
                    'name': disks_map['Disk'],
                    'storage_id': storage_id,
                    'native_disk_id': disks_map['Disk'],
                    'serial_number': disks_map['SerialNumber'],
                    'manufacturer': disks_map['Vendor'],
                    'model': disks_map['Model'],
                    'firmware': firmware,
                    'speed': speed,
                    'capacity': self.get_size(disks_map['PhysicalSize'], True),
                    'status': status,
                    'physical_type': physical_type,
                    'logical_type': logical_type,
                    'native_disk_group_id': disks_map['Aggregate'],
                    'location': None,
                }
                disks_list.append(disk_model)
        return disks_list

    def get_filesystems(self, storage_id):
        """List FlexVol volumes as delfin filesystem models.

        Cross-references the pool list (for the owning aggregate) and
        the thin-provisioning listing (for the volume type).

        :param storage_id: delfin id of the owning storage.
        :return: list of filesystem model dicts; entries without a
            positive total capacity are dropped.
        """
        fs_list = []
        fs_info = self.ssh_do_exec(constant.FS_SHOW_DETAIL_COMMAND)
        thin_fs_info = self.ssh_do_exec(constant.THIN_FS_SHOW_COMMAND)
        pool_list = self.list_storage_pools(storage_id)
        thin_fs_array = self.get_table_data(thin_fs_info)
        fs_map_list = []
        Tools.split_value_map_list(fs_info, fs_map_list, split=':')
        for fs_map in fs_map_list:
            type = constants.FSType.THICK
            # Skip parse fragments that do not describe a volume.
            if fs_map and 'VolumeName' in fs_map.keys():
                pool_id = ""
                """get pool id"""
                for pool in pool_list:
                    if pool['name'] == fs_map['AggregateName']:
                        pool_id = pool['native_storage_pool_id']
                deduplicated = True
                if fs_map['SpaceSavedbyDeduplication'] == '0B':
                    deduplicated = False
                # A volume present in the thin listing is thin-provisioned.
                # NOTE(review): assigns constants.VolumeType.THIN although
                # the default above is constants.FSType.THICK -- confirm
                # both enums share the same string values.
                if len(thin_fs_array) > 2:
                    for thin_vol in thin_fs_array:
                        thin_array = thin_vol.split()
                        if len(thin_array) > 4:
                            if thin_array[1] == fs_map['VolumeName']:
                                type = constants.VolumeType.THIN
                compressed = True
                if fs_map['VolumeContainsSharedorCompressedData'] == \
                        'false':
                    compressed = False
                status = constant.FS_STATUS.get(fs_map['VolumeState'])
                fs_id = self.get_fs_id(fs_map['VserverName'],
                                       fs_map['VolumeName'])
                fs_model = {
                    'name':
                    fs_map['VolumeName'],
                    'storage_id':
                    storage_id,
                    'native_filesystem_id':
                    fs_id,
                    'native_pool_id':
                    pool_id,
                    'compressed':
                    compressed,
                    'deduplicated':
                    deduplicated,
                    'worm':
                    constant.WORM_TYPE.get(fs_map['SnapLockType']),
                    'status':
                    status,
                    'security_mode':
                    constant.SECURITY_STYLE.get(fs_map['SecurityStyle'],
                                                fs_map['SecurityStyle']),
                    'type':
                    type,
                    'total_capacity':
                    self.get_size(fs_map['VolumeSize']),
                    'used_capacity':
                    self.get_size(fs_map['VolumeSize'], True) -
                    self.get_size(fs_map['AvailableSize'], True),
                    'free_capacity':
                    self.get_size(fs_map['AvailableSize'])
                }
                # get_size() may return '-' for unreported sizes; keep
                # only filesystems with a real positive capacity.
                if fs_model['total_capacity'] != '-' \
                        and fs_model['total_capacity'] > 0:
                    fs_list.append(fs_model)
        return fs_list

    def list_controllers(self, storage_id):
        """List cluster nodes as delfin controller models.

        Management IPs are resolved from a separate interface listing
        keyed by node name.

        :param storage_id: delfin id of the owning storage.
        :return: list of controller model dicts.
        :raises exception.DelfinException: re-raised from lower layers.
        :raises exception.InvalidResults: for any unexpected failure.
        """
        try:
            controller_list = []
            controller_info = self.ssh_do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            controller_ips = self.ssh_do_exec(constant.CONTROLLER_IP_COMMAND)
            ips_array = self.get_table_data(controller_ips)
            ip_map = {}
            # Rows with 4 columns map node name (col 2) -> address (col 3).
            for ips in ips_array:
                ip_array = ips.split()
                if len(ip_array) == 4:
                    ip_map[ip_array[2]] = ip_array[3]
            controller_map_list = []
            Tools.split_value_map_list(controller_info,
                                       controller_map_list,
                                       split=':')
            for controller_map in controller_map_list:
                # Skip parse fragments that do not describe a node.
                if controller_map and 'Node' in controller_map.keys():
                    status = constants.ControllerStatus.NORMAL \
                        if controller_map['Health'] == 'true' \
                        else constants.ControllerStatus.OFFLINE
                    controller_model = {
                        'name': controller_map['Node'],
                        'storage_id': storage_id,
                        'native_controller_id': controller_map['SystemID'],
                        'status': status,
                        'location': controller_map['Location'],
                        'soft_version': None,
                        'cpu_info': None,
                        'memory_size': None,
                        'mgmt_ip': ip_map.get(controller_map['Node'])
                    }
                    controller_list.append(controller_model)
            return controller_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage controllers from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage controllers from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_eth_port(self, storage_id):
        """Query ethernet port details and map them to delfin port models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of ethernet port model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            eth_list = []
            eth_info = self.ssh_do_exec(constant.PORT_SHOW_DETAIL_COMMAND)

            eth_map_list = []
            Tools.split_value_map_list(eth_info, eth_map_list, split=':')
            for eth_map in eth_map_list:
                # Only parsed sections containing a 'Port' key are real
                # port entries; others are headers/noise from the CLI output.
                if eth_map and 'Port' in eth_map.keys():
                    logical_type = constant.ETH_LOGICAL_TYPE.get(
                        eth_map['PortType'])
                    # Port names repeat across nodes, so prefix with the
                    # node name to keep the native id unique cluster-wide.
                    port_id = \
                        eth_map['Node'] + '_' + eth_map['Port']
                    eth_model = {
                        'name':
                        eth_map['Port'],
                        'storage_id':
                        storage_id,
                        'native_port_id':
                        port_id,
                        'location':
                        eth_map['Node'] + ':' + eth_map['Port'],
                        'connection_status':
                        constants.PortConnectionStatus.CONNECTED
                        if eth_map['Link'] == 'up' else
                        constants.PortConnectionStatus.DISCONNECTED,
                        'health_status':
                        constants.PortHealthStatus.NORMAL
                        if eth_map['PortHealthStatus'] == 'healthy' else
                        constants.PortHealthStatus.ABNORMAL,
                        'type':
                        constants.PortType.ETH,
                        'logical_type':
                        logical_type,
                        # '-' means the CLI reported no operational speed;
                        # treat that as 0 instead of failing int().
                        'speed':
                        int(eth_map['SpeedOperational']) *
                        units.Mi if eth_map['SpeedOperational'] != '-' else 0,
                        # NOTE(review): max_speed reuses the operational speed
                        # field — no max-speed column is parsed here; confirm
                        # this is intentional.
                        'max_speed':
                        int(eth_map['SpeedOperational']) *
                        units.Mi if eth_map['SpeedOperational'] != '-' else 0,
                        'native_parent_id':
                        None,
                        'wwn':
                        None,
                        'mac_address':
                        eth_map['MACAddress'],
                        'ipv4':
                        None,
                        'ipv4_mask':
                        None,
                        'ipv6':
                        None,
                        'ipv6_mask':
                        None,
                    }
                    eth_list.append(eth_model)
            return eth_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_fc_port(self, storage_id):
        """Query FC adapter details and map them to delfin port models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of FC port model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            fc_list = []
            fc_info = self.ssh_do_exec(constant.FC_PORT_SHOW_DETAIL_COMMAND)
            fc_map_list = []
            Tools.split_value_map_list(fc_info, fc_map_list, split=':')
            for fc_map in fc_map_list:
                # Only parsed sections containing a 'Node' key are real
                # adapter entries.
                if fc_map and 'Node' in fc_map.keys():
                    type = constant.FC_TYPE.get(fc_map['PhysicalProtocol'])
                    # Adapter names repeat across nodes, so prefix with the
                    # node name to keep the native id unique cluster-wide.
                    port_id = \
                        fc_map['Node'] + '_' + fc_map['Adapter']
                    fc_model = {
                        'name':
                        fc_map['Node'] + ':' + fc_map['Adapter'],
                        'storage_id':
                        storage_id,
                        'native_port_id':
                        port_id,
                        'location':
                        fc_map['Node'] + ':' + fc_map['Adapter'],
                        'connection_status':
                        constants.PortConnectionStatus.CONNECTED
                        if fc_map['AdministrativeStatus'] == 'up' else
                        constants.PortConnectionStatus.DISCONNECTED,
                        'health_status':
                        constants.PortHealthStatus.NORMAL
                        if fc_map['OperationalStatus'] == 'online' else
                        constants.PortHealthStatus.ABNORMAL,
                        'type':
                        type,
                        'logical_type':
                        None,
                        # '-' means the CLI reported no link rate; treat it
                        # as 0 instead of failing int().
                        'speed':
                        int(fc_map['DataLinkRate(Gbit)']) *
                        units.Gi if fc_map['DataLinkRate(Gbit)'] != '-' else 0,
                        'max_speed':
                        int(fc_map['MaximumSpeed']) *
                        units.Gi if fc_map['MaximumSpeed'] != '-' else 0,
                        'native_parent_id':
                        None,
                        'wwn':
                        fc_map['AdapterWWNN'],
                        'mac_address':
                        None,
                        'ipv4':
                        None,
                        'ipv4_mask':
                        None,
                        'ipv6':
                        None,
                        'ipv6_mask':
                        None,
                    }
                    fc_list.append(fc_model)
            return fc_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_ports(self, storage_id):
        """Return every port on the array: FC adapters first, then ethernet."""
        fc_ports = self.get_fc_port(storage_id)
        eth_ports = self.get_eth_port(storage_id)
        return fc_ports + eth_ports

    def list_disks(self, storage_id):
        """Return all disks, translating unexpected errors to InvalidResults.

        :param storage_id: delfin storage identifier passed through.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            return self.get_disks(storage_id)
        except exception.DelfinException as delfin_err:
            message = ("Failed to get storage disks from "
                       "netapp cmode: %s" % six.text_type(delfin_err))
            LOG.error(message)
            raise delfin_err
        except Exception as unknown_err:
            message = ("Failed to get storage disks from "
                       "netapp cmode: %s" % six.text_type(unknown_err))
            LOG.error(message)
            raise exception.InvalidResults(message)

    def list_qtrees(self, storage_id):
        """Query qtrees and map them to delfin qtree models.

        The owning file system's junction path is looked up so a full
        qtree path can be reported.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of qtree model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            qt_list = []
            qt_info = self.ssh_do_exec(constant.QTREE_SHOW_DETAIL_COMMAND)
            fs_info = self.ssh_do_exec(constant.FS_SHOW_DETAIL_COMMAND)
            fs_map_list = []
            qt_map_list = []
            Tools.split_value_map_list(fs_info, fs_map_list, split=':')
            Tools.split_value_map_list(qt_info, qt_map_list, split=':')
            for qt_map in qt_map_list:
                # Only parsed sections containing 'QtreeName' are qtrees.
                if qt_map and 'QtreeName' in qt_map.keys():
                    fs_id = self.get_fs_id(qt_map['VserverName'],
                                           qt_map['VolumeName'])
                    qtree_path = None
                    # Find the owning volume's junction path; '-' means the
                    # volume is not mounted, so leave qtree_path as None.
                    for fs_map in fs_map_list:
                        if fs_map and 'VserverName' in fs_map.keys() \
                                and fs_id == self.get_fs_id(
                                fs_map['VserverName'],
                                fs_map['VolumeName']) \
                                and fs_map['JunctionPath'] != '-':
                            qtree_path = fs_map['JunctionPath']
                            break
                    qt_id = self.get_qt_id(qt_map['VserverName'],
                                           qt_map['VolumeName'],
                                           qt_map['QtreeName'])
                    qtree_name = qt_map['QtreeName']
                    if qt_map['QtreeName'] and qtree_path:
                        qtree_path += '/' + qt_map['QtreeName']
                        # Collapse accidental double slashes from the join.
                        qtree_path = qtree_path.replace('//', '/')
                    else:
                        # Nameless (default) qtree: fall back to the id so
                        # the model still has a usable display name.
                        qtree_name = qt_id
                    qt_model = {
                        'name': qtree_name,
                        'storage_id': storage_id,
                        'native_qtree_id': qt_id,
                        'path': qtree_path,
                        'native_filesystem_id': fs_id,
                        'security_mode': qt_map['SecurityStyle'],
                    }
                    qt_list.append(qt_model)
            return qt_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_nfs_shares(self, storage_id, qtree_list, protocol_map):
        """Build NFS share models for exported volumes and their qtrees.

        :param storage_id: delfin storage identifier stamped on each model.
        :param qtree_list: qtree models (output of list_qtrees) used to
            attach per-qtree shares.
        :param protocol_map: vserver name -> allowed-protocols string.
        :return: list of NFS share model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            nfs_info = self.ssh_do_exec(constant.NFS_SHARE_SHOW_COMMAND)
            nfs_list = []
            fs_map_list = []
            Tools.split_value_map_list(nfs_info, fs_map_list, split=':')
            for fs_map in fs_map_list:
                if fs_map and 'VserverName' in fs_map.keys():
                    # NOTE(review): protocol may be None when the vserver is
                    # missing from protocol_map, which would make the `in`
                    # test below raise TypeError — confirm callers always
                    # populate the map.
                    protocol = protocol_map.get(fs_map['VserverName'])
                    if constants.ShareProtocol.NFS in protocol:
                        fs_id = self.get_fs_id(fs_map['VserverName'],
                                               fs_map['VolumeName'])
                        share_name = \
                            fs_map['VserverName'] + '/' + fs_map['VolumeName']
                        qt_id = self.get_qt_id(fs_map['VserverName'],
                                               fs_map['VolumeName'], '')
                        qtree_id = None
                        for qtree in qtree_list:
                            if qtree['native_qtree_id'] == qt_id:
                                qtree_id = qt_id
                            # Emit one extra share per named qtree that
                            # belongs to this file system.
                            if fs_id == qtree['native_filesystem_id']\
                                    and qtree['name'] != ""\
                                    and qtree['name'] != \
                                    qtree['native_qtree_id']:
                                qt_share_name = \
                                    share_name + '/' + qtree['name']
                                share = {
                                    'name':
                                    qt_share_name,
                                    'storage_id':
                                    storage_id,
                                    'native_share_id':
                                    qt_share_name + '_' +
                                    constants.ShareProtocol.NFS,
                                    'native_qtree_id':
                                    qtree['native_qtree_id'],
                                    'native_filesystem_id':
                                    qtree['native_filesystem_id'],
                                    'path':
                                    qtree['path'],
                                    'protocol':
                                    constants.ShareProtocol.NFS
                                }
                                nfs_list.append(share)
                        # Volume-level share entry for the export itself.
                        share = {
                            'name':
                            share_name,
                            'storage_id':
                            storage_id,
                            'native_share_id':
                            share_name + '_' + constants.ShareProtocol.NFS,
                            'native_qtree_id':
                            qtree_id,
                            'native_filesystem_id':
                            fs_id,
                            'path':
                            fs_map['JunctionPath'],
                            'protocol':
                            constants.ShareProtocol.NFS
                        }
                        nfs_list.append(share)
            return nfs_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage nfs share from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err
        except Exception as err:
            err_msg = "Failed to get storage nfs share from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_cifs_shares(self, storage_id, vserver_name, qtree_list,
                        protocol_map):
        """Build CIFS share models for one vserver.

        :param storage_id: delfin storage identifier stamped on each model.
        :param vserver_name: vserver whose CIFS shares are queried.
        :param qtree_list: qtree models used to resolve a share's qtree id.
        :param protocol_map: vserver name -> allowed-protocols string.
        :return: list of CIFS share model dicts.
        """
        shares_list = []
        share_info = self.ssh_do_exec(
            (constant.CIFS_SHARE_SHOW_DETAIL_COMMAND % {
                'vserver_name': vserver_name
            }))
        share_map_list = []
        Tools.split_value_map_list(share_info, share_map_list, split=':')
        for share_map in share_map_list:
            # '-' marks administrative shares with no backing volume.
            if share_map and 'VolumeName' in share_map.keys() and \
                    share_map['VolumeName'] != '-':
                protocol_str = protocol_map.get(share_map['Vserver'])
                fs_id = self.get_fs_id(share_map['Vserver'],
                                       share_map['VolumeName'])
                share_id = fs_id + '_' + share_map['Share'] + '_'
                qtree_id = None
                # Derive the qtree name from the last path component; when
                # it equals the volume name the share points at the volume
                # root, i.e. the unnamed default qtree.
                for qtree in qtree_list:
                    name_array = share_map['Path'].split('/')
                    if len(name_array) > 0:
                        qtree_name = name_array[len(name_array) - 1]
                        if qtree_name == share_map['VolumeName']:
                            qtree_name = ''
                        qt_id = self.get_qt_id(share_map['Vserver'],
                                               share_map['VolumeName'],
                                               qtree_name)
                    else:
                        break
                    if qtree['native_qtree_id'] == qt_id:
                        qtree_id = qt_id
                        break
                if constants.ShareProtocol.CIFS in protocol_str:
                    share = {
                        'name': share_map['Share'],
                        'storage_id': storage_id,
                        'native_share_id':
                        share_id + constants.ShareProtocol.CIFS,
                        'native_qtree_id': qtree_id,
                        'native_filesystem_id': fs_id,
                        'path': share_map['Path'],
                        'protocol': constants.ShareProtocol.CIFS
                    }
                    shares_list.append(share)
        return shares_list

    def list_shares(self, storage_id):
        """Collect CIFS shares per vserver plus all NFS shares.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: combined list of share model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            shares = []
            qtrees = self.list_qtrees(None)
            # Map each vserver to the protocols it allows.
            protocol_text = self.ssh_do_exec(
                constant.SHARE_AGREEMENT_SHOW_COMMAND)
            vserver_protocols = {}
            for row in self.get_table_data(protocol_text):
                cells = row.split()
                if len(cells) > 1:
                    vserver_protocols[cells[0]] = cells[1]
            # CIFS shares are listed one vserver at a time.
            vserver_text = self.ssh_do_exec(constant.VSERVER_SHOW_COMMAND)
            for row in self.get_table_data(vserver_text):
                cells = row.split()
                if len(cells) > 1:
                    shares += self.get_cifs_shares(
                        storage_id, cells[0], qtrees, vserver_protocols)
            shares += self.get_nfs_shares(storage_id, qtrees,
                                          vserver_protocols)
            return shares
        except exception.DelfinException as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_filesystems(self, storage_id):
        """Return all file systems, wrapping unexpected errors.

        :param storage_id: delfin storage identifier passed through.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            return self.get_filesystems(storage_id)
        except exception.DelfinException as delfin_err:
            message = ("Failed to get storage volume from "
                       "netapp cmode: %s" % six.text_type(delfin_err))
            LOG.error(message)
            raise delfin_err
        except Exception as unknown_err:
            message = ("Failed to get storage volume from "
                       "netapp cmode: %s" % six.text_type(unknown_err))
            LOG.error(message)
            raise exception.InvalidResults(message)

    def list_quotas(self, storage_id):
        """Query quota rules and map them to delfin quota models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of quota model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            quota_list = []
            quotas_info = self.ssh_do_exec(constant.QUOTA_SHOW_DETAIL_COMMAND)
            quota_map_list = []
            Tools.split_value_map_list(quotas_info, quota_map_list, ":")
            for quota_map in quota_map_list:
                user_group_name = None
                if quota_map and 'VolumeName' in quota_map.keys():
                    quota_id = \
                        quota_map['Vserver'] + '_' + \
                        quota_map['VolumeName'] + '_' + \
                        quota_map['Type'] + '_' + \
                        quota_map['QtreeName'] + '_' + \
                        quota_map['Target']
                    # Renamed from `type` to avoid shadowing the builtin.
                    quota_type = constant.QUOTA_TYPE.get(quota_map['Type'])
                    qt_id = self.get_qt_id(quota_map['Vserver'],
                                           quota_map['VolumeName'], '')
                    if quota_type == 'tree' and quota_map['Target'] != '':
                        qt_id += '/' + quota_map['Target']
                    else:
                        # BUGFIX: the original `type == 'user' or 'group'`
                        # was always truthy ('group' is a non-empty string),
                        # so every non-tree rule got a user_group_name.
                        # Test membership explicitly instead.
                        if quota_type in ('user', 'group'):
                            user_group_name = quota_map['Target']
                        if quota_map['QtreeName'] != '':
                            qt_id += '/' + quota_map['QtreeName']
                    fs_id = self.get_fs_id(quota_map['Vserver'],
                                           quota_map['VolumeName'])
                    quota = {
                        'native_quota_id':
                        quota_id,
                        'type':
                        quota_type,
                        'storage_id':
                        storage_id,
                        'native_filesystem_id':
                        fs_id,
                        'native_qtree_id':
                        qt_id,
                        'capacity_hard_limit':
                        self.get_size(quota_map['DiskLimit']),
                        'capacity_soft_limit':
                        self.get_size(quota_map['SoftDiskLimit']),
                        # '-' means the limit is not configured on the array;
                        # keep the marker rather than failing int().
                        'file_hard_limit':
                        int(quota_map['FilesLimit'])
                        if quota_map['FilesLimit'] != '-' else '-',
                        'file_soft_limit':
                        int(quota_map['SoftFilesLimit'])
                        if quota_map['SoftFilesLimit'] != '-' else '-',
                        'file_count':
                        None,
                        'used_capacity':
                        None,
                        'user_group_name':
                        user_group_name
                    }
                    quota_list.append(quota)
            return quota_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_alert_sources(self):
        """Return alert-source hosts: all node IPs plus the management IP.

        :return: list of ``{'host': ip}`` dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            sources = []
            mgt_text = self.ssh_pool.do_exec(constant.MGT_IP_COMMAND)
            node_text = self.ssh_pool.do_exec(constant.NODE_IP_COMMAND)
            mgt_rows = self.get_table_data(mgt_text)
            node_rows = self.get_table_data(node_text)
            # Node rows with exactly 3 columns carry the IP in column 3.
            for row in node_rows:
                cells = row.split()
                if len(cells) == 3:
                    sources.append({'host': cells[2]})
            # The cluster management IP comes from the first table row.
            sources.append({'host': mgt_rows[0].split()[2]})
            return sources
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
Exemple #8
0
class NetAppHandler(object):
    # SNMP OIDs read out of incoming trap payloads.
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'
    # Multiplier for converting seconds to milliseconds.
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        """Create the handler with an SSH connection pool.

        :param kwargs: access info forwarded verbatim to SSHPool.
        """
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def parse_alert(alert):
        """Convert a raw SNMP trap payload into a delfin alert model.

        The trap data is expected as ``"<name>:<description>"``; traps whose
        name is not in ``constant.SEVERITY_MAP`` are ignored and an empty
        dict is returned.

        :param alert: dict of OID -> value from the SNMP trap.
        :return: alert model dict, or ``{}`` when the trap is not recognized.
        :raises exception.InvalidResults: on any parsing failure.
        """
        try:
            trap_text = alert.get(NetAppHandler.OID_TRAP_DATA)
            fields = trap_text.split(":")
            parsed = {}
            if len(fields) > 1:
                trap_name = fields[0]
                detail = fields[1]
                if constant.SEVERITY_MAP.get(trap_name):
                    # Combine payload and timestamp so repeated traps still
                    # get distinct de-duplication keys.
                    match_key = hashlib.md5(
                        (alert.get(NetAppHandler.OID_TRAP_DATA) +
                         str(time.time())).encode()).hexdigest()
                    parsed = {
                        'alert_id': trap_name,
                        'alert_name': trap_name,
                        'severity': constants.Severity.CRITICAL,
                        'category': constants.Category.EVENT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time': int(time.time()),
                        'description': detail,
                        'match_key': match_key,
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': ''
                    }
            return parsed
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def login(self):
        """Validate SSH access by running a harmless 'version' command.

        :raises Exception: whatever the SSH layer raised, after logging it.
        """
        try:
            self.ssh_pool.do_exec('version')
        except Exception as err:
            LOG.error("Failed to login netapp %s" % (six.text_type(err)))
            raise err

    def get_storage(self):
        """Assemble the top-level storage model for the cluster.

        Capacities are aggregated from the disk and pool lists; identity
        fields come from cluster/controller/version CLI output.

        :return: storage model dict.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            raw_capacity = total_capacity = used_capacity = free_capacity = 0
            controller_map = {}
            system_info = self.ssh_pool.do_exec(constant.CLUSTER_SHOW_COMMAND)
            version_info = self.ssh_pool.do_exec(constant.VERSION_SHOW_COMMAND)
            status_info = self.ssh_pool.do_exec(
                constant.STORAGE_STATUS_COMMAND)
            controller_info = self.ssh_pool.do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            # Only the first controller section (index 1, after the header)
            # is parsed; its model/location represent the whole cluster.
            controller_array = controller_info.split(
                constant.CONTROLLER_SPLIT_STR)
            Tools.split_value_map(controller_array[1], controller_map, ":")
            version_array = version_info.split('\r\n')
            version = version_array[0].split(":")
            # NOTE(review): the status is taken from the 3rd output line —
            # index-based parsing assumes a fixed CLI layout; confirm.
            status = constant.STORAGE_STATUS.get(status_info.split("\r\n")[2])
            disk_list = self.get_disks(None)
            pool_list = self.list_storage_pools(None)
            storage_map = {}
            Tools.split_value_map(system_info, storage_map, split=':')
            # Raw capacity is the sum over physical disks; usable totals are
            # summed over pools/aggregates.
            for disk in disk_list:
                raw_capacity += disk['capacity']
            for pool in pool_list:
                total_capacity += pool['total_capacity']
                free_capacity += pool['free_capacity']
                used_capacity += pool['used_capacity']
            storage_model = {
                "name": storage_map['ClusterName'],
                "vendor": constant.STORAGE_VENDOR,
                "model": controller_map['Model'],
                "status": status,
                "serial_number": storage_map['ClusterSerialNumber'],
                "firmware_version": version[0],
                "location": controller_map['Location'],
                "total_capacity": total_capacity,
                "raw_capacity": raw_capacity,
                "used_capacity": used_capacity,
                "free_capacity": free_capacity
            }
            return storage_model
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_aggregate(self, storage_id):
        """Query aggregates and map them to delfin storage-pool models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of pool model dicts, one per aggregate.
        """
        agg_list = []
        agg_info = self.ssh_pool.do_exec(
            constant.AGGREGATE_SHOW_DETAIL_COMMAND)
        # First split element is the command header; aggregates follow.
        agg_array = agg_info.split(constant.AGGREGATE_SPLIT_STR)
        for agg in agg_array[1:]:
            # BUGFIX: build a fresh map for every aggregate. The original
            # reused one dict across iterations, so keys parsed from a
            # previous aggregate could leak into the next one whenever a
            # field was missing from its section.
            agg_map = {}
            Tools.split_value_map(agg, agg_map, split=':')
            status = constant.AGGREGATE_STATUS.get(agg_map['State'])
            pool_model = {
                # NOTE(review): key 'e' is presumably what split_value_map
                # produces for the aggregate-name field — TODO confirm
                # against the parser's key normalization.
                'name':
                agg_map['e'],
                'storage_id':
                storage_id,
                'native_storage_pool_id':
                agg_map['UUIDString'],
                'description':
                '',
                'status':
                status,
                'storage_type':
                constants.StorageType.UNIFIED,
                'total_capacity':
                int(Tools.get_capacity_size(agg_map['Size'])),
                'used_capacity':
                int(Tools.get_capacity_size(agg_map['UsedSize'])),
                'free_capacity':
                int(Tools.get_capacity_size(agg_map['AvailableSize'])),
            }
            agg_list.append(pool_model)
        return agg_list

    def get_pool(self, storage_id):
        """Query storage pools and map them to delfin pool models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of pool model dicts.
        """
        pool_list = []
        pool_info = self.ssh_pool.do_exec(constant.POOLS_SHOW_DETAIL_COMMAND)
        # First split element is the command header; pools follow.
        pool_array = pool_info.split(constant.POOLS_SPLIT_STR)
        for pool_str in pool_array[1:]:
            # BUGFIX: build a fresh map for every pool. The original reused
            # one dict across iterations, so keys parsed from a previous
            # pool could leak into the next one whenever a field was missing
            # from its section.
            pool_map = {}
            Tools.split_value_map(pool_str, pool_map, split=':')
            status = constants.StoragePoolStatus.ABNORMAL
            if pool_map['IsPoolHealthy?'] == 'true':
                status = constants.StoragePoolStatus.NORMAL
            pool_model = {
                # NOTE(review): key 'ame' is presumably what split_value_map
                # produces for the pool-name field — TODO confirm against
                # the parser's key normalization.
                'name':
                pool_map['ame'],
                'storage_id':
                storage_id,
                'native_storage_pool_id':
                pool_map['UUIDofStoragePool'],
                'description':
                '',
                'status':
                status,
                'storage_type':
                constants.StorageType.UNIFIED,
                'total_capacity':
                int(Tools.get_capacity_size(pool_map['StoragePoolTotalSize'])),
                # Used = total - usable; the CLI reports no used-size field.
                'used_capacity':
                int(Tools.get_capacity_size(
                    pool_map['StoragePoolTotalSize'])) -
                int(Tools.get_capacity_size(
                    pool_map['StoragePoolUsableSize'])),
                'free_capacity':
                int(Tools.get_capacity_size(pool_map['StoragePoolUsableSize']))
            }
            pool_list.append(pool_model)
        return pool_list

    def list_storage_pools(self, storage_id):
        """Return aggregates followed by storage pools as one list.

        :param storage_id: delfin storage identifier passed through.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            pools = self.get_pool(storage_id)
            aggregates = self.get_aggregate(storage_id)
            return aggregates + pools
        except exception.DelfinException as delfin_err:
            message = ("Failed to get storage pool from "
                       "netapp cmode: %s" % six.text_type(delfin_err))
            LOG.error(message)
            raise delfin_err
        except Exception as unknown_err:
            message = ("Failed to get storage pool from "
                       "netapp cmode: %s" % six.text_type(unknown_err))
            LOG.error(message)
            raise exception.InvalidResults(message)

    def list_volumes(self, storage_id):
        """Query LUNs and map them to delfin volume models.

        :param storage_id: delfin storage identifier stamped on each model.
        :return: list of volume model dicts.
        :raises exception.DelfinException: re-raised driver errors.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            volume_list = []
            volume_info = self.ssh_pool.do_exec(
                constant.LUN_SHOW_DETAIL_COMMAND)
            # First split element is the command header; LUNs follow.
            volume_array = volume_info.split(constant.LUN_SPLIT_STR)
            fs_list = self.get_filesystems(storage_id)
            for volume_str in volume_array[1:]:
                # BUGFIX: build a fresh map per LUN; the original reused one
                # dict so stale keys could leak between entries.
                volume_map = {}
                Tools.split_value_map(volume_str, volume_map, split=':')
                # BUGFIX: the original `is not None or != {}` was always
                # True (`or` with a non-empty comparison); skip sections
                # that parsed to nothing instead.
                if volume_map:
                    pool_id = ''
                    status = 'normal' if volume_map['State'] == 'online' \
                        else 'offline'
                    # Resolve the owning pool through the file-system list.
                    for fs in fs_list:
                        if fs['name'] == volume_map['VolumeName']:
                            pool_id = fs['native_pool_id']
                    # Renamed from `type` to avoid shadowing the builtin.
                    vol_type = constants.VolumeType.THIN \
                        if volume_map['SpaceAllocation'] == 'enabled' \
                        else constants.VolumeType.THICK
                    volume_model = {
                        'name':
                        volume_map['LUNName'],
                        'storage_id':
                        storage_id,
                        'description':
                        '',
                        'status':
                        status,
                        'native_volume_id':
                        volume_map['LUNUUID'],
                        'native_storage_pool_id':
                        pool_id,
                        'wwn':
                        '',
                        'compressed':
                        '',
                        'deduplicated':
                        '',
                        'type':
                        vol_type,
                        'total_capacity':
                        int(Tools.get_capacity_size(volume_map['LUNSize'])),
                        'used_capacity':
                        int(Tools.get_capacity_size(volume_map['UsedSize'])),
                        'free_capacity':
                        int(Tools.get_capacity_size(volume_map['LUNSize'])) -
                        int(Tools.get_capacity_size(volume_map['UsedSize']))
                    }
                    volume_list.append(volume_model)
            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_events(self, query_para):
        """Collect EMS events and convert them into delfin alert models.

        :param query_para: optional dict with 'begin_time' and 'end_time'
            keys used to filter events by occurrence time; None disables
            filtering. Units presumably match ``occur_time`` (epoch
            seconds from ``time.mktime``) — TODO confirm against caller.
        """
        event_list = []
        event_info = self.ssh_pool.do_exec(constant.EVENT_SHOW_DETAIL_COMMAND)
        event_array = event_info.split(constant.ALTER_SPLIT_STR)
        # NOTE(review): one dict is reused for every event block, so a key
        # missing from a later event keeps the value of an earlier one.
        event_map = {}

        # The first split chunk is the command header, hence [1:].
        for event_str in event_array[1:]:
            Tools.split_value_map(event_str, event_map, split=':')
            # Parse the event timestamp into epoch seconds.
            occur_time = int(
                time.mktime(
                    time.strptime(event_map['Time'],
                                  constant.EVENT_TIME_TYPE)))
            if query_para is None or \
                    (query_para['begin_time']
                     <= occur_time
                     <= query_para['end_time']):
                alert_model = {
                    'alert_id':
                    event_map['Sequence#'],
                    'alert_name':
                    event_map['MessageName'],
                    # EMS events carry no severity here; reported critical.
                    'severity':
                    constants.Severity.CRITICAL,
                    'category':
                    constants.Category.EVENT,
                    'type':
                    constants.EventType.EQUIPMENT_ALARM,
                    'occur_time':
                    occur_time,
                    'description':
                    event_map['Event'],
                    # Dedup key: stable hash of sequence number + time.
                    'match_key':
                    hashlib.md5((event_map['Sequence#'] +
                                 str(occur_time)).encode()).hexdigest(),
                    'resource_type':
                    constants.DEFAULT_RESOURCE_TYPE,
                    'location':
                    event_map['Source']
                }
                event_list.append(alert_model)
        return event_list

    def get_alerts(self, query_para):
        """Collect health-monitor alerts and convert to delfin models.

        :param query_para: optional dict with 'begin_time' and 'end_time'
            keys used to filter alerts by indication time; None disables
            filtering.
        """
        alert_list = []
        alert_info = self.ssh_pool.do_exec(constant.ALTER_SHOW_DETAIL_COMMAND)
        alert_array = alert_info.split(constant.ALTER_SPLIT_STR)
        # NOTE(review): one dict is reused for every alert block, so a key
        # missing from a later alert keeps the value of an earlier one.
        alert_map = {}
        for alert_str in alert_array[1:]:
            Tools.split_value_map(alert_str, alert_map, split=':')
            # Parse the indication timestamp into epoch seconds.
            occur_time = int(
                time.mktime(
                    time.strptime(alert_map['IndicationTime'],
                                  constant.ALTER_TIME_TYPE)))
            if query_para is None or \
                    (query_para['begin_time']
                     <= occur_time
                     <= query_para['end_time']):
                alert_model = {
                    'alert_id':
                    alert_map['AlertID'],
                    'alert_name':
                    alert_map['ProbableCause'],
                    # Map the array's severity string to delfin severity.
                    'severity':
                    constant.ALERT_SEVERITY[alert_map['PerceivedSeverity']],
                    'category':
                    constants.Category.FAULT,
                    'type':
                    constants.EventType.EQUIPMENT_ALARM,
                    'occur_time':
                    occur_time,
                    'description':
                    alert_map['Description'],
                    # Dedup key: stable hash of alert id + occurrence time.
                    'match_key':
                    hashlib.md5((alert_map['AlertID'] +
                                 str(occur_time)).encode()).hexdigest(),
                    'resource_type':
                    constants.DEFAULT_RESOURCE_TYPE,
                    'location':
                    alert_map['AlertingResourceName']
                }
                alert_list.append(alert_model)
        return alert_list

    def list_alerts(self, query_para):
        """Return all alarms: EMS events plus health-monitor alerts.

        :param query_para: optional time-window dict forwarded to both
            alarm queries; None means no filtering.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            # The two alarm sources are queried separately and merged.
            return self.get_events(query_para) + self.get_alerts(query_para)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        """Clear (delete) an alert on the array by its alert id.

        :param alert: dict containing at least 'alert_id'.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            ssh_command = \
                constant.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.ssh_pool.do_exec(ssh_command)
        except exception.DelfinException as e:
            # Fixed: message previously said "Failed to get storage alert"
            # (copy-paste from list_alerts); this path clears an alert.
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disks(self, storage_id):
        """Return the physical disk list of the array as delfin models.

        Combines two CLI outputs: the disk detail listing (key/value
        blocks) and the physical disk table (columnar), matched by disk
        name to enrich speed, type and firmware.

        :param storage_id: delfin storage id the disks belong to.
        """
        disks_list = []
        physicals_list = []
        disks_info = self.ssh_pool.do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
        disks_array = disks_info.split(constant.DISK_SPLIT_STR)
        physicals_info = self.ssh_pool.do_exec(
            constant.DISK_SHOW_PHYSICAL_COMMAND)
        # NOTE(review): one dict is reused for every disk block; keys from
        # a previous disk may carry over if a field is missing.
        disks_map = {}
        # The physical table interleaves data rows; every second row
        # starting at index 2 is a disk row — TODO confirm format.
        physical_array = physicals_info.split('\r\n')
        for i in range(2, len(physical_array), 2):
            physicals_list.append(physical_array[i].split())
        for disk_str in disks_array[1:]:
            speed = physical_type = firmware = None
            Tools.split_value_map(disk_str, disks_map, split=':')
            logical_type = constant.DISK_LOGICAL. \
                get(disks_map['ContainerType'])
            """Map disk physical information"""
            # NOTE(review): 'k' is presumably the parsed disk-name field
            # produced by split_value_map — verify against the parser.
            for physical_info in physicals_list:
                if len(physical_info) > 6 \
                        and physical_info[0] == disks_map['k']:
                    physical_type = constant.DISK_TYPE.get(physical_info[1])
                    speed = physical_info[5]
                    firmware = physical_info[4]
                    break
            # A disk with no recorded errors is considered healthy; the
            # trailing colon in 'Errors:' mirrors the parser's key — TODO
            # confirm it is intentional.
            status = constants.DiskStatus.ABNORMAL
            if disks_map['Errors:'] is None or disks_map['Errors:'] == "":
                status = constants.DiskStatus.NORMAL
            disk_model = {
                'name': disks_map['k'],
                'storage_id': storage_id,
                'native_disk_id': disks_map['k'],
                'serial_number': disks_map['SerialNumber'],
                'manufacturer': disks_map['Vendor'],
                'model': disks_map['Model'],
                'firmware': firmware,
                'speed': speed,
                'capacity':
                int(Tools.get_capacity_size(disks_map['PhysicalSize'])),
                'status': status,
                'physical_type': physical_type,
                'logical_type': logical_type,
                'health_score': '',
                'native_disk_group_id': disks_map['Aggregate'],
                'location': '',
            }
            disks_list.append(disk_model)
        return disks_list

    def get_filesystems(self, storage_id):
        """Return the volume (filesystem) list of the array.

        Parses the volume detail output, resolves the owning pool from
        the aggregate name, and derives thin/thick type plus compression
        and deduplication flags.

        :param storage_id: delfin storage id the filesystems belong to.
        """
        fs_list = []
        fs_info = self.ssh_pool.do_exec(constant.FS_SHOW_DETAIL_COMMAND)
        fs_array = fs_info.split(constant.FS_SPLIT_STR)
        thin_fs_info = self.ssh_pool.do_exec(constant.THIN_FS_SHOW_COMMAND)
        pool_list = self.list_storage_pools(storage_id)
        thin_fs_array = thin_fs_info.split("\r\n")
        # NOTE: the map is shared across iterations; split_value_map
        # accumulates into it (original behavior preserved).
        fs_map = {}
        for fs_str in fs_array[1:]:
            # Renamed from 'type' to avoid shadowing the builtin.
            fs_type = constants.FSType.THICK
            Tools.split_value_map(fs_str, fs_map, split=':')
            # Fixed: the original used 'or', which is always true; an
            # empty parse result must be skipped.
            if fs_map:
                pool_id = ""
                # Resolve the pool id from the aggregate name.
                for pool in pool_list:
                    if pool['name'] == fs_map['AggregateName']:
                        pool_id = pool['native_storage_pool_id']
                # Dedup is active iff any space was actually saved.
                deduplicated = \
                    fs_map['SpaceSavedbyDeduplication'] != '0B'
                if len(thin_fs_array) > 2:
                    for thin_vol in thin_fs_array[2:]:
                        thin_array = thin_vol.split()
                        if len(thin_array) > 4:
                            if thin_array[1] == fs_map['VolumeName']:
                                # Fixed: use the filesystem enum, not the
                                # volume enum, for a filesystem type.
                                fs_type = constants.FSType.THIN
                compressed = \
                    fs_map['VolumeContainsSharedorCompressedData'] != 'false'
                status = constant.FS_STATUS.get(fs_map['VolumeState'])
                # Compute each capacity once; free = total - used.
                total_capacity = int(
                    Tools.get_capacity_size(fs_map['VolumeSize']))
                used_capacity = int(
                    Tools.get_capacity_size(fs_map['UsedSize']))
                fs_model = {
                    'name': fs_map['VolumeName'],
                    'storage_id': storage_id,
                    'native_filesystem_id': fs_map['VolumeName'],
                    'native_pool_id': pool_id,
                    'compressed': compressed,
                    'deduplicated': deduplicated,
                    'worm': fs_map['SnapLockType'],
                    'status': status,
                    'type': fs_type,
                    'total_capacity': total_capacity,
                    'used_capacity': used_capacity,
                    'free_capacity': total_capacity - used_capacity
                }
                fs_list.append(fs_model)
        return fs_list
# Exemple #9 (0)
class SSHHandler(object):
    """SSH CLI helper for HPE 3PAR storage systems.

    Builds 3PAR CLI command strings, executes them over a pooled SSH
    connection and parses the tabular command output into lists/maps
    consumed by the driver layer.
    """

    HPE3PAR_COMMAND_SHOWWSAPI = 'showwsapi'
    HPE3PAR_COMMAND_CHECKHEALTH = 'checkhealth vv vlun task snmp ' \
                                  'port pd node network ld dar cage cabling'
    HPE3PAR_COMMAND_SHOWALERT = 'showalert -d'
    HPE3PAR_COMMAND_REMOVEALERT = 'removealert -f %s'
    ALERT_NOT_EXIST_MSG = 'Unable to read alert'
    HPE3PAR_COMMAND_SHOWNODE = 'shownode'
    HPE3PAR_COMMAND_SHOWNODE_CPU = 'shownode -cpu'
    HPE3PAR_COMMAND_SHOWEEPROM = 'showeeprom'
    HPE3PAR_COMMAND_SHOWPD = 'showpd'
    HPE3PAR_COMMAND_SHOWPD_I = 'showpd -i'
    HPE3PAR_COMMAND_SHOWPORT = 'showport'
    HPE3PAR_COMMAND_SHOWPORT_I = 'showport -i'
    HPE3PAR_COMMAND_SHOWPORT_PAR = 'showport -par'
    HPE3PAR_COMMAND_SHOWPORT_C = 'showport -c'
    HPE3PAR_COMMAND_SHOWPORT_ISCSI = 'showport -iscsi'
    HPE3PAR_COMMAND_SHOWPORT_RCIP = 'showport -rcip'
    HPE3PAR_COMMAND_SHOWPORT_FCOE = 'showport -fcoe'
    HPE3PAR_COMMAND_SHOWPORT_FS = 'showport -fs'
    HPE3PAR_COMMAND_SHOWHOSTSET_D = 'showhostset -d'
    HPE3PAR_COMMAND_SHOWVVSET_D = 'showvvset -d'
    HPE3PAR_COMMAND_SHOWHOST_D = 'showhost -d'
    # Fixed: HPE3PAR_COMMAND_SHOWVV was defined twice; kept once.
    HPE3PAR_COMMAND_SHOWVV = 'showvv'
    HPE3PAR_COMMAND_SHOWVLUN_T = 'showvlun -t'

    HPE3PAR_COMMAND_SRSTATPORT = 'srstatport -attime -groupby ' \
                                 'PORT_N,PORT_S,PORT_P -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATPD = 'srstatpd -attime -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATVV = 'srstatvv -attime -groupby VVID,VV_NAME' \
                               ' -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATPD_ATTIME = 'srstatpd -attime'

    def __init__(self, **kwargs):
        # Keep the raw access info and build a reusable SSH pool from it.
        self.kwargs = kwargs
        self.ssh_pool = SSHPool(**kwargs)

    def login(self, context):
        """Test the SSH connection and return the WSAPI version string."""
        version = ''
        try:
            # Renamed local 're' -> 'wsapi_info': it shadowed the module.
            wsapi_info = self.exec_command(
                SSHHandler.HPE3PAR_COMMAND_SHOWWSAPI)
            if wsapi_info:
                version = self.get_version(wsapi_info)
        except Exception as e:
            LOG.error("Login error: %s", six.text_type(e))
            raise e
        return version

    def get_version(self, wsapi_infos):
        """Extract the WSAPI version from the showwsapi output."""
        version = ''
        try:
            version_list = self.parse_datas_to_list(wsapi_infos,
                                                    consts.VERSION_PATTERN)
            if version_list and version_list[0]:
                version = version_list[0].get('version')
        except Exception as e:
            # Best effort: a parse failure leaves version empty.
            LOG.error("Get version error: %s, wsapi info: %s" %
                      (six.text_type(e), wsapi_infos))
        return version

    def get_health_state(self):
        """Check the hardware and software health
           status of the storage system

           return: System is healthy
        """
        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_CHECKHEALTH)

    def get_all_alerts(self):
        """Return the raw detailed alert listing (showalert -d)."""
        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWALERT)

    def remove_alerts(self, alert_id):
        """Clear alert from storage system.
            Currently not implemented   removes command : removealert
        """
        utils.check_ssh_injection([alert_id])
        command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id
        res = self.exec_command(command_str)
        if res:
            # Any output other than "alert not found" is an error.
            if self.ALERT_NOT_EXIST_MSG not in res:
                raise exception.InvalidResults(six.text_type(res))
            LOG.warning("Alert %s doesn't exist.", alert_id)

    def get_controllers(self):
        """Return controller-node rows parsed from 'shownode'."""
        para_map = {'command': 'parse_node_table'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.NODE_PATTERN,
                                       para_map=para_map)

    def get_controllers_cpu(self):
        """Return per-node CPU-model counts parsed from 'shownode -cpu'."""
        para_map = {'command': 'parse_node_cpu'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE_CPU,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.CPU_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_controllers_version(self):
        """Return a node-id -> OS-version map parsed from 'showeeprom'."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWEEPROM,
                                       self.parse_node_version,
                                       throw_excep=False)

    def parse_node_version(self, resource_info, pattern_str, para_map=None):
        """Parse showeeprom output into {node_id: os_version}.

        Node blocks are separated by blank lines; each block contributes
        its 'Node:' and 'OS version:' fields.
        """
        node_version_map = {}
        node_info_map = {}
        try:
            obj_infos = resource_info.split('\n')
            for obj_info in obj_infos:
                str_line = obj_info.strip()
                if str_line:
                    if str_line.startswith('Node:'):
                        str_info = self.split_str_by_colon(str_line)
                        node_info_map['node_id'] = str_info[1]
                    if str_line.startswith('OS version:'):
                        str_info = self.split_str_by_colon(str_line)
                        node_info_map['node_os_version'] = str_info[1]
                else:
                    # Blank line terminates the current node block.
                    if node_info_map:
                        node_version_map[node_info_map.get(
                            'node_id')] = node_info_map.get('node_os_version')
                        node_info_map = {}
        except Exception as e:
            # Fixed: the original built a tuple instead of a formatted
            # string ("...", six.text_type(e)).
            err_msg = "Analyse node version info error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return node_version_map

    def split_str_by_colon(self, str_line):
        """Split 'Key: value' into [normalized_key, value].

        The key is lower-cased with spaces replaced by '_' and brackets
        removed; the value keeps its text, stripped.
        """
        str_info = []
        if str_line:
            # str_info[0] is the parsed attribute name, there are some special
            # characters such as spaces, brackets, etc.,
            # str_info[1] is the value
            str_info = str_line.split(':', 1)
            str_info[0] = str_info[0].strip()
            str_info[0] = str_info[0].replace(" ", "_") \
                .replace("(", "").replace(")", "").lower()
            if len(str_info) > 1:
                str_info[1] = str_info[1].strip()
        return str_info

    def get_disks(self):
        """Return physical-disk rows parsed from 'showpd'."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPD,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.DISK_PATTERN)

    def get_disks_inventory(self):
        """Return a disk_id -> inventory dict from 'showpd -i'."""
        inventory_map = {}
        para_map = {'command': 'parse_disk_table'}
        inventorys = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPD_I,
            self.parse_datas_to_list,
            pattern_str=consts.DISK_I_PATTERN,
            para_map=para_map,
            throw_excep=False)
        for inventory in (inventorys or []):
            inventory_map[inventory.get('disk_id')] = inventory
        return inventory_map

    def get_ports(self):
        """Return port rows parsed from 'showport'."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.PORT_PATTERN)

    def get_ports_inventory(self):
        """Return a port -> last-column map from 'showport -i'."""
        para_map = {'key_position': 0, 'value_position': 'last'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_I,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_I_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_config(self):
        """Return a port -> column-4 map from 'showport -par'."""
        para_map = {'key_position': 0, 'value_position': 4}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_PAR,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_PER_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_iscsi(self):
        """Return an 'n:s:p' -> row map from 'showport -iscsi'."""
        iscsis_map = {}
        iscsis = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_ISCSI,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_ISCSI_PATTERN,
            throw_excep=False)
        for iscsi in (iscsis or []):
            iscsis_map[iscsi.get('n:s:p')] = iscsi
        return iscsis_map

    def get_ports_connected(self):
        """Return a port -> column-6 map from 'showport -c'."""
        para_map = {'key_position': 0, 'value_position': 6}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_C,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_C_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_rcip(self):
        """Return an 'n:s:p' -> row map from 'showport -rcip'."""
        rcip_map = {}
        rcips = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_RCIP,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_RCIP_PATTERN,
            throw_excep=False)
        for rcip in (rcips or []):
            rcip_map[rcip.get('n:s:p')] = rcip
        return rcip_map

    def get_ports_fs(self):
        """Return an 'n:s:p' -> row map from 'showport -fs'."""
        port_fs_map = {}
        port_fss = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FS,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_FS_PATTERN,
            throw_excep=False)
        for port_fs in (port_fss or []):
            port_fs_map[port_fs.get('n:s:p')] = port_fs
        return port_fs_map

    def get_ports_fcoe(self):
        """Return an 'n:s:p' -> row map from 'showport -fcoe'."""
        fcoe_map = {}
        fcoes = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FCOE,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_FCOE_PATTERN,
            throw_excep=False)
        for fcoe in (fcoes or []):
            fcoe_map[fcoe.get('n:s:p')] = fcoe
        return fcoe_map

    def parse_datas_to_list(self, resource_info, pattern_str, para_map=None):
        """Parse tabular CLI output into a list of row dicts.

        The line matching pattern_str supplies the column titles; later
        lines are data. para_map['command'] selects a specialized row
        parser; without it, rows whose column count equals the title
        count are zipped into {title: value} dicts.
        """
        obj_list = []
        # Sentinel larger than any real title count until a header is seen.
        titles_size = 9999
        try:
            pattern = re.compile(pattern_str)
            obj_infos = resource_info.split('\n')
            titles = []
            for obj_info in obj_infos:
                str_line = obj_info.strip()
                if str_line:
                    search_obj = pattern.search(str_line)
                    if search_obj:
                        titles = str_line.split()
                        titles_size = len(titles)
                    else:
                        str_info = str_line.split()
                        cols_size = len(str_info)
                        if para_map and para_map.get('command', '') \
                                == 'parse_disk_table':
                            obj_list = self.parse_disk_table(
                                cols_size, titles_size, str_info, obj_list,
                                titles)
                        elif para_map and para_map.get('command', '') \
                                == 'parse_node_table':
                            obj_list = self.parse_node_table(
                                cols_size, titles_size, str_info, obj_list,
                                titles)
                        elif para_map and para_map.get('command', '') \
                                == 'parse_metric_table':
                            # Dashed line ends the metric section.
                            if '---------------------------------' in str_line:
                                break
                            if 'Time:' in str_line:
                                collect_time = Tools.get_numbers_in_brackets(
                                    str_line, consts.SSH_COLLECT_TIME_PATTERN)
                                if collect_time:
                                    collect_time = int(collect_time) * units.k
                                else:
                                    collect_time = int(time.time() * units.k)
                                para_map['collect_time'] = collect_time
                            obj_list = self.parse_metric_table(
                                cols_size, titles_size, str_info, obj_list,
                                titles, para_map)
                        elif para_map and para_map.get('command', '') \
                                == 'parse_set_groups_table':
                            if '---------------------------------' in str_line:
                                break
                            obj_list = self.parse_set_groups_table(
                                cols_size, titles_size, str_info, obj_list)
                        elif para_map and para_map.get('command', '') \
                                == 'parse_view_table':
                            if '---------------------------------' in str_line:
                                break
                            obj_list = self.parse_view_table(
                                cols_size, titles_size, str_info, obj_list,
                                titles)
                        else:
                            if cols_size == titles_size:
                                obj_model = {}
                                for i in range(0, cols_size):
                                    key = titles[i].lower().replace('-', '')
                                    obj_model[key] = str_info[i]
                                if obj_model:
                                    obj_list.append(obj_model)
        except Exception as e:
            # Fixed: the original built a tuple instead of a formatted
            # string ("...", six.text_type(e)).
            err_msg = "Analyse datas to list error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return obj_list

    def parse_datas_to_map(self, resource_info, pattern_str, para_map=None):
        """Parse tabular CLI output into a single key -> value dict.

        para_map selects the key/value columns ('key_position',
        'value_position', where 'last' means the final column) or a
        specialized parser via para_map['command'].
        """
        obj_model = {}
        # Sentinel larger than any real title count until a header is seen.
        titles_size = 9999
        try:
            pattern = re.compile(pattern_str)
            obj_infos = resource_info.split('\n')
            for obj_info in obj_infos:
                str_line = obj_info.strip()
                if str_line:
                    search_obj = pattern.search(str_line)
                    if search_obj:
                        titles = str_line.split()
                        titles_size = len(titles)
                    else:
                        str_info = str_line.split()
                        cols_size = len(str_info)
                        if para_map and para_map.get('command',
                                                     '') == 'parse_node_cpu':
                            obj_model = self.parse_node_cpu(
                                cols_size, titles_size, str_info, obj_model)
                        else:
                            if cols_size >= titles_size:
                                key_position = para_map.get('key_position')
                                value_position = para_map.get('value_position')
                                if para_map.get('value_position') == 'last':
                                    value_position = cols_size - 1
                                obj_model[str_info[key_position]] = str_info[
                                    value_position]
        except Exception as e:
            # Fixed: the original built a tuple instead of a formatted
            # string ("...", six.text_type(e)).
            err_msg = "Analyse datas to map error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return obj_model

    def parse_disk_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Parse one 'showpd -i' row into a disk inventory dict.

        The manufacturer name may contain spaces, so columns are anchored
        on the 'FW_Rev' title position.
        """
        if cols_size >= titles_size:
            fw_rev_index = self.get_index_of_key(titles, 'FW_Rev')
            if fw_rev_index:
                inventory_map = {
                    'disk_id': str_info[0],
                    'disk_mfr': ' '.join(str_info[4:fw_rev_index - 2]),
                    'disk_model': str_info[fw_rev_index - 2],
                    'disk_serial': str_info[fw_rev_index - 1],
                    'disk_fw_rev': str_info[fw_rev_index]
                }
                obj_list.append(inventory_map)
        return obj_list

    def parse_node_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Parse one 'shownode' row; duplicate 'Mem(MB)' titles are
        disambiguated with a per-occurrence prefix."""
        if cols_size >= titles_size:
            obj_model = {}
            num_prefix = 1
            for i in range(cols_size):
                key_prefix = ''
                key = titles[i].lower().replace('-', '')
                if key == 'mem(mb)':
                    key_prefix = consts.SSH_NODE_MEM_TYPE.get(num_prefix)
                    num_prefix += 1
                key = '%s%s' % (key_prefix, key)
                obj_model[key] = str_info[i]
            if obj_model:
                obj_list.append(obj_model)
        return obj_list

    def parse_node_cpu(self, cols_size, titles_size, str_info, obj_map):
        """Accumulate per-node CPU counts: {node_id: {cpu_info: count}}."""
        if cols_size >= titles_size:
            node_id = str_info[0]
            cpu_info = str_info[4]
            if obj_map.get(node_id):
                obj_map[node_id][cpu_info] = obj_map.get(node_id).get(
                    cpu_info, 0) + 1
            else:
                cpu_info_map = {}
                cpu_info_map[cpu_info] = 1
                obj_map[node_id] = cpu_info_map
        return obj_map

    def parse_metric_table(self, cols_size, titles_size, str_info, obj_list,
                           titles, para_map):
        """Parse one srstat* metric row; duplicate 'rd' titles are
        disambiguated with a per-metric-group prefix."""
        if cols_size == titles_size:
            obj_model = {}
            metric_type_num = 1
            key_prefix = ''
            for i in range(0, cols_size):
                key = titles[i].lower().replace('-', '')
                if key == 'rd':
                    key_prefix = consts.SSH_METRIC_TYPE.get(metric_type_num)
                    metric_type_num += 1
                key = '%s%s' % (key_prefix, key)
                obj_model[key] = str_info[i]
            if obj_model:
                if para_map and para_map.get('collect_time'):
                    obj_model['collect_time'] = para_map.get('collect_time')
                obj_list.append(obj_model)
        return obj_list

    def get_index_of_key(self, titles_list, key):
        """Return the index of the first title containing key, else None."""
        if titles_list:
            for title in titles_list:
                if key in title:
                    return titles_list.index(title)
        return None

    def get_resources_info(self,
                           command,
                           parse_type,
                           pattern_str=None,
                           para_map=None,
                           throw_excep=True):
        """Run a CLI command and parse its output with parse_type.

        :param command: CLI command string to execute.
        :param parse_type: parser callable (e.g. parse_datas_to_list).
        :param pattern_str: regex identifying the title line.
        :param para_map: extra parser parameters.
        :param throw_excep: when False, parse errors are logged and None
            is returned instead of raising.
        """
        # Renamed local 're' -> 'result': it shadowed the re module.
        result = self.exec_command(command)
        resources_info = None
        try:
            if result:
                resources_info = parse_type(result, pattern_str,
                                            para_map=para_map)
        except Exception as e:
            LOG.error("Get %s info error: %s" % (command, six.text_type(e)))
            if throw_excep:
                raise e
        return resources_info

    def exec_command(self, command):
        """Execute a CLI command and screen the output for CLI errors."""
        # Renamed local 're' -> 'res': it shadowed the re module.
        res = self.ssh_pool.do_exec(command)
        if res:
            if 'invalid command name' in res or 'Invalid option' in res:
                LOG.warning(res)
                raise NotImplementedError(res)
            elif 'Too many local CLI connections' in res:
                LOG.error("command %s failed: %s" % (command, res))
                raise exception.StorageBackendException(res)
        return res

    def get_volumes(self):
        """Return virtual-volume rows parsed from 'showvv'."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWVV,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.VOLUME_PATTERN)

    def get_port_metrics(self, start_time, end_time):
        """Return port performance rows for [start_time, end_time] (ms)."""
        command = SSHHandler.HPE3PAR_COMMAND_SRSTATPORT % (int(
            start_time / units.k), int(end_time / units.k))
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATPORT_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def get_disk_metrics(self, start_time, end_time):
        """Return disk performance rows; without a window, the latest
        sample ('-attime') is used."""
        command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD_ATTIME
        if start_time and end_time:
            command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD % (int(
                start_time / units.k), int(end_time / units.k))
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATPD_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def get_volume_metrics(self, start_time, end_time):
        """Return volume performance rows for [start_time, end_time] (ms)."""
        command = SSHHandler.HPE3PAR_COMMAND_SRSTATVV % (int(
            start_time / units.k), int(end_time / units.k))
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATVV_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def list_storage_host_groups(self):
        """Return host-set rows parsed from 'showhostset -d'."""
        para_map = {'command': 'parse_set_groups_table'}
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWHOSTSET_D,
            self.parse_datas_to_list,
            pattern_str=consts.HOST_OR_VV_SET_PATTERN,
            para_map=para_map)

    def list_volume_groups(self):
        """Return volume-set rows parsed from 'showvvset -d'."""
        para_map = {'command': 'parse_set_groups_table'}
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWVVSET_D,
            self.parse_datas_to_list,
            pattern_str=consts.HOST_OR_VV_SET_PATTERN,
            para_map=para_map)

    def parse_set_groups_table(self, cols_size, titles_size, str_info,
                               obj_list):
        """Parse showhostset/showvvset rows; single-column continuation
        lines append extra members to the previous set."""
        if cols_size >= titles_size:
            members = []
            # '-' placeholders mean "no member".
            value = str_info[2].replace('-', '')
            if value:
                members = [str_info[2]]
            obj_model = {
                'id': str_info[0],
                'name': str_info[1],
                'members': members,
                'comment': (" ".join(str_info[3:])).replace('-', ''),
            }
            obj_list.append(obj_model)
        elif obj_list and cols_size == 1:
            value = str_info[0].replace('-', '')
            if value:
                obj_model = obj_list[-1]
                if obj_model and obj_model.get('members'):
                    obj_model.get('members').append(str_info[0])
                else:
                    members = [str_info[0]]
                    obj_model['members'] = members

        return obj_list

    def parse_view_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Parse a showvlun-style row into a {title: value} dict."""
        if cols_size >= titles_size:
            obj_model = {}
            for i in range(titles_size):
                key = titles[i].lower().replace('-', '')
                obj_model[key] = str_info[i]
            if obj_model:
                obj_list.append(obj_model)
        return obj_list

    def get_resources_ids(self, command, pattern_str, para_map=None):
        """Return a name -> id map for a listing command (columns 1, 0)."""
        if not para_map:
            para_map = {'key_position': 1, 'value_position': 0}
        return self.get_resources_info(command,
                                       self.parse_datas_to_map,
                                       pattern_str=pattern_str,
                                       para_map=para_map,
                                       throw_excep=False)

    def list_storage_host_initiators(self):
        """Return host initiator rows parsed from 'showhost -d'."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWHOST_D,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.HOST_OR_VV_PATTERN)

    def list_masking_views(self):
        """Return VLUN (masking view) rows parsed from 'showvlun -t'."""
        para_map = {'command': 'parse_view_table'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWVLUN_T,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.VLUN_PATTERN,
                                       para_map=para_map)
# Exemple #10 (0)
class SSHHandler(object):
    """SSH access helper for IBM A9000R arrays.

    Fixes over the previous revision: the duplicated ``exec_ssh_command``
    definition (a copy-paste from the Storwize handler that silently
    shadowed the first one and reported the wrong array family in its
    error message) is removed, and ``do_exec`` no longer raises
    UnboundLocalError when given a None command or connection.
    """

    # CLI command printing the machine_serial_number config parameter.
    CONFIG_GET = 'config_get [ name=machine_serial_number ]'
    # CLI command printing the current system version.
    VERSION_GET = 'version_get'

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # Pool of reusable SSH connections built from the access info.
        self.ssh_pool = SSHPool(**kwargs)

    def login(self):
        """Verify connectivity by running a harmless command over SSH.

        Re-raises the underlying error so callers can reject bad
        credentials or endpoints at registration time.
        """
        try:
            with self.ssh_pool.item() as ssh:
                SSHHandler.do_exec('lssystem', ssh)
        except Exception as e:
            LOG.error("Failed to login ibm a9000r %s" % (six.text_type(e)))
            raise e

    def get_storage_serial_number(self):
        """Return machine_serial_number output, or None on failure."""
        serial_number = None
        try:
            serial_number = self.exec_ssh_command(SSHHandler.CONFIG_GET)
        except Exception as e:
            LOG.error("Get all storage machine_serial_number error: %s",
                      six.text_type(e))
        return serial_number

    def get_storage_version_get(self):
        """Return the firmware version output, or None on failure."""
        firmware_version = None
        try:
            firmware_version = self.exec_ssh_command(SSHHandler.VERSION_GET)
        except Exception as e:
            LOG.error("Get all storage firmware_version error: %s",
                      six.text_type(e))
        return firmware_version

    def exec_ssh_command(self, command):
        """Run one command on a pooled connection; wrap failures.

        :raises exception.SSHException: on any transport/exec error.
        """
        try:
            with self.ssh_pool.item() as ssh:
                ssh_info = SSHHandler.do_exec(command, ssh)
            return ssh_info
        except Exception as e:
            msg = "Failed to ssh ibm a9000r ssh_handler %s: %s" % \
                  (command, six.text_type(e))
            raise exception.SSHException(msg)

    @staticmethod
    def do_exec(command_str, ssh):
        """Execute command_str on ssh; return decoded stdout (or stderr).

        Maps common paramiko failure modes onto typed delfin exceptions
        so callers can distinguish auth, timeout and key problems.
        """
        # Initialized up front so the guarded branch below can be skipped
        # without raising UnboundLocalError on return.
        result = ''
        try:
            # Pass the split argument list, matching the convention of the
            # other SSH handlers in this driver; the injection check is
            # defined over argument lists, not raw strings.
            utils.check_ssh_injection(command_str.split())
            if command_str is not None and ssh is not None:
                stdin, stdout, stderr = ssh.exec_command(command_str)
                res, err = stdout.read(), stderr.read()
                # Prefer stdout; fall back to stderr when stdout is empty.
                # (Renamed from 're', which shadowed the regex module.)
                output = res if res else err
                result = output.decode()
        except paramiko.AuthenticationException as ae:
            LOG.error('doexec Authentication error:{}'.format(ae))
            raise exception.InvalidUsernameOrPassword()
        except Exception as e:
            err = six.text_type(e)
            LOG.error('doexec InvalidUsernameOrPassword error')
            if 'timed out' in err:
                raise exception.SSHConnectTimeout()
            elif 'No authentication methods available' in err \
                    or 'Authentication failed' in err:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in err:
                raise exception.InvalidPrivateKey()
            else:
                raise exception.SSHException(err)
        return result
Exemple #11
0
class SSHHandler(object):
    """SSH/CLI handler for IBM Storwize/SVC arrays."""

    # SNMP trap OIDs used by parse_alert to extract fields from a trap.
    OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3'
    OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9'
    OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10'
    OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11'
    OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17'
    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'

    # Trap enterprise OID -> delfin severity (used by parse_alert).
    TRAP_SEVERITY_MAP = {
        '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL,
        '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING,
        '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL,
    }

    # lseventlog notification_type -> alert severity (used by list_alerts).
    SEVERITY_MAP = {
        "warning": "Warning",
        "informational": "Informational",
        "error": "Major"
    }
    # lsnode status string -> delfin controller status.
    CONTRL_STATUS_MAP = {
        "online": constants.ControllerStatus.NORMAL,
        "offline": constants.ControllerStatus.OFFLINE,
        "service": constants.ControllerStatus.NORMAL,
        "flushing": constants.ControllerStatus.UNKNOWN,
        "pending": constants.ControllerStatus.UNKNOWN,
        "adding": constants.ControllerStatus.UNKNOWN,
        "deleting": constants.ControllerStatus.UNKNOWN
    }

    # lsmdisk fabric_type -> delfin disk physical type.
    DISK_PHYSICAL_TYPE = {
        'fc': constants.DiskPhysicalType.FC,
        'sas_direct': constants.DiskPhysicalType.SAS
    }
    # delfin metric name -> iostats field name, per resource type.
    VOLUME_PERF_METRICS = {
        'readIops': 'ro',
        'writeIops': 'wo',
        'readThroughput': 'rb',
        'writeThroughput': 'wb',
        'readIoSize': 'rb',
        'writeIoSize': 'wb',
        'responseTime': 'res_time',
        'throughput': 'tb',
        'iops': 'to',
        'ioSize': 'tb',
        'cacheHitRatio': 'hrt',
        'readCacheHitRatio': 'rhr',
        'writeCacheHitRatio': 'whr'
    }
    DISK_PERF_METRICS = {
        'readIops': 'ro',
        'writeIops': 'wo',
        'readThroughput': 'rb',
        'writeThroughput': 'wb',
        'responseTime': 'res_time',
        'throughput': 'tb',
        'iops': 'to'
    }
    CONTROLLER_PERF_METRICS = {
        'readIops': 'ro',
        'writeIops': 'wo',
        'readThroughput': 'rb',
        'writeThroughput': 'wb',
        'responseTime': 'res_time',
        'throughput': 'tb',
        'iops': 'to'
    }
    PORT_PERF_METRICS = {
        'readIops': 'ro',
        'writeIops': 'wo',
        'readThroughput': 'rb',
        'writeThroughput': 'wb',
        'throughput': 'tb',
        'responseTime': 'res_time',
        'iops': 'to'
    }
    # delfin resource type -> iostats dump-file object prefix.
    TARGET_RESOURCE_RELATION = {
        constants.ResourceType.DISK: 'mdsk',
        constants.ResourceType.VOLUME: 'vdsk',
        constants.ResourceType.PORT: 'port',
        constants.ResourceType.CONTROLLER: 'node'
    }
    # delfin resource type -> its metric-name mapping above.
    RESOURCE_PERF_MAP = {
        constants.ResourceType.DISK: DISK_PERF_METRICS,
        constants.ResourceType.VOLUME: VOLUME_PERF_METRICS,
        constants.ResourceType.PORT: PORT_PERF_METRICS,
        constants.ResourceType.CONTROLLER: CONTROLLER_PERF_METRICS
    }
    SECONDS_TO_MS = 1000
    # CLI error code returned when a fixed alert does not exist.
    ALERT_NOT_FOUND_CODE = 'CMMVC8275E'
    BLOCK_SIZE = 512
    BYTES_TO_BIT = 8

    def __init__(self, **kwargs):
        # Pool of reusable SSH connections built from the driver access info.
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def handle_split(split_str, split_char, arr_number):
        """Split split_str once on split_char and return one stripped part.

        arr_number selects the left (0) or right (1) half. Returns '' for
        empty/None input or when the requested half does not exist.
        """
        if split_str is None or split_str == '':
            return ''
        parts = split_str.split(split_char, 1)
        if arr_number == 0:
            return parts[0].strip()
        if arr_number == 1 and len(parts) > 1:
            return parts[1].strip()
        return ''

    @staticmethod
    def parse_alert(alert):
        """Translate an SNMP trap dict into a delfin alert model.

        Extracts the error id/name, severity, sequence number, timestamp,
        resource type and location from the trap OID values.

        :raises exception.InvalidResults: when required fields are absent
            or malformed.
        """
        try:
            err_oid_value = alert.get(SSHHandler.OID_ERR_ID)
            alert_name = SSHHandler.handle_split(err_oid_value, ':', 1)
            alert_id = SSHHandler.handle_split(
                SSHHandler.handle_split(err_oid_value, ':', 0), '=', 1)
            timestamp = SSHHandler.handle_split(
                alert.get(SSHHandler.OID_LAST_TIME), '=', 1)
            # Trap timestamps look like 'Mon Jan 01 00:00:00 2021'.
            occur_time = int(time.mktime(time.strptime(
                timestamp, '%a %b %d %H:%M:%S %Y')))
            return {
                'alert_id': str(alert_id),
                'alert_name': alert_name,
                'severity': SSHHandler.TRAP_SEVERITY_MAP.get(
                    alert.get(SSHHandler.OID_SEVERITY),
                    constants.Severity.INFORMATIONAL),
                'category': constants.Category.FAULT,
                'type': constants.EventType.EQUIPMENT_ALARM,
                'sequence_number': SSHHandler.handle_split(
                    alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1),
                'occur_time': occur_time * SSHHandler.SECONDS_TO_MS,
                'description': alert_name,
                'resource_type': SSHHandler.handle_split(
                    alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1),
                'location': SSHHandler.handle_split(
                    alert.get(SSHHandler.OID_OBJ_NAME), '=', 1)
            }
        except Exception as e:
            LOG.error(e)
            msg = ("Failed to build alert model as some attributes missing "
                   "in alert message:%s.") % (six.text_type(e))
            raise exception.InvalidResults(msg)

    def login(self):
        """Probe the array with 'lssystem' to validate SSH access.

        Raises InvalidIpOrPort when the endpoint answers but is not a
        Storwize CLI; re-raises any transport/auth failure.
        """
        try:
            with self.ssh_pool.item() as ssh_client:
                output = SSHHandler.do_exec('lssystem', ssh_client)
            if 'is not a recognized command' in output:
                raise exception.InvalidIpOrPort()
        except Exception as e:
            LOG.error("Failed to login ibm storwize_svc %s" %
                      (six.text_type(e)))
            raise e

    @staticmethod
    def do_exec(command_str, ssh):
        """Execute command_str on the given ssh channel and return output.

        Returns decoded stdout, falling back to stderr when stdout is
        empty, and '' when command_str or ssh is None (previously this
        path raised UnboundLocalError). Maps common paramiko failures
        onto typed delfin exceptions.
        """
        # Initialized up front so the guarded branch can be skipped safely.
        result = ''
        try:
            utils.check_ssh_injection(command_str.split())
            if command_str is not None and ssh is not None:
                stdin, stdout, stderr = ssh.exec_command(command_str)
                res, err = stdout.read(), stderr.read()
                # Prefer stdout; fall back to stderr when stdout is empty.
                # (Renamed from 're', which shadowed the regex module.)
                output = res if res else err
                result = output.decode()
        except paramiko.AuthenticationException as ae:
            LOG.error('doexec Authentication error:{}'.format(ae))
            raise exception.InvalidUsernameOrPassword()
        except Exception as e:
            err = six.text_type(e)
            LOG.error('doexec InvalidUsernameOrPassword error')
            if 'timed out' in err:
                raise exception.SSHConnectTimeout()
            elif 'No authentication methods available' in err \
                    or 'Authentication failed' in err:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in err:
                raise exception.InvalidPrivateKey()
            else:
                raise exception.SSHException(err)
        return result

    def exec_ssh_command(self, command):
        """Run one command on a pooled connection, wrapping any error.

        :raises exception.SSHException: on any transport/exec failure.
        """
        try:
            with self.ssh_pool.item() as ssh_client:
                return SSHHandler.do_exec(command, ssh_client)
        except Exception as e:
            raise exception.SSHException(
                "Failed to ssh ibm storwize_svc %s: %s"
                % (command, six.text_type(e)))

    def change_capacity_to_bytes(self, unit):
        """Return the binary byte multiplier for a capacity unit.

        Recognizes TB/GB/MB/KB (case-insensitive); anything else maps
        to 1.
        """
        multipliers = {
            'TB': units.Ti,
            'GB': units.Gi,
            'MB': units.Mi,
            'KB': units.Ki,
        }
        return int(multipliers.get(unit.upper(), 1))

    def parse_string(self, value):
        """Convert a CLI capacity string such as '10.5GB' to a byte count.

        Bare digit strings are taken as-is; otherwise the last two chars
        are treated as the unit. Empty/None input yields 0.
        """
        if not value:
            return 0
        if value.isdigit():
            return float(value)
        unit = value[-2:]
        return float(value[:-2]) * int(self.change_capacity_to_bytes(unit))

    def get_storage(self):
        """Collect array-level attributes from 'lssystem'.

        Returns a dict matching delfin's storage model.

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            raw = self.exec_ssh_command('lssystem')
            attrs = {}
            self.handle_detail(raw, attrs, split=' ')
            free = self.parse_string(attrs.get('total_free_space'))
            used = self.parse_string(attrs.get('total_used_capacity'))
            code_level = attrs.get('code_level')
            # code_level carries trailing build info; keep the version only.
            version = code_level.split(' ')[0] if code_level is not None \
                else ''
            return {
                'name': attrs.get('name'),
                'vendor': 'IBM',
                'model': attrs.get('product_name'),
                'status': 'normal'
                          if attrs.get('statistics_status') == 'on'
                          else 'offline',
                'serial_number': attrs.get('id'),
                'firmware_version': version,
                'location': attrs.get('location'),
                'total_capacity': int(free + used),
                'raw_capacity': int(self.parse_string(
                    attrs.get('total_mdisk_capacity'))),
                'subscribed_capacity': int(self.parse_string(
                    attrs.get('virtual_capacity'))),
                'used_capacity': int(used),
                'free_capacity': int(free)
            }
        except exception.DelfinException as e:
            err_msg = "Failed to get storage: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def handle_detail(self, deltail_info, detail_map, split):
        """Parse 'key<split>value' CLI lines into detail_map.

        Splits each non-empty line once; a line without a value part maps
        its key to ''.
        """
        for line in deltail_info.split('\n'):
            if not line:
                continue
            pieces = line.split(split, 1)
            detail_map[pieces[0]] = pieces[1] if len(pieces) > 1 else ''

    def list_storage_pools(self, storage_id):
        """Build pool models from 'lsmdiskgrp' plus per-pool detail.

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            pools = []
            listing = self.exec_ssh_command('lsmdiskgrp')
            # Skip the header row; each data row's first column is the id.
            for line in islice(listing.split('\n'), 1, None):
                if not line:
                    continue
                pool_id = ' '.join(line.split()).split(' ')[0]
                detail = self.exec_ssh_command('lsmdiskgrp %s' % pool_id)
                pool_map = {}
                self.handle_detail(detail, pool_map, split=' ')
                pools.append({
                    'name': pool_map.get('name'),
                    'storage_id': storage_id,
                    'native_storage_pool_id': pool_map.get('id'),
                    'description': '',
                    'status': 'normal'
                              if pool_map.get('status') == 'online'
                              else 'offline',
                    'storage_type': constants.StorageType.BLOCK,
                    'subscribed_capacity': int(self.parse_string(
                        pool_map.get('virtual_capacity'))),
                    'total_capacity': int(self.parse_string(
                        pool_map.get('capacity'))),
                    'used_capacity': int(self.parse_string(
                        pool_map.get('used_capacity'))),
                    'free_capacity': int(self.parse_string(
                        pool_map.get('free_capacity')))
                })
            return pools
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """Build volume models from 'lsvdisk' plus per-volume detail.

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            volumes = []
            listing = self.exec_ssh_command('lsvdisk')
            # Skip the header row; each data row's first column is the id.
            for line in islice(listing.split('\n'), 1, None):
                if not line:
                    continue
                vol_id = ' '.join(line.split()).split(' ')[0]
                detail = self.exec_ssh_command(
                    'lsvdisk -delim : %s' % vol_id)
                vol_map = {}
                self.handle_detail(detail, vol_map, split=':')
                volumes.append({
                    'name': vol_map.get('name'),
                    'storage_id': storage_id,
                    'description': '',
                    'status': 'normal'
                              if vol_map.get('status') == 'online'
                              else 'offline',
                    'native_volume_id': str(vol_map.get('id')),
                    'native_storage_pool_id': vol_map.get('mdisk_grp_id'),
                    'wwn': str(vol_map.get('vdisk_UID')),
                    'type': 'thin' if vol_map.get('se_copy') == 'yes'
                            else 'thick',
                    'total_capacity': int(self.parse_string(
                        vol_map.get('capacity'))),
                    'used_capacity': int(self.parse_string(
                        vol_map.get('used_capacity'))),
                    'free_capacity': int(self.parse_string(
                        vol_map.get('free_capacity'))),
                    # Flags default to True and flip only on explicit 'no'.
                    'compressed': vol_map.get('compressed_copy') != 'no',
                    'deduplicated': vol_map.get('deduplicated_copy') != 'no'
                })
            return volumes
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_alerts(self, query_para):
        """List monitoring events as alert models, filtered by query_para.

        Events outside the requested time range, with informational
        severity, or with an unmapped notification type are skipped.
        """
        try:
            alerts = []
            listing = self.exec_ssh_command('lseventlog -monitoring yes '
                                            '-message no')
            for line in islice(listing.split('\n'), 1, None):
                if not line:
                    continue
                seq = ' '.join(line.split()).split(' ', 1)[0]
                detail = self.exec_ssh_command('lseventlog %s' % seq)
                event = {}
                self.handle_detail(detail, event, split=' ')
                occur_time = int(event.get('last_timestamp_epoch')) * \
                    self.SECONDS_TO_MS
                if not alert_util.is_alert_in_time_range(
                        query_para, occur_time):
                    continue
                severity = self.SEVERITY_MAP.get(
                    event.get('notification_type'))
                if severity is None or severity == 'Informational':
                    continue
                name = event.get('event_id_text', '')
                alerts.append({
                    'alert_id': event.get('event_id'),
                    'alert_name': name,
                    'severity': severity,
                    'category': constants.Category.FAULT,
                    'type': 'EquipmentAlarm',
                    'sequence_number': event.get('sequence_number'),
                    'occur_time': occur_time,
                    'description': name,
                    'resource_type': event.get('object_type', ''),
                    'location': event.get('object_name', '')
                })
            return alerts
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def fix_alert(self, alert):
        """Acknowledge (fix) one event-log entry by sequence number.

        A missing alert (CMMVC8275E) is only logged as a warning; any
        other non-empty CLI response is treated as a failure.
        """
        result = self.exec_ssh_command('cheventlog -fix %s' % alert)
        if not result:
            return
        if self.ALERT_NOT_FOUND_CODE not in result:
            raise exception.InvalidResults(six.text_type(result))
        LOG.warning("Alert %s doesn't exist.", alert)

    def list_controllers(self, storage_id):
        """Build controller models from 'lsnode' (or 'lsnodecanister').

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            controllers = []
            list_cmd = 'lsnode'
            listing = self.exec_ssh_command(list_cmd)
            if 'command not found' in listing:
                # Some platforms expose nodes as canisters instead.
                list_cmd = 'lsnodecanister'
                listing = self.exec_ssh_command(list_cmd)
            for line in islice(listing.split('\n'), 1, None):
                if not line:
                    continue
                node_id = ' '.join(line.split()).split(' ')[0]
                detail = self.exec_ssh_command(
                    '%s %s' % (list_cmd, node_id))
                node_map = {}
                self.handle_detail(detail, node_map, split=' ')
                controllers.append({
                    'name': node_map.get('name'),
                    'storage_id': storage_id,
                    'native_controller_id': node_map.get('id'),
                    'status': SSHHandler.CONTRL_STATUS_MAP.get(
                        node_map.get('status'),
                        constants.ControllerStatus.UNKNOWN),
                    'soft_version': node_map.get('code_level',
                                                 '').split(' ')[0],
                    'location': node_map.get('name')
                })
            return controllers
        except Exception as err:
            err_msg = "Failed to get controller attributes from Storwize: %s"\
                      % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_disks(self, storage_id):
        """Build disk (mdisk) models from 'lsmdisk' plus per-disk detail.

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            disks = []
            listing = self.exec_ssh_command('lsmdisk')
            for line in islice(listing.split('\n'), 1, None):
                if not line:
                    continue
                disk_id = ' '.join(line.split()).split(' ')[0]
                detail = self.exec_ssh_command('lsmdisk %s' % disk_id)
                disk_map = {}
                self.handle_detail(detail, disk_map, split=' ')
                disks.append({
                    'name': disk_map.get('name'),
                    'storage_id': storage_id,
                    'native_disk_id': disk_map.get('id'),
                    'capacity':
                    int(self.parse_string(disk_map.get('capacity'))),
                    'status': constants.DiskStatus.OFFLINE
                    if disk_map.get('status') == 'offline'
                    else constants.DiskStatus.NORMAL,
                    'physical_type': SSHHandler.DISK_PHYSICAL_TYPE.get(
                        disk_map.get('fabric_type'),
                        constants.DiskPhysicalType.UNKNOWN),
                    'native_disk_group_id': disk_map.get('mdisk_grp_name'),
                    'location': '%s_%s' % (disk_map.get('controller_name'),
                                           disk_map.get('name'))
                })
            return disks
        except Exception as err:
            err_msg = "Failed to get disk attributes from Storwize: %s" % \
                      (six.text_type(err))
            raise exception.InvalidResults(err_msg)

    def get_fc_port(self, storage_id):
        """Build port models from 'lsportfc' plus per-port detail.

        Port speed is parsed only when the value has a numeric prefix
        (e.g. '8Gb'); otherwise it is left as None.
        """
        port_list = []
        fc_info = self.exec_ssh_command('lsportfc')
        fc_res = fc_info.split('\n')
        for i in range(1, len(fc_res)):
            if fc_res[i] is None or fc_res[i] == '':
                continue
            control_str = ' '.join(fc_res[i].split())
            str_info = control_str.split(' ')
            port_id = str_info[0]
            detail_command = 'lsportfc %s' % port_id
            deltail_info = self.exec_ssh_command(detail_command)
            port_map = {}
            self.handle_detail(deltail_info, port_map, split=' ')
            status = constants.PortHealthStatus.NORMAL
            conn_status = constants.PortConnectionStatus.CONNECTED
            if port_map.get('status') != 'active':
                status = constants.PortHealthStatus.ABNORMAL
                conn_status = constants.PortConnectionStatus.DISCONNECTED
            port_type = constants.PortType.FC
            if port_map.get('type') == 'ethernet':
                port_type = constants.PortType.ETH
            location = '%s_%s' % (port_map.get('node_name'),
                                  port_map.get('id'))
            speed = None
            # Guard against a missing 'port_speed' key: indexing the raw
            # .get() result unconditionally raised TypeError on None.
            speed_str = port_map.get('port_speed') or ''
            if speed_str[:-2].isdigit():
                speed = int(self.handle_port_bps(speed_str, 'fc'))
            port_result = {
                'name': location,
                'storage_id': storage_id,
                'native_port_id': port_map.get('id'),
                'location': location,
                'connection_status': conn_status,
                'health_status': status,
                'type': port_type,
                'speed': speed,
                'native_parent_id': port_map.get('node_name'),
                'wwn': port_map.get('WWPN')
            }
            port_list.append(port_result)
        return port_list

    def get_iscsi_port(self, storage_id):
        """Build port models for ethernet/iSCSI ports via 'lsportip'."""
        port_list = []
        # Query port ids 1 and 2 individually.
        for i in range(1, 3):
            port_array = []
            port_command = 'lsportip %s' % i
            port_info = self.exec_ssh_command(port_command)
            port_arr = port_info.split('\n')
            port_map = {}
            # Output is a series of blank-line-separated records of
            # 'key value' lines; accumulate one dict per record.
            for detail in port_arr:
                if detail is not None and detail != '':
                    strinfo = detail.split(' ', 1)
                    key = strinfo[0]
                    value = ''
                    if len(strinfo) > 1:
                        value = strinfo[1]
                    port_map[key] = value
                else:
                    if len(port_map) > 1:
                        port_array.append(port_map)
                        port_map = {}
                        continue
            # NOTE(review): a trailing record is only captured when the
            # output ends with a blank line — confirm against real CLI
            # output before changing this loop.
            for port in port_array:
                # Failover entries duplicate the primary port; skip them.
                if port.get('failover') == 'yes':
                    continue
                status = constants.PortHealthStatus.ABNORMAL
                if port.get('state') == 'online':
                    status = constants.PortHealthStatus.NORMAL
                conn_status = constants.PortConnectionStatus.DISCONNECTED
                if port.get('link_state') == 'active':
                    conn_status = constants.PortConnectionStatus.CONNECTED
                port_type = constants.PortType.ETH
                location = '%s_%s' % (port.get('node_name'), port.get('id'))
                port_result = {
                    'name': location,
                    'storage_id': storage_id,
                    'native_port_id': location,
                    'location': location,
                    'connection_status': conn_status,
                    'health_status': status,
                    'type': port_type,
                    'speed': int(self.handle_port_bps(port.get('speed'),
                                                      'eth')),
                    'native_parent_id': port.get('node_name'),
                    'mac_address': port.get('MAC'),
                    'ipv4': port.get('IP_address'),
                    'ipv4_mask': port.get('mask'),
                    'ipv6': port.get('IP_address_6')
                }
                port_list.append(port_result)
        return port_list

    @staticmethod
    def change_speed_to_bytes(unit):
        """Return the decimal multiplier for a speed unit.

        Recognizes TB/GB/MB/KB (case-insensitive); anything else maps
        to 1.
        """
        multipliers = {
            'TB': units.T,
            'GB': units.G,
            'MB': units.M,
            'KB': units.k,
        }
        return int(multipliers.get(unit.upper(), 1))

    def handle_port_bps(self, value, port_type):
        """Convert a CLI port-speed string to a numeric speed.

        FC values end in a two-char unit (e.g. '8Gb'); other (ethernet)
        values carry a four-char suffix, so the unit sits at [-4:-2].
        Empty/None input yields 0; bare digits are returned as-is.
        """
        if not value:
            return 0
        if value.isdigit():
            return float(value)
        if port_type == 'fc':
            unit, number = value[-2:], value[:-2]
        else:
            unit, number = value[-4:-2], value[:-4]
        return float(number) * int(self.change_speed_to_bytes(unit))

    def list_ports(self, storage_id):
        """Aggregate FC and iSCSI port models for the array.

        :raises exception.InvalidResults: on unexpected parse failures.
        """
        try:
            return (self.get_fc_port(storage_id)
                    + self.get_iscsi_port(storage_id))
        except Exception as err:
            err_msg = "Failed to get ports attributes from Storwize: %s" % \
                      (six.text_type(err))
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def handle_stats_filename(file_name, file_map):
        """Index one iostats dump file by stats type and capture time.

        File names are underscore-separated: the first three fields form
        the stats type, the next two a 'yymmdd'/'hhmmss' timestamp
        (prefixed with '20' to get a full year).
        """
        parts = file_name.split('_')
        file_type = '%s_%s_%s' % (parts[0], parts[1], parts[2])
        stamp = '20%s%s' % (parts[3], parts[4])
        occur_time = Tools().time_str_to_timestamp(stamp, '%Y%m%d%H%M%S')
        file_map.setdefault(file_type, {})[occur_time] = file_name

    def get_stats_filelist(self, file_map):
        """Fill file_map with iostats dump files, time-sorted per type.

        After indexing, each entry becomes a list of (timestamp, name)
        pairs in ascending time order.
        """
        listing = self.exec_ssh_command('lsdumps -prefix /dumps/iostats')
        for line in islice(listing.split('\n'), 1, None):
            if not line:
                continue
            columns = ' '.join(line.split()).split(' ')
            if len(columns) > 1:
                SSHHandler.handle_stats_filename(columns[1], file_map)
        for file_type in file_map:
            file_map[file_type] = sorted(file_map[file_type].items(),
                                         key=lambda item: item[0])

    def packege_data(self, storage_id, resource_type, metrics, metric_map):
        """Convert accumulated raw samples into delfin metric structs.

        :param storage_id: id of the storage the metrics belong to.
        :param resource_type: one of constants.ResourceType.*.
        :param metrics: output list; metric_struct entries are appended.
        :param metric_map: {resource_info: {metric_name: {timestamp: value}}}.
        """
        fc_port_by_wwn = {}
        if resource_type == constants.ResourceType.PORT:
            # Hoisted out of the loop: the previous version re-fetched the
            # FC port list (an SSH round-trip) once per metric_map entry.
            for fc_port in self.get_fc_port(storage_id) or []:
                fc_port_by_wwn[fc_port.get('wwn').upper()] = fc_port
        for resource_info in metric_map:
            # Reset per resource so a failed port lookup does not inherit
            # the previous iteration's id/name (stale-label bug before).
            resource_id = None
            resource_name = None
            unit = None
            if resource_type == constants.ResourceType.PORT:
                # Remove only a leading '0x' prefix from the wwpn key.
                # The previous str.strip('0x') also stripped trailing
                # zeros of the WWPN, which broke the match for WWPNs
                # ending in '0'.
                wwpn = resource_info.upper()
                if wwpn.startswith('0X'):
                    wwpn = wwpn[2:]
                fc_port = fc_port_by_wwn.get(wwpn)
                if fc_port:
                    resource_id = fc_port.get('native_port_id')
                    resource_name = fc_port.get('name')
            else:
                # Non-port resource keys encode "<id>_<name>".
                resource_arr = resource_info.split('_')
                resource_id = resource_arr[0]
                resource_name = resource_arr[1]
            res_map = metric_map.get(resource_info)
            for target in res_map:
                if resource_type == constants.ResourceType.PORT:
                    unit = consts.PORT_CAP[target]['unit']
                elif resource_type == constants.ResourceType.VOLUME:
                    unit = consts.VOLUME_CAP[target]['unit']
                elif resource_type == constants.ResourceType.DISK:
                    unit = consts.DISK_CAP[target]['unit']
                elif resource_type == constants.ResourceType.CONTROLLER:
                    unit = consts.CONTROLLER_CAP[target]['unit']
                if 'responseTime' == target:
                    # responseTime was accumulated as total latency per
                    # timestamp; divide by the iops sample at the same
                    # timestamp to get average per-IO latency.
                    for res_time in res_map.get(target):
                        for iops_time in res_map.get('iops'):
                            if res_time == iops_time:
                                res_value = res_map.get(target).get(res_time)
                                iops_value = res_map.get('iops').get(
                                    iops_time)
                                res_value = \
                                    res_value / iops_value if iops_value else 0
                                res_map[target][res_time] = round(res_value, 3)
                                break
                labels = {
                    'storage_id': storage_id,
                    'resource_type': resource_type,
                    'resource_id': resource_id,
                    'resource_name': resource_name,
                    'type': 'RAW',
                    'unit': unit
                }
                metric_value = constants.metric_struct(
                    name=target,
                    labels=labels,
                    values=res_map.get(target))
                metrics.append(metric_value)

    @staticmethod
    def count_metric_data(last_data, now_data, interval, target, metric_type,
                          metric_map, res_id):
        """Compute one metric sample from two consecutive snapshots and
        accumulate it into metric_map[res_id][metric_type][time].

        :param interval: seconds between the two snapshots.
        :param target: raw counter key in the snapshots; no-op when falsy.
        """
        if not target:
            return
        upper_type = metric_type.upper()
        if 'CACHEHITRATIO' in upper_type:
            # Cache-hit ratios are pre-computed on now_data (see
            # handle_volume_cach_hit), not counter deltas.
            value = now_data.get(
                SSHHandler.VOLUME_PERF_METRICS.get(metric_type))
        else:
            value = SSHHandler.count_difference(now_data.get(target),
                                                last_data.get(target))
        if 'THROUGHPUT' in upper_type:
            value = value / interval / units.Mi
        elif 'IOSIZE' in upper_type:
            value = value / units.Ki
        elif 'IOPS' in upper_type or 'RESPONSETIME' in upper_type:
            value = value / interval
        value = round(value, 3)
        sample_time = now_data.get('time')
        bucket = metric_map.setdefault(res_id, {}).setdefault(metric_type, {})
        if bucket.get(sample_time):
            bucket[sample_time] += value
        else:
            bucket[sample_time] = value

    @staticmethod
    def count_difference(now_value, last_value):
        value = 0
        if now_value >= last_value:
            value = now_value - last_value
        else:
            value = now_value
        return value

    @staticmethod
    def handle_volume_cach_hit(now_data, last_data):
        """Derive volume cache-hit ratios from two consecutive snapshots
        and store them on now_data as 'rhr', 'whr' and 'hrt' (percent).
        """
        delta = SSHHandler.count_difference
        read_hits = delta(now_data.get('rh'), last_data.get('rh'))
        write_hits = delta(now_data.get('wh'), last_data.get('wh'))
        read_total = delta(now_data.get('rht'), last_data.get('rht'))
        write_total = delta(now_data.get('wht'), last_data.get('wht'))
        read_ratio = read_hits * 100 / read_total if read_total > 0 else 0
        write_ratio = write_hits * 100 / write_total if write_total > 0 else 0
        now_data['rhr'] = read_ratio
        now_data['whr'] = write_ratio
        # NOTE(review): the overall ratio is the SUM of the two ratios, so
        # it can exceed 100 — kept as-is to preserve existing behavior.
        now_data['hrt'] = read_ratio + write_ratio

    def get_date_from_each_file(self, file, metric_map, target_list,
                                resource_type, last_data):
        """Parse one remote iostats XML dump and accumulate per-resource
        metric deltas.

        :param file: (timestamp, file_name) tuple identifying the dump.
        :param metric_map: nested dict computed samples are written into.
        :param target_list: metric names requested for this resource type.
        :param resource_type: one of constants.ResourceType.*.
        :param last_data: {resource_info: previous snapshot}; carried across
            files so counter deltas can be computed, updated in place.
        """
        with self.ssh_pool.item() as ssh:
            # Download the dump locally and parse it into an XML tree.
            local_path = '%s/%s' % (os.path.abspath(os.path.join(
                os.getcwd())), consts.LOCAL_FILE_PATH)
            file_xml = Tools.get_remote_file_to_xml(ssh, file[1], local_path,
                                                    consts.REMOTE_FILE_PATH)
            if not file_xml:
                return
            for data in file_xml:
                # Strip the XML namespace ("{uri}tag" -> "tag") before
                # matching the element expected for this resource type.
                if re.sub(u"\\{.*?}", "", data.tag) == \
                        SSHHandler.TARGET_RESOURCE_RELATION.get(
                            resource_type):
                    if resource_type == constants.ResourceType.PORT:
                        # Only FC ports carry an fc_wwpn; skip other ports.
                        if data.attrib.get('fc_wwpn'):
                            resource_info = data.attrib.get('fc_wwpn')
                        else:
                            continue
                    elif resource_type == constants. \
                            ResourceType.CONTROLLER:
                        # node_id is hex in the XML; key is "<id>_<name>".
                        resource_info = '%s_%s' % (int(
                            data.attrib.get('node_id'),
                            16), data.attrib.get('id'))
                    else:
                        resource_info = '%s_%s' % (data.attrib.get('idx'),
                                                   data.attrib.get('id'))
                    now_data = SSHHandler.package_xml_data(
                        data.attrib, file[0], resource_type)
                    if last_data.get(resource_info):
                        # Interval between snapshots; timestamps look like
                        # milliseconds divided by units.k to seconds —
                        # TODO(review): confirm units.k == 1000 here.
                        interval = (
                            int(file[0]) -
                            last_data.get(resource_info).get('time')) / units.k
                        # Non-positive interval means this file is not newer
                        # than the baseline; abort the whole file.
                        if interval <= 0:
                            break
                        if resource_type == constants.ResourceType.VOLUME:
                            SSHHandler.handle_volume_cach_hit(
                                now_data, last_data.get(resource_info))
                        for target in target_list:
                            # Map the public metric name to the raw counter
                            # key for this resource type.
                            device_target = SSHHandler. \
                                RESOURCE_PERF_MAP.get(resource_type)
                            SSHHandler.count_metric_data(
                                last_data.get(resource_info), now_data,
                                interval, device_target.get(target), target,
                                metric_map, resource_info)
                        last_data[resource_info] = now_data
                    else:
                        # First snapshot seen for this resource: record it as
                        # the baseline only.
                        last_data[resource_info] = now_data

    def get_stats_from_file(self, file_list, metric_map, target_list,
                            resource_type, start_time, end_time):
        """Parse every dump inside [start_time, end_time] from the
        time-sorted (timestamp, name) list.

        The file immediately preceding the window is parsed first so the
        first in-window file has a baseline to compute deltas against.
        """
        if not file_list:
            return
        last_data = {}
        previous = None
        window_started = False
        for entry in file_list:
            if start_time <= entry[0] <= end_time:
                if not window_started and previous:
                    # Baseline file just before the window.
                    self.get_date_from_each_file(previous, metric_map,
                                                 target_list, resource_type,
                                                 last_data)
                window_started = True
                self.get_date_from_each_file(entry, metric_map, target_list,
                                             resource_type, last_data)
            previous = entry

    @staticmethod
    def package_xml_data(file_data, file_time, resource_type):
        """Build a normalized cumulative-counter snapshot from one XML
        stats element.

        :param file_data: attribute dict of the XML stats node.
        :param file_time: timestamp of the dump the node came from.
        :param resource_type: one of constants.ResourceType.*.
        :return: dict with read/write byte and op counters (rb/wb/ro/wo),
            totals (tb/to), cache-hit counters (rh/wh/rht/wht), res_time
            and the sample 'time'.
        """
        rb = 0
        wb = 0
        res_time = 0
        rh = 0
        wh = 0
        rht = 0
        wht = 0
        if resource_type == constants.ResourceType.PORT:
            # BUG FIX: the previous version multiplied only the last term
            # (rmbr/rmbt) by BYTES_TO_BIT due to operator precedence; the
            # conversion must apply to the whole 4-counter sum, matching
            # the CONTROLLER branch below.
            rb = (int(file_data.get('cbr')) + int(file_data.get('hbr'))
                  + int(file_data.get('lnbr'))
                  + int(file_data.get('rmbr'))) * SSHHandler.BYTES_TO_BIT
            wb = (int(file_data.get('cbt')) + int(file_data.get('hbt'))
                  + int(file_data.get('lnbt'))
                  + int(file_data.get('rmbt'))) * SSHHandler.BYTES_TO_BIT
            ro = int(file_data.get('cer')) + int(file_data.get('her')) + int(
                file_data.get('lner')) + int(file_data.get('rmer'))
            wo = int(file_data.get('cet')) + int(file_data.get('het')) + int(
                file_data.get('lnet')) + int(file_data.get('rmet'))
            # 'dtdt' may be absent for some ports, hence the default.
            res_time = int(file_data.get('dtdt', 0)) / units.Ki
        else:
            if resource_type == constants.ResourceType.VOLUME:
                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
                # Cache-hit counters, consumed by handle_volume_cach_hit.
                rh = int(file_data.get('ctrhs'))
                wh = int(file_data.get('ctwhs'))
                rht = int(file_data.get('ctrs'))
                wht = int(file_data.get('ctws'))
                res_time = int(file_data.get('xl'))
            elif resource_type == constants.ResourceType.DISK:
                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
            elif resource_type == constants.ResourceType.CONTROLLER:
                rb = int(file_data.get('rb')) * SSHHandler.BYTES_TO_BIT
                wb = int(file_data.get('wb')) * SSHHandler.BYTES_TO_BIT
                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
            ro = int(file_data.get('ro'))
            wo = int(file_data.get('wo'))
        now_data = {
            'rb': rb,
            'wb': wb,
            'ro': ro,
            'wo': wo,
            'tb': rb + wb,
            'to': ro + wo,
            'rh': rh,
            'wh': wh,
            'rht': rht,
            'wht': wht,
            'res_time': res_time,
            'time': int(file_time)
        }
        return now_data

    def get_stats_file_data(self, file_map, res_type, metrics, storage_id,
                            target_list, start_time, end_time):
        """Parse every stats file whose type prefix matches res_type and
        package the collected samples into metric structs.
        """
        # Dump-file prefix per resource type; node ('Nn') files carry both
        # port and controller counters.
        prefix_by_type = {
            constants.ResourceType.VOLUME: 'Nv',
            constants.ResourceType.DISK: 'Nm',
            constants.ResourceType.PORT: 'Nn',
            constants.ResourceType.CONTROLLER: 'Nn',
        }
        metric_map = {}
        prefix = prefix_by_type.get(res_type)
        if prefix:
            for file_type, file_list in file_map.items():
                if prefix in file_type:
                    self.get_stats_from_file(file_list, metric_map,
                                             target_list, res_type,
                                             start_time, end_time)
        self.packege_data(storage_id, res_type, metrics, metric_map)

    def collect_perf_metrics(self, storage_id, resource_metrics, start_time,
                             end_time):
        """Collect performance metrics for all requested resource types.

        :param resource_metrics: {ResourceType: [metric names]}.
        :return: list of metric structs.
        :raises exception.InvalidResults: on any collection failure.
        """
        metrics = []
        file_map = {}
        try:
            self.get_stats_filelist(file_map)
            # One pass per supported resource type instead of four copies.
            for res_type in (constants.ResourceType.VOLUME,
                             constants.ResourceType.DISK,
                             constants.ResourceType.PORT,
                             constants.ResourceType.CONTROLLER):
                target_list = resource_metrics.get(res_type)
                if target_list:
                    self.get_stats_file_data(file_map, res_type, metrics,
                                             storage_id, target_list,
                                             start_time, end_time)
        except Exception as err:
            err_msg = "Failed to collect metrics from svc: %s" % \
                      (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return metrics

    def get_latest_perf_timestamp(self):
        """Return the timestamp of the newest iostats dump on the array.

        :return: the latest parsed file timestamp, or 0 when the lsdumps
            listing contains no parsable file names.
        """
        latest_time = 0
        file_list = self.exec_ssh_command('lsdumps -prefix /dumps/iostats')
        # Hoisted out of the loop: the previous version built a new Tools()
        # and pattern string for every file line (loop-invariant work).
        tools = Tools()
        time_pattern = '%Y%m%d%H%M%S'
        # Skip the header line of the lsdumps output.
        for file in islice(file_list.split('\n'), 1, None):
            if not file:
                continue
            file_arr = ' '.join(file.split()).split(' ')
            if len(file_arr) > 1:
                name_arr = file_arr[1].split('_')
                # File names embed a 2-digit year; '20' restores the century.
                file_time = '20%s%s' % (name_arr[3], name_arr[4])
                occur_time = tools.time_str_to_timestamp(file_time,
                                                         time_pattern)
                latest_time = max(latest_time, occur_time)
        return latest_time