Example #1
0
class SSHHandler(object):
    """SSH-based client for HPE MSA storage arrays.

    Each collection method runs one or more CLI commands through the SSH
    pool, parses the XML payload the array returns, and maps it onto the
    delfin resource models (storage, pools, disks, ports, volumes,
    controllers, alerts).
    """

    def __init__(self, **kwargs):
        # Pool of reusable SSH connections; kwargs are forwarded verbatim.
        self.ssh_pool = SSHPool(**kwargs)

    def login(self):
        """Validate SSH connectivity by issuing a harmless CLI command."""
        try:
            self.ssh_pool.do_exec('show pools')
        except Exception as e:
            LOG.error("Failed to login msa  %s" % (six.text_type(e)))
            # Bare raise preserves the original traceback.
            raise

    def get_storage(self, storage_id):
        """Return the storage-system model dict for this array.

        Aggregates capacity figures from the pool, disk and volume
        listings and combines them with `show system` / `show version`
        output.
        """
        try:
            system_info = self.ssh_pool.do_exec('show system')
            system_data = self.handle_xml_to_dict(system_info, 'system')
            version_info = self.ssh_pool.do_exec('show version')
            version_arr = self.handle_xml_to_json(version_info, 'versions')
            version_id = ""
            if version_arr:
                # Only the first bundle version is reported.
                version_id = version_arr[0].get('bundle-version')
            if system_data:
                pools_list = self.list_storage_pools(storage_id)
                total_capacity = 0
                if pools_list:
                    for pool in pools_list:
                        total_capacity += int(pool.get('total_capacity'))
                disks_list = self.list_storage_disks(storage_id)
                raw_capacity = 0
                if disks_list:
                    for disk in disks_list:
                        raw_capacity += int(disk.get('capacity'))
                volumes_list = self.list_storage_volume(storage_id)
                volume_all_size = 0
                if volumes_list:
                    for volume in volumes_list:
                        volume_all_size += int(volume.get('total_capacity'))
                health = system_data.get('health')
                # Anything other than OK/Degraded is treated as offline.
                status = constants.StorageStatus.OFFLINE
                if health == 'OK':
                    status = constants.StorageStatus.NORMAL
                elif health == 'Degraded':
                    status = constants.StorageStatus.DEGRADED
                serial_num = system_data.get('midplane-serial-number')
                storage_map = {
                    'name': system_data.get('system-name'),
                    'vendor': consts.StorageVendor.HPE_MSA_VENDOR,
                    'model': system_data.get('product-id'),
                    'status': status,
                    'serial_number': serial_num,
                    'firmware_version': version_id,
                    'location': system_data.get('system-location'),
                    'raw_capacity': int(raw_capacity),
                    'total_capacity': int(total_capacity),
                    'used_capacity': int(volume_all_size),
                    'free_capacity': int(total_capacity - volume_all_size)
                }
                return storage_map
        except Exception as e:
            err_msg = "Failed to get system info : %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    def list_storage_disks(self, storage_id):
        """Return a list of disk model dicts from `show disks`."""
        try:
            disk_info = self.ssh_pool.do_exec('show disks')
            disk_detail = self.handle_xml_to_json(disk_info, 'drives')
            disks_arr = []
            if disk_detail:
                for data in disk_detail:
                    health = data.get('health')
                    status = constants.StoragePoolStatus.OFFLINE
                    if health == 'OK':
                        status = constants.StoragePoolStatus.NORMAL
                    size = self.parse_string_to_bytes(data.get('size'))
                    # Map the array's textual description onto delfin's
                    # physical disk types; unknown descriptions fall back
                    # to UNKNOWN.
                    physical_type = consts.DiskPhysicalType.\
                        DISK_PHYSICAL_TYPE.get(data.get('description'),
                                               constants.DiskPhysicalType.
                                               UNKNOWN)
                    rpm = data.get('rpm')
                    if rpm:
                        rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED
                    data_map = {
                        'native_disk_id': data.get('location'),
                        'name': data.get('location'),
                        'physical_type': physical_type,
                        'status': status,
                        'storage_id': storage_id,
                        'native_disk_group_id': data.get('disk-group'),
                        'serial_number': data.get('serial-number'),
                        'manufacturer': data.get('vendor'),
                        'model': data.get('model'),
                        'speed': rpm,
                        'capacity': int(size),
                        'health_score': status
                    }
                    disks_arr.append(data_map)
            return disks_arr
        except Exception as e:
            err_msg = "Failed to get storage disk: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    def list_storage_ports(self, storage_id):
        """Return a list of port model dicts from `show ports`.

        The array emits port records as alternating OBJECT pairs, so
        consecutive entries are merged two at a time.
        """
        try:
            ports_info = self.ssh_pool.do_exec('show ports')
            ports_split = ports_info.split('\n')
            # Drop the CLI echo (first line) and prompt (last line).
            ports_array = ports_split[1:len(ports_split) - 1]
            ports_xml_data = ''.join(ports_array)
            xml_element = Et.fromstring(ports_xml_data)
            ports_json = []
            for element_data in xml_element.iter('OBJECT'):
                property_name = element_data.get('basetype')
                if property_name != 'status':
                    msg = {}
                    for child in element_data.iter('PROPERTY'):
                        msg[child.get('name')] = child.text
                    ports_json.append(msg)
            ports_elements_info = []
            # Each port is described by two consecutive OBJECTs; merge
            # them into one record.
            for i in range(0, len(ports_json) - 1, 2):
                port_element = ports_json[i].copy()
                port_element.update(ports_json[i + 1])
                ports_elements_info.append(port_element)
            list_ports = []
            for data in ports_elements_info:
                status = constants.PortHealthStatus.NORMAL
                conn_status = constants.PortConnectionStatus.CONNECTED
                if data.get('health') != 'OK':
                    status = constants.PortHealthStatus.ABNORMAL
                    conn_status = constants.PortConnectionStatus.\
                        DISCONNECTED
                wwn = None
                port_type = constants.PortType.FC
                location_port_type = data.get('port-type')
                if location_port_type:
                    location_port_type = location_port_type.upper()
                if location_port_type == 'ISCSI':
                    port_type = constants.PortType.ETH
                else:
                    # FC ports expose their WWN via target-id.
                    target_id = data.get('target-id')
                    if target_id:
                        wwn = target_id
                location = '%s_%s' % (data.get('port'), location_port_type)
                speed = data.get('configured-speed', None)
                max_speed = 0
                if speed != 'Auto' and speed is not None:
                    max_speed = self.parse_string_to_bytes(speed)
                data_map = {
                    'native_port_id': data.get('durable-id'),
                    'name': data.get('port'),
                    'type': port_type,
                    'connection_status': conn_status,
                    'health_status': status,
                    'location': location,
                    'storage_id': storage_id,
                    'speed': max_speed,
                    'max_speed': max_speed,
                    'mac_address': data.get('mac-address'),
                    'ipv4': data.get('ip-address'),
                    'wwn': wwn
                }
                list_ports.append(data_map)
            return list_ports
        except Exception as e:
            err_msg = "Failed to get storage ports: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    def list_storage_controller(self, storage_id):
        """Return a list of controller model dicts from `show controllers`."""
        try:
            controller_info = self.ssh_pool\
                .do_exec('show controllers')
            controller_detail = self.handle_xml_to_json(
                controller_info, 'controllers')
            controller_arr = []
            for data in controller_detail:
                health = data.get('health')
                status = constants.StoragePoolStatus.OFFLINE
                if health == 'OK':
                    status = constants.StoragePoolStatus.NORMAL
                cpu_info = data.get('sc-cpu-type')
                memory_size = data.get('system-memory-size')
                if memory_size is not None:
                    # The CLI reports a bare number of megabytes.
                    memory_size += "MB"
                system_memory_size = self.parse_string_to_bytes(memory_size)
                data_map = {
                    'native_controller_id': data.get('controller-id'),
                    'name': data.get('durable-id'),
                    'storage_id': storage_id,
                    'status': status,
                    'location': data.get('position'),
                    'soft_version': data.get('sc-fw'),
                    'cpu_info': cpu_info,
                    'memory_size': int(system_memory_size)
                }
                controller_arr.append(data_map)
            return controller_arr
        except Exception as e:
            err_msg = "Failed to get storage controllers: %s"\
                      % (six.text_type(e))
            LOG.error(err_msg)
            raise

    def list_storage_volume(self, storage_id):
        """Return a list of volume model dicts from `show volumes`.

        Each volume is matched to its pool by comparing the volume's
        virtual-disk-name against pool names; the first pool's serial
        number is used as a fallback.
        """
        try:
            volume_infos = self.ssh_pool.do_exec('show volumes')
            volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')
            pools_info = self.ssh_pool.do_exec('show pools')
            pool_detail = self.handle_xml_to_json(pools_info, 'pools')
            list_volumes = []
            for data in volume_detail:
                health = data.get('health')
                status = constants.StoragePoolStatus.OFFLINE
                if health == 'OK':
                    status = constants.StoragePoolStatus.NORMAL
                total_size = self.parse_string_to_bytes(data.get('total-size'))
                total_avail = self.parse_string_to_bytes(
                    data.get('allocated-size'))
                native_storage_pool_id = ''
                if pool_detail:
                    native_storage_pool_id = pool_detail[0]. \
                        get('serial-number')
                    for pools in pool_detail:
                        if data.get('virtual-disk-name') == pools.\
                                get('name'):
                            native_storage_pool_id = pools.\
                                get('serial-number')
                blocks = data.get('blocks')
                # Default to 0 so a missing blocks property does not
                # crash the int() conversion below.
                blocks = int(blocks) if blocks is not None else 0
                volume_map = {
                    'name': data.get('volume-name'),
                    'storage_id': storage_id,
                    'description': data.get('volume-name'),
                    'status': status,
                    'native_volume_id': str(data.get('durable-id')),
                    'native_storage_pool_id': native_storage_pool_id,
                    'wwn': str(data.get('wwn')),
                    'type': data.get('volume-type'),
                    'total_capacity': int(total_size),
                    # Fixed key name: was misspelled 'free_capacit'.
                    'free_capacity': int(total_size - total_avail),
                    'used_capacity': int(total_avail),
                    'blocks': blocks,
                    'compressed': True,
                    'deduplicated': True
                }
                list_volumes.append(volume_map)
            return list_volumes
        except Exception as e:
            err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    def list_storage_pools(self, storage_id):
        """Return a list of pool model dicts from `show pools`.

        Used capacity is derived by summing the volumes that belong to
        each pool (matched on the pool serial number).
        """
        try:
            pool_infos = self.ssh_pool.do_exec('show pools')
            pool_detail = self.handle_xml_to_json(pool_infos, 'pools')
            volume_list = self.list_storage_volume(storage_id)
            pools_list = []
            if pool_detail:
                for data in pool_detail:
                    volume_size = 0
                    blocks = 0
                    if volume_list:
                        for volume in volume_list:
                            if volume.get('native_storage_pool_id') == data.\
                                    get('serial-number'):
                                volume_size += volume.get('total_capacity')
                                blocks += volume.get('blocks')
                    health = data.get('health')
                    status = constants.StoragePoolStatus.OFFLINE
                    if health == 'OK':
                        status = constants.StoragePoolStatus.NORMAL
                    total_size = self.parse_string_to_bytes(
                        data.get('total-size'))
                    pool_map = {
                        'name': data.get('name'),
                        'storage_id': storage_id,
                        'native_storage_pool_id': data.get('serial-number'),
                        'status': status,
                        'storage_type': constants.StorageType.BLOCK,
                        'total_capacity': int(total_size),
                        'subscribed_capacity': int(blocks),
                        'used_capacity': volume_size,
                        'free_capacity': int(total_size - volume_size)
                    }
                    pools_list.append(pool_map)
            return pools_list
        except Exception as e:
            err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    @staticmethod
    def parse_string_to_bytes(value):
        """Convert a CLI size string (e.g. '1.5GB') to a byte count.

        Plain digit strings are taken as already being bytes; otherwise
        the trailing two characters are treated as the unit suffix.
        Returns 0 for None/empty input.
        """
        capacity = 0
        if value:
            if value.isdigit():
                capacity = float(value)
            else:
                if value == '0B':
                    capacity = 0
                else:
                    unit = value[-2:]
                    capacity = float(value[:-2]) * int(
                        Tools.change_capacity_to_bytes(unit))
        return capacity

    @staticmethod
    def handle_xml_to_json(detail_info, element):
        """Parse CLI XML output into a list of property dicts.

        Strips the first and last line (CLI echo/prompt), parses the
        remainder as XML, and collects the PROPERTY children of every
        OBJECT whose basetype equals `element`.
        """
        detail_arr = []
        detail_data = detail_info.split('\n')
        detail = detail_data[1:len(detail_data) - 1]
        detail_xml = ''.join(detail)
        xml_element = Et.fromstring(detail_xml)
        for children in xml_element.iter('OBJECT'):
            property_name = children.get('basetype')
            if element == property_name:
                msg = {}
                for child in children.iter('PROPERTY'):
                    msg[child.get('name')] = child.text
                detail_arr.append(msg)
        return detail_arr

    def list_alerts(self, query_para):
        """Return current alert models from `show events error`.

        Informational/RESOLVED events are skipped, and duplicate events
        are collapsed to the most recent one per (name, location,
        severity) key.
        """
        alert_list = []
        try:
            alert_infos = self.ssh_pool.do_exec('show events error')
            alert_json = self.handle_xml_to_json(alert_infos, 'events')
            for alert_map in alert_json:
                now = time.time()
                occur_time = int(
                    round(now * consts.SecondsNumber.SECONDS_TO_MS))
                time_stamp = alert_map.get('time-stamp-numeric')
                if time_stamp is not None:
                    occur_time = int(time_stamp) * consts.SecondsNumber\
                        .SECONDS_TO_MS
                    if not alert_util.is_alert_in_time_range(
                            query_para, occur_time):
                        continue
                event_code = alert_map.get('event-code')
                event_id = alert_map.get('event-id')
                location = alert_map.get('message')
                resource_type = alert_map.get('event-code')
                severity = alert_map.get('severity')
                additional_info = str(alert_map.get('additional-information'))
                # Start from '' so concatenation below cannot fail when
                # event-code is absent (was None, which raised TypeError
                # on '+=').
                match_key = ''
                if event_code:
                    match_key = event_code
                if severity:
                    match_key += severity
                if location:
                    match_key += location
                description = None
                if additional_info:
                    description = additional_info
                if severity == 'Informational' or severity == 'RESOLVED':
                    continue
                alert_model = {
                    'alert_id': event_id,
                    'alert_name': event_code,
                    'severity': severity,
                    'category': constants.Category.FAULT,
                    'type': 'EquipmentAlarm',
                    'sequence_number': event_id,
                    'occur_time': occur_time,
                    'description': description,
                    'resource_type': resource_type,
                    'location': location,
                    'match_key': hashlib.md5(match_key.encode()).hexdigest()
                }
                alert_list.append(alert_model)
            alert_list_data = SSHHandler.get_last_alert_data(alert_list)
            return alert_list_data
        except Exception as e:
            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise

    @staticmethod
    def get_last_alert_data(alert_json):
        """Deduplicate alerts, keeping one entry per (name, location,
        severity) group."""
        alert_list = []
        alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))
        for key, item in groupby(alert_json,
                                 key=itemgetter('alert_name', 'location',
                                                'severity')):
            alert_last_index = 0
            alert_list.append(list(item)[alert_last_index])
        return alert_list

    @staticmethod
    def parse_alert(alert):
        """Build an alert model dict from an SNMP trap variable map.

        Raises:
            exception.InvalidResults: if the trap cannot be parsed.
        """
        try:
            alert_model = dict()
            alert_id = None
            description = None
            severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')
            sequence_number = None
            event_type = None
            for alert_key, alert_value in alert.items():
                if consts.AlertOIDNumber.OID_ERR_ID in alert_key:
                    alert_id = str(alert_value)
                elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:
                    event_type = alert_value
                elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:
                    description = alert_value
                elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:
                    severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\
                        .get(alert.get(consts.AlertOIDNumber.OID_SEVERITY),
                             constants.Severity.INFORMATIONAL)
                elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:
                    sequence_number = alert_value
            if description:
                # The alert id is embedded before the first ':' of the
                # description's first comma-separated field.
                desc_arr = description.split(",")
                if desc_arr:
                    alert_id = SSHHandler.split_by_char_and_number(
                        desc_arr[0], ":", 1)
            alert_model['alert_id'] = str(alert_id)
            alert_model['alert_name'] = event_type
            alert_model['severity'] = severity
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['sequence_number'] = sequence_number
            now = time.time()
            alert_model['occur_time'] = int(
                round(now * consts.SecondsNumber.SECONDS_TO_MS))
            alert_model['description'] = description
            alert_model['location'] = description
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = "Failed to build alert model: %s." % (six.text_type(e))
            raise exception.InvalidResults(msg)

    @staticmethod
    def split_by_char_and_number(split_str, split_char, arr_number):
        """Split `split_str` once on `split_char` and return the stripped
        piece at index `arr_number` (0 or 1); '' if unavailable."""
        split_value = ''
        if split_str:
            tmp_value = split_str.split(split_char, 1)
            if arr_number == 1 and len(tmp_value) > 1:
                split_value = tmp_value[arr_number].strip()
            elif arr_number == 0:
                split_value = tmp_value[arr_number].strip()
        return split_value

    @staticmethod
    def handle_xml_to_dict(xml_info, element):
        """Parse CLI XML output into a single property dict.

        Like handle_xml_to_json but merges all matching OBJECTs'
        properties into one dict (later objects overwrite earlier keys).
        """
        msg = {}
        xml_split = xml_info.split('\n')
        xml_data = xml_split[1:len(xml_split) - 1]
        detail_xml = ''.join(xml_data)
        xml_element = Et.fromstring(detail_xml)
        for children in xml_element.iter('OBJECT'):
            property_name = children.get('basetype')
            if element == property_name:
                for child in children.iter('PROPERTY'):
                    msg[child.get('name')] = child.text
        return msg
Example #2
0
class NetAppHandler(object):
    """SSH-based client for NetApp C-mode storage backends."""
    # SNMP OID of the cluster serial number in trap payloads.
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    # SNMP OID carrying the "<name>:<description>" trap text.
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'
    # Factor for converting seconds to milliseconds.
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        # Pool of reusable SSH connections; kwargs are forwarded verbatim.
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def parse_alert(alert):
        """Turn an SNMP trap variable map into a delfin alert model.

        The trap data is expected to look like "<name>:<description>";
        traps whose name is not in constant.SEVERITY_MAP yield an empty
        dict.

        Raises:
            exception.InvalidResults: when the trap cannot be parsed.
        """
        try:
            trap_data = alert.get(NetAppHandler.OID_TRAP_DATA)
            fields = trap_data.split(":")
            parsed = {}
            if len(fields) > 1:
                trap_name, trap_desc = fields[0], fields[1]
                if constant.SEVERITY_MAP.get(trap_name):
                    digest = hashlib.md5(
                        (alert.get(NetAppHandler.OID_TRAP_DATA) +
                         str(time.time())).encode()).hexdigest()
                    parsed = {
                        'alert_id': trap_name,
                        'alert_name': trap_name,
                        'severity': constants.Severity.CRITICAL,
                        'category': constants.Category.EVENT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time': int(time.time()),
                        'description': trap_desc,
                        'match_key': digest,
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': ''
                    }
            return parsed
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def login(self):
        try:
            self.ssh_pool.do_exec('version')
        except Exception as e:
            LOG.error("Failed to login netapp %s" % (six.text_type(e)))
            raise e

    def get_storage(self):
        """Collect the overall storage-system model for the cluster.

        Runs several CLI commands (cluster, version, status, controller)
        and aggregates capacities from the disk and pool listings.

        Raises:
            exception.DelfinException: re-raised from lower layers.
            exception.InvalidResults: for any other failure.
        """
        try:
            raw_capacity = total_capacity = used_capacity = free_capacity = 0
            controller_map = {}
            system_info = self.ssh_pool.do_exec(constant.CLUSTER_SHOW_COMMAND)
            version_info = self.ssh_pool.do_exec(constant.VERSION_SHOW_COMMAND)
            status_info = self.ssh_pool.do_exec(
                constant.STORAGE_STATUS_COMMAND)
            controller_info = self.ssh_pool.do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            # Only the first controller record (after the header split) is
            # parsed into controller_map.
            controller_array = controller_info.split(
                constant.CONTROLLER_SPLIT_STR)
            Tools.split_value_map(controller_array[1], controller_map, ":")
            version_array = version_info.split('\r\n')
            # The firmware version is the text before the first ':' of the
            # first version line.
            version = version_array[0].split(":")
            # Status text is expected on the third line of the status output.
            status = constant.STORAGE_STATUS.get(status_info.split("\r\n")[2])
            disk_list = self.get_disks(None)
            pool_list = self.list_storage_pools(None)
            storage_map = {}
            Tools.split_value_map(system_info, storage_map, split=':')
            for disk in disk_list:
                raw_capacity += disk['capacity']
            for pool in pool_list:
                total_capacity += pool['total_capacity']
                free_capacity += pool['free_capacity']
                used_capacity += pool['used_capacity']
            storage_model = {
                "name": storage_map['ClusterName'],
                "vendor": constant.STORAGE_VENDOR,
                "model": controller_map['Model'],
                "status": status,
                "serial_number": storage_map['ClusterSerialNumber'],
                "firmware_version": version[0],
                "location": controller_map['Location'],
                "total_capacity": total_capacity,
                "raw_capacity": raw_capacity,
                "used_capacity": used_capacity,
                "free_capacity": free_capacity
            }
            return storage_model
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_aggregate(self, storage_id):
        """Return aggregate records mapped to storage-pool model dicts.

        Splits the detailed aggregate listing into per-aggregate text
        blocks and parses each with Tools.split_value_map.
        """
        agg_list = []
        agg_info = self.ssh_pool.do_exec(
            constant.AGGREGATE_SHOW_DETAIL_COMMAND)
        agg_array = agg_info.split(constant.AGGREGATE_SPLIT_STR)
        # NOTE(review): agg_map is shared across iterations, so keys left
        # over from a previous aggregate are visible for the next one if a
        # field is missing — confirm Tools.split_value_map's semantics.
        agg_map = {}
        for agg in agg_array[1:]:
            Tools.split_value_map(agg, agg_map, split=':')
            status = constant.AGGREGATE_STATUS.get(agg_map['State'])
            pool_model = {
                'name':
                # NOTE(review): key 'e' looks like a truncated artifact of
                # split_value_map key normalization (presumably from
                # "...Name") — verify against actual CLI output.
                agg_map['e'],
                'storage_id':
                storage_id,
                'native_storage_pool_id':
                agg_map['UUIDString'],
                'description':
                '',
                'status':
                status,
                'storage_type':
                constants.StorageType.UNIFIED,
                'total_capacity':
                int(Tools.get_capacity_size(agg_map['Size'])),
                'used_capacity':
                int(Tools.get_capacity_size(agg_map['UsedSize'])),
                'free_capacity':
                int(Tools.get_capacity_size(agg_map['AvailableSize'])),
            }
            agg_list.append(pool_model)
        return agg_list

    def get_pool(self, storage_id):
        """Return storage-pool records mapped to pool model dicts.

        Splits the detailed pool listing into per-pool text blocks and
        parses each with Tools.split_value_map.
        """
        pool_list = []
        pool_info = self.ssh_pool.do_exec(constant.POOLS_SHOW_DETAIL_COMMAND)
        pool_array = pool_info.split(constant.POOLS_SPLIT_STR)
        # NOTE(review): pool_map is shared across iterations; stale keys
        # from a previous pool can mask missing fields — confirm
        # Tools.split_value_map's semantics.
        pool_map = {}
        for pool_str in pool_array[1:]:
            Tools.split_value_map(pool_str, pool_map, split=':')
            status = constants.StoragePoolStatus.ABNORMAL
            if pool_map['IsPoolHealthy?'] == 'true':
                status = constants.StoragePoolStatus.NORMAL
            pool_model = {
                'name':
                # NOTE(review): key 'ame' looks like a truncated artifact
                # of split_value_map key normalization (presumably from
                # "...Name") — verify against actual CLI output.
                pool_map['ame'],
                'storage_id':
                storage_id,
                'native_storage_pool_id':
                pool_map['UUIDofStoragePool'],
                'description':
                '',
                'status':
                status,
                'storage_type':
                constants.StorageType.UNIFIED,
                'total_capacity':
                int(Tools.get_capacity_size(pool_map['StoragePoolTotalSize'])),
                'used_capacity':
                int(Tools.get_capacity_size(
                    pool_map['StoragePoolTotalSize'])) -
                int(Tools.get_capacity_size(
                    pool_map['StoragePoolUsableSize'])),
                'free_capacity':
                int(Tools.get_capacity_size(pool_map['StoragePoolUsableSize']))
            }
            pool_list.append(pool_model)
        return pool_list

    def list_storage_pools(self, storage_id):
        try:
            pool_list = self.get_pool(storage_id)
            agg_list = self.get_aggregate(storage_id)
            return agg_list + pool_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """Return LUN records mapped to delfin volume model dicts.

        Each LUN is linked to its pool via the filesystem list (matching
        on the containing volume name).

        Raises:
            exception.DelfinException: re-raised from lower layers.
            exception.InvalidResults: for any other failure.
        """
        try:
            volume_list = []
            volume_info = self.ssh_pool.do_exec(
                constant.LUN_SHOW_DETAIL_COMMAND)
            volume_array = volume_info.split(constant.LUN_SPLIT_STR)
            fs_list = self.get_filesystems(storage_id)
            # NOTE(review): volume_map is shared across iterations; stale
            # keys from a previous LUN can mask missing fields — confirm
            # Tools.split_value_map's semantics.
            volume_map = {}
            for volume_str in volume_array[1:]:
                Tools.split_value_map(volume_str, volume_map, split=':')
                # Fixed condition: the original
                # `volume_map is not None or volume_map != {}` was always
                # true; the intent is to skip records that parsed empty.
                if volume_map:
                    pool_id = ''
                    status = 'normal' if volume_map['State'] == 'online' \
                        else 'offline'
                    for fs in fs_list:
                        if fs['name'] == volume_map['VolumeName']:
                            pool_id = fs['native_pool_id']
                    type = constants.VolumeType.THIN \
                        if volume_map['SpaceAllocation'] == 'enabled' \
                        else constants.VolumeType.THICK
                    volume_model = {
                        'name':
                        volume_map['LUNName'],
                        'storage_id':
                        storage_id,
                        'description':
                        '',
                        'status':
                        status,
                        'native_volume_id':
                        volume_map['LUNUUID'],
                        'native_storage_pool_id':
                        pool_id,
                        'wwn':
                        '',
                        'compressed':
                        '',
                        'deduplicated':
                        '',
                        'type':
                        type,
                        'total_capacity':
                        int(Tools.get_capacity_size(volume_map['LUNSize'])),
                        'used_capacity':
                        int(Tools.get_capacity_size(volume_map['UsedSize'])),
                        'free_capacity':
                        int(Tools.get_capacity_size(volume_map['LUNSize'])) -
                        int(Tools.get_capacity_size(volume_map['UsedSize']))
                    }
                    volume_list.append(volume_model)
            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_events(self, query_para):
        """Return event records as alert model dicts, filtered by the
        optional {'begin_time', 'end_time'} range in query_para.
        """
        event_list = []
        event_info = self.ssh_pool.do_exec(constant.EVENT_SHOW_DETAIL_COMMAND)
        event_array = event_info.split(constant.ALTER_SPLIT_STR)
        # NOTE(review): event_map is shared across iterations; stale keys
        # from a previous event can mask missing fields — confirm
        # Tools.split_value_map's semantics.
        event_map = {}

        for event_str in event_array[1:]:
            Tools.split_value_map(event_str, event_map, split=':')
            # Event time is parsed from the device's textual timestamp into
            # epoch seconds.
            occur_time = int(
                time.mktime(
                    time.strptime(event_map['Time'],
                                  constant.EVENT_TIME_TYPE)))
            if query_para is None or \
                    (query_para['begin_time']
                     <= occur_time
                     <= query_para['end_time']):
                alert_model = {
                    'alert_id':
                    event_map['Sequence#'],
                    'alert_name':
                    event_map['MessageName'],
                    'severity':
                    constants.Severity.CRITICAL,
                    'category':
                    constants.Category.EVENT,
                    'type':
                    constants.EventType.EQUIPMENT_ALARM,
                    'occur_time':
                    occur_time,
                    'description':
                    event_map['Event'],
                    'match_key':
                    hashlib.md5((event_map['Sequence#'] +
                                 str(occur_time)).encode()).hexdigest(),
                    'resource_type':
                    constants.DEFAULT_RESOURCE_TYPE,
                    'location':
                    event_map['Source']
                }
                event_list.append(alert_model)
        return event_list

    def get_alerts(self, query_para):
        """Return active alert records, optionally time-filtered.

        Each alert section of the SSH output becomes one delfin alert
        dict; *query_para* (when not ``None``) restricts results to
        ``[begin_time, end_time]``.
        """
        alerts = []
        raw = self.ssh_pool.do_exec(constant.ALTER_SHOW_DETAIL_COMMAND)
        sections = raw.split(constant.ALTER_SPLIT_STR)
        # Shared across sections: fields absent from one alert keep the
        # value parsed from the previous one.
        alert_map = {}
        for section in sections[1:]:
            Tools.split_value_map(section, alert_map, split=':')
            occur_time = int(
                time.mktime(
                    time.strptime(alert_map['IndicationTime'],
                                  constant.ALTER_TIME_TYPE)))
            in_window = (
                query_para is None
                or query_para['begin_time']
                <= occur_time
                <= query_para['end_time'])
            if not in_window:
                continue
            alerts.append({
                'alert_id': alert_map['AlertID'],
                'alert_name': alert_map['ProbableCause'],
                'severity':
                    constant.ALERT_SEVERITY[alert_map['PerceivedSeverity']],
                'category': constants.Category.FAULT,
                'type': constants.EventType.EQUIPMENT_ALARM,
                'occur_time': occur_time,
                'description': alert_map['Description'],
                'match_key': hashlib.md5(
                    (alert_map['AlertID'] +
                     str(occur_time)).encode()).hexdigest(),
                'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                'location': alert_map['AlertingResourceName'],
            })
        return alerts

    def list_alerts(self, query_para):
        """Collect events and alerts via two separate queries."""
        try:
            return self.get_events(query_para) + self.get_alerts(query_para)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        """Clear (acknowledge) one alert on the array.

        :param alert: dict carrying the ``alert_id`` of the alert to
            clear; it is appended to the array's clear command.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            ssh_command = \
                constant.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.ssh_pool.do_exec(ssh_command)
        except exception.DelfinException as e:
            # Bug fix: the original message said "get storage alert",
            # which is misleading for a clear operation.
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disks(self, storage_id):
        """Build delfin disk models from the detailed disk listing.

        Combines two SSH outputs: a per-disk ``key: value`` detail dump
        and a tabular physical listing used to enrich each disk with
        media type, speed and firmware.
        """
        disks_list = []
        physicals_list = []
        disks_info = self.ssh_pool.do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
        disks_array = disks_info.split(constant.DISK_SPLIT_STR)
        physicals_info = self.ssh_pool.do_exec(
            constant.DISK_SHOW_PHYSICAL_COMMAND)
        disks_map = {}
        physical_array = physicals_info.split('\r\n')
        # Every second line starting at index 2 is taken as a data row;
        # presumably the other lines are headers/separators — TODO confirm.
        for i in range(2, len(physical_array), 2):
            physicals_list.append(physical_array[i].split())
        for disk_str in disks_array[1:]:
            speed = physical_type = firmware = None
            # NOTE: disks_map is shared across iterations; fields absent
            # from one section keep the previous disk's value.
            Tools.split_value_map(disk_str, disks_map, split=':')
            logical_type = constant.DISK_LOGICAL. \
                get(disks_map['ContainerType'])
            """Map disk physical information"""
            for physical_info in physicals_list:
                # 'k' looks like the disk-name key emitted by
                # split_value_map — TODO confirm against Tools.
                if len(physical_info) > 6 \
                        and physical_info[0] == disks_map['k']:
                    physical_type = constant.DISK_TYPE.get(physical_info[1])
                    speed = physical_info[5]
                    firmware = physical_info[4]
                    break
            status = constants.DiskStatus.ABNORMAL
            # An empty/absent 'Errors:' field means the disk is healthy.
            if disks_map['Errors:'] is None or disks_map['Errors:'] == "":
                status = constants.DiskStatus.NORMAL
            disk_model = {
                'name': disks_map['k'],
                'storage_id': storage_id,
                'native_disk_id': disks_map['k'],
                'serial_number': disks_map['SerialNumber'],
                'manufacturer': disks_map['Vendor'],
                'model': disks_map['Model'],
                'firmware': firmware,
                'speed': speed,
                'capacity':
                int(Tools.get_capacity_size(disks_map['PhysicalSize'])),
                'status': status,
                'physical_type': physical_type,
                'logical_type': logical_type,
                'health_score': '',
                'native_disk_group_id': disks_map['Aggregate'],
                'location': '',
            }
            disks_list.append(disk_model)
        return disks_list

    def get_filesystems(self, storage_id):
        """List filesystems (volumes) as delfin filesystem models.

        Resolves each filesystem's pool by aggregate name, derives
        thin/thick provisioning from the thin-volume table, and maps
        dedup/compression flags from the detail output.
        """
        fs_list = []
        fs_info = self.ssh_pool.do_exec(constant.FS_SHOW_DETAIL_COMMAND)
        fs_array = fs_info.split(constant.FS_SPLIT_STR)
        thin_fs_info = self.ssh_pool.do_exec(constant.THIN_FS_SHOW_COMMAND)
        pool_list = self.list_storage_pools(storage_id)
        thin_fs_array = thin_fs_info.split("\r\n")
        fs_map = {}
        for fs_str in fs_array[1:]:
            type = constants.FSType.THICK
            Tools.split_value_map(fs_str, fs_map, split=':')
            # Bug fix: the original guard was
            # `fs_map is not None or fs_map != {}`, which is always true
            # (fs_map is always a dict).  Only non-empty maps describe a
            # filesystem; an empty one would KeyError below.
            if fs_map:
                pool_id = ""
                """get pool id"""
                for pool in pool_list:
                    if pool['name'] == fs_map['AggregateName']:
                        pool_id = pool['native_storage_pool_id']
                deduplicated = True
                if fs_map['SpaceSavedbyDeduplication'] == '0B':
                    deduplicated = False
                # Rows 0-1 of the thin table are headers; match by
                # volume name in column 1.
                if len(thin_fs_array) > 2:
                    for thin_vol in thin_fs_array[2:]:
                        thin_array = thin_vol.split()
                        if len(thin_array) > 4:
                            if thin_array[1] == fs_map['VolumeName']:
                                type = constants.VolumeType.THIN
                compressed = True
                if fs_map['VolumeContainsSharedorCompressedData'] == \
                        'false':
                    compressed = False
                status = constant.FS_STATUS.get(fs_map['VolumeState'])
                fs_model = {
                    'name':
                    fs_map['VolumeName'],
                    'storage_id':
                    storage_id,
                    'native_filesystem_id':
                    fs_map['VolumeName'],
                    'native_pool_id':
                    pool_id,
                    'compressed':
                    compressed,
                    'deduplicated':
                    deduplicated,
                    'worm':
                    fs_map['SnapLockType'],
                    'status':
                    status,
                    'type':
                    type,
                    'total_capacity':
                    int(Tools.get_capacity_size(fs_map['VolumeSize'])),
                    'used_capacity':
                    int(Tools.get_capacity_size(fs_map['UsedSize'])),
                    'free_capacity':
                    int(Tools.get_capacity_size(fs_map['VolumeSize'])) -
                    int(Tools.get_capacity_size(fs_map['UsedSize']))
                }
                fs_list.append(fs_model)
        return fs_list
Beispiel #3
0
class NetAppHandler(object):
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'
    NODE_NAME = 'controller_name'
    SECONDS_TO_MS = 1000

    def __init__(self, **kwargs):
        """Create the handler and its backing SSH connection pool.

        :param kwargs: connection settings forwarded verbatim to
            ``SSHPool`` (host, port, credentials, ...).
        """
        self.ssh_pool = SSHPool(**kwargs)

    @staticmethod
    def get_table_data(values):
        """Return the data rows of a tabular CLI output.

        Finds the last line matching the header/separator pattern and
        returns everything after it.
        """
        rows = values.split("\r\n")
        header_index = 0
        for index, row in enumerate(rows):
            if constant.PATTERN.search(row):
                header_index = index
        return rows[header_index + 1:]

    @staticmethod
    def get_fs_id(vserver, volume):
        return vserver + '_' + volume

    @staticmethod
    def get_qt_id(vserver, volume, qtree):
        qt_id = vserver + '/' + volume
        if qtree != '':
            qt_id += '/' + qtree
        return qt_id

    def ssh_do_exec(self, command):
        """Run *command* through the SSH pool with a 10 s soft timeout.

        The timeout is non-raising (``eventlet.Timeout(10, False)``):
        on expiry the ``with`` block is simply exited and the default
        ``''`` is returned instead of an exception.
        """
        res = ''
        with eventlet.Timeout(10, False):
            res = self.ssh_pool.do_exec(command)
        return res

    @staticmethod
    def get_size(limit, is_calculate=False):
        if limit == '0B':
            return 0
        if limit == '-':
            return 0 if is_calculate else '-'
        return int(Tools.get_capacity_size(limit))

    @staticmethod
    def parse_alert(alert):
        try:
            alert_info = alert.get(NetAppHandler.OID_TRAP_DATA)
            node_name = alert.get(NetAppHandler.NODE_NAME)
            alert_info = alert_info.replace("]", '')
            alert_array = alert_info.split("[")
            alert_model = {}
            alert_map = {}
            if len(alert_array) > 1:
                category = constants.Category.RECOVERY \
                    if 'created' in alert_array[0] \
                    else constants.Category.RECOVERY
                alert_values = alert_array[1].split(",")
                for alert_value in alert_values:
                    array = alert_value.split("=")
                    if len(array) > 1:
                        key = array[0].replace(' ', '')
                        value = array[1].replace(' ', '').replace('.', '')
                        alert_map[key] = value
                if alert_map:
                    alert_map_info = \
                        alert_template.ALERT_TEMPLATE.get(
                            alert_map.get('AlertId'))
                    severity = description = location = ''
                    if alert_map_info:
                        severity = constant.ALERT_SEVERITY[
                            alert_map_info['severityofAlert']]
                        location = \
                            alert_map_info['probableCause'] +\
                            ':' + alert_map_info['PossibleEffect']
                        description = alert_map_info['description']
                    alert_model = {
                        'alert_id':
                        alert_map.get('AlertId'),
                        'alert_name':
                        alert_map.get('AlertId'),
                        'severity':
                        severity,
                        'category':
                        category,
                        'type':
                        constants.EventType.EQUIPMENT_ALARM,
                        'occur_time':
                        utils.utcnow_ms(),
                        'description':
                        description,
                        'match_key':
                        hashlib.md5((alert_map.get('AlertId') + node_name +
                                     alert_map['AlertingResource']
                                     ).encode()).hexdigest(),
                        'resource_type':
                        constants.DEFAULT_RESOURCE_TYPE,
                        'location':
                        location
                    }
            return alert_model
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def login(self):
        """Verify SSH connectivity by issuing a harmless cluster command."""
        try:
            result = self.ssh_do_exec('cluster identity show')
            bad_cli = ('is not a recognized command' in result
                       or 'command not found' in result)
            if bad_cli:
                # The target answers SSH but does not speak ONTAP CLI.
                raise exception.InvalidIpOrPort()
        except Exception as e:
            LOG.error("Failed to login netapp %s" % (six.text_type(e)))
            raise e

    def get_storage(self):
        """Build the top-level storage model for the cluster.

        Capacity figures are aggregated from disks (raw) and pools
        (total/used/free); identity fields come from the cluster,
        version, status and controller detail commands.

        :returns: storage model dict, or ``None`` when the cluster
            identity output yields no parsed sections.
        :raises exception.InvalidResults: on unexpected parse errors.
        """
        try:
            raw_capacity = total_capacity = used_capacity = free_capacity = 0
            controller_map_list = []
            system_info = self.ssh_do_exec(constant.CLUSTER_SHOW_COMMAND)
            version_info = self.ssh_do_exec(constant.VERSION_SHOW_COMMAND)
            status_info = self.ssh_do_exec(constant.STORAGE_STATUS_COMMAND)
            controller_info = self.ssh_do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            Tools.split_value_map_list(controller_info, controller_map_list,
                                       ":")
            version_array = version_info.split("\r\n")
            storage_version = ''
            # First line mentioning 'NetApp' carries the release string.
            # NOTE(review): split(":") makes storage_version a list and
            # only element 0 (text before the first colon) is reported
            # as firmware_version below — confirm that is intended.
            for version in version_array:
                if 'NetApp' in version:
                    storage_version = version.split(":")
                    break
            status = self.get_table_data(status_info)
            status = constant.STORAGE_STATUS.get(status[0].split()[0])
            disk_list = self.get_disks(None)
            pool_list = self.list_storage_pools(None)
            storage_map_list = []
            Tools.split_value_map_list(system_info,
                                       storage_map_list,
                                       split=':')
            if len(storage_map_list) > 0:
                # The parsers may emit leading empty maps; the last entry
                # holds the fully parsed key/value section.
                storage_map = storage_map_list[len(storage_map_list) - 1]
                controller_map = \
                    controller_map_list[len(controller_map_list) - 1]
                for disk in disk_list:
                    raw_capacity += disk['capacity']
                for pool in pool_list:
                    total_capacity += pool['total_capacity']
                    free_capacity += pool['free_capacity']
                    used_capacity += pool['used_capacity']
                storage_model = {
                    "name": storage_map['ClusterName'],
                    "vendor": constant.STORAGE_VENDOR,
                    "model": controller_map['Model'],
                    "status": status,
                    "serial_number": storage_map['ClusterSerialNumber'],
                    "firmware_version": storage_version[0],
                    "location": controller_map['Location'],
                    "total_capacity": total_capacity,
                    "raw_capacity": raw_capacity,
                    "used_capacity": used_capacity,
                    "free_capacity": free_capacity
                }
                return storage_model
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_aggregate(self, storage_id):
        """List ONTAP aggregates as delfin storage-pool models."""
        agg_list = []
        agg_info = self.ssh_do_exec(constant.AGGREGATE_SHOW_DETAIL_COMMAND)
        agg_maps = []
        Tools.split_value_map_list(agg_info, agg_maps, split=':')
        for agg in agg_maps:
            # Skip empty sections and sections without an aggregate name.
            if not agg or 'Aggregate' not in agg:
                continue
            agg_list.append({
                'name': agg['Aggregate'],
                'storage_id': storage_id,
                'native_storage_pool_id': agg['UUIDString'],
                'description': None,
                'status': constant.AGGREGATE_STATUS.get(agg['State']),
                'storage_type': constants.StorageType.UNIFIED,
                'total_capacity': self.get_size(agg['Size'], True),
                'used_capacity': self.get_size(agg['UsedSize'], True),
                'free_capacity': self.get_size(agg['AvailableSize'], True),
            })
        return agg_list

    def get_pool(self, storage_id):
        """List ONTAP storage pools as delfin pool models."""
        pool_list = []
        pool_info = self.ssh_do_exec(constant.POOLS_SHOW_DETAIL_COMMAND)
        pool_maps = []
        Tools.split_value_map_list(pool_info, pool_maps, split=':')
        for pool_map in pool_maps:
            # Skip empty sections and sections without a pool name.
            if not pool_map or 'StoragePoolName' not in pool_map:
                continue
            healthy = pool_map['IsPoolHealthy?'] == 'true'
            total = self.get_size(pool_map['StoragePoolTotalSize'], True)
            usable = self.get_size(pool_map['StoragePoolUsableSize'], True)
            pool_list.append({
                'name': pool_map['StoragePoolName'],
                'storage_id': storage_id,
                'native_storage_pool_id': pool_map['UUIDofStoragePool'],
                'description': None,
                'status': constants.StoragePoolStatus.NORMAL
                if healthy else constants.StoragePoolStatus.ABNORMAL,
                'storage_type': constants.StorageType.UNIFIED,
                'total_capacity': total,
                # Used space is derived: total minus usable.
                'used_capacity': total - usable,
                'free_capacity': usable,
            })
        return pool_list

    def list_storage_pools(self, storage_id):
        """Return aggregates and storage pools as one combined list."""
        try:
            pools = self.get_pool(storage_id)
            aggregates = self.get_aggregate(storage_id)
            return aggregates + pools
        except exception.DelfinException as e:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage pool from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_volumes(self, storage_id):
        """List LUNs as delfin volume models.

        Each LUN's pool id is resolved through the filesystem
        (FlexVol) that contains it; thin/thick provisioning follows
        the LUN's space-allocation setting.

        :raises exception.InvalidResults: on unexpected parse errors.
        """
        try:
            volume_list = []
            volume_info = self.ssh_do_exec(constant.LUN_SHOW_DETAIL_COMMAND)
            fs_list = self.get_filesystems(storage_id)
            volume_map_list = []
            Tools.split_value_map_list(volume_info, volume_map_list, split=':')
            for volume_map in volume_map_list:
                if volume_map and 'LUNName' in volume_map.keys():
                    pool_id = None
                    status = 'normal' if volume_map['State'] == 'online' \
                        else 'offline'
                    # A LUN lives inside a FlexVol; inherit that
                    # filesystem's pool.
                    for fs in fs_list:
                        if fs['name'] == volume_map['VolumeName']:
                            pool_id = fs['native_pool_id']
                    type = constants.VolumeType.THIN \
                        if volume_map['SpaceAllocation'] == 'enabled' \
                        else constants.VolumeType.THICK
                    volume_model = {
                        'name':
                        volume_map['LUNName'],
                        'storage_id':
                        storage_id,
                        'description':
                        None,
                        'status':
                        status,
                        'native_volume_id':
                        volume_map['SerialNumber'],
                        'native_storage_pool_id':
                        pool_id,
                        'wwn':
                        None,
                        'compressed':
                        None,
                        'deduplicated':
                        None,
                        'type':
                        type,
                        'total_capacity':
                        self.get_size(volume_map['LUNSize'], True),
                        'used_capacity':
                        self.get_size(volume_map['UsedSize'], True),
                        'free_capacity':
                        self.get_size(volume_map['LUNSize'], True) -
                        self.get_size(volume_map['UsedSize'], True)
                    }
                    volume_list.append(volume_model)
            return volume_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage volume from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_alerts(self, query_para):
        """Return active alerts, optionally bounded by a time window.

        Parses the detailed alert listing into per-alert maps and keeps
        only those whose indication time lies inside
        ``[begin_time, end_time]`` when *query_para* is given.
        """
        alert_list = []
        alert_info = self.ssh_do_exec(constant.ALTER_SHOW_DETAIL_COMMAND)
        alert_map_list = []
        Tools.split_value_map_list(alert_info, alert_map_list, True, split=':')
        for alert_map in alert_map_list:
            if alert_map and 'AlertID' in alert_map.keys():
                occur_time = int(
                    time.mktime(
                        time.strptime(alert_map['IndicationTime'],
                                      constant.ALTER_TIME_TYPE)))
                if not query_para or \
                        (int(query_para['begin_time'])
                         <= occur_time
                         <= int(query_para['end_time'])):
                    alert_model = {
                        'alert_id':
                        alert_map['AlertID'],
                        'alert_name':
                        alert_map['AlertID'],
                        'severity':
                        constant.ALERT_SEVERITY[
                            alert_map['PerceivedSeverity']],
                        'category':
                        constants.Category.FAULT,
                        'type':
                        constants.EventType.EQUIPMENT_ALARM,
                        # Seconds -> milliseconds for delfin's contract.
                        'occur_time':
                        occur_time * 1000,
                        'description':
                        alert_map['Description'],
                        'sequence_number':
                        alert_map['AlertID'],
                        'match_key':
                        hashlib.md5((alert_map['AlertID'] + alert_map['Node'] +
                                     alert_map['AlertingResource']
                                     ).encode()).hexdigest(),
                        'resource_type':
                        constants.DEFAULT_RESOURCE_TYPE,
                        'location':
                        alert_map['ProbableCause'] + ':' +
                        alert_map['PossibleEffect']
                    }
                    alert_list.append(alert_model)
        return alert_list

    def list_alerts(self, query_para):
        """Return all active alerts for the optional time window."""
        try:
            return self.get_alerts(query_para)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        """Clear (acknowledge) one alert on the cluster.

        :param alert: dict carrying the ``alert_id`` of the alert to
            clear; it is appended to the cluster's clear command.
        :raises exception.InvalidResults: on any unexpected failure.
        """
        try:
            ssh_command = \
                constant.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.ssh_do_exec(ssh_command)
        except exception.DelfinException as e:
            # Bug fix: the original message said "get storage alert",
            # which is misleading for a clear operation.
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disks(self, storage_id):
        """Build delfin disk models from three SSH listings.

        Combines the per-disk detail dump, a tabular physical listing
        (media type, firmware, speed) and the error-disk table used to
        mark failed disks ABNORMAL.
        """
        disks_list = []
        physicals_list = []
        disks_info = self.ssh_do_exec(constant.DISK_SHOW_DETAIL_COMMAND)
        physicals_info = self.ssh_do_exec(constant.DISK_SHOW_PHYSICAL_COMMAND)
        error_disk = self.ssh_do_exec(constant.DISK_ERROR_COMMAND)
        error_disk_list = []
        error_disk_array = self.get_table_data(error_disk)
        # Column 0 of each error row is the disk name.
        for error_disk in error_disk_array:
            error_array = error_disk.split()
            if len(error_array) > 2:
                error_disk_list.append(error_array[0])
        disks_map_list = []
        physical_array = self.get_table_data(physicals_info)
        for physical in physical_array:
            physicals_list.append(physical.split())
        Tools.split_value_map_list(disks_info, disks_map_list, split=':')
        for disks_map in disks_map_list:
            if disks_map and 'Disk' in disks_map.keys():
                speed = physical_type = firmware = None
                logical_type = constant.DISK_LOGICAL. \
                    get(disks_map['ContainerType'])
                """Map disk physical information"""
                for physical_info in physicals_list:
                    if len(physical_info) > 6 and \
                            physical_info[0] == disks_map['Disk']:
                        physical_type = \
                            constant.DISK_TYPE.get(physical_info[1])
                        # '-' means the CLI reported no speed; use 0.
                        speed = physical_info[5] \
                            if physical_info[5] != '-' else 0
                        firmware = physical_info[4]
                status = constants.DiskStatus.NORMAL
                if disks_map['Disk'] in error_disk_list:
                    status = constants.DiskStatus.ABNORMAL
                disk_model = {
                    'name': disks_map['Disk'],
                    'storage_id': storage_id,
                    'native_disk_id': disks_map['Disk'],
                    'serial_number': disks_map['SerialNumber'],
                    'manufacturer': disks_map['Vendor'],
                    'model': disks_map['Model'],
                    'firmware': firmware,
                    'speed': speed,
                    'capacity': self.get_size(disks_map['PhysicalSize'], True),
                    'status': status,
                    'physical_type': physical_type,
                    'logical_type': logical_type,
                    'native_disk_group_id': disks_map['Aggregate'],
                    'location': None,
                }
                disks_list.append(disk_model)
        return disks_list

    def get_filesystems(self, storage_id):
        """List filesystems (FlexVols) as delfin filesystem models.

        Resolves pools by aggregate name, derives thin/thick type from
        the thin-volume table, and maps dedup/compression/worm flags.
        Filesystems with a non-positive total capacity are dropped.
        """
        fs_list = []
        fs_info = self.ssh_do_exec(constant.FS_SHOW_DETAIL_COMMAND)
        thin_fs_info = self.ssh_do_exec(constant.THIN_FS_SHOW_COMMAND)
        pool_list = self.list_storage_pools(storage_id)
        thin_fs_array = self.get_table_data(thin_fs_info)
        fs_map_list = []
        Tools.split_value_map_list(fs_info, fs_map_list, split=':')
        for fs_map in fs_map_list:
            type = constants.FSType.THICK
            if fs_map and 'VolumeName' in fs_map.keys():
                pool_id = ""
                """get pool id"""
                for pool in pool_list:
                    if pool['name'] == fs_map['AggregateName']:
                        pool_id = pool['native_storage_pool_id']
                deduplicated = True
                if fs_map['SpaceSavedbyDeduplication'] == '0B':
                    deduplicated = False
                # NOTE(review): the guard checks len > 2 but iterates the
                # whole table — presumably get_table_data already stripped
                # the header rows; confirm the guard is still needed.
                if len(thin_fs_array) > 2:
                    for thin_vol in thin_fs_array:
                        thin_array = thin_vol.split()
                        if len(thin_array) > 4:
                            if thin_array[1] == fs_map['VolumeName']:
                                type = constants.VolumeType.THIN
                compressed = True
                if fs_map['VolumeContainsSharedorCompressedData'] == \
                        'false':
                    compressed = False
                status = constant.FS_STATUS.get(fs_map['VolumeState'])
                fs_id = self.get_fs_id(fs_map['VserverName'],
                                       fs_map['VolumeName'])
                fs_model = {
                    'name':
                    fs_map['VolumeName'],
                    'storage_id':
                    storage_id,
                    'native_filesystem_id':
                    fs_id,
                    'native_pool_id':
                    pool_id,
                    'compressed':
                    compressed,
                    'deduplicated':
                    deduplicated,
                    'worm':
                    constant.WORM_TYPE.get(fs_map['SnapLockType']),
                    'status':
                    status,
                    'security_mode':
                    constant.SECURITY_STYLE.get(fs_map['SecurityStyle'],
                                                fs_map['SecurityStyle']),
                    'type':
                    type,
                    'total_capacity':
                    self.get_size(fs_map['VolumeSize']),
                    'used_capacity':
                    self.get_size(fs_map['VolumeSize'], True) -
                    self.get_size(fs_map['AvailableSize'], True),
                    'free_capacity':
                    self.get_size(fs_map['AvailableSize'])
                }
                # get_size may return '-' for unreported sizes; only keep
                # filesystems with a real positive capacity.
                if fs_model['total_capacity'] != '-' \
                        and fs_model['total_capacity'] > 0:
                    fs_list.append(fs_model)
        return fs_list

    def list_controllers(self, storage_id):
        """List cluster nodes as delfin controller models.

        Management IPs come from a separate tabular listing keyed by
        node name; health 'true' maps to NORMAL, otherwise OFFLINE.

        :raises exception.InvalidResults: on unexpected parse errors.
        """
        try:
            controller_list = []
            controller_info = self.ssh_do_exec(
                constant.CONTROLLER_SHOW_DETAIL_COMMAND)
            controller_ips = self.ssh_do_exec(constant.CONTROLLER_IP_COMMAND)
            ips_array = self.get_table_data(controller_ips)
            ip_map = {}
            # Rows with 4 columns hold: ..., node name (col 2), IP (col 3).
            for ips in ips_array:
                ip_array = ips.split()
                if len(ip_array) == 4:
                    ip_map[ip_array[2]] = ip_array[3]
            controller_map_list = []
            Tools.split_value_map_list(controller_info,
                                       controller_map_list,
                                       split=':')
            for controller_map in controller_map_list:
                if controller_map and 'Node' in controller_map.keys():
                    status = constants.ControllerStatus.NORMAL \
                        if controller_map['Health'] == 'true' \
                        else constants.ControllerStatus.OFFLINE
                    controller_model = {
                        'name': controller_map['Node'],
                        'storage_id': storage_id,
                        'native_controller_id': controller_map['SystemID'],
                        'status': status,
                        'location': controller_map['Location'],
                        'soft_version': None,
                        'cpu_info': None,
                        'memory_size': None,
                        'mgmt_ip': ip_map.get(controller_map['Node'])
                    }
                    controller_list.append(controller_model)
            return controller_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage controllers from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage controllers from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_eth_port(self, storage_id):
        """Return Ethernet port models parsed from the port detail listing.

        :param storage_id: id the ports are attributed to.
        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            eth_list = []
            eth_info = self.ssh_do_exec(constant.PORT_SHOW_DETAIL_COMMAND)

            eth_map_list = []
            Tools.split_value_map_list(eth_info, eth_map_list, split=':')
            for eth_map in eth_map_list:
                # Only maps carrying a 'Port' key describe a port entry.
                if eth_map and 'Port' in eth_map.keys():
                    logical_type = constant.ETH_LOGICAL_TYPE.get(
                        eth_map['PortType'])
                    port_id = \
                        eth_map['Node'] + '_' + eth_map['Port']
                    eth_model = {
                        'name':
                        eth_map['Port'],
                        'storage_id':
                        storage_id,
                        'native_port_id':
                        port_id,
                        'location':
                        eth_map['Node'] + ':' + eth_map['Port'],
                        'connection_status':
                        constants.PortConnectionStatus.CONNECTED
                        if eth_map['Link'] == 'up' else
                        constants.PortConnectionStatus.DISCONNECTED,
                        'health_status':
                        constants.PortHealthStatus.NORMAL
                        if eth_map['PortHealthStatus'] == 'healthy' else
                        constants.PortHealthStatus.ABNORMAL,
                        'type':
                        constants.PortType.ETH,
                        'logical_type':
                        logical_type,
                        # '-' means the CLI reported no operational speed.
                        'speed':
                        int(eth_map['SpeedOperational']) *
                        units.Mi if eth_map['SpeedOperational'] != '-' else 0,
                        'max_speed':
                        int(eth_map['SpeedOperational']) *
                        units.Mi if eth_map['SpeedOperational'] != '-' else 0,
                        'native_parent_id':
                        None,
                        'wwn':
                        None,
                        'mac_address':
                        eth_map['MACAddress'],
                        'ipv4':
                        None,
                        'ipv4_mask':
                        None,
                        'ipv6':
                        None,
                        'ipv6_mask':
                        None,
                    }
                    eth_list.append(eth_model)
            return eth_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_fc_port(self, storage_id):
        """Return FC port models parsed from the FC port detail listing.

        :param storage_id: id the ports are attributed to.
        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            fc_list = []
            fc_info = self.ssh_do_exec(constant.FC_PORT_SHOW_DETAIL_COMMAND)
            fc_map_list = []
            Tools.split_value_map_list(fc_info, fc_map_list, split=':')
            for fc_map in fc_map_list:
                # Only maps carrying a 'Node' key describe a port entry.
                if fc_map and 'Node' in fc_map.keys():
                    # Renamed from `type` to avoid shadowing the builtin.
                    port_type = constant.FC_TYPE.get(
                        fc_map['PhysicalProtocol'])
                    port_id = \
                        fc_map['Node'] + '_' + fc_map['Adapter']
                    fc_model = {
                        'name':
                        fc_map['Node'] + ':' + fc_map['Adapter'],
                        'storage_id':
                        storage_id,
                        'native_port_id':
                        port_id,
                        'location':
                        fc_map['Node'] + ':' + fc_map['Adapter'],
                        'connection_status':
                        constants.PortConnectionStatus.CONNECTED
                        if fc_map['AdministrativeStatus'] == 'up' else
                        constants.PortConnectionStatus.DISCONNECTED,
                        'health_status':
                        constants.PortHealthStatus.NORMAL
                        if fc_map['OperationalStatus'] == 'online' else
                        constants.PortHealthStatus.ABNORMAL,
                        'type':
                        port_type,
                        'logical_type':
                        None,
                        # '-' means the CLI reported no link rate.
                        'speed':
                        int(fc_map['DataLinkRate(Gbit)']) *
                        units.Gi if fc_map['DataLinkRate(Gbit)'] != '-' else 0,
                        'max_speed':
                        int(fc_map['MaximumSpeed']) *
                        units.Gi if fc_map['MaximumSpeed'] != '-' else 0,
                        'native_parent_id':
                        None,
                        'wwn':
                        fc_map['AdapterWWNN'],
                        'mac_address':
                        None,
                        'ipv4':
                        None,
                        'ipv4_mask':
                        None,
                        'ipv6':
                        None,
                        'ipv6_mask':
                        None,
                    }
                    fc_list.append(fc_model)
            return fc_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

        except Exception as err:
            err_msg = "Failed to get storage ports from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_ports(self, storage_id):
        """Collect every port on the array: FC ports first, then Ethernet."""
        ports = list(self.get_fc_port(storage_id))
        ports.extend(self.get_eth_port(storage_id))
        return ports

    def list_disks(self, storage_id):
        """Return the storage disks, wrapping failures consistently.

        :raises exception.InvalidResults: on unexpected errors.
        """
        try:
            return self.get_disks(storage_id)
        except exception.DelfinException as delfin_err:
            msg = "Failed to get storage disks from " \
                  "netapp cmode: %s" % (six.text_type(delfin_err))
            LOG.error(msg)
            raise delfin_err
        except Exception as unexpected_err:
            msg = "Failed to get storage disks from " \
                  "netapp cmode: %s" % (six.text_type(unexpected_err))
            LOG.error(msg)
            raise exception.InvalidResults(msg)

    def list_qtrees(self, storage_id):
        """Return qtree models joined with their filesystem junction paths.

        :param storage_id: id the qtrees are attributed to (may be None
            when only used for matching by callers such as list_shares).
        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            qt_list = []
            qt_info = self.ssh_do_exec(constant.QTREE_SHOW_DETAIL_COMMAND)
            fs_info = self.ssh_do_exec(constant.FS_SHOW_DETAIL_COMMAND)
            fs_map_list = []
            qt_map_list = []
            Tools.split_value_map_list(fs_info, fs_map_list, split=':')
            Tools.split_value_map_list(qt_info, qt_map_list, split=':')
            for qt_map in qt_map_list:
                if qt_map and 'QtreeName' in qt_map.keys():
                    fs_id = self.get_fs_id(qt_map['VserverName'],
                                           qt_map['VolumeName'])
                    qtree_path = None
                    # Find the owning filesystem's junction path ('-'
                    # means the volume is not mounted).
                    for fs_map in fs_map_list:
                        if fs_map and 'VserverName' in fs_map.keys() \
                                and fs_id == self.get_fs_id(
                                fs_map['VserverName'],
                                fs_map['VolumeName']) \
                                and fs_map['JunctionPath'] != '-':
                            qtree_path = fs_map['JunctionPath']
                            break
                    qt_id = self.get_qt_id(qt_map['VserverName'],
                                           qt_map['VolumeName'],
                                           qt_map['QtreeName'])
                    qtree_name = qt_map['QtreeName']
                    if qt_map['QtreeName'] and qtree_path:
                        qtree_path += '/' + qt_map['QtreeName']
                        qtree_path = qtree_path.replace('//', '/')
                    else:
                        # Unnamed qtree (volume root): fall back to the id.
                        qtree_name = qt_id
                    qt_model = {
                        'name': qtree_name,
                        'storage_id': storage_id,
                        'native_qtree_id': qt_id,
                        'path': qtree_path,
                        'native_filesystem_id': fs_id,
                        'security_mode': qt_map['SecurityStyle'],
                    }
                    qt_list.append(qt_model)
            return qt_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage qtrees from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_nfs_shares(self, storage_id, qtree_list, protocol_map):
        """Return NFS share models for volumes and their named qtrees.

        :param storage_id: id the shares are attributed to.
        :param qtree_list: qtree models from list_qtrees, used to emit one
            extra share per named qtree of a shared volume.
        :param protocol_map: vserver name -> supported protocol string.
        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            nfs_info = self.ssh_do_exec(constant.NFS_SHARE_SHOW_COMMAND)
            nfs_list = []
            fs_map_list = []
            Tools.split_value_map_list(nfs_info, fs_map_list, split=':')
            for fs_map in fs_map_list:
                if fs_map and 'VserverName' in fs_map.keys():
                    protocol = protocol_map.get(fs_map['VserverName'])
                    # Only vservers that actually serve NFS.
                    if constants.ShareProtocol.NFS in protocol:
                        fs_id = self.get_fs_id(fs_map['VserverName'],
                                               fs_map['VolumeName'])
                        share_name = \
                            fs_map['VserverName'] + '/' + fs_map['VolumeName']
                        qt_id = self.get_qt_id(fs_map['VserverName'],
                                               fs_map['VolumeName'], '')
                        qtree_id = None
                        for qtree in qtree_list:
                            if qtree['native_qtree_id'] == qt_id:
                                qtree_id = qt_id
                            # Named qtrees of this volume each get their
                            # own share entry.
                            if fs_id == qtree['native_filesystem_id']\
                                    and qtree['name'] != ""\
                                    and qtree['name'] != \
                                    qtree['native_qtree_id']:
                                qt_share_name = \
                                    share_name + '/' + qtree['name']
                                share = {
                                    'name':
                                    qt_share_name,
                                    'storage_id':
                                    storage_id,
                                    'native_share_id':
                                    qt_share_name + '_' +
                                    constants.ShareProtocol.NFS,
                                    'native_qtree_id':
                                    qtree['native_qtree_id'],
                                    'native_filesystem_id':
                                    qtree['native_filesystem_id'],
                                    'path':
                                    qtree['path'],
                                    'protocol':
                                    constants.ShareProtocol.NFS
                                }
                                nfs_list.append(share)
                        # The volume-level share itself.
                        share = {
                            'name':
                            share_name,
                            'storage_id':
                            storage_id,
                            'native_share_id':
                            share_name + '_' + constants.ShareProtocol.NFS,
                            'native_qtree_id':
                            qtree_id,
                            'native_filesystem_id':
                            fs_id,
                            'path':
                            fs_map['JunctionPath'],
                            'protocol':
                            constants.ShareProtocol.NFS
                        }
                        nfs_list.append(share)
            return nfs_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage nfs share from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err
        except Exception as err:
            err_msg = "Failed to get storage nfs share from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_cifs_shares(self, storage_id, vserver_name, qtree_list,
                        protocol_map):
        """Return CIFS share models for one vserver.

        :param storage_id: id the shares are attributed to.
        :param vserver_name: vserver whose CIFS shares are listed.
        :param qtree_list: qtree models used to resolve the owning qtree.
        :param protocol_map: vserver name -> supported protocol string.
        """
        shares_list = []
        share_info = self.ssh_do_exec(
            (constant.CIFS_SHARE_SHOW_DETAIL_COMMAND % {
                'vserver_name': vserver_name
            }))
        share_map_list = []
        Tools.split_value_map_list(share_info, share_map_list, split=':')
        for share_map in share_map_list:
            # '-' marks admin shares with no backing volume; skip them.
            if share_map and 'VolumeName' in share_map.keys() and \
                    share_map['VolumeName'] != '-':
                protocol_str = protocol_map.get(share_map['Vserver'])
                fs_id = self.get_fs_id(share_map['Vserver'],
                                       share_map['VolumeName'])
                share_id = fs_id + '_' + share_map['Share'] + '_'
                qtree_id = None
                # Match the share path's last component against known
                # qtrees; the volume root maps to an empty qtree name.
                for qtree in qtree_list:
                    name_array = share_map['Path'].split('/')
                    if len(name_array) > 0:
                        qtree_name = name_array[len(name_array) - 1]
                        if qtree_name == share_map['VolumeName']:
                            qtree_name = ''
                        qt_id = self.get_qt_id(share_map['Vserver'],
                                               share_map['VolumeName'],
                                               qtree_name)
                    else:
                        break
                    if qtree['native_qtree_id'] == qt_id:
                        qtree_id = qt_id
                        break
                if constants.ShareProtocol.CIFS in protocol_str:
                    share = {
                        'name': share_map['Share'],
                        'storage_id': storage_id,
                        'native_share_id':
                        share_id + constants.ShareProtocol.CIFS,
                        'native_qtree_id': qtree_id,
                        'native_filesystem_id': fs_id,
                        'path': share_map['Path'],
                        'protocol': constants.ShareProtocol.CIFS
                    }
                    shares_list.append(share)
        return shares_list

    def list_shares(self, storage_id):
        """Return all CIFS and NFS shares on the array.

        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            shares_list = []
            # storage_id is not needed for matching, so qtrees are
            # fetched with None here.
            qtree_list = self.list_qtrees(None)
            protocol_info = self.ssh_do_exec(
                constant.SHARE_AGREEMENT_SHOW_COMMAND)
            protocol_map = {}
            protocol_arr = self.get_table_data(protocol_info)
            for protocol in protocol_arr:
                agr_arr = protocol.split()
                if len(agr_arr) > 1:
                    protocol_map[agr_arr[0]] = agr_arr[1]
            vserver_info = self.ssh_do_exec(constant.VSERVER_SHOW_COMMAND)
            vserver_array = self.get_table_data(vserver_info)
            # CIFS shares are listed per vserver; NFS shares array-wide.
            for vserver in vserver_array:
                vserver_name = vserver.split()
                if len(vserver_name) > 1:
                    shares_list += self.get_cifs_shares(
                        storage_id, vserver_name[0], qtree_list, protocol_map)
            shares_list += self.get_nfs_shares(storage_id, qtree_list,
                                               protocol_map)
            return shares_list
        except exception.DelfinException as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise err

        except Exception as err:
            err_msg = "Failed to get storage shares from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_filesystems(self, storage_id):
        """Return filesystem models for the array.

        :raises exception.InvalidResults: on unexpected errors.
        """
        try:
            fs_list = self.get_filesystems(storage_id)
            return fs_list
        except exception.DelfinException as e:
            # BUGFIX: message previously said "storage volume" (copy/paste
            # from another method); this call lists filesystems.
            err_msg = "Failed to get storage filesystems from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage filesystems from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_quotas(self, storage_id):
        """Return quota models parsed from the quota detail listing.

        Builds a unique quota id from vserver/volume/type/qtree/target and
        resolves the owning filesystem and qtree ids.

        :raises exception.InvalidResults: on unexpected parsing errors.
        """
        try:
            quota_list = []
            quotas_info = self.ssh_do_exec(constant.QUOTA_SHOW_DETAIL_COMMAND)
            quota_map_list = []
            Tools.split_value_map_list(quotas_info, quota_map_list, ":")
            for quota_map in quota_map_list:
                user_group_name = None
                if quota_map and 'VolumeName' in quota_map.keys():
                    quota_id = \
                        quota_map['Vserver'] + '_' + \
                        quota_map['VolumeName'] + '_' + \
                        quota_map['Type'] + '_' + \
                        quota_map['QtreeName'] + '_' + \
                        quota_map['Target']
                    # Renamed from `type` to avoid shadowing the builtin.
                    quota_type = constant.QUOTA_TYPE.get(quota_map['Type'])
                    qt_id = self.get_qt_id(quota_map['Vserver'],
                                           quota_map['VolumeName'], '')
                    if quota_type == 'tree' and quota_map['Target'] != '':
                        qt_id += '/' + quota_map['Target']
                    else:
                        # BUGFIX: `type == 'user' or 'group'` was always
                        # truthy; only user/group quotas carry a target
                        # user or group name.
                        if quota_type in ('user', 'group'):
                            user_group_name = quota_map['Target']
                        if quota_map['QtreeName'] != '':
                            qt_id += '/' + quota_map['QtreeName']
                    fs_id = self.get_fs_id(quota_map['Vserver'],
                                           quota_map['VolumeName'])
                    quota = {
                        'native_quota_id':
                        quota_id,
                        'type':
                        quota_type,
                        'storage_id':
                        storage_id,
                        'native_filesystem_id':
                        fs_id,
                        'native_qtree_id':
                        qt_id,
                        'capacity_hard_limit':
                        self.get_size(quota_map['DiskLimit']),
                        'capacity_soft_limit':
                        self.get_size(quota_map['SoftDiskLimit']),
                        # '-' means the CLI reported no limit.
                        'file_hard_limit':
                        int(quota_map['FilesLimit'])
                        if quota_map['FilesLimit'] != '-' else '-',
                        'file_soft_limit':
                        int(quota_map['SoftFilesLimit'])
                        if quota_map['SoftFilesLimit'] != '-' else '-',
                        'file_count':
                        None,
                        'used_capacity':
                        None,
                        'user_group_name':
                        user_group_name
                    }
                    quota_list.append(quota)
            return quota_list
        except exception.DelfinException as e:
            # BUGFIX: message previously said "storage volume" (copy/paste).
            err_msg = "Failed to get storage quotas from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage quotas from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_alert_sources(self):
        """Collect node and management IPs usable as alert sources.

        :returns: list of {'host': ip} dicts — one per node plus the
            management IP.
        :raises exception.InvalidResults: on unexpected errors.
        """
        try:
            ip_list = []
            mgt_ip = self.ssh_pool.do_exec(constant.MGT_IP_COMMAND)
            node_ip = self.ssh_pool.do_exec(constant.NODE_IP_COMMAND)
            mgt_ip_array = self.get_table_data(mgt_ip)
            node_ip_array = self.get_table_data(node_ip)
            for node in node_ip_array:
                ip_array = node.split()
                # Rows with exactly 3 columns: the IP is the last one.
                if len(ip_array) == 3:
                    ip_list.append({'host': ip_array[2]})
            # NOTE(review): assumes the first management-table row holds
            # the cluster management IP in column 3 — confirm CLI format.
            ip_list.append({'host': mgt_ip_array[0].split()[2]})
            return ip_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
Beispiel #4
0
class SSHHandler(object):
    """Common class for Hpe 3parStor storage system."""

    # CLI commands issued over SSH by this handler.
    HPE3PAR_COMMAND_SHOWWSAPI = 'showwsapi'
    HPE3PAR_COMMAND_CHECKHEALTH = 'checkhealth vv vlun task snmp ' \
                                  'port pd node network ld dar cage cabling'
    HPE3PAR_COMMAND_SHOWALERT = 'showalert -d'
    HPE3PAR_COMMAND_REMOVEALERT = 'removealert -f %s'
    # Substring in removealert output meaning the alert id is unknown.
    ALERT_NOT_EXIST_MSG = 'Unable to read alert'
    HPE3PAR_COMMAND_SHOWNODE = 'shownode'
    HPE3PAR_COMMAND_SHOWNODE_CPU = 'shownode -cpu'
    HPE3PAR_COMMAND_SHOWEEPROM = 'showeeprom'
    HPE3PAR_COMMAND_SHOWPD = 'showpd'
    HPE3PAR_COMMAND_SHOWPD_I = 'showpd -i'
    HPE3PAR_COMMAND_SHOWPORT = 'showport'
    HPE3PAR_COMMAND_SHOWPORT_I = 'showport -i'
    HPE3PAR_COMMAND_SHOWPORT_PAR = 'showport -par'
    HPE3PAR_COMMAND_SHOWPORT_C = 'showport -c'
    HPE3PAR_COMMAND_SHOWPORT_ISCSI = 'showport -iscsi'
    HPE3PAR_COMMAND_SHOWPORT_RCIP = 'showport -rcip'
    HPE3PAR_COMMAND_SHOWPORT_FCOE = 'showport -fcoe'
    HPE3PAR_COMMAND_SHOWPORT_FS = 'showport -fs'
    HPE3PAR_COMMAND_SHOWHOSTSET_D = 'showhostset -d'
    HPE3PAR_COMMAND_SHOWVVSET_D = 'showvvset -d'
    HPE3PAR_COMMAND_SHOWHOST_D = 'showhost -d'
    HPE3PAR_COMMAND_SHOWVV = 'showvv'
    HPE3PAR_COMMAND_SHOWVLUN_T = 'showvlun -t'

    # System-reporter (performance history) query templates.
    # BUGFIX: removed a duplicate HPE3PAR_COMMAND_SHOWVV definition that
    # re-assigned the same value a second time.
    HPE3PAR_COMMAND_SRSTATPORT = 'srstatport -attime -groupby ' \
                                 'PORT_N,PORT_S,PORT_P -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATPD = 'srstatpd -attime -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATVV = 'srstatvv -attime -groupby VVID,VV_NAME' \
                               ' -btsecs %d -etsecs %d'
    HPE3PAR_COMMAND_SRSTATPD_ATTIME = 'srstatpd -attime'

    def __init__(self, **kwargs):
        """Store access info and create the pooled SSH connection."""
        # Raw access info kept for later reconnects/diagnostics.
        self.kwargs = kwargs
        self.ssh_pool = SSHPool(**kwargs)

    def login(self, context):
        """Probe the SSH connection by running `showwsapi`.

        :returns: the wsapi version string, or '' when unavailable.
        :raises Exception: re-raised when the command itself fails.
        """
        version = ''
        try:
            wsapi_info = self.exec_command(
                SSHHandler.HPE3PAR_COMMAND_SHOWWSAPI)
            if wsapi_info:
                version = self.get_version(wsapi_info)
        except Exception as e:
            LOG.error("Login error: %s", six.text_type(e))
            raise e
        return version

    def get_version(self, wsapi_infos):
        """Extract the wsapi version from `showwsapi` output.

        :returns: the version string, or '' when parsing fails.
        """
        version = ''
        try:
            parsed = self.parse_datas_to_list(wsapi_infos,
                                              consts.VERSION_PATTERN)
            first = parsed[0] if parsed else None
            if first:
                version = first.get('version')
        except Exception as e:
            LOG.error("Get version error: %s, wsapi info: %s" %
                      (six.text_type(e), wsapi_infos))
        return version

    def get_health_state(self):
        """Check the hardware and software health
           status of the storage system via `checkhealth`.

           :returns: raw CLI output describing overall health.
        """
        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_CHECKHEALTH)

    def get_all_alerts(self):
        """Return raw `showalert -d` output listing current alerts."""
        return self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWALERT)

    def remove_alerts(self, alert_id):
        """Clear an alert from the storage system via `removealert -f`.

        :param alert_id: id of the alert to remove; validated against
            ssh injection before substitution into the command.
        :raises exception.InvalidResults: when the CLI reports any error
            other than "alert does not exist".
        """
        utils.check_ssh_injection([alert_id])
        command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id
        res = self.exec_command(command_str)
        # Empty output means success; a "not exist" notice is only
        # logged, anything else is a real failure.
        if res:
            if self.ALERT_NOT_EXIST_MSG not in res:
                raise exception.InvalidResults(six.text_type(res))
            LOG.warning("Alert %s doesn't exist.", alert_id)

    def get_controllers(self):
        """List controller nodes from `shownode` as a list of dicts."""
        para_map = {'command': 'parse_node_table'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.NODE_PATTERN,
                                       para_map=para_map)

    def get_controllers_cpu(self):
        """Return per-node CPU info from `shownode -cpu` (best effort)."""
        para_map = {'command': 'parse_node_cpu'}
        # throw_excep=False: CPU info is optional enrichment.
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE_CPU,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.CPU_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_controllers_version(self):
        """Return {node_id: os_version} parsed from `showeeprom`."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWEEPROM,
                                       self.parse_node_version,
                                       throw_excep=False)

    def parse_node_version(self, resource_info, pattern_str, para_map=None):
        node_version_map = {}
        node_info_map = {}
        try:
            obj_infos = resource_info.split('\n')
            for obj_info in obj_infos:
                str_line = obj_info.strip()
                if str_line:
                    if str_line.startswith('Node:'):
                        str_info = self.split_str_by_colon(str_line)
                        node_info_map['node_id'] = str_info[1]
                    if str_line.startswith('OS version:'):
                        str_info = self.split_str_by_colon(str_line)
                        node_info_map['node_os_version'] = str_info[1]
                else:
                    if node_info_map:
                        node_version_map[node_info_map.get(
                            'node_id')] = node_info_map.get('node_os_version')
                        node_info_map = {}
        except Exception as e:
            err_msg = "Analyse node version info error: %s", six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return node_version_map

    def split_str_by_colon(self, str_line):
        str_info = []
        if str_line:
            # str_info[0] is the parsed attribute name, there are some special
            # characters such as spaces, brackets, etc.,
            # str_info[1] is the value
            str_info = str_line.split(':', 1)
            str_info[0] = str_info[0].strip()
            str_info[0] = str_info[0].replace(" ", "_") \
                .replace("(", "").replace(")", "").lower()
            if len(str_info) > 1:
                str_info[1] = str_info[1].strip()
        return str_info

    def get_disks(self):
        """List physical disks from `showpd` as a list of dicts."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPD,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.DISK_PATTERN)

    def get_disks_inventory(self):
        """Map disk_id -> inventory row parsed from `showpd -i`.

        Best effort: returns {} when the listing is unavailable.
        """
        para_map = {'command': 'parse_disk_table'}
        rows = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPD_I,
            self.parse_datas_to_list,
            pattern_str=consts.DISK_I_PATTERN,
            para_map=para_map,
            throw_excep=False)
        return {row.get('disk_id'): row for row in (rows or [])}

    def get_ports(self):
        """List ports from `showport` as a list of dicts."""
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT,
                                       self.parse_datas_to_list,
                                       pattern_str=consts.PORT_PATTERN)

    def get_ports_inventory(self):
        """Map port position -> last column of `showport -i` (best effort)."""
        para_map = {'key_position': 0, 'value_position': 'last'}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_I,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_I_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_config(self):
        """Map port position -> column 5 of `showport -par` (best effort)."""
        para_map = {'key_position': 0, 'value_position': 4}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_PAR,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_PER_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_iscsi(self):
        """Map 'n:s:p' -> iSCSI row parsed from `showport -iscsi`.

        Best effort: returns {} when the listing is unavailable.
        """
        rows = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_ISCSI,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_ISCSI_PATTERN,
            throw_excep=False)
        return {row.get('n:s:p'): row for row in (rows or [])}

    def get_ports_connected(self):
        """Map port position -> column 7 of `showport -c` (best effort)."""
        para_map = {'key_position': 0, 'value_position': 6}
        return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_C,
                                       self.parse_datas_to_map,
                                       pattern_str=consts.PORT_C_PATTERN,
                                       para_map=para_map,
                                       throw_excep=False)

    def get_ports_rcip(self):
        """Map 'n:s:p' -> RCIP row from `showport -rcip` (best effort)."""
        rcip_map = {}
        rcips = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_RCIP,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_RCIP_PATTERN,
            throw_excep=False)
        for rcip in (rcips or []):
            rcip_map[rcip.get('n:s:p')] = rcip
        return rcip_map

    def get_ports_fs(self):
        """Return file-service port rows as a dict keyed by 'n:s:p'."""
        rows = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FS,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_FS_PATTERN,
            throw_excep=False)
        # Re-key the parsed row list by port position for O(1) lookup.
        return {row.get('n:s:p'): row for row in rows} if rows else {}

    def get_ports_fcoe(self):
        """Return FCoE port rows as a dict keyed by the 'n:s:p' field."""
        rows = self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FCOE,
            self.parse_datas_to_list,
            pattern_str=consts.PORT_FCOE_PATTERN,
            throw_excep=False)
        # Re-key the parsed row list by port position for O(1) lookup.
        return {row.get('n:s:p'): row for row in rows} if rows else {}

    def parse_datas_to_list(self, resource_info, pattern_str, para_map=None):
        """Parse tabular CLI output into a list of row dicts.

        The line matching ``pattern_str`` is taken as the column-title row;
        every subsequent non-empty line is split on whitespace and handed to
        the table-specific parser selected by ``para_map['command']`` (disk,
        node, metric, set-group or view tables), or, by default, zipped with
        the lower-cased titles into a dict.

        :param resource_info: raw multi-line command output.
        :param pattern_str: regex identifying the title line.
        :param para_map: optional parser options; ``command`` selects the
            table parser and ``collect_time`` is filled in for metric tables.
        :raises exception.InvalidResults: on any parsing failure.
        """
        obj_list = []
        titles = []
        titles_size = 9999  # sentinel: skip data rows until titles are seen
        try:
            pattern = re.compile(pattern_str)
            command = para_map.get('command', '') if para_map else ''
            for obj_info in resource_info.split('\n'):
                str_line = obj_info.strip()
                if not str_line:
                    continue
                if pattern.search(str_line):
                    titles = str_line.split()
                    titles_size = len(titles)
                    continue
                str_info = str_line.split()
                cols_size = len(str_info)
                if command == 'parse_disk_table':
                    obj_list = self.parse_disk_table(
                        cols_size, titles_size, str_info, obj_list, titles)
                elif command == 'parse_node_table':
                    obj_list = self.parse_node_table(
                        cols_size, titles_size, str_info, obj_list, titles)
                elif command == 'parse_metric_table':
                    # A dashed rule marks the end of the metric table.
                    if '---------------------------------' in str_line:
                        break
                    if 'Time:' in str_line:
                        collect_time = Tools.get_numbers_in_brackets(
                            str_line, consts.SSH_COLLECT_TIME_PATTERN)
                        if collect_time:
                            collect_time = int(collect_time) * units.k
                        else:
                            # Fall back to "now" (ms) if no timestamp parsed.
                            collect_time = int(time.time() * units.k)
                        para_map['collect_time'] = collect_time
                    obj_list = self.parse_metric_table(
                        cols_size, titles_size, str_info, obj_list, titles,
                        para_map)
                elif command == 'parse_set_groups_table':
                    if '---------------------------------' in str_line:
                        break
                    obj_list = self.parse_set_groups_table(
                        cols_size, titles_size, str_info, obj_list)
                elif command == 'parse_view_table':
                    if '---------------------------------' in str_line:
                        break
                    obj_list = self.parse_view_table(
                        cols_size, titles_size, str_info, obj_list, titles)
                elif cols_size == titles_size:
                    # Default: zip titles (lower-cased, dashes removed)
                    # with the row's columns.
                    obj_model = {}
                    for i in range(cols_size):
                        key = titles[i].lower().replace('-', '')
                        obj_model[key] = str_info[i]
                    if obj_model:
                        obj_list.append(obj_model)
        except Exception as e:
            # BUG FIX: the original built a tuple ("...%s", text) instead of
            # formatting the message; use % so a real string is logged/raised.
            err_msg = "Analyse datas to list error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return obj_list

    def parse_datas_to_map(self, resource_info, pattern_str, para_map=None):
        """Parse tabular CLI output into a single key/value dict.

        The line matching ``pattern_str`` supplies the title count; each
        later row is split on whitespace and, when wide enough, contributes
        one entry keyed/valued by the ``key_position``/``value_position``
        columns from ``para_map`` ('last' selects the final column). With
        ``para_map['command'] == 'parse_node_cpu'`` rows are instead folded
        through :meth:`parse_node_cpu`.

        :raises exception.InvalidResults: on any parsing failure.
        """
        obj_model = {}
        titles_size = 9999  # sentinel: skip data rows until titles are seen
        try:
            pattern = re.compile(pattern_str)
            for obj_info in resource_info.split('\n'):
                str_line = obj_info.strip()
                if not str_line:
                    continue
                if pattern.search(str_line):
                    titles_size = len(str_line.split())
                    continue
                str_info = str_line.split()
                cols_size = len(str_info)
                if para_map and para_map.get('command',
                                             '') == 'parse_node_cpu':
                    obj_model = self.parse_node_cpu(
                        cols_size, titles_size, str_info, obj_model)
                elif cols_size >= titles_size:
                    key_position = para_map.get('key_position')
                    value_position = para_map.get('value_position')
                    if value_position == 'last':
                        value_position = cols_size - 1
                    obj_model[str_info[key_position]] = \
                        str_info[value_position]
        except Exception as e:
            # BUG FIX: the original built a tuple ("...%s", text) instead of
            # formatting the message; use % so a real string is logged/raised.
            err_msg = "Analyse datas to map error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return obj_model

    def parse_disk_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Append one disk-inventory dict built from a data row.

        The 'FW_Rev' title anchors the layout: the three columns ending at
        it are model, serial and firmware revision, while everything from
        column 4 up to them is joined as the manufacturer string.
        """
        if cols_size >= titles_size:
            fw_idx = self.get_index_of_key(titles, 'FW_Rev')
            if fw_idx:
                obj_list.append({
                    'disk_id': str_info[0],
                    'disk_mfr': ' '.join(str_info[4:fw_idx - 2]),
                    'disk_model': str_info[fw_idx - 2],
                    'disk_serial': str_info[fw_idx - 1],
                    'disk_fw_rev': str_info[fw_idx],
                })
        return obj_list

    def parse_node_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Append one node dict built by zipping titles with a data row.

        Repeated 'Mem(MB)' columns are disambiguated with a prefix looked
        up in consts.SSH_NODE_MEM_TYPE (first, second, ... occurrence).
        """
        if cols_size >= titles_size:
            node_attrs = {}
            mem_occurrence = 1
            for idx in range(cols_size):
                name = titles[idx].lower().replace('-', '')
                if name == 'mem(mb)':
                    prefix = consts.SSH_NODE_MEM_TYPE.get(mem_occurrence)
                    mem_occurrence += 1
                    name = '%s%s' % (prefix, name)
                node_attrs[name] = str_info[idx]
            if node_attrs:
                obj_list.append(node_attrs)
        return obj_list

    def parse_node_cpu(self, cols_size, titles_size, str_info, obj_map):
        """Accumulate per-node CPU counts from one data row.

        Column 0 is the node id and column 4 the CPU description; the map
        counts how many times each description appears per node.
        """
        if cols_size >= titles_size:
            node_id, cpu_info = str_info[0], str_info[4]
            node_cpus = obj_map.setdefault(node_id, {})
            node_cpus[cpu_info] = node_cpus.get(cpu_info, 0) + 1
        return obj_map

    def parse_metric_table(self, cols_size, titles_size, str_info, obj_list,
                           titles, para_map):
        """Append one metric dict built by zipping titles with a data row.

        Each 'rd' title starts a new metric section, so later columns get
        the section prefix from consts.SSH_METRIC_TYPE. A 'collect_time'
        carried in para_map is copied onto the row.
        """
        if cols_size == titles_size:
            row = {}
            section = 1
            prefix = ''
            for idx, title in enumerate(titles):
                name = title.lower().replace('-', '')
                if name == 'rd':
                    prefix = consts.SSH_METRIC_TYPE.get(section)
                    section += 1
                row['%s%s' % (prefix, name)] = str_info[idx]
            if row:
                if para_map and para_map.get('collect_time'):
                    row['collect_time'] = para_map.get('collect_time')
                obj_list.append(row)
        return obj_list

    def get_index_of_key(self, titles_list, key):
        """Return the index of the first title containing ``key``.

        :param titles_list: list of column-title strings (may be None/empty).
        :param key: substring to look for.
        :returns: 0-based index of the first match, or None when absent.
        """
        # Idiom fix: use enumerate instead of list.index(), which rescans
        # the list (O(n^2)) for the index the loop already knows.
        if titles_list:
            for index, title in enumerate(titles_list):
                if key in title:
                    return index
        return None

    def get_resources_info(self,
                           command,
                           parse_type,
                           pattern_str=None,
                           para_map=None,
                           throw_excep=True):
        """Execute ``command`` over SSH and parse its output.

        :param command: CLI command string to run.
        :param parse_type: parser callable (parse_datas_to_list/_map),
            invoked as parse_type(output, pattern_str, para_map=para_map).
        :param pattern_str: regex identifying the output's title line.
        :param para_map: extra options forwarded to the parser.
        :param throw_excep: when False, parse errors are logged and None is
            returned instead of re-raising.
        :returns: the parser's result, or None when output is empty/failed.
        """
        # Fix: the local result was named 're', shadowing the regex module
        # used elsewhere in this file; renamed for clarity and safety.
        cli_out = self.exec_command(command)
        resources_info = None
        try:
            if cli_out:
                resources_info = parse_type(cli_out, pattern_str,
                                            para_map=para_map)
        except Exception as e:
            LOG.error("Get %s info error: %s" % (command, six.text_type(e)))
            if throw_excep:
                raise e
        return resources_info

    def exec_command(self, command):
        """Run ``command`` via the SSH pool and screen its output.

        :raises NotImplementedError: when the CLI reports an unknown
            command or option.
        :raises exception.StorageBackendException: when the array refuses
            further CLI connections.
        :returns: the raw command output (possibly empty).
        """
        output = self.ssh_pool.do_exec(command)
        if output:
            unsupported = ('invalid command name' in output
                           or 'Invalid option' in output)
            if unsupported:
                LOG.warning(output)
                raise NotImplementedError(output)
            if 'Too many local CLI connections' in output:
                LOG.error("command %s failed: %s" % (command, output))
                raise exception.StorageBackendException(output)
        return output

    def get_volumes(self):
        """List volume rows parsed from the showvv command output."""
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWVV,
            self.parse_datas_to_list,
            pattern_str=consts.VOLUME_PATTERN)

    def get_port_metrics(self, start_time, end_time):
        """Collect port performance rows for a [start, end] window.

        Timestamps arrive in milliseconds and are converted to seconds
        for the CLI command template.
        """
        begin_s = int(start_time / units.k)
        finish_s = int(end_time / units.k)
        command = SSHHandler.HPE3PAR_COMMAND_SRSTATPORT % (begin_s, finish_s)
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATPORT_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def get_disk_metrics(self, start_time, end_time):
        """Collect disk performance rows.

        Uses the windowed srstatpd command when both bounds (ms) are
        given; otherwise falls back to the 'attime' variant.
        """
        if start_time and end_time:
            command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD % (
                int(start_time / units.k), int(end_time / units.k))
        else:
            command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD_ATTIME
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATPD_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def get_volume_metrics(self, start_time, end_time):
        """Collect volume performance rows for a [start, end] window.

        Timestamps arrive in milliseconds and are converted to seconds
        for the CLI command template.
        """
        begin_s = int(start_time / units.k)
        finish_s = int(end_time / units.k)
        command = SSHHandler.HPE3PAR_COMMAND_SRSTATVV % (begin_s, finish_s)
        return self.get_resources_info(
            command,
            self.parse_datas_to_list,
            pattern_str=consts.SRSTATVV_PATTERN,
            para_map={'command': 'parse_metric_table'})

    def list_storage_host_groups(self):
        """List host-set rows parsed with the set-groups table parser."""
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWHOSTSET_D,
            self.parse_datas_to_list,
            pattern_str=consts.HOST_OR_VV_SET_PATTERN,
            para_map={'command': 'parse_set_groups_table'})

    def list_volume_groups(self):
        """List volume-set rows parsed with the set-groups table parser."""
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWVVSET_D,
            self.parse_datas_to_list,
            pattern_str=consts.HOST_OR_VV_SET_PATTERN,
            para_map={'command': 'parse_set_groups_table'})

    def parse_set_groups_table(self, cols_size, titles_size, str_info,
                               obj_list):
        """Fold one set-group row into obj_list.

        A full-width row opens a new group (id, name, first member,
        comment); a single-column continuation row appends one more member
        to the most recently opened group. A bare '-' placeholder (which
        becomes empty after stripping dashes) is treated as "no member".
        """
        if cols_size >= titles_size:
            first_member = str_info[2]
            members = [first_member] if first_member.replace('-', '') else []
            obj_list.append({
                'id': str_info[0],
                'name': str_info[1],
                'members': members,
                'comment': ' '.join(str_info[3:]).replace('-', ''),
            })
        elif obj_list and cols_size == 1:
            member = str_info[0]
            if member.replace('-', ''):
                latest = obj_list[-1]
                if latest and latest.get('members'):
                    latest['members'].append(member)
                else:
                    latest['members'] = [member]

        return obj_list

    def parse_view_table(self, cols_size, titles_size, str_info, obj_list,
                         titles):
        """Append one view dict built by zipping titles with a data row.

        Only the first ``titles_size`` columns are consumed; extra trailing
        columns in the row are ignored.
        """
        if cols_size >= titles_size:
            row = {
                titles[i].lower().replace('-', ''): str_info[i]
                for i in range(titles_size)
            }
            if row:
                obj_list.append(row)
        return obj_list

    def get_resources_ids(self, command, pattern_str, para_map=None):
        """Build a name->id style map from ``command`` output.

        When no para_map is supplied, column 1 of each row is used as the
        key and column 0 as the value. Errors are logged, not raised.
        """
        effective_map = para_map or {'key_position': 1, 'value_position': 0}
        return self.get_resources_info(command,
                                       self.parse_datas_to_map,
                                       pattern_str=pattern_str,
                                       para_map=effective_map,
                                       throw_excep=False)

    def list_storage_host_initiators(self):
        """List host initiator rows parsed from the showhost -d output."""
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWHOST_D,
            self.parse_datas_to_list,
            pattern_str=consts.HOST_OR_VV_PATTERN)

    def list_masking_views(self):
        """List VLUN rows parsed with the view table parser."""
        return self.get_resources_info(
            SSHHandler.HPE3PAR_COMMAND_SHOWVLUN_T,
            self.parse_datas_to_list,
            pattern_str=consts.VLUN_PATTERN,
            para_map={'command': 'parse_view_table'})